Diffstat (limited to 'lib')
-rw-r--r--  lib/ARCMigrate/ARCMT.cpp | 2
-rw-r--r--  lib/ARCMigrate/ObjCMT.cpp | 6
-rw-r--r--  lib/ARCMigrate/PlistReporter.cpp | 3
-rw-r--r--  lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp | 2
-rw-r--r--  lib/ARCMigrate/TransGCAttrs.cpp | 7
-rw-r--r--  lib/ARCMigrate/TransProperties.cpp | 2
-rw-r--r--  lib/ARCMigrate/TransRetainReleaseDealloc.cpp | 4
-rw-r--r--  lib/ARCMigrate/TransUnbridgedCasts.cpp | 5
-rw-r--r--  lib/ARCMigrate/TransformActions.cpp | 24
-rw-r--r--  lib/ARCMigrate/Transforms.cpp | 4
-rw-r--r--  lib/ARCMigrate/Transforms.h | 12
-rw-r--r--  lib/AST/APValue.cpp | 79
-rw-r--r--  lib/AST/ASTContext.cpp | 1307
-rw-r--r--  lib/AST/ASTDiagnostic.cpp | 2
-rw-r--r--  lib/AST/ASTDumper.cpp | 60
-rw-r--r--  lib/AST/ASTImporter.cpp | 2911
-rw-r--r--  lib/AST/ASTStructuralEquivalence.cpp | 535
-rw-r--r--  lib/AST/CMakeLists.txt | 1
-rw-r--r--  lib/AST/CXXInheritance.cpp | 33
-rw-r--r--  lib/AST/CommentBriefParser.cpp | 4
-rw-r--r--  lib/AST/CommentLexer.cpp | 246
-rw-r--r--  lib/AST/CommentSema.cpp | 2
-rw-r--r--  lib/AST/ComparisonCategories.cpp | 211
-rw-r--r--  lib/AST/Decl.cpp | 143
-rw-r--r--  lib/AST/DeclBase.cpp | 278
-rw-r--r--  lib/AST/DeclCXX.cpp | 452
-rw-r--r--  lib/AST/DeclFriend.cpp | 6
-rw-r--r--  lib/AST/DeclObjC.cpp | 142
-rw-r--r--  lib/AST/DeclOpenMP.cpp | 9
-rw-r--r--  lib/AST/DeclPrinter.cpp | 51
-rw-r--r--  lib/AST/DeclTemplate.cpp | 56
-rw-r--r--  lib/AST/Expr.cpp | 130
-rw-r--r--  lib/AST/ExprCXX.cpp | 76
-rw-r--r--  lib/AST/ExprClassification.cpp | 26
-rw-r--r--  lib/AST/ExprConstant.cpp | 1486
-rw-r--r--  lib/AST/ExternalASTMerger.cpp | 45
-rw-r--r--  lib/AST/ItaniumCXXABI.cpp | 63
-rw-r--r--  lib/AST/ItaniumMangle.cpp | 125
-rw-r--r--  lib/AST/MicrosoftCXXABI.cpp | 4
-rw-r--r--  lib/AST/MicrosoftMangle.cpp | 329
-rw-r--r--  lib/AST/NSAPI.cpp | 31
-rw-r--r--  lib/AST/NestedNameSpecifier.cpp | 22
-rw-r--r--  lib/AST/ODRHash.cpp | 212
-rw-r--r--  lib/AST/OpenMPClause.cpp | 4
-rw-r--r--  lib/AST/ParentMap.cpp | 3
-rw-r--r--  lib/AST/QualTypeNames.cpp | 20
-rw-r--r--  lib/AST/RawCommentList.cpp | 110
-rw-r--r--  lib/AST/RecordLayoutBuilder.cpp | 155
-rw-r--r--  lib/AST/Stmt.cpp | 78
-rw-r--r--  lib/AST/StmtCXX.cpp | 10
-rw-r--r--  lib/AST/StmtPrinter.cpp | 253
-rw-r--r--  lib/AST/StmtProfile.cpp | 64
-rw-r--r--  lib/AST/TemplateBase.cpp | 4
-rw-r--r--  lib/AST/TemplateName.cpp | 5
-rw-r--r--  lib/AST/Type.cpp | 702
-rw-r--r--  lib/AST/TypeLoc.cpp | 38
-rw-r--r--  lib/AST/TypePrinter.cpp | 254
-rw-r--r--  lib/AST/VTableBuilder.cpp | 62
-rw-r--r--  lib/ASTMatchers/ASTMatchFinder.cpp | 37
-rw-r--r--  lib/ASTMatchers/ASTMatchersInternal.cpp | 40
-rw-r--r--  lib/ASTMatchers/Dynamic/Marshallers.h | 50
-rw-r--r--  lib/ASTMatchers/Dynamic/Parser.cpp | 30
-rw-r--r--  lib/ASTMatchers/Dynamic/Registry.cpp | 16
-rw-r--r--  lib/ASTMatchers/Dynamic/VariantValue.cpp | 2
-rw-r--r--  lib/Analysis/AnalysisDeclContext.cpp | 165
-rw-r--r--  lib/Analysis/BodyFarm.cpp | 68
-rw-r--r--  lib/Analysis/CFG.cpp | 644
-rw-r--r--  lib/Analysis/CFGReachabilityAnalysis.cpp | 11
-rw-r--r--  lib/Analysis/CMakeLists.txt | 1
-rw-r--r--  lib/Analysis/CloneDetection.cpp | 6
-rw-r--r--  lib/Analysis/ConstructionContext.cpp | 184
-rw-r--r--  lib/Analysis/Consumed.cpp | 171
-rw-r--r--  lib/Analysis/Dominators.cpp | 4
-rw-r--r--  lib/Analysis/LiveVariables.cpp | 87
-rw-r--r--  lib/Analysis/PostOrderCFGView.cpp | 7
-rw-r--r--  lib/Analysis/PrintfFormatString.cpp | 114
-rw-r--r--  lib/Analysis/ReachableCode.cpp | 25
-rw-r--r--  lib/Analysis/ThreadSafety.cpp | 547
-rw-r--r--  lib/Analysis/ThreadSafetyCommon.cpp | 107
-rw-r--r--  lib/Analysis/ThreadSafetyTIL.cpp | 38
-rw-r--r--  lib/Analysis/UninitializedValues.cpp | 172
-rw-r--r--  lib/Basic/Builtins.cpp | 23
-rw-r--r--  lib/Basic/CMakeLists.txt | 3
-rw-r--r--  lib/Basic/Cuda.cpp | 103
-rw-r--r--  lib/Basic/Diagnostic.cpp | 171
-rw-r--r--  lib/Basic/DiagnosticIDs.cpp | 6
-rw-r--r--  lib/Basic/DiagnosticOptions.cpp | 5
-rw-r--r--  lib/Basic/FileManager.cpp | 28
-rw-r--r--  lib/Basic/FileSystemStatCache.cpp | 7
-rw-r--r--  lib/Basic/IdentifierTable.cpp | 58
-rw-r--r--  lib/Basic/LangOptions.cpp | 12
-rw-r--r--  lib/Basic/Module.cpp | 19
-rw-r--r--  lib/Basic/ObjCRuntime.cpp | 7
-rw-r--r--  lib/Basic/OpenMPKinds.cpp | 14
-rw-r--r--  lib/Basic/OperatorPrecedence.cpp | 2
-rw-r--r--  lib/Basic/Sanitizers.cpp | 5
-rw-r--r--  lib/Basic/SourceLocation.cpp | 30
-rw-r--r--  lib/Basic/SourceManager.cpp | 113
-rw-r--r--  lib/Basic/TargetInfo.cpp | 105
-rw-r--r--  lib/Basic/Targets.cpp | 22
-rw-r--r--  lib/Basic/Targets.h | 2
-rw-r--r--  lib/Basic/Targets/AArch64.cpp | 64
-rw-r--r--  lib/Basic/Targets/AArch64.h | 9
-rw-r--r--  lib/Basic/Targets/AMDGPU.cpp | 309
-rw-r--r--  lib/Basic/Targets/AMDGPU.h | 234
-rw-r--r--  lib/Basic/Targets/ARM.cpp | 34
-rw-r--r--  lib/Basic/Targets/ARM.h | 8
-rw-r--r--  lib/Basic/Targets/AVR.cpp | 48
-rw-r--r--  lib/Basic/Targets/AVR.h | 3
-rw-r--r--  lib/Basic/Targets/BPF.cpp | 12
-rw-r--r--  lib/Basic/Targets/BPF.h | 19
-rw-r--r--  lib/Basic/Targets/Hexagon.cpp | 35
-rw-r--r--  lib/Basic/Targets/Hexagon.h | 2
-rw-r--r--  lib/Basic/Targets/Lanai.cpp | 4
-rw-r--r--  lib/Basic/Targets/Lanai.h | 2
-rw-r--r--  lib/Basic/Targets/Mips.cpp | 43
-rw-r--r--  lib/Basic/Targets/Mips.h | 16
-rw-r--r--  lib/Basic/Targets/NVPTX.cpp | 37
-rw-r--r--  lib/Basic/Targets/NVPTX.h | 10
-rw-r--r--  lib/Basic/Targets/Nios2.h | 4
-rw-r--r--  lib/Basic/Targets/OSTargets.h | 46
-rw-r--r--  lib/Basic/Targets/PPC.cpp | 185
-rw-r--r--  lib/Basic/Targets/PPC.h | 135
-rw-r--r--  lib/Basic/Targets/RISCV.cpp | 104
-rw-r--r--  lib/Basic/Targets/RISCV.h | 114
-rw-r--r--  lib/Basic/Targets/SPIR.h | 5
-rw-r--r--  lib/Basic/Targets/Sparc.cpp | 140
-rw-r--r--  lib/Basic/Targets/Sparc.h | 49
-rw-r--r--  lib/Basic/Targets/SystemZ.cpp | 56
-rw-r--r--  lib/Basic/Targets/SystemZ.h | 6
-rw-r--r--  lib/Basic/Targets/WebAssembly.cpp | 32
-rw-r--r--  lib/Basic/Targets/WebAssembly.h | 18
-rw-r--r--  lib/Basic/Targets/X86.cpp | 231
-rw-r--r--  lib/Basic/Targets/X86.h | 64
-rw-r--r--  lib/Basic/VersionTuple.cpp | 100
-rw-r--r--  lib/Basic/VirtualFileSystem.cpp | 363
-rw-r--r--  lib/Basic/XRayInstr.cpp | 30
-rw-r--r--  lib/Basic/XRayLists.cpp | 24
-rw-r--r--  lib/CodeGen/ABIInfo.h | 15
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 282
-rw-r--r--  lib/CodeGen/CGAtomic.cpp | 67
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 493
-rw-r--r--  lib/CodeGen/CGBlocks.h | 9
-rw-r--r--  lib/CodeGen/CGBuilder.h | 37
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 3055
-rw-r--r--  lib/CodeGen/CGCUDANV.cpp | 394
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 29
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp | 14
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 29
-rw-r--r--  lib/CodeGen/CGCall.cpp | 510
-rw-r--r--  lib/CodeGen/CGCall.h | 114
-rw-r--r--  lib/CodeGen/CGClass.cpp | 111
-rw-r--r--  lib/CodeGen/CGCleanup.cpp | 48
-rw-r--r--  lib/CodeGen/CGCleanup.h | 9
-rw-r--r--  lib/CodeGen/CGCoroutine.cpp | 73
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 508
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 69
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 568
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 25
-rw-r--r--  lib/CodeGen/CGException.cpp | 299
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 201
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 454
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 80
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 13
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 174
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 108
-rw-r--r--  lib/CodeGen/CGGPUBuiltin.cpp | 13
-rw-r--r--  lib/CodeGen/CGLoopInfo.h | 70
-rw-r--r--  lib/CodeGen/CGNonTrivialStruct.cpp | 885
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 68
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 1808
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 47
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.cpp | 81
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.h | 33
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.cpp | 4158
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.h | 998
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 2300
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.h | 165
-rw-r--r--  lib/CodeGen/CGRecordLayout.h | 18
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 83
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 51
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 1676
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 2
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 283
-rw-r--r--  lib/CodeGen/CGVTables.h | 10
-rw-r--r--  lib/CodeGen/CGValue.h | 44
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 7
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp | 54
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 255
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 928
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 1020
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 155
-rw-r--r--  lib/CodeGen/CodeGenPGO.cpp | 8
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp | 31
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h | 9
-rw-r--r--  lib/CodeGen/CodeGenTypeCache.h | 2
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 27
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 9
-rw-r--r--  lib/CodeGen/ConstantEmitter.h | 2
-rw-r--r--  lib/CodeGen/CoverageMappingGen.cpp | 153
-rw-r--r--  lib/CodeGen/CoverageMappingGen.h | 16
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 423
-rw-r--r--  lib/CodeGen/MacroPPCallbacks.cpp | 3
-rw-r--r--  lib/CodeGen/MacroPPCallbacks.h | 3
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp | 264
-rw-r--r--  lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 12
-rw-r--r--  lib/CodeGen/SanitizerMetadata.cpp | 9
-rw-r--r--  lib/CodeGen/SwiftCallingConv.cpp | 44
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 615
-rw-r--r--  lib/CodeGen/TargetInfo.h | 12
-rw-r--r--  lib/CodeGen/VarBypassDetector.cpp | 2
-rw-r--r--  lib/Driver/Action.cpp | 47
-rw-r--r--  lib/Driver/CMakeLists.txt | 2
-rw-r--r--  lib/Driver/Compilation.cpp | 66
-rw-r--r--  lib/Driver/Distro.cpp | 3
-rw-r--r--  lib/Driver/Driver.cpp | 992
-rw-r--r--  lib/Driver/Job.cpp | 59
-rw-r--r--  lib/Driver/Multilib.cpp | 31
-rw-r--r--  lib/Driver/SanitizerArgs.cpp | 192
-rw-r--r--  lib/Driver/ToolChain.cpp | 124
-rw-r--r--  lib/Driver/ToolChains/AMDGPU.cpp | 1
-rw-r--r--  lib/Driver/ToolChains/Ananas.cpp | 38
-rw-r--r--  lib/Driver/ToolChains/Arch/AArch64.cpp | 6
-rw-r--r--  lib/Driver/ToolChains/Arch/ARM.cpp | 24
-rw-r--r--  lib/Driver/ToolChains/Arch/Mips.cpp | 78
-rw-r--r--  lib/Driver/ToolChains/Arch/Mips.h | 3
-rw-r--r--  lib/Driver/ToolChains/Arch/PPC.cpp | 10
-rw-r--r--  lib/Driver/ToolChains/Arch/PPC.h | 7
-rw-r--r--  lib/Driver/ToolChains/Arch/RISCV.cpp | 378
-rw-r--r--  lib/Driver/ToolChains/Arch/RISCV.h | 32
-rw-r--r--  lib/Driver/ToolChains/Arch/Sparc.cpp | 23
-rw-r--r--  lib/Driver/ToolChains/Arch/X86.cpp | 43
-rw-r--r--  lib/Driver/ToolChains/BareMetal.cpp | 33
-rw-r--r--  lib/Driver/ToolChains/BareMetal.h | 1
-rw-r--r--  lib/Driver/ToolChains/Clang.cpp | 555
-rw-r--r--  lib/Driver/ToolChains/Clang.h | 6
-rw-r--r--  lib/Driver/ToolChains/CloudABI.cpp | 13
-rw-r--r--  lib/Driver/ToolChains/CloudABI.h | 4
-rw-r--r--  lib/Driver/ToolChains/CommonArgs.cpp | 344
-rw-r--r--  lib/Driver/ToolChains/CommonArgs.h | 24
-rw-r--r--  lib/Driver/ToolChains/Contiki.h | 4
-rw-r--r--  lib/Driver/ToolChains/CrossWindows.cpp | 3
-rw-r--r--  lib/Driver/ToolChains/Cuda.cpp | 228
-rw-r--r--  lib/Driver/ToolChains/Cuda.h | 28
-rw-r--r--  lib/Driver/ToolChains/Darwin.cpp | 197
-rw-r--r--  lib/Driver/ToolChains/Darwin.h | 15
-rw-r--r--  lib/Driver/ToolChains/FreeBSD.cpp | 69
-rw-r--r--  lib/Driver/ToolChains/Fuchsia.cpp | 50
-rw-r--r--  lib/Driver/ToolChains/Fuchsia.h | 5
-rw-r--r--  lib/Driver/ToolChains/Gnu.cpp | 655
-rw-r--r--  lib/Driver/ToolChains/Gnu.h | 55
-rw-r--r--  lib/Driver/ToolChains/HIP.cpp | 350
-rw-r--r--  lib/Driver/ToolChains/HIP.h | 123
-rw-r--r--  lib/Driver/ToolChains/Haiku.cpp | 6
-rw-r--r--  lib/Driver/ToolChains/Haiku.h | 4
-rw-r--r--  lib/Driver/ToolChains/Hexagon.cpp | 59
-rw-r--r--  lib/Driver/ToolChains/Hexagon.h | 1
-rw-r--r--  lib/Driver/ToolChains/Lanai.h | 4
-rw-r--r--  lib/Driver/ToolChains/Linux.cpp | 120
-rw-r--r--  lib/Driver/ToolChains/Linux.h | 4
-rw-r--r--  lib/Driver/ToolChains/MSVC.cpp | 37
-rw-r--r--  lib/Driver/ToolChains/MSVC.h | 4
-rw-r--r--  lib/Driver/ToolChains/MinGW.cpp | 88
-rw-r--r--  lib/Driver/ToolChains/MinGW.h | 1
-rw-r--r--  lib/Driver/ToolChains/MipsLinux.cpp | 9
-rw-r--r--  lib/Driver/ToolChains/MipsLinux.h | 4
-rw-r--r--  lib/Driver/ToolChains/Myriad.cpp | 7
-rw-r--r--  lib/Driver/ToolChains/Myriad.h | 4
-rw-r--r--  lib/Driver/ToolChains/NaCl.cpp | 20
-rw-r--r--  lib/Driver/ToolChains/NaCl.h | 4
-rw-r--r--  lib/Driver/ToolChains/NetBSD.cpp | 62
-rw-r--r--  lib/Driver/ToolChains/NetBSD.h | 4
-rw-r--r--  lib/Driver/ToolChains/OpenBSD.cpp | 49
-rw-r--r--  lib/Driver/ToolChains/OpenBSD.h | 4
-rw-r--r--  lib/Driver/ToolChains/PS4CPU.cpp | 11
-rw-r--r--  lib/Driver/ToolChains/PS4CPU.h | 6
-rw-r--r--  lib/Driver/ToolChains/Solaris.cpp | 173
-rw-r--r--  lib/Driver/ToolChains/Solaris.h | 11
-rw-r--r--  lib/Driver/ToolChains/WebAssembly.cpp | 22
-rw-r--r--  lib/Driver/ToolChains/WebAssembly.h | 2
-rw-r--r--  lib/Driver/Types.cpp | 19
-rw-r--r--  lib/Driver/XRayArgs.cpp | 99
-rw-r--r--  lib/Edit/Commit.cpp | 25
-rw-r--r--  lib/Edit/EditedSource.cpp | 27
-rw-r--r--  lib/Edit/RewriteObjCFoundationAPI.cpp | 8
-rw-r--r--  lib/Format/AffectedRangeManager.cpp | 22
-rw-r--r--  lib/Format/AffectedRangeManager.h | 11
-rw-r--r--  lib/Format/BreakableToken.cpp | 76
-rw-r--r--  lib/Format/BreakableToken.h | 56
-rw-r--r--  lib/Format/CMakeLists.txt | 1
-rw-r--r--  lib/Format/ContinuationIndenter.cpp | 458
-rw-r--r--  lib/Format/ContinuationIndenter.h | 158
-rw-r--r--  lib/Format/Encoding.h | 14
-rw-r--r--  lib/Format/Format.cpp | 745
-rw-r--r--  lib/Format/FormatInternal.h | 4
-rw-r--r--  lib/Format/FormatToken.cpp | 3
-rw-r--r--  lib/Format/FormatToken.h | 175
-rw-r--r--  lib/Format/FormatTokenLexer.cpp | 10
-rw-r--r--  lib/Format/FormatTokenLexer.h | 2
-rw-r--r--  lib/Format/NamespaceEndCommentsFixer.cpp | 29
-rw-r--r--  lib/Format/NamespaceEndCommentsFixer.h | 12
-rw-r--r--  lib/Format/SortJavaScriptImports.cpp | 20
-rw-r--r--  lib/Format/SortJavaScriptImports.h | 2
-rw-r--r--  lib/Format/TokenAnalyzer.cpp | 57
-rw-r--r--  lib/Format/TokenAnalyzer.h | 51
-rw-r--r--  lib/Format/TokenAnnotator.cpp | 617
-rw-r--r--  lib/Format/TokenAnnotator.h | 12
-rw-r--r--  lib/Format/UnwrappedLineFormatter.cpp | 119
-rw-r--r--  lib/Format/UnwrappedLineFormatter.h | 13
-rw-r--r--  lib/Format/UnwrappedLineParser.cpp | 302
-rw-r--r--  lib/Format/UnwrappedLineParser.h | 44
-rw-r--r--  lib/Format/UsingDeclarationsSorter.cpp | 7
-rw-r--r--  lib/Format/UsingDeclarationsSorter.h | 2
-rw-r--r--  lib/Format/WhitespaceManager.cpp | 4
-rw-r--r--  lib/Format/WhitespaceManager.h | 36
-rw-r--r--  lib/Frontend/ASTConsumers.cpp | 19
-rw-r--r--  lib/Frontend/ASTMerge.cpp | 2
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 524
-rw-r--r--  lib/Frontend/CMakeLists.txt | 1
-rw-r--r--  lib/Frontend/CacheTokens.cpp | 32
-rw-r--r--  lib/Frontend/CodeGenOptions.cpp | 2
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 181
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 576
-rw-r--r--  lib/Frontend/DependencyFile.cpp | 45
-rw-r--r--  lib/Frontend/DependencyGraph.cpp | 23
-rw-r--r--  lib/Frontend/DiagnosticRenderer.cpp | 168
-rw-r--r--  lib/Frontend/FrontendAction.cpp | 41
-rw-r--r--  lib/Frontend/FrontendActions.cpp | 254
-rw-r--r--  lib/Frontend/FrontendOptions.cpp | 3
-rw-r--r--  lib/Frontend/FrontendTiming.cpp | 20
-rw-r--r--  lib/Frontend/HeaderIncludeGen.cpp | 16
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 94
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 235
-rw-r--r--  lib/Frontend/LayoutOverrideSource.cpp | 2
-rw-r--r--  lib/Frontend/ModuleDependencyCollector.cpp | 6
-rw-r--r--  lib/Frontend/MultiplexConsumer.cpp | 24
-rw-r--r--  lib/Frontend/PCHContainerOperations.cpp | 2
-rw-r--r--  lib/Frontend/PrecompiledPreamble.cpp | 60
-rw-r--r--  lib/Frontend/PrintPreprocessedOutput.cpp | 25
-rw-r--r--  lib/Frontend/Rewrite/FixItRewriter.cpp | 31
-rw-r--r--  lib/Frontend/Rewrite/HTMLPrint.cpp | 7
-rw-r--r--  lib/Frontend/Rewrite/InclusionRewriter.cpp | 38
-rw-r--r--  lib/Frontend/Rewrite/RewriteModernObjC.cpp | 31
-rw-r--r--  lib/Frontend/Rewrite/RewriteObjC.cpp | 30
-rw-r--r--  lib/Frontend/SerializedDiagnosticPrinter.cpp | 76
-rw-r--r--  lib/Frontend/SerializedDiagnosticReader.cpp | 31
-rw-r--r--  lib/Frontend/TextDiagnostic.cpp | 70
-rw-r--r--  lib/Frontend/TextDiagnosticBuffer.cpp | 21
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 2
-rw-r--r--  lib/Frontend/VerifyDiagnosticConsumer.cpp | 133
-rw-r--r--  lib/FrontendTool/ExecuteCompilerInvocation.cpp | 11
-rw-r--r--  lib/Headers/CMakeLists.txt | 26
-rw-r--r--  lib/Headers/__clang_cuda_builtin_vars.h | 2
-rw-r--r--  lib/Headers/__clang_cuda_device_functions.h | 1768
-rw-r--r--  lib/Headers/__clang_cuda_intrinsics.h | 3
-rw-r--r--  lib/Headers/__clang_cuda_libdevice_declares.h | 466
-rw-r--r--  lib/Headers/__clang_cuda_runtime_wrapper.h | 95
-rw-r--r--  lib/Headers/__wmmintrin_aes.h | 25
-rw-r--r--  lib/Headers/__wmmintrin_pclmul.h | 19
-rw-r--r--  lib/Headers/ammintrin.h | 14
-rw-r--r--  lib/Headers/avx2intrin.h | 658
-rw-r--r--  lib/Headers/avx512bitalgintrin.h | 6
-rw-r--r--  lib/Headers/avx512bwintrin.h | 866
-rw-r--r--  lib/Headers/avx512cdintrin.h | 34
-rw-r--r--  lib/Headers/avx512dqintrin.h | 660
-rw-r--r--  lib/Headers/avx512erintrin.h | 128
-rw-r--r--  lib/Headers/avx512fintrin.h | 6513
-rw-r--r--  lib/Headers/avx512ifmaintrin.h | 48
-rw-r--r--  lib/Headers/avx512ifmavlintrin.h | 116
-rw-r--r--  lib/Headers/avx512pfintrin.h | 70
-rw-r--r--  lib/Headers/avx512vbmi2intrin.h | 130
-rw-r--r--  lib/Headers/avx512vbmiintrin.h | 66
-rw-r--r--  lib/Headers/avx512vbmivlintrin.h | 183
-rw-r--r--  lib/Headers/avx512vlbitalgintrin.h | 72
-rw-r--r--  lib/Headers/avx512vlbwintrin.h | 977
-rw-r--r--  lib/Headers/avx512vlcdintrin.h | 138
-rw-r--r--  lib/Headers/avx512vldqintrin.h | 579
-rw-r--r--  lib/Headers/avx512vlintrin.h | 5697
-rw-r--r--  lib/Headers/avx512vlvbmi2intrin.h | 475
-rw-r--r--  lib/Headers/avx512vlvnniintrin.h | 253
-rw-r--r--  lib/Headers/avx512vnniintrin.h | 99
-rw-r--r--  lib/Headers/avx512vpopcntdqintrin.h | 6
-rw-r--r--  lib/Headers/avx512vpopcntdqvlintrin.h | 40
-rw-r--r--  lib/Headers/avxintrin.h | 1181
-rw-r--r--  lib/Headers/bmiintrin.h | 38
-rw-r--r--  lib/Headers/cetintrin.h | 22
-rw-r--r--  lib/Headers/cldemoteintrin.h | 42
-rw-r--r--  lib/Headers/clflushoptintrin.h | 2
-rw-r--r--  lib/Headers/clwbintrin.h | 2
-rw-r--r--  lib/Headers/clzerointrin.h | 10
-rw-r--r--  lib/Headers/cpuid.h | 14
-rw-r--r--  lib/Headers/cuda_wrappers/algorithm | 64
-rw-r--r--  lib/Headers/emmintrin.h | 869
-rw-r--r--  lib/Headers/f16cintrin.h | 82
-rw-r--r--  lib/Headers/fma4intrin.h | 70
-rw-r--r--  lib/Headers/fmaintrin.h | 72
-rw-r--r--  lib/Headers/fxsrintrin.h | 16
-rw-r--r--  lib/Headers/gfniintrin.h | 112
-rw-r--r--  lib/Headers/htmxlintrin.h | 2
-rw-r--r--  lib/Headers/ia32intrin.h | 5
-rw-r--r--  lib/Headers/immintrin.h | 197
-rw-r--r--  lib/Headers/intrin.h | 158
-rw-r--r--  lib/Headers/invpcidintrin.h | 37
-rw-r--r--  lib/Headers/lwpintrin.h | 14
-rw-r--r--  lib/Headers/lzcntintrin.h | 14
-rw-r--r--  lib/Headers/mm3dnow.h | 6
-rw-r--r--  lib/Headers/mmintrin.h | 160
-rw-r--r--  lib/Headers/module.modulemap | 20
-rw-r--r--  lib/Headers/movdirintrin.h | 63
-rw-r--r--  lib/Headers/mwaitxintrin.h | 6
-rw-r--r--  lib/Headers/nmmintrin.h | 6
-rw-r--r--  lib/Headers/opencl-c.h | 6
-rw-r--r--  lib/Headers/pconfigintrin.h | 50
-rw-r--r--  lib/Headers/pkuintrin.h | 4
-rw-r--r--  lib/Headers/pmmintrin.h | 32
-rw-r--r--  lib/Headers/popcntintrin.h | 14
-rw-r--r--  lib/Headers/prfchwintrin.h | 6
-rw-r--r--  lib/Headers/ptwriteintrin.h | 51
-rw-r--r--  lib/Headers/rdseedintrin.h | 2
-rw-r--r--  lib/Headers/sgxintrin.h | 70
-rw-r--r--  lib/Headers/shaintrin.h | 6
-rw-r--r--  lib/Headers/smmintrin.h | 345
-rw-r--r--  lib/Headers/stdint.h | 4
-rw-r--r--  lib/Headers/tmmintrin.h | 124
-rw-r--r--  lib/Headers/vaesintrin.h | 4
-rw-r--r--  lib/Headers/vpclmulqdqintrin.h | 10
-rw-r--r--  lib/Headers/waitpkgintrin.h | 56
-rw-r--r--  lib/Headers/wbnoinvdintrin.h | 38
-rw-r--r--  lib/Headers/wmmintrin.h | 6
-rw-r--r--  lib/Headers/x86intrin.h | 25
-rw-r--r--  lib/Headers/xmmintrin.h | 462
-rw-r--r--  lib/Headers/xopintrin.h | 74
-rw-r--r--  lib/Headers/xsavecintrin.h | 2
-rw-r--r--  lib/Headers/xsaveintrin.h | 10
-rw-r--r--  lib/Headers/xsaveoptintrin.h | 6
-rw-r--r--  lib/Headers/xsavesintrin.h | 2
-rw-r--r--  lib/Headers/xtestintrin.h | 2
-rw-r--r--  lib/Index/CMakeLists.txt | 1
-rw-r--r--  lib/Index/IndexDecl.cpp | 11
-rw-r--r--  lib/Index/IndexSymbol.cpp | 12
-rw-r--r--  lib/Index/IndexTypeSourceInfo.cpp | 2
-rw-r--r--  lib/Index/IndexingAction.cpp | 159
-rw-r--r--  lib/Index/IndexingContext.cpp | 57
-rw-r--r--  lib/Index/IndexingContext.h | 15
-rw-r--r--  lib/Index/SimpleFormatContext.h | 4
-rw-r--r--  lib/Index/USRGeneration.cpp | 30
-rw-r--r--  lib/Lex/HeaderSearch.cpp | 140
-rw-r--r--  lib/Lex/Lexer.cpp | 103
-rw-r--r--  lib/Lex/LiteralSupport.cpp | 197
-rw-r--r--  lib/Lex/MacroArgs.cpp | 5
-rw-r--r--  lib/Lex/MacroInfo.cpp | 2
-rw-r--r--  lib/Lex/ModuleMap.cpp | 310
-rw-r--r--  lib/Lex/PPCaching.cpp | 4
-rw-r--r--  lib/Lex/PPDirectives.cpp | 124
-rw-r--r--  lib/Lex/PPExpressions.cpp | 2
-rw-r--r--  lib/Lex/PPLexerChange.cpp | 32
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 238
-rw-r--r--  lib/Lex/PTHLexer.cpp | 10
-rw-r--r--  lib/Lex/Pragma.cpp | 83
-rw-r--r--  lib/Lex/PreprocessingRecord.cpp | 34
-rw-r--r--  lib/Lex/Preprocessor.cpp | 117
-rw-r--r--  lib/Lex/PreprocessorLexer.cpp | 2
-rw-r--r--  lib/Lex/ScratchBuffer.cpp | 8
-rw-r--r--  lib/Lex/TokenLexer.cpp | 14
-rw-r--r--  lib/Parse/ParseAST.cpp | 18
-rw-r--r--  lib/Parse/ParseCXXInlineMethods.cpp | 24
-rw-r--r--  lib/Parse/ParseDecl.cpp | 738
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 283
-rw-r--r--  lib/Parse/ParseExpr.cpp | 208
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 348
-rw-r--r--  lib/Parse/ParseObjc.cpp | 197
-rw-r--r--  lib/Parse/ParseOpenMP.cpp | 319
-rw-r--r--  lib/Parse/ParsePragma.cpp | 208
-rw-r--r--  lib/Parse/ParseStmt.cpp | 74
-rw-r--r--  lib/Parse/ParseStmtAsm.cpp | 2
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 383
-rw-r--r--  lib/Parse/ParseTentative.cpp | 63
-rw-r--r--  lib/Parse/Parser.cpp | 81
-rw-r--r--  lib/Rewrite/DeltaTree.cpp | 48
-rw-r--r--  lib/Rewrite/HTMLRewrite.cpp | 224
-rw-r--r--  lib/Rewrite/RewriteRope.cpp | 77
-rw-r--r--  lib/Rewrite/Rewriter.cpp | 42
-rw-r--r--  lib/Rewrite/TokenRewriter.cpp | 15
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.cpp | 270
-rw-r--r--  lib/Sema/CMakeLists.txt | 2
-rw-r--r--  lib/Sema/CodeCompleteConsumer.cpp | 162
-rw-r--r--  lib/Sema/CoroutineStmtBuilder.h | 10
-rw-r--r--  lib/Sema/DeclSpec.cpp | 70
-rw-r--r--  lib/Sema/DelayedDiagnostic.cpp | 21
-rw-r--r--  lib/Sema/IdentifierResolver.cpp | 39
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 12
-rw-r--r--  lib/Sema/MultiplexExternalSemaSource.cpp | 18
-rw-r--r--  lib/Sema/ParsedAttr.cpp (renamed from lib/Sema/AttributeList.cpp) | 162
-rw-r--r--  lib/Sema/Scope.cpp | 99
-rw-r--r--  lib/Sema/ScopeInfo.cpp | 1
-rw-r--r--  lib/Sema/Sema.cpp | 232
-rw-r--r--  lib/Sema/SemaAccess.cpp | 29
-rw-r--r--  lib/Sema/SemaAttr.cpp | 17
-rw-r--r--  lib/Sema/SemaCUDA.cpp | 88
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 21
-rw-r--r--  lib/Sema/SemaCast.cpp | 457
-rw-r--r--  lib/Sema/SemaChecking.cpp | 2529
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 2365
-rw-r--r--  lib/Sema/SemaCoroutine.cpp | 326
-rw-r--r--  lib/Sema/SemaDecl.cpp | 1443
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 4288
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 1150
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 211
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 421
-rw-r--r--  lib/Sema/SemaExpr.cpp | 1159
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 588
-rw-r--r--  lib/Sema/SemaExprMember.cpp | 152
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 76
-rw-r--r--  lib/Sema/SemaInit.cpp | 1076
-rw-r--r--  lib/Sema/SemaLambda.cpp | 162
-rw-r--r--  lib/Sema/SemaLookup.cpp | 240
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp | 24
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 3152
-rw-r--r--  lib/Sema/SemaOverload.cpp | 724
-rw-r--r--  lib/Sema/SemaPseudoObject.cpp | 95
-rw-r--r--  lib/Sema/SemaStmt.cpp | 579
-rw-r--r--  lib/Sema/SemaStmtAsm.cpp | 4
-rw-r--r--  lib/Sema/SemaStmtAttr.cpp | 27
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 772
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 909
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 329
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 201
-rw-r--r--  lib/Sema/SemaTemplateVariadic.cpp | 119
-rw-r--r--  lib/Sema/SemaType.cpp | 1488
-rw-r--r--  lib/Sema/TreeTransform.h | 703
-rw-r--r--  lib/Sema/TypeLocBuilder.h | 6
-rw-r--r--  lib/Serialization/ASTCommon.cpp | 97
-rw-r--r--  lib/Serialization/ASTCommon.h | 8
-rw-r--r--  lib/Serialization/ASTReader.cpp | 992
-rw-r--r--  lib/Serialization/ASTReaderDecl.cpp | 660
-rw-r--r--  lib/Serialization/ASTReaderInternals.h | 16
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp | 180
-rw-r--r--  lib/Serialization/ASTWriter.cpp | 348
-rw-r--r--  lib/Serialization/ASTWriterDecl.cpp | 49
-rw-r--r--  lib/Serialization/ASTWriterStmt.cpp | 16
-rw-r--r--  lib/Serialization/GlobalModuleIndex.cpp | 76
-rw-r--r--  lib/Serialization/Module.cpp | 9
-rw-r--r--  lib/Serialization/MultiOnDiskHashTable.h | 22
-rw-r--r--  lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/AllocationState.h | 34
-rw-r--r--  lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp | 62
-rw-r--r--  lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp | 25
-rw-r--r--  lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | 14
-rw-r--r--  lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Checkers/CMakeLists.txt | 7
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 470
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp | 79
-rw-r--r--  lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 8
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp | 152
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | 61
-rw-r--r--  lib/StaticAnalyzer/Checkers/ChrootChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp | 6
-rw-r--r--  lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp | 18
-rw-r--r--  lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp | 8
-rw-r--r--  lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp | 229
-rw-r--r--  lib/StaticAnalyzer/Checkers/GTestChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp | 60
-rw-r--r--  lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp | 252
-rw-r--r--  lib/StaticAnalyzer/Checkers/IteratorChecker.cpp | 512
-rw-r--r--  lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp | 12
-rw-r--r--  lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 328
-rw-r--r--  lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp | 9
-rw-r--r--  lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp | 88
-rw-r--r--  lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp | 140
-rw-r--r--  lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp | 44
-rw-r--r--  lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp | 6
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp | 209
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp | 8
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp | 16
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 6
-rw-r--r--  lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp | 22
-rw-r--r--  lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp | 6
-rw-r--r--  lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp | 23
-rw-r--r--  lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 77
-rw-r--r--  lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp | 217
-rw-r--r--  lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp | 13
-rw-r--r--  lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 36
-rw-r--r--  lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Checkers/TraversalChecker.cpp | 5
-rw-r--r--  lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp | 90
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp | 33
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp | 34
-rw-r--r--  lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp | 688
-rw-r--r--  lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | 9
-rw-r--r--  lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp | 21
-rw-r--r--  lib/StaticAnalyzer/Checkers/ValistChecker.cpp | 28
-rw-r--r--  lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp | 13
-rw-r--r--  lib/StaticAnalyzer/Core/AnalysisManager.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 113
-rw-r--r--  lib/StaticAnalyzer/Core/BasicValueFactory.cpp | 62
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporter.cpp | 2538
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 1245
-rw-r--r--  lib/StaticAnalyzer/Core/CMakeLists.txt | 5
-rw-r--r--  lib/StaticAnalyzer/Core/CallEvent.cpp | 240
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerContext.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerHelpers.cpp | 29
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerManager.cpp | 310
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerRegistry.cpp | 72
-rw-r--r--  lib/StaticAnalyzer/Core/ConstraintManager.cpp | 11
-rw-r--r--  lib/StaticAnalyzer/Core/CoreEngine.cpp | 257
-rw-r--r--  lib/StaticAnalyzer/Core/DynamicTypeMap.cpp | 38
-rw-r--r--  lib/StaticAnalyzer/Core/Environment.cpp | 95
-rw-r--r--  lib/StaticAnalyzer/Core/ExplodedGraph.cpp | 47
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngine.cpp | 1088
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineC.cpp | 49
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 530
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 224
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineObjC.cpp | 94
-rw-r--r--  lib/StaticAnalyzer/Core/FunctionSummary.cpp | 13
-rw-r--r--  lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 266
-rw-r--r--  lib/StaticAnalyzer/Core/LoopUnrolling.cpp | 20
-rw-r--r--  lib/StaticAnalyzer/Core/LoopWidening.cpp | 33
-rw-r--r--  lib/StaticAnalyzer/Core/MemRegion.cpp | 303
-rw-r--r--  lib/StaticAnalyzer/Core/PathDiagnostic.cpp | 409
-rw-r--r--  lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 142
-rw-r--r--  lib/StaticAnalyzer/Core/ProgramState.cpp | 65
-rw-r--r--  lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 455
-rw-r--r--  lib/StaticAnalyzer/Core/RangedConstraintManager.cpp | 25
-rw-r--r--  lib/StaticAnalyzer/Core/RangedConstraintManager.h | 102
-rw-r--r--  lib/StaticAnalyzer/Core/RegionStore.cpp | 140
-rw-r--r--  lib/StaticAnalyzer/Core/SMTConstraintManager.cpp | 181
-rw-r--r--  lib/StaticAnalyzer/Core/SValBuilder.cpp | 85
-rw-r--r--  lib/StaticAnalyzer/Core/SVals.cpp | 65
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 260
-rw-r--r--  lib/StaticAnalyzer/Core/Store.cpp | 96
-rw-r--r--  lib/StaticAnalyzer/Core/SymbolManager.cpp | 46
-rw-r--r--  lib/StaticAnalyzer/Core/WorkList.cpp | 254
-rw-r--r--  lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp | 2053
-rw-r--r--  lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 174
-rw-r--r--  lib/StaticAnalyzer/Frontend/CMakeLists.txt | 3
-rw-r--r--  lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp | 13
-rw-r--r--  lib/StaticAnalyzer/Frontend/ModelConsumer.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Frontend/ModelInjector.cpp | 6
-rw-r--r--  lib/StaticAnalyzer/Frontend/ModelInjector.h | 4
-rw-r--r--  lib/Tooling/ASTDiff/ASTDiff.cpp | 4
-rw-r--r--  lib/Tooling/AllTUsExecution.cpp | 161
-rw-r--r--  lib/Tooling/ArgumentsAdjusters.cpp | 7
-rw-r--r--  lib/Tooling/CMakeLists.txt | 3
-rw-r--r--  lib/Tooling/CompilationDatabase.cpp | 57
-rw-r--r--  lib/Tooling/Core/CMakeLists.txt | 2
-rw-r--r--  lib/Tooling/Core/Replacement.cpp | 69
-rw-r--r--  lib/Tooling/Execution.cpp | 9
-rw-r--r--  lib/Tooling/FileMatchTrie.cpp | 45
-rw-r--r--  lib/Tooling/Inclusions/CMakeLists.txt | 12
-rw-r--r--  lib/Tooling/Inclusions/HeaderIncludes.cpp | 330
-rw-r--r--  lib/Tooling/Inclusions/IncludeStyle.cpp | 31
-rw-r--r--  lib/Tooling/InterpolatingCompilationDatabase.cpp | 458
-rw-r--r--  lib/Tooling/JSONCompilationDatabase.cpp | 88
-rw-r--r--  lib/Tooling/Refactoring/AtomicChange.cpp | 6
-rw-r--r--  lib/Tooling/Refactoring/Extract/Extract.cpp | 2
-rw-r--r--  lib/Tooling/Refactoring/Rename/RenamingAction.cpp | 2
-rw-r--r--  lib/Tooling/Refactoring/Rename/USRFinder.cpp | 4
-rw-r--r--  lib/Tooling/Refactoring/Rename/USRFindingAction.cpp | 4
-rw-r--r--  lib/Tooling/Refactoring/Rename/USRLocFinder.cpp | 8
-rw-r--r--  lib/Tooling/StandaloneExecution.cpp | 8
-rw-r--r--  lib/Tooling/Tooling.cpp | 196
681 files changed, 82957 insertions(+), 47716 deletions(-)
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index cf7cddefc03d..74c9974cc810 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -503,7 +503,7 @@ public:
} // end anonymous namespace.
-/// \brief Anchor for VTable.
+/// Anchor for VTable.
MigrationProcess::RewriteListener::~RewriteListener() { }
MigrationProcess::MigrationProcess(
diff --git a/lib/ARCMigrate/ObjCMT.cpp b/lib/ARCMigrate/ObjCMT.cpp
index fcc67da1f774..433e6194a2c2 100644
--- a/lib/ARCMigrate/ObjCMT.cpp
+++ b/lib/ARCMigrate/ObjCMT.cpp
@@ -226,7 +226,7 @@ namespace {
isa<ParenListExpr>(Expr) || isa<SizeOfPackExpr>(Expr));
}
- /// \brief - Rewrite message expression for Objective-C setter and getters into
+ /// - Rewrite message expression for Objective-C setter and getters into
/// property-dot syntax.
bool rewriteToPropertyDotSyntax(const ObjCMessageExpr *Msg,
Preprocessor &PP,
@@ -1065,7 +1065,7 @@ static bool TypeIsInnerPointer(QualType T) {
return true;
}
-/// \brief Check whether the two versions match.
+/// Check whether the two versions match.
static bool versionsMatch(const VersionTuple &X, const VersionTuple &Y) {
return (X == Y);
}
@@ -1101,7 +1101,7 @@ static bool MatchTwoAttributeLists(const AttrVec &Attrs1, const AttrVec &Attrs2,
for (unsigned i = 0, e = Attrs1.size(); i != e; i++) {
bool match = false;
for (unsigned j = 0, f = Attrs2.size(); j != f; j++) {
- // Matching attribute kind only. Except for Availabilty attributes,
+ // Matching attribute kind only. Except for Availability attributes,
// we are not getting into details of the attributes. For all practical purposes
// this is sufficient.
if (Attrs1[i]->getKind() == Attrs2[j]->getKind()) {
diff --git a/lib/ARCMigrate/PlistReporter.cpp b/lib/ARCMigrate/PlistReporter.cpp
index 9a51690c0ce7..2ad1c8591a9e 100644
--- a/lib/ARCMigrate/PlistReporter.cpp
+++ b/lib/ARCMigrate/PlistReporter.cpp
@@ -107,8 +107,7 @@ void arcmt::writeARCDiagsToPlist(const std::string &outPath,
o << " <key>ranges</key>\n";
o << " <array>\n";
for (auto &R : D.getRanges()) {
- CharSourceRange ExpansionRange(SM.getExpansionRange(R.getAsRange()),
- R.isTokenRange());
+ CharSourceRange ExpansionRange = SM.getExpansionRange(R);
EmitRange(o, SM, Lexer::getAsCharRange(ExpansionRange, SM, LangOpts),
FM, 4);
}
diff --git a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index d45d5d60b78a..cbc22ed60172 100644
--- a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -73,7 +73,7 @@ static bool isEmptyARCMTMacroStatement(NullStmt *S,
namespace {
-/// \brief Returns true if the statement became empty due to previous
+/// Returns true if the statement became empty due to previous
/// transformations.
class EmptyChecker : public StmtVisitor<EmptyChecker, bool> {
ASTContext &Ctx;
diff --git a/lib/ARCMigrate/TransGCAttrs.cpp b/lib/ARCMigrate/TransGCAttrs.cpp
index 2ae6b78a4634..4fd21aa6c269 100644
--- a/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/lib/ARCMigrate/TransGCAttrs.cpp
@@ -23,7 +23,7 @@ using namespace trans;
namespace {
-/// \brief Collects all the places where GC attributes __strong/__weak occur.
+/// Collects all the places where GC attributes __strong/__weak occur.
class GCAttrsCollector : public RecursiveASTVisitor<GCAttrsCollector> {
MigrationContext &MigrateCtx;
bool FullyMigratable;
@@ -92,7 +92,7 @@ public:
ASTContext &Ctx = MigrateCtx.Pass.Ctx;
SourceManager &SM = Ctx.getSourceManager();
if (Loc.isMacroID())
- Loc = SM.getImmediateExpansionRange(Loc).first;
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
SmallString<32> Buf;
bool Invalid = false;
StringRef Spell = Lexer::getSpelling(
@@ -287,7 +287,8 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
SourceLocation Loc = ATLs[i].first.getAttrNameLoc();
if (Loc.isMacroID())
Loc = MigrateCtx.Pass.Ctx.getSourceManager()
- .getImmediateExpansionRange(Loc).first;
+ .getImmediateExpansionRange(Loc)
+ .getBegin();
TA.remove(Loc);
TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc);
TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership,
diff --git a/lib/ARCMigrate/TransProperties.cpp b/lib/ARCMigrate/TransProperties.cpp
index 389b03666bf7..1468c21a0093 100644
--- a/lib/ARCMigrate/TransProperties.cpp
+++ b/lib/ARCMigrate/TransProperties.cpp
@@ -330,7 +330,7 @@ private:
return false;
}
- // \brief Returns true if all declarations in the @property have GC __weak.
+ // Returns true if all declarations in the @property have GC __weak.
bool hasGCWeak(PropsTy &props, SourceLocation atLoc) const {
if (!Pass.isGCMigration())
return false;
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index 389f3655aa52..ebe289b34df8 100644
--- a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -158,7 +158,7 @@ public:
}
private:
- /// \brief Checks for idioms where an unused -autorelease is common.
+ /// Checks for idioms where an unused -autorelease is common.
///
/// Returns true for this idiom which is common in property
/// setters:
@@ -309,7 +309,7 @@ private:
return nullptr;
}
- /// \brief Check if the retain/release is due to a GCD/XPC macro that are
+ /// Check if the retain/release is due to a GCD/XPC macro that are
/// defined as:
///
/// #define dispatch_retain(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); (void)[_o retain]; })
diff --git a/lib/ARCMigrate/TransUnbridgedCasts.cpp b/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 7ca49558a7f0..de52bef4d206 100644
--- a/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -283,13 +283,12 @@ private:
SourceManager &SM = Pass.Ctx.getSourceManager();
SourceLocation Loc = E->getExprLoc();
assert(Loc.isMacroID());
- SourceLocation MacroBegin, MacroEnd;
- std::tie(MacroBegin, MacroEnd) = SM.getImmediateExpansionRange(Loc);
+ CharSourceRange MacroRange = SM.getImmediateExpansionRange(Loc);
SourceRange SubRange = E->getSubExpr()->IgnoreParenImpCasts()->getSourceRange();
SourceLocation InnerBegin = SM.getImmediateMacroCallerLoc(SubRange.getBegin());
SourceLocation InnerEnd = SM.getImmediateMacroCallerLoc(SubRange.getEnd());
- Outer = SourceRange(MacroBegin, MacroEnd);
+ Outer = MacroRange.getAsRange();
Inner = SourceRange(InnerBegin, InnerEnd);
}
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
index 4f3fb5845925..704be4374d3d 100644
--- a/lib/ARCMigrate/TransformActions.cpp
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -19,7 +19,7 @@ using namespace arcmt;
namespace {
-/// \brief Collects transformations and merges them before applying them with
+/// Collects transformations and merges them before applying them with
/// with applyRewrites(). E.g. if the same source range
/// is requested to be removed twice, only one rewriter remove will be invoked.
/// Rewrites happen in "transactions"; if one rewrite in the transaction cannot
@@ -61,7 +61,7 @@ class TransformActionsImpl {
Range_ExtendsEnd
};
- /// \brief A range to remove. It is a character range.
+ /// A range to remove. It is a character range.
struct CharRange {
FullSourceLoc Begin, End;
@@ -107,7 +107,7 @@ class TransformActionsImpl {
typedef std::map<FullSourceLoc, TextsVec, FullSourceLoc::BeforeThanCompare>
InsertsMap;
InsertsMap Inserts;
- /// \brief A list of ranges to remove. They are always sorted and they never
+ /// A list of ranges to remove. They are always sorted and they never
/// intersect with each other.
std::list<CharRange> Removals;
@@ -115,7 +115,7 @@ class TransformActionsImpl {
std::vector<std::pair<CharRange, SourceLocation> > IndentationRanges;
- /// \brief Keeps text passed to transformation methods.
+ /// Keeps text passed to transformation methods.
llvm::StringMap<bool> UniqueText;
public:
@@ -167,12 +167,12 @@ private:
void addRemoval(CharSourceRange range);
void addInsertion(SourceLocation loc, StringRef text);
- /// \brief Stores text passed to the transformation methods to keep the string
+ /// Stores text passed to the transformation methods to keep the string
/// "alive". Since the vast majority of text will be the same, we also unique
/// the strings using a StringMap.
StringRef getUniqueText(StringRef text);
- /// \brief Computes the source location just past the end of the token at
+ /// Computes the source location just past the end of the token at
/// the given source location. If the location points at a macro, the whole
/// macro expansion is skipped.
static SourceLocation getLocForEndOfToken(SourceLocation loc,
@@ -577,21 +577,25 @@ void TransformActionsImpl::applyRewrites(
}
}
-/// \brief Stores text passed to the transformation methods to keep the string
+/// Stores text passed to the transformation methods to keep the string
/// "alive". Since the vast majority of text will be the same, we also unique
/// the strings using a StringMap.
StringRef TransformActionsImpl::getUniqueText(StringRef text) {
return UniqueText.insert(std::make_pair(text, false)).first->first();
}
-/// \brief Computes the source location just past the end of the token at
+/// Computes the source location just past the end of the token at
/// the given source location. If the location points at a macro, the whole
/// macro expansion is skipped.
SourceLocation TransformActionsImpl::getLocForEndOfToken(SourceLocation loc,
SourceManager &SM,
Preprocessor &PP) {
- if (loc.isMacroID())
- loc = SM.getExpansionRange(loc).second;
+ if (loc.isMacroID()) {
+ CharSourceRange Exp = SM.getExpansionRange(loc);
+ if (Exp.isCharRange())
+ return Exp.getEnd();
+ loc = Exp.getEnd();
+ }
return PP.getLocForEndOfToken(loc);
}
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index cb96a547fbac..1f4e6a297fc3 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -111,7 +111,7 @@ bool trans::isPlusOne(const Expr *E) {
return implCE && implCE->getCastKind() == CK_ARCConsumeObject;
}
-/// \brief 'Loc' is the end of a statement range. This returns the location
+/// 'Loc' is the end of a statement range. This returns the location
/// immediately after the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
@@ -123,7 +123,7 @@ SourceLocation trans::findLocationAfterSemi(SourceLocation loc,
return SemiLoc.getLocWithOffset(1);
}
-/// \brief \arg Loc is the end of a statement range. This returns the location
+/// \arg Loc is the end of a statement range. This returns the location
/// of the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
diff --git a/lib/ARCMigrate/Transforms.h b/lib/ARCMigrate/Transforms.h
index 7e3dd34e7607..8ea4f79456a8 100644
--- a/lib/ARCMigrate/Transforms.h
+++ b/lib/ARCMigrate/Transforms.h
@@ -89,7 +89,7 @@ public:
SourceLocation Loc;
QualType ModifiedType;
Decl *Dcl;
- /// \brief true if the attribute is owned, e.g. it is in a body and not just
+ /// true if the attribute is owned, e.g. it is in a body and not just
/// in an interface.
bool FullyMigratable;
};
@@ -97,7 +97,7 @@ public:
llvm::DenseSet<unsigned> AttrSet;
llvm::DenseSet<unsigned> RemovedAttrSet;
- /// \brief Set of raw '@' locations for 'assign' properties group that contain
+ /// Set of raw '@' locations for 'assign' properties group that contain
/// GC __weak.
llvm::DenseSet<unsigned> AtPropsWeak;
@@ -156,21 +156,21 @@ public:
// Helpers.
//===----------------------------------------------------------------------===//
-/// \brief Determine whether we can add weak to the given type.
+/// Determine whether we can add weak to the given type.
bool canApplyWeak(ASTContext &Ctx, QualType type,
bool AllowOnUnknownClass = false);
bool isPlusOneAssign(const BinaryOperator *E);
bool isPlusOne(const Expr *E);
-/// \brief 'Loc' is the end of a statement range. This returns the location
+/// 'Loc' is the end of a statement range. This returns the location
/// immediately after the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
SourceLocation findLocationAfterSemi(SourceLocation loc, ASTContext &Ctx,
bool IsDecl = false);
-/// \brief 'Loc' is the end of a statement range. This returns the location
+/// 'Loc' is the end of a statement range. This returns the location
/// of the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
@@ -179,7 +179,7 @@ SourceLocation findSemiAfterLocation(SourceLocation loc, ASTContext &Ctx,
bool hasSideEffects(Expr *E, ASTContext &Ctx);
bool isGlobalVar(Expr *E);
-/// \brief Returns "nil" or "0" if 'nil' macro is not actually defined.
+/// Returns "nil" or "0" if 'nil' macro is not actually defined.
StringRef getNilString(MigrationPass &Pass);
template <typename BODY_TRANS>
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index 488ad3373ca3..c45b52a65a4d 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -23,14 +23,57 @@ using namespace clang;
namespace {
struct LVBase {
- llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd;
+ APValue::LValueBase Base;
CharUnits Offset;
unsigned PathLength;
- unsigned CallIndex;
- bool IsNullPtr;
+ bool IsNullPtr : 1;
+ bool IsOnePastTheEnd : 1;
};
}
+void *APValue::LValueBase::getOpaqueValue() const {
+ return Ptr.getOpaqueValue();
+}
+
+bool APValue::LValueBase::isNull() const {
+ return Ptr.isNull();
+}
+
+APValue::LValueBase::operator bool () const {
+ return static_cast<bool>(Ptr);
+}
+
+clang::APValue::LValueBase
+llvm::DenseMapInfo<clang::APValue::LValueBase>::getEmptyKey() {
+ return clang::APValue::LValueBase(
+ DenseMapInfo<clang::APValue::LValueBase::PtrTy>::getEmptyKey(),
+ DenseMapInfo<unsigned>::getEmptyKey(),
+ DenseMapInfo<unsigned>::getEmptyKey());
+}
+
+clang::APValue::LValueBase
+llvm::DenseMapInfo<clang::APValue::LValueBase>::getTombstoneKey() {
+ return clang::APValue::LValueBase(
+ DenseMapInfo<clang::APValue::LValueBase::PtrTy>::getTombstoneKey(),
+ DenseMapInfo<unsigned>::getTombstoneKey(),
+ DenseMapInfo<unsigned>::getTombstoneKey());
+}
+
+unsigned llvm::DenseMapInfo<clang::APValue::LValueBase>::getHashValue(
+ const clang::APValue::LValueBase &Base) {
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Base.getOpaqueValue());
+ ID.AddInteger(Base.getCallIndex());
+ ID.AddInteger(Base.getVersion());
+ return ID.ComputeHash();
+}
+
+bool llvm::DenseMapInfo<clang::APValue::LValueBase>::isEqual(
+ const clang::APValue::LValueBase &LHS,
+ const clang::APValue::LValueBase &RHS) {
+ return LHS == RHS;
+}
+
struct APValue::LV : LVBase {
static const unsigned InlinePathSpace =
(DataSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
@@ -150,11 +193,10 @@ APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
MakeLValue();
if (RHS.hasLValuePath())
setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
- RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex(),
- RHS.isNullPointer());
+ RHS.isLValueOnePastTheEnd(), RHS.isNullPointer());
else
setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
- RHS.getLValueCallIndex(), RHS.isNullPointer());
+ RHS.isNullPointer());
break;
case Array:
MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
@@ -552,12 +594,12 @@ std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
const APValue::LValueBase APValue::getLValueBase() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getPointer();
+ return ((const LV*)(const void*)Data.buffer)->Base;
}
bool APValue::isLValueOnePastTheEnd() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getInt();
+ return ((const LV*)(const void*)Data.buffer)->IsOnePastTheEnd;
}
CharUnits &APValue::getLValueOffset() {
@@ -578,7 +620,12 @@ ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
unsigned APValue::getLValueCallIndex() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const char*)Data.buffer)->CallIndex;
+ return ((const LV*)(const char*)Data.buffer)->Base.getCallIndex();
+}
+
+unsigned APValue::getLValueVersion() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data.buffer)->Base.getVersion();
}
bool APValue::isNullPointer() const {
@@ -587,26 +634,24 @@ bool APValue::isNullPointer() const {
}
void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
- unsigned CallIndex, bool IsNullPtr) {
+ bool IsNullPtr) {
assert(isLValue() && "Invalid accessor");
LV &LVal = *((LV*)(char*)Data.buffer);
- LVal.BaseAndIsOnePastTheEnd.setPointer(B);
- LVal.BaseAndIsOnePastTheEnd.setInt(false);
+ LVal.Base = B;
+ LVal.IsOnePastTheEnd = false;
LVal.Offset = O;
- LVal.CallIndex = CallIndex;
LVal.resizePath((unsigned)-1);
LVal.IsNullPtr = IsNullPtr;
}
void APValue::setLValue(LValueBase B, const CharUnits &O,
ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
- unsigned CallIndex, bool IsNullPtr) {
+ bool IsNullPtr) {
assert(isLValue() && "Invalid accessor");
LV &LVal = *((LV*)(char*)Data.buffer);
- LVal.BaseAndIsOnePastTheEnd.setPointer(B);
- LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
+ LVal.Base = B;
+ LVal.IsOnePastTheEnd = IsOnePastTheEnd;
LVal.Offset = O;
- LVal.CallIndex = CallIndex;
LVal.resizePath(Path.size());
memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
LVal.IsNullPtr = IsNullPtr;
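The DenseMapInfo<APValue::LValueBase> specialization added above follows LLVM's standard recipe for custom DenseMap keys: reserved empty and tombstone sentinels, a hash function, and an equality predicate. A generic sketch of the same recipe for a hypothetical key type MyKey (the type and its fields are illustrative, not from this patch):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/ADT/FoldingSet.h"

    struct MyKey { // hypothetical two-field key
      unsigned A, B;
      bool operator==(const MyKey &O) const { return A == O.A && B == O.B; }
    };

    namespace llvm {
    template <> struct DenseMapInfo<MyKey> {
      // Sentinel values; they must never collide with real keys.
      static MyKey getEmptyKey() { return {~0u, ~0u}; }
      static MyKey getTombstoneKey() { return {~0u - 1, ~0u}; }
      static unsigned getHashValue(const MyKey &K) {
        // Same hashing idiom the patch uses: fold the fields into a
        // FoldingSetNodeID and take its hash.
        FoldingSetNodeID ID;
        ID.AddInteger(K.A);
        ID.AddInteger(K.B);
        return ID.ComputeHash();
      }
      static bool isEqual(const MyKey &L, const MyKey &R) { return L == R; }
    };
    } // namespace llvm

With such a specialization in place, llvm::DenseMap<MyKey, V> works out of the box; that is what lets LValueBase, which now carries the call index and version itself, serve directly as a map key.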
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 3dc961d4f12b..25dc4441aafd 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -47,6 +47,7 @@
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -130,35 +131,34 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
return nullptr;
// User can not attach documentation to implicit instantiations.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
- if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
- if (const ClassTemplateSpecializationDecl *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
if (TSK == TSK_ImplicitInstantiation ||
TSK == TSK_Undeclared)
return nullptr;
}
- if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ if (const auto *ED = dyn_cast<EnumDecl>(D)) {
if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
- if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (const auto *TD = dyn_cast<TagDecl>(D)) {
// When tag declaration (but not definition!) is part of the
// decl-specifier-seq of some other declaration, it doesn't get comment
if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
@@ -201,7 +201,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
// declared via a macro. Try using declaration's starting location as
// the "declaration location".
DeclLoc = D->getLocStart();
- } else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
// If location of the tag decl is inside a macro, but the spelling of
// the tag name comes from a macro argument, it looks like a special
// macro like NS_ENUM is being used to define the tag decl. In that
@@ -226,8 +226,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
// for is usually among the last two comments we parsed -- check them
// first.
RawComment CommentAtDeclLoc(
- SourceMgr, SourceRange(DeclLoc), false,
- LangOpts.CommentOpts.ParseAllComments);
+ SourceMgr, SourceRange(DeclLoc), LangOpts.CommentOpts, false);
BeforeThanCompare<RawComment> Compare(SourceMgr);
ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
@@ -253,7 +252,8 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
// First check whether we have a trailing comment.
if (Comment != RawComments.end() &&
- (*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
+ ((*Comment)->isDocumentation() || LangOpts.CommentOpts.ParseAllComments)
+ && (*Comment)->isTrailingComment() &&
(isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
std::pair<FileID, unsigned> CommentBeginDecomp
@@ -275,7 +275,9 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
--Comment;
// Check that we actually have a non-member Doxygen comment.
- if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
+ if (!((*Comment)->isDocumentation() ||
+ LangOpts.CommentOpts.ParseAllComments) ||
+ (*Comment)->isTrailingComment())
return nullptr;
// Decompose the end of the comment.
@@ -310,7 +312,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl *adjustDeclToTemplate(const Decl *D) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Is this function declaration part of a function template?
if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
return FTD;
@@ -330,7 +332,7 @@ static const Decl *adjustDeclToTemplate(const Decl *D) {
return D;
}
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
// Static data member is instantiated from a member definition of a class
// template?
if (VD->isStaticDataMember())
@@ -339,15 +341,14 @@ static const Decl *adjustDeclToTemplate(const Decl *D) {
return D;
}
- if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
// Is this class declaration part of a class template?
if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
return CTD;
// Class is an implicit instantiation of a class template or partial
// specialization?
- if (const ClassTemplateSpecializationDecl *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
+ if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
return D;
llvm::PointerUnion<ClassTemplateDecl *,
@@ -366,7 +367,7 @@ static const Decl *adjustDeclToTemplate(const Decl *D) {
return D;
}
- if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ if (const auto *ED = dyn_cast<EnumDecl>(D)) {
// Enum is instantiated from a member definition of a class template?
if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
return MemberDecl;
@@ -428,7 +429,7 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(
}
// If we found a comment, it should be a documentation comment.
- assert(!RC || RC->isDocumentation());
+ assert(!RC || RC->isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
if (OriginalDecl)
*OriginalDecl = OriginalDeclForRC;
@@ -451,7 +452,7 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
SmallVectorImpl<const NamedDecl *> &Redeclared) {
const DeclContext *DC = ObjCMethod->getDeclContext();
- if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
+ if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
const ObjCInterfaceDecl *ID = IMD->getClassInterface();
if (!ID)
return;
@@ -467,7 +468,7 @@ static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
const Decl *D) const {
- comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
+ auto *ThisDeclInfo = new (*this) comments::DeclInfo;
ThisDeclInfo->CommentDecl = D;
ThisDeclInfo->IsFilled = false;
ThisDeclInfo->fill();
@@ -511,7 +512,7 @@ comments::FullComment *ASTContext::getCommentForDecl(
if (!RC) {
if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
SmallVector<const NamedDecl*, 8> Overridden;
- const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
+ const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
if (OMD && OMD->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
@@ -523,28 +524,28 @@ comments::FullComment *ASTContext::getCommentForDecl(
if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
return cloneFullComment(FC, D);
}
- else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
// Attach any tag type's documentation to its typedef if latter
// does not have one of its own.
QualType QT = TD->getUnderlyingType();
- if (const TagType *TT = QT->getAs<TagType>())
+ if (const auto *TT = QT->getAs<TagType>())
if (const Decl *TD = TT->getDecl())
if (comments::FullComment *FC = getCommentForDecl(TD, PP))
return cloneFullComment(FC, D);
}
- else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
+ else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
while (IC->getSuperClass()) {
IC = IC->getSuperClass();
if (comments::FullComment *FC = getCommentForDecl(IC, PP))
return cloneFullComment(FC, D);
}
}
- else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
if (comments::FullComment *FC = getCommentForDecl(IC, PP))
return cloneFullComment(FC, D);
}
- else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
if (!(RD = RD->getDefinition()))
return nullptr;
// Check non-virtual bases.
@@ -604,13 +605,13 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
- if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
ID.AddInteger(0);
ID.AddBoolean(TTP->isParameterPack());
continue;
}
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
ID.AddInteger(1);
ID.AddBoolean(NTTP->isParameterPack());
ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
@@ -626,7 +627,7 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
continue;
}
- TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ auto *TTP = cast<TemplateTemplateParmDecl>(*P);
ID.AddInteger(2);
Profile(ID, TTP);
}
@@ -651,7 +652,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
- if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
CanonParams.push_back(
TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(),
@@ -659,8 +660,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TTP->getDepth(),
TTP->getIndex(), nullptr, false,
TTP->isParameterPack()));
- else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
QualType T = getCanonicalType(NTTP->getType());
TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
NonTypeTemplateParmDecl *Param;
@@ -788,10 +788,12 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
SubstTemplateTemplateParmPacks(this_()), SourceMgr(SM), LangOpts(LOpts),
SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
- LangOpts.XRayNeverInstrumentFiles, SM)),
+ LangOpts.XRayNeverInstrumentFiles,
+ LangOpts.XRayAttrListFiles, SM)),
PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
- CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
+ CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
+ CompCategories(this_()), LastSDM(nullptr, 0) {
TUDecl = TranslationUnitDecl::Create(*this);
}
@@ -812,13 +814,13 @@ ASTContext::~ASTContext() {
const ASTRecordLayout*>::iterator
I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
// Increment in loop to prevent using deallocated memory.
- if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
R->Destroy(*this);
for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
// Increment in loop to prevent using deallocated memory.
- if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
R->Destroy(*this);
}
@@ -966,7 +968,7 @@ void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
void ASTContext::addModuleInitializer(Module *M, Decl *D) {
// One special case: if we add a module initializer that imports another
// module, and that module's only initializer is an ImportDecl, simplify.
- if (auto *ID = dyn_cast<ImportDecl>(D)) {
+ if (const auto *ID = dyn_cast<ImportDecl>(D)) {
auto It = ModuleInitializers.find(ID->getImportedModule());
// Maybe the ImportDecl does nothing at all. (Common case.)
@@ -997,7 +999,7 @@ void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
IDs.begin(), IDs.end());
}
-ArrayRef<Decl*> ASTContext::getModuleInitializers(Module *M) {
+ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
auto It = ModuleInitializers.find(M);
if (It == ModuleInitializers.end())
return None;
@@ -1079,7 +1081,7 @@ TypedefDecl *ASTContext::getUInt128Decl() const {
}
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
- BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
+ auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
R = CanQualType::CreateUnsafe(QualType(Ty, 0));
Types.push_back(Ty);
}
@@ -1132,6 +1134,32 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// C11 extension ISO/IEC TS 18661-3
InitBuiltinType(Float16Ty, BuiltinType::Float16);
+ // ISO/IEC JTC1 SC22 WG14 N1169 Extension
+ InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
+ InitBuiltinType(AccumTy, BuiltinType::Accum);
+ InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
+ InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
+ InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
+ InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
+ InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
+ InitBuiltinType(FractTy, BuiltinType::Fract);
+ InitBuiltinType(LongFractTy, BuiltinType::LongFract);
+ InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
+ InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
+ InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
+ InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
+ InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
+ InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
+ InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
+ InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
+ InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
+ InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
+ InitBuiltinType(SatFractTy, BuiltinType::SatFract);
+ InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
+ InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
+ InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
+ InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
+
// GNU extension, 128-bit integers.
InitBuiltinType(Int128Ty, BuiltinType::Int128);
InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
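A minimal sketch of how the new fixed-point builtins surface through the usual type-info queries, assuming an initialized ASTContext for a target that defines the N1169 widths (dumpAccumWidths is illustrative, not part of this patch):

#include "clang/AST/ASTContext.h"
#include "llvm/Support/raw_ostream.h"

// The getTypeInfoImpl cases added below forward to the corresponding
// TargetInfo hooks (getShortAccumWidth() and friends).
static void dumpAccumWidths(const clang::ASTContext &Ctx) {
  llvm::errs() << "short _Accum: " << Ctx.getTypeSize(Ctx.ShortAccumTy)
               << " bits, align " << Ctx.getTypeAlign(Ctx.ShortAccumTy) << "\n";
  llvm::errs() << "_Accum:       " << Ctx.getTypeSize(Ctx.AccumTy)
               << " bits, align " << Ctx.getTypeAlign(Ctx.AccumTy) << "\n";
  llvm::errs() << "_Sat _Fract:  " << Ctx.getTypeSize(Ctx.SatFractTy)
               << " bits, align " << Ctx.getTypeAlign(Ctx.SatFractTy) << "\n";
}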
@@ -1150,6 +1178,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
WIntTy = getFromTargetType(Target.getWIntType());
+ // C++20 (proposed)
+ InitBuiltinType(Char8Ty, BuiltinType::Char8);
+
if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
InitBuiltinType(Char16Ty, BuiltinType::Char16);
else // C99
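A source-level sketch of what the new Char8 builtin models, assuming a C++2a-mode invocation with P0482's char8_t enabled (the exact flag spelling is not part of this patch):

void take(char)    {}  // plain narrow character
void take(char8_t) {}  // distinct UTF-8 code unit type

int main() {
  take(u8'a');          // under P0482, u8'a' has type char8_t
  char8_t c = u8'x';
  return c == u8'x' ? 0 : 1;
}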
@@ -1254,7 +1285,7 @@ AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
return *Result;
}
-/// \brief Erase the attributes corresponding to the given declaration.
+/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
if (Pos != DeclAttrs.end()) {
@@ -1276,7 +1307,7 @@ ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
TemplateOrInstantiation.find(Var);
if (Pos == TemplateOrInstantiation.end())
- return TemplateOrSpecializationInfo();
+ return {};
return Pos->second;
}
@@ -1412,13 +1443,13 @@ void ASTContext::getOverriddenMethods(
SmallVectorImpl<const NamedDecl *> &Overridden) const {
assert(D);
- if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
Overridden.append(overridden_methods_begin(CXXMethod),
overridden_methods_end(CXXMethod));
return;
}
- const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
+ const auto *Method = dyn_cast<ObjCMethodDecl>(D);
if (!Method)
return;
@@ -1447,7 +1478,7 @@ void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
- const BuiltinType *BT = T->getAs<BuiltinType>();
+ const auto *BT = T->getAs<BuiltinType>();
assert(BT && "Not a floating point type!");
switch (BT->getKind()) {
default: llvm_unreachable("Not a floating point type!");
@@ -1490,9 +1521,9 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
// else about the declaration and its type.
if (UseAlignAttrOnly) {
// do nothing
- } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
QualType T = VD->getType();
- if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
+ if (const auto *RT = T->getAs<ReferenceType>()) {
if (ForAlignof)
T = RT->getPointeeType();
else
@@ -1517,7 +1548,7 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
if (BaseT.getQualifiers().hasUnaligned())
Align = Target->getCharWidth();
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasGlobalStorage() && !ForAlignof)
Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
}
@@ -1528,7 +1559,7 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
// a max-field-alignment constraint (#pragma pack). So calculate
// the actual alignment of the field within the struct, and then
// (as we're expected to) constrain that by the alignment of the type.
- if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
+ if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
const RecordDecl *Parent = Field->getParent();
// We can only produce a sensible answer if the record is valid.
if (!Parent->isInvalidDecl()) {
@@ -1567,7 +1598,7 @@ ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
// of a base-class subobject. We decide whether that's possible
// during class layout, so here we can just trust the layout results.
if (getLangOpts().CPlusPlus) {
- if (const RecordType *RT = T->getAs<RecordType>()) {
+ if (const auto *RT = T->getAs<RecordType>()) {
const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
sizeAndAlign.first = layout.getDataSize();
}
@@ -1598,7 +1629,7 @@ static getConstantArrayInfoInChars(const ASTContext &Context,
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(const Type *T) const {
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
return getConstantArrayInfoInChars(*this, CAT);
TypeInfo Info = getTypeInfo(T);
return std::make_pair(toCharUnitsFromBits(Info.Width),
@@ -1620,7 +1651,7 @@ bool ASTContext::isAlignmentRequired(QualType T) const {
unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
// An alignment on a typedef overrides anything else.
- if (auto *TT = T->getAs<TypedefType>())
+ if (const auto *TT = T->getAs<TypedefType>())
if (unsigned Align = TT->getDecl()->getMaxAlignment())
return Align;
@@ -1631,12 +1662,12 @@ unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
// If we had an array type, its element type might be a typedef
// type with an alignment attribute.
- if (auto *TT = T->getAs<TypedefType>())
+ if (const auto *TT = T->getAs<TypedefType>())
if (unsigned Align = TT->getDecl()->getMaxAlignment())
return Align;
// Otherwise, see if the declaration of the type had an attribute.
- if (auto *TT = T->getAs<TagType>())
+ if (const auto *TT = T->getAs<TagType>())
return TT->getDecl()->getMaxAlignment();
return 0;
@@ -1690,7 +1721,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
case Type::ConstantArray: {
- const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+ const auto *CAT = cast<ConstantArrayType>(T);
TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
uint64_t Size = CAT->getSize().getZExtValue();
@@ -1705,7 +1736,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
}
case Type::ExtVector:
case Type::Vector: {
- const VectorType *VT = cast<VectorType>(T);
+ const auto *VT = cast<VectorType>(T);
TypeInfo EltInfo = getTypeInfo(VT->getElementType());
Width = EltInfo.Width * VT->getNumElements();
Align = Width;
@@ -1738,6 +1769,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case BuiltinType::Char_U:
case BuiltinType::UChar:
case BuiltinType::SChar:
+ case BuiltinType::Char8:
Width = Target->getCharWidth();
Align = Target->getCharAlign();
break;
@@ -1779,6 +1811,48 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = 128;
Align = 128; // int128_t is 128-bit aligned on all targets.
break;
+ case BuiltinType::ShortAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatUShortAccum:
+ Width = Target->getShortAccumWidth();
+ Align = Target->getShortAccumAlign();
+ break;
+ case BuiltinType::Accum:
+ case BuiltinType::UAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatUAccum:
+ Width = Target->getAccumWidth();
+ Align = Target->getAccumAlign();
+ break;
+ case BuiltinType::LongAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatULongAccum:
+ Width = Target->getLongAccumWidth();
+ Align = Target->getLongAccumAlign();
+ break;
+ case BuiltinType::ShortFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatUShortFract:
+ Width = Target->getShortFractWidth();
+ Align = Target->getShortFractAlign();
+ break;
+ case BuiltinType::Fract:
+ case BuiltinType::UFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatUFract:
+ Width = Target->getFractWidth();
+ Align = Target->getFractAlign();
+ break;
+ case BuiltinType::LongFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatULongFract:
+ Width = Target->getLongFractWidth();
+ Align = Target->getLongFractAlign();
+ break;
case BuiltinType::Float16:
case BuiltinType::Half:
Width = Target->getHalfWidth();
@@ -1848,7 +1922,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Target->getPointerAlign(AS);
break;
case Type::MemberPointer: {
- const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ const auto *MPT = cast<MemberPointerType>(T);
CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
Width = MPI.Width;
Align = MPI.Align;
@@ -1868,7 +1942,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Decayed:
return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
case Type::ObjCInterface: {
- const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
+ const auto *ObjCI = cast<ObjCInterfaceType>(T);
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
@@ -1876,7 +1950,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
}
case Type::Record:
case Type::Enum: {
- const TagType *TT = cast<TagType>(T);
+ const auto *TT = cast<TagType>(T);
if (TT->getDecl()->isInvalidDecl()) {
Width = 8;
@@ -1884,7 +1958,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
}
- if (const EnumType *ET = dyn_cast<EnumType>(TT)) {
+ if (const auto *ET = dyn_cast<EnumType>(TT)) {
const EnumDecl *ED = ET->getDecl();
TypeInfo Info =
getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
@@ -1895,7 +1969,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return Info;
}
- const RecordType *RT = cast<RecordType>(TT);
+ const auto *RT = cast<RecordType>(TT);
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = getASTRecordLayout(RD);
Width = toBits(Layout.getSize());
@@ -1910,7 +1984,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization: {
- const DeducedType *A = cast<DeducedType>(T);
+ const auto *A = cast<DeducedType>(T);
assert(!A->getDeducedType().isNull() &&
"cannot request the size of an undeduced or dependent auto type");
return getTypeInfo(A->getDeducedType().getTypePtr());
@@ -1952,10 +2026,16 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = Info.Width;
Align = Info.Align;
- // If the size of the type doesn't exceed the platform's max
- // atomic promotion width, make the size and alignment more
- // favorable to atomic operations:
- if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) {
+ if (!Width) {
+ // An otherwise zero-sized type should still generate an
+ // atomic operation.
+ Width = Target->getCharWidth();
+ assert(Align);
+ } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
+ // If the size of the type doesn't exceed the platform's max
+ // atomic promotion width, make the size and alignment more
+ // favorable to atomic operations:
+
// Round the size up to a power of 2.
if (!llvm::isPowerOf2_64(Width))
Width = llvm::NextPowerOf2(Width);
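The promotion rule above, restated as a self-contained sketch with plain integers standing in for the TargetInfo queries:

#include <cstdint>

uint64_t promoteAtomicWidth(uint64_t Width, uint64_t CharWidth,
                            uint64_t MaxPromoteWidth) {
  if (Width == 0)
    return CharWidth;      // zero-sized payload still gets a real atomic op
  if (Width > MaxPromoteWidth)
    return Width;          // too wide to promote; leave unchanged
  uint64_t P = 1;
  while (P < Width)        // round up to the next power of two
    P <<= 1;
  return P;                // alignment is then raised to match
}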
@@ -2033,9 +2113,9 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
return ABIAlign;
// Double and long long should be naturally aligned if possible.
- if (const ComplexType *CT = T->getAs<ComplexType>())
+ if (const auto *CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
- if (const EnumType *ET = T->getAs<EnumType>())
+ if (const auto *ET = T->getAs<EnumType>())
T = ET->getDecl()->getIntegerType().getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
T->isSpecificBuiltinType(BuiltinType::LongLong) ||
@@ -2091,7 +2171,7 @@ void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
for (const auto *I : OI->ivars())
Ivars.push_back(I);
} else {
- ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
+ auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
Iv= Iv->getNextIvar())
Ivars.push_back(Iv);
@@ -2102,7 +2182,7 @@ void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
/// those inherited by it.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
- if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
// We can use protocol_iterator here instead of
// all_referenced_protocol_iterator since we are walking all categories.
for (auto *Proto : OI->all_referenced_protocols()) {
@@ -2118,11 +2198,11 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
CollectInheritedProtocols(SD, Protocols);
SD = SD->getSuperClass();
}
- } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
for (auto *Proto : OC->protocols()) {
CollectInheritedProtocols(Proto, Protocols);
}
- } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
// Insert the protocol.
if (!Protocols.insert(
const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
@@ -2145,7 +2225,7 @@ static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
if (FieldSize != UnionSize)
return false;
}
- return true;
+ return !RD->field_empty();
}
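The behavioral change above, observed through the library trait this code backs; a hedged sketch assuming a C++17 compiler built with this patch:

#include <type_traits>

union Empty {};           // no members: nothing to have a representation
union OneInt { int i; };  // a single int that fills the union exactly

static_assert(!std::has_unique_object_representations_v<Empty>);
static_assert(std::has_unique_object_representations_v<OneInt>);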
static bool isStructEmpty(QualType Ty) {
@@ -2184,7 +2264,7 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
}
}
- std::sort(
+ llvm::sort(
Bases.begin(), Bases.end(), [&](const std::pair<QualType, int64_t> &L,
const std::pair<QualType, int64_t> &R) {
return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
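For context, llvm::sort is a drop-in wrapper over std::sort that shuffles the range first in expensive-checks builds, so comparators that are not strict weak orders fail loudly instead of producing nondeterministic output; a minimal usage sketch:

#include "llvm/ADT/STLExtras.h"
#include <cstdint>
#include <utility>
#include <vector>

void sortByOffset(std::vector<std::pair<int, int64_t>> &Bases) {
  llvm::sort(Bases.begin(), Bases.end(),
             [](const std::pair<int, int64_t> &L,
                const std::pair<int, int64_t> &R) {
               return L.second < R.second;  // order by stored offset
             });
}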
@@ -2264,7 +2344,7 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
return true;
if (Ty->isMemberPointerType()) {
- const MemberPointerType *MPT = Ty->getAs<MemberPointerType>();
+ const auto *MPT = Ty->getAs<MemberPointerType>();
return !ABI->getMemberPointerInfo(MPT).HasPadding;
}
@@ -2330,7 +2410,7 @@ bool ASTContext::isSentinelNullExpr(const Expr *E) {
return false;
}
-/// \brief Get the implementation of ObjCInterfaceDecl, or nullptr if none
+/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
/// exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
@@ -2340,7 +2420,7 @@ ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D)
return nullptr;
}
-/// \brief Get the implementation of ObjCCategoryDecl, or nullptr if none
+/// Get the implementation of ObjCCategoryDecl, or nullptr if none
/// exists.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
@@ -2350,14 +2430,14 @@ ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
return nullptr;
}
-/// \brief Set the implementation of ObjCInterfaceDecl.
+/// Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
ObjCImplementationDecl *ImplD) {
assert(IFaceD && ImplD && "Passed null params");
ObjCImpls[IFaceD] = ImplD;
}
-/// \brief Set the implementation of ObjCCategoryDecl.
+/// Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
ObjCCategoryImplDecl *ImplD) {
assert(CatD && ImplD && "Passed null params");
@@ -2377,20 +2457,17 @@ void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
const NamedDecl *ND) const {
- if (const ObjCInterfaceDecl *ID =
- dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
+ if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
return ID;
- if (const ObjCCategoryDecl *CD =
- dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
+ if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
return CD->getClassInterface();
- if (const ObjCImplDecl *IMD =
- dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
+ if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
return IMD->getClassInterface();
return nullptr;
}
-/// \brief Get the copy initialization expression of VarDecl, or nullptr if
+/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
assert(VD && "Passed null params");
@@ -2398,10 +2475,10 @@ Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
"getBlockVarCopyInits - not __block var");
llvm::DenseMap<const VarDecl*, Expr*>::iterator
I = BlockVarCopyInits.find(VD);
- return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : nullptr;
+ return (I != BlockVarCopyInits.end()) ? I->second : nullptr;
}
-/// \brief Set the copy inialization expression of a block var decl.
+/// Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
assert(VD && Init && "Passed null params");
assert(VD->hasAttr<BlocksAttr>() &&
@@ -2417,7 +2494,7 @@ TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
"incorrect data size provided to CreateTypeSourceInfo!");
- TypeSourceInfo *TInfo =
+ auto *TInfo =
(TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
new (TInfo) TypeSourceInfo(T);
return TInfo;
@@ -2470,7 +2547,7 @@ ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
(void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
}
- ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+ auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
ExtQualNodes.InsertNode(eq, insertPos);
return QualType(eq, fastQuals);
}
@@ -2522,7 +2599,7 @@ QualType ASTContext::getObjCGCQualType(QualType T,
if (CanT.getObjCGCAttr() == GCAttr)
return T;
- if (const PointerType *ptr = T->getAs<PointerType>()) {
+ if (const auto *ptr = T->getAs<PointerType>()) {
QualType Pointee = ptr->getPointeeType();
if (Pointee->isAnyPointerType()) {
QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
@@ -2550,10 +2627,10 @@ const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
return T;
QualType Result;
- if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
+ if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
} else {
- const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ const auto *FPT = cast<FunctionProtoType>(T);
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.ExtInfo = Info;
Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
@@ -2566,7 +2643,7 @@ void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
QualType ResultType) {
FD = FD->getMostRecentDecl();
while (true) {
- const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
+ const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
if (FunctionDecl *Next = FD->getPreviousDecl())
@@ -2582,26 +2659,24 @@ void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
-static QualType getFunctionTypeWithExceptionSpec(
- ASTContext &Context, QualType Orig,
- const FunctionProtoType::ExceptionSpecInfo &ESI) {
+QualType ASTContext::getFunctionTypeWithExceptionSpec(
+ QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) {
// Might have some parens.
- if (auto *PT = dyn_cast<ParenType>(Orig))
- return Context.getParenType(
- getFunctionTypeWithExceptionSpec(Context, PT->getInnerType(), ESI));
+ if (const auto *PT = dyn_cast<ParenType>(Orig))
+ return getParenType(
+ getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));
// Might have a calling-convention attribute.
- if (auto *AT = dyn_cast<AttributedType>(Orig))
- return Context.getAttributedType(
+ if (const auto *AT = dyn_cast<AttributedType>(Orig))
+ return getAttributedType(
AT->getAttrKind(),
- getFunctionTypeWithExceptionSpec(Context, AT->getModifiedType(), ESI),
- getFunctionTypeWithExceptionSpec(Context, AT->getEquivalentType(),
- ESI));
+ getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
+ getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));
// Anything else must be a function type. Rebuild it with the new exception
// specification.
- const FunctionProtoType *Proto = cast<FunctionProtoType>(Orig);
- return Context.getFunctionType(
+ const auto *Proto = cast<FunctionProtoType>(Orig);
+ return getFunctionType(
Proto->getReturnType(), Proto->getParamTypes(),
Proto->getExtProtoInfo().withExceptionSpec(ESI));
}
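A hedged usage sketch for the newly exported member, mirroring hasSameFunctionTypeIgnoringExceptionSpec in the next hunk (sameIgnoringNoexcept is illustrative):

#include "clang/AST/ASTContext.h"

// Compare two function types modulo their exception specifications.
static bool sameIgnoringNoexcept(clang::ASTContext &Ctx, clang::QualType A,
                                 clang::QualType B) {
  return Ctx.hasSameType(
      Ctx.getFunctionTypeWithExceptionSpec(A, clang::EST_None),
      Ctx.getFunctionTypeWithExceptionSpec(B, clang::EST_None));
}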
@@ -2610,8 +2685,8 @@ bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
QualType U) {
return hasSameType(T, U) ||
(getLangOpts().CPlusPlus17 &&
- hasSameType(getFunctionTypeWithExceptionSpec(*this, T, EST_None),
- getFunctionTypeWithExceptionSpec(*this, U, EST_None)));
+ hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
+ getFunctionTypeWithExceptionSpec(U, EST_None)));
}
void ASTContext::adjustExceptionSpec(
@@ -2619,7 +2694,7 @@ void ASTContext::adjustExceptionSpec(
bool AsWritten) {
// Update the type.
QualType Updated =
- getFunctionTypeWithExceptionSpec(*this, FD->getType(), ESI);
+ getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
FD->setType(Updated);
if (!AsWritten)
@@ -2630,7 +2705,7 @@ void ASTContext::adjustExceptionSpec(
// If the type and the type-as-written differ, we may need to update
// the type-as-written too.
if (TSInfo->getType() != FD->getType())
- Updated = getFunctionTypeWithExceptionSpec(*this, TSInfo->getType(), ESI);
+ Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
// FIXME: When we get proper type location information for exceptions,
// we'll also have to rebuild the TypeSourceInfo. For now, we just patch
@@ -2664,7 +2739,7 @@ QualType ASTContext::getComplexType(QualType T) const {
ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
+ auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
Types.push_back(New);
ComplexTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -2692,7 +2767,7 @@ QualType ASTContext::getPointerType(QualType T) const {
PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
+ auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
Types.push_back(New);
PointerTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
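The profile/look-up/allocate dance these get*Type methods all repeat, reduced to a standalone sketch (IntPairNode is an illustrative type, not part of Clang):

#include "llvm/ADT/FoldingSet.h"

struct IntPairNode : llvm::FoldingSetNode {
  int A, B;
  IntPairNode(int A, int B) : A(A), B(B) {}
  void Profile(llvm::FoldingSetNodeID &ID) {
    ID.AddInteger(A);
    ID.AddInteger(B);
  }
};

IntPairNode *getUnique(llvm::FoldingSet<IntPairNode> &Set, int A, int B) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(A);
  ID.AddInteger(B);
  void *InsertPos = nullptr;
  if (IntPairNode *N = Set.FindNodeOrInsertPos(ID, InsertPos))
    return N;                       // an equivalent node already exists
  auto *N = new IntPairNode(A, B);  // arena allocation/ownership elided
  Set.InsertNode(N, InsertPos);     // reuses the position from the lookup
  return N;
}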
@@ -2783,8 +2858,7 @@ QualType ASTContext::getBlockPointerType(QualType T) const {
BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- BlockPointerType *New
- = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
+ auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
Types.push_back(New);
BlockPointerTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -2807,7 +2881,7 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(RT, 0);
- const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+ const auto *InnerRef = T->getAs<ReferenceType>();
// If the referencee type isn't canonical, this won't be a canonical type
// either, so fill in the canonical type field.
@@ -2822,9 +2896,8 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- LValueReferenceType *New
- = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
- SpelledAsLValue);
+ auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
+ SpelledAsLValue);
Types.push_back(New);
LValueReferenceTypes.InsertNode(New, InsertPos);
@@ -2844,7 +2917,7 @@ QualType ASTContext::getRValueReferenceType(QualType T) const {
RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(RT, 0);
- const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+ const auto *InnerRef = T->getAs<ReferenceType>();
// If the referencee type isn't canonical, this won't be a canonical type
// either, so fill in the canonical type field.
@@ -2859,8 +2932,7 @@ QualType ASTContext::getRValueReferenceType(QualType T) const {
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- RValueReferenceType *New
- = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
+ auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
Types.push_back(New);
RValueReferenceTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -2890,8 +2962,7 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- MemberPointerType *New
- = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
+ auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
Types.push_back(New);
MemberPointerTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -2935,7 +3006,7 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- ConstantArrayType *New = new(*this,TypeAlignment)
+  auto *New = new (*this, TypeAlignment)
ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
ConstantArrayTypes.InsertNode(New, InsertPos);
Types.push_back(New);
@@ -2964,6 +3035,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Builtin:
case Type::Complex:
case Type::Vector:
+ case Type::DependentVector:
case Type::ExtVector:
case Type::DependentSizedExtVector:
case Type::DependentAddressSpace:
@@ -3007,7 +3079,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
break;
case Type::LValueReference: {
- const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
+ const auto *lv = cast<LValueReferenceType>(ty);
result = getLValueReferenceType(
getVariableArrayDecayedType(lv->getPointeeType()),
lv->isSpelledAsLValue());
@@ -3015,20 +3087,20 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
}
case Type::RValueReference: {
- const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
+ const auto *lv = cast<RValueReferenceType>(ty);
result = getRValueReferenceType(
getVariableArrayDecayedType(lv->getPointeeType()));
break;
}
case Type::Atomic: {
- const AtomicType *at = cast<AtomicType>(ty);
+ const auto *at = cast<AtomicType>(ty);
result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
break;
}
case Type::ConstantArray: {
- const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
+ const auto *cat = cast<ConstantArrayType>(ty);
result = getConstantArrayType(
getVariableArrayDecayedType(cat->getElementType()),
cat->getSize(),
@@ -3038,7 +3110,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
}
case Type::DependentSizedArray: {
- const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
+ const auto *dat = cast<DependentSizedArrayType>(ty);
result = getDependentSizedArrayType(
getVariableArrayDecayedType(dat->getElementType()),
dat->getSizeExpr(),
@@ -3050,7 +3122,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
// Turn incomplete types into [*] types.
case Type::IncompleteArray: {
- const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
+ const auto *iat = cast<IncompleteArrayType>(ty);
result = getVariableArrayType(
getVariableArrayDecayedType(iat->getElementType()),
/*size*/ nullptr,
@@ -3062,7 +3134,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
// Turn VLA types into [*] types.
case Type::VariableArray: {
- const VariableArrayType *vat = cast<VariableArrayType>(ty);
+ const auto *vat = cast<VariableArrayType>(ty);
result = getVariableArrayType(
getVariableArrayDecayedType(vat->getElementType()),
/*size*/ nullptr,
@@ -3096,7 +3168,7 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
Canon = getQualifiedType(Canon, canonSplit.Quals);
}
- VariableArrayType *New = new(*this, TypeAlignment)
+ auto *New = new (*this, TypeAlignment)
VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
VariableArrayTypes.push_back(New);
@@ -3121,7 +3193,7 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// initializer. We do no canonicalization here at all, which is okay
// because they can't be used in most locations.
if (!numElements) {
- DependentSizedArrayType *newType
+ auto *newType
= new (*this, TypeAlignment)
DependentSizedArrayType(*this, elementType, QualType(),
numElements, ASM, elementTypeQuals,
@@ -3167,7 +3239,7 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// Otherwise, we need to build a type which follows the spelling
// of the element type.
- DependentSizedArrayType *sugaredType
+ auto *sugaredType
= new (*this, TypeAlignment)
DependentSizedArrayType(*this, elementType, canon, numElements,
ASM, elementTypeQuals, brackets);
@@ -3203,7 +3275,7 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
assert(!existing && "Shouldn't be in the map!"); (void) existing;
}
- IncompleteArrayType *newType = new (*this, TypeAlignment)
+ auto *newType = new (*this, TypeAlignment)
IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
IncompleteArrayTypes.InsertNode(newType, insertPos);
@@ -3235,13 +3307,52 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- VectorType *New = new (*this, TypeAlignment)
+ auto *New = new (*this, TypeAlignment)
VectorType(vecType, NumElts, Canonical, VecKind);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
}
+QualType
+ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
+ SourceLocation AttrLoc,
+ VectorType::VectorKind VecKind) const {
+ llvm::FoldingSetNodeID ID;
+ DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
+ VecKind);
+ void *InsertPos = nullptr;
+ DependentVectorType *Canon =
+ DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentVectorType *New;
+
+ if (Canon) {
+ New = new (*this, TypeAlignment) DependentVectorType(
+ *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
+ } else {
+ QualType CanonVecTy = getCanonicalType(VecType);
+ if (CanonVecTy == VecType) {
+ New = new (*this, TypeAlignment) DependentVectorType(
+ *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
+
+ DependentVectorType *CanonCheck =
+ DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck &&
+ "Dependent-sized vector_size canonical type broken");
+ (void)CanonCheck;
+ DependentVectorTypes.InsertNode(New, InsertPos);
+ } else {
+      QualType Canon = getDependentVectorType(CanonVecTy, SizeExpr,
+                                              SourceLocation(), VecKind);
+ New = new (*this, TypeAlignment) DependentVectorType(
+ *this, VecType, Canon, SizeExpr, AttrLoc, VecKind);
+ }
+ }
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
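A source-level sketch of what DependentVectorType represents: a GCC-style vector_size whose byte size depends on a template parameter (assumes a compiler carrying this patch):

template <int N>
struct Vec {
  // N * sizeof(float) is value-dependent, so this attribute yields a
  // DependentVectorType until the template is instantiated.
  typedef float type __attribute__((vector_size(N * sizeof(float))));
};

Vec<4>::type v = {1.0f, 2.0f, 3.0f, 4.0f};  // 16-byte vector of 4 floats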
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType
@@ -3266,7 +3377,7 @@ ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- ExtVectorType *New = new (*this, TypeAlignment)
+ auto *New = new (*this, TypeAlignment)
ExtVectorType(vecType, NumElts, Canonical);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
@@ -3342,7 +3453,7 @@ QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
return QualType(canonTy, 0);
- DependentAddressSpaceType *sugaredType
+ auto *sugaredType
= new (*this, TypeAlignment)
DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
AddrSpaceExpr, AttrLoc);
@@ -3350,7 +3461,7 @@ QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
return QualType(sugaredType, 0);
}
-/// \brief Determine whether \p T is canonical as the result type of a function.
+/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
return T.isCanonical() &&
(T.getObjCLifetime() == Qualifiers::OCL_None ||
@@ -3382,7 +3493,7 @@ ASTContext::getFunctionNoProtoType(QualType ResultTy,
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- FunctionNoProtoType *New = new (*this, TypeAlignment)
+ auto *New = new (*this, TypeAlignment)
FunctionNoProtoType(ResultTy, Canonical, Info);
Types.push_back(New);
FunctionNoProtoTypes.InsertNode(New, InsertPos);
@@ -3416,6 +3527,11 @@ static bool isCanonicalExceptionSpecification(
if (ESI.Type == EST_BasicNoexcept)
return true;
+  // A noexcept(expr) specification whose expression is value-dependent
+  // (EST_DependentNoexcept) is canonical.
+ if (ESI.Type == EST_DependentNoexcept)
+ return true;
+
// A dynamic exception specification is canonical if it only contains pack
// expansions (so we can't tell whether it's non-throwing) and all its
// contained types are canonical.
@@ -3430,11 +3546,6 @@ static bool isCanonicalExceptionSpecification(
return AnyPackExpansions;
}
- // A noexcept(expr) specification is (possibly) canonical if expr is
- // value-dependent.
- if (ESI.Type == EST_ComputedNoexcept)
- return ESI.NoexceptExpr && ESI.NoexceptExpr->isValueDependent();
-
return false;
}
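A source-level sketch of the specification kinds this hunk reshuffles (the function is illustrative; T must be copyable for the body to compile):

template <typename T>
void reset(T &a, T &b) noexcept(noexcept(T(a))) {
  // noexcept(noexcept(T(a))) is value-dependent: EST_DependentNoexcept.
  // A concrete T collapses it to EST_NoexceptTrue or EST_NoexceptFalse.
  T tmp(a);
  a = b;
  b = tmp;
}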
@@ -3462,7 +3573,7 @@ QualType ASTContext::getFunctionTypeInternal(
// noexcept expression, or we're just looking for a canonical type.
// Otherwise, we're going to need to create a type
// sugar node to hold the concrete expression.
- if (OnlyWantCanonical || EPI.ExceptionSpec.Type != EST_ComputedNoexcept ||
+ if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
return Existing;
@@ -3509,7 +3620,7 @@ QualType ASTContext::getFunctionTypeInternal(
// We don't know yet. It shouldn't matter what we pick here; no-one
// should ever look at this.
LLVM_FALLTHROUGH;
- case EST_None: case EST_MSAny:
+ case EST_None: case EST_MSAny: case EST_NoexceptFalse:
CanonicalEPI.ExceptionSpec.Type = EST_None;
break;
@@ -3531,24 +3642,12 @@ QualType ASTContext::getFunctionTypeInternal(
break;
}
- case EST_DynamicNone: case EST_BasicNoexcept:
+ case EST_DynamicNone: case EST_BasicNoexcept: case EST_NoexceptTrue:
CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
break;
- case EST_ComputedNoexcept:
- llvm::APSInt Value(1);
- auto *E = CanonicalEPI.ExceptionSpec.NoexceptExpr;
- if (!E || !E->isIntegerConstantExpr(Value, *this, nullptr,
- /*IsEvaluated*/false)) {
- // This noexcept specification is invalid.
- // FIXME: Should this be able to happen?
- CanonicalEPI.ExceptionSpec.Type = EST_None;
- break;
- }
-
- CanonicalEPI.ExceptionSpec.Type =
- Value.getBoolValue() ? EST_BasicNoexcept : EST_None;
- break;
+ case EST_DependentNoexcept:
+ llvm_unreachable("dependent noexcept is already canonical");
}
} else {
CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
@@ -3573,18 +3672,10 @@ QualType ASTContext::getFunctionTypeInternal(
// Instead of the exception types, there could be a noexcept
// expression, or information used to resolve the exception
// specification.
- size_t Size = sizeof(FunctionProtoType) +
- NumArgs * sizeof(QualType);
-
- if (EPI.ExceptionSpec.Type == EST_Dynamic) {
- Size += EPI.ExceptionSpec.Exceptions.size() * sizeof(QualType);
- } else if (EPI.ExceptionSpec.Type == EST_ComputedNoexcept) {
- Size += sizeof(Expr*);
- } else if (EPI.ExceptionSpec.Type == EST_Uninstantiated) {
- Size += 2 * sizeof(FunctionDecl*);
- } else if (EPI.ExceptionSpec.Type == EST_Unevaluated) {
- Size += sizeof(FunctionDecl*);
- }
+ size_t Size =
+ sizeof(FunctionProtoType) + NumArgs * sizeof(QualType) +
+ FunctionProtoType::getExceptionSpecSize(
+ EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
// Put the ExtParameterInfos last. If all were equal, it would make
// more sense to put these before the exception specification, because
@@ -3596,7 +3687,7 @@ QualType ASTContext::getFunctionTypeInternal(
Size += NumArgs * sizeof(FunctionProtoType::ExtParameterInfo);
}
- FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
+ auto *FTP = (FunctionProtoType *) Allocate(Size, TypeAlignment);
FunctionProtoType::ExtProtoInfo newEPI = EPI;
new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
Types.push_back(FTP);
@@ -3624,12 +3715,18 @@ QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
assert(!NewIP && "Shouldn't be in the map!");
(void)NewIP;
}
- PipeType *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
+ auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
Types.push_back(New);
PipeTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
}
+QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
+ // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
+ return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
+ : Ty;
+}
+
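A hedged usage sketch for the new helper: building a string literal's array type the way a caller such as Sema would, so OpenCL literals land in the constant address space (stringLiteralType is illustrative, not part of this patch):

#include "clang/AST/ASTContext.h"

static clang::QualType stringLiteralType(clang::ASTContext &Ctx,
                                         unsigned Length) {
  clang::QualType EltTy =
      Ctx.adjustStringLiteralBaseType(Ctx.CharTy.withConst());
  return Ctx.getConstantArrayType(EltTy,
                                  llvm::APInt(32, Length + 1),  // + NUL
                                  clang::ArrayType::Normal,
                                  /*IndexTypeQuals=*/0);
}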
QualType ASTContext::getReadPipeType(QualType T) const {
return getPipeType(T, true);
}
@@ -3641,7 +3738,7 @@ QualType ASTContext::getWritePipeType(QualType T) const {
#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
if (!isa<CXXRecordDecl>(D)) return false;
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ const auto *RD = cast<CXXRecordDecl>(D);
if (isa<ClassTemplatePartialSpecializationDecl>(RD))
return true;
if (RD->getDescribedClassTemplate() &&
@@ -3677,21 +3774,20 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
assert(Decl && "Passed null for Decl param");
assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
- if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
+ if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
return getTypedefType(Typedef);
assert(!isa<TemplateTypeParmDecl>(Decl) &&
"Template type parameter types are always available.");
- if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
+ if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
assert(Record->isFirstDecl() && "struct/union has previous declaration");
assert(!NeedsInjectedClassNameType(Record));
return getRecordType(Record);
- } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
+ } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
assert(Enum->isFirstDecl() && "enum has previous declaration");
return getEnumType(Enum);
- } else if (const UnresolvedUsingTypenameDecl *Using =
- dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
+ } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
Decl->TypeForDecl = newType;
Types.push_back(newType);
@@ -3710,7 +3806,7 @@ ASTContext::getTypedefType(const TypedefNameDecl *Decl,
if (Canonical.isNull())
Canonical = getCanonicalType(Decl->getUnderlyingType());
- TypedefType *newType = new(*this, TypeAlignment)
+ auto *newType = new (*this, TypeAlignment)
TypedefType(Type::Typedef, Decl, Canonical);
Decl->TypeForDecl = newType;
Types.push_back(newType);
@@ -3724,7 +3820,7 @@ QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
- RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
+ auto *newType = new (*this, TypeAlignment) RecordType(Decl);
Decl->TypeForDecl = newType;
Types.push_back(newType);
return QualType(newType, 0);
@@ -3737,7 +3833,7 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
- EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
+ auto *newType = new (*this, TypeAlignment) EnumType(Decl);
Decl->TypeForDecl = newType;
Types.push_back(newType);
return QualType(newType, 0);
@@ -3763,7 +3859,7 @@ QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
return QualType(type, 0);
}
-/// \brief Retrieve a substitution-result type.
+/// Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
QualType Replacement) const {
@@ -3786,7 +3882,7 @@ ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
return QualType(SubstParm, 0);
}
-/// \brief Retrieve a
+/// Retrieve a substitution-result type for a template type parameter pack.
QualType ASTContext::getSubstTemplateTypeParmPackType(
const TemplateTypeParmType *Parm,
const TemplateArgument &ArgPack) {
@@ -3812,7 +3908,7 @@ QualType ASTContext::getSubstTemplateTypeParmPackType(
SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
}
- SubstTemplateTypeParmPackType *SubstParm
+ auto *SubstParm
= new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
ArgPack);
Types.push_back(SubstParm);
@@ -3820,7 +3916,7 @@ QualType ASTContext::getSubstTemplateTypeParmPackType(
return QualType(SubstParm, 0);
}
-/// \brief Retrieve the template type parameter type for a template
+/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
@@ -3931,7 +4027,7 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
sizeof(TemplateArgument) * Args.size() +
(IsTypeAlias? sizeof(QualType) : 0),
TypeAlignment);
- TemplateSpecializationType *Spec
+ auto *Spec
= new (Mem) TemplateSpecializationType(Template, Args, CanonType,
IsTypeAlias ? Underlying : QualType());
@@ -3983,12 +4079,12 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
return QualType(Spec, 0);
}
-QualType
-ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
- QualType NamedType) const {
+QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ QualType NamedType,
+ TagDecl *OwnedTagDecl) const {
llvm::FoldingSetNodeID ID;
- ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
+ ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
void *InsertPos = nullptr;
ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
@@ -4003,7 +4099,8 @@ ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
(void)CheckT;
}
- T = new (*this, TypeAlignment) ElaboratedType(Keyword, NNS, NamedType, Canon);
+ T = new (*this, TypeAlignment)
+ ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
Types.push_back(T);
ElaboratedTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
@@ -4126,7 +4223,7 @@ ASTContext::getDependentTemplateSpecializationType(
TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
TemplateArgument Arg;
- if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
QualType ArgType = getTypeDeclType(TTP);
if (TTP->isParameterPack())
ArgType = getPackExpansionType(ArgType, None);
@@ -4265,7 +4362,7 @@ QualType ASTContext::getObjCObjectType(
// type.
ArrayRef<QualType> effectiveTypeArgs = typeArgs;
if (effectiveTypeArgs.empty()) {
- if (auto baseObject = baseType->getAs<ObjCObjectType>())
+ if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
effectiveTypeArgs = baseObject->getTypeArgs();
}
@@ -4313,7 +4410,7 @@ QualType ASTContext::getObjCObjectType(
size += typeArgs.size() * sizeof(QualType);
size += protocols.size() * sizeof(ObjCProtocolDecl *);
void *mem = Allocate(size, TypeAlignment);
- ObjCObjectTypeImpl *T =
+ auto *T =
new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
isKindOf);
@@ -4331,15 +4428,14 @@ ASTContext::applyObjCProtocolQualifiers(QualType type,
bool allowOnPointerType) const {
hasError = false;
- if (const ObjCTypeParamType *objT =
- dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
+ if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
return getObjCTypeParamType(objT->getDecl(), protocols);
}
// Apply protocol qualifiers to ObjCObjectPointerType.
if (allowOnPointerType) {
- if (const ObjCObjectPointerType *objPtr =
- dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
+ if (const auto *objPtr =
+ dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
const ObjCObjectType *objT = objPtr->getObjectType();
// Merge protocol lists and construct ObjCObjectType.
SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
@@ -4357,7 +4453,7 @@ ASTContext::applyObjCProtocolQualifiers(QualType type,
}
// Apply protocol qualifiers to ObjCObjectType.
- if (const ObjCObjectType *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
+  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())) {
// FIXME: Check for protocols to which the class type is already
// known to conform.
@@ -4379,7 +4475,7 @@ ASTContext::applyObjCProtocolQualifiers(QualType type,
// id<protocol-list>
if (type->isObjCIdType()) {
- const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
+ const auto *objPtr = type->castAs<ObjCObjectPointerType>();
type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
objPtr->isKindOfType());
return getObjCObjectPointerType(type);
@@ -4387,7 +4483,7 @@ ASTContext::applyObjCProtocolQualifiers(QualType type,
// Class<protocol-list>
if (type->isObjCClassType()) {
- const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
+ const auto *objPtr = type->castAs<ObjCObjectPointerType>();
type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
objPtr->isKindOfType());
return getObjCObjectPointerType(type);
@@ -4424,8 +4520,7 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
unsigned size = sizeof(ObjCTypeParamType);
size += protocols.size() * sizeof(ObjCProtocolDecl *);
void *mem = Allocate(size, TypeAlignment);
- ObjCTypeParamType *newType = new (mem)
- ObjCTypeParamType(Decl, Canonical, protocols);
+ auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
Types.push_back(newType);
ObjCTypeParamTypes.InsertNode(newType, InsertPos);
@@ -4440,7 +4535,7 @@ bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
if (!QT->isObjCQualifiedIdType())
return false;
- if (const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>()) {
+ if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
// If both the right and left sides have qualifiers.
for (auto *Proto : OPT->quals()) {
if (!IC->ClassImplementsProtocol(Proto, false))
@@ -4458,7 +4553,7 @@ bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
ObjCInterfaceDecl *IDecl) {
if (!QT->isObjCQualifiedIdType())
return false;
- const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
+ const auto *OPT = QT->getAs<ObjCObjectPointerType>();
if (!OPT)
return false;
if (!IDecl->hasDefinition())
@@ -4467,7 +4562,7 @@ bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
CollectInheritedProtocols(IDecl, InheritedProtocols);
if (InheritedProtocols.empty())
return false;
- // Check that if every protocol in list of id<plist> conforms to a protcol
+  // Check that if every protocol in the list of id<plist> conforms to a protocol
// of IDecl's, then bridge casting is ok.
bool Conforms = false;
for (auto *Proto : OPT->quals()) {
@@ -4520,7 +4615,7 @@ QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
// No match.
void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
- ObjCObjectPointerType *QType =
+ auto *QType =
new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
Types.push_back(QType);
@@ -4546,7 +4641,7 @@ QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
Decl = Def;
void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
- ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
+ auto *T = new (Mem) ObjCInterfaceType(Decl);
Decl->TypeForDecl = T;
Types.push_back(T);
return QualType(T, 0);
@@ -4593,12 +4688,12 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType) const {
QualType Canonical = getCanonicalType(tofType);
- TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
+ auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
Types.push_back(tot);
return QualType(tot, 0);
}
-/// \brief Unlike many "get<Type>" functions, we don't unique DecltypeType
+/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
@@ -4683,9 +4778,8 @@ QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(AT, 0);
- AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType,
- Keyword,
- IsDependent);
+ auto *AT = new (*this, TypeAlignment)
+ AutoType(DeducedType, Keyword, IsDependent);
Types.push_back(AT);
if (InsertPos)
AutoTypes.InsertNode(AT, InsertPos);
@@ -4706,7 +4800,7 @@ QualType ASTContext::getDeducedTemplateSpecializationType(
DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(DTST, 0);
- DeducedTemplateSpecializationType *DTST = new (*this, TypeAlignment)
+ auto *DTST = new (*this, TypeAlignment)
DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
Types.push_back(DTST);
if (InsertPos)
@@ -4736,7 +4830,7 @@ QualType ASTContext::getAtomicType(QualType T) const {
AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
+ auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
Types.push_back(New);
AtomicTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -4820,14 +4914,14 @@ QualType ASTContext::getPointerDiffType() const {
return getFromTargetType(Target->getPtrDiffType(0));
}
-/// \brief Return the unique unsigned counterpart of "ptrdiff_t"
+/// Return the unique unsigned counterpart of "ptrdiff_t"
/// integer type. The standard (C11 7.21.6.1p7) refers to this type
/// in the definition of the %tu format specifier.
QualType ASTContext::getUnsignedPointerDiffType() const {
return getFromTargetType(Target->getUnsignedPtrDiffType(0));
}
-/// \brief Return the unique type for "pid_t" defined in
+/// Return the unique type for "pid_t" defined in
/// <sys/types.h>. We need this to compute the correct type for vfork().
QualType ASTContext::getProcessIDType() const {
return getFromTargetType(Target->getProcessIDType());
@@ -4863,8 +4957,8 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
// the unqualified desugared type and then drops it on the floor.
// We then have to strip that sugar back off with
// getUnqualifiedDesugaredType(), which is silly.
- const ArrayType *AT =
- dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
+ const auto *AT =
+ dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
// If we don't have an array, just use the results in splitType.
if (!AT) {
@@ -4888,16 +4982,16 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
// build the type back up.
quals.addConsistentQualifiers(splitType.Quals);
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
return getConstantArrayType(unqualElementType, CAT->getSize(),
CAT->getSizeModifier(), 0);
}
- if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
}
- if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
+ if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
return getVariableArrayType(unqualElementType,
VAT->getSizeExpr(),
VAT->getSizeModifier(),
@@ -4905,31 +4999,66 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
VAT->getBracketsRange());
}
- const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
+ const auto *DSAT = cast<DependentSizedArrayType>(AT);
return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
DSAT->getSizeModifier(), 0,
SourceRange());
}
-/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
-/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
-/// they point to and return true. If T1 and T2 aren't pointer types
-/// or pointer-to-member types, or if they are not similar at this
-/// level, returns false and leaves T1 and T2 unchanged. Top-level
-/// qualifiers on T1 and T2 are ignored. This function will typically
-/// be called in a loop that successively "unwraps" pointer and
-/// pointer-to-member types to compare them at each level.
-bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
- const PointerType *T1PtrType = T1->getAs<PointerType>(),
- *T2PtrType = T2->getAs<PointerType>();
+/// Attempt to unwrap two types that may both be array types with the same bound
+/// (or both be array types of unknown bound) for the purpose of comparing the
+/// cv-decomposition of two types per C++ [conv.qual].
+bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
+ bool UnwrappedAny = false;
+ while (true) {
+ auto *AT1 = getAsArrayType(T1);
+ if (!AT1) return UnwrappedAny;
+
+ auto *AT2 = getAsArrayType(T2);
+ if (!AT2) return UnwrappedAny;
+
+ // If we don't have two array types with the same constant bound or two
+ // incomplete array types, we've unwrapped everything we can.
+ if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
+ auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
+ if (!CAT2 || CAT1->getSize() != CAT2->getSize())
+ return UnwrappedAny;
+ } else if (!isa<IncompleteArrayType>(AT1) ||
+ !isa<IncompleteArrayType>(AT2)) {
+ return UnwrappedAny;
+ }
+
+ T1 = AT1->getElementType();
+ T2 = AT2->getElementType();
+ UnwrappedAny = true;
+ }
+}
+
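A minimal sketch of the language rule this helper models (plain C++, not part
of the patch; per C++ [conv.qual], array bounds must agree for the types to
stay similar):

    int arr[2][3];
    const int (*p)[3] = arr;    // OK: int[3] and const int[3] share the bound,
                                // so only qualifiers differ after unwrapping
    // const int (*q)[4] = arr; // ill-formed: bounds 3 and 4 are definite but
                                // unequal, so the types are not similar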
+/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
+///
+/// If T1 and T2 are both pointer types of the same kind, or both array types
+/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
+/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
+///
+/// This function will typically be called in a loop that successively
+/// "unwraps" pointer and pointer-to-member types to compare them at each
+/// level.
+///
+/// \return \c true if a pointer type was unwrapped, \c false if we reached a
+/// pair of types that can't be unwrapped further.
+bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
+ UnwrapSimilarArrayTypes(T1, T2);
+
+ const auto *T1PtrType = T1->getAs<PointerType>();
+ const auto *T2PtrType = T2->getAs<PointerType>();
if (T1PtrType && T2PtrType) {
T1 = T1PtrType->getPointeeType();
T2 = T2PtrType->getPointeeType();
return true;
}
-
- const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
- *T2MPType = T2->getAs<MemberPointerType>();
+
+ const auto *T1MPType = T1->getAs<MemberPointerType>();
+ const auto *T2MPType = T2->getAs<MemberPointerType>();
if (T1MPType && T2MPType &&
hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
QualType(T2MPType->getClass(), 0))) {
@@ -4939,8 +5068,8 @@ bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
}
if (getLangOpts().ObjC1) {
- const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
- *T2OPType = T2->getAs<ObjCObjectPointerType>();
+ const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
+ const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
if (T1OPType && T2OPType) {
T1 = T1OPType->getPointeeType();
T2 = T2OPType->getPointeeType();
@@ -4953,6 +5082,37 @@ bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
return false;
}
+bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
+ while (true) {
+ Qualifiers Quals;
+ T1 = getUnqualifiedArrayType(T1, Quals);
+ T2 = getUnqualifiedArrayType(T2, Quals);
+ if (hasSameType(T1, T2))
+ return true;
+ if (!UnwrapSimilarTypes(T1, T2))
+ return false;
+ }
+}
+
+bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
+ while (true) {
+ Qualifiers Quals1, Quals2;
+ T1 = getUnqualifiedArrayType(T1, Quals1);
+ T2 = getUnqualifiedArrayType(T2, Quals2);
+
+ Quals1.removeCVRQualifiers();
+ Quals2.removeCVRQualifiers();
+ if (Quals1 != Quals2)
+ return false;
+
+ if (hasSameType(T1, T2))
+ return true;
+
+ if (!UnwrapSimilarTypes(T1, T2))
+ return false;
+ }
+}
+
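For reference, what hasSimilarType accepts and rejects, sketched in plain C++
(illustrative only): similarity ignores qualifiers at every level but requires
the same pointer structure.

    int **pp = nullptr;
    const int *const *cpp = pp;  // similar: both unwrap pointer -> pointer ->
                                 // int, differing only in cv-qualifiers
    // int ***ppp = pp;          // not similar: the unwrapping depths differ

hasCvrSimilarType is the stricter variant: after stripping CVR qualifiers it
still requires the remaining qualifiers (address space, ObjC lifetime, and the
like) to match at each level.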
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
SourceLocation NameLoc) const {
@@ -5008,8 +5168,7 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
case TemplateName::QualifiedTemplate:
case TemplateName::Template: {
TemplateDecl *Template = Name.getAsTemplateDecl();
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(Template))
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
Template = getCanonicalTemplateTemplateParmDecl(TTP);
// The canonical template name is the canonical template declaration.
@@ -5061,7 +5220,7 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
return Arg;
case TemplateArgument::Declaration: {
- ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
+ auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
return TemplateArgument(D, Arg.getParamTypeForDecl());
}
@@ -5087,8 +5246,7 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
if (Arg.pack_size() == 0)
return Arg;
- TemplateArgument *CanonArgs
- = new (*this) TemplateArgument[Arg.pack_size()];
+ auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
unsigned Idx = 0;
for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
AEnd = Arg.pack_end();
@@ -5139,7 +5297,7 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
// types, e.g.,
// typedef typename T::type T1;
// typedef typename T1::type T2;
- if (const DependentNameType *DNT = T->getAs<DependentNameType>())
+ if (const auto *DNT = T->getAs<DependentNameType>())
return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
const_cast<IdentifierInfo *>(DNT->getIdentifier()));
@@ -5163,7 +5321,7 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// Handle the non-qualified case efficiently.
if (!T.hasLocalQualifiers()) {
// Handle the common positive case fast.
- if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ if (const auto *AT = dyn_cast<ArrayType>(T))
return AT;
}
@@ -5183,7 +5341,7 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
Qualifiers qs = split.Quals;
// If we have a simple case, just return now.
- const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
+ const auto *ATy = dyn_cast<ArrayType>(split.Ty);
if (!ATy || qs.empty())
return ATy;
@@ -5191,17 +5349,16 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// qualifiers into the array element type and return a new array type.
QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
CAT->getSizeModifier(),
CAT->getIndexTypeCVRQualifiers()));
- if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
+ if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
IAT->getSizeModifier(),
IAT->getIndexTypeCVRQualifiers()));
- if (const DependentSizedArrayType *DSAT
- = dyn_cast<DependentSizedArrayType>(ATy))
+ if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
return cast<ArrayType>(
getDependentSizedArrayType(NewEltTy,
DSAT->getSizeExpr(),
@@ -5209,7 +5366,7 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
DSAT->getIndexTypeCVRQualifiers(),
DSAT->getBracketsRange()));
- const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
+ const auto *VAT = cast<VariableArrayType>(ATy);
return cast<ArrayType>(getVariableArrayType(NewEltTy,
VAT->getSizeExpr(),
VAT->getSizeModifier(),
@@ -5303,7 +5460,7 @@ ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
- if (const ComplexType *CT = T->getAs<ComplexType>())
+ if (const auto *CT = T->getAs<ComplexType>())
return getFloatingRank(CT->getElementType());
assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
@@ -5396,14 +5553,20 @@ unsigned ASTContext::getIntegerRank(const Type *T) const {
}
}
-/// \brief Whether this is a promotable bitfield reference according
+/// Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
if (E->isTypeDependent() || E->isValueDependent())
- return QualType();
+ return {};
+
+ // C++ [conv.prom]p5:
+ // If the bit-field has an enumerated type, it is treated as any other
+ // value of that type for promotion purposes.
+ if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
+ return {};
// FIXME: We should not do this unless E->refersToBitField() is true. This
// matters in C where getSourceBitField() will find bit-fields for various
@@ -5411,7 +5574,7 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
if (!Field)
- return QualType();
+ return {};
QualType FT = Field->getType();
@@ -5431,18 +5594,20 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
//
// FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
// We perform that promotion here to match GCC and C++.
+ // FIXME: C does not permit promotion of an enum bit-field whose rank is
+ // greater than that of 'int'. We perform that promotion to match GCC.
if (BitWidth < IntSize)
return IntTy;
if (BitWidth == IntSize)
return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
- // Types bigger than int are not subject to promotions, and therefore act
+ // Bit-fields wider than int are not subject to promotions, and therefore act
// like the base type. GCC has some weird bugs in this area that we
// deliberately do not follow (GCC follows a pre-standard resolution to
// C's DR315 which treats bit-width as being part of the type, and this leaks
// into their semantics in some cases).
- return QualType();
+ return {};
}
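A short illustration of the C99 6.3.1.1p2 rule implemented above (a sketch;
the widths assume a 32-bit int):

    struct S {
      unsigned Narrow : 3;   // every value fits in int
      unsigned Wide : 32;    // width equals that of int
    };
    // Given 'S s;', 's.Narrow + 0' has type int (BitWidth < IntSize), while
    // 's.Wide + 0' has type unsigned int (BitWidth == IntSize, unsigned field).

In C++, an enum bit-field now bails out early and promotes like any other
value of its enumeration type, per [conv.prom]p5.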
/// getPromotedIntegerType - Returns the type that Promotable will
@@ -5451,10 +5616,10 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
assert(!Promotable.isNull());
assert(Promotable->isPromotableIntegerType());
- if (const EnumType *ET = Promotable->getAs<EnumType>())
+ if (const auto *ET = Promotable->getAs<EnumType>())
return ET->getDecl()->getPromotionType();
- if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
+ if (const auto *BT = Promotable->getAs<BuiltinType>()) {
// C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
// (3.9.1) can be converted to a prvalue of the first of the following
// types that can represent all the values of its underlying type:
@@ -5463,6 +5628,7 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
// FIXME: Is there some better way to compute this?
if (BT->getKind() == BuiltinType::WChar_S ||
BT->getKind() == BuiltinType::WChar_U ||
+ BT->getKind() == BuiltinType::Char8 ||
BT->getKind() == BuiltinType::Char16 ||
BT->getKind() == BuiltinType::Char32) {
bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
@@ -5489,7 +5655,7 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
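A compile-time check of the promotion computed here (assuming a target where
int is 32 bits wide and so can represent every char16_t value; the newly
handled Char8 kind takes the same path):

    #include <type_traits>
    char16_t c = u'x';
    static_assert(std::is_same<decltype(+c), int>::value,
                  "char16_t promotes to int when int covers its range");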
-/// \brief Recurses in pointer/array types until it finds an objc retainable
+/// Recurses in pointer/array types until it finds an objc retainable
/// type and returns its ownership.
Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
while (!T.isNull()) {
@@ -5497,9 +5663,9 @@ Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
return T.getObjCLifetime();
if (T->isArrayType())
T = getBaseElementType(T);
- else if (const PointerType *PT = T->getAs<PointerType>())
+ else if (const auto *PT = T->getAs<PointerType>())
T = PT->getPointeeType();
- else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ else if (const auto *RT = T->getAs<ReferenceType>())
T = RT->getPointeeType();
else
break;
@@ -5524,9 +5690,9 @@ int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
const Type *RHSC = getCanonicalType(RHS).getTypePtr();
// Unwrap enums to their underlying type.
- if (const EnumType *ET = dyn_cast<EnumType>(LHSC))
+ if (const auto *ET = dyn_cast<EnumType>(LHSC))
LHSC = getIntegerTypeForEnum(ET);
- if (const EnumType *ET = dyn_cast<EnumType>(RHSC))
+ if (const auto *ET = dyn_cast<EnumType>(RHSC))
RHSC = getIntegerTypeForEnum(ET);
if (LHSC == RHSC) return 0;
@@ -5633,10 +5799,10 @@ QualType ASTContext::getObjCSuperType() const {
}
void ASTContext::setCFConstantStringType(QualType T) {
- const TypedefType *TD = T->getAs<TypedefType>();
+ const auto *TD = T->getAs<TypedefType>();
assert(TD && "Invalid CFConstantStringType");
CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl());
- auto TagType =
+ const auto *TagType =
CFConstantStringTypeDecl->getUnderlyingType()->getAs<RecordType>();
assert(TagType && "Invalid CFConstantStringType");
CFConstantStringTagDecl = TagType->getDecl();
@@ -5717,7 +5883,7 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
}
TargetInfo::OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
- auto BT = dyn_cast<BuiltinType>(T);
+ const auto *BT = dyn_cast<BuiltinType>(T);
if (!BT) {
if (isa<PipeType>(T))
@@ -5768,6 +5934,11 @@ bool ASTContext::BlockRequiresCopying(QualType Ty,
return true;
}
+ // The block needs copy/destroy helpers if Ty is non-trivial to destructively
+ // move or destroy.
+ if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
+ return true;
+
if (!Ty->isObjCRetainableType()) return false;
Qualifiers qs = Ty.getQualifiers();
@@ -5781,13 +5952,12 @@ bool ASTContext::BlockRequiresCopying(QualType Ty,
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
return false;
-
- // Tell the runtime that this is ARC __weak, called by the
- // byref routines.
+
+ // These cases should have been taken care of when checking the type's
+ // non-triviality.
case Qualifiers::OCL_Weak:
- // ARC __strong __block variables need to be retained.
case Qualifiers::OCL_Strong:
- return true;
+ llvm_unreachable("impossible");
}
llvm_unreachable("fell out of lifetime switch!");
}
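A sketch of the new non-triviality rule (Objective-C++ with -fblocks and ARC
assumed; Logger is a made-up type):

    struct Logger { ~Logger(); };        // non-trivial to destroy

    void demo(Logger lg) {
      void (^blk)() = ^{ (void)&lg; };   // the block copies 'lg', so it needs
                                         // copy/dispose helpers that run
                                         // ~Logger() when the block goes away
    }

The __weak and __strong lifetimes this switch used to answer are now caught by
the isNonTrivialToPrimitiveDestructiveMove()/isDestructedType() check above,
which is why they are treated as unreachable here.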
@@ -5827,7 +5997,7 @@ TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
// This returns true if a type has been typedefed to BOOL:
// typedef <type> BOOL;
static bool isTypeTypedefedAsBOOL(QualType T) {
- if (const TypedefType *TT = dyn_cast<TypedefType>(T))
+ if (const auto *TT = dyn_cast<TypedefType>(T))
if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
return II->isStr("BOOL");
@@ -5879,8 +6049,7 @@ ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
return InlineVariableDefinitionKind::WeakUnknown;
}
-static inline
-std::string charUnitsToString(const CharUnits &CU) {
+static std::string charUnitsToString(const CharUnits &CU) {
return llvm::itostr(CU.getQuantity());
}
@@ -5921,8 +6090,8 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
ParmOffset = PtrSize;
for (auto PVDecl : Decl->parameters()) {
QualType PType = PVDecl->getOriginalType();
- if (const ArrayType *AT =
- dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ if (const auto *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
// Use array's original type only if it has known number of
// elements.
if (!isa<ConstantArrayType>(AT))
@@ -5964,8 +6133,8 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
// Argument types.
for (auto PVDecl : Decl->parameters()) {
QualType PType = PVDecl->getOriginalType();
- if (const ArrayType *AT =
- dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ if (const auto *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
// Use array's original type only if it has known number of
// elements.
if (!isa<ConstantArrayType>(AT))
@@ -6034,8 +6203,8 @@ std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
E = Decl->sel_param_end(); PI != E; ++PI) {
const ParmVarDecl *PVDecl = *PI;
QualType PType = PVDecl->getOriginalType();
- if (const ArrayType *AT =
- dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ if (const auto *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
// Use array's original type only if it has known number of
// elements.
if (!isa<ConstantArrayType>(AT))
@@ -6057,13 +6226,12 @@ ASTContext::getObjCPropertyImplDeclForPropertyDecl(
const Decl *Container) const {
if (!Container)
return nullptr;
- if (const ObjCCategoryImplDecl *CID =
- dyn_cast<ObjCCategoryImplDecl>(Container)) {
+ if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) {
for (auto *PID : CID->property_impls())
if (PID->getPropertyDecl() == PD)
return PID;
} else {
- const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
+ const auto *OID = cast<ObjCImplementationDecl>(Container);
for (auto *PID : OID->property_impls())
if (PID->getPropertyDecl() == PD)
return PID;
@@ -6170,7 +6338,7 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
/// 'i' or 'I' instead if encoding a struct field, or a pointer!
void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
if (isa<TypedefType>(PointeeTy.getTypePtr())) {
- if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) {
+ if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
PointeeTy = UnsignedIntTy;
else
@@ -6207,6 +6375,7 @@ static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
switch (kind) {
case BuiltinType::Void: return 'v';
case BuiltinType::Bool: return 'B';
+ case BuiltinType::Char8:
case BuiltinType::Char_U:
case BuiltinType::UChar: return 'C';
case BuiltinType::Char16:
@@ -6235,6 +6404,30 @@ static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
case BuiltinType::Float16:
case BuiltinType::Float128:
case BuiltinType::Half:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
// FIXME: potentially need @encodes for these!
return ' ';
@@ -6270,7 +6463,7 @@ static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
return 'i';
// The encoding of a fixed enum type matches its fixed underlying type.
- const BuiltinType *BT = Enum->getIntegerType()->castAs<BuiltinType>();
+ const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
return getObjCEncodingForPrimitiveKind(C, BT->getKind());
}
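Illustration (Objective-C++; the letters are the standard ObjC runtime
encodings): a fixed enum encodes as its underlying type rather than as 'i'.

    enum PacketKind : unsigned short { Data, Ack };
    // @encode(PacketKind) yields "S" (unsigned short), not "i"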
@@ -6307,10 +6500,10 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
S += llvm::utostr(Offset);
- if (const EnumType *ET = T->getAs<EnumType>())
+ if (const auto *ET = T->getAs<EnumType>())
S += ObjCEncodingForEnumType(Ctx, ET);
else {
- const BuiltinType *BT = T->castAs<BuiltinType>();
+ const auto *BT = T->castAs<BuiltinType>();
S += getObjCEncodingForPrimitiveKind(Ctx, BT->getKind());
}
}
@@ -6335,21 +6528,21 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::Enum:
if (FD && FD->isBitField())
return EncodeBitField(this, S, T, FD);
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CT))
+ if (const auto *BT = dyn_cast<BuiltinType>(CT))
S += getObjCEncodingForPrimitiveKind(this, BT->getKind());
else
S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
return;
case Type::Complex: {
- const ComplexType *CT = T->castAs<ComplexType>();
+ const auto *CT = T->castAs<ComplexType>();
S += 'j';
getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, nullptr);
return;
}
case Type::Atomic: {
- const AtomicType *AT = T->castAs<AtomicType>();
+ const auto *AT = T->castAs<AtomicType>();
S += 'A';
getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, nullptr);
return;
@@ -6361,7 +6554,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::RValueReference: {
QualType PointeeTy;
if (isa<PointerType>(CT)) {
- const PointerType *PT = T->castAs<PointerType>();
+ const auto *PT = T->castAs<PointerType>();
if (PT->isObjCSelType()) {
S += ':';
return;
@@ -6405,7 +6598,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
S += '*';
return;
}
- } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) {
+ } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
// GCC binary compat: Need to convert "struct objc_class *" to "#".
if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
S += '#';
@@ -6430,7 +6623,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray: {
- const ArrayType *AT = cast<ArrayType>(CT);
+ const auto *AT = cast<ArrayType>(CT);
if (isa<IncompleteArrayType>(AT) && !StructField) {
// Incomplete arrays are encoded as a pointer to the array element.
@@ -6441,7 +6634,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
} else {
S += '[';
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
S += llvm::utostr(CAT->getSize().getZExtValue());
else {
// Variable length arrays are encoded as a regular array with 0 elements.
@@ -6470,8 +6663,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// Anonymous structures print as '?'
if (const IdentifierInfo *II = RDecl->getIdentifier()) {
S += II->getName();
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
+ if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
llvm::raw_string_ostream OS(S);
printTemplateArgumentList(OS, TemplateArgs.asArray(),
@@ -6513,10 +6705,10 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
}
case Type::BlockPointer: {
- const BlockPointerType *BT = T->castAs<BlockPointerType>();
+ const auto *BT = T->castAs<BlockPointerType>();
S += "@?"; // Unlike a pointer-to-function, which is "^?".
if (EncodeBlockParameters) {
- const FunctionType *FT = BT->getPointeeType()->castAs<FunctionType>();
+ const auto *FT = BT->getPointeeType()->castAs<FunctionType>();
S += '<';
// Block return type
@@ -6528,7 +6720,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// Block self
S += "@?";
// Block parameters
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
for (const auto &I : FPT->param_types())
getObjCEncodingForTypeImpl(
I, S, ExpandPointedToStructures, ExpandStructures, FD,
@@ -6552,7 +6744,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
S += "{objc_class=}";
return;
}
- // TODO: Double check to make sure this intentially falls through.
+ // TODO: Double check to make sure this intentionally falls through.
LLVM_FALLTHROUGH;
}
@@ -6567,7 +6759,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
SmallVector<const ObjCIvarDecl*, 32> Ivars;
DeepCollectObjCIvars(OI, true, Ivars);
for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
- const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
+ const FieldDecl *Field = Ivars[i];
if (Field->isBitField())
getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
else
@@ -6582,7 +6774,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
}
case Type::ObjCObjectPointer: {
- const ObjCObjectPointerType *OPT = T->castAs<ObjCObjectPointerType>();
+ const auto *OPT = T->castAs<ObjCObjectPointerType>();
if (OPT->isObjCIdType()) {
S += '@';
return;
@@ -6591,7 +6783,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
// FIXME: Consider if we need to output qualifiers for 'Class<p>'.
// Since this is a binary compatibility issue, need to consult with runtime
- // folks. Fortunately, this is a *very* obsure construct.
+ // folks. Fortunately, this is a *very* obscure construct.
S += '#';
return;
}
@@ -6628,7 +6820,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
SmallVector<const ObjCIvarDecl*, 32> Ivars;
DeepCollectObjCIvars(OI, true, Ivars);
for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
- if (cast<FieldDecl>(Ivars[i]) == FD) {
+ if (Ivars[i] == FD) {
S += '{';
S += OI->getObjCRuntimeNameAsString();
S += '}';
@@ -6702,7 +6894,7 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
return;
- CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
+ const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
@@ -6795,7 +6987,7 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
if (!dcl)
break; // reached end of structure.
- if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) {
+ if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
// We expand the bases without their virtual bases since those are going
// in the initial structure. Note that this differs from gcc which
// expands virtual bases each time one is encountered in the hierarchy,
@@ -6807,7 +6999,7 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
} else {
- FieldDecl *field = cast<FieldDecl>(dcl);
+ const auto *field = cast<FieldDecl>(dcl);
if (FD) {
S += '"';
S += field->getNameAsString();
@@ -7250,6 +7442,10 @@ TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
return BuiltinMSVaListDecl;
}
+bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
+ return BuiltinInfo.canBeRedeclared(FD->getBuiltinID());
+}
+
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
assert(ObjCConstantStringType.isNull() &&
"'NSConstantString' type already set!");
@@ -7257,7 +7453,7 @@ void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
ObjCConstantStringType = getObjCInterfaceType(Decl);
}
-/// \brief Retrieve the template name that corresponds to a non-empty
+/// Retrieve the template name that corresponds to a non-empty
/// lookup.
TemplateName
ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
@@ -7267,12 +7463,13 @@ ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
size * sizeof(FunctionTemplateDecl*));
- OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size);
+ auto *OT = new (memory) OverloadedTemplateStorage(size);
NamedDecl **Storage = OT->getStorage();
for (UnresolvedSetIterator I = Begin; I != End; ++I) {
NamedDecl *D = *I;
assert(isa<FunctionTemplateDecl>(D) ||
+ isa<UnresolvedUsingValueDecl>(D) ||
(isa<UsingShadowDecl>(D) &&
isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
*Storage++ = D;
@@ -7281,7 +7478,7 @@ ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
return TemplateName(OT);
}
-/// \brief Retrieve the template name that represents a qualified
+/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
TemplateName
ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
@@ -7305,7 +7502,7 @@ ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
return TemplateName(QTN);
}
-/// \brief Retrieve the template name that represents a dependent
+/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template apply.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
@@ -7341,7 +7538,7 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
return TemplateName(QTN);
}
-/// \brief Retrieve the template name that represents a dependent
+/// Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template operator+.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
@@ -7399,7 +7596,7 @@ ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
TemplateName
ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
const TemplateArgument &ArgPack) const {
- ASTContext &Self = const_cast<ASTContext &>(*this);
+ auto &Self = const_cast<ASTContext &>(*this);
llvm::FoldingSetNodeID ID;
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
@@ -7422,7 +7619,7 @@ ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
/// is actually a value of type @c TargetInfo::IntType.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
switch (Type) {
- case TargetInfo::NoInt: return CanQualType();
+ case TargetInfo::NoInt: return {};
case TargetInfo::SignedChar: return SignedCharTy;
case TargetInfo::UnsignedChar: return UnsignedCharTy;
case TargetInfo::SignedShort: return ShortTy;
@@ -7465,7 +7662,7 @@ Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
// pointer.
#ifndef NDEBUG
QualType CT = Ty->getCanonicalTypeInternal();
- while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
+ while (const auto *AT = dyn_cast<ArrayType>(CT))
CT = AT->getElementType();
assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
@@ -7496,8 +7693,8 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
// Treat Neon vector types and most AltiVec vector types as if they are the
// equivalent GCC vector types.
- const VectorType *First = FirstVec->getAs<VectorType>();
- const VectorType *Second = SecondVec->getAs<VectorType>();
+ const auto *First = FirstVec->getAs<VectorType>();
+ const auto *Second = SecondVec->getAs<VectorType>();
if (First->getNumElements() == Second->getNumElements() &&
hasSameType(First->getElementType(), Second->getElementType()) &&
First->getVectorKind() != VectorType::AltiVecPixel &&
@@ -7530,8 +7727,8 @@ ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
/// Class<pr1, ...>.
bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs,
QualType rhs) {
- const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>();
- const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ const auto *lhsQID = lhs->getAs<ObjCObjectPointerType>();
+ const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
assert((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
for (auto *lhsProto : lhsQID->quals()) {
@@ -7561,7 +7758,7 @@ bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
return true;
if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) {
- const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
if (!rhsOPT) return false;
@@ -7847,14 +8044,14 @@ void getIntersectionOfProtocols(ASTContext &Context,
static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
QualType rhs) {
// Common case: two object pointers.
- const ObjCObjectPointerType *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
- const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
+ const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
if (lhsOPT && rhsOPT)
return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT);
// Two block pointers.
- const BlockPointerType *lhsBlock = lhs->getAs<BlockPointerType>();
- const BlockPointerType *rhsBlock = rhs->getAs<BlockPointerType>();
+ const auto *lhsBlock = lhs->getAs<BlockPointerType>();
+ const auto *rhsBlock = rhs->getAs<BlockPointerType>();
if (lhsBlock && rhsBlock)
return ctx.typesAreBlockPointerCompatible(lhs, rhs);
@@ -7914,7 +8111,7 @@ QualType ASTContext::areCommonBaseCompatible(
const ObjCInterfaceDecl* RDecl = RHS->getInterface();
if (!LDecl || !RDecl)
- return QualType();
+ return {};
// When either LHS or RHS is a kindof type, we should return a kindof type.
// For example, for common base of kindof(ASub1) and kindof(ASub2), we return
@@ -7939,7 +8136,7 @@ QualType ASTContext::areCommonBaseCompatible(
if (!sameObjCTypeArgs(*this, LHS->getInterface(),
LHS->getTypeArgs(), RHS->getTypeArgs(),
/*stripKindOf=*/true))
- return QualType();
+ return {};
} else if (LHS->isSpecialized() != RHS->isSpecialized()) {
// If only one has type arguments, the result will not have type
// arguments.
@@ -7990,7 +8187,7 @@ QualType ASTContext::areCommonBaseCompatible(
if (!sameObjCTypeArgs(*this, LHS->getInterface(),
LHS->getTypeArgs(), RHS->getTypeArgs(),
/*stripKindOf=*/true))
- return QualType();
+ return {};
} else if (LHS->isSpecialized() != RHS->isSpecialized()) {
// If only one has type arguments, the result will not have type
// arguments.
@@ -8025,7 +8222,7 @@ QualType ASTContext::areCommonBaseCompatible(
RHS = RHSSuperType->castAs<ObjCObjectType>();
}
- return QualType();
+ return {};
}
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
@@ -8092,8 +8289,8 @@ bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
// get the "pointed to" types
- const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
- const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
+ const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
+ const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
if (!LHSOPT || !RHSOPT)
return false;
@@ -8146,7 +8343,7 @@ QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
}
}
- return QualType();
+ return {};
}
/// mergeFunctionParameterTypes - merge two types which appear as function
@@ -8173,10 +8370,10 @@ QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
bool OfBlockPointer,
bool Unqualified) {
- const FunctionType *lbase = lhs->getAs<FunctionType>();
- const FunctionType *rbase = rhs->getAs<FunctionType>();
- const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
- const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
+ const auto *lbase = lhs->getAs<FunctionType>();
+ const auto *rbase = rhs->getAs<FunctionType>();
+ const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
+ const auto *rproto = dyn_cast<FunctionProtoType>(rbase);
bool allLTypes = true;
bool allRTypes = true;
@@ -8193,7 +8390,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
else
retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
Unqualified);
- if (retType.isNull()) return QualType();
+ if (retType.isNull())
+ return {};
if (Unqualified)
retType = retType.getUnqualifiedType();
@@ -8219,18 +8417,20 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
// Compatible functions must have compatible calling conventions
if (lbaseInfo.getCC() != rbaseInfo.getCC())
- return QualType();
+ return {};
// Regparm is part of the calling convention.
if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
- return QualType();
+ return {};
if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
- return QualType();
+ return {};
if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
- return QualType();
+ return {};
if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
- return QualType();
+ return {};
+ if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
+ return {};
// FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
@@ -8247,20 +8447,20 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
"C++ shouldn't be here");
// Compatible functions must have the same number of parameters
if (lproto->getNumParams() != rproto->getNumParams())
- return QualType();
+ return {};
// Variadic and non-variadic functions aren't compatible
if (lproto->isVariadic() != rproto->isVariadic())
- return QualType();
+ return {};
if (lproto->getTypeQuals() != rproto->getTypeQuals())
- return QualType();
+ return {};
SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
bool canUseLeft, canUseRight;
if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight,
newParamInfos))
- return QualType();
+ return {};
if (!canUseLeft)
allLTypes = false;
@@ -8275,7 +8475,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
QualType paramType = mergeFunctionParameterTypes(
lParamType, rParamType, OfBlockPointer, Unqualified);
if (paramType.isNull())
- return QualType();
+ return {};
if (Unqualified)
paramType = paramType.getUnqualifiedType();
@@ -8308,7 +8508,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
const FunctionProtoType *proto = lproto ? lproto : rproto;
if (proto) {
assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
- if (proto->isVariadic()) return QualType();
+ if (proto->isVariadic())
+ return {};
// Check that the types are compatible with the types that
// would result from default argument promotions (C99 6.7.5.3p15).
// The only types actually affected are promotable integer
@@ -8319,15 +8520,15 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
// Look at the converted type of enum types, since that is the type used
// to pass enum values.
- if (const EnumType *Enum = paramTy->getAs<EnumType>()) {
+ if (const auto *Enum = paramTy->getAs<EnumType>()) {
paramTy = Enum->getDecl()->getIntegerType();
if (paramTy.isNull())
- return QualType();
+ return {};
}
if (paramTy->isPromotableIntegerType() ||
getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
- return QualType();
+ return {};
}
if (allLTypes) return lhs;
@@ -8351,7 +8552,8 @@ static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
// Compatibility is based on the underlying type, not the promotion
// type.
QualType underlyingType = ET->getDecl()->getIntegerType();
- if (underlyingType.isNull()) return QualType();
+ if (underlyingType.isNull())
+ return {};
if (Context.hasSameType(underlyingType, other))
return other;
@@ -8361,7 +8563,7 @@ static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
Context.getTypeSize(underlyingType) == Context.getTypeSize(other))
return other;
- return QualType();
+ return {};
}
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
@@ -8397,7 +8599,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
LQuals.hasUnaligned() != RQuals.hasUnaligned())
- return QualType();
+ return {};
// Exactly one GC qualifier difference is allowed: __strong is
// okay if the other type has no GC qualifier but is an Objective
@@ -8409,7 +8611,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
- return QualType();
+ return {};
if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
@@ -8417,7 +8619,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
}
- return QualType();
+ return {};
}
// Okay, qualifiers are equal.
@@ -8448,7 +8650,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
if (LHSClass != RHSClass) {
// Note that we only have special rules for turning block enum
// returns into block int returns, not vice-versa.
- if (const EnumType* ETy = LHS->getAs<EnumType>()) {
+ if (const auto *ETy = LHS->getAs<EnumType>()) {
return mergeEnumWithInteger(*this, ETy, RHS, false);
}
if (const EnumType* ETy = RHS->getAs<EnumType>()) {
@@ -8462,7 +8664,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
return RHS;
}
- return QualType();
+ return {};
}
// The canonical type classes match.
@@ -8500,7 +8702,8 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
}
QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
Unqualified);
- if (ResultType.isNull()) return QualType();
+ if (ResultType.isNull())
+ return {};
if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
return LHS;
if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
@@ -8522,7 +8725,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
// Blocks can't be an expression in a ternary operator (OpenCL v2.0
// 6.12.5) thus the following check is asymmetric.
if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual))
- return QualType();
+ return {};
LHSPteeQual.removeAddressSpace();
RHSPteeQual.removeAddressSpace();
LHSPointee =
@@ -8532,7 +8735,8 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
}
QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
Unqualified);
- if (ResultType.isNull()) return QualType();
+ if (ResultType.isNull())
+ return {};
if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
return LHS;
if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
@@ -8550,7 +8754,8 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
}
QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
Unqualified);
- if (ResultType.isNull()) return QualType();
+ if (ResultType.isNull())
+ return {};
if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
return LHS;
if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
@@ -8562,7 +8767,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
- return QualType();
+ return {};
QualType LHSElem = getAsArrayType(LHS)->getElementType();
QualType RHSElem = getAsArrayType(RHS)->getElementType();
@@ -8572,7 +8777,40 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
}
QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
- if (ResultType.isNull()) return QualType();
+ if (ResultType.isNull())
+ return {};
+
+ const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
+ const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
+
+ // If either side is a variable array, and both are complete, check whether
+ // the current dimension is definite.
+ if (LVAT || RVAT) {
+ auto SizeFetch = [this](const VariableArrayType* VAT,
+ const ConstantArrayType* CAT)
+ -> std::pair<bool,llvm::APInt> {
+ if (VAT) {
+ llvm::APSInt TheInt;
+ Expr *E = VAT->getSizeExpr();
+ if (E && E->isIntegerConstantExpr(TheInt, *this))
+ return std::make_pair(true, TheInt);
+ else
+ return std::make_pair(false, TheInt);
+ } else if (CAT) {
+ return std::make_pair(true, CAT->getSize());
+ } else {
+ return std::make_pair(false, llvm::APInt());
+ }
+ };
+
+ bool HaveLSize, HaveRSize;
+ llvm::APInt LSize, RSize;
+ std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT);
+ std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT);
+ if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize))
+ return {}; // Definite, but unequal, array dimension
+ }
+
if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
return LHS;
if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
@@ -8581,8 +8819,6 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
ArrayType::ArraySizeModifier(), 0);
if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(),
ArrayType::ArraySizeModifier(), 0);
- const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
- const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
return LHS;
if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
@@ -8608,29 +8844,29 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
case Type::Record:
case Type::Enum:
- return QualType();
+ return {};
case Type::Builtin:
// Only exactly equal builtin types are compatible, which is tested above.
- return QualType();
+ return {};
case Type::Complex:
// Distinct complex types are incompatible.
- return QualType();
+ return {};
case Type::Vector:
// FIXME: The merged type should be an ExtVector!
if (areCompatVectorTypes(LHSCan->getAs<VectorType>(),
RHSCan->getAs<VectorType>()))
return LHS;
- return QualType();
+ return {};
case Type::ObjCObject: {
// Check if the types are assignment compatible.
// FIXME: This should be type compatibility, e.g. whether
// "LHS x; RHS x;" at global scope is legal.
- const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>();
- const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>();
+ const auto *LHSIface = LHS->getAs<ObjCObjectType>();
+ const auto *RHSIface = RHS->getAs<ObjCObjectType>();
if (canAssignObjCInterfaces(LHSIface, RHSIface))
return LHS;
- return QualType();
+ return {};
}
case Type::ObjCObjectPointer:
if (OfBlockPointer) {
@@ -8639,17 +8875,17 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
RHS->getAs<ObjCObjectPointerType>(),
BlockReturnType))
return LHS;
- return QualType();
+ return {};
}
if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(),
RHS->getAs<ObjCObjectPointerType>()))
return LHS;
- return QualType();
+ return {};
case Type::Pipe:
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
- return QualType();
+ return {};
}
llvm_unreachable("Invalid Type::Class!");
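The practical effect of the new definite-dimension check, sketched with
constant arrays (the variable-array branch is the C analogue, where a VLA
bound that folds to an integer constant joins the same comparison):

    extern int a[];     // unknown bound
    extern int a[4];    // OK: the composite type is int[4]
    // extern int a[5]; // rejected: definite but unequal array dimensions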
@@ -8717,7 +8953,7 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
return LHS;
if (RHSCan->isFunctionType()) {
if (!LHSCan->isFunctionType())
- return QualType();
+ return {};
QualType OldReturnType =
cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
QualType NewReturnType =
@@ -8725,12 +8961,12 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
QualType ResReturnType =
mergeObjCGCQualifiers(NewReturnType, OldReturnType);
if (ResReturnType.isNull())
- return QualType();
+ return {};
if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
// id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
// In either case, use OldReturnType to build the new function type.
- const FunctionType *F = LHS->getAs<FunctionType>();
- if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) {
+ const auto *F = LHS->getAs<FunctionType>();
+ if (const auto *FPT = cast<FunctionProtoType>(F)) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.ExtInfo = getFunctionExtInfo(LHS);
QualType ResultType =
@@ -8738,7 +8974,7 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
return ResultType;
}
}
- return QualType();
+ return {};
}
// If the qualifiers are different, the types can still be merged.
@@ -8748,7 +8984,7 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
// If any of these qualifiers are different, we have a type mismatch.
if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
LQuals.getAddressSpace() != RQuals.getAddressSpace())
- return QualType();
+ return {};
// Exactly one GC qualifier difference is allowed: __strong is
// okay if the other type has no GC qualifier but is an Objective
@@ -8760,13 +8996,13 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
- return QualType();
+ return {};
if (GC_L == Qualifiers::Strong)
return LHS;
if (GC_R == Qualifiers::Strong)
return RHS;
- return QualType();
+ return {};
}
if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
@@ -8778,7 +9014,7 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
if (ResQT == RHSBaseQT)
return RHS;
}
- return QualType();
+ return {};
}
//===----------------------------------------------------------------------===//
@@ -8786,7 +9022,7 @@ QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
//===----------------------------------------------------------------------===//
unsigned ASTContext::getIntWidth(QualType T) const {
- if (const EnumType *ET = T->getAs<EnumType>())
+ if (const auto *ET = T->getAs<EnumType>())
T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
@@ -8795,19 +9031,20 @@ unsigned ASTContext::getIntWidth(QualType T) const {
}
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
- assert(T->hasSignedIntegerRepresentation() && "Unexpected type");
+ assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
+ "Unexpected type");
// Turn <4 x signed int> -> <4 x unsigned int>
- if (const VectorType *VTy = T->getAs<VectorType>())
+ if (const auto *VTy = T->getAs<VectorType>())
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
VTy->getNumElements(), VTy->getVectorKind());
// For enums, we return the unsigned version of the base type.
- if (const EnumType *ETy = T->getAs<EnumType>())
+ if (const auto *ETy = T->getAs<EnumType>())
T = ETy->getDecl()->getIntegerType();
- const BuiltinType *BTy = T->getAs<BuiltinType>();
- assert(BTy && "Unexpected signed integer type");
+ const auto *BTy = T->getAs<BuiltinType>();
+ assert(BTy && "Unexpected signed integer or fixed point type");
switch (BTy->getKind()) {
case BuiltinType::Char_S:
case BuiltinType::SChar:
@@ -8822,8 +9059,33 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
return UnsignedLongLongTy;
case BuiltinType::Int128:
return UnsignedInt128Ty;
+
+ case BuiltinType::ShortAccum:
+ return UnsignedShortAccumTy;
+ case BuiltinType::Accum:
+ return UnsignedAccumTy;
+ case BuiltinType::LongAccum:
+ return UnsignedLongAccumTy;
+ case BuiltinType::SatShortAccum:
+ return SatUnsignedShortAccumTy;
+ case BuiltinType::SatAccum:
+ return SatUnsignedAccumTy;
+ case BuiltinType::SatLongAccum:
+ return SatUnsignedLongAccumTy;
+ case BuiltinType::ShortFract:
+ return UnsignedShortFractTy;
+ case BuiltinType::Fract:
+ return UnsignedFractTy;
+ case BuiltinType::LongFract:
+ return UnsignedLongFractTy;
+ case BuiltinType::SatShortFract:
+ return SatUnsignedShortFractTy;
+ case BuiltinType::SatFract:
+ return SatUnsignedFractTy;
+ case BuiltinType::SatLongFract:
+ return SatUnsignedLongFractTy;
default:
- llvm_unreachable("Unexpected signed integer type");
+ llvm_unreachable("Unexpected signed integer or fixed point type");
}
}
@@ -8931,10 +9193,12 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Type = Context.FloatTy;
break;
case 'd':
- assert(HowLong < 2 && !Signed && !Unsigned &&
+ assert(HowLong < 3 && !Signed && !Unsigned &&
"Bad modifiers used with 'd'!");
- if (HowLong)
+ if (HowLong == 1)
Type = Context.LongDoubleTy;
+ else if (HowLong == 2)
+ Type = Context.Float128Ty;
else
Type = Context.DoubleTy;
break;
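How the decoder now reads the 'd' family in Builtins.def signature strings
(a sketch of the mapping only; no particular builtin is implied):

    // Signature fragment -> decoded type
    //   "d"      double
    //   "Ld"     long double   (HowLong == 1)
    //   "LLd"    __float128    (HowLong == 2, the case added here)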
@@ -9050,7 +9314,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Type = Context.getFILEType();
if (Type.isNull()) {
Error = ASTContext::GE_Missing_stdio;
- return QualType();
+ return {};
}
break;
case 'J':
@@ -9061,7 +9325,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
if (Type.isNull()) {
Error = ASTContext::GE_Missing_setjmp;
- return QualType();
+ return {};
}
break;
case 'K':
@@ -9070,7 +9334,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
if (Type.isNull()) {
Error = ASTContext::GE_Missing_ucontext;
- return QualType();
+ return {};
}
break;
case 'p':
@@ -9132,14 +9396,14 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
RequiresICE, true);
if (Error != GE_None)
- return QualType();
+ return {};
assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
while (TypeStr[0] && TypeStr[0] != '.') {
QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
if (Error != GE_None)
- return QualType();
+ return {};
// If this argument is required to be an IntegerConstantExpression and the
// caller cares, fill in the bitmask we return.
@@ -9154,7 +9418,7 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
}
if (Id == Builtin::BI__GetExceptionInfo)
- return QualType();
+ return {};
assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
"'.' should only occur at end of builtin type list!");
@@ -9185,7 +9449,7 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
// Non-user-provided functions get emitted as weak definitions with every
// use, no matter whether they've been explicitly instantiated etc.
- if (auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
if (!MD->isUserProvided())
return GVA_DiscardableODR;
@@ -9240,6 +9504,21 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
return GVA_DiscardableODR;
}
+static bool isDeclareTargetToDeclaration(const Decl *VD) {
+ for (const Decl *D : VD->redecls()) {
+ if (!D->hasAttrs())
+ continue;
+ if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getMapType() == OMPDeclareTargetDeclAttr::MT_To;
+ }
+ if (const auto *V = dyn_cast<VarDecl>(VD)) {
+ if (const VarDecl *TD = V->getTemplateInstantiationPattern())
+ return isDeclareTargetToDeclaration(TD);
+ }
+
+ return false;
+}
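+// Illustrative source pattern (a sketch, not part of the upstream patch):
+// variables under "#pragma omp declare target" carry OMPDeclareTargetDeclAttr
+// with map type MT_To on some redeclaration, so the walk above returns true;
+// variables listed in "declare target link(...)" get MT_Link instead and
+// return false.
+//
+//   #pragma omp declare target
+//   int MappedVar;                 // MT_To   -> true
+//   #pragma omp end declare target
+//
+//   #pragma omp declare target link(LinkedVar)
+//   extern int LinkedVar;          // MT_Link -> false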
+
static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
const Decl *D, GVALinkage L) {
// See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
@@ -9256,6 +9535,12 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
// visible externally so they can be launched from host.
if (L == GVA_DiscardableODR || L == GVA_Internal)
return GVA_StrongODR;
+  } else if (Context.getLangOpts().OpenMP &&
+             Context.getLangOpts().OpenMPIsDevice &&
+ isDeclareTargetToDeclaration(D)) {
+ // Static variables must be visible externally so they can be mapped from
+ // host.
+ if (L == GVA_Internal)
+ return GVA_StrongODR;
}
return L;
}
@@ -9375,7 +9660,7 @@ GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
}
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (!VD->isFileVarDecl())
return false;
// Global named register variables (GNU extension) are never emitted.
@@ -9384,14 +9669,13 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
if (VD->getDescribedVarTemplate() ||
isa<VarTemplatePartialSpecializationDecl>(VD))
return false;
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// We never need to emit an uninstantiated function template.
if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
return false;
} else if (isa<PragmaCommentDecl>(D))
return true;
- else if (isa<OMPThreadPrivateDecl>(D) ||
- D->hasAttr<OMPDeclareTargetDeclAttr>())
+ else if (isa<OMPThreadPrivateDecl>(D))
return true;
else if (isa<PragmaDetectMismatchDecl>(D))
return true;
@@ -9404,6 +9688,29 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
else
return false;
+ if (D->isFromASTFile() && !LangOpts.BuildingPCHWithObjectFile) {
+ assert(getExternalSource() && "It's from an AST file; must have a source.");
+ // On Windows, PCH files are built together with an object file. If this
+ // declaration comes from such a PCH and DeclMustBeEmitted would return
+    // true here, it also returned true when the PCH was built, so the decl
+    // was emitted into that object file then and doesn't need to be emitted
+    // again here.
+ // Note that decls are still emitted if they're referenced, as usual;
+ // DeclMustBeEmitted is used to decide whether a decl must be emitted even
+ // if it's not referenced.
+ //
+ // Explicit template instantiation definitions are tricky. If there was an
+ // explicit template instantiation decl in the PCH before, it will look like
+ // the definition comes from there, even if that was just the declaration.
+ // (Explicit instantiation defs of variable templates always get emitted.)
+ bool IsExpInstDef =
+ isa<FunctionDecl>(D) &&
+ cast<FunctionDecl>(D)->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition;
+
+ if (getExternalSource()->DeclIsFromPCHWithObjectFile(D) && !IsExpInstDef)
+ return false;
+ }
+
// If this is a member of a class template, we do not need to emit it.
if (D->getDeclContext()->isDependentContext())
return false;
@@ -9416,7 +9723,7 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
return true;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Forward declarations aren't required.
if (!FD->doesThisDeclarationHaveABody())
return FD->doesDeclarationForceExternallyVisibleDefinition();
@@ -9424,11 +9731,11 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
// Constructors and destructors are required.
if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
return true;
-
+
// The key function for a class is required. This rule only comes
// into play when inline functions can be key functions, though.
if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
const CXXRecordDecl *RD = MD->getParent();
if (MD->isOutOfLine() && RD->isDynamicClass()) {
const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
@@ -9445,8 +9752,8 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
// Implicit template instantiations can also be deferred in C++.
return !isDiscardableGVALinkage(Linkage);
}
-
- const VarDecl *VD = cast<VarDecl>(D);
+
+ const auto *VD = cast<VarDecl>(D);
assert(VD->isFileVarDecl() && "Expected file scoped var");
if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
@@ -9474,15 +9781,41 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
// Likewise, variables with tuple-like bindings are required if their
// bindings have side-effects.
- if (auto *DD = dyn_cast<DecompositionDecl>(VD))
- for (auto *BD : DD->bindings())
- if (auto *BindingVD = BD->getHoldingVar())
+ if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
+ for (const auto *BD : DD->bindings())
+ if (const auto *BindingVD = BD->getHoldingVar())
if (DeclMustBeEmitted(BindingVD))
return true;
+ // If the decl is marked as `declare target`, it should be emitted.
+ for (const auto *Decl : D->redecls()) {
+ if (!Decl->hasAttrs())
+ continue;
+ if (const auto *Attr = Decl->getAttr<OMPDeclareTargetDeclAttr>())
+ if (Attr->getMapType() != OMPDeclareTargetDeclAttr::MT_Link)
+ return true;
+ }
+
return false;
}
+void ASTContext::forEachMultiversionedFunctionVersion(
+ const FunctionDecl *FD,
+ llvm::function_ref<void(const FunctionDecl *)> Pred) const {
+ assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
+ llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
+ FD = FD->getCanonicalDecl();
+ for (auto *CurDecl :
+ FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
+    // getAsFunction() may return null for lookup results that are neither
+    // functions nor function templates; guard before taking the canonical
+    // declaration.
+    FunctionDecl *CurFD = CurDecl->getAsFunction();
+    if (CurFD)
+      CurFD = CurFD->getCanonicalDecl();
+ if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
+ std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
+ SeenDecls.insert(CurFD);
+ Pred(CurFD);
+ }
+ }
+}
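+// Usage sketch (hypothetical caller, not part of the upstream patch): visit
+// every version of a multiversioned function, e.g. to collect the versions
+// for later mangling or emission decisions.
+//
+//   if (FD->isMultiVersion())
+//     Ctx.forEachMultiversionedFunctionVersion(
+//         FD, [&](const FunctionDecl *Version) {
+//           Versions.push_back(Version);
+//         });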
+
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
bool IsCXXMethod) const {
// Pass through to the C++ ABI object
@@ -9595,7 +9928,7 @@ QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const {
case TargetInfo::Float128:
return Float128Ty;
case TargetInfo::NoFloat:
- return QualType();
+ return {};
}
llvm_unreachable("Unhandled TargetInfo::RealType value");
@@ -9739,7 +10072,7 @@ createDynTypedNode(const NestedNameSpecifierLoc &Node) {
}
/// @}
- /// \brief A \c RecursiveASTVisitor that builds a map from nodes to their
+ /// A \c RecursiveASTVisitor that builds a map from nodes to their
/// parents as defined by the \c RecursiveASTVisitor.
///
/// Note that the relationship described here is purely in terms of AST
@@ -9749,7 +10082,7 @@ createDynTypedNode(const NestedNameSpecifierLoc &Node) {
/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
public:
- /// \brief Builds and returns the translation unit's parent map.
+ /// Builds and returns the translation unit's parent map.
///
/// The caller takes ownership of the returned \c ParentMap.
static std::pair<ASTContext::ParentMapPointers *,
@@ -9874,7 +10207,8 @@ static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
if (I == Map.end()) {
return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
}
- if (auto *V = I->second.template dyn_cast<ASTContext::ParentVector *>()) {
+ if (const auto *V =
+ I->second.template dyn_cast<ASTContext::ParentVector *>()) {
return llvm::makeArrayRef(*V);
}
return getSingleDynTypedNodeFromParentMap(I->second);
@@ -9942,6 +10276,42 @@ unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
return (*AddrSpaceMap)[(unsigned)AS];
}
+QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ if (Ty->isSaturatedFixedPointType()) return Ty;
+
+  const auto *BT = Ty->getAs<BuiltinType>();
+ switch (BT->getKind()) {
+ default:
+ llvm_unreachable("Not a fixed point type!");
+ case BuiltinType::ShortAccum:
+ return SatShortAccumTy;
+ case BuiltinType::Accum:
+ return SatAccumTy;
+ case BuiltinType::LongAccum:
+ return SatLongAccumTy;
+ case BuiltinType::UShortAccum:
+ return SatUnsignedShortAccumTy;
+ case BuiltinType::UAccum:
+ return SatUnsignedAccumTy;
+ case BuiltinType::ULongAccum:
+ return SatUnsignedLongAccumTy;
+ case BuiltinType::ShortFract:
+ return SatShortFractTy;
+ case BuiltinType::Fract:
+ return SatFractTy;
+ case BuiltinType::LongFract:
+ return SatLongFractTy;
+ case BuiltinType::UShortFract:
+ return SatUnsignedShortFractTy;
+ case BuiltinType::UFract:
+ return SatUnsignedFractTy;
+ case BuiltinType::ULongFract:
+ return SatUnsignedLongFractTy;
+ }
+}
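+// Example (illustrative, not part of the upstream patch):
+//   Ctx.getCorrespondingSaturatedType(Ctx.ShortAccumTy) yields
+//   SatShortAccumTy, i.e. 'short _Accum' -> '_Sat short _Accum'; an
+//   already-saturated type such as SatFractTy is returned unchanged by the
+//   early exit above.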
+
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
@@ -9950,3 +10320,92 @@ clang::LazyGenerationalUpdatePtr<
clang::LazyGenerationalUpdatePtr<
const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
const clang::ASTContext &Ctx, Decl *Value);
+
+unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ const auto *BT = Ty->getAs<BuiltinType>();
+ const TargetInfo &Target = getTargetInfo();
+ switch (BT->getKind()) {
+ default:
+ llvm_unreachable("Not a fixed point type!");
+ case BuiltinType::ShortAccum:
+ case BuiltinType::SatShortAccum:
+ return Target.getShortAccumScale();
+ case BuiltinType::Accum:
+ case BuiltinType::SatAccum:
+ return Target.getAccumScale();
+ case BuiltinType::LongAccum:
+ case BuiltinType::SatLongAccum:
+ return Target.getLongAccumScale();
+ case BuiltinType::UShortAccum:
+ case BuiltinType::SatUShortAccum:
+ return Target.getUnsignedShortAccumScale();
+ case BuiltinType::UAccum:
+ case BuiltinType::SatUAccum:
+ return Target.getUnsignedAccumScale();
+ case BuiltinType::ULongAccum:
+ case BuiltinType::SatULongAccum:
+ return Target.getUnsignedLongAccumScale();
+ case BuiltinType::ShortFract:
+ case BuiltinType::SatShortFract:
+ return Target.getShortFractScale();
+ case BuiltinType::Fract:
+ case BuiltinType::SatFract:
+ return Target.getFractScale();
+ case BuiltinType::LongFract:
+ case BuiltinType::SatLongFract:
+ return Target.getLongFractScale();
+ case BuiltinType::UShortFract:
+ case BuiltinType::SatUShortFract:
+ return Target.getUnsignedShortFractScale();
+ case BuiltinType::UFract:
+ case BuiltinType::SatUFract:
+ return Target.getUnsignedFractScale();
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatULongFract:
+ return Target.getUnsignedLongFractScale();
+ }
+}
+
+unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+
+ const auto *BT = Ty->getAs<BuiltinType>();
+ const TargetInfo &Target = getTargetInfo();
+ switch (BT->getKind()) {
+ default:
+ llvm_unreachable("Not a fixed point type!");
+ case BuiltinType::ShortAccum:
+ case BuiltinType::SatShortAccum:
+ return Target.getShortAccumIBits();
+ case BuiltinType::Accum:
+ case BuiltinType::SatAccum:
+ return Target.getAccumIBits();
+ case BuiltinType::LongAccum:
+ case BuiltinType::SatLongAccum:
+ return Target.getLongAccumIBits();
+ case BuiltinType::UShortAccum:
+ case BuiltinType::SatUShortAccum:
+ return Target.getUnsignedShortAccumIBits();
+ case BuiltinType::UAccum:
+ case BuiltinType::SatUAccum:
+ return Target.getUnsignedAccumIBits();
+ case BuiltinType::ULongAccum:
+ case BuiltinType::SatULongAccum:
+ return Target.getUnsignedLongAccumIBits();
+ case BuiltinType::ShortFract:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::SatFract:
+ case BuiltinType::LongFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatULongFract:
+ return 0;
+ }
+}
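+
+// Worked example (hedged; fixed point bit layouts are target-defined): on a
+// target where 'short _Accum' has 1 sign bit, 8 integral bits, and 7
+// fractional bits, getFixedPointScale returns 7 and getFixedPointIBits
+// returns 8, so the value 2.5 is stored as 2.5 * 2^7 == 320. _Fract types
+// hold fractional bits only (values in [-1, 1)), which is why their IBits
+// are 0 above.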
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index b43c28deb362..c4c0f6e5ebe3 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -200,7 +200,7 @@ break; \
return QC.apply(Context, QT);
}
-/// \brief Convert the given type to a string suitable for printing as part of
+/// Convert the given type to a string suitable for printing as part of
/// a diagnostic.
///
/// There are four main criteria when determining whether we should have an
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 92be6d95e898..f46ae58d192d 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -521,10 +521,12 @@ namespace {
// Exprs
void VisitExpr(const Expr *Node);
void VisitCastExpr(const CastExpr *Node);
+ void VisitImplicitCastExpr(const ImplicitCastExpr *Node);
void VisitDeclRefExpr(const DeclRefExpr *Node);
void VisitPredefinedExpr(const PredefinedExpr *Node);
void VisitCharacterLiteral(const CharacterLiteral *Node);
void VisitIntegerLiteral(const IntegerLiteral *Node);
+ void VisitFixedPointLiteral(const FixedPointLiteral *Node);
void VisitFloatingLiteral(const FloatingLiteral *Node);
void VisitStringLiteral(const StringLiteral *Str);
void VisitInitListExpr(const InitListExpr *ILE);
@@ -539,6 +541,7 @@ namespace {
void VisitAddrLabelExpr(const AddrLabelExpr *Node);
void VisitBlockExpr(const BlockExpr *Node);
void VisitOpaqueValueExpr(const OpaqueValueExpr *Node);
+ void VisitGenericSelectionExpr(const GenericSelectionExpr *E);
// C++
void VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node);
@@ -808,11 +811,10 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
- for (auto I = Deserialize ? Primary->lookups_begin()
- : Primary->noload_lookups_begin(),
- E = Deserialize ? Primary->lookups_end()
- : Primary->noload_lookups_end();
- I != E; ++I) {
+ auto Range = Deserialize
+ ? Primary->lookups()
+ : Primary->noload_lookups(/*PreserveInternalState=*/true);
+ for (auto I = Range.begin(), E = Range.end(); I != E; ++I) {
DeclarationName Name = I.getLookupName();
DeclContextLookupResult R = *I;
@@ -1602,7 +1604,7 @@ void ASTDumper::VisitClassTemplatePartialSpecializationDecl(
void ASTDumper::VisitClassScopeFunctionSpecializationDecl(
const ClassScopeFunctionSpecializationDecl *D) {
- dumpDeclRef(D->getSpecialization());
+ dumpDecl(D->getSpecialization());
if (D->hasExplicitTemplateArgs())
dumpTemplateArgumentListInfo(D->templateArgs());
}
@@ -1946,10 +1948,15 @@ void ASTDumper::dumpStmt(const Stmt *S) {
return;
}
+ // Some statements have custom mechanisms for dumping their children.
if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
VisitDeclStmt(DS);
return;
}
+ if (const GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(S)) {
+ VisitGenericSelectionExpr(GSE);
+ return;
+ }
ConstStmtVisitor<ASTDumper>::Visit(S);
@@ -2113,6 +2120,12 @@ void ASTDumper::VisitCastExpr(const CastExpr *Node) {
OS << ">";
}
+void ASTDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) {
+ VisitCastExpr(Node);
+ if (Node->isPartOfExplicitCast())
+ OS << " part_of_explicit_cast";
+}
+
void ASTDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
VisitExpr(Node);
@@ -2172,6 +2185,13 @@ void ASTDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
OS << " " << Node->getValue().toString(10, isSigned);
}
+void ASTDumper::VisitFixedPointLiteral(const FixedPointLiteral *Node) {
+ VisitExpr(Node);
+
+ ColorScope Color(*this, ValueColor);
+ OS << " " << Node->getValueAsString(/*Radix=*/10);
+}
+
void ASTDumper::VisitFloatingLiteral(const FloatingLiteral *Node) {
VisitExpr(Node);
ColorScope Color(*this, ValueColor);
@@ -2211,6 +2231,8 @@ void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
VisitExpr(Node);
OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
<< " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+ if (!Node->canOverflow())
+ OS << " cannot overflow";
}
void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
@@ -2272,6 +2294,32 @@ void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
dumpStmt(Source);
}
+void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
+ VisitExpr(E);
+ if (E->isResultDependent())
+ OS << " result_dependent";
+ dumpStmt(E->getControllingExpr());
+ dumpTypeAsChild(E->getControllingExpr()->getType()); // FIXME: remove
+
+ for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
+ dumpChild([=] {
+ if (const TypeSourceInfo *TSI = E->getAssocTypeSourceInfo(I)) {
+ OS << "case ";
+ dumpType(TSI->getType());
+ } else {
+ OS << "default";
+ }
+
+ if (!E->isResultDependent() && E->getResultIndex() == I)
+ OS << " selected";
+
+ if (const TypeSourceInfo *TSI = E->getAssocTypeSourceInfo(I))
+ dumpTypeAsChild(TSI->getType());
+ dumpStmt(E->getAssocExpr(I));
+ });
+ }
+}
+
// GNU extensions.
void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 0d1d9807549f..6668067233e4 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -1,4 +1,4 @@
-//===--- ASTImporter.cpp - Importing ASTs from other Contexts ---*- C++ -*-===//
+//===- ASTImporter.cpp - Importing ASTs from other Contexts ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,29 +11,165 @@
// context into another context.
//
//===----------------------------------------------------------------------===//
+
#include "clang/AST/ASTImporter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTStructuralEquivalence.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclAccessPair.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclGroup.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/LambdaCapture.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeVisitor.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
-#include <deque>
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
namespace clang {
+
+ template <class T>
+ SmallVector<Decl*, 2>
+ getCanonicalForwardRedeclChain(Redeclarable<T>* D) {
+ SmallVector<Decl*, 2> Redecls;
+ for (auto *R : D->getFirstDecl()->redecls()) {
+ if (R != D->getFirstDecl())
+ Redecls.push_back(R);
+ }
+ Redecls.push_back(D->getFirstDecl());
+ std::reverse(Redecls.begin(), Redecls.end());
+ return Redecls;
+ }
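+  // Illustrative behavior (not part of the upstream patch): for a chain
+  //   void f(); void f(); void f() {}
+  // this returns the three declarations in forward source order, starting
+  // from the canonical (first) declaration.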
+
+ SmallVector<Decl*, 2> getCanonicalForwardRedeclChain(Decl* D) {
+    // Currently, only FunctionDecl redeclaration chains are supported.
+    auto *FD = cast<FunctionDecl>(D);
+ return getCanonicalForwardRedeclChain<FunctionDecl>(FD);
+ }
+
+ void updateFlags(const Decl *From, Decl *To) {
+ // Check if some flags or attrs are new in 'From' and copy into 'To'.
+ // FIXME: Other flags or attrs?
+ if (From->isUsed(false) && !To->isUsed(false))
+ To->setIsUsed();
+ }
+
class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
public DeclVisitor<ASTNodeImporter, Decl *>,
public StmtVisitor<ASTNodeImporter, Stmt *> {
ASTImporter &Importer;
+ // Wrapper for an overload set.
+ template <typename ToDeclT> struct CallOverloadedCreateFun {
+ template <typename... Args>
+ auto operator()(Args &&... args)
+ -> decltype(ToDeclT::Create(std::forward<Args>(args)...)) {
+ return ToDeclT::Create(std::forward<Args>(args)...);
+ }
+ };
+
+  // Always use these functions to create a Decl during import. There are
+  // certain tasks which must be done after the Decl was created, e.g. we
+  // must immediately register it as an imported Decl. The parameter `ToD`
+  // will be set to the newly created Decl, or, if `FromD` had already been
+  // imported, to the previously imported Decl. Returns true if `FromD` had
+  // been imported before.
+ template <typename ToDeclT, typename FromDeclT, typename... Args>
+ LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD,
+ Args &&... args) {
+ // There may be several overloads of ToDeclT::Create. We must make sure
+ // to call the one which would be chosen by the arguments, thus we use a
+ // wrapper for the overload set.
+ CallOverloadedCreateFun<ToDeclT> OC;
+ return GetImportedOrCreateSpecialDecl(ToD, OC, FromD,
+ std::forward<Args>(args)...);
+ }
+  // Use this overload if a Decl of a special type needs to be created. E.g.
+  // if we want to create a `TypeAliasDecl` and assign it to a
+  // `TypedefNameDecl` pointer, then:
+ // TypedefNameDecl *ToTypedef;
+ // GetImportedOrCreateDecl<TypeAliasDecl>(ToTypedef, FromD, ...);
+ template <typename NewDeclT, typename ToDeclT, typename FromDeclT,
+ typename... Args>
+ LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD,
+ Args &&... args) {
+ CallOverloadedCreateFun<NewDeclT> OC;
+ return GetImportedOrCreateSpecialDecl(ToD, OC, FromD,
+ std::forward<Args>(args)...);
+ }
+  // Use this version if a special create function must be used, e.g.
+  // CXXRecordDecl::CreateLambda.
+ template <typename ToDeclT, typename CreateFunT, typename FromDeclT,
+ typename... Args>
+ LLVM_NODISCARD bool
+ GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun,
+ FromDeclT *FromD, Args &&... args) {
+ ToD = cast_or_null<ToDeclT>(Importer.GetAlreadyImportedOrNull(FromD));
+ if (ToD)
+ return true; // Already imported.
+ ToD = CreateFun(std::forward<Args>(args)...);
+ InitializeImportedDecl(FromD, ToD);
+ return false; // A new Decl is created.
+ }
+
+ void InitializeImportedDecl(Decl *FromD, Decl *ToD) {
+ Importer.MapImported(FromD, ToD);
+ ToD->IdentifierNamespace = FromD->IdentifierNamespace;
+ if (FromD->hasAttrs())
+ for (const Attr *FromAttr : FromD->getAttrs())
+ ToD->addAttr(Importer.Import(FromAttr));
+ if (FromD->isUsed())
+ ToD->setIsUsed();
+ if (FromD->isImplicit())
+ ToD->setImplicit();
+ }
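+  // Typical use inside a visitor (sketch; the names and the elided Create()
+  // argument list are illustrative, not part of the upstream patch):
+  //
+  //   FunctionDecl *ToFunction = nullptr;
+  //   if (GetImportedOrCreateDecl(ToFunction, FromFD, /* Create() args */))
+  //     return ToFunction; // Was imported before; reuse it.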
+
public:
- explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) { }
-
+ explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) {}
+
using TypeVisitor<ASTNodeImporter, QualType>::Visit;
using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
@@ -52,7 +188,7 @@ namespace clang {
QualType VisitConstantArrayType(const ConstantArrayType *T);
QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
QualType VisitVariableArrayType(const VariableArrayType *T);
- // FIXME: DependentSizedArrayType
+ QualType VisitDependentSizedArrayType(const DependentSizedArrayType *T);
// FIXME: DependentSizedExtVectorType
QualType VisitVectorType(const VectorType *T);
QualType VisitExtVectorType(const ExtVectorType *T);
@@ -76,14 +212,15 @@ namespace clang {
QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T);
QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
QualType VisitElaboratedType(const ElaboratedType *T);
- // FIXME: DependentNameType
+ QualType VisitDependentNameType(const DependentNameType *T);
QualType VisitPackExpansionType(const PackExpansionType *T);
- // FIXME: DependentTemplateSpecializationType
+ QualType VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType *T);
QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
QualType VisitObjCObjectType(const ObjCObjectType *T);
QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
-
- // Importing declarations
+
+ // Importing declarations
bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
DeclContext *&LexicalDC, DeclarationName &Name,
NamedDecl *&ToD, SourceLocation &Loc);
@@ -91,22 +228,27 @@ namespace clang {
void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
DeclarationNameInfo& To);
void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+ void ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
bool ImportCastPath(CastExpr *E, CXXCastPath &Path);
- typedef DesignatedInitExpr::Designator Designator;
+ using Designator = DesignatedInitExpr::Designator;
+
Designator ImportDesignator(const Designator &D);
+ Optional<LambdaCapture> ImportLambdaCapture(const LambdaCapture &From);
- /// \brief What we should import from the definition.
+ /// What we should import from the definition.
enum ImportDefinitionKind {
- /// \brief Import the default subset of the definition, which might be
+ /// Import the default subset of the definition, which might be
/// nothing (if minimal import is set) or might be everything (if minimal
/// import is not set).
IDK_Default,
- /// \brief Import everything.
+
+ /// Import everything.
IDK_Everything,
- /// \brief Import only the bare bones needed to establish a valid
+
+ /// Import only the bare bones needed to establish a valid
/// DeclContext.
IDK_Basic
};
@@ -127,16 +269,33 @@ namespace clang {
bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
ImportDefinitionKind Kind = IDK_Default);
TemplateParameterList *ImportTemplateParameterList(
- TemplateParameterList *Params);
+ TemplateParameterList *Params);
TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
Optional<TemplateArgumentLoc> ImportTemplateArgumentLoc(
const TemplateArgumentLoc &TALoc);
bool ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
- SmallVectorImpl<TemplateArgument> &ToArgs);
+ SmallVectorImpl<TemplateArgument> &ToArgs);
+
template <typename InContainerTy>
bool ImportTemplateArgumentListInfo(const InContainerTy &Container,
TemplateArgumentListInfo &ToTAInfo);
+
+ template<typename InContainerTy>
+ bool ImportTemplateArgumentListInfo(SourceLocation FromLAngleLoc,
+ SourceLocation FromRAngleLoc,
+ const InContainerTy &Container,
+ TemplateArgumentListInfo &Result);
+
+ using TemplateArgsTy = SmallVector<TemplateArgument, 8>;
+ using OptionalTemplateArgsTy = Optional<TemplateArgsTy>;
+ std::tuple<FunctionTemplateDecl *, OptionalTemplateArgsTy>
+ ImportFunctionTemplateWithTemplateArgsFromSpecialization(
+ FunctionDecl *FromFD);
+
+ bool ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
+
+ bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
bool Complain = true);
bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
@@ -145,6 +304,7 @@ namespace clang {
bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC);
bool IsStructuralMatch(FunctionTemplateDecl *From,
FunctionTemplateDecl *To);
+ bool IsStructuralMatch(FunctionDecl *From, FunctionDecl *To);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
Decl *VisitDecl(Decl *D);
@@ -185,7 +345,6 @@ namespace clang {
Decl *VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
Decl *VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
-
ObjCTypeParamList *ImportObjCTypeParamList(ObjCTypeParamList *list);
Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
@@ -282,29 +441,38 @@ namespace clang {
Expr *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE);
Expr *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
Expr *VisitPackExpansionExpr(PackExpansionExpr *E);
+ Expr *VisitSizeOfPackExpr(SizeOfPackExpr *E);
Expr *VisitCXXNewExpr(CXXNewExpr *CE);
Expr *VisitCXXDeleteExpr(CXXDeleteExpr *E);
Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
Expr *VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
Expr *VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
+ Expr *VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
+ Expr *VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *CE);
+ Expr *VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
+ Expr *VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
Expr *VisitExprWithCleanups(ExprWithCleanups *EWC);
Expr *VisitCXXThisExpr(CXXThisExpr *E);
Expr *VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
Expr *VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
Expr *VisitMemberExpr(MemberExpr *E);
Expr *VisitCallExpr(CallExpr *E);
+ Expr *VisitLambdaExpr(LambdaExpr *LE);
Expr *VisitInitListExpr(InitListExpr *E);
+ Expr *VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
+ Expr *VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E);
Expr *VisitArrayInitLoopExpr(ArrayInitLoopExpr *E);
Expr *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
Expr *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
Expr *VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
Expr *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
Expr *VisitTypeTraitExpr(TypeTraitExpr *E);
-
+ Expr *VisitCXXTypeidExpr(CXXTypeidExpr *E);
template<typename IIter, typename OIter>
void ImportArray(IIter Ibegin, IIter Iend, OIter Obegin) {
- typedef typename std::remove_reference<decltype(*Obegin)>::type ItemT;
+ using ItemT = typename std::remove_reference<decltype(*Obegin)>::type;
+
ASTImporter &ImporterRef = Importer;
std::transform(Ibegin, Iend, Obegin,
[&ImporterRef](ItemT From) -> ItemT {
@@ -314,13 +482,13 @@ namespace clang {
template<typename IIter, typename OIter>
bool ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
- typedef typename std::remove_reference<decltype(**Obegin)>::type ItemT;
+ using ItemT = typename std::remove_reference<decltype(**Obegin)>::type;
+
ASTImporter &ImporterRef = Importer;
bool Failed = false;
std::transform(Ibegin, Iend, Obegin,
[&ImporterRef, &Failed](ItemT *From) -> ItemT * {
- ItemT *To = cast_or_null<ItemT>(
- ImporterRef.Import(From));
+ auto *To = cast_or_null<ItemT>(ImporterRef.Import(From));
if (!To && From)
Failed = true;
return To;
@@ -342,9 +510,58 @@ namespace clang {
// Importing overrides.
void ImportOverrides(CXXMethodDecl *ToMethod, CXXMethodDecl *FromMethod);
+
+ FunctionDecl *FindFunctionTemplateSpecialization(FunctionDecl *FromFD);
};
+
+template <typename InContainerTy>
+bool ASTNodeImporter::ImportTemplateArgumentListInfo(
+ SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc,
+ const InContainerTy &Container, TemplateArgumentListInfo &Result) {
+ TemplateArgumentListInfo ToTAInfo(Importer.Import(FromLAngleLoc),
+ Importer.Import(FromRAngleLoc));
+ if (ImportTemplateArgumentListInfo(Container, ToTAInfo))
+ return true;
+ Result = ToTAInfo;
+ return false;
+}
+
+template <>
+bool ASTNodeImporter::ImportTemplateArgumentListInfo<TemplateArgumentListInfo>(
+ const TemplateArgumentListInfo &From, TemplateArgumentListInfo &Result) {
+ return ImportTemplateArgumentListInfo(
+ From.getLAngleLoc(), From.getRAngleLoc(), From.arguments(), Result);
+}
+
+template <>
+bool ASTNodeImporter::ImportTemplateArgumentListInfo<
+ ASTTemplateArgumentListInfo>(const ASTTemplateArgumentListInfo &From,
+ TemplateArgumentListInfo &Result) {
+ return ImportTemplateArgumentListInfo(From.LAngleLoc, From.RAngleLoc,
+ From.arguments(), Result);
+}
+
+std::tuple<FunctionTemplateDecl *, ASTNodeImporter::OptionalTemplateArgsTy>
+ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization(
+ FunctionDecl *FromFD) {
+ assert(FromFD->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization);
+ auto *FTSInfo = FromFD->getTemplateSpecializationInfo();
+ auto *Template = cast_or_null<FunctionTemplateDecl>(
+ Importer.Import(FTSInfo->getTemplate()));
+
+ // Import template arguments.
+ auto TemplArgs = FTSInfo->TemplateArguments->asArray();
+ TemplateArgsTy ToTemplArgs;
+ if (ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(),
+ ToTemplArgs)) // Error during import.
+ return std::make_tuple(Template, OptionalTemplateArgsTy());
+
+ return std::make_tuple(Template, ToTemplArgs);
}
+} // namespace clang
+
//----------------------------------------------------------------------------
// Import Types
//----------------------------------------------------------------------------
@@ -354,13 +571,13 @@ using namespace clang;
QualType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
- return QualType();
+ return {};
}
QualType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
QualType UnderlyingType = Importer.Import(T->getValueType());
if(UnderlyingType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getAtomicType(UnderlyingType);
}
@@ -413,7 +630,7 @@ QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
QualType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
QualType OrigT = Importer.Import(T->getOriginalType());
if (OrigT.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getDecayedType(OrigT);
}
@@ -421,7 +638,7 @@ QualType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getComplexType(ToElementType);
}
@@ -429,7 +646,7 @@ QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getPointerType(ToPointeeType);
}
@@ -438,7 +655,7 @@ QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
// FIXME: Check for blocks support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getBlockPointerType(ToPointeeType);
}
@@ -448,7 +665,7 @@ ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
// FIXME: Check for C++ support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
if (ToPointeeType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getLValueReferenceType(ToPointeeType);
}
@@ -458,7 +675,7 @@ ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
// FIXME: Check for C++0x support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
if (ToPointeeType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getRValueReferenceType(ToPointeeType);
}
@@ -467,7 +684,7 @@ QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Check for C++ support in "to" context.
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
- return QualType();
+ return {};
QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
return Importer.getToContext().getMemberPointerType(ToPointeeType,
@@ -477,7 +694,7 @@ QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getConstantArrayType(ToElementType,
T->getSize(),
@@ -489,7 +706,7 @@ QualType
ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getIncompleteArrayType(ToElementType,
T->getSizeModifier(),
@@ -499,11 +716,11 @@ ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
- return QualType();
+ return {};
Expr *Size = Importer.Import(T->getSizeExpr());
if (!Size)
- return QualType();
+ return {};
SourceRange Brackets = Importer.Import(T->getBracketsRange());
return Importer.getToContext().getVariableArrayType(ToElementType, Size,
@@ -512,10 +729,28 @@ QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
Brackets);
}
+QualType ASTNodeImporter::VisitDependentSizedArrayType(
+ const DependentSizedArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return {};
+
+ // SizeExpr may be null if size is not specified directly.
+ // For example, 'int a[]'.
+ Expr *Size = Importer.Import(T->getSizeExpr());
+ if (!Size && T->getSizeExpr())
+ return {};
+
+ SourceRange Brackets = Importer.Import(T->getBracketsRange());
+ return Importer.getToContext().getDependentSizedArrayType(
+ ToElementType, Size, T->getSizeModifier(), T->getIndexTypeCVRQualifiers(),
+ Brackets);
+}
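+// Illustrative case (not part of the upstream patch): in
+//   template <int N> struct A { int Arr[N]; };
+// the type of 'Arr' is a DependentSizedArrayType whose size expression is
+// the value-dependent reference to N.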
+
QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getVectorType(ToElementType,
T->getNumElements(),
@@ -525,7 +760,7 @@ QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getExtVectorType(ToElementType,
T->getNumElements());
@@ -537,7 +772,7 @@ ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
// into C++? Should we make it variadic?
QualType ToResultType = Importer.Import(T->getReturnType());
if (ToResultType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getFunctionNoProtoType(ToResultType,
T->getExtInfo());
@@ -546,14 +781,14 @@ ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
QualType ToResultType = Importer.Import(T->getReturnType());
if (ToResultType.isNull())
- return QualType();
+ return {};
// Import argument types
SmallVector<QualType, 4> ArgTypes;
for (const auto &A : T->param_types()) {
QualType ArgType = Importer.Import(A);
if (ArgType.isNull())
- return QualType();
+ return {};
ArgTypes.push_back(ArgType);
}
@@ -562,7 +797,7 @@ QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
for (const auto &E : T->exceptions()) {
QualType ExceptionType = Importer.Import(E);
if (ExceptionType.isNull())
- return QualType();
+ return {};
ExceptionTypes.push_back(ExceptionType);
}
@@ -588,16 +823,16 @@ QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
QualType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
- UnresolvedUsingTypenameDecl *ToD = cast_or_null<UnresolvedUsingTypenameDecl>(
- Importer.Import(T->getDecl()));
+ const auto *ToD =
+ cast_or_null<UnresolvedUsingTypenameDecl>(Importer.Import(T->getDecl()));
if (!ToD)
- return QualType();
+ return {};
- UnresolvedUsingTypenameDecl *ToPrevD =
+ auto *ToPrevD =
cast_or_null<UnresolvedUsingTypenameDecl>(
Importer.Import(T->getDecl()->getPreviousDecl()));
if (!ToPrevD && T->getDecl()->getPreviousDecl())
- return QualType();
+ return {};
return Importer.getToContext().getTypeDeclType(ToD, ToPrevD);
}
@@ -605,16 +840,16 @@ QualType ASTNodeImporter::VisitUnresolvedUsingType(
QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
QualType ToInnerType = Importer.Import(T->getInnerType());
if (ToInnerType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getParenType(ToInnerType);
}
QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
- TypedefNameDecl *ToDecl
- = dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
+ auto *ToDecl =
+ dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
- return QualType();
+ return {};
return Importer.getToContext().getTypeDeclType(ToDecl);
}
@@ -622,7 +857,7 @@ QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
if (!ToExpr)
- return QualType();
+ return {};
return Importer.getToContext().getTypeOfExprType(ToExpr);
}
@@ -630,7 +865,7 @@ QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
if (ToUnderlyingType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getTypeOfType(ToUnderlyingType);
}
@@ -639,11 +874,11 @@ QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
// FIXME: Make sure that the "to" context supports C++0x!
Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
if (!ToExpr)
- return QualType();
+ return {};
QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
if (UnderlyingType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
}
@@ -652,7 +887,7 @@ QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
QualType ToBaseType = Importer.Import(T->getBaseType());
QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
if (ToBaseType.isNull() || ToUnderlyingType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getUnaryTransformType(ToBaseType,
ToUnderlyingType,
@@ -666,7 +901,7 @@ QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
if (!FromDeduced.isNull()) {
ToDeduced = Importer.Import(FromDeduced);
if (ToDeduced.isNull())
- return QualType();
+ return {};
}
return Importer.getToContext().getAutoType(ToDeduced, T->getKeyword(),
@@ -675,13 +910,13 @@ QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
QualType ASTNodeImporter::VisitInjectedClassNameType(
const InjectedClassNameType *T) {
- CXXRecordDecl *D = cast_or_null<CXXRecordDecl>(Importer.Import(T->getDecl()));
+ auto *D = cast_or_null<CXXRecordDecl>(Importer.Import(T->getDecl()));
if (!D)
- return QualType();
+ return {};
QualType InjType = Importer.Import(T->getInjectedSpecializationType());
if (InjType.isNull())
- return QualType();
+ return {};
// FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading
// See comments in InjectedClassNameType definition for details
@@ -696,19 +931,17 @@ QualType ASTNodeImporter::VisitInjectedClassNameType(
}
QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
- RecordDecl *ToDecl
- = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
+ auto *ToDecl = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
- return QualType();
+ return {};
return Importer.getToContext().getTagDeclType(ToDecl);
}
QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
- EnumDecl *ToDecl
- = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
+ auto *ToDecl = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
if (!ToDecl)
- return QualType();
+ return {};
return Importer.getToContext().getTagDeclType(ToDecl);
}
@@ -722,25 +955,24 @@ QualType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
if (!FromModifiedType.isNull()) {
ToModifiedType = Importer.Import(FromModifiedType);
if (ToModifiedType.isNull())
- return QualType();
+ return {};
}
if (!FromEquivalentType.isNull()) {
ToEquivalentType = Importer.Import(FromEquivalentType);
if (ToEquivalentType.isNull())
- return QualType();
+ return {};
}
return Importer.getToContext().getAttributedType(T->getAttrKind(),
ToModifiedType, ToEquivalentType);
}
-
QualType ASTNodeImporter::VisitTemplateTypeParmType(
const TemplateTypeParmType *T) {
- TemplateTypeParmDecl *ParmDecl =
+ auto *ParmDecl =
cast_or_null<TemplateTypeParmDecl>(Importer.Import(T->getDecl()));
if (!ParmDecl && T->getDecl())
- return QualType();
+ return {};
return Importer.getToContext().getTemplateTypeParmType(
T->getDepth(), T->getIndex(), T->isParameterPack(), ParmDecl);
@@ -748,15 +980,15 @@ QualType ASTNodeImporter::VisitTemplateTypeParmType(
QualType ASTNodeImporter::VisitSubstTemplateTypeParmType(
const SubstTemplateTypeParmType *T) {
- const TemplateTypeParmType *Replaced =
+ const auto *Replaced =
cast_or_null<TemplateTypeParmType>(Importer.Import(
QualType(T->getReplacedParameter(), 0)).getTypePtr());
if (!Replaced)
- return QualType();
+ return {};
QualType Replacement = Importer.Import(T->getReplacementType());
if (Replacement.isNull())
- return QualType();
+ return {};
Replacement = Replacement.getCanonicalType();
return Importer.getToContext().getSubstTemplateTypeParmType(
@@ -767,11 +999,11 @@ QualType ASTNodeImporter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
TemplateName ToTemplate = Importer.Import(T->getTemplateName());
if (ToTemplate.isNull())
- return QualType();
+ return {};
SmallVector<TemplateArgument, 2> ToTemplateArgs;
if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
- return QualType();
+ return {};
QualType ToCanonType;
if (!QualType(T, 0).isCanonical()) {
@@ -779,7 +1011,7 @@ QualType ASTNodeImporter::VisitTemplateSpecializationType(
= Importer.getFromContext().getCanonicalType(QualType(T, 0));
ToCanonType = Importer.Import(FromCanonType);
if (ToCanonType.isNull())
- return QualType();
+ return {};
}
return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
ToTemplateArgs,
@@ -792,31 +1024,75 @@ QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
if (T->getQualifier()) {
ToQualifier = Importer.Import(T->getQualifier());
if (!ToQualifier)
- return QualType();
+ return {};
}
QualType ToNamedType = Importer.Import(T->getNamedType());
if (ToNamedType.isNull())
- return QualType();
+ return {};
+
+ TagDecl *OwnedTagDecl =
+ cast_or_null<TagDecl>(Importer.Import(T->getOwnedTagDecl()));
+ if (!OwnedTagDecl && T->getOwnedTagDecl())
+ return {};
return Importer.getToContext().getElaboratedType(T->getKeyword(),
- ToQualifier, ToNamedType);
+ ToQualifier, ToNamedType,
+ OwnedTagDecl);
}
QualType ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) {
QualType Pattern = Importer.Import(T->getPattern());
if (Pattern.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getPackExpansionType(Pattern,
T->getNumExpansions());
}
+QualType ASTNodeImporter::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType *T) {
+ NestedNameSpecifier *Qualifier = Importer.Import(T->getQualifier());
+ if (!Qualifier && T->getQualifier())
+ return {};
+
+ IdentifierInfo *Name = Importer.Import(T->getIdentifier());
+ if (!Name && T->getIdentifier())
+ return {};
+
+ SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(T->getNumArgs());
+ if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToPack))
+ return {};
+
+ return Importer.getToContext().getDependentTemplateSpecializationType(
+ T->getKeyword(), Qualifier, Name, ToPack);
+}
+
+QualType ASTNodeImporter::VisitDependentNameType(const DependentNameType *T) {
+ NestedNameSpecifier *NNS = Importer.Import(T->getQualifier());
+ if (!NNS && T->getQualifier())
+    return {};
+
+ IdentifierInfo *Name = Importer.Import(T->getIdentifier());
+ if (!Name && T->getIdentifier())
+    return {};
+
+ QualType Canon = (T == T->getCanonicalTypeInternal().getTypePtr())
+ ? QualType()
+ : Importer.Import(T->getCanonicalTypeInternal());
+ if (!Canon.isNull())
+ Canon = Canon.getCanonicalType();
+
+ return Importer.getToContext().getDependentNameType(T->getKeyword(), NNS,
+ Name, Canon);
+}
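+// Example of the construct imported above (illustrative, not part of the
+// upstream patch): in
+//   template <typename T> struct S { typename T::Nested Member; };
+// 'typename T::Nested' is a DependentNameType with qualifier 'T::' and
+// identifier 'Nested'.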
+
QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
- ObjCInterfaceDecl *Class
- = dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
+ auto *Class =
+ dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
if (!Class)
- return QualType();
+ return {};
return Importer.getToContext().getObjCInterfaceType(Class);
}
@@ -824,23 +1100,22 @@ QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
QualType ToBaseType = Importer.Import(T->getBaseType());
if (ToBaseType.isNull())
- return QualType();
+ return {};
SmallVector<QualType, 4> TypeArgs;
for (auto TypeArg : T->getTypeArgsAsWritten()) {
QualType ImportedTypeArg = Importer.Import(TypeArg);
if (ImportedTypeArg.isNull())
- return QualType();
+ return {};
TypeArgs.push_back(ImportedTypeArg);
}
SmallVector<ObjCProtocolDecl *, 4> Protocols;
for (auto *P : T->quals()) {
- ObjCProtocolDecl *Protocol
- = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
+ auto *Protocol = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
if (!Protocol)
- return QualType();
+ return {};
Protocols.push_back(Protocol);
}
@@ -853,7 +1128,7 @@ QualType
ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
QualType ToPointeeType = Importer.Import(T->getPointeeType());
if (ToPointeeType.isNull())
- return QualType();
+ return {};
return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
}
@@ -866,8 +1141,26 @@ bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
DeclarationName &Name,
NamedDecl *&ToD,
SourceLocation &Loc) {
+  // Check if the RecordDecl appears among the FunctionDecl's parameters, to
+  // avoid an infinite import loop.
+  // Example: int struct_in_proto(struct data_t{int a;int b;} *d);
+ DeclContext *OrigDC = D->getDeclContext();
+ FunctionDecl *FunDecl;
+ if (isa<RecordDecl>(D) && (FunDecl = dyn_cast<FunctionDecl>(OrigDC)) &&
+ FunDecl->hasBody()) {
+ SourceRange RecR = D->getSourceRange();
+ SourceRange BodyR = FunDecl->getBody()->getSourceRange();
+ // If RecordDecl is not in Body (it is a param), we bail out.
+ if (RecR.isValid() && BodyR.isValid() &&
+ (RecR.getBegin() < BodyR.getBegin() ||
+ BodyR.getEnd() < RecR.getEnd())) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return true;
+ }
+ }
+
// Import the context of this declaration.
- DC = Importer.ImportContext(D->getDeclContext());
+ DC = Importer.ImportContext(OrigDC);
if (!DC)
return true;
@@ -899,8 +1192,8 @@ void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
return;
}
- if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
- if (RecordDecl *ToRecord = cast_or_null<RecordDecl>(ToD)) {
+ if (auto *FromRecord = dyn_cast<RecordDecl>(FromD)) {
+ if (auto *ToRecord = cast_or_null<RecordDecl>(ToD)) {
if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) {
ImportDefinition(FromRecord, ToRecord);
}
@@ -908,8 +1201,8 @@ void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
return;
}
- if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
- if (EnumDecl *ToEnum = cast_or_null<EnumDecl>(ToD)) {
+ if (auto *FromEnum = dyn_cast<EnumDecl>(FromD)) {
+ if (auto *ToEnum = cast_or_null<EnumDecl>(ToD)) {
if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
ImportDefinition(FromEnum, ToEnum);
}
@@ -963,6 +1256,27 @@ void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
Importer.Import(From);
}
+void ASTNodeImporter::ImportImplicitMethods(
+ const CXXRecordDecl *From, CXXRecordDecl *To) {
+ assert(From->isCompleteDefinition() && To->getDefinition() == To &&
+ "Import implicit methods to or from non-definition");
+
+ for (CXXMethodDecl *FromM : From->methods())
+ if (FromM->isImplicit())
+ Importer.Import(FromM);
+}
+
+static void setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To,
+ ASTImporter &Importer) {
+ if (TypedefNameDecl *FromTypedef = From->getTypedefNameForAnonDecl()) {
+ auto *ToTypedef =
+ cast_or_null<TypedefNameDecl>(Importer.Import(FromTypedef));
+    assert(ToTypedef && "Failed to import typedef of an anonymous structure");
+
+ To->setTypedefNameForAnonDecl(ToTypedef);
+ }
+}
+
bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
@@ -973,10 +1287,12 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
}
To->startDefinition();
+
+ setTypedefNameForAnonDecl(From, To, Importer);
// Add base classes.
- if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
- CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
+ if (auto *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
+ auto *FromCXX = cast<CXXRecordDecl>(From);
struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
@@ -988,7 +1304,10 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
ToData.Polymorphic = FromData.Polymorphic;
ToData.Abstract = FromData.Abstract;
ToData.IsStandardLayout = FromData.IsStandardLayout;
- ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases;
+ ToData.IsCXX11StandardLayout = FromData.IsCXX11StandardLayout;
+ ToData.HasBasesWithFields = FromData.HasBasesWithFields;
+ ToData.HasBasesWithNonStaticDataMembers =
+ FromData.HasBasesWithNonStaticDataMembers;
ToData.HasPrivateFields = FromData.HasPrivateFields;
ToData.HasProtectedFields = FromData.HasProtectedFields;
ToData.HasPublicFields = FromData.HasPublicFields;
@@ -1022,7 +1341,6 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
= FromData.HasConstexprNonCopyMoveConstructor;
ToData.HasDefaultedDefaultConstructor
= FromData.HasDefaultedDefaultConstructor;
- ToData.CanPassInRegisters = FromData.CanPassInRegisters;
ToData.DefaultedDefaultConstructorIsConstexpr
= FromData.DefaultedDefaultConstructorIsConstexpr;
ToData.HasConstexprDefaultConstructor
@@ -1043,7 +1361,6 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
= FromData.HasDeclaredCopyConstructorWithConstParam;
ToData.HasDeclaredCopyAssignmentWithConstParam
= FromData.HasDeclaredCopyAssignmentWithConstParam;
- ToData.IsLambda = FromData.IsLambda;
SmallVector<CXXBaseSpecifier *, 4> Bases;
for (const auto &Base1 : FromCXX->bases()) {
@@ -1103,6 +1420,8 @@ bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
To->startDefinition();
+ setTypedefNameForAnonDecl(From, To, Importer);
+
QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
if (T.isNull())
return true;
@@ -1154,36 +1473,36 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
case TemplateArgument::Type: {
QualType ToType = Importer.Import(From.getAsType());
if (ToType.isNull())
- return TemplateArgument();
+ return {};
return TemplateArgument(ToType);
}
case TemplateArgument::Integral: {
QualType ToType = Importer.Import(From.getIntegralType());
if (ToType.isNull())
- return TemplateArgument();
+ return {};
return TemplateArgument(From, ToType);
}
case TemplateArgument::Declaration: {
- ValueDecl *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
+ auto *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
QualType ToType = Importer.Import(From.getParamTypeForDecl());
if (!To || ToType.isNull())
- return TemplateArgument();
+ return {};
return TemplateArgument(To, ToType);
}
case TemplateArgument::NullPtr: {
QualType ToType = Importer.Import(From.getNullPtrType());
if (ToType.isNull())
- return TemplateArgument();
+ return {};
return TemplateArgument(ToType, /*isNullPtr*/true);
}
case TemplateArgument::Template: {
TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
if (ToTemplate.isNull())
- return TemplateArgument();
+ return {};
return TemplateArgument(ToTemplate);
}
@@ -1192,7 +1511,7 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
TemplateName ToTemplate
= Importer.Import(From.getAsTemplateOrTemplatePattern());
if (ToTemplate.isNull())
- return TemplateArgument();
+ return {};
return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
}
@@ -1206,7 +1525,7 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
SmallVector<TemplateArgument, 2> ToPack;
ToPack.reserve(From.pack_size());
if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
- return TemplateArgument();
+ return {};
return TemplateArgument(
llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
@@ -1254,6 +1573,9 @@ bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
return false;
}
+// We cannot use the Optional<> pattern here and below because
+// TemplateArgumentListInfo's operator new is declared as deleted, so it
+// cannot be stored in an Optional.
template <typename InContainerTy>
bool ASTNodeImporter::ImportTemplateArgumentListInfo(
const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo) {
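// Since the result cannot be wrapped in an Optional, callers use the
// out-parameter idiom and the bool return signals failure. A typical call
// site, mirroring the uses later in this patch:
//
// \code
//   TemplateArgumentListInfo ToTAInfo;
//   if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ToTAInfo))
//     return true; // import failed
// \endcode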
@@ -1266,13 +1588,27 @@ bool ASTNodeImporter::ImportTemplateArgumentListInfo(
return false;
}
-bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
+static StructuralEquivalenceKind
+getStructuralEquivalenceKind(const ASTImporter &Importer) {
+ return Importer.isMinimalImport() ? StructuralEquivalenceKind::Minimal
+ : StructuralEquivalenceKind::Default;
+}
+
+bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain) {
+ StructuralEquivalenceContext Ctx(
+ Importer.getFromContext(), Importer.getToContext(),
+ Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
+ false, Complain);
+ return Ctx.IsEquivalent(From, To);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
RecordDecl *ToRecord, bool Complain) {
// Eliminate a potential failure point where we attempt to re-import
// something we're trying to import while completing ToRecord.
Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord);
if (ToOrigin) {
- RecordDecl *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin);
+ auto *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin);
if (ToOriginRecord)
ToRecord = ToOriginRecord;
}
@@ -1280,36 +1616,46 @@ bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
ToRecord->getASTContext(),
Importer.getNonEquivalentDecls(),
+ getStructuralEquivalenceKind(Importer),
false, Complain);
- return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
+ return Ctx.IsEquivalent(FromRecord, ToRecord);
}
bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
bool Complain) {
StructuralEquivalenceContext Ctx(
Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), false, Complain);
- return Ctx.IsStructurallyEquivalent(FromVar, ToVar);
+ Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
+ false, Complain);
+ return Ctx.IsEquivalent(FromVar, ToVar);
}
bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
- StructuralEquivalenceContext Ctx(Importer.getFromContext(),
- Importer.getToContext(),
- Importer.getNonEquivalentDecls());
- return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
+ StructuralEquivalenceContext Ctx(
+ Importer.getFromContext(), Importer.getToContext(),
+ Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer));
+ return Ctx.IsEquivalent(FromEnum, ToEnum);
}
bool ASTNodeImporter::IsStructuralMatch(FunctionTemplateDecl *From,
FunctionTemplateDecl *To) {
StructuralEquivalenceContext Ctx(
Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), false, false);
- return Ctx.IsStructurallyEquivalent(From, To);
+ Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
+ false, false);
+ return Ctx.IsEquivalent(From, To);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(FunctionDecl *From, FunctionDecl *To) {
+ StructuralEquivalenceContext Ctx(
+ Importer.getFromContext(), Importer.getToContext(),
+ Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
+ false, false);
+ return Ctx.IsEquivalent(From, To);
}
bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC,
- EnumConstantDecl *ToEC)
-{
+ EnumConstantDecl *ToEC) {
const llvm::APSInt &FromVal = FromEC->getInitVal();
const llvm::APSInt &ToVal = ToEC->getInitVal();
@@ -1322,16 +1668,18 @@ bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
ClassTemplateDecl *To) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
- Importer.getNonEquivalentDecls());
- return Ctx.IsStructurallyEquivalent(From, To);
+ Importer.getNonEquivalentDecls(),
+ getStructuralEquivalenceKind(Importer));
+ return Ctx.IsEquivalent(From, To);
}
bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
VarTemplateDecl *To) {
StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
- Importer.getNonEquivalentDecls());
- return Ctx.IsStructurallyEquivalent(From, To);
+ Importer.getNonEquivalentDecls(),
+ getStructuralEquivalenceKind(Importer));
+ return Ctx.IsEquivalent(From, To);
}
Decl *ASTNodeImporter::VisitDecl(Decl *D) {
@@ -1356,9 +1704,11 @@ Decl *ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
// Import the location of this declaration.
SourceLocation Loc = Importer.Import(D->getLocation());
- EmptyDecl *ToD = EmptyDecl::Create(Importer.getToContext(), DC, Loc);
+ EmptyDecl *ToD;
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, Loc))
+ return ToD;
+
ToD->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToD);
LexicalDC->addDeclInternal(ToD);
return ToD;
}
@@ -1367,13 +1717,12 @@ Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
TranslationUnitDecl *ToD =
Importer.getToContext().getTranslationUnitDecl();
- Importer.Imported(D, ToD);
+ Importer.MapImported(D, ToD);
return ToD;
}
Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
-
SourceLocation Loc = Importer.Import(D->getLocation());
SourceLocation ColonLoc = Importer.Import(D->getColonLoc());
@@ -1382,19 +1731,17 @@ Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
if (!DC)
return nullptr;
- AccessSpecDecl *accessSpecDecl
- = AccessSpecDecl::Create(Importer.getToContext(), D->getAccess(),
- DC, Loc, ColonLoc);
-
- if (!accessSpecDecl)
- return nullptr;
+ AccessSpecDecl *ToD;
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), D->getAccess(),
+ DC, Loc, ColonLoc))
+ return ToD;
// Lexical DeclContext and Semantic DeclContext
// is always the same for the accessSpec.
- accessSpecDecl->setLexicalDeclContext(DC);
- DC->addDeclInternal(accessSpecDecl);
+ ToD->setLexicalDeclContext(DC);
+ DC->addDeclInternal(ToD);
- return accessSpecDecl;
+ return ToD;
}
Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
@@ -1412,17 +1759,18 @@ Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
return nullptr;
StringLiteral *FromMsg = D->getMessage();
- StringLiteral *ToMsg = cast_or_null<StringLiteral>(Importer.Import(FromMsg));
+ auto *ToMsg = cast_or_null<StringLiteral>(Importer.Import(FromMsg));
if (!ToMsg && FromMsg)
return nullptr;
- StaticAssertDecl *ToD = StaticAssertDecl::Create(
- Importer.getToContext(), DC, Loc, AssertExpr, ToMsg,
- Importer.Import(D->getRParenLoc()), D->isFailed());
+ StaticAssertDecl *ToD;
+ if (GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(), DC, Loc, AssertExpr, ToMsg,
+ Importer.Import(D->getRParenLoc()), D->isFailed()))
+ return ToD;
ToD->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToD);
- Importer.Imported(D, ToD);
return ToD;
}
@@ -1442,7 +1790,7 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// This is an anonymous namespace. Adopt an existing anonymous
// namespace if we can.
// FIXME: Not testable.
- if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ if (auto *TU = dyn_cast<TranslationUnitDecl>(DC))
MergeWithNamespace = TU->getAnonymousNamespace();
else
MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
@@ -1450,17 +1798,17 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Namespace))
continue;
- if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(FoundDecls[I])) {
+ if (auto *FoundNS = dyn_cast<NamespaceDecl>(FoundDecl)) {
MergeWithNamespace = FoundNS;
ConflictingDecls.clear();
break;
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -1473,24 +1821,24 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Create the "to" namespace, if needed.
NamespaceDecl *ToNamespace = MergeWithNamespace;
if (!ToNamespace) {
- ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
- D->isInline(),
- Importer.Import(D->getLocStart()),
- Loc, Name.getAsIdentifierInfo(),
- /*PrevDecl=*/nullptr);
+ if (GetImportedOrCreateDecl(
+ ToNamespace, D, Importer.getToContext(), DC, D->isInline(),
+ Importer.Import(D->getLocStart()), Loc, Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/nullptr))
+ return ToNamespace;
ToNamespace->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToNamespace);
// If this is an anonymous namespace, register it as the anonymous
// namespace within its context.
if (!Name) {
- if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ if (auto *TU = dyn_cast<TranslationUnitDecl>(DC))
TU->setAnonymousNamespace(ToNamespace);
else
cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
}
}
- Importer.Imported(D, ToNamespace);
+ Importer.MapImported(D, ToNamespace);
ImportDeclContext(D);
@@ -1510,8 +1858,8 @@ Decl *ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
// NOTE: No conflict resolution is done for namespace aliases now.
- NamespaceDecl *TargetDecl = cast_or_null<NamespaceDecl>(
- Importer.Import(D->getNamespace()));
+ auto *TargetDecl = cast_or_null<NamespaceDecl>(
+ Importer.Import(D->getNamespace()));
if (!TargetDecl)
return nullptr;
@@ -1523,13 +1871,15 @@ Decl *ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
if (D->getQualifierLoc() && !ToQLoc)
return nullptr;
- NamespaceAliasDecl *ToD = NamespaceAliasDecl::Create(
- Importer.getToContext(), DC, Importer.Import(D->getNamespaceLoc()),
- Importer.Import(D->getAliasLoc()), ToII, ToQLoc,
- Importer.Import(D->getTargetNameLoc()), TargetDecl);
+ NamespaceAliasDecl *ToD;
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC,
+ Importer.Import(D->getNamespaceLoc()),
+ Importer.Import(D->getAliasLoc()), ToII, ToQLoc,
+ Importer.Import(D->getTargetNameLoc()),
+ TargetDecl))
+ return ToD;
ToD->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToD);
LexicalDC->addDeclInternal(ToD);
return ToD;
@@ -1554,17 +1904,16 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (TypedefNameDecl *FoundTypedef =
- dyn_cast<TypedefNameDecl>(FoundDecls[I])) {
+ if (auto *FoundTypedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
FoundTypedef->getUnderlyingType()))
- return Importer.Imported(D, FoundTypedef);
+ return Importer.MapImported(D, FoundTypedef);
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -1584,22 +1933,25 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
// Create the new typedef node.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
SourceLocation StartL = Importer.Import(D->getLocStart());
+
TypedefNameDecl *ToTypedef;
- if (IsAlias)
- ToTypedef = TypeAliasDecl::Create(Importer.getToContext(), DC,
- StartL, Loc,
- Name.getAsIdentifierInfo(),
- TInfo);
- else
- ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
- StartL, Loc,
- Name.getAsIdentifierInfo(),
- TInfo);
+ if (IsAlias) {
+ if (GetImportedOrCreateDecl<TypeAliasDecl>(
+ ToTypedef, D, Importer.getToContext(), DC, StartL, Loc,
+ Name.getAsIdentifierInfo(), TInfo))
+ return ToTypedef;
+ } else if (GetImportedOrCreateDecl<TypedefDecl>(
+ ToTypedef, D, Importer.getToContext(), DC, StartL, Loc,
+ Name.getAsIdentifierInfo(), TInfo))
+ return ToTypedef;
ToTypedef->setAccess(D->getAccess());
ToTypedef->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToTypedef);
- LexicalDC->addDeclInternal(ToTypedef);
+
+ // Templated declarations should not appear in the enclosing DeclContext.
+ TypeAliasDecl *FromAlias = IsAlias ? cast<TypeAliasDecl>(D) : nullptr;
+ if (!FromAlias || !FromAlias->getDescribedAliasTemplate())
+ LexicalDC->addDeclInternal(ToTypedef);
return ToTypedef;
}
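// An illustrative instance of the templated-declaration case handled above:
//
// \code
//   template <typename T> using Ptr = T*;
// \endcode
//
// The TypeAliasDecl for 'Ptr' is described by a TypeAliasTemplateDecl; only
// the template itself belongs in the DeclContext, so the underlying alias
// must not be added a second time.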
@@ -1617,11 +1969,11 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
- NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ NamedDecl *FoundD;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, FoundD, Loc))
return nullptr;
- if (ToD)
- return ToD;
+ if (FoundD)
+ return FoundD;
// If this typedef is not in block scope, determine whether we've
// seen a typedef with the same name (that we can merge with) or any
@@ -1631,13 +1983,12 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (auto *FoundAlias =
- dyn_cast<TypeAliasTemplateDecl>(FoundDecls[I]))
- return Importer.Imported(D, FoundAlias);
- ConflictingDecls.push_back(FoundDecls[I]);
+ if (auto *FoundAlias = dyn_cast<TypeAliasTemplateDecl>(FoundDecl))
+ return Importer.MapImported(D, FoundAlias);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -1654,19 +2005,22 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
if (!Params)
return nullptr;
- NamedDecl *TemplDecl = cast_or_null<NamedDecl>(
+ auto *TemplDecl = cast_or_null<TypeAliasDecl>(
Importer.Import(D->getTemplatedDecl()));
if (!TemplDecl)
return nullptr;
- TypeAliasTemplateDecl *ToAlias = TypeAliasTemplateDecl::Create(
- Importer.getToContext(), DC, Loc, Name, Params, TemplDecl);
+ TypeAliasTemplateDecl *ToAlias;
+ if (GetImportedOrCreateDecl(ToAlias, D, Importer.getToContext(), DC, Loc,
+ Name, Params, TemplDecl))
+ return ToAlias;
+
+ TemplDecl->setDescribedAliasTemplate(ToAlias);
ToAlias->setAccess(D->getAccess());
ToAlias->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToAlias);
LexicalDC->addDeclInternal(ToAlias);
- return ToD;
+ return ToAlias;
}
Decl *ASTNodeImporter::VisitLabelDecl(LabelDecl *D) {
@@ -1682,17 +2036,18 @@ Decl *ASTNodeImporter::VisitLabelDecl(LabelDecl *D) {
assert(LexicalDC->isFunctionOrMethod());
- LabelDecl *ToLabel = D->isGnuLocal()
- ? LabelDecl::Create(Importer.getToContext(),
- DC, Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo(),
- Importer.Import(D->getLocStart()))
- : LabelDecl::Create(Importer.getToContext(),
- DC, Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo());
- Importer.Imported(D, ToLabel);
-
- LabelStmt *Label = cast_or_null<LabelStmt>(Importer.Import(D->getStmt()));
+ LabelDecl *ToLabel;
+ if (D->isGnuLocal()
+ ? GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC,
+ Importer.Import(D->getLocation()),
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getLocStart()))
+ : GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC,
+ Importer.Import(D->getLocation()),
+ Name.getAsIdentifierInfo()))
+ return ToLabel;
+
+ auto *Label = cast_or_null<LabelStmt>(Importer.Import(D->getStmt()));
if (!Label)
return nullptr;
@@ -1727,22 +2082,22 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- Decl *Found = FoundDecls[I];
- if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
- if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Decl *Found = FoundDecl;
+ if (auto *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
Found = Tag->getDecl();
}
- if (EnumDecl *FoundEnum = dyn_cast<EnumDecl>(Found)) {
+ if (auto *FoundEnum = dyn_cast<EnumDecl>(Found)) {
if (IsStructuralMatch(D, FoundEnum))
- return Importer.Imported(D, FoundEnum);
+ return Importer.MapImported(D, FoundEnum);
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -1751,18 +2106,19 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
ConflictingDecls.size());
}
}
-
+
// Create the enum declaration.
- EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getLocStart()),
- Loc, Name.getAsIdentifierInfo(), nullptr,
- D->isScoped(), D->isScopedUsingClassTag(),
- D->isFixed());
+ EnumDecl *D2;
+ if (GetImportedOrCreateDecl(
+ D2, D, Importer.getToContext(), DC, Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(), nullptr, D->isScoped(),
+ D->isScopedUsingClassTag(), D->isFixed()))
+ return D2;
+
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, D2);
LexicalDC->addDeclInternal(D2);
// Import the integer type.
@@ -1783,14 +2139,20 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// but this particular declaration is not that definition, import the
// definition and map to that.
TagDecl *Definition = D->getDefinition();
- if (Definition && Definition != D) {
+ if (Definition && Definition != D &&
+ // In contrast to a normal CXXRecordDecl, the implicit
+ // CXXRecordDecl of ClassTemplateSpecializationDecl is its redeclaration.
+ // The definition of the implicit CXXRecordDecl in this case is the
+ // ClassTemplateSpecializationDecl itself. Thus, we add an extra
+ // condition in order to be able to import the implicit Decl.
+ !D->isImplicit()) {
Decl *ImportedDef = Importer.Import(Definition);
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
-
+
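// Illustration of the implicit-Decl special case above (assumed example):
//
// \code
//   template <typename T> struct V {};
//   template struct V<int>;  // a ClassTemplateSpecializationDecl
// \endcode
//
// The specialization's implicit CXXRecordDecl redeclares the specialization
// itself, so redirecting to getDefinition() here would map the implicit
// decl to the specialization instead of importing it in its own right.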
// Import the major distinguishing characteristics of this record.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -1824,30 +2186,27 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
D->getASTContext().getExternalSource()->CompleteType(D);
}
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- Decl *Found = FoundDecls[I];
- if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
- if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Decl *Found = FoundDecl;
+ if (auto *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
Found = Tag->getDecl();
}
-
- if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
- if (D->isAnonymousStructOrUnion() &&
- FoundRecord->isAnonymousStructOrUnion()) {
- // If both anonymous structs/unions are in a record context, make sure
- // they occur in the same location in the context records.
- if (Optional<unsigned> Index1 =
- StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(
- D)) {
- if (Optional<unsigned> Index2 = StructuralEquivalenceContext::
- findUntaggedStructOrUnionIndex(FoundRecord)) {
- if (*Index1 != *Index2)
- continue;
- }
- }
+
+ if (D->getDescribedTemplate()) {
+ if (auto *Template = dyn_cast<ClassTemplateDecl>(Found))
+ Found = Template->getTemplatedDecl();
+ else
+ continue;
+ }
+
+ if (auto *FoundRecord = dyn_cast<RecordDecl>(Found)) {
+ if (!SearchName) {
+ if (!IsStructuralMatch(D, FoundRecord, false))
+ continue;
}
PrevDecl = FoundRecord;
@@ -1861,8 +2220,19 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
// function.
- // FIXME: For C++, we should also merge methods here.
- return Importer.Imported(D, FoundDef);
+ // FIXME: The structural equivalence check should also compare the
+ // user-defined methods.
+ Importer.MapImported(D, FoundDef);
+ if (const auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
+ auto *FoundCXX = dyn_cast<CXXRecordDecl>(FoundDef);
+ assert(FoundCXX && "Record type mismatch");
+
+ if (D->isCompleteDefinition() && !Importer.isMinimalImport())
+ // FoundDef may not have every implicit method that D has
+ // because implicit methods are created only if they are used.
+ ImportImplicitMethods(DCXX, FoundCXX);
+ }
+ return FoundDef;
}
} else if (!D->isCompleteDefinition()) {
// We have a forward declaration of this type, so adopt that forward
@@ -1889,7 +2259,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
}
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty() && SearchName) {
@@ -1904,53 +2274,80 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
SourceLocation StartLoc = Importer.Import(D->getLocStart());
if (!D2) {
CXXRecordDecl *D2CXX = nullptr;
- if (CXXRecordDecl *DCXX = llvm::dyn_cast<CXXRecordDecl>(D)) {
+ if (auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
if (DCXX->isLambda()) {
TypeSourceInfo *TInfo = Importer.Import(DCXX->getLambdaTypeInfo());
- D2CXX = CXXRecordDecl::CreateLambda(Importer.getToContext(),
- DC, TInfo, Loc,
- DCXX->isDependentLambda(),
- DCXX->isGenericLambda(),
- DCXX->getLambdaCaptureDefault());
+ if (GetImportedOrCreateSpecialDecl(
+ D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(),
+ DC, TInfo, Loc, DCXX->isDependentLambda(),
+ DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
+ return D2CXX;
Decl *CDecl = Importer.Import(DCXX->getLambdaContextDecl());
if (DCXX->getLambdaContextDecl() && !CDecl)
return nullptr;
D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), CDecl);
- } else if (DCXX->isInjectedClassName()) {
- // We have to be careful to do a similar dance to the one in
- // Sema::ActOnStartCXXMemberDeclarations
- CXXRecordDecl *const PrevDecl = nullptr;
- const bool DelayTypeCreation = true;
- D2CXX = CXXRecordDecl::Create(
- Importer.getToContext(), D->getTagKind(), DC, StartLoc, Loc,
- Name.getAsIdentifierInfo(), PrevDecl, DelayTypeCreation);
- Importer.getToContext().getTypeDeclType(
- D2CXX, llvm::dyn_cast<CXXRecordDecl>(DC));
+ } else if (DCXX->isInjectedClassName()) {
+ // We have to be careful to do a similar dance to the one in
+ // Sema::ActOnStartCXXMemberDeclarations
+ CXXRecordDecl *const PrevDecl = nullptr;
+ const bool DelayTypeCreation = true;
+ if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
+ D->getTagKind(), DC, StartLoc, Loc,
+ Name.getAsIdentifierInfo(), PrevDecl,
+ DelayTypeCreation))
+ return D2CXX;
+ Importer.getToContext().getTypeDeclType(
+ D2CXX, dyn_cast<CXXRecordDecl>(DC));
} else {
- D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
- D->getTagKind(),
- DC, StartLoc, Loc,
- Name.getAsIdentifierInfo());
+ if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
+ D->getTagKind(), DC, StartLoc, Loc,
+ Name.getAsIdentifierInfo(),
+ cast_or_null<CXXRecordDecl>(PrevDecl)))
+ return D2CXX;
}
+
D2 = D2CXX;
D2->setAccess(D->getAccess());
-
- Importer.Imported(D, D2);
+ D2->setLexicalDeclContext(LexicalDC);
+ if (!DCXX->getDescribedClassTemplate() || DCXX->isImplicit())
+ LexicalDC->addDeclInternal(D2);
if (ClassTemplateDecl *FromDescribed =
DCXX->getDescribedClassTemplate()) {
- ClassTemplateDecl *ToDescribed = cast_or_null<ClassTemplateDecl>(
- Importer.Import(FromDescribed));
+ auto *ToDescribed = cast_or_null<ClassTemplateDecl>(
+ Importer.Import(FromDescribed));
if (!ToDescribed)
return nullptr;
D2CXX->setDescribedClassTemplate(ToDescribed);
-
+ if (!DCXX->isInjectedClassName()) {
+ // In a record that describes a template, the type should be an
+ // InjectedClassNameType (see Sema::CheckClassTemplate). Update the
+ // previously set type to the correct value here (ToDescribed is not
+ // available when the record is created).
+ // FIXME: The previous type is cleared but not removed from
+ // ASTContext's internal storage.
+ CXXRecordDecl *Injected = nullptr;
+ for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
+ auto *Record = dyn_cast<CXXRecordDecl>(Found);
+ if (Record && Record->isInjectedClassName()) {
+ Injected = Record;
+ break;
+ }
+ }
+ D2CXX->setTypeForDecl(nullptr);
+ Importer.getToContext().getInjectedClassNameType(D2CXX,
+ ToDescribed->getInjectedClassNameSpecialization());
+ if (Injected) {
+ Injected->setTypeForDecl(nullptr);
+ Importer.getToContext().getTypeDeclType(Injected, D2CXX);
+ }
+ }
} else if (MemberSpecializationInfo *MemberInfo =
DCXX->getMemberSpecializationInfo()) {
TemplateSpecializationKind SK =
MemberInfo->getTemplateSpecializationKind();
CXXRecordDecl *FromInst = DCXX->getInstantiatedFromMemberClass();
- CXXRecordDecl *ToInst =
+ auto *ToInst =
cast_or_null<CXXRecordDecl>(Importer.Import(FromInst));
if (FromInst && !ToInst)
return nullptr;
@@ -1958,24 +2355,21 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
D2CXX->getMemberSpecializationInfo()->setPointOfInstantiation(
Importer.Import(MemberInfo->getPointOfInstantiation()));
}
-
} else {
- D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
- DC, StartLoc, Loc, Name.getAsIdentifierInfo());
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(),
+ D->getTagKind(), DC, StartLoc, Loc,
+ Name.getAsIdentifierInfo(), PrevDecl))
+ return D2;
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
}
-
+
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
- D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
if (D->isAnonymousStructOrUnion())
D2->setAnonymousStructOrUnion(true);
- if (PrevDecl) {
- // FIXME: do this for all Redeclarables, not just RecordDecls.
- D2->setPreviousDecl(PrevDecl);
- }
}
-
- Importer.Imported(D, D2);
+
+ Importer.MapImported(D, D2);
if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
return nullptr;
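// Background for the injected-class-name fixup in the hunk above
// (illustrative):
//
// \code
//   template <typename T> struct S {
//     S *Next;  // 'S' here is the injected class name; its type is the
//               // InjectedClassNameType 'S<T>'
//   };
// \endcode
//
// Because ToDescribed only becomes available after the record is created,
// the record's type is set first and rewritten to the InjectedClassNameType
// afterwards.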
@@ -2005,17 +2399,16 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (EnumConstantDecl *FoundEnumConstant
- = dyn_cast<EnumConstantDecl>(FoundDecls[I])) {
+ if (auto *FoundEnumConstant = dyn_cast<EnumConstantDecl>(FoundDecl)) {
if (IsStructuralMatch(D, FoundEnumConstant))
- return Importer.Imported(D, FoundEnumConstant);
+ return Importer.MapImported(D, FoundEnumConstant);
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -2031,18 +2424,120 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
if (D->getInitExpr() && !Init)
return nullptr;
- EnumConstantDecl *ToEnumerator
- = EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
- Name.getAsIdentifierInfo(), T,
- Init, D->getInitVal());
+ EnumConstantDecl *ToEnumerator;
+ if (GetImportedOrCreateDecl(
+ ToEnumerator, D, Importer.getToContext(), cast<EnumDecl>(DC), Loc,
+ Name.getAsIdentifierInfo(), T, Init, D->getInitVal()))
+ return ToEnumerator;
+
ToEnumerator->setAccess(D->getAccess());
ToEnumerator->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToEnumerator);
LexicalDC->addDeclInternal(ToEnumerator);
return ToEnumerator;
}
+bool ASTNodeImporter::ImportTemplateInformation(FunctionDecl *FromFD,
+ FunctionDecl *ToFD) {
+ switch (FromFD->getTemplatedKind()) {
+ case FunctionDecl::TK_NonTemplate:
+ case FunctionDecl::TK_FunctionTemplate:
+ return false;
+
+ case FunctionDecl::TK_MemberSpecialization: {
+ auto *InstFD = cast_or_null<FunctionDecl>(
+ Importer.Import(FromFD->getInstantiatedFromMemberFunction()));
+ if (!InstFD)
+ return true;
+
+ TemplateSpecializationKind TSK = FromFD->getTemplateSpecializationKind();
+ SourceLocation POI = Importer.Import(
+ FromFD->getMemberSpecializationInfo()->getPointOfInstantiation());
+ ToFD->setInstantiationOfMemberFunction(InstFD, TSK);
+ ToFD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ return false;
+ }
+
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateDecl *Template;
+ OptionalTemplateArgsTy ToTemplArgs;
+ std::tie(Template, ToTemplArgs) =
+ ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD);
+ if (!Template || !ToTemplArgs)
+ return true;
+
+ TemplateArgumentList *ToTAList = TemplateArgumentList::CreateCopy(
+ Importer.getToContext(), *ToTemplArgs);
+
+ auto *FTSInfo = FromFD->getTemplateSpecializationInfo();
+ TemplateArgumentListInfo ToTAInfo;
+ const auto *FromTAArgsAsWritten = FTSInfo->TemplateArgumentsAsWritten;
+ if (FromTAArgsAsWritten)
+ if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ToTAInfo))
+ return true;
+
+ SourceLocation POI = Importer.Import(FTSInfo->getPointOfInstantiation());
+
+ TemplateSpecializationKind TSK = FTSInfo->getTemplateSpecializationKind();
+ ToFD->setFunctionTemplateSpecialization(
+ Template, ToTAList, /* InsertPos= */ nullptr,
+ TSK, FromTAArgsAsWritten ? &ToTAInfo : nullptr, POI);
+ return false;
+ }
+
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ auto *FromInfo = FromFD->getDependentSpecializationInfo();
+ UnresolvedSet<8> TemplDecls;
+ unsigned NumTemplates = FromInfo->getNumTemplates();
+ for (unsigned I = 0; I < NumTemplates; I++) {
+ if (auto *ToFTD = cast_or_null<FunctionTemplateDecl>(
+ Importer.Import(FromInfo->getTemplate(I))))
+ TemplDecls.addDecl(ToFTD);
+ else
+ return true;
+ }
+
+ // Import TemplateArgumentListInfo.
+ TemplateArgumentListInfo ToTAInfo;
+ if (ImportTemplateArgumentListInfo(
+ FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(),
+ llvm::makeArrayRef(FromInfo->getTemplateArgs(),
+ FromInfo->getNumTemplateArgs()),
+ ToTAInfo))
+ return true;
+
+ ToFD->setDependentTemplateSpecialization(Importer.getToContext(),
+ TemplDecls, ToTAInfo);
+ return false;
+ }
+ }
+ llvm_unreachable("All cases should be covered!");
+}
+
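// The templated kinds distinguished above, by example (assumed snippets):
//
// \code
//   void f();                          // TK_NonTemplate
//   template <typename T> void g(T);   // TK_FunctionTemplate
//   template <> void g<int>(int);      // TK_FunctionTemplateSpecialization
//   template <typename T> struct A { void m(); };
//   // A<int>::m is a TK_MemberSpecialization of A<T>::m
// \endcode
//
// TK_DependentFunctionTemplateSpecialization arises for friend declarations
// that name a template specialization with dependent arguments.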
+FunctionDecl *
+ASTNodeImporter::FindFunctionTemplateSpecialization(FunctionDecl *FromFD) {
+ FunctionTemplateDecl *Template;
+ OptionalTemplateArgsTy ToTemplArgs;
+ std::tie(Template, ToTemplArgs) =
+ ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD);
+ if (!Template || !ToTemplArgs)
+ return nullptr;
+
+ void *InsertPos = nullptr;
+ auto *FoundSpec = Template->findSpecialization(*ToTemplArgs, InsertPos);
+ return FoundSpec;
+}
+
Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
+
+ SmallVector<Decl*, 2> Redecls = getCanonicalForwardRedeclChain(D);
+ auto RedeclIt = Redecls.begin();
+ // Import the first part of the decl chain, i.e. import all previous
+ // declarations starting from the canonical decl.
+ for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt)
+ if (!Importer.Import(*RedeclIt))
+ return nullptr;
+ assert(*RedeclIt == D);
+
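// Importing in canonical-first order keeps the redeclaration chain intact
// (illustrative):
//
// \code
//   void f();      // canonical declaration, imported first
//   void f() {}    // D may be this definition
// \endcode
//
// The prototype is imported before the definition, so setPreviousDecl and
// the lookup logic below can link the imported declarations correctly.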
// Import the major distinguishing characteristics of this function.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -2053,33 +2548,54 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (ToD)
return ToD;
- const FunctionDecl *FoundWithoutBody = nullptr;
-
+ const FunctionDecl *FoundByLookup = nullptr;
+ FunctionTemplateDecl *FromFT = D->getDescribedFunctionTemplate();
+
+ // If this is a function template specialization, then try to find the same
+ // existing specialization in the "to" context. The localUncachedLookup
+ // below will not find any specialization, but would find the primary
+ // template; thus, we have to skip normal lookup in case of specializations.
+ // FIXME: Handle member function templates (TK_MemberSpecialization) similarly?
+ if (D->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
+ if (FunctionDecl *FoundFunction = FindFunctionTemplateSpecialization(D)) {
+ if (D->doesThisDeclarationHaveABody() &&
+ FoundFunction->hasBody())
+ return Importer.Imported(D, FoundFunction);
+ FoundByLookup = FoundFunction;
+ }
+ }
// Try to find a function in our own ("to") context with the same name, same
// type, and in the same context as the function we're importing.
- if (!LexicalDC->isFunctionOrMethod()) {
+ else if (!LexicalDC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
- unsigned IDNS = Decl::IDNS_Ordinary;
+ unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (FunctionDecl *FoundFunction = dyn_cast<FunctionDecl>(FoundDecls[I])) {
+ // If a template was found, look at the templated function.
+ if (FromFT) {
+ if (auto *Template = dyn_cast<FunctionTemplateDecl>(FoundDecl))
+ FoundDecl = Template->getTemplatedDecl();
+ else
+ continue;
+ }
+
+ if (auto *FoundFunction = dyn_cast<FunctionDecl>(FoundDecl)) {
if (FoundFunction->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
- if (Importer.IsStructurallyEquivalent(D->getType(),
- FoundFunction->getType())) {
- // FIXME: Actually try to merge the body and other attributes.
- const FunctionDecl *FromBodyDecl = nullptr;
- D->hasBody(FromBodyDecl);
- if (D == FromBodyDecl && !FoundFunction->hasBody()) {
- // This function is needed to merge completely.
- FoundWithoutBody = FoundFunction;
- break;
+ if (IsStructuralMatch(D, FoundFunction)) {
+ const FunctionDecl *Definition = nullptr;
+ if (D->doesThisDeclarationHaveABody() &&
+ FoundFunction->hasBody(Definition)) {
+ return Importer.MapImported(
+ D, const_cast<FunctionDecl *>(Definition));
}
- return Importer.Imported(D, FoundFunction);
+ FoundByLookup = FoundFunction;
+ break;
}
// FIXME: Check for overloading more carefully, e.g., by boosting
@@ -2098,7 +2614,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -2117,8 +2633,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
QualType FromTy = D->getType();
bool usedDifferentExceptionSpec = false;
- if (const FunctionProtoType *
- FromFPT = D->getType()->getAs<FunctionProtoType>()) {
+ if (const auto *FromFPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo FromEPI = FromFPT->getExtProtoInfo();
// FunctionProtoType::ExtProtoInfo's ExceptionSpecDecl can point to the
// FunctionDecl that we are importing the FunctionProtoType for.
@@ -2142,75 +2657,66 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
for (auto P : D->parameters()) {
- ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
+ auto *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
if (!ToP)
return nullptr;
Parameters.push_back(ToP);
}
- // Create the imported function.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ if (D->getTypeSourceInfo() && !TInfo)
+ return nullptr;
+
+ // Create the imported function.
FunctionDecl *ToFunction = nullptr;
SourceLocation InnerLocStart = Importer.Import(D->getInnerLocStart());
- if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
- ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
- cast<CXXRecordDecl>(DC),
- InnerLocStart,
- NameInfo, T, TInfo,
- FromConstructor->isExplicit(),
- D->isInlineSpecified(),
- D->isImplicit(),
- D->isConstexpr());
+ if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (GetImportedOrCreateDecl<CXXConstructorDecl>(
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ InnerLocStart, NameInfo, T, TInfo, FromConstructor->isExplicit(),
+ D->isInlineSpecified(), D->isImplicit(), D->isConstexpr()))
+ return ToFunction;
if (unsigned NumInitializers = FromConstructor->getNumCtorInitializers()) {
SmallVector<CXXCtorInitializer *, 4> CtorInitializers;
- for (CXXCtorInitializer *I : FromConstructor->inits()) {
- CXXCtorInitializer *ToI =
- cast_or_null<CXXCtorInitializer>(Importer.Import(I));
+ for (auto *I : FromConstructor->inits()) {
+ auto *ToI = cast_or_null<CXXCtorInitializer>(Importer.Import(I));
if (!ToI && I)
return nullptr;
CtorInitializers.push_back(ToI);
}
- CXXCtorInitializer **Memory =
+ auto **Memory =
new (Importer.getToContext()) CXXCtorInitializer *[NumInitializers];
std::copy(CtorInitializers.begin(), CtorInitializers.end(), Memory);
- CXXConstructorDecl *ToCtor = llvm::cast<CXXConstructorDecl>(ToFunction);
+ auto *ToCtor = cast<CXXConstructorDecl>(ToFunction);
ToCtor->setCtorInitializers(Memory);
ToCtor->setNumCtorInitializers(NumInitializers);
}
} else if (isa<CXXDestructorDecl>(D)) {
- ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
- cast<CXXRecordDecl>(DC),
- InnerLocStart,
- NameInfo, T, TInfo,
- D->isInlineSpecified(),
- D->isImplicit());
- } else if (CXXConversionDecl *FromConversion
- = dyn_cast<CXXConversionDecl>(D)) {
- ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
- cast<CXXRecordDecl>(DC),
- InnerLocStart,
- NameInfo, T, TInfo,
- D->isInlineSpecified(),
- FromConversion->isExplicit(),
- D->isConstexpr(),
- Importer.Import(D->getLocEnd()));
- } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
- ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
- cast<CXXRecordDecl>(DC),
- InnerLocStart,
- NameInfo, T, TInfo,
- Method->getStorageClass(),
- Method->isInlineSpecified(),
- D->isConstexpr(),
- Importer.Import(D->getLocEnd()));
+ if (GetImportedOrCreateDecl<CXXDestructorDecl>(
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ InnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
+ D->isImplicit()))
+ return ToFunction;
+ } else if (CXXConversionDecl *FromConversion =
+ dyn_cast<CXXConversionDecl>(D)) {
+ if (GetImportedOrCreateDecl<CXXConversionDecl>(
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ InnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
+ FromConversion->isExplicit(), D->isConstexpr(), SourceLocation()))
+ return ToFunction;
+ } else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (GetImportedOrCreateDecl<CXXMethodDecl>(
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ InnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
+ Method->isInlineSpecified(), D->isConstexpr(), SourceLocation()))
+ return ToFunction;
} else {
- ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
- InnerLocStart,
- NameInfo, T, TInfo, D->getStorageClass(),
- D->isInlineSpecified(),
- D->hasWrittenPrototype(),
- D->isConstexpr());
+ if (GetImportedOrCreateDecl(ToFunction, D, Importer.getToContext(), DC,
+ InnerLocStart, NameInfo, T, TInfo,
+ D->getStorageClass(), D->isInlineSpecified(),
+ D->hasWrittenPrototype(), D->isConstexpr()))
+ return ToFunction;
}
// Import the qualifier, if any.
@@ -2220,21 +2726,31 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
ToFunction->setTrivial(D->isTrivial());
ToFunction->setPure(D->isPure());
- Importer.Imported(D, ToFunction);
+ ToFunction->setRangeEnd(Importer.Import(D->getLocEnd()));
// Set the parameters.
- for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
- Parameters[I]->setOwningFunction(ToFunction);
- ToFunction->addDeclInternal(Parameters[I]);
+ for (auto *Param : Parameters) {
+ Param->setOwningFunction(ToFunction);
+ ToFunction->addDeclInternal(Param);
}
ToFunction->setParams(Parameters);
- if (FoundWithoutBody) {
+ if (FoundByLookup) {
auto *Recent = const_cast<FunctionDecl *>(
- FoundWithoutBody->getMostRecentDecl());
+ FoundByLookup->getMostRecentDecl());
ToFunction->setPreviousDecl(Recent);
}
+ // We need to complete the creation of the FunctionProtoTypeLoc manually by
+ // setting the params it refers to.
+ if (TInfo) {
+ if (auto ProtoLoc =
+ TInfo->getTypeLoc().IgnoreParens().getAs<FunctionProtoTypeLoc>()) {
+ for (unsigned I = 0, N = Parameters.size(); I != N; ++I)
+ ProtoLoc.setParam(I, Parameters[I]);
+ }
+ }
+
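// Why this is needed (sketch): the TypeSourceInfo import cannot know the
// freshly imported ParmVarDecls, so the ProtoLoc's param slots start out
// null and are filled in here.
//
// \code
//   void g(int A, float B);  // the FunctionProtoTypeLoc for 'g' records
//                            // the ParmVarDecls of 'A' and 'B'
// \endcode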
if (usedDifferentExceptionSpec) {
// Update FunctionProtoType::ExtProtoInfo.
QualType T = Importer.Import(D->getType());
@@ -2243,17 +2759,47 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setType(T);
}
- // Import the body, if any.
- if (Stmt *FromBody = D->getBody()) {
- if (Stmt *ToBody = Importer.Import(FromBody)) {
- ToFunction->setBody(ToBody);
+ // Import the describing template function, if any.
+ if (FromFT)
+ if (!Importer.Import(FromFT))
+ return nullptr;
+
+ if (D->doesThisDeclarationHaveABody()) {
+ if (Stmt *FromBody = D->getBody()) {
+ if (Stmt *ToBody = Importer.Import(FromBody)) {
+ ToFunction->setBody(ToBody);
+ }
}
}
// FIXME: Other bits to merge?
- // Add this function to the lexical context.
- LexicalDC->addDeclInternal(ToFunction);
+ // If it is a template, import all related things.
+ if (ImportTemplateInformation(D, ToFunction))
+ return nullptr;
+
+ bool IsFriend = D->isInIdentifierNamespace(Decl::IDNS_OrdinaryFriend);
+
+ // TODO: Can we generalize this approach to other AST nodes as well?
+ if (D->getDeclContext()->containsDeclAndLoad(D))
+ DC->addDeclInternal(ToFunction);
+ if (DC != LexicalDC && D->getLexicalDeclContext()->containsDeclAndLoad(D))
+ LexicalDC->addDeclInternal(ToFunction);
+
+ // A friend declaration's lexical context is the befriending class, but its
+ // semantic context is the enclosing scope of the befriending class.
+ // We want friend functions to be found in the semantic context by lookup.
+ // FIXME: Should we handle this generically in VisitFriendDecl?
+ // In other cases, when LexicalDC != DC, we do not want the declaration to be
+ // added, e.g. for out-of-class definitions like 'void B::f() {}'.
+ if (LexicalDC != DC && IsFriend) {
+ DC->makeDeclVisibleInContext(ToFunction);
+ }
+
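// Context layout for the friend case above (illustrative):
//
// \code
//   class B { friend void frnd(); };  // lexical DC of 'frnd': class B
//                                     // semantic DC: the enclosing TU
// \endcode
//
// makeDeclVisibleInContext lets later imports find 'frnd' when they look it
// up in the semantic context.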
+ // Import the rest of the chain. I.e. import all subsequent declarations.
+ for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt)
+ if (!Importer.Import(*RedeclIt))
+ return nullptr;
if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
ImportOverrides(cast<CXXMethodDecl>(ToFunction), FromCXXMethod);
@@ -2278,7 +2824,7 @@ Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
}
static unsigned getFieldIndex(Decl *F) {
- RecordDecl *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
+ auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
if (!Owner)
return 0;
@@ -2308,15 +2854,15 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Determine whether we've already imported this field.
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
+ for (auto *FoundDecl : FoundDecls) {
+ if (auto *FoundField = dyn_cast<FieldDecl>(FoundDecl)) {
// For anonymous fields, match up by index.
if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType())) {
- Importer.Imported(D, FoundField);
+ Importer.MapImported(D, FoundField);
return FoundField;
}
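// How anonymous fields are matched (illustrative):
//
// \code
//   struct S {
//     int : 4;   // unnamed bit-field, field index 0
//     int : 2;   // unnamed bit-field, field index 1
//   };
// \endcode
//
// With no name to compare, getFieldIndex pairs each unnamed member with the
// candidate at the same position in the owning record.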
@@ -2338,11 +2884,13 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
if (!BitWidth && D->getBitWidth())
return nullptr;
- FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()),
- Loc, Name.getAsIdentifierInfo(),
- T, TInfo, BitWidth, D->isMutable(),
- D->getInClassInitStyle());
+ FieldDecl *ToField;
+ if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()), Loc,
+ Name.getAsIdentifierInfo(), T, TInfo, BitWidth,
+ D->isMutable(), D->getInClassInitStyle()))
+ return ToField;
+
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
if (Expr *FromInitializer = D->getInClassInitializer()) {
@@ -2353,7 +2901,6 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
return nullptr;
}
ToField->setImplicit(D->isImplicit());
- Importer.Imported(D, ToField);
LexicalDC->addDeclInternal(ToField);
return ToField;
}
@@ -2373,8 +2920,7 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (IndirectFieldDecl *FoundField
- = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
+ if (auto *FoundField = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
// For anonymous indirect fields, match up by index.
if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
continue;
@@ -2382,7 +2928,7 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType(),
!Name.isEmpty())) {
- Importer.Imported(D, FoundField);
+ Importer.MapImported(D, FoundField);
return FoundField;
}
@@ -2403,8 +2949,8 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
if (T.isNull())
return nullptr;
- NamedDecl **NamedChain =
- new (Importer.getToContext())NamedDecl*[D->getChainingSize()];
+ auto **NamedChain =
+ new (Importer.getToContext()) NamedDecl*[D->getChainingSize()];
unsigned i = 0;
for (auto *PI : D->chain()) {
@@ -2414,16 +2960,18 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
NamedChain[i++] = cast<NamedDecl>(D);
}
- IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
- Importer.getToContext(), DC, Loc, Name.getAsIdentifierInfo(), T,
- {NamedChain, D->getChainingSize()});
+ llvm::MutableArrayRef<NamedDecl *> CH = {NamedChain, D->getChainingSize()};
+ IndirectFieldDecl *ToIndirectField;
+ if (GetImportedOrCreateDecl(ToIndirectField, D, Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(), T, CH))
+ // FIXME: Here we leak `NamedChain`, which was allocated above.
+ return ToIndirectField;
- for (const auto *Attr : D->attrs())
- ToIndirectField->addAttr(Attr->clone(Importer.getToContext()));
+ for (const auto *A : D->attrs())
+ ToIndirectField->addAttr(Importer.Import(A));
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToIndirectField);
LexicalDC->addDeclInternal(ToIndirectField);
return ToIndirectField;
}
@@ -2440,37 +2988,38 @@ Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// FriendDecl is not a NamedDecl so we cannot use localUncachedLookup.
auto *RD = cast<CXXRecordDecl>(DC);
FriendDecl *ImportedFriend = RD->getFirstFriend();
- StructuralEquivalenceContext Context(
- Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), false, false);
while (ImportedFriend) {
if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) {
- if (Context.IsStructurallyEquivalent(D->getFriendDecl(),
- ImportedFriend->getFriendDecl()))
- return Importer.Imported(D, ImportedFriend);
+ if (IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(),
+ /*Complain=*/false))
+ return Importer.MapImported(D, ImportedFriend);
} else if (D->getFriendType() && ImportedFriend->getFriendType()) {
if (Importer.IsStructurallyEquivalent(
D->getFriendType()->getType(),
ImportedFriend->getFriendType()->getType(), true))
- return Importer.Imported(D, ImportedFriend);
+ return Importer.MapImported(D, ImportedFriend);
}
ImportedFriend = ImportedFriend->getNextFriend();
}
// Not found. Create it.
FriendDecl::FriendUnion ToFU;
- if (NamedDecl *FriendD = D->getFriendDecl())
- ToFU = cast_or_null<NamedDecl>(Importer.Import(FriendD));
- else
+ if (NamedDecl *FriendD = D->getFriendDecl()) {
+ auto *ToFriendD = cast_or_null<NamedDecl>(Importer.Import(FriendD));
+ if (ToFriendD && FriendD->getFriendObjectKind() != Decl::FOK_None &&
+ !(FriendD->isInIdentifierNamespace(Decl::IDNS_NonMemberOperator)))
+ ToFriendD->setObjectOfFriendDecl(false);
+
+ ToFU = ToFriendD;
+ } else // The friend is a type, not a decl.
ToFU = Importer.Import(D->getFriendType());
if (!ToFU)
return nullptr;
SmallVector<TemplateParameterList *, 1> ToTPLists(D->NumTPLists);
- TemplateParameterList **FromTPLists =
- D->getTrailingObjects<TemplateParameterList *>();
+ auto **FromTPLists = D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0; I < D->NumTPLists; I++) {
TemplateParameterList *List = ImportTemplateParameterList(FromTPLists[I]);
if (!List)
@@ -2478,13 +3027,11 @@ Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
ToTPLists[I] = List;
}
- FriendDecl *FrD = FriendDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getLocation()),
- ToFU, Importer.Import(D->getFriendLoc()),
- ToTPLists);
-
- Importer.Imported(D, FrD);
- RD->pushFriendDecl(FrD);
+ FriendDecl *FrD;
+ if (GetImportedOrCreateDecl(FrD, D, Importer.getToContext(), DC,
+ Importer.Import(D->getLocation()), ToFU,
+ Importer.Import(D->getFriendLoc()), ToTPLists))
+ return FrD;
FrD->setAccess(D->getAccess());
FrD->setLexicalDeclContext(LexicalDC);
@@ -2506,11 +3053,11 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Determine whether we've already imported this ivar
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
+ for (auto *FoundDecl : FoundDecls) {
+ if (auto *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecl)) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundIvar->getType())) {
- Importer.Imported(D, FoundIvar);
+ Importer.MapImported(D, FoundIvar);
return FoundIvar;
}
@@ -2532,17 +3079,17 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
if (!BitWidth && D->getBitWidth())
return nullptr;
- ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(),
- cast<ObjCContainerDecl>(DC),
- Importer.Import(D->getInnerLocStart()),
- Loc, Name.getAsIdentifierInfo(),
- T, TInfo, D->getAccessControl(),
- BitWidth, D->getSynthesize());
+ ObjCIvarDecl *ToIvar;
+ if (GetImportedOrCreateDecl(
+ ToIvar, D, Importer.getToContext(), cast<ObjCContainerDecl>(DC),
+ Importer.Import(D->getInnerLocStart()), Loc,
+ Name.getAsIdentifierInfo(), T, TInfo, D->getAccessControl(), BitWidth,
+ D->getSynthesize()))
+ return ToIvar;
+
ToIvar->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToIvar);
LexicalDC->addDeclInternal(ToIvar);
return ToIvar;
-
}
Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
@@ -2564,11 +3111,11 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (VarDecl *FoundVar = dyn_cast<VarDecl>(FoundDecls[I])) {
+ if (auto *FoundVar = dyn_cast<VarDecl>(FoundDecl)) {
// We have found a variable that we may need to merge with. Check it.
if (FoundVar->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
@@ -2607,14 +3154,15 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
}
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (MergeWithVar) {
- // An equivalent variable with external linkage has been found. Link
+ // An equivalent variable with external linkage has been found. Link
// the two declarations, then merge them.
- Importer.Imported(D, MergeWithVar);
-
+ Importer.MapImported(D, MergeWithVar);
+ updateFlags(D, MergeWithVar);
+
if (VarDecl *DDef = D->getDefinition()) {
if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
Importer.ToDiag(ExistingDef->getLocation(),
@@ -2651,20 +3199,20 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
// Create the imported variable.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()),
- Loc, Name.getAsIdentifierInfo(),
- T, TInfo,
- D->getStorageClass());
+ VarDecl *ToVar;
+ if (GetImportedOrCreateDecl(ToVar, D, Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()), Loc,
+ Name.getAsIdentifierInfo(), T, TInfo,
+ D->getStorageClass()))
+ return ToVar;
+
ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToVar);
- LexicalDC->addDeclInternal(ToVar);
- if (!D->isFileVarDecl() &&
- D->isUsed())
- ToVar->setIsUsed();
+ // Templated declarations should never appear in the enclosing DeclContext.
+ if (!D->getDescribedVarTemplate())
+ LexicalDC->addDeclInternal(ToVar);
// Merge the initializer.
if (ImportDefinition(D, ToVar))
@@ -2695,10 +3243,12 @@ Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
return nullptr;
// Create the imported parameter.
- auto *ToParm = ImplicitParamDecl::Create(Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(), T,
- D->getParameterKind());
- return Importer.Imported(D, ToParm);
+ ImplicitParamDecl *ToParm = nullptr;
+ if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(), T,
+ D->getParameterKind()))
+ return ToParm;
+ return ToParm;
}
Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
@@ -2721,11 +3271,13 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// Create the imported parameter.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- ParmVarDecl *ToParm = ParmVarDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()),
- Loc, Name.getAsIdentifierInfo(),
- T, TInfo, D->getStorageClass(),
- /*DefaultArg*/ nullptr);
+ ParmVarDecl *ToParm;
+ if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()), Loc,
+ Name.getAsIdentifierInfo(), T, TInfo,
+ D->getStorageClass(),
+ /*DefaultArg*/ nullptr))
+ return ToParm;
// Set the default argument.
ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
@@ -2747,10 +3299,15 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
if (FromDefArg && !ToDefArg)
return nullptr;
- if (D->isUsed())
- ToParm->setIsUsed();
+ if (D->isObjCMethodParameter()) {
+ ToParm->setObjCMethodScopeInfo(D->getFunctionScopeIndex());
+ ToParm->setObjCDeclQualifier(D->getObjCDeclQualifier());
+ } else {
+ ToParm->setScopeInfo(D->getFunctionScopeDepth(),
+ D->getFunctionScopeIndex());
+ }
- return Importer.Imported(D, ToParm);
+ return ToParm;
}
Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
@@ -2766,8 +3323,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
+ for (auto *FoundDecl : FoundDecls) {
+ if (auto *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecl)) {
if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
continue;
@@ -2822,7 +3379,7 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
// FIXME: Any other bits we need to merge?
- return Importer.Imported(D, FoundMethod);
+ return Importer.MapImported(D, FoundMethod);
}
}
@@ -2833,11 +3390,14 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
- ObjCMethodDecl *ToMethod = ObjCMethodDecl::Create(
- Importer.getToContext(), Loc, Importer.Import(D->getLocEnd()),
- Name.getObjCSelector(), ResultTy, ReturnTInfo, DC, D->isInstanceMethod(),
- D->isVariadic(), D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
- D->getImplementationControl(), D->hasRelatedResultType());
+ ObjCMethodDecl *ToMethod;
+ if (GetImportedOrCreateDecl(
+ ToMethod, D, Importer.getToContext(), Loc,
+ Importer.Import(D->getLocEnd()), Name.getObjCSelector(), ResultTy,
+ ReturnTInfo, DC, D->isInstanceMethod(), D->isVariadic(),
+ D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
+ D->getImplementationControl(), D->hasRelatedResultType()))
+ return ToMethod;
// FIXME: When we decide to merge method definitions, we'll need to
// deal with implicit parameters.
@@ -2845,7 +3405,7 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Import the parameters
SmallVector<ParmVarDecl *, 5> ToParams;
for (auto *FromP : D->parameters()) {
- ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
+ auto *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
if (!ToP)
return nullptr;
@@ -2853,16 +3413,19 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
// Set the parameters.
- for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
- ToParams[I]->setOwningFunction(ToMethod);
- ToMethod->addDeclInternal(ToParams[I]);
+ for (auto *ToParam : ToParams) {
+ ToParam->setOwningFunction(ToMethod);
+ ToMethod->addDeclInternal(ToParam);
}
+
SmallVector<SourceLocation, 12> SelLocs;
D->getSelectorLocs(SelLocs);
- ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+ for (auto &Loc : SelLocs)
+ Loc = Importer.Import(Loc);
+
+ ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
ToMethod->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToMethod);
LexicalDC->addDeclInternal(ToMethod);
return ToMethod;
}
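
The selector-location loop added above also fixes a latent bug visible in the removed line: getSelectorLocs() fills SelLocs with locations that are only meaningful in the source context's SourceManager, and they were previously handed to setMethodParams() untranslated. Restated with explicit types and comments:

    SmallVector<SourceLocation, 12> SelLocs;
    D->getSelectorLocs(SelLocs);    // locations valid in the "from" context
    for (SourceLocation &Loc : SelLocs)
      Loc = Importer.Import(Loc);   // remap into the "to" context
    ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);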
@@ -2882,16 +3445,14 @@ Decl *ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
if (!BoundInfo)
return nullptr;
- ObjCTypeParamDecl *Result = ObjCTypeParamDecl::Create(
- Importer.getToContext(), DC,
- D->getVariance(),
- Importer.Import(D->getVarianceLoc()),
- D->getIndex(),
- Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo(),
- Importer.Import(D->getColonLoc()),
- BoundInfo);
- Importer.Imported(D, Result);
+ ObjCTypeParamDecl *Result;
+ if (GetImportedOrCreateDecl(
+ Result, D, Importer.getToContext(), DC, D->getVariance(),
+ Importer.Import(D->getVarianceLoc()), D->getIndex(),
+ Importer.Import(D->getLocation()), Name.getAsIdentifierInfo(),
+ Importer.Import(D->getColonLoc()), BoundInfo))
+ return Result;
+
Result->setLexicalDeclContext(LexicalDC);
return Result;
}
@@ -2907,8 +3468,8 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
if (ToD)
return ToD;
- ObjCInterfaceDecl *ToInterface
- = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
+ auto *ToInterface =
+ cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
if (!ToInterface)
return nullptr;
@@ -2917,18 +3478,18 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
= ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
ObjCCategoryDecl *ToCategory = MergeWithCategory;
if (!ToCategory) {
- ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()),
- Loc,
- Importer.Import(D->getCategoryNameLoc()),
- Name.getAsIdentifierInfo(),
- ToInterface,
- /*TypeParamList=*/nullptr,
- Importer.Import(D->getIvarLBraceLoc()),
- Importer.Import(D->getIvarRBraceLoc()));
+
+ if (GetImportedOrCreateDecl(ToCategory, D, Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()), Loc,
+ Importer.Import(D->getCategoryNameLoc()),
+ Name.getAsIdentifierInfo(), ToInterface,
+ /*TypeParamList=*/nullptr,
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc())))
+ return ToCategory;
+
ToCategory->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToCategory);
- Importer.Imported(D, ToCategory);
// Import the type parameter list after calling Imported, to avoid
// loops when bringing in their DeclContext.
ToCategory->setTypeParamList(ImportObjCTypeParamList(
@@ -2943,8 +3504,8 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
FromProtoEnd = D->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- ObjCProtocolDecl *ToProto
- = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ auto *ToProto =
+ cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
if (!ToProto)
return nullptr;
Protocols.push_back(ToProto);
@@ -2954,9 +3515,8 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
// FIXME: If we're merging, make sure that the protocol list is the same.
ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
-
} else {
- Importer.Imported(D, ToCategory);
+ Importer.MapImported(D, ToCategory);
}
// Import all of the members of this category.
@@ -2964,8 +3524,8 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
// If we have an implementation, import it as well.
if (D->getImplementation()) {
- ObjCCategoryImplDecl *Impl
- = cast_or_null<ObjCCategoryImplDecl>(
+ auto *Impl =
+ cast_or_null<ObjCCategoryImplDecl>(
Importer.Import(D->getImplementation()));
if (!Impl)
return nullptr;
@@ -2997,8 +3557,7 @@ bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- ObjCProtocolDecl *ToProto
- = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ auto *ToProto = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
if (!ToProto)
return true;
Protocols.push_back(ToProto);
@@ -3026,7 +3585,7 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
// Import the major distinguishing characteristics of a protocol.
@@ -3042,25 +3601,26 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
ObjCProtocolDecl *MergeWithProtocol = nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
continue;
- if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecls[I])))
+ if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecl)))
break;
}
ObjCProtocolDecl *ToProto = MergeWithProtocol;
if (!ToProto) {
- ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
- Name.getAsIdentifierInfo(), Loc,
- Importer.Import(D->getAtStartLoc()),
- /*PrevDecl=*/nullptr);
+ if (GetImportedOrCreateDecl(ToProto, D, Importer.getToContext(), DC,
+ Name.getAsIdentifierInfo(), Loc,
+ Importer.Import(D->getAtStartLoc()),
+ /*PrevDecl=*/nullptr))
+ return ToProto;
ToProto->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProto);
}
-
- Importer.Imported(D, ToProto);
+
+ Importer.MapImported(D, ToProto);
if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
return nullptr;
@@ -3076,14 +3636,11 @@ Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
SourceLocation LangLoc = Importer.Import(D->getLocation());
bool HasBraces = D->hasBraces();
-
- LinkageSpecDecl *ToLinkageSpec =
- LinkageSpecDecl::Create(Importer.getToContext(),
- DC,
- ExternLoc,
- LangLoc,
- D->getLanguage(),
- HasBraces);
+
+ LinkageSpecDecl *ToLinkageSpec;
+ if (GetImportedOrCreateDecl(ToLinkageSpec, D, Importer.getToContext(), DC,
+ ExternLoc, LangLoc, D->getLanguage(), HasBraces))
+ return ToLinkageSpec;
if (HasBraces) {
SourceLocation RBraceLoc = Importer.Import(D->getRBraceLoc());
@@ -3093,8 +3650,6 @@ Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
ToLinkageSpec->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToLinkageSpec);
- Importer.Imported(D, ToLinkageSpec);
-
return ToLinkageSpec;
}
@@ -3112,26 +3667,28 @@ Decl *ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
Importer.Import(D->getNameInfo().getLoc()));
ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
- UsingDecl *ToUsing = UsingDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getQualifierLoc()),
- NameInfo, D->hasTypename());
+ UsingDecl *ToUsing;
+ if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
+ Importer.Import(D->getUsingLoc()),
+ Importer.Import(D->getQualifierLoc()), NameInfo,
+ D->hasTypename()))
+ return ToUsing;
+
ToUsing->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToUsing);
- Importer.Imported(D, ToUsing);
if (NamedDecl *FromPattern =
Importer.getFromContext().getInstantiatedFromUsingDecl(D)) {
- if (NamedDecl *ToPattern =
- dyn_cast_or_null<NamedDecl>(Importer.Import(FromPattern)))
+ if (auto *ToPattern =
+ dyn_cast_or_null<NamedDecl>(Importer.Import(FromPattern)))
Importer.getToContext().setInstantiatedFromUsingDecl(ToUsing, ToPattern);
else
return nullptr;
}
- for (UsingShadowDecl *FromShadow : D->shadows()) {
- if (UsingShadowDecl *ToShadow =
- dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromShadow)))
+ for (auto *FromShadow : D->shadows()) {
+ if (auto *ToShadow =
+ dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromShadow)))
ToUsing->addShadowDecl(ToShadow);
else
// FIXME: We return a nullptr here but the definition is already created
@@ -3151,27 +3708,28 @@ Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
if (ToD)
return ToD;
- UsingDecl *ToUsing = dyn_cast_or_null<UsingDecl>(
- Importer.Import(D->getUsingDecl()));
+ auto *ToUsing = dyn_cast_or_null<UsingDecl>(
+ Importer.Import(D->getUsingDecl()));
if (!ToUsing)
return nullptr;
- NamedDecl *ToTarget = dyn_cast_or_null<NamedDecl>(
- Importer.Import(D->getTargetDecl()));
+ auto *ToTarget = dyn_cast_or_null<NamedDecl>(
+ Importer.Import(D->getTargetDecl()));
if (!ToTarget)
return nullptr;
- UsingShadowDecl *ToShadow = UsingShadowDecl::Create(
- Importer.getToContext(), DC, Loc, ToUsing, ToTarget);
+ UsingShadowDecl *ToShadow;
+ if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc,
+ ToUsing, ToTarget))
+ return ToShadow;
ToShadow->setLexicalDeclContext(LexicalDC);
ToShadow->setAccess(D->getAccess());
- Importer.Imported(D, ToShadow);
if (UsingShadowDecl *FromPattern =
Importer.getFromContext().getInstantiatedFromUsingShadowDecl(D)) {
- if (UsingShadowDecl *ToPattern =
- dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromPattern)))
+ if (auto *ToPattern =
+ dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromPattern)))
Importer.getToContext().setInstantiatedFromUsingShadowDecl(ToShadow,
ToPattern);
else
@@ -3185,7 +3743,6 @@ Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
return ToShadow;
}
-
Decl *ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -3200,19 +3757,22 @@ Decl *ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
if (!ToComAncestor)
return nullptr;
- NamespaceDecl *ToNominated = cast_or_null<NamespaceDecl>(
- Importer.Import(D->getNominatedNamespace()));
+ auto *ToNominated = cast_or_null<NamespaceDecl>(
+ Importer.Import(D->getNominatedNamespace()));
if (!ToNominated)
return nullptr;
- UsingDirectiveDecl *ToUsingDir = UsingDirectiveDecl::Create(
- Importer.getToContext(), DC, Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getNamespaceKeyLocation()),
- Importer.Import(D->getQualifierLoc()),
- Importer.Import(D->getIdentLocation()), ToNominated, ToComAncestor);
+ UsingDirectiveDecl *ToUsingDir;
+ if (GetImportedOrCreateDecl(ToUsingDir, D, Importer.getToContext(), DC,
+ Importer.Import(D->getUsingLoc()),
+ Importer.Import(D->getNamespaceKeyLocation()),
+ Importer.Import(D->getQualifierLoc()),
+ Importer.Import(D->getIdentLocation()),
+ ToNominated, ToComAncestor))
+ return ToUsingDir;
+
ToUsingDir->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToUsingDir);
- Importer.Imported(D, ToUsingDir);
return ToUsingDir;
}
@@ -3231,12 +3791,13 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingValueDecl(
DeclarationNameInfo NameInfo(Name, Importer.Import(D->getNameInfo().getLoc()));
ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
- UnresolvedUsingValueDecl *ToUsingValue = UnresolvedUsingValueDecl::Create(
- Importer.getToContext(), DC, Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getQualifierLoc()), NameInfo,
- Importer.Import(D->getEllipsisLoc()));
+ UnresolvedUsingValueDecl *ToUsingValue;
+ if (GetImportedOrCreateDecl(ToUsingValue, D, Importer.getToContext(), DC,
+ Importer.Import(D->getUsingLoc()),
+ Importer.Import(D->getQualifierLoc()), NameInfo,
+ Importer.Import(D->getEllipsisLoc())))
+ return ToUsingValue;
- Importer.Imported(D, ToUsingValue);
ToUsingValue->setAccess(D->getAccess());
ToUsingValue->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToUsingValue);
@@ -3255,13 +3816,14 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
if (ToD)
return ToD;
- UnresolvedUsingTypenameDecl *ToUsing = UnresolvedUsingTypenameDecl::Create(
- Importer.getToContext(), DC, Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getTypenameLoc()),
- Importer.Import(D->getQualifierLoc()), Loc, Name,
- Importer.Import(D->getEllipsisLoc()));
+ UnresolvedUsingTypenameDecl *ToUsing;
+ if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
+ Importer.Import(D->getUsingLoc()),
+ Importer.Import(D->getTypenameLoc()),
+ Importer.Import(D->getQualifierLoc()), Loc, Name,
+ Importer.Import(D->getEllipsisLoc())))
+ return ToUsing;
- Importer.Imported(D, ToUsing);
ToUsing->setAccess(D->getAccess());
ToUsing->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToUsing);
@@ -3269,7 +3831,6 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
return ToUsing;
}
-
bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
ObjCInterfaceDecl *To,
ImportDefinitionKind Kind) {
@@ -3330,8 +3891,7 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- ObjCProtocolDecl *ToProto
- = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ auto *ToProto = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
if (!ToProto)
return true;
Protocols.push_back(ToProto);
@@ -3349,8 +3909,8 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
// If we have an @implementation, import it as well.
if (From->getImplementation()) {
- ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
- Importer.Import(From->getImplementation()));
+ auto *Impl = cast_or_null<ObjCImplementationDecl>(
+ Importer.Import(From->getImplementation()));
if (!Impl)
return true;
@@ -3371,8 +3931,8 @@ ASTNodeImporter::ImportObjCTypeParamList(ObjCTypeParamList *list) {
SmallVector<ObjCTypeParamDecl *, 4> toTypeParams;
for (auto fromTypeParam : *list) {
- auto toTypeParam = cast_or_null<ObjCTypeParamDecl>(
- Importer.Import(fromTypeParam));
+ auto *toTypeParam = cast_or_null<ObjCTypeParamDecl>(
+ Importer.Import(fromTypeParam));
if (!toTypeParam)
return nullptr;
@@ -3395,7 +3955,7 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
// Import the major distinguishing characteristics of an @interface.
@@ -3412,27 +3972,27 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
ObjCInterfaceDecl *MergeWithIface = nullptr;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
- if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecls[I])))
+ if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecl)))
break;
}
// Create an interface declaration, if one does not already exist.
ObjCInterfaceDecl *ToIface = MergeWithIface;
if (!ToIface) {
- ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()),
- Name.getAsIdentifierInfo(),
- /*TypeParamList=*/nullptr,
- /*PrevDecl=*/nullptr, Loc,
- D->isImplicitInterfaceDecl());
+ if (GetImportedOrCreateDecl(
+ ToIface, D, Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()), Name.getAsIdentifierInfo(),
+ /*TypeParamList=*/nullptr,
+ /*PrevDecl=*/nullptr, Loc, D->isImplicitInterfaceDecl()))
+ return ToIface;
ToIface->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToIface);
}
- Importer.Imported(D, ToIface);
+ Importer.MapImported(D, ToIface);
// Import the type parameter list after calling Imported, to avoid
// loops when bringing in their DeclContext.
ToIface->setTypeParamList(ImportObjCTypeParamList(
@@ -3445,8 +4005,8 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
}
Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
- ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>(
- Importer.Import(D->getCategoryDecl()));
+ auto *Category = cast_or_null<ObjCCategoryDecl>(
+ Importer.Import(D->getCategoryDecl()));
if (!Category)
return nullptr;
@@ -3457,13 +4017,13 @@ Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
return nullptr;
SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
- ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getIdentifier()),
- Category->getClassInterface(),
- Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()),
- CategoryNameLoc);
-
+ if (GetImportedOrCreateDecl(
+ ToImpl, D, Importer.getToContext(), DC,
+ Importer.Import(D->getIdentifier()), Category->getClassInterface(),
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()), CategoryNameLoc))
+ return ToImpl;
+
DeclContext *LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
@@ -3477,15 +4037,15 @@ Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
Category->setImplementation(ToImpl);
}
- Importer.Imported(D, ToImpl);
+ Importer.MapImported(D, ToImpl);
ImportDeclContext(D);
return ToImpl;
}
Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
// Find the corresponding interface.
- ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>(
- Importer.Import(D->getClassInterface()));
+ auto *Iface = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getClassInterface()));
if (!Iface)
return nullptr;
@@ -3502,15 +4062,15 @@ Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
if (!Impl) {
// We haven't imported an implementation yet. Create a new @implementation
// now.
- Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
- Importer.ImportContext(D->getDeclContext()),
- Iface, Super,
- Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()),
- Importer.Import(D->getSuperClassLoc()),
- Importer.Import(D->getIvarLBraceLoc()),
- Importer.Import(D->getIvarRBraceLoc()));
-
+ if (GetImportedOrCreateDecl(Impl, D, Importer.getToContext(),
+ Importer.ImportContext(D->getDeclContext()),
+ Iface, Super, Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ Importer.Import(D->getSuperClassLoc()),
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc())))
+ return Impl;
+
if (D->getDeclContext() != D->getLexicalDeclContext()) {
DeclContext *LexicalDC
= Importer.ImportContext(D->getLexicalDeclContext());
@@ -3518,12 +4078,12 @@ Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
return nullptr;
Impl->setLexicalDeclContext(LexicalDC);
}
-
+
// Associate the implementation with the class it implements.
Iface->setImplementation(Impl);
- Importer.Imported(D, Iface->getImplementation());
+ Importer.MapImported(D, Iface->getImplementation());
} else {
- Importer.Imported(D, Iface->getImplementation());
+ Importer.MapImported(D, Iface->getImplementation());
// Verify that the existing @implementation has the same superclass.
if ((Super && !Impl->getSuperClass()) ||
@@ -3574,9 +4134,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Check whether we have already imported this property.
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (ObjCPropertyDecl *FoundProp
- = dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
+ for (auto *FoundDecl : FoundDecls) {
+ if (auto *FoundProp = dyn_cast<ObjCPropertyDecl>(FoundDecl)) {
// Check property types.
if (!Importer.IsStructurallyEquivalent(D->getType(),
FoundProp->getType())) {
@@ -3590,7 +4149,7 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// FIXME: Check property attributes, getters, setters, etc.?
// Consider these properties to be equivalent.
- Importer.Imported(D, FoundProp);
+ Importer.MapImported(D, FoundProp);
return FoundProp;
}
}
@@ -3601,15 +4160,14 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
return nullptr;
// Create the new property.
- ObjCPropertyDecl *ToProperty
- = ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(),
- Importer.Import(D->getAtLoc()),
- Importer.Import(D->getLParenLoc()),
- Importer.Import(D->getType()),
- TSI,
- D->getPropertyImplementation());
- Importer.Imported(D, ToProperty);
+ ObjCPropertyDecl *ToProperty;
+ if (GetImportedOrCreateDecl(
+ ToProperty, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(), Importer.Import(D->getAtLoc()),
+ Importer.Import(D->getLParenLoc()), Importer.Import(D->getType()),
+ TSI, D->getPropertyImplementation()))
+ return ToProperty;
+
ToProperty->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProperty);
@@ -3630,8 +4188,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
- ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
- Importer.Import(D->getPropertyDecl()));
+ auto *Property = cast_or_null<ObjCPropertyDecl>(
+ Importer.Import(D->getPropertyDecl()));
if (!Property)
return nullptr;
@@ -3647,7 +4205,7 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
return nullptr;
}
- ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
+ auto *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
if (!InImpl)
return nullptr;
@@ -3663,16 +4221,15 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
ObjCPropertyImplDecl *ToImpl
= InImpl->FindPropertyImplDecl(Property->getIdentifier(),
Property->getQueryKind());
- if (!ToImpl) {
- ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getLocStart()),
- Importer.Import(D->getLocation()),
- Property,
- D->getPropertyImplementation(),
- Ivar,
- Importer.Import(D->getPropertyIvarDeclLoc()));
+ if (!ToImpl) {
+ if (GetImportedOrCreateDecl(ToImpl, D, Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()), Property,
+ D->getPropertyImplementation(), Ivar,
+ Importer.Import(D->getPropertyIvarDeclLoc())))
+ return ToImpl;
+
ToImpl->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToImpl);
LexicalDC->addDeclInternal(ToImpl);
} else {
// Check that we have the same kind of property implementation (@synthesize
@@ -3705,7 +4262,7 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
}
// Merge the existing implementation with the new implementation.
- Importer.Imported(D, ToImpl);
+ Importer.MapImported(D, ToImpl);
}
return ToImpl;
@@ -3717,15 +4274,14 @@ Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// is created.
// FIXME: Import default argument.
- return TemplateTypeParmDecl::Create(Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(),
- Importer.Import(D->getLocStart()),
- Importer.Import(D->getLocation()),
- D->getDepth(),
- D->getIndex(),
- Importer.Import(D->getIdentifier()),
- D->wasDeclaredWithTypename(),
- D->isParameterPack());
+ TemplateTypeParmDecl *ToD = nullptr;
+ (void)GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getLocStart()), Importer.Import(D->getLocation()),
+ D->getDepth(), D->getIndex(), Importer.Import(D->getIdentifier()),
+ D->wasDeclaredWithTypename(), D->isParameterPack());
+ return ToD;
}
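
In the three template-parameter visitors the helper's result is deliberately discarded with (void): whether the parameter was found or freshly created, the visitor returns ToD either way, and template parameters are parented to the translation unit rather than added to a lexical DeclContext, so no follow-up initialization depends on which branch was taken.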
Decl *
@@ -3749,13 +4305,15 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
return nullptr;
// FIXME: Import default argument.
-
- return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(),
- Importer.Import(D->getInnerLocStart()),
- Loc, D->getDepth(), D->getPosition(),
- Name.getAsIdentifierInfo(),
- T, D->isParameterPack(), TInfo);
+
+ NonTypeTemplateParmDecl *ToD = nullptr;
+ (void)GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getInnerLocStart()), Loc, D->getDepth(),
+ D->getPosition(), Name.getAsIdentifierInfo(), T, D->isParameterPack(),
+ TInfo);
+ return ToD;
}
Decl *
@@ -3767,7 +4325,7 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
// Import the location of this declaration.
SourceLocation Loc = Importer.Import(D->getLocation());
-
+
// Import template parameters.
TemplateParameterList *TemplateParams
= ImportTemplateParameterList(D->getTemplateParameters());
@@ -3775,30 +4333,42 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
return nullptr;
// FIXME: Import default argument.
-
- return TemplateTemplateParmDecl::Create(Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(),
- Loc, D->getDepth(), D->getPosition(),
- D->isParameterPack(),
- Name.getAsIdentifierInfo(),
- TemplateParams);
+
+ TemplateTemplateParmDecl *ToD = nullptr;
+ (void)GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(), Loc, D->getDepth(),
+ D->getPosition(), D->isParameterPack(), Name.getAsIdentifierInfo(),
+ TemplateParams);
+ return ToD;
+}
+
+// Returns the definition for a (forward) declaration of a ClassTemplateDecl,
+// if there is any definition in its redecl chain.
+static ClassTemplateDecl *getDefinition(ClassTemplateDecl *D) {
+ CXXRecordDecl *ToTemplatedDef = D->getTemplatedDecl()->getDefinition();
+ if (!ToTemplatedDef)
+ return nullptr;
+ ClassTemplateDecl *TemplateWithDef =
+ ToTemplatedDef->getDescribedClassTemplate();
+ return TemplateWithDef;
}
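
The hop through getDefinition matters because localUncachedLookup returns the most recent redeclaration. A hypothetical "to" AST (class name illustrative only):

    template <class T> struct X {}; // definition
    template <class T> struct X;    // later forward declaration; this is
                                    // what lookup hands back

getDefinition walks from the found template to the definition of its templated CXXRecordDecl and back to the ClassTemplateDecl describing it, so the structural match below compares definitions rather than a definition against a forward declaration.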
Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
- CXXRecordDecl *Definition
- = cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
+ auto *Definition =
+ cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl()) {
Decl *ImportedDef
= Importer.Import(Definition->getDescribedClassTemplate());
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
-
+
// Import the major distinguishing characteristics of this class template.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -3814,69 +4384,73 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
-
- Decl *Found = FoundDecls[I];
- if (ClassTemplateDecl *FoundTemplate
- = dyn_cast<ClassTemplateDecl>(Found)) {
+
+ Decl *Found = FoundDecl;
+ if (auto *FoundTemplate = dyn_cast<ClassTemplateDecl>(Found)) {
+
+ // The class to be imported is a definition.
+ if (D->isThisDeclarationADefinition()) {
+ // Lookup will find the fwd decl only if it is more recent than the
+ // definition. So, try to get the definition if one is available in
+ // the redecl chain.
+ ClassTemplateDecl *TemplateWithDef = getDefinition(FoundTemplate);
+ if (!TemplateWithDef)
+ continue;
+ FoundTemplate = TemplateWithDef; // Continue with the definition.
+ }
+
if (IsStructuralMatch(D, FoundTemplate)) {
// The class templates structurally match; call it the same template.
- // FIXME: We may be filling in a forward declaration here. Handle
- // this case!
- Importer.Imported(D->getTemplatedDecl(),
- FoundTemplate->getTemplatedDecl());
- return Importer.Imported(D, FoundTemplate);
- }
+
+ Importer.MapImported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.MapImported(D, FoundTemplate);
+ }
}
-
- ConflictingDecls.push_back(FoundDecls[I]);
+
+ ConflictingDecls.push_back(FoundDecl);
}
-
+
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
- ConflictingDecls.data(),
+ ConflictingDecls.data(),
ConflictingDecls.size());
}
-
+
if (!Name)
return nullptr;
}
- CXXRecordDecl *DTemplated = D->getTemplatedDecl();
-
+ CXXRecordDecl *FromTemplated = D->getTemplatedDecl();
+
// Create the declaration that is being templated.
- CXXRecordDecl *D2Templated = cast_or_null<CXXRecordDecl>(
- Importer.Import(DTemplated));
- if (!D2Templated)
+ auto *ToTemplated = cast_or_null<CXXRecordDecl>(
+ Importer.Import(FromTemplated));
+ if (!ToTemplated)
return nullptr;
- // Resolve possible cyclic import.
- if (Decl *AlreadyImported = Importer.GetAlreadyImportedOrNull(D))
- return AlreadyImported;
-
// Create the class template declaration itself.
- TemplateParameterList *TemplateParams
- = ImportTemplateParameterList(D->getTemplateParameters());
+ TemplateParameterList *TemplateParams =
+ ImportTemplateParameterList(D->getTemplateParameters());
if (!TemplateParams)
return nullptr;
- ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
- Loc, Name, TemplateParams,
- D2Templated);
- D2Templated->setDescribedClassTemplate(D2);
+ ClassTemplateDecl *D2;
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, Loc, Name,
+ TemplateParams, ToTemplated))
+ return D2;
+
+ ToTemplated->setDescribedClassTemplate(D2);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
- // Note the relationship between the class templates.
- Importer.Imported(D, D2);
- Importer.Imported(DTemplated, D2Templated);
-
- if (DTemplated->isCompleteDefinition() &&
- !D2Templated->isCompleteDefinition()) {
+ if (FromTemplated->isCompleteDefinition() &&
+ !ToTemplated->isCompleteDefinition()) {
// FIXME: Import definition!
}
@@ -3894,11 +4468,11 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
- ClassTemplateDecl *ClassTemplate
- = cast_or_null<ClassTemplateDecl>(Importer.Import(
+ auto *ClassTemplate =
+ cast_or_null<ClassTemplateDecl>(Importer.Import(
D->getSpecializedTemplate()));
if (!ClassTemplate)
return nullptr;
@@ -3941,23 +4515,18 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
// function.
- return Importer.Imported(D, FoundDef);
+ return Importer.MapImported(D, FoundDef);
}
}
} else {
// Create a new specialization.
- if (ClassTemplatePartialSpecializationDecl *PartialSpec =
- dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
-
+ if (auto *PartialSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
// Import TemplateArgumentListInfo
TemplateArgumentListInfo ToTAInfo;
- auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
- for (unsigned I = 0, E = ASTTemplateArgs.NumTemplateArgs; I < E; ++I) {
- if (auto ToLoc = ImportTemplateArgumentLoc(ASTTemplateArgs[I]))
- ToTAInfo.addArgument(*ToLoc);
- else
- return nullptr;
- }
+ const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
+ if (ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
+ return nullptr;
QualType CanonInjType = Importer.Import(
PartialSpec->getInjectedSpecializationType());
@@ -3970,19 +4539,18 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
if (!ToTPList && PartialSpec->getTemplateParameters())
return nullptr;
- D2 = ClassTemplatePartialSpecializationDecl::Create(
- Importer.getToContext(), D->getTagKind(), DC, StartLoc, IdLoc,
- ToTPList, ClassTemplate,
- llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
- ToTAInfo, CanonInjType, nullptr);
+ if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
+ D2, D, Importer.getToContext(), D->getTagKind(), DC, StartLoc,
+ IdLoc, ToTPList, ClassTemplate,
+ llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
+ ToTAInfo, CanonInjType, nullptr))
+ return D2;
} else {
- D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
- D->getTagKind(), DC,
- StartLoc, IdLoc,
- ClassTemplate,
- TemplateArgs,
- /*PrevDecl=*/nullptr);
+ if (GetImportedOrCreateDecl(
+ D2, D, Importer.getToContext(), D->getTagKind(), DC, StartLoc,
+ IdLoc, ClassTemplate, TemplateArgs, /*PrevDecl=*/nullptr))
+ return D2;
}
D2->setSpecializationKind(D->getSpecializationKind());
@@ -3993,8 +4561,6 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
- Importer.Imported(D, D2);
-
if (auto *TSI = D->getTypeAsWritten()) {
TypeSourceInfo *TInfo = Importer.Import(TSI);
if (!TInfo)
@@ -4012,11 +4578,14 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
- // Add the specialization to this context.
+ // Set the context of this specialization/instantiation.
D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
+
+ // Add to the DC only if it was an explicit specialization/instantiation.
+ if (D2->isExplicitInstantiationOrSpecialization()) {
+ LexicalDC->addDeclInternal(D2);
+ }
}
- Importer.Imported(D, D2);
if (D->isCompleteDefinition() && ImportDefinition(D, D2))
return nullptr;
@@ -4028,14 +4597,14 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
// from,
// but this particular declaration is not that definition, import the
// definition and map to that.
- VarDecl *Definition =
+ auto *Definition =
cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl()) {
Decl *ImportedDef = Importer.Import(Definition->getDescribedVarTemplate());
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
// Import the major distinguishing characteristics of this variable template.
@@ -4054,21 +4623,21 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
- Decl *Found = FoundDecls[I];
- if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+ Decl *Found = FoundDecl;
+ if (auto *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
if (IsStructuralMatch(D, FoundTemplate)) {
// The variable templates structurally match; call it the same template.
- Importer.Imported(D->getTemplatedDecl(),
- FoundTemplate->getTemplatedDecl());
- return Importer.Imported(D, FoundTemplate);
+ Importer.MapImported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.MapImported(D, FoundTemplate);
}
}
- ConflictingDecls.push_back(FoundDecls[I]);
+ ConflictingDecls.push_back(FoundDecl);
}
if (!ConflictingDecls.empty()) {
@@ -4088,21 +4657,8 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
return nullptr;
// Create the declaration that is being templated.
- SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
- SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
- TypeSourceInfo *TInfo = Importer.Import(DTemplated->getTypeSourceInfo());
- VarDecl *D2Templated = VarDecl::Create(Importer.getToContext(), DC, StartLoc,
- IdLoc, Name.getAsIdentifierInfo(), T,
- TInfo, DTemplated->getStorageClass());
- D2Templated->setAccess(DTemplated->getAccess());
- D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
- D2Templated->setLexicalDeclContext(LexicalDC);
-
- // Importer.Imported(DTemplated, D2Templated);
- // LexicalDC->addDeclInternal(D2Templated);
-
- // Merge the initializer.
- if (ImportDefinition(DTemplated, D2Templated))
+ auto *ToTemplated = dyn_cast_or_null<VarDecl>(Importer.Import(DTemplated));
+ if (!ToTemplated)
return nullptr;
// Create the variable template declaration itself.
@@ -4111,24 +4667,23 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
if (!TemplateParams)
return nullptr;
- VarTemplateDecl *D2 = VarTemplateDecl::Create(
- Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated);
- D2Templated->setDescribedVarTemplate(D2);
+ VarTemplateDecl *ToVarTD;
+ if (GetImportedOrCreateDecl(ToVarTD, D, Importer.getToContext(), DC, Loc,
+ Name, TemplateParams, ToTemplated))
+ return ToVarTD;
- D2->setAccess(D->getAccess());
- D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
+ ToTemplated->setDescribedVarTemplate(ToVarTD);
- // Note the relationship between the variable templates.
- Importer.Imported(D, D2);
- Importer.Imported(DTemplated, D2Templated);
+ ToVarTD->setAccess(D->getAccess());
+ ToVarTD->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToVarTD);
if (DTemplated->isThisDeclarationADefinition() &&
- !D2Templated->isThisDeclarationADefinition()) {
+ !ToTemplated->isThisDeclarationADefinition()) {
// FIXME: Import definition!
}
- return D2;
+ return ToVarTD;
}
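
Note the structural change in VisitVarTemplateDecl: the templated VarDecl is no longer created inline with duplicated initializer-merging code, but imported through Importer.Import(DTemplated), so it takes the ordinary VisitVarDecl path above, including external-linkage merging. The VarTemplateDecl wrapper is then created through the usual helper and wired up via setDescribedVarTemplate.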
Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
@@ -4142,10 +4697,10 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
if (!ImportedDef)
return nullptr;
- return Importer.Imported(D, ImportedDef);
+ return Importer.MapImported(D, ImportedDef);
}
- VarTemplateDecl *VarTemplate = cast_or_null<VarTemplateDecl>(
+ auto *VarTemplate = cast_or_null<VarTemplateDecl>(
Importer.Import(D->getSpecializedTemplate()));
if (!VarTemplate)
return nullptr;
@@ -4188,23 +4743,68 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
// The record types structurally match, or the "from" translation
// unit only had a forward declaration anyway; call it the same
// variable.
- return Importer.Imported(D, FoundDef);
+ return Importer.MapImported(D, FoundDef);
}
}
} else {
-
// Import the type.
QualType T = Importer.Import(D->getType());
if (T.isNull())
return nullptr;
+
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ if (D->getTypeSourceInfo() && !TInfo)
+ return nullptr;
+
+ TemplateArgumentListInfo ToTAInfo;
+ if (ImportTemplateArgumentListInfo(D->getTemplateArgsInfo(), ToTAInfo))
+ return nullptr;
+ using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
// Create a new specialization.
- D2 = VarTemplateSpecializationDecl::Create(
- Importer.getToContext(), DC, StartLoc, IdLoc, VarTemplate, T, TInfo,
- D->getStorageClass(), TemplateArgs);
+ if (auto *FromPartial = dyn_cast<PartVarSpecDecl>(D)) {
+ // Import TemplateArgumentListInfo
+ TemplateArgumentListInfo ArgInfos;
+ const auto *FromTAArgsAsWritten = FromPartial->getTemplateArgsAsWritten();
+ // NOTE: FromTAArgsAsWritten and the template parameter list are non-null.
+ if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ArgInfos))
+ return nullptr;
+
+ TemplateParameterList *ToTPList = ImportTemplateParameterList(
+ FromPartial->getTemplateParameters());
+ if (!ToTPList)
+ return nullptr;
+
+ PartVarSpecDecl *ToPartial;
+ if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC,
+ StartLoc, IdLoc, ToTPList, VarTemplate, T,
+ TInfo, D->getStorageClass(), TemplateArgs,
+ ArgInfos))
+ return ToPartial;
+
+ auto *FromInst = FromPartial->getInstantiatedFromMember();
+ auto *ToInst = cast_or_null<PartVarSpecDecl>(Importer.Import(FromInst));
+ if (FromInst && !ToInst)
+ return nullptr;
+
+ ToPartial->setInstantiatedFromMember(ToInst);
+ if (FromPartial->isMemberSpecialization())
+ ToPartial->setMemberSpecialization();
+
+ D2 = ToPartial;
+ } else { // Full specialization
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, StartLoc,
+ IdLoc, VarTemplate, T, TInfo,
+ D->getStorageClass(), TemplateArgs))
+ return D2;
+ }
+
+ SourceLocation POI = D->getPointOfInstantiation();
+ if (POI.isValid())
+ D2->setPointOfInstantiation(Importer.Import(POI));
+
D2->setSpecializationKind(D->getSpecializationKind());
- D2->setTemplateArgsInfo(D->getTemplateArgsInfo());
+ D2->setTemplateArgsInfo(ToTAInfo);
// Add this specialization to the class template.
VarTemplate->AddSpecialization(D2, InsertPos);
@@ -4212,13 +4812,22 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ if (D->isConstexpr())
+ D2->setConstexpr(true);
+
// Add the specialization to this context.
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
+
+ D2->setAccess(D->getAccess());
}
- Importer.Imported(D, D2);
- if (D->isThisDeclarationADefinition() && ImportDefinition(D, D2))
+ // NOTE: isThisDeclarationADefinition() can return DeclarationOnly even if
+ // the declaration has an initializer. Should this be fixed in the AST?
+ // Anyway, we have to check the declaration for an initializer; otherwise
+ // it will not be imported.
+ if ((D->isThisDeclarationADefinition() || D->hasInit()) &&
+ ImportDefinition(D, D2))
return nullptr;
return D2;
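
The hasInit() workaround is easiest to see on a concrete case (hypothetical example, matching the NOTE above):

    template <class T> int v = 0; // primary variable template
    template <> int v<int> = 1;   // isThisDeclarationADefinition() may
                                  // report DeclarationOnly for the imported
                                  // declaration, yet hasInit() is true, so
                                  // the initializer must still be imported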
@@ -4242,16 +4851,15 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
unsigned IDNS = Decl::IDNS_Ordinary;
SmallVector<NamedDecl *, 2> FoundDecls;
DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
- for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
- if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ for (auto *FoundDecl : FoundDecls) {
+ if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (FunctionTemplateDecl *FoundFunction =
- dyn_cast<FunctionTemplateDecl>(FoundDecls[I])) {
+ if (auto *FoundFunction = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
if (FoundFunction->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
if (IsStructuralMatch(D, FoundFunction)) {
- Importer.Imported(D, FoundFunction);
+ Importer.MapImported(D, FoundFunction);
// FIXME: Actually try to merge the body and other attributes.
return FoundFunction;
}
@@ -4265,18 +4873,19 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (!Params)
return nullptr;
- FunctionDecl *TemplatedFD =
+ auto *TemplatedFD =
cast_or_null<FunctionDecl>(Importer.Import(D->getTemplatedDecl()));
if (!TemplatedFD)
return nullptr;
- FunctionTemplateDecl *ToFunc = FunctionTemplateDecl::Create(
- Importer.getToContext(), DC, Loc, Name, Params, TemplatedFD);
+ FunctionTemplateDecl *ToFunc;
+ if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name,
+ Params, TemplatedFD))
+ return ToFunc;
TemplatedFD->setDescribedFunctionTemplate(ToFunc);
ToFunc->setAccess(D->getAccess());
ToFunc->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToFunc);
LexicalDC->addDeclInternal(ToFunc);
return ToFunc;
@@ -4301,12 +4910,11 @@ DeclGroupRef ASTNodeImporter::ImportDeclGroup(DeclGroupRef DG) {
NumDecls);
}
- Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
- Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
- << S->getStmtClassName();
- return nullptr;
- }
-
+Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
+ Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
+ << S->getStmtClassName();
+ return nullptr;
+}
Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<IdentifierInfo *, 4> Names;
@@ -4329,8 +4937,8 @@ Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<StringLiteral *, 4> Clobbers;
for (unsigned I = 0, E = S->getNumClobbers(); I != E; I++) {
- StringLiteral *Clobber = cast_or_null<StringLiteral>(
- Importer.Import(S->getClobberStringLiteral(I)));
+ auto *Clobber = cast_or_null<StringLiteral>(
+ Importer.Import(S->getClobberStringLiteral(I)));
if (!Clobber)
return nullptr;
Clobbers.push_back(Clobber);
@@ -4338,16 +4946,16 @@ Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<StringLiteral *, 4> Constraints;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
- StringLiteral *Output = cast_or_null<StringLiteral>(
- Importer.Import(S->getOutputConstraintLiteral(I)));
+ auto *Output = cast_or_null<StringLiteral>(
+ Importer.Import(S->getOutputConstraintLiteral(I)));
if (!Output)
return nullptr;
Constraints.push_back(Output);
}
for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) {
- StringLiteral *Input = cast_or_null<StringLiteral>(
- Importer.Import(S->getInputConstraintLiteral(I)));
+ auto *Input = cast_or_null<StringLiteral>(
+ Importer.Import(S->getInputConstraintLiteral(I)));
if (!Input)
return nullptr;
Constraints.push_back(Input);
@@ -4360,8 +4968,8 @@ Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
if (ImportArrayChecked(S->inputs(), Exprs.begin() + S->getNumOutputs()))
return nullptr;
- StringLiteral *AsmStr = cast_or_null<StringLiteral>(
- Importer.Import(S->getAsmString()));
+ auto *AsmStr = cast_or_null<StringLiteral>(
+ Importer.Import(S->getAsmString()));
if (!AsmStr)
return nullptr;
@@ -4383,7 +4991,7 @@ Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
Stmt *ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
DeclGroupRef ToDG = ImportDeclGroup(S->getDeclGroup());
- for (Decl *ToD : ToDG) {
+ for (auto *ToD : ToDG) {
if (!ToD)
return nullptr;
}
@@ -4399,7 +5007,7 @@ Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
}
Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
- llvm::SmallVector<Stmt *, 8> ToStmts(S->size());
+ SmallVector<Stmt *, 8> ToStmts(S->size());
if (ImportContainerChecked(S->body(), ToStmts))
return nullptr;
@@ -4423,7 +5031,7 @@ Stmt *ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
SourceLocation ToCaseLoc = Importer.Import(S->getCaseLoc());
SourceLocation ToEllipsisLoc = Importer.Import(S->getEllipsisLoc());
SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- CaseStmt *ToStmt = new (Importer.getToContext())
+ auto *ToStmt = new (Importer.getToContext())
CaseStmt(ToLHS, ToRHS, ToCaseLoc, ToEllipsisLoc, ToColonLoc);
ToStmt->setSubStmt(ToSubStmt);
return ToStmt;
@@ -4441,8 +5049,7 @@ Stmt *ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
Stmt *ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
SourceLocation ToIdentLoc = Importer.Import(S->getIdentLoc());
- LabelDecl *ToLabelDecl =
- cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
+ auto *ToLabelDecl = cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
if (!ToLabelDecl && S->getDecl())
return nullptr;
Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
@@ -4456,15 +5063,8 @@ Stmt *ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
SourceLocation ToAttrLoc = Importer.Import(S->getAttrLoc());
ArrayRef<const Attr*> FromAttrs(S->getAttrs());
SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
- ASTContext &_ToContext = Importer.getToContext();
- std::transform(FromAttrs.begin(), FromAttrs.end(), ToAttrs.begin(),
- [&_ToContext](const Attr *A) -> const Attr * {
- return A->clone(_ToContext);
- });
- for (const Attr *ToA : ToAttrs) {
- if (!ToA)
- return nullptr;
- }
+ if (ImportContainerChecked(FromAttrs, ToAttrs))
+ return nullptr;
Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
if (!ToSubStmt && S->getSubStmt())
return nullptr;
@@ -4516,7 +5116,7 @@ Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
Expr *ToCondition = Importer.Import(S->getCond());
if (!ToCondition && S->getCond())
return nullptr;
- SwitchStmt *ToStmt = new (Importer.getToContext()) SwitchStmt(
+ auto *ToStmt = new (Importer.getToContext()) SwitchStmt(
Importer.getToContext(), ToInit,
ToConditionVariable, ToCondition);
Stmt *ToBody = Importer.Import(S->getBody());
@@ -4528,7 +5128,7 @@ Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
SwitchCase *LastChainedSwitchCase = nullptr;
for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr;
SC = SC->getNextSwitchCase()) {
- SwitchCase *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
+ auto *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
if (!ToSC)
return nullptr;
if (LastChainedSwitchCase)
@@ -4645,8 +5245,8 @@ Stmt *ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
Expr *ToRetExpr = Importer.Import(S->getRetValue());
if (!ToRetExpr && S->getRetValue())
return nullptr;
- VarDecl *NRVOCandidate = const_cast<VarDecl*>(S->getNRVOCandidate());
- VarDecl *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
+ auto *NRVOCandidate = const_cast<VarDecl *>(S->getNRVOCandidate());
+ auto *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
if (!ToNRVOCandidate && NRVOCandidate)
return nullptr;
return new (Importer.getToContext()) ReturnStmt(ToRetLoc, ToRetExpr,
@@ -4688,15 +5288,15 @@ Stmt *ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
}
Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
- DeclStmt *ToRange =
+ auto *ToRange =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getRangeStmt()));
if (!ToRange && S->getRangeStmt())
return nullptr;
- DeclStmt *ToBegin =
+ auto *ToBegin =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getBeginStmt()));
if (!ToBegin && S->getBeginStmt())
return nullptr;
- DeclStmt *ToEnd =
+ auto *ToEnd =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getEndStmt()));
if (!ToEnd && S->getEndStmt())
return nullptr;
@@ -4706,7 +5306,7 @@ Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
Expr *ToInc = Importer.Import(S->getInc());
if (!ToInc && S->getInc())
return nullptr;
- DeclStmt *ToLoopVar =
+ auto *ToLoopVar =
dyn_cast_or_null<DeclStmt>(Importer.Import(S->getLoopVarStmt()));
if (!ToLoopVar && S->getLoopVarStmt())
return nullptr;
@@ -4851,7 +5451,6 @@ Expr *ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
Importer.Import(E->getRParenLoc()), T, E->isMicrosoftABI());
}
-
Expr *ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -4866,8 +5465,7 @@ Expr *ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
if (T.isNull())
return nullptr;
- StringLiteral *SL = cast_or_null<StringLiteral>(
- Importer.Import(E->getFunctionName()));
+ auto *SL = cast_or_null<StringLiteral>(Importer.Import(E->getFunctionName()));
if (!SL && E->getFunctionName())
return nullptr;
@@ -4876,7 +5474,7 @@ Expr *ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
}
Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
- ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
+ auto *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
if (!ToD)
return nullptr;
@@ -4891,16 +5489,11 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
if (T.isNull())
return nullptr;
-
TemplateArgumentListInfo ToTAInfo;
TemplateArgumentListInfo *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- for (const auto &FromLoc : E->template_arguments()) {
- if (auto ToTALoc = ImportTemplateArgumentLoc(FromLoc))
- ToTAInfo.addArgument(*ToTALoc);
- else
- return nullptr;
- }
+ if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return nullptr;
ResInfo = &ToTAInfo;
}
@@ -4947,14 +5540,14 @@ ASTNodeImporter::ImportDesignator(const Designator &D) {
Expr *ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *DIE) {
- Expr *Init = cast_or_null<Expr>(Importer.Import(DIE->getInit()));
+ auto *Init = cast_or_null<Expr>(Importer.Import(DIE->getInit()));
if (!Init)
return nullptr;
SmallVector<Expr *, 4> IndexExprs(DIE->getNumSubExprs() - 1);
// List elements from the second, the first is Init itself
for (unsigned I = 1, E = DIE->getNumSubExprs(); I < E; I++) {
- if (Expr *Arg = cast_or_null<Expr>(Importer.Import(DIE->getSubExpr(I))))
+ if (auto *Arg = cast_or_null<Expr>(Importer.Import(DIE->getSubExpr(I))))
IndexExprs[I - 1] = Arg;
else
return nullptr;
@@ -4966,7 +5559,7 @@ Expr *ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *DIE) {
return ImportDesignator(D);
});
- for (const Designator &D : DIE->designators())
+ for (const auto &D : DIE->designators())
if (D.isFieldDesignator() && !D.getFieldName())
return nullptr;
@@ -5067,7 +5660,7 @@ Expr *ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
if (T.isNull())
return nullptr;
- LabelDecl *ToLabel = cast_or_null<LabelDecl>(Importer.Import(E->getLabel()));
+ auto *ToLabel = cast_or_null<LabelDecl>(Importer.Import(E->getLabel()));
if (!ToLabel)
return nullptr;
@@ -5102,8 +5695,8 @@ Expr *ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
if (T.isNull())
return nullptr;
- CompoundStmt *ToSubStmt = cast_or_null<CompoundStmt>(
- Importer.Import(E->getSubStmt()));
+ auto *ToSubStmt = cast_or_null<CompoundStmt>(
+ Importer.Import(E->getSubStmt()));
if (!ToSubStmt && E->getSubStmt())
return nullptr;
@@ -5120,14 +5713,13 @@ Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
if (!SubExpr)
return nullptr;
- return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
- T, E->getValueKind(),
- E->getObjectKind(),
- Importer.Import(E->getOperatorLoc()));
+ return new (Importer.getToContext()) UnaryOperator(
+ SubExpr, E->getOpcode(), T, E->getValueKind(), E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()), E->canOverflow());
}
-Expr *ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(
- UnaryExprOrTypeTraitExpr *E) {
+Expr *
+ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
QualType ResultType = Importer.Import(E->getType());
if (E->isArgumentType()) {
@@ -5208,8 +5800,8 @@ Expr *ASTNodeImporter::VisitBinaryConditionalOperator(
if (!Cond)
return nullptr;
- OpaqueValueExpr *OpaqueValue = cast_or_null<OpaqueValueExpr>(
- Importer.Import(E->getOpaqueValue()));
+ auto *OpaqueValue = cast_or_null<OpaqueValueExpr>(
+ Importer.Import(E->getOpaqueValue()));
if (!OpaqueValue)
return nullptr;
@@ -5367,7 +5959,7 @@ Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
switch (E->getStmtClass()) {
case Stmt::CStyleCastExprClass: {
- CStyleCastExpr *CCE = cast<CStyleCastExpr>(E);
+ auto *CCE = cast<CStyleCastExpr>(E);
return CStyleCastExpr::Create(Importer.getToContext(), T,
E->getValueKind(), E->getCastKind(),
SubExpr, &BasePath, TInfo,
@@ -5376,7 +5968,7 @@ Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
}
case Stmt::CXXFunctionalCastExprClass: {
- CXXFunctionalCastExpr *FCE = cast<CXXFunctionalCastExpr>(E);
+ auto *FCE = cast<CXXFunctionalCastExpr>(E);
return CXXFunctionalCastExpr::Create(Importer.getToContext(), T,
E->getValueKind(), TInfo,
E->getCastKind(), SubExpr, &BasePath,
@@ -5385,7 +5977,7 @@ Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
}
case Stmt::ObjCBridgedCastExprClass: {
- ObjCBridgedCastExpr *OCE = cast<ObjCBridgedCastExpr>(E);
+ auto *OCE = cast<ObjCBridgedCastExpr>(E);
return new (Importer.getToContext()) ObjCBridgedCastExpr(
Importer.Import(OCE->getLParenLoc()), OCE->getBridgeKind(),
E->getCastKind(), Importer.Import(OCE->getBridgeKeywordLoc()),
@@ -5395,7 +5987,7 @@ Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
break; // just fall through
}
- CXXNamedCastExpr *Named = cast<CXXNamedCastExpr>(E);
+ auto *Named = cast<CXXNamedCastExpr>(E);
SourceLocation ExprLoc = Importer.Import(Named->getOperatorLoc()),
RParenLoc = Importer.Import(Named->getRParenLoc());
SourceRange Brackets = Importer.Import(Named->getAngleBrackets());
@@ -5453,7 +6045,7 @@ Expr *ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *OE) {
break;
}
case OffsetOfNode::Field: {
- FieldDecl *FD = cast_or_null<FieldDecl>(Importer.Import(Node.getField()));
+ auto *FD = cast_or_null<FieldDecl>(Importer.Import(Node.getField()));
if (!FD)
return nullptr;
Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()), FD,
@@ -5524,8 +6116,7 @@ Expr *ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
}
Expr *ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
- ParmVarDecl *Param = cast_or_null<ParmVarDecl>(
- Importer.Import(E->getParam()));
+ auto *Param = cast_or_null<ParmVarDecl>(Importer.Import(E->getParam()));
if (!Param)
return nullptr;
@@ -5567,6 +6158,10 @@ Expr *ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE) {
if (T.isNull())
return nullptr;
+ TypeSourceInfo *TInfo = Importer.Import(CE->getTypeSourceInfo());
+ if (!TInfo)
+ return nullptr;
+
SmallVector<Expr *, 8> Args(CE->getNumArgs());
if (ImportContainerChecked(CE->arguments(), Args))
return nullptr;
@@ -5576,18 +6171,11 @@ Expr *ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE) {
if (!Ctor)
return nullptr;
- return CXXTemporaryObjectExpr::Create(
- Importer.getToContext(), T,
- Importer.Import(CE->getLocStart()),
- Ctor,
- CE->isElidable(),
- Args,
- CE->hadMultipleCandidates(),
- CE->isListInitialization(),
- CE->isStdInitListInitialization(),
- CE->requiresZeroInitialization(),
- CE->getConstructionKind(),
- Importer.Import(CE->getParenOrBraceRange()));
+ return new (Importer.getToContext()) CXXTemporaryObjectExpr(
+ Importer.getToContext(), Ctor, T, TInfo, Args,
+ Importer.Import(CE->getParenOrBraceRange()), CE->hadMultipleCandidates(),
+ CE->isListInitialization(), CE->isStdInitListInitialization(),
+ CE->requiresZeroInitialization());
}
Expr *
@@ -5600,7 +6188,7 @@ ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
if (!TempE)
return nullptr;
- ValueDecl *ExtendedBy = cast_or_null<ValueDecl>(
+ auto *ExtendedBy = cast_or_null<ValueDecl>(
Importer.Import(const_cast<ValueDecl *>(E->getExtendingDecl())));
if (!ExtendedBy && E->getExtendingDecl())
return nullptr;
@@ -5627,6 +6215,30 @@ Expr *ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
E->getNumExpansions());
}
+Expr *ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ auto *Pack = cast_or_null<NamedDecl>(Importer.Import(E->getPack()));
+ if (!Pack)
+ return nullptr;
+
+ Optional<unsigned> Length;
+
+ if (!E->isValueDependent())
+ Length = E->getPackLength();
+
+ SmallVector<TemplateArgument, 8> PartialArguments;
+ if (E->isPartiallySubstituted()) {
+ if (ImportTemplateArguments(E->getPartialArguments().data(),
+ E->getPartialArguments().size(),
+ PartialArguments))
+ return nullptr;
+ }
+
+ return SizeOfPackExpr::Create(
+ Importer.getToContext(), Importer.Import(E->getOperatorLoc()), Pack,
+ Importer.Import(E->getPackLoc()), Importer.Import(E->getRParenLoc()),
+ Length, PartialArguments);
+}
+
Expr *ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *CE) {
QualType T = Importer.Import(CE->getType());
if (T.isNull())
@@ -5636,12 +6248,12 @@ Expr *ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *CE) {
if (ImportContainerChecked(CE->placement_arguments(), PlacementArgs))
return nullptr;
- FunctionDecl *OperatorNewDecl = cast_or_null<FunctionDecl>(
+ auto *OperatorNewDecl = cast_or_null<FunctionDecl>(
Importer.Import(CE->getOperatorNew()));
if (!OperatorNewDecl && CE->getOperatorNew())
return nullptr;
- FunctionDecl *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
+ auto *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
Importer.Import(CE->getOperatorDelete()));
if (!OperatorDeleteDecl && CE->getOperatorDelete())
return nullptr;
@@ -5676,7 +6288,7 @@ Expr *ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
if (T.isNull())
return nullptr;
- FunctionDecl *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
+ auto *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
Importer.Import(E->getOperatorDelete()));
if (!OperatorDeleteDecl && E->getOperatorDelete())
return nullptr;
@@ -5700,7 +6312,7 @@ Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
if (T.isNull())
return nullptr;
- CXXConstructorDecl *ToCCD =
+ auto *ToCCD =
dyn_cast_or_null<CXXConstructorDecl>(Importer.Import(E->getConstructor()));
if (!ToCCD)
return nullptr;
@@ -5784,13 +6396,17 @@ Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
if (!ToBase && E->getBase())
return nullptr;
- ValueDecl *ToMember = dyn_cast<ValueDecl>(Importer.Import(E->getMemberDecl()));
+ auto *ToMember = dyn_cast<ValueDecl>(Importer.Import(E->getMemberDecl()));
if (!ToMember && E->getMemberDecl())
return nullptr;
- DeclAccessPair ToFoundDecl = DeclAccessPair::make(
- dyn_cast<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl())),
- E->getFoundDecl().getAccess());
+ auto *ToDecl =
+ dyn_cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl()));
+ if (!ToDecl && E->getFoundDecl().getDecl())
+ return nullptr;
+
+ DeclAccessPair ToFoundDecl =
+ DeclAccessPair::make(ToDecl, E->getFoundDecl().getAccess());
DeclarationNameInfo ToMemberNameInfo(
Importer.Import(E->getMemberNameInfo().getName()),
@@ -5812,7 +6428,6 @@ Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
Expr *ASTNodeImporter::VisitCXXPseudoDestructorExpr(
CXXPseudoDestructorExpr *E) {
-
Expr *BaseE = Importer.Import(E->getBase());
if (!BaseE)
return nullptr;
@@ -5856,11 +6471,10 @@ Expr *ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
if (BaseType.isNull())
return nullptr;
- TemplateArgumentListInfo ToTAInfo(Importer.Import(E->getLAngleLoc()),
- Importer.Import(E->getRAngleLoc()));
- TemplateArgumentListInfo *ResInfo = nullptr;
+ TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ if (ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
+ E->template_arguments(), ToTAInfo))
return nullptr;
ResInfo = &ToTAInfo;
}
@@ -5884,6 +6498,127 @@ Expr *ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
cast_or_null<NamedDecl>(ToFQ), MemberNameInfo, ResInfo);
}
+Expr *
+ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ DeclarationName Name = Importer.Import(E->getDeclName());
+ if (!E->getDeclName().isEmpty() && Name.isEmpty())
+ return nullptr;
+
+ DeclarationNameInfo NameInfo(Name, Importer.Import(E->getExprLoc()));
+ ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
+
+ TemplateArgumentListInfo ToTAInfo(Importer.Import(E->getLAngleLoc()),
+ Importer.Import(E->getRAngleLoc()));
+ TemplateArgumentListInfo *ResInfo = nullptr;
+ if (E->hasExplicitTemplateArgs()) {
+ if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return nullptr;
+ ResInfo = &ToTAInfo;
+ }
+
+ return DependentScopeDeclRefExpr::Create(
+ Importer.getToContext(), Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()), NameInfo, ResInfo);
+}
+
+Expr *ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *CE) {
+ unsigned NumArgs = CE->arg_size();
+
+ SmallVector<Expr *, 8> ToArgs(NumArgs);
+ if (ImportArrayChecked(CE->arg_begin(), CE->arg_end(), ToArgs.begin()))
+ return nullptr;
+
+ return CXXUnresolvedConstructExpr::Create(
+ Importer.getToContext(), Importer.Import(CE->getTypeSourceInfo()),
+ Importer.Import(CE->getLParenLoc()), llvm::makeArrayRef(ToArgs),
+ Importer.Import(CE->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ auto *NamingClass =
+ cast_or_null<CXXRecordDecl>(Importer.Import(E->getNamingClass()));
+ if (E->getNamingClass() && !NamingClass)
+ return nullptr;
+
+ DeclarationName Name = Importer.Import(E->getName());
+ if (E->getName() && !Name)
+ return nullptr;
+
+ DeclarationNameInfo NameInfo(Name, Importer.Import(E->getNameLoc()));
+ // Import additional name location/type info.
+ ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
+
+ UnresolvedSet<8> ToDecls;
+ for (auto *D : E->decls()) {
+ if (auto *To = cast_or_null<NamedDecl>(Importer.Import(D)))
+ ToDecls.addDecl(To);
+ else
+ return nullptr;
+ }
+
+ TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
+ if (E->hasExplicitTemplateArgs()) {
+ if (ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
+ E->template_arguments(), ToTAInfo))
+ return nullptr;
+ ResInfo = &ToTAInfo;
+ }
+
+ if (ResInfo || E->getTemplateKeywordLoc().isValid())
+ return UnresolvedLookupExpr::Create(
+ Importer.getToContext(), NamingClass,
+ Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()), NameInfo, E->requiresADL(),
+ ResInfo, ToDecls.begin(), ToDecls.end());
+
+ return UnresolvedLookupExpr::Create(
+ Importer.getToContext(), NamingClass,
+ Importer.Import(E->getQualifierLoc()), NameInfo, E->requiresADL(),
+ E->isOverloaded(), ToDecls.begin(), ToDecls.end());
+}
+
+Expr *ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ DeclarationName Name = Importer.Import(E->getName());
+ if (!E->getName().isEmpty() && Name.isEmpty())
+ return nullptr;
+ DeclarationNameInfo NameInfo(Name, Importer.Import(E->getNameLoc()));
+ // Import additional name location/type info.
+ ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
+
+ QualType BaseType = Importer.Import(E->getType());
+ if (!E->getType().isNull() && BaseType.isNull())
+ return nullptr;
+
+ UnresolvedSet<8> ToDecls;
+ for (Decl *D : E->decls()) {
+ if (NamedDecl *To = cast_or_null<NamedDecl>(Importer.Import(D)))
+ ToDecls.addDecl(To);
+ else
+ return nullptr;
+ }
+
+ TemplateArgumentListInfo ToTAInfo;
+ TemplateArgumentListInfo *ResInfo = nullptr;
+ if (E->hasExplicitTemplateArgs()) {
+ if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return nullptr;
+ ResInfo = &ToTAInfo;
+ }
+
+ Expr *BaseE = E->isImplicitAccess() ? nullptr : Importer.Import(E->getBase());
+ if (!BaseE && !E->isImplicitAccess() && E->getBase()) {
+ return nullptr;
+ }
+
+ return UnresolvedMemberExpr::Create(
+ Importer.getToContext(), E->hasUnresolvedUsing(), BaseE, BaseType,
+ E->isArrow(), Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()), NameInfo, ResInfo,
+ ToDecls.begin(), ToDecls.end());
+}
+
Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -5894,35 +6629,100 @@ Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
return nullptr;
unsigned NumArgs = E->getNumArgs();
+ SmallVector<Expr *, 2> ToArgs(NumArgs);
+ if (ImportContainerChecked(E->arguments(), ToArgs))
+ return nullptr;
- llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
-
- for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai) {
- Expr *FromArg = E->getArg(ai);
- Expr *ToArg = Importer.Import(FromArg);
- if (!ToArg)
- return nullptr;
- ToArgs[ai] = ToArg;
- }
-
- Expr **ToArgs_Copied = new (Importer.getToContext())
- Expr*[NumArgs];
+ auto **ToArgs_Copied = new (Importer.getToContext()) Expr*[NumArgs];
for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai)
ToArgs_Copied[ai] = ToArgs[ai];
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
+ return new (Importer.getToContext()) CXXOperatorCallExpr(
+ Importer.getToContext(), OCE->getOperator(), ToCallee, ToArgs, T,
+ OCE->getValueKind(), Importer.Import(OCE->getRParenLoc()),
+ OCE->getFPFeatures());
+ }
+
return new (Importer.getToContext())
CallExpr(Importer.getToContext(), ToCallee,
llvm::makeArrayRef(ToArgs_Copied, NumArgs), T, E->getValueKind(),
Importer.Import(E->getRParenLoc()));
}
+Optional<LambdaCapture>
+ASTNodeImporter::ImportLambdaCapture(const LambdaCapture &From) {
+ VarDecl *Var = nullptr;
+ if (From.capturesVariable()) {
+ Var = cast_or_null<VarDecl>(Importer.Import(From.getCapturedVar()));
+ if (!Var)
+ return None;
+ }
+
+ return LambdaCapture(Importer.Import(From.getLocation()), From.isImplicit(),
+ From.getCaptureKind(), Var,
+ From.isPackExpansion()
+ ? Importer.Import(From.getEllipsisLoc())
+ : SourceLocation());
+}
+
+Expr *ASTNodeImporter::VisitLambdaExpr(LambdaExpr *LE) {
+ CXXRecordDecl *FromClass = LE->getLambdaClass();
+ auto *ToClass = dyn_cast_or_null<CXXRecordDecl>(Importer.Import(FromClass));
+ if (!ToClass)
+ return nullptr;
+
+  // NOTE: Lambda classes are created with the BeingDefined flag set. This
+  // means ImportDefinition does not work for them, so we have to fill in the
+  // fields manually.
+ if (ToClass->isBeingDefined()) {
+ for (auto FromField : FromClass->fields()) {
+ auto *ToField = cast_or_null<FieldDecl>(Importer.Import(FromField));
+ if (!ToField)
+ return nullptr;
+ }
+ }
+
+ auto *ToCallOp = dyn_cast_or_null<CXXMethodDecl>(
+ Importer.Import(LE->getCallOperator()));
+ if (!ToCallOp)
+ return nullptr;
+
+ ToClass->completeDefinition();
+
+ unsigned NumCaptures = LE->capture_size();
+ SmallVector<LambdaCapture, 8> Captures;
+ Captures.reserve(NumCaptures);
+ for (const auto &FromCapture : LE->captures()) {
+ if (auto ToCapture = ImportLambdaCapture(FromCapture))
+ Captures.push_back(*ToCapture);
+ else
+ return nullptr;
+ }
+
+ SmallVector<Expr *, 8> InitCaptures(NumCaptures);
+ if (ImportContainerChecked(LE->capture_inits(), InitCaptures))
+ return nullptr;
+
+ return LambdaExpr::Create(Importer.getToContext(), ToClass,
+ Importer.Import(LE->getIntroducerRange()),
+ LE->getCaptureDefault(),
+ Importer.Import(LE->getCaptureDefaultLoc()),
+ Captures,
+ LE->hasExplicitParameters(),
+ LE->hasExplicitResultType(),
+ InitCaptures,
+ Importer.Import(LE->getLocEnd()),
+ LE->containsUnexpandedParameterPack());
+}
+
Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
QualType T = Importer.Import(ILE->getType());
if (T.isNull())
return nullptr;
- llvm::SmallVector<Expr *, 4> Exprs(ILE->getNumInits());
+ SmallVector<Expr *, 4> Exprs(ILE->getNumInits());
if (ImportContainerChecked(ILE->inits(), Exprs))
return nullptr;
@@ -5940,15 +6740,14 @@ Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
}
if (FieldDecl *FromFD = ILE->getInitializedFieldInUnion()) {
- FieldDecl *ToFD = cast_or_null<FieldDecl>(Importer.Import(FromFD));
+ auto *ToFD = cast_or_null<FieldDecl>(Importer.Import(FromFD));
if (!ToFD)
return nullptr;
To->setInitializedFieldInUnion(ToFD);
}
if (InitListExpr *SyntForm = ILE->getSyntacticForm()) {
- InitListExpr *ToSyntForm = cast_or_null<InitListExpr>(
- Importer.Import(SyntForm));
+ auto *ToSyntForm = cast_or_null<InitListExpr>(Importer.Import(SyntForm));
if (!ToSyntForm)
return nullptr;
To->setSyntacticForm(ToSyntForm);
@@ -5961,6 +6760,35 @@ Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
return To;
}
+Expr *ASTNodeImporter::VisitCXXStdInitializerListExpr(
+ CXXStdInitializerListExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *SE = Importer.Import(E->getSubExpr());
+ if (!SE)
+ return nullptr;
+
+ return new (Importer.getToContext()) CXXStdInitializerListExpr(T, SE);
+}
+
+Expr *ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
+ CXXInheritedCtorInitExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ auto *Ctor = cast_or_null<CXXConstructorDecl>(Importer.Import(
+ E->getConstructor()));
+ if (!Ctor)
+ return nullptr;
+
+ return new (Importer.getToContext()) CXXInheritedCtorInitExpr(
+ Importer.Import(E->getLocation()), T, Ctor,
+ E->constructsVBase(), E->inheritedFromVBase());
+}
+
Expr *ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
QualType ToType = Importer.Import(E->getType());
if (ToType.isNull())
@@ -5986,8 +6814,7 @@ Expr *ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
}
Expr *ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
- FieldDecl *ToField = llvm::dyn_cast_or_null<FieldDecl>(
- Importer.Import(DIE->getField()));
+ auto *ToField = dyn_cast_or_null<FieldDecl>(Importer.Import(DIE->getField()));
if (!ToField && DIE->getField())
return nullptr;
@@ -6029,14 +6856,13 @@ Expr *ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
}
}
-
Expr *ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
- NonTypeTemplateParmDecl *Param = cast_or_null<NonTypeTemplateParmDecl>(
+ auto *Param = cast_or_null<NonTypeTemplateParmDecl>(
Importer.Import(E->getParameter()));
if (!Param)
return nullptr;
@@ -6070,6 +6896,28 @@ Expr *ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
E->getTrait(), ToArgs, Importer.Import(E->getLocEnd()), ToValue);
}
+Expr *ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ QualType ToType = Importer.Import(E->getType());
+ if (ToType.isNull())
+ return nullptr;
+
+ if (E->isTypeOperand()) {
+ TypeSourceInfo *TSI = Importer.Import(E->getTypeOperandSourceInfo());
+ if (!TSI)
+ return nullptr;
+
+ return new (Importer.getToContext())
+ CXXTypeidExpr(ToType, TSI, Importer.Import(E->getSourceRange()));
+ }
+
+ Expr *Op = Importer.Import(E->getExprOperand());
+ if (!Op)
+ return nullptr;
+
+ return new (Importer.getToContext())
+ CXXTypeidExpr(ToType, Op, Importer.Import(E->getSourceRange()));
+}
+
void ASTNodeImporter::ImportOverrides(CXXMethodDecl *ToMethod,
CXXMethodDecl *FromMethod) {
for (auto *FromOverriddenMethod : FromMethod->overridden_methods())
@@ -6081,19 +6929,18 @@ void ASTNodeImporter::ImportOverrides(CXXMethodDecl *ToMethod,
ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTContext &FromContext, FileManager &FromFileManager,
bool MinimalImport)
- : ToContext(ToContext), FromContext(FromContext),
- ToFileManager(ToFileManager), FromFileManager(FromFileManager),
- Minimal(MinimalImport), LastDiagFromFrom(false)
-{
+ : ToContext(ToContext), FromContext(FromContext),
+ ToFileManager(ToFileManager), FromFileManager(FromFileManager),
+ Minimal(MinimalImport) {
ImportedDecls[FromContext.getTranslationUnitDecl()]
= ToContext.getTranslationUnitDecl();
}
-ASTImporter::~ASTImporter() { }
+ASTImporter::~ASTImporter() = default;
QualType ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
- return QualType();
+ return {};
const Type *fromTy = FromT.getTypePtr();
@@ -6129,10 +6976,17 @@ TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
Import(FromTSI->getTypeLoc().getLocStart()));
}
+Attr *ASTImporter::Import(const Attr *FromAttr) {
+ Attr *ToAttr = FromAttr->clone(ToContext);
+ ToAttr->setRange(Import(FromAttr->getRange()));
+ return ToAttr;
+}
+
Decl *ASTImporter::GetAlreadyImportedOrNull(Decl *FromD) {
llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
if (Pos != ImportedDecls.end()) {
Decl *ToD = Pos->second;
+ // FIXME: move this call to ImportDeclParts().
ASTNodeImporter(*this).ImportDefinitionIfNeeded(FromD, ToD);
return ToD;
} else {
@@ -6146,44 +7000,22 @@ Decl *ASTImporter::Import(Decl *FromD) {
ASTNodeImporter Importer(*this);
- // Check whether we've already imported this declaration.
- llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
- if (Pos != ImportedDecls.end()) {
- Decl *ToD = Pos->second;
- Importer.ImportDefinitionIfNeeded(FromD, ToD);
+ // Check whether we've already imported this declaration.
+ Decl *ToD = GetAlreadyImportedOrNull(FromD);
+ if (ToD) {
+ // If FromD has some updated flags after last import, apply it
+ updateFlags(FromD, ToD);
return ToD;
}
-
- // Import the type
- Decl *ToD = Importer.Visit(FromD);
+
+  // Import the declaration.
+ ToD = Importer.Visit(FromD);
if (!ToD)
return nullptr;
- // Record the imported declaration.
- ImportedDecls[FromD] = ToD;
-
- if (TagDecl *FromTag = dyn_cast<TagDecl>(FromD)) {
- // Keep track of anonymous tags that have an associated typedef.
- if (FromTag->getTypedefNameForAnonDecl())
- AnonTagsWithPendingTypedefs.push_back(FromTag);
- } else if (TypedefNameDecl *FromTypedef = dyn_cast<TypedefNameDecl>(FromD)) {
- // When we've finished transforming a typedef, see whether it was the
- // typedef for an anonymous tag.
- for (SmallVectorImpl<TagDecl *>::iterator
- FromTag = AnonTagsWithPendingTypedefs.begin(),
- FromTagEnd = AnonTagsWithPendingTypedefs.end();
- FromTag != FromTagEnd; ++FromTag) {
- if ((*FromTag)->getTypedefNameForAnonDecl() == FromTypedef) {
- if (TagDecl *ToTag = cast_or_null<TagDecl>(Import(*FromTag))) {
- // We found the typedef for an anonymous tag; link them.
- ToTag->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToD));
- AnonTagsWithPendingTypedefs.erase(FromTag);
- break;
- }
- }
- }
- }
-
+ // Notify subclasses.
+ Imported(FromD, ToD);
+
return ToD;
}
@@ -6191,14 +7023,14 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
- DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
+ auto *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
if (!ToDC)
return nullptr;
// When we're using a record/enum/Objective-C class/protocol as a context, we
// need it to have a definition.
- if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
- RecordDecl *FromRecord = cast<RecordDecl>(FromDC);
+ if (auto *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
+ auto *FromRecord = cast<RecordDecl>(FromDC);
if (ToRecord->isCompleteDefinition()) {
// Do nothing.
} else if (FromRecord->isCompleteDefinition()) {
@@ -6207,8 +7039,8 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
} else {
CompleteDecl(ToRecord);
}
- } else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
- EnumDecl *FromEnum = cast<EnumDecl>(FromDC);
+ } else if (auto *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
+ auto *FromEnum = cast<EnumDecl>(FromDC);
if (ToEnum->isCompleteDefinition()) {
// Do nothing.
} else if (FromEnum->isCompleteDefinition()) {
@@ -6217,8 +7049,8 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
} else {
CompleteDecl(ToEnum);
}
- } else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
- ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC);
+ } else if (auto *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
+ auto *FromClass = cast<ObjCInterfaceDecl>(FromDC);
if (ToClass->getDefinition()) {
// Do nothing.
} else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
@@ -6227,8 +7059,8 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
} else {
CompleteDecl(ToClass);
}
- } else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
- ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC);
+ } else if (auto *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
+ auto *FromProto = cast<ObjCProtocolDecl>(FromDC);
if (ToProto->getDefinition()) {
// Do nothing.
} else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
@@ -6283,14 +7115,14 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
return nullptr;
case NestedNameSpecifier::Namespace:
- if (NamespaceDecl *NS =
- cast_or_null<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
+ if (auto *NS =
+ cast_or_null<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
return NestedNameSpecifier::Create(ToContext, prefix, NS);
}
return nullptr;
case NestedNameSpecifier::NamespaceAlias:
- if (NamespaceAliasDecl *NSAD =
+ if (auto *NSAD =
cast_or_null<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
}
@@ -6300,7 +7132,7 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
return NestedNameSpecifier::GlobalSpecifier(ToContext);
case NestedNameSpecifier::Super:
- if (CXXRecordDecl *RD =
+ if (auto *RD =
cast_or_null<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
return NestedNameSpecifier::SuperSpecifier(ToContext, RD);
}
@@ -6396,22 +7228,20 @@ NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
TemplateName ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
- if (TemplateDecl *ToTemplate
- = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ if (auto *ToTemplate =
+ cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
return TemplateName(ToTemplate);
- return TemplateName();
+ return {};
case TemplateName::OverloadedTemplate: {
OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
UnresolvedSet<2> ToTemplates;
- for (OverloadedTemplateStorage::iterator I = FromStorage->begin(),
- E = FromStorage->end();
- I != E; ++I) {
- if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I)))
+ for (auto *I : *FromStorage) {
+ if (auto *To = cast_or_null<NamedDecl>(Import(I)))
ToTemplates.addDecl(To);
else
- return TemplateName();
+ return {};
}
return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
ToTemplates.end());
@@ -6421,22 +7251,22 @@ TemplateName ASTImporter::Import(TemplateName From) {
QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
if (!Qualifier)
- return TemplateName();
+ return {};
- if (TemplateDecl *ToTemplate
- = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ if (auto *ToTemplate =
+ cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
return ToContext.getQualifiedTemplateName(Qualifier,
QTN->hasTemplateKeyword(),
ToTemplate);
-
- return TemplateName();
+
+ return {};
}
case TemplateName::DependentTemplate: {
DependentTemplateName *DTN = From.getAsDependentTemplateName();
NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
if (!Qualifier)
- return TemplateName();
+ return {};
if (DTN->isIdentifier()) {
return ToContext.getDependentTemplateName(Qualifier,
@@ -6449,13 +7279,14 @@ TemplateName ASTImporter::Import(TemplateName From) {
case TemplateName::SubstTemplateTemplateParm: {
SubstTemplateTemplateParmStorage *subst
= From.getAsSubstTemplateTemplateParm();
- TemplateTemplateParmDecl *param
- = cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
+ auto *param =
+ cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
if (!param)
- return TemplateName();
+ return {};
TemplateName replacement = Import(subst->getReplacement());
- if (replacement.isNull()) return TemplateName();
+ if (replacement.isNull())
+ return {};
return ToContext.getSubstTemplateTemplateParm(param, replacement);
}
@@ -6463,17 +7294,17 @@ TemplateName ASTImporter::Import(TemplateName From) {
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage *SubstPack
= From.getAsSubstTemplateTemplateParmPack();
- TemplateTemplateParmDecl *Param
- = cast_or_null<TemplateTemplateParmDecl>(
- Import(SubstPack->getParameterPack()));
+ auto *Param =
+ cast_or_null<TemplateTemplateParmDecl>(
+ Import(SubstPack->getParameterPack()));
if (!Param)
- return TemplateName();
+ return {};
ASTNodeImporter Importer(*this);
TemplateArgument ArgPack
= Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
if (ArgPack.isNull())
- return TemplateName();
+ return {};
return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
}
@@ -6484,22 +7315,16 @@ TemplateName ASTImporter::Import(TemplateName From) {
SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
if (FromLoc.isInvalid())
- return SourceLocation();
+ return {};
SourceManager &FromSM = FromContext.getSourceManager();
-
- // For now, map everything down to its file location, so that we
- // don't have to import macro expansions.
- // FIXME: Import macro expansions!
- FromLoc = FromSM.getFileLoc(FromLoc);
+
std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
- SourceManager &ToSM = ToContext.getSourceManager();
FileID ToFileID = Import(Decomposed.first);
if (ToFileID.isInvalid())
- return SourceLocation();
- SourceLocation ret = ToSM.getLocForStartOfFile(ToFileID)
- .getLocWithOffset(Decomposed.second);
- return ret;
+ return {};
+ SourceManager &ToSM = ToContext.getSourceManager();
+ return ToSM.getComposedLoc(ToFileID, Decomposed.second);
}
SourceRange ASTImporter::Import(SourceRange FromRange) {
@@ -6507,44 +7332,58 @@ SourceRange ASTImporter::Import(SourceRange FromRange) {
}
FileID ASTImporter::Import(FileID FromID) {
- llvm::DenseMap<FileID, FileID>::iterator Pos
- = ImportedFileIDs.find(FromID);
+ llvm::DenseMap<FileID, FileID>::iterator Pos = ImportedFileIDs.find(FromID);
if (Pos != ImportedFileIDs.end())
return Pos->second;
-
+
SourceManager &FromSM = FromContext.getSourceManager();
SourceManager &ToSM = ToContext.getSourceManager();
const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID);
- assert(FromSLoc.isFile() && "Cannot handle macro expansions yet");
-
- // Include location of this file.
- SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
-
- // Map the FileID for to the "to" source manager.
+
+ // Map the FromID to the "to" source manager.
FileID ToID;
- const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
- if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
- // FIXME: We probably want to use getVirtualFile(), so we don't hit the
- // disk again
- // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
- // than mmap the files several times.
- const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName());
- if (!Entry)
- return FileID();
- ToID = ToSM.createFileID(Entry, ToIncludeLoc,
- FromSLoc.getFile().getFileCharacteristic());
+ if (FromSLoc.isExpansion()) {
+ const SrcMgr::ExpansionInfo &FromEx = FromSLoc.getExpansion();
+ SourceLocation ToSpLoc = Import(FromEx.getSpellingLoc());
+ SourceLocation ToExLocS = Import(FromEx.getExpansionLocStart());
+ unsigned TokenLen = FromSM.getFileIDSize(FromID);
+ SourceLocation MLoc;
+ if (FromEx.isMacroArgExpansion()) {
+ MLoc = ToSM.createMacroArgExpansionLoc(ToSpLoc, ToExLocS, TokenLen);
+ } else {
+ SourceLocation ToExLocE = Import(FromEx.getExpansionLocEnd());
+ MLoc = ToSM.createExpansionLoc(ToSpLoc, ToExLocS, ToExLocE, TokenLen,
+ FromEx.isExpansionTokenRange());
+ }
+ ToID = ToSM.getFileID(MLoc);
} else {
- // FIXME: We want to re-use the existing MemoryBuffer!
- const llvm::MemoryBuffer *
- FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
- std::unique_ptr<llvm::MemoryBuffer> ToBuf
- = llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
- FromBuf->getBufferIdentifier());
- ToID = ToSM.createFileID(std::move(ToBuf),
- FromSLoc.getFile().getFileCharacteristic());
+ // Include location of this file.
+ SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
+
+ const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
+ if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
+ // FIXME: We probably want to use getVirtualFile(), so we don't hit the
+ // disk again
+ // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
+ // than mmap the files several times.
+ const FileEntry *Entry =
+ ToFileManager.getFile(Cache->OrigEntry->getName());
+ if (!Entry)
+ return {};
+ ToID = ToSM.createFileID(Entry, ToIncludeLoc,
+ FromSLoc.getFile().getFileCharacteristic());
+ } else {
+ // FIXME: We want to re-use the existing MemoryBuffer!
+ const llvm::MemoryBuffer *FromBuf =
+ Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
+ std::unique_ptr<llvm::MemoryBuffer> ToBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
+ FromBuf->getBufferIdentifier());
+ ToID = ToSM.createFileID(std::move(ToBuf),
+ FromSLoc.getFile().getFileCharacteristic());
+ }
}
-
-
+
ImportedFileIDs[FromID] = ToID;
return ToID;
}
@@ -6565,8 +7404,7 @@ CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
From->isPackExpansion() ? Import(From->getEllipsisLoc())
: SourceLocation());
} else if (From->isMemberInitializer()) {
- FieldDecl *ToField =
- llvm::cast_or_null<FieldDecl>(Import(From->getMember()));
+ auto *ToField = cast_or_null<FieldDecl>(Import(From->getMember()));
if (!ToField && From->getMember())
return nullptr;
@@ -6574,7 +7412,7 @@ CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
ToContext, ToField, Import(From->getMemberLocation()),
Import(From->getLParenLoc()), ToExpr, Import(From->getRParenLoc()));
} else if (From->isIndirectMemberInitializer()) {
- IndirectFieldDecl *ToIField = llvm::cast_or_null<IndirectFieldDecl>(
+ auto *ToIField = cast_or_null<IndirectFieldDecl>(
Import(From->getIndirectMember()));
if (!ToIField && From->getIndirectMember())
return nullptr;
@@ -6595,7 +7433,6 @@ CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
}
}
-
CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
auto Pos = ImportedCXXBaseSpecifiers.find(BaseSpec);
if (Pos != ImportedCXXBaseSpecifiers.end())
@@ -6616,10 +7453,10 @@ void ASTImporter::ImportDefinition(Decl *From) {
if (!To)
return;
- if (DeclContext *FromDC = cast<DeclContext>(From)) {
+ if (auto *FromDC = cast<DeclContext>(From)) {
ASTNodeImporter Importer(*this);
- if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) {
+ if (auto *ToRecord = dyn_cast<RecordDecl>(To)) {
if (!ToRecord->getDefinition()) {
Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
ASTNodeImporter::IDK_Everything);
@@ -6627,7 +7464,7 @@ void ASTImporter::ImportDefinition(Decl *From) {
}
}
- if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) {
+ if (auto *ToEnum = dyn_cast<EnumDecl>(To)) {
if (!ToEnum->getDefinition()) {
Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
ASTNodeImporter::IDK_Everything);
@@ -6635,7 +7472,7 @@ void ASTImporter::ImportDefinition(Decl *From) {
}
}
- if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
+ if (auto *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
if (!ToIFace->getDefinition()) {
Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
ASTNodeImporter::IDK_Everything);
@@ -6643,7 +7480,7 @@ void ASTImporter::ImportDefinition(Decl *From) {
}
}
- if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
+ if (auto *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
if (!ToProto->getDefinition()) {
Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
ASTNodeImporter::IDK_Everything);
@@ -6657,7 +7494,7 @@ void ASTImporter::ImportDefinition(Decl *From) {
DeclarationName ASTImporter::Import(DeclarationName FromName) {
if (!FromName)
- return DeclarationName();
+ return {};
switch (FromName.getNameKind()) {
case DeclarationName::Identifier:
@@ -6671,7 +7508,7 @@ DeclarationName ASTImporter::Import(DeclarationName FromName) {
case DeclarationName::CXXConstructorName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
- return DeclarationName();
+ return {};
return ToContext.DeclarationNames.getCXXConstructorName(
ToContext.getCanonicalType(T));
@@ -6680,24 +7517,24 @@ DeclarationName ASTImporter::Import(DeclarationName FromName) {
case DeclarationName::CXXDestructorName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
- return DeclarationName();
+ return {};
return ToContext.DeclarationNames.getCXXDestructorName(
ToContext.getCanonicalType(T));
}
case DeclarationName::CXXDeductionGuideName: {
- TemplateDecl *Template = cast_or_null<TemplateDecl>(
+ auto *Template = cast_or_null<TemplateDecl>(
Import(FromName.getCXXDeductionGuideTemplate()));
if (!Template)
- return DeclarationName();
+ return {};
return ToContext.DeclarationNames.getCXXDeductionGuideName(Template);
}
case DeclarationName::CXXConversionFunctionName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
- return DeclarationName();
+ return {};
return ToContext.DeclarationNames.getCXXConversionFunctionName(
ToContext.getCanonicalType(T));
@@ -6733,7 +7570,7 @@ IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
Selector ASTImporter::Import(Selector FromSel) {
if (FromSel.isNull())
- return Selector();
+ return {};
SmallVector<IdentifierInfo *, 4> Idents;
Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
@@ -6767,36 +7604,31 @@ DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
}
void ASTImporter::CompleteDecl (Decl *D) {
- if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (auto *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
if (!ID->getDefinition())
ID->startDefinition();
}
- else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ else if (auto *PD = dyn_cast<ObjCProtocolDecl>(D)) {
if (!PD->getDefinition())
PD->startDefinition();
}
- else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ else if (auto *TD = dyn_cast<TagDecl>(D)) {
if (!TD->getDefinition() && !TD->isBeingDefined()) {
TD->startDefinition();
TD->setCompleteDefinition(true);
}
}
else {
- assert (0 && "CompleteDecl called on a Decl that can't be completed");
+ assert(0 && "CompleteDecl called on a Decl that can't be completed");
}
}
-Decl *ASTImporter::Imported(Decl *From, Decl *To) {
- if (From->hasAttrs()) {
- for (Attr *FromAttr : From->getAttrs())
- To->addAttr(FromAttr->clone(To->getASTContext()));
- }
- if (From->isUsed()) {
- To->setIsUsed();
- }
- if (From->isImplicit()) {
- To->setImplicit();
- }
+Decl *ASTImporter::MapImported(Decl *From, Decl *To) {
+ llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(From);
+ assert((Pos == ImportedDecls.end() || Pos->second == To) &&
+ "Try to import an already imported Decl");
+ if (Pos != ImportedDecls.end())
+ return Pos->second;
ImportedDecls[From] = To;
return To;
}
@@ -6809,6 +7641,7 @@ bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To,
return true;
StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls,
- false, Complain);
- return Ctx.IsStructurallyEquivalent(From, To);
+ getStructuralEquivalenceKind(*this), false,
+ Complain);
+ return Ctx.IsEquivalent(From, To);
}
diff --git a/lib/AST/ASTStructuralEquivalence.cpp b/lib/AST/ASTStructuralEquivalence.cpp
index 0df8e5653f3b..7853ab28810b 100644
--- a/lib/AST/ASTStructuralEquivalence.cpp
+++ b/lib/AST/ASTStructuralEquivalence.cpp
@@ -1,4 +1,4 @@
-//===--- ASTStructuralEquivalence.cpp - -------------------------*- C++ -*-===//
+//===- ASTStructuralEquivalence.cpp ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,20 +10,87 @@
// This file implements the StructuralEquivalenceContext class and helper
// functions for layout matching.
//
+// The structural equivalence check could have been implemented as a parallel
+// BFS on a pair of graphs. That seems to have been the original intent.
+// Consider this simple BFS algorithm, starting from the source vertex `s`:
+// ```
+// void bfs(Graph G, int s)
+// {
+// Queue<Integer> queue = new Queue<Integer>();
+// marked[s] = true; // Mark the source
+// queue.enqueue(s); // and put it on the queue.
+//   while (!queue.isEmpty()) {
+// int v = queue.dequeue(); // Remove next vertex from the queue.
+// for (int w : G.adj(v))
+// if (!marked[w]) // For every unmarked adjacent vertex,
+// {
+// marked[w] = true;
+// queue.enqueue(w);
+// }
+// }
+// }
+// ```
+// Indeed, our check has its queue, which holds pairs of nodes, one from each
+// graph; this is `DeclsToCheck`, and each node's counterpart is stored in
+// `TentativeEquivalences`. `TentativeEquivalences` also plays the role of the
+// marking (`marked`) functionality above: we use it to check whether we've
+// already seen a pair of nodes.
+//
+// We put elements into the queue only in the top-level decl check function:
+// ```
+// static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+// Decl *D1, Decl *D2);
+// ```
+// The `while` loop in which we iterate over the children is implemented in
+// `Finish()`. `Finish` is called only from the two **member** functions that
+// check the equivalence of two Decls or two Types. ASTImporter (and other
+// clients) call only these functions.
+//
+// The `static` implementation functions are called from `Finish`; they push
+// the child nodes onto the queue via `static bool
+// IsStructurallyEquivalent(StructuralEquivalenceContext &Context, Decl *D1,
+// Decl *D2)`. So far so good; this is almost exactly the BFS. However, if we
+// let a static implementation function call `Finish` via another **member**
+// function, we end up with two nested while loops, each working on the same
+// queue. That is wrong, and nobody can reason about what it does. Thus,
+// static implementation functions must not call the **member** functions.
+//
+// So `TentativeEquivalences` now plays two roles: it stores the second half
+// of each decl pair we want to compare, and it closes the recursion. In the
+// long term, we could refactor structural equivalence to look more like the
+// traditional BFS.
+//
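+// As a rough illustration only, here is a minimal C++ sketch of the
+// pair-queue BFS described above. The `NodePair`, `locallyEquivalent` and
+// `children` helpers are hypothetical stand-ins, not entities of this file:
+// ```
+// #include <deque>
+// #include <set>
+// #include <utility>
+// #include <vector>
+//
+// using NodePair = std::pair<int, int>; // one node from each graph
+//
+// bool locallyEquivalent(int N1, int N2);     // hypothetical node comparison
+// std::vector<NodePair> children(NodePair P); // hypothetical child pairs
+//
+// bool isEquivalent(int S1, int S2) {
+//   std::set<NodePair> Tentative;  // the `marked` set (TentativeEquivalences)
+//   std::deque<NodePair> ToCheck;  // the queue (DeclsToCheck)
+//   Tentative.insert({S1, S2});    // mark the source pair
+//   ToCheck.push_back({S1, S2});   // and put it on the queue
+//   while (!ToCheck.empty()) {
+//     NodePair P = ToCheck.front(); // remove the next pair from the queue
+//     ToCheck.pop_front();
+//     if (!locallyEquivalent(P.first, P.second))
+//       return false;
+//     for (NodePair Child : children(P))    // for every adjacent pair,
+//       if (Tentative.insert(Child).second) // if not yet marked,
+//         ToCheck.push_back(Child);         // mark it and enqueue it
+//   }
+//   return true;
+// }
+// ```
+//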
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
-#include "clang/AST/ASTImporter.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
-#include "clang/AST/DeclVisitor.h"
-#include "clang/AST/StmtVisitor.h"
-#include "clang/AST/TypeVisitor.h"
-#include "clang/Basic/SourceManager.h"
-
-namespace {
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <utility>
using namespace clang;
@@ -37,7 +104,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
/// Determine structural equivalence of two expressions.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- Expr *E1, Expr *E2) {
+ const Expr *E1, const Expr *E2) {
if (!E1 || !E2)
return E1 == E2;
@@ -144,6 +211,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
IsStructurallyEquivalent(Context, TS1->getReplacement(),
TS2->getReplacement());
}
+
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage
*P1 = N1.getAsSubstTemplateTemplateParmPack(),
@@ -169,10 +237,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
case TemplateArgument::Type:
- return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
+ return IsStructurallyEquivalent(Context, Arg1.getAsType(), Arg2.getAsType());
case TemplateArgument::Integral:
- if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
+ if (!IsStructurallyEquivalent(Context, Arg1.getIntegralType(),
Arg2.getIntegralType()))
return false;
@@ -180,7 +248,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Arg2.getAsIntegral());
case TemplateArgument::Declaration:
- return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
+ return IsStructurallyEquivalent(Context, Arg1.getAsDecl(), Arg2.getAsDecl());
case TemplateArgument::NullPtr:
return true; // FIXME: Is this correct?
@@ -235,6 +303,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (T1.isNull() || T2.isNull())
return T1.isNull() && T2.isNull();
+ QualType OrigT1 = T1;
+ QualType OrigT2 = T2;
+
if (!Context.StrictTypeSpelling) {
// We aren't being strict about token-to-token equivalence of types,
// so map down to the canonical type.
@@ -298,8 +369,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::LValueReference:
case Type::RValueReference: {
- const ReferenceType *Ref1 = cast<ReferenceType>(T1);
- const ReferenceType *Ref2 = cast<ReferenceType>(T2);
+ const auto *Ref1 = cast<ReferenceType>(T1);
+ const auto *Ref2 = cast<ReferenceType>(T2);
if (Ref1->isSpelledAsLValue() != Ref2->isSpelledAsLValue())
return false;
if (Ref1->isInnerRef() != Ref2->isInnerRef())
@@ -311,8 +382,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::MemberPointer: {
- const MemberPointerType *MemPtr1 = cast<MemberPointerType>(T1);
- const MemberPointerType *MemPtr2 = cast<MemberPointerType>(T2);
+ const auto *MemPtr1 = cast<MemberPointerType>(T1);
+ const auto *MemPtr2 = cast<MemberPointerType>(T2);
if (!IsStructurallyEquivalent(Context, MemPtr1->getPointeeType(),
MemPtr2->getPointeeType()))
return false;
@@ -323,8 +394,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::ConstantArray: {
- const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1);
- const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2);
+ const auto *Array1 = cast<ConstantArrayType>(T1);
+ const auto *Array2 = cast<ConstantArrayType>(T2);
if (!llvm::APInt::isSameValue(Array1->getSize(), Array2->getSize()))
return false;
@@ -340,8 +411,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
case Type::VariableArray: {
- const VariableArrayType *Array1 = cast<VariableArrayType>(T1);
- const VariableArrayType *Array2 = cast<VariableArrayType>(T2);
+ const auto *Array1 = cast<VariableArrayType>(T1);
+ const auto *Array2 = cast<VariableArrayType>(T2);
if (!IsStructurallyEquivalent(Context, Array1->getSizeExpr(),
Array2->getSizeExpr()))
return false;
@@ -353,8 +424,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::DependentSizedArray: {
- const DependentSizedArrayType *Array1 = cast<DependentSizedArrayType>(T1);
- const DependentSizedArrayType *Array2 = cast<DependentSizedArrayType>(T2);
+ const auto *Array1 = cast<DependentSizedArrayType>(T1);
+ const auto *Array2 = cast<DependentSizedArrayType>(T2);
if (!IsStructurallyEquivalent(Context, Array1->getSizeExpr(),
Array2->getSizeExpr()))
return false;
@@ -366,10 +437,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::DependentAddressSpace: {
- const DependentAddressSpaceType *DepAddressSpace1 =
- cast<DependentAddressSpaceType>(T1);
- const DependentAddressSpaceType *DepAddressSpace2 =
- cast<DependentAddressSpaceType>(T2);
+ const auto *DepAddressSpace1 = cast<DependentAddressSpaceType>(T1);
+ const auto *DepAddressSpace2 = cast<DependentAddressSpaceType>(T2);
if (!IsStructurallyEquivalent(Context, DepAddressSpace1->getAddrSpaceExpr(),
DepAddressSpace2->getAddrSpaceExpr()))
return false;
@@ -381,10 +450,22 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::DependentSizedExtVector: {
- const DependentSizedExtVectorType *Vec1 =
- cast<DependentSizedExtVectorType>(T1);
- const DependentSizedExtVectorType *Vec2 =
- cast<DependentSizedExtVectorType>(T2);
+ const auto *Vec1 = cast<DependentSizedExtVectorType>(T1);
+ const auto *Vec2 = cast<DependentSizedExtVectorType>(T2);
+ if (!IsStructurallyEquivalent(Context, Vec1->getSizeExpr(),
+ Vec2->getSizeExpr()))
+ return false;
+ if (!IsStructurallyEquivalent(Context, Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ break;
+ }
+
+ case Type::DependentVector: {
+ const auto *Vec1 = cast<DependentVectorType>(T1);
+ const auto *Vec2 = cast<DependentVectorType>(T2);
+ if (Vec1->getVectorKind() != Vec2->getVectorKind())
+ return false;
if (!IsStructurallyEquivalent(Context, Vec1->getSizeExpr(),
Vec2->getSizeExpr()))
return false;
@@ -396,8 +477,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::Vector:
case Type::ExtVector: {
- const VectorType *Vec1 = cast<VectorType>(T1);
- const VectorType *Vec2 = cast<VectorType>(T2);
+ const auto *Vec1 = cast<VectorType>(T1);
+ const auto *Vec2 = cast<VectorType>(T2);
if (!IsStructurallyEquivalent(Context, Vec1->getElementType(),
Vec2->getElementType()))
return false;
@@ -409,8 +490,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::FunctionProto: {
- const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
- const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
+ const auto *Proto1 = cast<FunctionProtoType>(T1);
+ const auto *Proto2 = cast<FunctionProtoType>(T2);
+
if (Proto1->getNumParams() != Proto2->getNumParams())
return false;
for (unsigned I = 0, N = Proto1->getNumParams(); I != N; ++I) {
@@ -420,31 +502,41 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
if (Proto1->isVariadic() != Proto2->isVariadic())
return false;
- if (Proto1->getExceptionSpecType() != Proto2->getExceptionSpecType())
+
+ if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
return false;
- if (Proto1->getExceptionSpecType() == EST_Dynamic) {
- if (Proto1->getNumExceptions() != Proto2->getNumExceptions())
+
+ // Check exceptions, this information is lost in canonical type.
+ const auto *OrigProto1 =
+ cast<FunctionProtoType>(OrigT1.getDesugaredType(Context.FromCtx));
+ const auto *OrigProto2 =
+ cast<FunctionProtoType>(OrigT2.getDesugaredType(Context.ToCtx));
+ auto Spec1 = OrigProto1->getExceptionSpecType();
+ auto Spec2 = OrigProto2->getExceptionSpecType();
+
+ if (Spec1 != Spec2)
+ return false;
+ if (Spec1 == EST_Dynamic) {
+ if (OrigProto1->getNumExceptions() != OrigProto2->getNumExceptions())
return false;
- for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context, Proto1->getExceptionType(I),
- Proto2->getExceptionType(I)))
+ for (unsigned I = 0, N = OrigProto1->getNumExceptions(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context, OrigProto1->getExceptionType(I),
+ OrigProto2->getExceptionType(I)))
return false;
}
- } else if (Proto1->getExceptionSpecType() == EST_ComputedNoexcept) {
- if (!IsStructurallyEquivalent(Context, Proto1->getNoexceptExpr(),
- Proto2->getNoexceptExpr()))
+ } else if (isComputedNoexcept(Spec1)) {
+ if (!IsStructurallyEquivalent(Context, OrigProto1->getNoexceptExpr(),
+ OrigProto2->getNoexceptExpr()))
return false;
}
- if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
- return false;
// Fall through to check the bits common with FunctionNoProtoType.
LLVM_FALLTHROUGH;
}
case Type::FunctionNoProto: {
- const FunctionType *Function1 = cast<FunctionType>(T1);
- const FunctionType *Function2 = cast<FunctionType>(T2);
+ const auto *Function1 = cast<FunctionType>(T1);
+ const auto *Function2 = cast<FunctionType>(T2);
if (!IsStructurallyEquivalent(Context, Function1->getReturnType(),
Function2->getReturnType()))
return false;
@@ -458,7 +550,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<UnresolvedUsingType>(T1)->getDecl(),
cast<UnresolvedUsingType>(T2)->getDecl()))
return false;
-
break;
case Type::Attributed:
@@ -501,7 +592,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::UnaryTransform:
if (!IsStructurallyEquivalent(
Context, cast<UnaryTransformType>(T1)->getUnderlyingType(),
- cast<UnaryTransformType>(T1)->getUnderlyingType()))
+ cast<UnaryTransformType>(T2)->getUnderlyingType()))
return false;
break;
@@ -519,8 +610,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
case Type::DeducedTemplateSpecialization: {
- auto *DT1 = cast<DeducedTemplateSpecializationType>(T1);
- auto *DT2 = cast<DeducedTemplateSpecializationType>(T2);
+ const auto *DT1 = cast<DeducedTemplateSpecializationType>(T1);
+ const auto *DT2 = cast<DeducedTemplateSpecializationType>(T2);
if (!IsStructurallyEquivalent(Context, DT1->getTemplateName(),
DT2->getTemplateName()))
return false;
@@ -538,8 +629,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
case Type::TemplateTypeParm: {
- const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1);
- const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2);
+ const auto *Parm1 = cast<TemplateTypeParmType>(T1);
+ const auto *Parm2 = cast<TemplateTypeParmType>(T2);
if (Parm1->getDepth() != Parm2->getDepth())
return false;
if (Parm1->getIndex() != Parm2->getIndex())
@@ -552,10 +643,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::SubstTemplateTypeParm: {
- const SubstTemplateTypeParmType *Subst1 =
- cast<SubstTemplateTypeParmType>(T1);
- const SubstTemplateTypeParmType *Subst2 =
- cast<SubstTemplateTypeParmType>(T2);
+ const auto *Subst1 = cast<SubstTemplateTypeParmType>(T1);
+ const auto *Subst2 = cast<SubstTemplateTypeParmType>(T2);
if (!IsStructurallyEquivalent(Context,
QualType(Subst1->getReplacedParameter(), 0),
QualType(Subst2->getReplacedParameter(), 0)))
@@ -567,10 +656,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::SubstTemplateTypeParmPack: {
- const SubstTemplateTypeParmPackType *Subst1 =
- cast<SubstTemplateTypeParmPackType>(T1);
- const SubstTemplateTypeParmPackType *Subst2 =
- cast<SubstTemplateTypeParmPackType>(T2);
+ const auto *Subst1 = cast<SubstTemplateTypeParmPackType>(T1);
+ const auto *Subst2 = cast<SubstTemplateTypeParmPackType>(T2);
if (!IsStructurallyEquivalent(Context,
QualType(Subst1->getReplacedParameter(), 0),
QualType(Subst2->getReplacedParameter(), 0)))
@@ -580,11 +667,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
}
+
case Type::TemplateSpecialization: {
- const TemplateSpecializationType *Spec1 =
- cast<TemplateSpecializationType>(T1);
- const TemplateSpecializationType *Spec2 =
- cast<TemplateSpecializationType>(T2);
+ const auto *Spec1 = cast<TemplateSpecializationType>(T1);
+ const auto *Spec2 = cast<TemplateSpecializationType>(T2);
if (!IsStructurallyEquivalent(Context, Spec1->getTemplateName(),
Spec2->getTemplateName()))
return false;
@@ -599,8 +685,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::Elaborated: {
- const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
- const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
+ const auto *Elab1 = cast<ElaboratedType>(T1);
+ const auto *Elab2 = cast<ElaboratedType>(T2);
// CHECKME: what if a keyword is ETK_None or ETK_typename ?
if (Elab1->getKeyword() != Elab2->getKeyword())
return false;
@@ -614,8 +700,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::InjectedClassName: {
- const InjectedClassNameType *Inj1 = cast<InjectedClassNameType>(T1);
- const InjectedClassNameType *Inj2 = cast<InjectedClassNameType>(T2);
+ const auto *Inj1 = cast<InjectedClassNameType>(T1);
+ const auto *Inj2 = cast<InjectedClassNameType>(T2);
if (!IsStructurallyEquivalent(Context,
Inj1->getInjectedSpecializationType(),
Inj2->getInjectedSpecializationType()))
@@ -624,8 +710,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::DependentName: {
- const DependentNameType *Typename1 = cast<DependentNameType>(T1);
- const DependentNameType *Typename2 = cast<DependentNameType>(T2);
+ const auto *Typename1 = cast<DependentNameType>(T1);
+ const auto *Typename2 = cast<DependentNameType>(T2);
if (!IsStructurallyEquivalent(Context, Typename1->getQualifier(),
Typename2->getQualifier()))
return false;
@@ -637,10 +723,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::DependentTemplateSpecialization: {
- const DependentTemplateSpecializationType *Spec1 =
- cast<DependentTemplateSpecializationType>(T1);
- const DependentTemplateSpecializationType *Spec2 =
- cast<DependentTemplateSpecializationType>(T2);
+ const auto *Spec1 = cast<DependentTemplateSpecializationType>(T1);
+ const auto *Spec2 = cast<DependentTemplateSpecializationType>(T2);
if (!IsStructurallyEquivalent(Context, Spec1->getQualifier(),
Spec2->getQualifier()))
return false;
@@ -665,8 +749,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
case Type::ObjCInterface: {
- const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
- const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
+ const auto *Iface1 = cast<ObjCInterfaceType>(T1);
+ const auto *Iface2 = cast<ObjCInterfaceType>(T2);
if (!IsStructurallyEquivalent(Context, Iface1->getDecl(),
Iface2->getDecl()))
return false;
@@ -674,8 +758,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::ObjCTypeParam: {
- const ObjCTypeParamType *Obj1 = cast<ObjCTypeParamType>(T1);
- const ObjCTypeParamType *Obj2 = cast<ObjCTypeParamType>(T2);
+ const auto *Obj1 = cast<ObjCTypeParamType>(T1);
+ const auto *Obj2 = cast<ObjCTypeParamType>(T2);
if (!IsStructurallyEquivalent(Context, Obj1->getDecl(), Obj2->getDecl()))
return false;
@@ -688,9 +772,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
break;
}
+
case Type::ObjCObject: {
- const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
- const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
+ const auto *Obj1 = cast<ObjCObjectType>(T1);
+ const auto *Obj2 = cast<ObjCObjectType>(T2);
if (!IsStructurallyEquivalent(Context, Obj1->getBaseType(),
Obj2->getBaseType()))
return false;
@@ -705,28 +790,25 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
case Type::ObjCObjectPointer: {
- const ObjCObjectPointerType *Ptr1 = cast<ObjCObjectPointerType>(T1);
- const ObjCObjectPointerType *Ptr2 = cast<ObjCObjectPointerType>(T2);
+ const auto *Ptr1 = cast<ObjCObjectPointerType>(T1);
+ const auto *Ptr2 = cast<ObjCObjectPointerType>(T2);
if (!IsStructurallyEquivalent(Context, Ptr1->getPointeeType(),
Ptr2->getPointeeType()))
return false;
break;
}
- case Type::Atomic: {
+ case Type::Atomic:
if (!IsStructurallyEquivalent(Context, cast<AtomicType>(T1)->getValueType(),
cast<AtomicType>(T2)->getValueType()))
return false;
break;
- }
- case Type::Pipe: {
+ case Type::Pipe:
if (!IsStructurallyEquivalent(Context, cast<PipeType>(T1)->getElementType(),
cast<PipeType>(T2)->getElementType()))
return false;
break;
- }
-
} // end switch
return true;
@@ -735,7 +817,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
/// Determine structural equivalence of two fields.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
FieldDecl *Field1, FieldDecl *Field2) {
- RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+ const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
// For anonymous structs/unions, match up the anonymous struct/union type
// declarations directly, so that we don't go off searching for anonymous
@@ -829,6 +911,56 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+/// Determine structural equivalence of two methods.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ CXXMethodDecl *Method1,
+ CXXMethodDecl *Method2) {
+ bool PropertiesEqual =
+ Method1->getDeclKind() == Method2->getDeclKind() &&
+ Method1->getRefQualifier() == Method2->getRefQualifier() &&
+ Method1->getAccess() == Method2->getAccess() &&
+ Method1->getOverloadedOperator() == Method2->getOverloadedOperator() &&
+ Method1->isStatic() == Method2->isStatic() &&
+ Method1->isConst() == Method2->isConst() &&
+ Method1->isVolatile() == Method2->isVolatile() &&
+ Method1->isVirtual() == Method2->isVirtual() &&
+ Method1->isPure() == Method2->isPure() &&
+ Method1->isDefaulted() == Method2->isDefaulted() &&
+ Method1->isDeleted() == Method2->isDeleted();
+ if (!PropertiesEqual)
+ return false;
+ // FIXME: Check for 'final'.
+
+ if (auto *Constructor1 = dyn_cast<CXXConstructorDecl>(Method1)) {
+ auto *Constructor2 = cast<CXXConstructorDecl>(Method2);
+ if (Constructor1->isExplicit() != Constructor2->isExplicit())
+ return false;
+ }
+
+ if (auto *Conversion1 = dyn_cast<CXXConversionDecl>(Method1)) {
+ auto *Conversion2 = cast<CXXConversionDecl>(Method2);
+ if (Conversion1->isExplicit() != Conversion2->isExplicit())
+ return false;
+ if (!IsStructurallyEquivalent(Context, Conversion1->getConversionType(),
+ Conversion2->getConversionType()))
+ return false;
+ }
+
+ const IdentifierInfo *Name1 = Method1->getIdentifier();
+ const IdentifierInfo *Name2 = Method2->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2)) {
+ return false;
+ // TODO: Names do not match; add a warning like in the check for FieldDecl.
+ }
+
+ // Check the prototypes.
+ if (!::IsStructurallyEquivalent(Context,
+ Method1->getType(), Method2->getType()))
+ return false;
+
+ return true;
+}
+
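An illustrative pair (hypothetical source, not from the patch) that the property check above rejects: methods that agree in name and parameters but differ in one of the compared properties, here constness:

    // Translation unit 1:        // Translation unit 2:
    struct X {                    struct X {
      int get() const;              int get();  // isConst() differs, so the
    };                            };            // methods are not equivalent
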
/// Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
@@ -845,7 +977,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
- if (D1->isAnonymousStructOrUnion() && D2->isAnonymousStructOrUnion()) {
+ if (!D1->getDeclName() && !D2->getDeclName()) {
// If both anonymous structs/unions are in a record context, make sure
// they occur in the same location in the context records.
if (Optional<unsigned> Index1 =
@@ -861,10 +993,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// If both declarations are class template specializations, we know
// the ODR applies, so check the template and template arguments.
- ClassTemplateSpecializationDecl *Spec1 =
- dyn_cast<ClassTemplateSpecializationDecl>(D1);
- ClassTemplateSpecializationDecl *Spec2 =
- dyn_cast<ClassTemplateSpecializationDecl>(D2);
+ const auto *Spec1 = dyn_cast<ClassTemplateSpecializationDecl>(D1);
+ const auto *Spec2 = dyn_cast<ClassTemplateSpecializationDecl>(D2);
if (Spec1 && Spec2) {
// Check that the specialized templates are the same.
if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
@@ -892,8 +1022,17 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!D1 || !D2)
return true;
- if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
- if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
+ // If either record has external storage and we do a minimal check (or
+ // AST import), we assume they are equivalent. (If we didn't have this
+ // assumption then `RecordDecl::LoadFieldsFromExternalStorage` could trigger
+ // another AST import which in turn would call the structural equivalency
+ // check again, and finally we'd have an improper result.)
+ if (Context.EqKind == StructuralEquivalenceKind::Minimal)
+ if (D1->hasExternalLexicalStorage() || D2->hasExternalLexicalStorage())
+ return true;
+
+ if (auto *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
+ if (auto *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
if (D1CXX->hasExternalLexicalStorage() &&
!D1CXX->isCompleteDefinition()) {
D1CXX->getASTContext().getExternalSource()->CompleteType(D1CXX);
@@ -944,6 +1083,44 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
}
+
+ // Check the friends for consistency.
+ CXXRecordDecl::friend_iterator Friend2 = D2CXX->friend_begin(),
+ Friend2End = D2CXX->friend_end();
+ for (CXXRecordDecl::friend_iterator Friend1 = D1CXX->friend_begin(),
+ Friend1End = D1CXX->friend_end();
+ Friend1 != Friend1End; ++Friend1, ++Friend2) {
+ if (Friend2 == Friend2End) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(),
+ diag::warn_odr_tag_type_inconsistent)
+ << Context.ToCtx.getTypeDeclType(D2CXX);
+ Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_friend);
+ }
+ return false;
+ }
+
+ if (!IsStructurallyEquivalent(Context, *Friend1, *Friend2)) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.ToCtx.getTypeDeclType(D2CXX);
+ Context.Diag1((*Friend1)->getFriendLoc(), diag::note_odr_friend);
+ Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
+ }
+ return false;
+ }
+ }
+
+ if (Friend2 != Friend2End) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.ToCtx.getTypeDeclType(D2);
+ Context.Diag2((*Friend2)->getFriendLoc(), diag::note_odr_friend);
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_friend);
+ }
+ return false;
+ }
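For example (hypothetical source), the friend check above flags records whose friend lists diverge across translation units:

    // Translation unit 1:        // Translation unit 2:
    struct S {                    struct S {
      friend void f(S);             friend void g(S);  // mismatched friend:
    };                            };                   // note_odr_friend
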
} else if (D1CXX->getNumBases() > 0) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
@@ -1081,11 +1258,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
- if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
- Params2->getParam(I))) {
-
+ if (!IsStructurallyEquivalent(Context, Params1->getParam(I),
+ Params2->getParam(I)))
return false;
- }
}
return true;
@@ -1121,7 +1296,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
// Check types.
- if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
+ if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType())) {
if (Context.Complain) {
Context.Diag2(D2->getLocation(),
diag::err_odr_non_type_parameter_type_inconsistent)
@@ -1153,17 +1328,64 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
D2->getTemplateParameters());
}
+static bool IsTemplateDeclCommonStructurallyEquivalent(
+ StructuralEquivalenceContext &Ctx, TemplateDecl *D1, TemplateDecl *D2) {
+ if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
+ return false;
+ if (!D1->getIdentifier()) // Special name
+ if (D1->getNameAsString() != D2->getNameAsString())
+ return false;
+ return IsStructurallyEquivalent(Ctx, D1->getTemplateParameters(),
+ D2->getTemplateParameters());
+}
+
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
ClassTemplateDecl *D1,
ClassTemplateDecl *D2) {
// Check template parameters.
- if (!IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
- D2->getTemplateParameters()))
+ if (!IsTemplateDeclCommonStructurallyEquivalent(Context, D1, D2))
+ return false;
+
+ // Check the templated declaration.
+ return IsStructurallyEquivalent(Context, D1->getTemplatedDecl(),
+ D2->getTemplatedDecl());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FunctionTemplateDecl *D1,
+ FunctionTemplateDecl *D2) {
+ // Check template parameters.
+ if (!IsTemplateDeclCommonStructurallyEquivalent(Context, D1, D2))
return false;
// Check the templated declaration.
- return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
- D2->getTemplatedDecl());
+ return IsStructurallyEquivalent(Context, D1->getTemplatedDecl()->getType(),
+ D2->getTemplatedDecl()->getType());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FriendDecl *D1, FriendDecl *D2) {
+ if ((D1->getFriendType() && D2->getFriendDecl()) ||
+ (D1->getFriendDecl() && D2->getFriendType())) {
+ return false;
+ }
+ if (D1->getFriendType() && D2->getFriendType())
+ return IsStructurallyEquivalent(Context,
+ D1->getFriendType()->getType(),
+ D2->getFriendType()->getType());
+ if (D1->getFriendDecl() && D2->getFriendDecl())
+ return IsStructurallyEquivalent(Context, D1->getFriendDecl(),
+ D2->getFriendDecl());
+ return false;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FunctionDecl *D1, FunctionDecl *D2) {
+ // FIXME: Consider checking for function attributes as well.
+ if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType()))
+ return false;
+
+ return true;
}
/// Determine structural equivalence of two declarations.
@@ -1187,9 +1409,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.DeclsToCheck.push_back(D1->getCanonicalDecl());
return true;
}
-} // namespace
-
-namespace clang {
DiagnosticBuilder StructuralEquivalenceContext::Diag1(SourceLocation Loc,
unsigned DiagID) {
@@ -1214,7 +1433,7 @@ StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
ASTContext &Context = Anon->getASTContext();
QualType AnonTy = Context.getRecordType(Anon);
- RecordDecl *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
+ const auto *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
if (!Owner)
return None;
@@ -1234,6 +1453,10 @@ StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
// If the field looks like this:
// struct { ... } A;
QualType FieldType = F->getType();
+ // In case of nested structs, look through any ElaboratedType sugar.
+ while (const auto *ElabType = dyn_cast<ElaboratedType>(FieldType))
+ FieldType = ElabType->getNamedType();
+
if (const auto *RecType = dyn_cast<RecordType>(FieldType)) {
const RecordDecl *RecDecl = RecType->getDecl();
if (RecDecl->getDeclContext() == Owner && !RecDecl->getIdentifier()) {
@@ -1248,16 +1471,26 @@ StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
return Index;
}
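A sketch of the nested case the new ElaboratedType loop handles (illustrative source): the type of a field declared with an untagged struct may reach this lookup wrapped in sugar, which must be stripped before the RecordType test:

    struct Outer {
      struct { int a; } x;  // untagged struct, index 0
      struct { int b; } y;  // untagged struct, index 1; its type may be
    };                      // sugared as ElaboratedType when nested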
-bool StructuralEquivalenceContext::IsStructurallyEquivalent(Decl *D1,
- Decl *D2) {
+bool StructuralEquivalenceContext::IsEquivalent(Decl *D1, Decl *D2) {
+
+ // Ensure that the implementation functions (all static functions in this TU)
+ // never call the public StructuralEquivalenceContext::IsEquivalent()
+ // functions, because that would wreak havoc on the internal state (the
+ // DeclsToCheck and TentativeEquivalences members) and could cause faulty
+ // behaviour. For instance, some leaf declarations could be marked and cached
+ // as inequivalent as a side effect of one inequivalent element in the
+ // DeclsToCheck list.
+ assert(DeclsToCheck.empty());
+ assert(TentativeEquivalences.empty());
+
if (!::IsStructurallyEquivalent(*this, D1, D2))
return false;
return !Finish();
}
-bool StructuralEquivalenceContext::IsStructurallyEquivalent(QualType T1,
- QualType T2) {
+bool StructuralEquivalenceContext::IsEquivalent(QualType T1, QualType T2) {
+ assert(DeclsToCheck.empty());
+ assert(TentativeEquivalences.empty());
if (!::IsStructurallyEquivalent(*this, T1, T2))
return false;
@@ -1277,8 +1510,8 @@ bool StructuralEquivalenceContext::Finish() {
// FIXME: Switch on all declaration kinds. For now, we're just going to
// check the obvious ones.
- if (RecordDecl *Record1 = dyn_cast<RecordDecl>(D1)) {
- if (RecordDecl *Record2 = dyn_cast<RecordDecl>(D2)) {
+ if (auto *Record1 = dyn_cast<RecordDecl>(D1)) {
+ if (auto *Record2 = dyn_cast<RecordDecl>(D2)) {
// Check for equivalent structure names.
IdentifierInfo *Name1 = Record1->getIdentifier();
if (!Name1 && Record1->getTypedefNameForAnonDecl())
@@ -1293,8 +1526,8 @@ bool StructuralEquivalenceContext::Finish() {
// Record/non-record mismatch.
Equivalent = false;
}
- } else if (EnumDecl *Enum1 = dyn_cast<EnumDecl>(D1)) {
- if (EnumDecl *Enum2 = dyn_cast<EnumDecl>(D2)) {
+ } else if (auto *Enum1 = dyn_cast<EnumDecl>(D1)) {
+ if (auto *Enum2 = dyn_cast<EnumDecl>(D2)) {
// Check for equivalent enum names.
IdentifierInfo *Name1 = Enum1->getIdentifier();
if (!Name1 && Enum1->getTypedefNameForAnonDecl())
@@ -1309,8 +1542,8 @@ bool StructuralEquivalenceContext::Finish() {
// Enum/non-enum mismatch
Equivalent = false;
}
- } else if (TypedefNameDecl *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
- if (TypedefNameDecl *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
+ } else if (const auto *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
+ if (const auto *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
Typedef2->getIdentifier()) ||
!::IsStructurallyEquivalent(*this, Typedef1->getUnderlyingType(),
@@ -1320,46 +1553,75 @@ bool StructuralEquivalenceContext::Finish() {
// Typedef/non-typedef mismatch.
Equivalent = false;
}
- } else if (ClassTemplateDecl *ClassTemplate1 =
- dyn_cast<ClassTemplateDecl>(D1)) {
- if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
- if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
- ClassTemplate2->getIdentifier()) ||
- !::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
+ } else if (auto *ClassTemplate1 = dyn_cast<ClassTemplateDecl>(D1)) {
+ if (auto *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, ClassTemplate1,
+ ClassTemplate2))
+ Equivalent = false;
+ } else {
+ // Class template/non-class-template mismatch.
+ Equivalent = false;
+ }
+ } else if (auto *FunctionTemplate1 = dyn_cast<FunctionTemplateDecl>(D1)) {
+ if (auto *FunctionTemplate2 = dyn_cast<FunctionTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, FunctionTemplate1,
+ FunctionTemplate2))
Equivalent = false;
} else {
// Function template/non-function-template mismatch.
Equivalent = false;
}
- } else if (TemplateTypeParmDecl *TTP1 =
- dyn_cast<TemplateTypeParmDecl>(D1)) {
- if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
+ } else if (auto *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
+ if (auto *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
Equivalent = false;
} else {
// Kind mismatch.
Equivalent = false;
}
- } else if (NonTypeTemplateParmDecl *NTTP1 =
- dyn_cast<NonTypeTemplateParmDecl>(D1)) {
- if (NonTypeTemplateParmDecl *NTTP2 =
- dyn_cast<NonTypeTemplateParmDecl>(D2)) {
+ } else if (auto *NTTP1 = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
+ if (auto *NTTP2 = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
Equivalent = false;
} else {
// Kind mismatch.
Equivalent = false;
}
- } else if (TemplateTemplateParmDecl *TTP1 =
- dyn_cast<TemplateTemplateParmDecl>(D1)) {
- if (TemplateTemplateParmDecl *TTP2 =
- dyn_cast<TemplateTemplateParmDecl>(D2)) {
+ } else if (auto *TTP1 = dyn_cast<TemplateTemplateParmDecl>(D1)) {
+ if (auto *TTP2 = dyn_cast<TemplateTemplateParmDecl>(D2)) {
if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
Equivalent = false;
} else {
// Kind mismatch.
Equivalent = false;
}
+ } else if (auto *MD1 = dyn_cast<CXXMethodDecl>(D1)) {
+ if (auto *MD2 = dyn_cast<CXXMethodDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, MD1, MD2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (FunctionDecl *FD1 = dyn_cast<FunctionDecl>(D1)) {
+ if (FunctionDecl *FD2 = dyn_cast<FunctionDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(FD1->getIdentifier(),
+ FD2->getIdentifier()))
+ Equivalent = false;
+ if (!::IsStructurallyEquivalent(*this, FD1, FD2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (FriendDecl *FrD1 = dyn_cast<FriendDecl>(D1)) {
+ if (FriendDecl *FrD2 = dyn_cast<FriendDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, FrD1, FrD2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
}
if (!Equivalent) {
@@ -1374,4 +1636,3 @@ bool StructuralEquivalenceContext::Finish() {
return false;
}
-} // namespace clang
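After this rename, callers use the context like the following sketch (the construction of the context is abbreviated; its arguments are not shown in this patch):

    StructuralEquivalenceContext Ctx = /* ... */;
    if (Ctx.IsEquivalent(D1, D2))  // formerly Ctx.IsStructurallyEquivalent()
      ;                            // D1 and D2 are structurally equivalent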
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index a6f1027856c7..4f868a3af59e 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -20,6 +20,7 @@ add_clang_library(clangAST
CommentLexer.cpp
CommentParser.cpp
CommentSema.cpp
+ ComparisonCategories.cpp
DataCollection.cpp
Decl.cpp
DeclarationName.cpp
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index 24e96ba38015..2825329775ed 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -34,13 +34,13 @@
using namespace clang;
-/// \brief Computes the set of declarations referenced by these base
+/// Computes the set of declarations referenced by these base
/// paths.
void CXXBasePaths::ComputeDeclsFound() {
assert(NumDeclsFound == 0 && !DeclsFound &&
"Already computed the set of declarations");
- llvm::SetVector<NamedDecl *, SmallVector<NamedDecl *, 8>> Decls;
+ llvm::SmallSetVector<NamedDecl *, 8> Decls;
for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
Decls.insert(Path->Decls.front());
@@ -63,8 +63,8 @@ CXXBasePaths::decl_range CXXBasePaths::found_decls() {
/// an unqualified, canonical class type.
bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
BaseType = BaseType.getUnqualifiedType();
- std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
- return Subobjects.second + (Subobjects.first? 1 : 0) > 1;
+ IsVirtBaseAndNumberNonVirtBases Subobjects = ClassSubobjects[BaseType];
+ return Subobjects.NumberOfNonVirtBases + (Subobjects.IsVirtBase ? 1 : 0) > 1;
}
/// clear - Clear out all prior path information.
@@ -76,7 +76,7 @@ void CXXBasePaths::clear() {
DetectedVirtual = nullptr;
}
-/// @brief Swaps the contents of this CXXBasePaths structure with the
+/// Swaps the contents of this CXXBasePaths structure with the
/// contents of Other.
void CXXBasePaths::swap(CXXBasePaths &Other) {
std::swap(Origin, Other.Origin);
@@ -217,21 +217,21 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
// Determine whether we need to visit this base class at all,
// updating the count of subobjects appropriately.
- std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ IsVirtBaseAndNumberNonVirtBases &Subobjects = ClassSubobjects[BaseType];
bool VisitBase = true;
bool SetVirtual = false;
if (BaseSpec.isVirtual()) {
- VisitBase = !Subobjects.first;
- Subobjects.first = true;
+ VisitBase = !Subobjects.IsVirtBase;
+ Subobjects.IsVirtBase = true;
if (isDetectingVirtual() && DetectedVirtual == nullptr) {
// If this is the first virtual we find, remember it. If it turns out
// there is no base path here, we'll reset it later.
DetectedVirtual = BaseType->getAs<RecordType>();
SetVirtual = true;
}
- } else
- ++Subobjects.second;
-
+ } else {
+ ++Subobjects.NumberOfNonVirtBases;
+ }
if (isRecordingPaths()) {
// Add this base specifier to the current path.
CXXBasePathElement Element;
@@ -240,7 +240,7 @@ bool CXXBasePaths::lookupInBases(ASTContext &Context,
if (BaseSpec.isVirtual())
Element.SubobjectNumber = 0;
else
- Element.SubobjectNumber = Subobjects.second;
+ Element.SubobjectNumber = Subobjects.NumberOfNonVirtBases;
ScratchPath.push_back(Element);
// Calculate the "top-down" access to this base class.
@@ -567,11 +567,11 @@ void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) {
namespace {
class FinalOverriderCollector {
- /// \brief The number of subobjects of a given class type that
+ /// The number of subobjects of a given class type that
/// occur within the class hierarchy.
llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount;
- /// \brief Overriders for each virtual base subobject.
+ /// Overriders for each virtual base subobject.
llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders;
CXXFinalOverriderMap FinalOverriders;
@@ -637,8 +637,7 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
OMEnd = BaseOverriders->end();
OM != OMEnd;
++OM) {
- const CXXMethodDecl *CanonOM
- = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
+ const CXXMethodDecl *CanonOM = OM->first->getCanonicalDecl();
Overriders[CanonOM].add(OM->second);
}
}
@@ -649,7 +648,7 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
if (!M->isVirtual())
continue;
- CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());
+ CXXMethodDecl *CanonM = M->getCanonicalDecl();
using OverriddenMethodsRange =
llvm::iterator_range<CXXMethodDecl::method_iterator>;
OverriddenMethodsRange OverriddenMethods = CanonM->overridden_methods();
diff --git a/lib/AST/CommentBriefParser.cpp b/lib/AST/CommentBriefParser.cpp
index eecea8fc11df..5ec7586a475d 100644
--- a/lib/AST/CommentBriefParser.cpp
+++ b/lib/AST/CommentBriefParser.cpp
@@ -122,8 +122,8 @@ std::string BriefParser::Parse() {
if (Tok.is(tok::newline)) {
ConsumeToken();
// We found a paragraph end. This ends the brief description if
- // \\brief command or its equivalent was explicitly used.
- // Stop scanning text because an explicit \\brief paragraph is the
+ // a \brief command or its equivalent was explicitly used.
+ // Stop scanning text because an explicit \brief paragraph is the
// preferred one.
if (InBrief)
break;
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index 65d0f56f09ab..6ff4d45a9572 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -294,6 +294,39 @@ void Lexer::lexCommentText(Token &T) {
assert(CommentState == LCS_InsideBCPLComment ||
CommentState == LCS_InsideCComment);
+ // Handles lexing non-command text, i.e. text and newline.
+ auto HandleNonCommandToken = [&]() -> void {
+ assert(State == LS_Normal);
+
+ const char *TokenPtr = BufferPtr;
+ assert(TokenPtr < CommentEnd);
+ switch (*TokenPtr) {
+ case '\n':
+ case '\r':
+ TokenPtr = skipNewline(TokenPtr, CommentEnd);
+ formTokenWithChars(T, TokenPtr, tok::newline);
+
+ if (CommentState == LCS_InsideCComment)
+ skipLineStartingDecorations();
+ return;
+
+ default: {
+ StringRef TokStartSymbols = ParseCommands ? "\n\r\\@&<" : "\n\r";
+ size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr)
+ .find_first_of(TokStartSymbols);
+ if (End != StringRef::npos)
+ TokenPtr += End;
+ else
+ TokenPtr = CommentEnd;
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ }
+ };
+
+ if (!ParseCommands)
+ return HandleNonCommandToken();
+
switch (State) {
case LS_Normal:
break;
@@ -315,136 +348,116 @@ void Lexer::lexCommentText(Token &T) {
}
assert(State == LS_Normal);
-
const char *TokenPtr = BufferPtr;
assert(TokenPtr < CommentEnd);
- while (TokenPtr != CommentEnd) {
- switch(*TokenPtr) {
- case '\\':
- case '@': {
- // Commands that start with a backslash and commands that start with
- // 'at' have equivalent semantics. But we keep information about the
- // exact syntax in AST for comments.
- tok::TokenKind CommandKind =
- (*TokenPtr == '@') ? tok::at_command : tok::backslash_command;
+ switch(*TokenPtr) {
+ case '\\':
+ case '@': {
+ // Commands that start with a backslash and commands that start with
+ // 'at' have equivalent semantics. But we keep information about the
+ // exact syntax in AST for comments.
+ tok::TokenKind CommandKind =
+ (*TokenPtr == '@') ? tok::at_command : tok::backslash_command;
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
+ char C = *TokenPtr;
+ switch (C) {
+ default:
+ break;
+
+ case '\\': case '@': case '&': case '$':
+ case '#': case '<': case '>': case '%':
+ case '\"': case '.': case ':':
+ // This is one of \\ \@ \& \$ etc escape sequences.
TokenPtr++;
- if (TokenPtr == CommentEnd) {
- formTextToken(T, TokenPtr);
- return;
- }
- char C = *TokenPtr;
- switch (C) {
- default:
- break;
-
- case '\\': case '@': case '&': case '$':
- case '#': case '<': case '>': case '%':
- case '\"': case '.': case ':':
- // This is one of \\ \@ \& \$ etc escape sequences.
+ if (C == ':' && TokenPtr != CommentEnd && *TokenPtr == ':') {
+ // This is the \:: escape sequence.
TokenPtr++;
- if (C == ':' && TokenPtr != CommentEnd && *TokenPtr == ':') {
- // This is the \:: escape sequence.
- TokenPtr++;
- }
- StringRef UnescapedText(BufferPtr + 1, TokenPtr - (BufferPtr + 1));
- formTokenWithChars(T, TokenPtr, tok::text);
- T.setText(UnescapedText);
- return;
}
+ StringRef UnescapedText(BufferPtr + 1, TokenPtr - (BufferPtr + 1));
+ formTokenWithChars(T, TokenPtr, tok::text);
+ T.setText(UnescapedText);
+ return;
+ }
- // Don't make zero-length commands.
- if (!isCommandNameStartCharacter(*TokenPtr)) {
- formTextToken(T, TokenPtr);
- return;
- }
+ // Don't make zero-length commands.
+ if (!isCommandNameStartCharacter(*TokenPtr)) {
+ formTextToken(T, TokenPtr);
+ return;
+ }
- TokenPtr = skipCommandName(TokenPtr, CommentEnd);
- unsigned Length = TokenPtr - (BufferPtr + 1);
-
- // Hardcoded support for lexing LaTeX formula commands
- // \f$ \f[ \f] \f{ \f} as a single command.
- if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) {
- C = *TokenPtr;
- if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') {
- TokenPtr++;
- Length++;
- }
- }
+ TokenPtr = skipCommandName(TokenPtr, CommentEnd);
+ unsigned Length = TokenPtr - (BufferPtr + 1);
- StringRef CommandName(BufferPtr + 1, Length);
-
- const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName);
- if (!Info) {
- if ((Info = Traits.getTypoCorrectCommandInfo(CommandName))) {
- StringRef CorrectedName = Info->Name;
- SourceLocation Loc = getSourceLocation(BufferPtr);
- SourceLocation EndLoc = getSourceLocation(TokenPtr);
- SourceRange FullRange = SourceRange(Loc, EndLoc);
- SourceRange CommandRange(Loc.getLocWithOffset(1), EndLoc);
- Diag(Loc, diag::warn_correct_comment_command_name)
- << FullRange << CommandName << CorrectedName
- << FixItHint::CreateReplacement(CommandRange, CorrectedName);
- } else {
- formTokenWithChars(T, TokenPtr, tok::unknown_command);
- T.setUnknownCommandName(CommandName);
- Diag(T.getLocation(), diag::warn_unknown_comment_command_name)
- << SourceRange(T.getLocation(), T.getEndLocation());
- return;
- }
- }
- if (Info->IsVerbatimBlockCommand) {
- setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, Info);
- return;
- }
- if (Info->IsVerbatimLineCommand) {
- setupAndLexVerbatimLine(T, TokenPtr, Info);
- return;
+ // Hardcoded support for lexing LaTeX formula commands
+ // \f$ \f[ \f] \f{ \f} as a single command.
+ if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) {
+ C = *TokenPtr;
+ if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') {
+ TokenPtr++;
+ Length++;
}
- formTokenWithChars(T, TokenPtr, CommandKind);
- T.setCommandID(Info->getID());
- return;
}
- case '&':
- lexHTMLCharacterReference(T);
- return;
-
- case '<': {
- TokenPtr++;
- if (TokenPtr == CommentEnd) {
- formTextToken(T, TokenPtr);
+ StringRef CommandName(BufferPtr + 1, Length);
+
+ const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName);
+ if (!Info) {
+ if ((Info = Traits.getTypoCorrectCommandInfo(CommandName))) {
+ StringRef CorrectedName = Info->Name;
+ SourceLocation Loc = getSourceLocation(BufferPtr);
+ SourceLocation EndLoc = getSourceLocation(TokenPtr);
+ SourceRange FullRange = SourceRange(Loc, EndLoc);
+ SourceRange CommandRange(Loc.getLocWithOffset(1), EndLoc);
+ Diag(Loc, diag::warn_correct_comment_command_name)
+ << FullRange << CommandName << CorrectedName
+ << FixItHint::CreateReplacement(CommandRange, CorrectedName);
+ } else {
+ formTokenWithChars(T, TokenPtr, tok::unknown_command);
+ T.setUnknownCommandName(CommandName);
+ Diag(T.getLocation(), diag::warn_unknown_comment_command_name)
+ << SourceRange(T.getLocation(), T.getEndLocation());
return;
}
- const char C = *TokenPtr;
- if (isHTMLIdentifierStartingCharacter(C))
- setupAndLexHTMLStartTag(T);
- else if (C == '/')
- setupAndLexHTMLEndTag(T);
- else
- formTextToken(T, TokenPtr);
+ }
+ if (Info->IsVerbatimBlockCommand) {
+ setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, Info);
return;
}
-
- case '\n':
- case '\r':
- TokenPtr = skipNewline(TokenPtr, CommentEnd);
- formTokenWithChars(T, TokenPtr, tok::newline);
-
- if (CommentState == LCS_InsideCComment)
- skipLineStartingDecorations();
+ if (Info->IsVerbatimLineCommand) {
+ setupAndLexVerbatimLine(T, TokenPtr, Info);
return;
+ }
+ formTokenWithChars(T, TokenPtr, CommandKind);
+ T.setCommandID(Info->getID());
+ return;
+ }
- default: {
- size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr).
- find_first_of("\n\r\\@&<");
- if (End != StringRef::npos)
- TokenPtr += End;
- else
- TokenPtr = CommentEnd;
+ case '&':
+ lexHTMLCharacterReference(T);
+ return;
+
+ case '<': {
+ TokenPtr++;
+ if (TokenPtr == CommentEnd) {
formTextToken(T, TokenPtr);
return;
}
+ const char C = *TokenPtr;
+ if (isHTMLIdentifierStartingCharacter(C))
+ setupAndLexHTMLStartTag(T);
+ else if (C == '/')
+ setupAndLexHTMLEndTag(T);
+ else
+ formTextToken(T, TokenPtr);
+ return;
}
+
+ default:
+ return HandleNonCommandToken();
}
}
@@ -727,14 +740,13 @@ void Lexer::lexHTMLEndTag(Token &T) {
}
Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, DiagnosticsEngine &Diags,
- const CommandTraits &Traits,
- SourceLocation FileLoc,
- const char *BufferStart, const char *BufferEnd):
- Allocator(Allocator), Diags(Diags), Traits(Traits),
- BufferStart(BufferStart), BufferEnd(BufferEnd),
- FileLoc(FileLoc), BufferPtr(BufferStart),
- CommentState(LCS_BeforeComment), State(LS_Normal) {
-}
+ const CommandTraits &Traits, SourceLocation FileLoc,
+ const char *BufferStart, const char *BufferEnd,
+ bool ParseCommands)
+ : Allocator(Allocator), Diags(Diags), Traits(Traits),
+ BufferStart(BufferStart), BufferEnd(BufferEnd), FileLoc(FileLoc),
+ BufferPtr(BufferStart), CommentState(LCS_BeforeComment), State(LS_Normal),
+ ParseCommands(ParseCommands) {}
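A minimal construction sketch for the new parameter (the surrounding names are assumed to be in scope): with ParseCommands = false the lexer produces only text and newline tokens, so command-like sequences such as \brief come back as plain text.

    comments::Lexer L(Allocator, Diags, Traits, FileLoc,
                      BufferStart, BufferEnd, /*ParseCommands=*/false);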
void Lexer::lex(Token &T) {
again:
diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp
index 6c2019e1a72b..4bc98bf10765 100644
--- a/lib/AST/CommentSema.cpp
+++ b/lib/AST/CommentSema.cpp
@@ -215,7 +215,7 @@ void Sema::checkContainerDecl(const BlockCommandComment *Comment) {
<< Comment->getSourceRange();
}
-/// \brief Turn a string into the corresponding PassDirection or -1 if it's not
+/// Turn a string into the corresponding PassDirection or -1 if it's not
/// valid.
static int getParamPassDirection(StringRef Arg) {
return llvm::StringSwitch<int>(Arg)
diff --git a/lib/AST/ComparisonCategories.cpp b/lib/AST/ComparisonCategories.cpp
new file mode 100644
index 000000000000..87f51facc59b
--- /dev/null
+++ b/lib/AST/ComparisonCategories.cpp
@@ -0,0 +1,211 @@
+//===- ComparisonCategories.cpp - Three Way Comparison Data -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Comparison Category enum and data types, which
+// store the types and expressions needed to support operator<=>.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ComparisonCategories.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+
+bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const {
+ assert(VD && "must have var decl");
+ if (!VD->checkInitIsICE())
+ return false;
+
+ // Before we attempt to get the value of the first field, ensure that we
+ // actually have one (and only one) field.
+ auto *Record = VD->getType()->getAsCXXRecordDecl();
+ if (std::distance(Record->field_begin(), Record->field_end()) != 1 ||
+ !Record->field_begin()->getType()->isIntegralOrEnumerationType())
+ return false;
+
+ return true;
+}
+
+/// Attempt to determine the integer value used to represent the comparison
+/// category result by evaluating the initializer for the specified VarDecl as
+/// a constant expression and retrieving the value of the class's first
+/// (and only) field.
+///
+/// Note: The STL types are expected to have the form:
+/// struct X { T value; };
+/// where T is an integral or enumeration type.
+llvm::APSInt ComparisonCategoryInfo::ValueInfo::getIntValue() const {
+ assert(hasValidIntValue() && "must have a valid value");
+ return VD->evaluateValue()->getStructField(0).getInt();
+}
+
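The library shape assumed by hasValidIntValue() and getIntValue(), sketched (simplified; not the actual standard library source):

    namespace std {
      struct strong_ordering {
        int value;                           // the single integral field that
        static const strong_ordering less;   // getStructField(0) reads
        static const strong_ordering equal;
        static const strong_ordering greater;
      };
    }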
+ComparisonCategoryInfo::ValueInfo *ComparisonCategoryInfo::lookupValueInfo(
+ ComparisonCategoryResult ValueKind) const {
+ // Check if we already have a cache entry for this value.
+ auto It = llvm::find_if(
+ Objects, [&](ValueInfo const &Info) { return Info.Kind == ValueKind; });
+ if (It != Objects.end())
+ return &(*It);
+
+ // We don't have a cached result. Look up the variable declaration and create
+ // a new entry representing it.
+ DeclContextLookupResult Lookup = Record->getCanonicalDecl()->lookup(
+ &Ctx.Idents.get(ComparisonCategories::getResultString(ValueKind)));
+ if (Lookup.size() != 1 || !isa<VarDecl>(Lookup.front()))
+ return nullptr;
+ Objects.emplace_back(ValueKind, cast<VarDecl>(Lookup.front()));
+ return &Objects.back();
+}
+
+static const NamespaceDecl *lookupStdNamespace(const ASTContext &Ctx,
+ NamespaceDecl *&StdNS) {
+ if (!StdNS) {
+ DeclContextLookupResult Lookup =
+ Ctx.getTranslationUnitDecl()->lookup(&Ctx.Idents.get("std"));
+ if (Lookup.size() == 1)
+ StdNS = dyn_cast<NamespaceDecl>(Lookup.front());
+ }
+ return StdNS;
+}
+
+static CXXRecordDecl *lookupCXXRecordDecl(const ASTContext &Ctx,
+ const NamespaceDecl *StdNS,
+ ComparisonCategoryType Kind) {
+ StringRef Name = ComparisonCategories::getCategoryString(Kind);
+ DeclContextLookupResult Lookup = StdNS->lookup(&Ctx.Idents.get(Name));
+ if (Lookup.size() == 1)
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Lookup.front()))
+ return RD;
+ return nullptr;
+}
+
+const ComparisonCategoryInfo *
+ComparisonCategories::lookupInfo(ComparisonCategoryType Kind) const {
+ auto It = Data.find(static_cast<char>(Kind));
+ if (It != Data.end())
+ return &It->second;
+
+ if (const NamespaceDecl *NS = lookupStdNamespace(Ctx, StdNS))
+ if (CXXRecordDecl *RD = lookupCXXRecordDecl(Ctx, NS, Kind))
+ return &Data.try_emplace((char)Kind, Ctx, RD, Kind).first->second;
+
+ return nullptr;
+}
+
+const ComparisonCategoryInfo *
+ComparisonCategories::lookupInfoForType(QualType Ty) const {
+ assert(!Ty.isNull() && "type must be non-null");
+ using CCT = ComparisonCategoryType;
+ auto *RD = Ty->getAsCXXRecordDecl();
+ if (!RD)
+ return nullptr;
+
+ // Check to see if we have information for the specified type cached.
+ const auto *CanonRD = RD->getCanonicalDecl();
+ for (auto &KV : Data) {
+ const ComparisonCategoryInfo &Info = KV.second;
+ if (CanonRD == Info.Record->getCanonicalDecl())
+ return &Info;
+ }
+
+ if (!RD->getEnclosingNamespaceContext()->isStdNamespace())
+ return nullptr;
+
+ // If not, check to see if the decl names a type in namespace std with a name
+ // matching one of the comparison category types.
+ for (unsigned I = static_cast<unsigned>(CCT::First),
+ End = static_cast<unsigned>(CCT::Last);
+ I <= End; ++I) {
+ CCT Kind = static_cast<CCT>(I);
+
+ // We've found the comparison category type. Build a new cache entry for
+ // it.
+ if (getCategoryString(Kind) == RD->getName())
+ return &Data.try_emplace((char)Kind, Ctx, RD, Kind).first->second;
+ }
+
+ // We've found nothing. This isn't a comparison category type.
+ return nullptr;
+}
+
+const ComparisonCategoryInfo &ComparisonCategories::getInfoForType(QualType Ty) const {
+ const ComparisonCategoryInfo *Info = lookupInfoForType(Ty);
+ assert(Info && "info for comparison category not found");
+ return *Info;
+}
+
+QualType ComparisonCategoryInfo::getType() const {
+ assert(Record);
+ return QualType(Record->getTypeForDecl(), 0);
+}
+
+StringRef ComparisonCategories::getCategoryString(ComparisonCategoryType Kind) {
+ using CCKT = ComparisonCategoryType;
+ switch (Kind) {
+ case CCKT::WeakEquality:
+ return "weak_equality";
+ case CCKT::StrongEquality:
+ return "strong_equality";
+ case CCKT::PartialOrdering:
+ return "partial_ordering";
+ case CCKT::WeakOrdering:
+ return "weak_ordering";
+ case CCKT::StrongOrdering:
+ return "strong_ordering";
+ }
+ llvm_unreachable("unhandled case in switch");
+}
+
+StringRef ComparisonCategories::getResultString(ComparisonCategoryResult Kind) {
+ using CCVT = ComparisonCategoryResult;
+ switch (Kind) {
+ case CCVT::Equal:
+ return "equal";
+ case CCVT::Nonequal:
+ return "nonequal";
+ case CCVT::Equivalent:
+ return "equivalent";
+ case CCVT::Nonequivalent:
+ return "nonequivalent";
+ case CCVT::Less:
+ return "less";
+ case CCVT::Greater:
+ return "greater";
+ case CCVT::Unordered:
+ return "unordered";
+ }
+ llvm_unreachable("unhandled case in switch");
+}
+
+std::vector<ComparisonCategoryResult>
+ComparisonCategories::getPossibleResultsForType(ComparisonCategoryType Type) {
+ using CCT = ComparisonCategoryType;
+ using CCR = ComparisonCategoryResult;
+ std::vector<CCR> Values;
+ Values.reserve(6);
+ Values.push_back(CCR::Equivalent);
+ bool IsStrong = (Type == CCT::StrongEquality || Type == CCT::StrongOrdering);
+ if (IsStrong)
+ Values.push_back(CCR::Equal);
+ if (Type == CCT::StrongOrdering || Type == CCT::WeakOrdering ||
+ Type == CCT::PartialOrdering) {
+ Values.push_back(CCR::Less);
+ Values.push_back(CCR::Greater);
+ } else {
+ Values.push_back(CCR::Nonequivalent);
+ if (IsStrong)
+ Values.push_back(CCR::Nonequal);
+ }
+ if (Type == CCT::PartialOrdering)
+ Values.push_back(CCR::Unordered);
+ return Values;
+}
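For instance, the branches above produce {Equivalent, Less, Greater, Unordered} for partial_ordering and {Equivalent, Equal, Less, Greater} for strong_ordering; a caller sketch:

    std::vector<ComparisonCategoryResult> Results =
        ComparisonCategories::getPossibleResultsForType(
            ComparisonCategoryType::PartialOrdering);
    // Results: Equivalent, Less, Greater, Unordered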
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 4c1d591b41e9..3b9b85a20af6 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ODRHash.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Stmt.h"
@@ -76,6 +77,24 @@ Decl *clang::getPrimaryMergedDecl(Decl *D) {
return D->getASTContext().getPrimaryMergedDecl(D);
}
+void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const {
+ SourceLocation Loc = this->Loc;
+ if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation();
+ if (Loc.isValid()) {
+ Loc.print(OS, Context.getSourceManager());
+ OS << ": ";
+ }
+ OS << Message;
+
+ if (auto *ND = dyn_cast_or_null<NamedDecl>(TheDecl)) {
+ OS << " '";
+ ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), true);
+ OS << "'";
+ }
+
+ OS << '\n';
+}
+
// Defined here so that it can be inlined into its direct callers.
bool Decl::isOutOfLine() const {
return !getLexicalDeclContext()->Equals(getDeclContext());
@@ -224,7 +243,7 @@ LinkageInfo LinkageComputer::getLVForType(const Type &T,
return getTypeLinkageAndVisibility(&T);
}
-/// \brief Get the most restrictive linkage for the types in the given
+/// Get the most restrictive linkage for the types in the given
/// template parameter list. For visibility purposes, template
/// parameters are part of the signature of a template.
LinkageInfo LinkageComputer::getLVForTemplateParameterList(
@@ -291,7 +310,7 @@ static const Decl *getOutermostFuncOrBlockContext(const Decl *D) {
return Ret;
}
-/// \brief Get the most restrictive linkage for the types and
+/// Get the most restrictive linkage for the types and
/// declarations in the given template argument list.
///
/// Note that we don't take an LVComputationKind because we always
@@ -312,12 +331,12 @@ LinkageComputer::getLVForTemplateArgumentList(ArrayRef<TemplateArgument> Args,
LV.merge(getLVForType(*Arg.getAsType(), computation));
continue;
- case TemplateArgument::Declaration:
- if (const auto *ND = dyn_cast<NamedDecl>(Arg.getAsDecl())) {
- assert(!usesTypeVisibility(ND));
- LV.merge(getLVForDecl(ND, computation));
- }
+ case TemplateArgument::Declaration: {
+ const NamedDecl *ND = Arg.getAsDecl();
+ assert(!usesTypeVisibility(ND));
+ LV.merge(getLVForDecl(ND, computation));
continue;
+ }
case TemplateArgument::NullPtr:
LV.merge(getTypeLinkageAndVisibility(Arg.getNullPtrType()));
@@ -779,7 +798,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// unique-external linkage, it's not legally usable from outside
// this translation unit. However, we should use the C linkage
// rules instead for extern "C" declarations.
- if (Context.getLangOpts().CPlusPlus && !Function->isInExternCContext()) {
+ if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Function)) {
// Only look at the type-as-written. Otherwise, deducing the return type
// of a function could change its linkage.
QualType TypeAsWritten = Function->getType();
@@ -1073,9 +1092,18 @@ getExplicitVisibilityAux(const NamedDecl *ND,
// If there wasn't explicit visibility there, and this is a
// specialization of a class template, check for visibility
// on the pattern.
- if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(ND))
- return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl(),
- kind);
+ if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ // Walk all the previous declarations of the template up to this point to
+ // see if any of them carries an explicit visibility attribute.
+ const auto *TD = spec->getSpecializedTemplate()->getTemplatedDecl();
+ while (TD != nullptr) {
+ auto Vis = getVisibilityOf(TD, kind);
+ if (Vis != None)
+ return Vis;
+ TD = TD->getPreviousDecl();
+ }
+ return None;
+ }
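An illustrative case the new walk handles (hypothetical source): the visibility attribute sits on an earlier redeclaration of the template rather than on the pattern the specialization points at:

    template <class T> struct __attribute__((visibility("hidden"))) S;
    template <class T> struct S {};  // the pattern itself has no attribute
    S<int> V;                        // the walk finds "hidden" on the
                                     // previous declaration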
// Use the most recent declaration.
if (!IsMostRecent && !isa<NamespaceDecl>(ND)) {
@@ -1165,7 +1193,7 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
LVComputationKind computation) {
if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
if (Function->isInAnonymousNamespace() &&
- !Function->isInExternCContext())
+ !isFirstInExternCContext(Function))
return getInternalLinkageFor(Function);
// This is a "void f();" which got merged with a file static.
@@ -1188,7 +1216,7 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
if (const auto *Var = dyn_cast<VarDecl>(D)) {
if (Var->hasExternalStorage()) {
- if (Var->isInAnonymousNamespace() && !Var->isInExternCContext())
+ if (Var->isInAnonymousNamespace() && !isFirstInExternCContext(Var))
return getInternalLinkageFor(Var);
LinkageInfo LV;
@@ -1497,9 +1525,10 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
using ContextsTy = SmallVector<const DeclContext *, 8>;
ContextsTy Contexts;
- // Collect contexts.
- while (Ctx && isa<NamedDecl>(Ctx)) {
- Contexts.push_back(Ctx);
+ // Collect named contexts.
+ while (Ctx) {
+ if (isa<NamedDecl>(Ctx))
+ Contexts.push_back(Ctx);
Ctx = Ctx->getParent();
}
@@ -2403,6 +2432,23 @@ void VarDecl::setDescribedVarTemplate(VarTemplateDecl *Template) {
getASTContext().setTemplateOrSpecializationInfo(this, Template);
}
+bool VarDecl::isKnownToBeDefined() const {
+ const auto &LangOpts = getASTContext().getLangOpts();
+ // In CUDA mode without relocatable device code, variables of the form 'extern
+ // __shared__ Foo foo[]' are pointers to the base of the GPU core's shared
+ // memory pool. These are never undefined variables, even if they appear
+ // inside of an anon namespace or static function.
+ //
+ // With CUDA relocatable device code enabled, these variables don't get
+ // special handling; they're treated like regular extern variables.
+ if (LangOpts.CUDA && !LangOpts.CUDARelocatableDeviceCode &&
+ hasExternalStorage() && hasAttr<CUDASharedAttr>() &&
+ isa<IncompleteArrayType>(getType()))
+ return true;
+
+ return hasDefinition();
+}
+
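The CUDA form the comment refers to looks like this (illustrative):

    extern __shared__ int smem[];  // extern + __shared__ + incomplete array:
                                   // isKnownToBeDefined() is true when
                                   // relocatable device code is disabled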
MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
if (isStaticDataMember())
// FIXME: Remove ?
@@ -2827,6 +2873,14 @@ bool FunctionDecl::isNoReturn() const {
return false;
}
+bool FunctionDecl::isCPUDispatchMultiVersion() const {
+ return isMultiVersion() && hasAttr<CPUDispatchAttr>();
+}
+
+bool FunctionDecl::isCPUSpecificMultiVersion() const {
+ return isMultiVersion() && hasAttr<CPUSpecificAttr>();
+}
+
void
FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
redeclarable_base::setPreviousDecl(PrevDecl);
@@ -2844,7 +2898,7 @@ FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
-/// \brief Returns a value indicating whether this function
+/// Returns a value indicating whether this function
/// corresponds to a builtin function.
///
/// The function corresponds to a built-in function if it is
@@ -2901,6 +2955,13 @@ unsigned FunctionDecl::getBuiltinID() const {
Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return 0;
+ // CUDA does not have a device-side standard library. printf and malloc are
+ // the only special cases that are supported by the device-side runtime.
+ if (Context.getLangOpts().CUDA && hasAttr<CUDADeviceAttr>() &&
+ !hasAttr<CUDAHostAttr>() &&
+ !(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
+ return 0;
+
return BuiltinID;
}
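A device-side sketch of the rule above (illustrative): only printf and malloc keep their builtin IDs in device code; every other library builtin now returns 0.

    __device__ void f() {
      printf("ok\n");         // Builtin::BIprintf: still recognized
      void *P = malloc(16);   // Builtin::BImalloc: still recognized
      // any other library builtin called here yields getBuiltinID() == 0
    }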
@@ -2939,7 +3000,7 @@ unsigned FunctionDecl::getMinRequiredArguments() const {
return NumRequiredArgs;
}
-/// \brief The combination of the extern and inline keywords under MSVC forces
+/// The combination of the extern and inline keywords under MSVC forces
/// the function to be required.
///
/// Note: This function assumes that we will only get called when isInlined()
@@ -2988,7 +3049,7 @@ static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
return false;
}
-/// \brief For a function declaration in C or C++, determine whether this
+/// For a function declaration in C or C++, determine whether this
/// declaration causes the definition to be externally visible.
///
/// For instance, this determines if adding the current declaration to the set
@@ -3103,7 +3164,7 @@ const Attr *FunctionDecl::getUnusedResultAttr() const {
return getAttr<WarnUnusedResultAttr>();
}
-/// \brief For an inline function definition in C, or for a gnu_inline function
+/// For an inline function definition in C, or for a gnu_inline function
/// in C++, determine whether the definition will be externally visible.
///
/// Inline function definitions are always available for inlining optimizations.
@@ -3605,16 +3666,19 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
return 0;
}
+unsigned FunctionDecl::getODRHash() const {
+ assert(HasODRHash);
+ return ODRHash;
+}
+
unsigned FunctionDecl::getODRHash() {
if (HasODRHash)
return ODRHash;
- if (FunctionDecl *Definition = getDefinition()) {
- if (Definition != this) {
- HasODRHash = true;
- ODRHash = Definition->getODRHash();
- return ODRHash;
- }
+ if (auto *FT = getInstantiatedFromMemberFunction()) {
+ HasODRHash = true;
+ ODRHash = FT->getODRHash();
+ return ODRHash;
}
class ODRHash Hash;
@@ -3658,6 +3722,11 @@ unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
return getBitWidth()->EvaluateKnownConstInt(Ctx).getZExtValue();
}
+bool FieldDecl::isZeroLengthBitField(const ASTContext &Ctx) const {
+ return isUnnamedBitfield() && !getBitWidth()->isValueDependent() &&
+ getBitWidthValue(Ctx) == 0;
+}
+
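For example, the middle member below is what the new helper recognizes:

    struct S {
      int A : 4;
      int   : 0;  // unnamed, width 0: isZeroLengthBitField() == true
      int B : 4;
    };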
unsigned FieldDecl::getFieldIndex() const {
const FieldDecl *Canonical = getCanonicalDecl();
if (Canonical != this)
@@ -3904,6 +3973,17 @@ void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
}
+unsigned EnumDecl::getODRHash() {
+ if (HasODRHash)
+ return ODRHash;
+
+ class ODRHash Hash;
+ Hash.AddEnumDecl(this);
+ HasODRHash = true;
+ ODRHash = Hash.CalculateHash();
+ return ODRHash;
+}
+
//===----------------------------------------------------------------------===//
// RecordDecl Implementation
//===----------------------------------------------------------------------===//
@@ -3915,7 +3995,10 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
: TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc),
HasFlexibleArrayMember(false), AnonymousStructOrUnion(false),
HasObjectMember(false), HasVolatileMember(false),
- LoadedFieldsFromExternalStorage(false) {
+ LoadedFieldsFromExternalStorage(false),
+ NonTrivialToPrimitiveDefaultInitialize(false),
+ NonTrivialToPrimitiveCopy(false), NonTrivialToPrimitiveDestroy(false),
+ ParamDestroyedInCallee(false), ArgPassingRestrictions(APK_CanPassInRegs) {
assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
}
@@ -4365,9 +4448,7 @@ bool TypedefNameDecl::isTransparentTagSlow() const {
};
bool isTransparent = determineIsTransparent();
- CacheIsTransparentTag = 1;
- if (isTransparent)
- CacheIsTransparentTag |= 0x2;
+ MaybeModedTInfo.setInt((isTransparent << 1) | 1);
return isTransparent;
}
@@ -4433,7 +4514,7 @@ EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
// ImportDecl Implementation
//===----------------------------------------------------------------------===//
-/// \brief Retrieve the number of module identifiers needed to name the given
+/// Retrieve the number of module identifiers needed to name the given
/// module.
static unsigned getNumModuleIdentifiers(Module *Mod) {
unsigned Result = 1;
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 29ce7ae034b5..e3817c0abc38 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -34,7 +34,6 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/VersionTuple.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
@@ -42,6 +41,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -101,7 +101,7 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
// padding at the start if required.
size_t ExtraAlign =
llvm::OffsetToAlignment(sizeof(Module *), alignof(Decl));
- char *Buffer = reinterpret_cast<char *>(
+ auto *Buffer = reinterpret_cast<char *>(
::operator new(ExtraAlign + sizeof(Module *) + Size + Extra, Ctx));
Buffer += ExtraAlign;
auto *ParentModule =
@@ -145,8 +145,8 @@ void Decl::setInvalidDecl(bool Invalid) {
// Marking a DecompositionDecl as invalid implies all the child BindingDecl's
// are invalid too.
- if (DecompositionDecl *DD = dyn_cast<DecompositionDecl>(this)) {
- for (BindingDecl *Binding : DD->bindings()) {
+ if (auto *DD = dyn_cast<DecompositionDecl>(this)) {
+ for (auto *Binding : DD->bindings()) {
Binding->setInvalidDecl();
}
}
@@ -199,28 +199,26 @@ void Decl::add(Kind k) {
}
bool Decl::isTemplateParameterPack() const {
- if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(this))
return TTP->isParameterPack();
- if (const NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(this))
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(this))
return NTTP->isParameterPack();
- if (const TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(this))
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(this))
return TTP->isParameterPack();
return false;
}
bool Decl::isParameterPack() const {
- if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
+ if (const auto *Parm = dyn_cast<ParmVarDecl>(this))
return Parm->isParameterPack();
return isTemplateParameterPack();
}
FunctionDecl *Decl::getAsFunction() {
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+ if (auto *FD = dyn_cast<FunctionDecl>(this))
return FD;
- if (const FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(this))
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(this))
return FTD->getTemplatedDecl();
return nullptr;
}
@@ -236,10 +234,23 @@ TemplateDecl *Decl::getDescribedTemplate() const {
return RD->getDescribedClassTemplate();
else if (auto *VD = dyn_cast<VarDecl>(this))
return VD->getDescribedVarTemplate();
+ else if (auto *AD = dyn_cast<TypeAliasDecl>(this))
+ return AD->getDescribedAliasTemplate();
return nullptr;
}
+bool Decl::isTemplated() const {
+ // A declaration is dependent if it is a template or a template pattern, or
+ // is within (lexically for a friend, semantically otherwise) a dependent
+ // context.
+ // FIXME: Should local extern declarations be treated like friends?
+ if (auto *AsDC = dyn_cast<DeclContext>(this))
+ return AsDC->isDependentContext();
+ auto *DC = getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
+ return DC->isDependentContext() || isTemplateDecl() || getDescribedTemplate();
+}
+
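Illustrative answers for the new predicate (hypothetical source): declarations inside a template count as templated even when they are not templates themselves, and friends are judged by their lexical context.

    template <class T> void f() {
      int X;            // isTemplated(): true, declared in a dependent context
    }
    template <class T> struct S {
      friend void g();  // friend: lexical context is the dependent S, so true
    };
    void h();           // isTemplated(): false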
const DeclContext *Decl::getParentFunctionOrMethod() const {
for (const DeclContext *DC = getDeclContext();
DC && !DC->isTranslationUnit() && !DC->isNamespace();
@@ -266,7 +277,7 @@ void PrettyStackTraceDecl::print(raw_ostream &OS) const {
OS << Message;
- if (const NamedDecl *DN = dyn_cast_or_null<NamedDecl>(TheDecl)) {
+ if (const auto *DN = dyn_cast_or_null<NamedDecl>(TheDecl)) {
OS << " '";
DN->printQualifiedName(OS);
OS << '\'';
@@ -314,7 +325,7 @@ void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
if (SemaDC == LexicalDC) {
DeclCtx = SemaDC;
} else {
- Decl::MultipleDC *MDC = new (Ctx) Decl::MultipleDC();
+ auto *MDC = new (Ctx) Decl::MultipleDC();
MDC->SemanticDC = SemaDC;
MDC->LexicalDC = LexicalDC;
DeclCtx = MDC;
@@ -335,7 +346,7 @@ bool Decl::isLexicallyWithinFunctionOrMethod() const {
bool Decl::isInAnonymousNamespace() const {
for (const DeclContext *DC = getDeclContext(); DC; DC = DC->getParent()) {
- if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+ if (const auto *ND = dyn_cast<NamespaceDecl>(DC))
if (ND->isAnonymousNamespace())
return true;
}
@@ -348,7 +359,7 @@ bool Decl::isInStdNamespace() const {
}
TranslationUnitDecl *Decl::getTranslationUnitDecl() {
- if (TranslationUnitDecl *TUD = dyn_cast<TranslationUnitDecl>(this))
+ if (auto *TUD = dyn_cast<TranslationUnitDecl>(this))
return TUD;
DeclContext *DC = getDeclContext();
@@ -413,7 +424,7 @@ bool Decl::isReferenced() const {
return true;
// Check redeclarations.
- for (auto I : redecls())
+ for (const auto *I : redecls())
if (I->Referenced)
return true;
@@ -438,11 +449,11 @@ bool Decl::isExported() const {
ExternalSourceSymbolAttr *Decl::getExternalSourceSymbolAttr() const {
const Decl *Definition = nullptr;
- if (auto ID = dyn_cast<ObjCInterfaceDecl>(this)) {
+ if (auto *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
Definition = ID->getDefinition();
- } else if (auto PD = dyn_cast<ObjCProtocolDecl>(this)) {
+ } else if (auto *PD = dyn_cast<ObjCProtocolDecl>(this)) {
Definition = PD->getDefinition();
- } else if (auto TD = dyn_cast<TagDecl>(this)) {
+ } else if (auto *TD = dyn_cast<TagDecl>(this)) {
Definition = TD->getDefinition();
}
if (!Definition)
@@ -462,9 +473,9 @@ bool Decl::hasDefiningAttr() const {
}
const Attr *Decl::getDefiningAttr() const {
- if (AliasAttr *AA = getAttr<AliasAttr>())
+ if (auto *AA = getAttr<AliasAttr>())
return AA;
- if (IFuncAttr *IFA = getAttr<IFuncAttr>())
+ if (auto *IFA = getAttr<IFuncAttr>())
return IFA;
return nullptr;
}
@@ -482,7 +493,7 @@ static StringRef getRealizedPlatform(const AvailabilityAttr *A,
return RealizedPlatform;
}
-/// \brief Determine the availability of the given declaration based on
+/// Determine the availability of the given declaration based on
/// the target platform.
///
/// When it returns an availability result other than \c AR_Available,
@@ -539,7 +550,6 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
Message->clear();
llvm::raw_string_ostream Out(*Message);
VersionTuple VTI(A->getIntroduced());
- VTI.UseDotAsSeparator();
Out << "introduced in " << PrettyPlatformName << ' '
<< VTI << HintMessage;
}
@@ -553,7 +563,6 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
Message->clear();
llvm::raw_string_ostream Out(*Message);
VersionTuple VTO(A->getObsoleted());
- VTO.UseDotAsSeparator();
Out << "obsoleted in " << PrettyPlatformName << ' '
<< VTO << HintMessage;
}
@@ -567,7 +576,6 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
Message->clear();
llvm::raw_string_ostream Out(*Message);
VersionTuple VTD(A->getDeprecated());
- VTD.UseDotAsSeparator();
Out << "first deprecated in " << PrettyPlatformName << ' '
<< VTD << HintMessage;
}
@@ -579,9 +587,11 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
}
AvailabilityResult Decl::getAvailability(std::string *Message,
- VersionTuple EnclosingVersion) const {
+ VersionTuple EnclosingVersion,
+ StringRef *RealizedPlatform) const {
if (auto *FTD = dyn_cast<FunctionTemplateDecl>(this))
- return FTD->getTemplatedDecl()->getAvailability(Message, EnclosingVersion);
+ return FTD->getTemplatedDecl()->getAvailability(Message, EnclosingVersion,
+ RealizedPlatform);
AvailabilityResult Result = AR_Available;
std::string ResultMessage;
@@ -608,8 +618,11 @@ AvailabilityResult Decl::getAvailability(std::string *Message,
AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
Message, EnclosingVersion);
- if (AR == AR_Unavailable)
+ if (AR == AR_Unavailable) {
+ if (RealizedPlatform)
+ *RealizedPlatform = Availability->getPlatform()->getName();
return AR_Unavailable;
+ }
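// A minimal caller sketch (hypothetical, not part of this change), showing how
// the new out-parameter reports which platform made the declaration
// unavailable:
//
//   StringRef Platform;
//   if (D->getAvailability(nullptr, VersionTuple(), &Platform) ==
//       AR_Unavailable)
//     llvm::errs() << "unavailable on " << Platform << '\n';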
if (AR > Result) {
Result = AR;
@@ -636,14 +649,14 @@ VersionTuple Decl::getVersionIntroduced() const {
return Availability->getIntroduced();
}
}
- return VersionTuple();
+ return {};
}
bool Decl::canBeWeakImported(bool &IsDefinition) const {
IsDefinition = false;
// Variables, if they aren't definitions.
- if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
+ if (const auto *Var = dyn_cast<VarDecl>(this)) {
if (Var->isThisDeclarationADefinition()) {
IsDefinition = true;
return false;
@@ -651,7 +664,7 @@ bool Decl::canBeWeakImported(bool &IsDefinition) const {
return true;
// Functions, if they aren't definitions.
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(this)) {
if (FD->hasBody()) {
IsDefinition = true;
return false;
@@ -834,14 +847,14 @@ Decl *Decl::castFromDeclContext (const DeclContext *D) {
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) \
case Decl::NAME: \
- return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+ return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D));
#define DECL_CONTEXT_BASE(NAME)
#include "clang/AST/DeclNodes.inc"
default:
#define DECL(NAME, BASE)
#define DECL_CONTEXT_BASE(NAME) \
if (DK >= first##NAME && DK <= last##NAME) \
- return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+ return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D));
#include "clang/AST/DeclNodes.inc"
llvm_unreachable("a decl that inherits DeclContext isn't handled");
}
@@ -853,14 +866,14 @@ DeclContext *Decl::castToDeclContext(const Decl *D) {
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) \
case Decl::NAME: \
- return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+ return static_cast<NAME##Decl *>(const_cast<Decl *>(D));
#define DECL_CONTEXT_BASE(NAME)
#include "clang/AST/DeclNodes.inc"
default:
#define DECL(NAME, BASE)
#define DECL_CONTEXT_BASE(NAME) \
if (DK >= first##NAME && DK <= last##NAME) \
- return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+ return static_cast<NAME##Decl *>(const_cast<Decl *>(D));
#include "clang/AST/DeclNodes.inc"
llvm_unreachable("a decl that inherits DeclContext isn't handled");
}
@@ -869,17 +882,17 @@ DeclContext *Decl::castToDeclContext(const Decl *D) {
SourceLocation Decl::getBodyRBrace() const {
// Special handling of FunctionDecl to avoid de-serializing the body from PCH.
// FunctionDecl stores EndRangeLoc for this purpose.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(this)) {
const FunctionDecl *Definition;
if (FD->hasBody(Definition))
return Definition->getSourceRange().getEnd();
- return SourceLocation();
+ return {};
}
if (Stmt *Body = getBody())
return Body->getSourceRange().getEnd();
- return SourceLocation();
+ return {};
}
bool Decl::AccessDeclContextSanity() const {
@@ -891,12 +904,14 @@ bool Decl::AccessDeclContextSanity() const {
// 4. the context is not a record
// 5. it's invalid
// 6. it's a C++0x static_assert.
+ // 7. it's a block literal declaration
if (isa<TranslationUnitDecl>(this) ||
isa<TemplateTypeParmDecl>(this) ||
isa<NonTypeTemplateParmDecl>(this) ||
!isa<CXXRecordDecl>(getDeclContext()) ||
isInvalidDecl() ||
isa<StaticAssertDecl>(this) ||
+ isa<BlockDecl>(this) ||
// FIXME: a ParmVarDecl can have ClassTemplateSpecialization
// as DeclContext (?).
isa<ParmVarDecl>(this) ||
@@ -917,9 +932,9 @@ static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
QualType Ty;
- if (const ValueDecl *D = dyn_cast<ValueDecl>(this))
+ if (const auto *D = dyn_cast<ValueDecl>(this))
Ty = D->getType();
- else if (const TypedefNameDecl *D = dyn_cast<TypedefNameDecl>(this))
+ else if (const auto *D = dyn_cast<TypedefNameDecl>(this))
Ty = D->getUnderlyingType();
else
return nullptr;
@@ -936,22 +951,21 @@ const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
/// code context that is not a closure (a lambda, block, etc.).
template <class T> static Decl *getNonClosureContext(T *D) {
if (getKind(D) == Decl::CXXMethod) {
- CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ auto *MD = cast<CXXMethodDecl>(D);
if (MD->getOverloadedOperator() == OO_Call &&
MD->getParent()->isLambda())
return getNonClosureContext(MD->getParent()->getParent());
return MD;
- } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ } else if (auto *FD = dyn_cast<FunctionDecl>(D))
return FD;
- } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD;
- } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ else if (auto *BD = dyn_cast<BlockDecl>(D))
return getNonClosureContext(BD->getParent());
- } else if (CapturedDecl *CD = dyn_cast<CapturedDecl>(D)) {
+ else if (auto *CD = dyn_cast<CapturedDecl>(D))
return getNonClosureContext(CD->getParent());
- } else {
+ else
return nullptr;
- }
}
Decl *Decl::getNonClosureContext() {
@@ -986,7 +1000,7 @@ bool DeclContext::classof(const Decl *D) {
DeclContext::~DeclContext() = default;
-/// \brief Find the parent context of this context that will be
+/// Find the parent context of this context that will be
/// used for unqualified name lookup.
///
/// Generally, the parent lookup context is the semantic context. However, for
@@ -1011,7 +1025,7 @@ bool DeclContext::isStdNamespace() const {
if (!isNamespace())
return false;
- const NamespaceDecl *ND = cast<NamespaceDecl>(this);
+ const auto *ND = cast<NamespaceDecl>(this);
if (ND->isInline()) {
return ND->getParent()->isStdNamespace();
}
@@ -1030,7 +1044,7 @@ bool DeclContext::isDependentContext() const {
if (isa<ClassTemplatePartialSpecializationDecl>(this))
return true;
- if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
+ if (const auto *Record = dyn_cast<CXXRecordDecl>(this)) {
if (Record->getDescribedClassTemplate())
return true;
@@ -1038,7 +1052,7 @@ bool DeclContext::isDependentContext() const {
return true;
}
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
+ if (const auto *Function = dyn_cast<FunctionDecl>(this)) {
if (Function->getDescribedFunctionTemplate())
return true;
@@ -1117,18 +1131,18 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::Namespace:
// The original namespace is our primary context.
- return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();
+ return static_cast<NamespaceDecl *>(this)->getOriginalNamespace();
case Decl::ObjCMethod:
return this;
case Decl::ObjCInterface:
- if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
+ if (auto *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
return Def;
return this;
case Decl::ObjCProtocol:
- if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
+ if (auto *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
return Def;
return this;
@@ -1143,12 +1157,12 @@ DeclContext *DeclContext::getPrimaryContext() {
if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
// If this is a tag type that has a definition or is currently
// being defined, that definition is our primary context.
- TagDecl *Tag = cast<TagDecl>(this);
+ auto *Tag = cast<TagDecl>(this);
if (TagDecl *Def = Tag->getDefinition())
return Def;
- if (const TagType *TagTy = dyn_cast<TagType>(Tag->getTypeForDecl())) {
+ if (const auto *TagTy = dyn_cast<TagType>(Tag->getTypeForDecl())) {
// Note, TagType::getDecl returns the (partial) definition if one exists.
TagDecl *PossiblePartialDef = TagTy->getDecl();
if (PossiblePartialDef->isBeingDefined())
@@ -1175,7 +1189,7 @@ DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts){
return;
}
- NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
+ auto *Self = static_cast<NamespaceDecl *>(this);
for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
N = N->getPreviousDecl())
Contexts.push_back(N);
@@ -1184,16 +1198,15 @@ DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts){
}
std::pair<Decl *, Decl *>
-DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
+DeclContext::BuildDeclChain(ArrayRef<Decl *> Decls,
bool FieldsAlreadyLoaded) {
// Build up a chain of declarations via the Decl::NextInContextAndBits field.
Decl *FirstNewDecl = nullptr;
Decl *PrevDecl = nullptr;
- for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
- if (FieldsAlreadyLoaded && isa<FieldDecl>(Decls[I]))
+ for (auto *D : Decls) {
+ if (FieldsAlreadyLoaded && isa<FieldDecl>(D))
continue;
- Decl *D = Decls[I];
if (PrevDecl)
PrevDecl->NextInContextAndBits.setPointer(D);
else
@@ -1205,7 +1218,7 @@ DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
return std::make_pair(FirstNewDecl, PrevDecl);
}
-/// \brief We have just acquired external visible storage, and we already have
+/// We have just acquired external visible storage, and we already have
/// built a lookup map. For every name in the map, pull in the new names from
/// the external storage.
void DeclContext::reconcileExternalVisibleStorage() const {
@@ -1216,7 +1229,7 @@ void DeclContext::reconcileExternalVisibleStorage() const {
Lookup.second.setHasExternalDecls();
}
-/// \brief Load the declarations within this lexical storage from an
+/// Load the declarations within this lexical storage from an
/// external source.
/// \return \c true if any declarations were added.
bool
@@ -1238,7 +1251,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
// We may have already loaded just the fields of this record, in which case
// we need to ignore them.
bool FieldsAlreadyLoaded = false;
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
+ if (const auto *RD = dyn_cast<RecordDecl>(this))
FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;
// Splice the newly-read declarations into the beginning of the list
@@ -1305,12 +1318,11 @@ ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
}
} else {
// Convert the array to a StoredDeclsList.
- for (ArrayRef<NamedDecl*>::iterator
- I = Decls.begin(), E = Decls.end(); I != E; ++I) {
+ for (auto *D : Decls) {
if (List.isNull())
- List.setOnlyValue(*I);
+ List.setOnlyValue(D);
else
- List.AddSubsequentDecl(*I);
+ List.AddSubsequentDecl(D);
}
}
@@ -1335,6 +1347,38 @@ bool DeclContext::containsDecl(Decl *D) const {
(D->NextInContextAndBits.getPointer() || D == LastDecl));
}
+bool DeclContext::containsDeclAndLoad(Decl *D) const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+ return containsDecl(D);
+}
+
+/// shouldBeHidden - Determine whether a declaration which was declared
+/// within its semantic context should be invisible to qualified name lookup.
+static bool shouldBeHidden(NamedDecl *D) {
+ // Skip unnamed declarations.
+ if (!D->getDeclName())
+ return true;
+
+ // Skip entities that can't be found by name lookup into a particular
+ // context.
+ if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
+ D->isTemplateParameter())
+ return true;
+
+ // Skip template specializations.
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return true;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isFunctionTemplateSpecialization())
+ return true;
+
+ return false;
+}
+
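// For illustration only (hypothetical declarations, not part of this change):
//
//   struct S {
//     int : 4;                          // unnamed bit-field: hidden
//     template <typename T> void f();   // visible as "f"
//   };
//   template <> void S::f<int>();       // specialization: hidden; qualified
//                                       // lookup of "f" finds the template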
void DeclContext::removeDecl(Decl *D) {
assert(D->getLexicalDeclContext() == this &&
"decl being removed from non-lexical context");
@@ -1357,16 +1401,22 @@ void DeclContext::removeDecl(Decl *D) {
}
}
}
-
+
// Mark that D is no longer in the decl chain.
D->NextInContextAndBits.setPointer(nullptr);
// Remove D from the lookup table if necessary.
if (isa<NamedDecl>(D)) {
- NamedDecl *ND = cast<NamedDecl>(D);
+ auto *ND = cast<NamedDecl>(D);
+
+      // Do not try to remove the declaration if it is invisible to qualified
+      // lookup; e.g. template specializations are skipped.
+ if (shouldBeHidden(ND))
+ return;
// Remove only decls that have a name
- if (!ND->getDeclName()) return;
+ if (!ND->getDeclName())
+ return;
auto *DC = D->getDeclContext();
do {
@@ -1396,13 +1446,13 @@ void DeclContext::addHiddenDecl(Decl *D) {
// Notify a C++ record declaration that we've added a member, so it can
// update its class-specific state.
- if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ if (auto *Record = dyn_cast<CXXRecordDecl>(this))
Record->addedMember(D);
// If this is a newly-created (not de-serialized) import declaration, wire
// it in to the list of local import declarations.
if (!D->isFromASTFile()) {
- if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
+ if (auto *Import = dyn_cast<ImportDecl>(D))
D->getASTContext().addedLocalImportDecl(Import);
}
}
@@ -1410,7 +1460,7 @@ void DeclContext::addHiddenDecl(Decl *D) {
void DeclContext::addDecl(Decl *D) {
addHiddenDecl(D);
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (auto *ND = dyn_cast<NamedDecl>(D))
ND->getDeclContext()->getPrimaryContext()->
makeDeclVisibleInContextWithFlags(ND, false, true);
}
@@ -1418,37 +1468,11 @@ void DeclContext::addDecl(Decl *D) {
void DeclContext::addDeclInternal(Decl *D) {
addHiddenDecl(D);
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (auto *ND = dyn_cast<NamedDecl>(D))
ND->getDeclContext()->getPrimaryContext()->
makeDeclVisibleInContextWithFlags(ND, true, true);
}
-/// shouldBeHidden - Determine whether a declaration which was declared
-/// within its semantic context should be invisible to qualified name lookup.
-static bool shouldBeHidden(NamedDecl *D) {
- // Skip unnamed declarations.
- if (!D->getDeclName())
- return true;
-
- // Skip entities that can't be found by name lookup into a particular
- // context.
- if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
- D->isTemplateParameter())
- return true;
-
- // Skip template specializations.
- // FIXME: This feels like a hack. Should DeclarationName support
- // template-ids, or is there a better way to keep specializations
- // from being visible?
- if (isa<ClassTemplateSpecializationDecl>(D))
- return true;
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isFunctionTemplateSpecialization())
- return true;
-
- return false;
-}
-
/// buildLookup - Build the lookup data structure with all of the
/// declarations in this DeclContext (and any other contexts linked
/// to it or transparent contexts nested within it) and return it.
@@ -1490,7 +1514,7 @@ StoredDeclsMap *DeclContext::buildLookup() {
/// DeclContext, a DeclContext linked to it, or a transparent context
/// nested within it.
void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
- for (Decl *D : DCtx->noload_decls()) {
+ for (auto *D : DCtx->noload_decls()) {
// Insert this declaration into the lookup structure, but only if
// it's semantically within its decl context. Any other decls which
// should be found in this context are added eagerly.
@@ -1499,7 +1523,7 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
// FindExternalVisibleDeclsByName if needed. Exception: if we're not
// in C++, we do not track external visible decls for the TU, so in
// that case we need to collect them all here.
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (auto *ND = dyn_cast<NamedDecl>(D))
if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND) &&
(!ND->isFromASTFile() ||
(isTranslationUnit() &&
@@ -1509,7 +1533,7 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
// If this declaration is itself a transparent declaration context
// or inline namespace, add the members of this declaration of that
// context (recursively).
- if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
+ if (auto *InnerCtx = dyn_cast<DeclContext>(D))
if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
buildLookupImpl(InnerCtx, Internal);
}
@@ -1562,7 +1586,7 @@ DeclContext::lookup(DeclarationName Name) const {
}
}
- return lookup_result();
+ return {};
}
StoredDeclsMap *Map = LookupPtr;
@@ -1570,11 +1594,11 @@ DeclContext::lookup(DeclarationName Name) const {
Map = const_cast<DeclContext*>(this)->buildLookup();
if (!Map)
- return lookup_result();
+ return {};
StoredDeclsMap::iterator I = Map->find(Name);
if (I == Map->end())
- return lookup_result();
+ return {};
return I->second.getLookupResult();
}
@@ -1588,26 +1612,29 @@ DeclContext::noload_lookup(DeclarationName Name) {
if (PrimaryContext != this)
return PrimaryContext->noload_lookup(Name);
- // If we have any lazy lexical declarations not in our lookup map, add them
- // now. Don't import any external declarations, not even if we know we have
- // some missing from the external visible lookups.
- if (HasLazyLocalLexicalLookups) {
- SmallVector<DeclContext *, 2> Contexts;
- collectAllContexts(Contexts);
- for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
- buildLookupImpl(Contexts[I], hasExternalVisibleStorage());
- HasLazyLocalLexicalLookups = false;
- }
-
+ loadLazyLocalLexicalLookups();
StoredDeclsMap *Map = LookupPtr;
if (!Map)
- return lookup_result();
+ return {};
StoredDeclsMap::iterator I = Map->find(Name);
return I != Map->end() ? I->second.getLookupResult()
: lookup_result();
}
+// If we have any lazy lexical declarations not in our lookup map, add them
+// now. Don't import any external declarations, not even if we know we have
+// some missing from the external visible lookups.
+void DeclContext::loadLazyLocalLexicalLookups() {
+ if (HasLazyLocalLexicalLookups) {
+ SmallVector<DeclContext *, 2> Contexts;
+ collectAllContexts(Contexts);
+ for (auto *Context : Contexts)
+ buildLookupImpl(Context, hasExternalVisibleStorage());
+ HasLazyLocalLexicalLookups = false;
+ }
+}
+
void DeclContext::localUncachedLookup(DeclarationName Name,
SmallVectorImpl<NamedDecl *> &Results) {
Results.clear();
@@ -1639,7 +1666,7 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
// FIXME: If we have lazy external declarations, this will not find them!
// FIXME: Should we CollectAllContexts and walk them all here?
for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (auto *ND = dyn_cast<NamedDecl>(D))
if (ND->getDeclName() == Name)
Results.push_back(ND);
}
@@ -1681,7 +1708,7 @@ bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
if (O->Equals(this))
return true;
- const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(O);
+ const auto *NS = dyn_cast<NamespaceDecl>(O);
if (!NS || !NS->isInline())
break;
O = NS->getParent();
@@ -1740,7 +1767,7 @@ void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
getParent()->getPrimaryContext()->
makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);
- Decl *DCAsDecl = cast<Decl>(this);
+ auto *DCAsDecl = cast<Decl>(this);
// Notify that a decl was made visible unless we are a Tag being defined.
if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
@@ -1858,8 +1885,7 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
if (!Parent->LookupPtr)
Parent->CreateStoredDeclsMap(C);
- DependentStoredDeclsMap *Map =
- static_cast<DependentStoredDeclsMap *>(Parent->LookupPtr);
+ auto *Map = static_cast<DependentStoredDeclsMap *>(Parent->LookupPtr);
// Allocate the copy of the PartialDiagnostic via the ASTContext's
// BumpPtrAllocator, rather than the ASTContext itself.
@@ -1867,7 +1893,7 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
if (PDiag.hasStorage())
DiagStorage = new (C) PartialDiagnostic::Storage;
- DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
+ auto *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
// TODO: Maybe we shouldn't reverse the order during insertion.
DD->NextDiagnostic = Map->FirstDiagnostic;
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 41f2449a9d6a..076e6376d157 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -74,7 +74,8 @@ void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
: UserDeclaredConstructor(false), UserDeclaredSpecialMembers(0),
Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
- Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
+ Abstract(false), IsStandardLayout(true), IsCXX11StandardLayout(true),
+ HasBasesWithFields(false), HasBasesWithNonStaticDataMembers(false),
HasPrivateFields(false), HasProtectedFields(false),
HasPublicFields(false), HasMutableFields(false), HasVariantMembers(false),
HasOnlyCMembers(true), HasInClassInitializer(false),
@@ -88,10 +89,11 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
DefaultedMoveConstructorIsDeleted(false),
DefaultedMoveAssignmentIsDeleted(false),
DefaultedDestructorIsDeleted(false), HasTrivialSpecialMembers(SMF_All),
- DeclaredNonTrivialSpecialMembers(0), HasIrrelevantDestructor(true),
+ HasTrivialSpecialMembersForCall(SMF_All),
+ DeclaredNonTrivialSpecialMembers(0),
+ DeclaredNonTrivialSpecialMembersForCall(0), HasIrrelevantDestructor(true),
HasConstexprNonCopyMoveConstructor(false),
HasDefaultedDefaultConstructor(false),
- CanPassInRegisters(true),
DefaultedDefaultConstructorIsConstexpr(true),
HasConstexprDefaultConstructor(false),
HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
@@ -124,8 +126,8 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
SourceLocation IdLoc, IdentifierInfo *Id,
CXXRecordDecl *PrevDecl,
bool DelayTypeCreation) {
- CXXRecordDecl *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc,
- IdLoc, Id, PrevDecl);
+ auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc, IdLoc, Id,
+ PrevDecl);
R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
// FIXME: DelayTypeCreation seems like such a hack
@@ -139,9 +141,8 @@ CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
TypeSourceInfo *Info, SourceLocation Loc,
bool Dependent, bool IsGeneric,
LambdaCaptureDefault CaptureDefault) {
- CXXRecordDecl *R =
- new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
- nullptr, nullptr);
+ auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
+ nullptr, nullptr);
R->IsBeingDefined = true;
R->DefinitionData =
new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric,
@@ -154,13 +155,32 @@ CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
CXXRecordDecl *
CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- CXXRecordDecl *R = new (C, ID) CXXRecordDecl(
+ auto *R = new (C, ID) CXXRecordDecl(
CXXRecord, TTK_Struct, C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr);
R->MayHaveOutOfDateDef = false;
return R;
}
+/// Determine whether a class has a repeated base class. This is intended for
+/// use when determining if a class is standard-layout, so it makes no attempt
+/// to handle virtual bases.
+static bool hasRepeatedBaseClass(const CXXRecordDecl *StartRD) {
+ llvm::SmallPtrSet<const CXXRecordDecl*, 8> SeenBaseTypes;
+ SmallVector<const CXXRecordDecl*, 8> WorkList = {StartRD};
+ while (!WorkList.empty()) {
+ const CXXRecordDecl *RD = WorkList.pop_back_val();
+ for (const CXXBaseSpecifier &BaseSpec : RD->bases()) {
+ if (const CXXRecordDecl *B = BaseSpec.getType()->getAsCXXRecordDecl()) {
+ if (!SeenBaseTypes.insert(B).second)
+ return true;
+ WorkList.push_back(B);
+ }
+ }
+ }
+ return false;
+}
+
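// For illustration only (hypothetical types, not part of this change): the
// worklist walk above flags the repeated (necessarily non-virtual) base A,
// so C cannot be standard-layout:
//
//   struct A {};
//   struct B : A {};
//   struct C : A, B {};   // two A subobjects; hasRepeatedBaseClass -> true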
void
CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
unsigned NumBases) {
@@ -197,29 +217,40 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// Skip dependent types; we can't do any checking on them now.
if (BaseType->isDependentType())
continue;
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+ auto *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
- if (!BaseClassDecl->isEmpty()) {
- if (!data().Empty) {
- // C++0x [class]p7:
- // A standard-layout class is a class that:
- // [...]
- // -- either has no non-static data members in the most derived
- // class and at most one base class with non-static data members,
- // or has no base classes with non-static data members, and
- // If this is the second non-empty base, then neither of these two
- // clauses can be true.
+ // C++2a [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has all non-static data members and bit-fields in the class and
+ // its base classes first declared in the same class
+ if (BaseClassDecl->data().HasBasesWithFields ||
+ !BaseClassDecl->field_empty()) {
+ if (data().HasBasesWithFields)
+ // Two bases have members or bit-fields: not standard-layout.
data().IsStandardLayout = false;
- }
+ data().HasBasesWithFields = true;
+ }
+
+ // C++11 [class]p7:
+ // A standard-layout class is a class that:
+ // -- [...] has [...] at most one base class with non-static data
+ // members
+ if (BaseClassDecl->data().HasBasesWithNonStaticDataMembers ||
+ BaseClassDecl->hasDirectFields()) {
+ if (data().HasBasesWithNonStaticDataMembers)
+ data().IsCXX11StandardLayout = false;
+ data().HasBasesWithNonStaticDataMembers = true;
+ }
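// For illustration only (hypothetical types, not part of this change): the
// two flags differ for unnamed bit-fields. Given
//
//   struct A { int : 4; };
//   struct B : A { int x; };
//
// B fails the C++2a wording (A's bit-field and B::x are first declared in
// different classes) but satisfies the C++11 wording, which only counts
// bases with non-static data members.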
+ if (!BaseClassDecl->isEmpty()) {
// C++14 [meta.unary.prop]p4:
// T is a class type [...] with [...] no base class B for which
// is_empty<B>::value is false.
data().Empty = false;
- data().HasNoNonEmptyBases = false;
}
-
+
// C++1z [dcl.init.agg]p1:
// An aggregate is a class with [...] no private or protected base classes
if (Base->getAccessSpecifier() != AS_public)
@@ -228,14 +259,20 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// C++ [class.virtual]p1:
// A class that declares or inherits a virtual function is called a
// polymorphic class.
- if (BaseClassDecl->isPolymorphic())
+ if (BaseClassDecl->isPolymorphic()) {
data().Polymorphic = true;
+ // An aggregate is a class with [...] no virtual functions.
+ data().Aggregate = false;
+ }
+
// C++0x [class]p7:
// A standard-layout class is a class that: [...]
// -- has no non-standard-layout base classes
if (!BaseClassDecl->isStandardLayout())
data().IsStandardLayout = false;
+ if (!BaseClassDecl->isCXX11StandardLayout())
+ data().IsCXX11StandardLayout = false;
// Record if this base is the first non-literal field or base.
if (!hasNonLiteralTypeFieldsOrBases() && !BaseType->isLiteralType(C))
@@ -281,11 +318,13 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// operator for a class X] is trivial [...] if:
// -- class X has [...] no virtual base classes
data().HasTrivialSpecialMembers &= SMF_Destructor;
+ data().HasTrivialSpecialMembersForCall &= SMF_Destructor;
// C++0x [class]p7:
// A standard-layout class is a class that: [...]
// -- has [...] no virtual base classes
data().IsStandardLayout = false;
+ data().IsCXX11StandardLayout = false;
// C++11 [dcl.constexpr]p4:
// In the definition of a constexpr constructor [...]
@@ -314,6 +353,10 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// subobject is trivial, and
if (!BaseClassDecl->hasTrivialCopyConstructor())
data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
+
+ if (!BaseClassDecl->hasTrivialCopyConstructorForCall())
+ data().HasTrivialSpecialMembersForCall &= ~SMF_CopyConstructor;
+
// If the base class doesn't have a simple move constructor, we'll eagerly
// declare it and perform overload resolution to determine which function
// it actually calls. If it does have a simple move constructor, this
@@ -321,6 +364,9 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (!BaseClassDecl->hasTrivialMoveConstructor())
data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;
+ if (!BaseClassDecl->hasTrivialMoveConstructorForCall())
+ data().HasTrivialSpecialMembersForCall &= ~SMF_MoveConstructor;
+
// C++0x [class.copy]p27:
// A copy/move assignment operator for class X is trivial if [...]
// [...]
@@ -357,6 +403,9 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (!BaseClassDecl->hasTrivialDestructor())
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ if (!BaseClassDecl->hasTrivialDestructorForCall())
+ data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
+
if (!BaseClassDecl->hasIrrelevantDestructor())
data().HasIrrelevantDestructor = false;
@@ -376,6 +425,10 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (BaseClassDecl->hasVolatileMember())
setHasVolatileMember(true);
+ if (BaseClassDecl->getArgPassingRestrictions() ==
+ RecordDecl::APK_CanNeverPassInRegs)
+ setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+
// Keep track of the presence of mutable fields.
if (BaseClassDecl->hasMutableFields()) {
data().HasMutableFields = true;
@@ -390,6 +443,16 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
addedClassSubobject(BaseClassDecl);
}
+
+ // C++2a [class]p7:
+ // A class S is a standard-layout class if it:
+ // -- has at most one base class subobject of any given type
+ //
+ // Note that we only need to check this for classes with more than one base
+ // class. If there's only one base class, and it's standard layout, then
+ // we know there are no repeated base classes.
+ if (data().IsStandardLayout && NumBases > 1 && hasRepeatedBaseClass(this))
+ data().IsStandardLayout = false;
if (VBases.empty()) {
data().IsParsingBaseSpecifiers = false;
@@ -490,6 +553,81 @@ void CXXRecordDecl::markedVirtualFunctionPure() {
data().Abstract = true;
}
+bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType(
+ ASTContext &Ctx, const CXXRecordDecl *XFirst) {
+ if (!getNumBases())
+ return false;
+
+ llvm::SmallPtrSet<const CXXRecordDecl*, 8> Bases;
+ llvm::SmallPtrSet<const CXXRecordDecl*, 8> M;
+ SmallVector<const CXXRecordDecl*, 8> WorkList;
+
+ // Visit a type that we have determined is an element of M(S).
+ auto Visit = [&](const CXXRecordDecl *RD) -> bool {
+ RD = RD->getCanonicalDecl();
+
+ // C++2a [class]p8:
+ // A class S is a standard-layout class if it [...] has no element of the
+ // set M(S) of types as a base class.
+ //
+ // If we find a subobject of an empty type, it might also be a base class,
+ // so we'll need to walk the base classes to check.
+ if (!RD->data().HasBasesWithFields) {
+ // Walk the bases the first time, stopping if we find the type. Build a
+ // set of them so we don't need to walk them again.
+ if (Bases.empty()) {
+ bool RDIsBase = !forallBases([&](const CXXRecordDecl *Base) -> bool {
+ Base = Base->getCanonicalDecl();
+ if (RD == Base)
+ return false;
+ Bases.insert(Base);
+ return true;
+ });
+ if (RDIsBase)
+ return true;
+ } else {
+ if (Bases.count(RD))
+ return true;
+ }
+ }
+
+ if (M.insert(RD).second)
+ WorkList.push_back(RD);
+ return false;
+ };
+
+ if (Visit(XFirst))
+ return true;
+
+ while (!WorkList.empty()) {
+ const CXXRecordDecl *X = WorkList.pop_back_val();
+
+ // FIXME: We don't check the bases of X. That matches the standard, but
+ // that sure looks like a wording bug.
+
+ // -- If X is a non-union class type with a non-static data member
+ // [recurse to] the first non-static data member of X
+ // -- If X is a union type, [recurse to union members]
+ for (auto *FD : X->fields()) {
+ // FIXME: Should we really care about the type of the first non-static
+ // data member of a non-union if there are preceding unnamed bit-fields?
+ if (FD->isUnnamedBitfield())
+ continue;
+
+      //   -- If X is an array type, [visit the element type]
+ QualType T = Ctx.getBaseElementType(FD->getType());
+ if (auto *RD = T->getAsCXXRecordDecl())
+ if (Visit(RD))
+ return true;
+
+ if (!X->isUnion())
+ break;
+ }
+ }
+
+ return false;
+}
+
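// For illustration only (hypothetical types, not part of this change): here
// the first member of D has the empty type E, which is also a base class of
// D, so two E subobjects would need to share offset zero and D is not
// standard-layout:
//
//   struct E {};
//   struct D : E { E e; };   // E is in M(D) and is a base of D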
void CXXRecordDecl::addedMember(Decl *D) {
if (!D->isImplicit() &&
!isa<FieldDecl>(D) &&
@@ -502,7 +640,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (D->getFriendObjectKind() || D->isInvalidDecl())
return;
- FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ auto *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
if (FunTmpl)
D = FunTmpl->getTemplatedDecl();
@@ -510,12 +648,11 @@ void CXXRecordDecl::addedMember(Decl *D) {
Decl *DUnderlying = D;
if (auto *ND = dyn_cast<NamedDecl>(DUnderlying)) {
DUnderlying = ND->getUnderlyingDecl();
- if (FunctionTemplateDecl *UnderlyingFunTmpl =
- dyn_cast<FunctionTemplateDecl>(DUnderlying))
+ if (auto *UnderlyingFunTmpl = dyn_cast<FunctionTemplateDecl>(DUnderlying))
DUnderlying = UnderlyingFunTmpl->getTemplatedDecl();
}
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
if (Method->isVirtual()) {
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class with [...] no virtual functions.
@@ -539,11 +676,13 @@ void CXXRecordDecl::addedMember(Decl *D) {
// assignment operator for a class X] is trivial [...] if:
// -- class X has no virtual functions [...]
data().HasTrivialSpecialMembers &= SMF_Destructor;
+ data().HasTrivialSpecialMembersForCall &= SMF_Destructor;
// C++0x [class]p7:
// A standard-layout class is a class that: [...]
// -- has no virtual functions
data().IsStandardLayout = false;
+ data().IsCXX11StandardLayout = false;
}
}
@@ -557,7 +696,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
unsigned SMKind = 0;
// Handle constructors.
- if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
if (!Constructor->isImplicit()) {
// Note that we have a user-declared constructor.
data().UserDeclaredConstructor = true;
@@ -599,8 +738,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Handle constructors, including those inherited from base classes.
- if (CXXConstructorDecl *Constructor =
- dyn_cast<CXXConstructorDecl>(DUnderlying)) {
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(DUnderlying)) {
// Record if we see any constexpr constructors which are neither copy
// nor move constructors.
// C++1z [basic.types]p10:
@@ -612,7 +750,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Handle destructors.
- if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
SMKind |= SMF_Destructor;
if (DD->isUserProvided())
@@ -623,17 +761,19 @@ void CXXRecordDecl::addedMember(Decl *D) {
// C++11 [class.dtor]p5:
// A destructor is trivial if [...] the destructor is not virtual.
- if (DD->isVirtual())
+ if (DD->isVirtual()) {
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
+ }
}
// Handle member functions.
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
if (Method->isCopyAssignmentOperator()) {
SMKind |= SMF_CopyAssignment;
- const ReferenceType *ParamTy =
- Method->getParamDecl(0)->getType()->getAs<ReferenceType>();
+ const auto *ParamTy =
+ Method->getParamDecl(0)->getType()->getAs<ReferenceType>();
if (!ParamTy || ParamTy->getPointeeType().isConstQualified())
data().HasDeclaredCopyAssignmentWithConstParam = true;
}
@@ -642,7 +782,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
SMKind |= SMF_MoveAssignment;
// Keep the list of conversion functions up-to-date.
- if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ if (auto *Conversion = dyn_cast<CXXConversionDecl>(D)) {
// FIXME: We use the 'unsafe' accessor for the access specifier here,
// because Sema may not have set it yet. That's really just a misdesign
// in Sema. However, LLDB *will* have set the access specifier correctly,
@@ -670,16 +810,30 @@ void CXXRecordDecl::addedMember(Decl *D) {
// If this is the first declaration of a special member, we no longer have
// an implicit trivial special member.
data().HasTrivialSpecialMembers &=
- data().DeclaredSpecialMembers | ~SMKind;
+ data().DeclaredSpecialMembers | ~SMKind;
+ data().HasTrivialSpecialMembersForCall &=
+ data().DeclaredSpecialMembers | ~SMKind;
if (!Method->isImplicit() && !Method->isUserProvided()) {
// This method is user-declared but not user-provided. We can't work out
// whether it's trivial yet (not until we get to the end of the class).
// We'll handle this method in finishedDefaultedOrDeletedMember.
- } else if (Method->isTrivial())
+ } else if (Method->isTrivial()) {
data().HasTrivialSpecialMembers |= SMKind;
- else
+ data().HasTrivialSpecialMembersForCall |= SMKind;
+ } else if (Method->isTrivialForCall()) {
+ data().HasTrivialSpecialMembersForCall |= SMKind;
data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ } else {
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ // If this is a user-provided function, do not set
+ // DeclaredNonTrivialSpecialMembersForCall here since we don't know
+ // yet whether the method would be considered non-trivial for the
+ // purpose of calls (attribute "trivial_abi" can be dropped from the
+ // class later, which can change the special method's triviality).
+ if (!Method->isUserProvided())
+ data().DeclaredNonTrivialSpecialMembersForCall |= SMKind;
+ }
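// For illustration only (a hypothetical type, not part of this change): with
//
//   struct [[clang::trivial_abi]] S { S(const S &); ~S(); };
//
// the user-provided copy constructor and destructor are not trivial, but
// they are trivial for the purpose of calls, so S may still be passed in
// registers.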
// Note when we have declared a special member, and suppress the
// implicit declaration of this special member.
@@ -707,14 +861,39 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// Handle non-static data members.
- if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ if (const auto *Field = dyn_cast<FieldDecl>(D)) {
+ ASTContext &Context = getASTContext();
+
+ // C++2a [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has all non-static data members and bit-fields in the class and
+ // its base classes first declared in the same class
+ if (data().HasBasesWithFields)
+ data().IsStandardLayout = false;
+
// C++ [class.bit]p2:
// A declaration for a bit-field that omits the identifier declares an
// unnamed bit-field. Unnamed bit-fields are not members and cannot be
// initialized.
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitfield()) {
+ // C++ [meta.unary.prop]p4: [LWG2358]
+ // T is a class type [...] with [...] no unnamed bit-fields of non-zero
+ // length
+ if (data().Empty && !Field->isZeroLengthBitField(Context) &&
+ Context.getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver6)
+ data().Empty = false;
return;
+ }
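// For illustration only (hypothetical types, not part of this change): per
// LWG2358, unnamed bit-fields of non-zero length now disqualify a class
// from being empty unless ABI compatibility with Clang 6 or earlier is
// requested:
//
//   struct A { int : 0; };   // still empty
//   struct B { int : 4; };   // no longer empty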
+ // C++11 [class]p7:
+ // A standard-layout class is a class that:
+ // -- either has no non-static data members in the most derived class
+ // [...] or has no base classes with non-static data members
+ if (data().HasBasesWithNonStaticDataMembers)
+ data().IsCXX11StandardLayout = false;
+
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class (clause 9) with [...] no
// private or protected non-static data members (clause 11).
@@ -725,6 +904,11 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().PlainOldData = false;
}
+ // Track whether this is the first field. We use this when checking
+ // whether the class is standard-layout below.
+ bool IsFirstField = !data().HasPrivateFields &&
+ !data().HasProtectedFields && !data().HasPublicFields;
+
// C++0x [class]p7:
// A standard-layout class is a class that:
// [...]
@@ -736,8 +920,10 @@ void CXXRecordDecl::addedMember(Decl *D) {
case AS_none: llvm_unreachable("Invalid access specifier");
};
if ((data().HasPrivateFields + data().HasProtectedFields +
- data().HasPublicFields) > 1)
+ data().HasPublicFields) > 1) {
data().IsStandardLayout = false;
+ data().IsCXX11StandardLayout = false;
+ }
// Keep track of the presence of mutable fields.
if (Field->isMutable()) {
@@ -758,7 +944,6 @@ void CXXRecordDecl::addedMember(Decl *D) {
//
// Automatic Reference Counting: the presence of a member of Objective-C
// pointer type that does not explicitly have no lifetime makes the class
// non-POD.
- ASTContext &Context = getASTContext();
QualType T = Context.getBaseElementType(Field->getType());
if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
if (T.hasNonTrivialObjCLifetime()) {
@@ -772,6 +957,17 @@ void CXXRecordDecl::addedMember(Decl *D) {
struct DefinitionData &Data = data();
Data.PlainOldData = false;
Data.HasTrivialSpecialMembers = 0;
+
+ // __strong or __weak fields do not make special functions non-trivial
+ // for the purpose of calls.
+ Qualifiers::ObjCLifetime LT = T.getQualifiers().getObjCLifetime();
+ if (LT != Qualifiers::OCL_Strong && LT != Qualifiers::OCL_Weak)
+ data().HasTrivialSpecialMembersForCall = 0;
+
+ // Structs with __weak fields should never be passed directly.
+ if (LT == Qualifiers::OCL_Weak)
+ setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+
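// For illustration only (hypothetical types, not part of this change),
// under ARC:
//
//   struct S { __strong id x; };   // special members stay trivial for the
//                                  // purpose of calls
//   struct W { __weak id y; };     // additionally can never be passed in
//                                  // registers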
Data.HasIrrelevantDestructor = false;
} else if (!Context.getLangOpts().ObjCAutoRefCount) {
setHasObjectMember(true);
@@ -787,6 +983,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// A standard-layout class is a class that:
// -- has no non-static data members of type [...] reference,
data().IsStandardLayout = false;
+ data().IsCXX11StandardLayout = false;
// C++1z [class.copy.ctor]p10:
// A defaulted copy constructor for a class X is defined as deleted if X has:
@@ -838,8 +1035,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (T->isReferenceType())
data().DefaultedMoveAssignmentIsDeleted = true;
- if (const RecordType *RecordTy = T->getAs<RecordType>()) {
- CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (const auto *RecordTy = T->getAs<RecordType>()) {
+ auto *FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
if (FieldRec->getDefinition()) {
addedClassSubobject(FieldRec);
@@ -899,12 +1096,19 @@ void CXXRecordDecl::addedMember(Decl *D) {
// member is trivial;
if (!FieldRec->hasTrivialCopyConstructor())
data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
+
+ if (!FieldRec->hasTrivialCopyConstructorForCall())
+ data().HasTrivialSpecialMembersForCall &= ~SMF_CopyConstructor;
+
// If the field doesn't have a simple move constructor, we'll eagerly
// declare the move constructor for this class and we'll decide whether
// it's trivial then.
if (!FieldRec->hasTrivialMoveConstructor())
data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;
+ if (!FieldRec->hasTrivialMoveConstructorForCall())
+ data().HasTrivialSpecialMembersForCall &= ~SMF_MoveConstructor;
+
// C++0x [class.copy]p27:
// A copy/move assignment operator for class X is trivial if [...]
// [...]
@@ -921,12 +1125,17 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (!FieldRec->hasTrivialDestructor())
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ if (!FieldRec->hasTrivialDestructorForCall())
+ data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
if (!FieldRec->hasIrrelevantDestructor())
data().HasIrrelevantDestructor = false;
if (FieldRec->hasObjectMember())
setHasObjectMember(true);
if (FieldRec->hasVolatileMember())
setHasVolatileMember(true);
+ if (FieldRec->getArgPassingRestrictions() ==
+ RecordDecl::APK_CanNeverPassInRegs)
+ setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
// C++0x [class]p7:
// A standard-layout class is a class that:
@@ -934,31 +1143,32 @@ void CXXRecordDecl::addedMember(Decl *D) {
// class (or array of such types) [...]
if (!FieldRec->isStandardLayout())
data().IsStandardLayout = false;
+ if (!FieldRec->isCXX11StandardLayout())
+ data().IsCXX11StandardLayout = false;
- // C++0x [class]p7:
+ // C++2a [class]p7:
// A standard-layout class is a class that:
// [...]
+ // -- has no element of the set M(S) of types as a base class.
+ if (data().IsStandardLayout && (isUnion() || IsFirstField) &&
+ hasSubobjectAtOffsetZeroOfEmptyBaseType(Context, FieldRec))
+ data().IsStandardLayout = false;
+
+ // C++11 [class]p7:
+ // A standard-layout class is a class that:
// -- has no base classes of the same type as the first non-static
- // data member.
- // We don't want to expend bits in the state of the record decl
- // tracking whether this is the first non-static data member so we
- // cheat a bit and use some of the existing state: the empty bit.
- // Virtual bases and virtual methods make a class non-empty, but they
- // also make it non-standard-layout so we needn't check here.
- // A non-empty base class may leave the class standard-layout, but not
- // if we have arrived here, and have at least one non-static data
- // member. If IsStandardLayout remains true, then the first non-static
- // data member must come through here with Empty still true, and Empty
- // will subsequently be set to false below.
- if (data().IsStandardLayout && data().Empty) {
+ // data member
+ if (data().IsCXX11StandardLayout && IsFirstField) {
+ // FIXME: We should check all base classes here, not just direct
+ // base classes.
for (const auto &BI : bases()) {
if (Context.hasSameUnqualifiedType(BI.getType(), T)) {
- data().IsStandardLayout = false;
+ data().IsCXX11StandardLayout = false;
break;
}
}
}
-
+
// Keep track of the presence of mutable fields.
if (FieldRec->hasMutableFields()) {
data().HasMutableFields = true;
@@ -1021,31 +1231,13 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().DefaultedMoveAssignmentIsDeleted = true;
}
- // C++0x [class]p7:
- // A standard-layout class is a class that:
- // [...]
- // -- either has no non-static data members in the most derived
- // class and at most one base class with non-static data members,
- // or has no base classes with non-static data members, and
- // At this point we know that we have a non-static data member, so the last
- // clause holds.
- if (!data().HasNoNonEmptyBases)
- data().IsStandardLayout = false;
-
// C++14 [meta.unary.prop]p4:
- // T is a class type [...] with [...] no non-static data members other
- // than bit-fields of length 0...
- if (data().Empty) {
- if (!Field->isBitField() ||
- (!Field->getBitWidth()->isTypeDependent() &&
- !Field->getBitWidth()->isValueDependent() &&
- Field->getBitWidthValue(Context) != 0))
- data().Empty = false;
- }
+ // T is a class type [...] with [...] no non-static data members
+ data().Empty = false;
}
// Handle using declarations of conversion functions.
- if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D)) {
+ if (auto *Shadow = dyn_cast<UsingShadowDecl>(D)) {
if (Shadow->getDeclName().getNameKind()
== DeclarationName::CXXConversionFunctionName) {
ASTContext &Ctx = getASTContext();
@@ -1053,7 +1245,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
}
- if (UsingDecl *Using = dyn_cast<UsingDecl>(D)) {
+ if (const auto *Using = dyn_cast<UsingDecl>(D)) {
if (Using->getDeclName().getNameKind() ==
DeclarationName::CXXConstructorName) {
data().HasInheritedConstructor = true;
@@ -1073,7 +1265,7 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
// The kind of special member this declaration is, if any.
unsigned SMKind = 0;
- if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
if (Constructor->isDefaultConstructor()) {
SMKind |= SMF_DefaultConstructor;
if (Constructor->isConstexpr())
@@ -1103,6 +1295,23 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
data().DeclaredNonTrivialSpecialMembers |= SMKind;
}
+void CXXRecordDecl::setTrivialForCallFlags(CXXMethodDecl *D) {
+ unsigned SMKind = 0;
+
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (Constructor->isCopyConstructor())
+ SMKind = SMF_CopyConstructor;
+ else if (Constructor->isMoveConstructor())
+ SMKind = SMF_MoveConstructor;
+ } else if (isa<CXXDestructorDecl>(D))
+ SMKind = SMF_Destructor;
+
+ if (D->isTrivialForCall())
+ data().HasTrivialSpecialMembersForCall |= SMKind;
+ else
+ data().DeclaredNonTrivialSpecialMembersForCall |= SMKind;
+}
+
bool CXXRecordDecl::isCLike() const {
if (getTagKind() == TTK_Class || getTagKind() == TTK_Interface ||
!TemplateOrInstantiation.isNull())
@@ -1128,8 +1337,7 @@ CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const {
assert(Calls.size() == 1 && "More than one lambda call operator!");
NamedDecl *CallOp = Calls.front();
- if (FunctionTemplateDecl *CallOpTmpl =
- dyn_cast<FunctionTemplateDecl>(CallOp))
+ if (const auto *CallOpTmpl = dyn_cast<FunctionTemplateDecl>(CallOp))
return cast<CXXMethodDecl>(CallOpTmpl->getTemplatedDecl());
return cast<CXXMethodDecl>(CallOp);
@@ -1143,8 +1351,7 @@ CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const {
if (Invoker.empty()) return nullptr;
assert(Invoker.size() == 1 && "More than one static invoker operator!");
NamedDecl *InvokerFun = Invoker.front();
- if (FunctionTemplateDecl *InvokerTemplate =
- dyn_cast<FunctionTemplateDecl>(InvokerFun))
+ if (const auto *InvokerTemplate = dyn_cast<FunctionTemplateDecl>(InvokerFun))
return cast<CXXMethodDecl>(InvokerTemplate->getTemplatedDecl());
return cast<CXXMethodDecl>(InvokerFun);
@@ -1257,7 +1464,7 @@ static void CollectVisibleConversions(ASTContext &Context,
= CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
bool BaseInVirtual = InVirtual || I.isVirtual();
- CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ auto *Base = cast<CXXRecordDecl>(RT->getDecl());
CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
*HiddenTypes, Output, VOutput, HiddenVBaseCs);
}
@@ -1384,8 +1591,7 @@ void CXXRecordDecl::setDescribedClassTemplate(ClassTemplateDecl *Template) {
}
TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
- if (const ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(this))
+ if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(this))
return Spec->getSpecializationKind();
if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
@@ -1396,8 +1602,7 @@ TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
void
CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
+ if (auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
Spec->setSpecializationKind(TSK);
return;
}
@@ -1575,7 +1780,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
SOEnd = M->second.end();
SO != SOEnd && !Done; ++SO) {
assert(SO->second.size() > 0 &&
- "All virtual functions have overridding virtual functions");
+ "All virtual functions have overriding virtual functions");
// C++ [class.abstract]p4:
// A class is abstract if it contains or inherits at least one
@@ -1602,8 +1807,8 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
for (const auto &B : bases()) {
- CXXRecordDecl *BaseDecl
- = cast<CXXRecordDecl>(B.getType()->getAs<RecordType>()->getDecl());
+ const auto *BaseDecl =
+ cast<CXXRecordDecl>(B.getType()->getAs<RecordType>()->getDecl());
if (BaseDecl->isAbstract())
return true;
}
@@ -1670,7 +1875,7 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
}
for (auto *ND : RD->lookup(getDeclName())) {
- CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND);
+ auto *MD = dyn_cast<CXXMethodDecl>(ND);
if (!MD)
continue;
if (recursivelyOverrides(MD, this))
@@ -1683,7 +1888,7 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT)
continue;
- const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ const auto *Base = cast<CXXRecordDecl>(RT->getDecl());
CXXMethodDecl *T = this->getCorrespondingMethodInClass(Base);
if (T)
return T;
@@ -1758,8 +1963,8 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
if (BestDynamicDecl->hasAttr<FinalAttr>())
return DevirtualizedMethod;
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (VD->getType()->isRecordType())
// This is a record decl. We know the type and can devirtualize it.
return DevirtualizedMethod;
@@ -1770,9 +1975,10 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
// We can devirtualize calls on an object accessed by a class member access
// expression, since by C++11 [basic.life]p6 we know that it can't refer to
// a derived class object constructed in the same location.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
- return VD->getType()->isRecordType() ? DevirtualizedMethod : nullptr;
+ if (const auto *ME = dyn_cast<MemberExpr>(Base)) {
+ const ValueDecl *VD = ME->getMemberDecl();
+ return VD->getType()->isRecordType() ? DevirtualizedMethod : nullptr;
+ }
// Likewise for calls on an object accessed by a (non-reference) pointer to
// member access.
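The hunk above also drops a dyn_cast that could never fail, since MemberExpr::getMemberDecl() already returns a ValueDecl*. The devirtualization rule itself, in source terms (an illustrative sketch, not code from this patch):

struct Base { virtual int f() { return 1; } };
struct Derived : Base { int f() override { return 2; } };

struct Holder { Base B; }; // member of non-pointer record type

int call(Holder &H) {
  return H.B.f(); // the dynamic type of H.B is exactly Base, so the
                  // call can be emitted directly, with no vtable lookup
}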
@@ -1848,7 +2054,7 @@ bool CXXMethodDecl::isUsualDeallocationFunction() const {
DeclContext::lookup_result R = getDeclContext()->lookup(getDeclName());
for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
I != E; ++I) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I))
+ if (const auto *FD = dyn_cast<FunctionDecl>(*I))
if (FD->getNumParams() == 1)
return false;
}
@@ -1868,7 +2074,7 @@ bool CXXMethodDecl::isCopyAssignmentOperator() const {
return false;
QualType ParamType = getParamDecl(0)->getType();
- if (const LValueReferenceType *Ref = ParamType->getAs<LValueReferenceType>())
+ if (const auto *Ref = ParamType->getAs<LValueReferenceType>())
ParamType = Ref->getPointeeType();
ASTContext &Context = getASTContext();
@@ -2006,7 +2212,7 @@ TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
if (isBaseInitializer())
return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
else
- return TypeLoc();
+ return {};
}
const Type *CXXCtorInitializer::getBaseClass() const {
@@ -2023,10 +2229,10 @@ SourceLocation CXXCtorInitializer::getSourceLocation() const {
if (isAnyMemberInitializer())
return getMemberLocation();
- if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
+ if (const auto *TSInfo = Initializee.get<TypeSourceInfo *>())
return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
- return SourceLocation();
+ return {};
}
SourceRange CXXCtorInitializer::getSourceRange() const {
@@ -2034,7 +2240,7 @@ SourceRange CXXCtorInitializer::getSourceRange() const {
FieldDecl *D = getAnyMember();
if (Expr *I = D->getInClassInitializer())
return I->getSourceRange();
- return SourceRange();
+ return {};
}
return SourceRange(getSourceLocation(), getRParenLoc());
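The TypeLoc()/SourceLocation()/SourceRange() rewrites above all use the same idiom: return {}; value-initializes the function's declared return type while naming it only once. A standalone sketch:

struct RangeLike { int Begin = 0, End = 0; };

RangeLike a() { return RangeLike(); } // spells the type a second time
RangeLike b() { return {}; }          // same value, type named only once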
@@ -2078,7 +2284,7 @@ CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const {
CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
assert(isDelegatingConstructor() && "Not a delegating constructor!");
Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
- if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E))
+ if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
return Construct->getConstructor();
return nullptr;
@@ -2103,7 +2309,7 @@ bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
getParamDecl(0)->getType()->isRValueReferenceType();
}
-/// \brief Determine whether this is a copy or move constructor.
+/// Determine whether this is a copy or move constructor.
bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
// C++ [class.copy]p2:
// A non-template constructor for class X is a copy constructor
@@ -2124,7 +2330,7 @@ bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
const ParmVarDecl *Param = getParamDecl(0);
// Do we have a reference type?
- const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>();
+ const auto *ParamRefType = Param->getType()->getAs<ReferenceType>();
if (!ParamRefType)
return false;
@@ -2174,7 +2380,7 @@ bool CXXConstructorDecl::isSpecializationCopyingObject() const {
ASTContext &Context = getASTContext();
CanQualType ParamType = Context.getCanonicalType(Param->getType());
- // Is it the same as our our class type?
+ // Is it the same as our class type?
CanQualType ClassTy
= Context.getCanonicalType(Context.getTagDeclType(getParent()));
if (ParamType.getUnqualifiedType() != ClassTy)
@@ -2271,7 +2477,7 @@ UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentLoc,
NamedDecl *Used,
DeclContext *CommonAncestor) {
- if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
+ if (auto *NS = dyn_cast_or_null<NamespaceDecl>(Used))
Used = NS->getOriginalNamespace();
return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
IdentLoc, Used, CommonAncestor);
@@ -2286,8 +2492,7 @@ UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C,
}
NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
- if (NamespaceAliasDecl *NA =
- dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
+ if (auto *NA = dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
return NA->getNamespace();
return cast_or_null<NamespaceDecl>(NominatedNamespace);
}
@@ -2367,7 +2572,7 @@ NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentLoc,
NamedDecl *Namespace) {
// FIXME: Preserve the aliased namespace as written.
- if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
+ if (auto *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
Namespace = NS->getOriginalNamespace();
return new (C, DC) NamespaceAliasDecl(C, DC, UsingLoc, AliasLoc, Alias,
QualifierLoc, IdentLoc, Namespace);
@@ -2387,10 +2592,9 @@ UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, DeclContext *DC,
SourceLocation Loc, UsingDecl *Using,
NamedDecl *Target)
: NamedDecl(K, DC, Loc, Using ? Using->getDeclName() : DeclarationName()),
- redeclarable_base(C), Underlying(Target),
- UsingOrNextShadow(cast<NamedDecl>(Using)) {
+ redeclarable_base(C), UsingOrNextShadow(cast<NamedDecl>(Using)) {
if (Target)
- IdentifierNamespace = Target->getIdentifierNamespace();
+ setTargetDecl(Target);
setImplicit();
}
@@ -2405,8 +2609,8 @@ UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
UsingDecl *UsingShadowDecl::getUsingDecl() const {
const UsingShadowDecl *Shadow = this;
- while (const UsingShadowDecl *NextShadow =
- dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
+ while (const auto *NextShadow =
+ dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
Shadow = NextShadow;
return cast<UsingDecl>(Shadow->UsingOrNextShadow);
}
@@ -2625,7 +2829,7 @@ DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C,
void DecompositionDecl::printName(llvm::raw_ostream &os) const {
os << '[';
bool Comma = false;
- for (auto *B : bindings()) {
+ for (const auto *B : bindings()) {
if (Comma)
os << ", ";
B->printName(os);
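For reference, the kind of declaration this printer renders and roughly what it emits (a sketch; the pair initializer is an assumption for illustration):

#include <utility>

void demo() {
  auto [x, y] = std::pair<int, int>(1, 2); // a DecompositionDecl;
  (void)x;                                 // printName renders "[x, y]"
  (void)y;
}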
diff --git a/lib/AST/DeclFriend.cpp b/lib/AST/DeclFriend.cpp
index 461bf36858b7..08fbed361579 100644
--- a/lib/AST/DeclFriend.cpp
+++ b/lib/AST/DeclFriend.cpp
@@ -39,7 +39,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
ArrayRef<TemplateParameterList *> FriendTypeTPLists) {
#ifndef NDEBUG
if (Friend.is<NamedDecl *>()) {
- NamedDecl *D = Friend.get<NamedDecl*>();
+ const auto *D = Friend.get<NamedDecl*>();
assert(isa<FunctionDecl>(D) ||
isa<CXXRecordDecl>(D) ||
isa<FunctionTemplateDecl>(D) ||
@@ -57,8 +57,8 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
std::size_t Extra =
FriendDecl::additionalSizeToAlloc<TemplateParameterList *>(
FriendTypeTPLists.size());
- FriendDecl *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
- FriendTypeTPLists);
+ auto *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
+ FriendTypeTPLists);
cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
return FD;
}
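Friend here is an llvm::PointerUnion, which the NDEBUG block above queries with is<T>() / get<T>(). A minimal sketch of that interface with illustrative payload types:

#include "llvm/ADT/PointerUnion.h"

int read(llvm::PointerUnion<int *, float *> U) {
  if (U.is<int *>())        // which alternative is active?
    return *U.get<int *>(); // get<T> asserts the union holds a T
  return static_cast<int>(*U.get<float *>());
}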
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index f95d5def47ac..5db045099997 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -74,7 +74,7 @@ ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const {
lookup_result R = lookup(Id);
for (lookup_iterator Ivar = R.begin(), IvarEnd = R.end();
Ivar != IvarEnd; ++Ivar) {
- if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
+ if (auto *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
return ivar;
}
return nullptr;
@@ -86,7 +86,7 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
bool AllowHidden) const {
// If this context is a hidden protocol definition, don't find any
// methods there.
- if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
+ if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
if (Def->isHidden() && !AllowHidden)
return nullptr;
@@ -102,14 +102,14 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
lookup_result R = lookup(Sel);
for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
Meth != MethEnd; ++Meth) {
- ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ auto *MD = dyn_cast<ObjCMethodDecl>(*Meth);
if (MD && MD->isInstanceMethod() == isInstance)
return MD;
}
return nullptr;
}
-/// \brief This routine returns 'true' if a user declared setter method was
+/// This routine returns 'true' if a user declared setter method was
/// found in the class, its protocols, its super classes or categories.
/// It also returns 'true' if one of its categories has declared a 'readwrite'
/// property. This is because the user must provide a setter method for the
@@ -120,12 +120,12 @@ bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
lookup_result R = lookup(Sel);
for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
Meth != MethEnd; ++Meth) {
- ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ auto *MD = dyn_cast<ObjCMethodDecl>(*Meth);
if (MD && MD->isInstanceMethod() && !MD->isImplicit())
return true;
}
- if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
+ if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
// Also look into categories, including class extensions, looking
// for a user declared instance method.
for (const auto *Cat : ID->visible_categories()) {
@@ -159,7 +159,7 @@ bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
OSC = OSC->getSuperClass();
}
}
- if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(this))
+ if (const auto *PD = dyn_cast<ObjCProtocolDecl>(this))
for (const auto *PI : PD->protocols())
if (PI->HasUserDeclaredSetterMethod(Property))
return true;
@@ -172,7 +172,7 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
ObjCPropertyQueryKind queryKind) {
// If this context is a hidden protocol definition, don't find any
// property.
- if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
+ if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
if (Def->isHidden())
return nullptr;
@@ -192,7 +192,7 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
ObjCPropertyDecl *classProp = nullptr;
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
++I)
- if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I)) {
+ if (auto *PD = dyn_cast<ObjCPropertyDecl>(*I)) {
// If queryKind is unknown, we return the instance property if one
// exists; otherwise we return the class property.
if ((queryKind == ObjCPropertyQueryKind::OBJC_PR_query_unknown &&
@@ -230,7 +230,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
const IdentifierInfo *PropertyId,
ObjCPropertyQueryKind QueryKind) const {
// Don't find properties within hidden protocol definitions.
- if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
+ if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
if (Def->isHidden())
return nullptr;
@@ -254,7 +254,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
default:
break;
case Decl::ObjCProtocol: {
- const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
+ const auto *PID = cast<ObjCProtocolDecl>(this);
for (const auto *I : PID->protocols())
if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId,
QueryKind))
@@ -262,7 +262,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
break;
}
case Decl::ObjCInterface: {
- const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
+ const auto *OID = cast<ObjCInterfaceDecl>(this);
// Look through categories (but not extensions; they were handled above).
for (const auto *Cat : OID->visible_categories()) {
if (!Cat->IsClassExtension())
@@ -283,7 +283,7 @@ ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
break;
}
case Decl::ObjCCategory: {
- const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
+ const auto *OCD = cast<ObjCCategoryDecl>(this);
// Look through protocols.
if (!OCD->IsClassExtension())
for (const auto *I : OCD->protocols())
@@ -310,7 +310,8 @@ ObjCTypeParamList *ObjCInterfaceDecl::getTypeParamList() const {
// Otherwise, look at previous declarations to determine whether any
// of them has a type parameter list, skipping over those
// declarations that do not.
- for (auto decl = getMostRecentDecl(); decl; decl = decl->getPreviousDecl()) {
+ for (const ObjCInterfaceDecl *decl = getMostRecentDecl(); decl;
+ decl = decl->getPreviousDecl()) {
if (ObjCTypeParamList *written = decl->getTypeParamListAsWritten())
return written;
}
@@ -323,7 +324,7 @@ void ObjCInterfaceDecl::setTypeParamList(ObjCTypeParamList *TPL) {
if (!TPL)
return;
// Set the declaration context of each of the type parameters.
- for (auto typeParam : *TypeParamList)
+ for (auto *typeParam : *TypeParamList)
typeParam->setDeclContext(this);
}
@@ -437,7 +438,7 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
// Check for duplicate protocol in class's protocol list.
// This is O(n*m). But it is extremely rare and number of protocols in
// class or its extension are very few.
- SmallVector<ObjCProtocolDecl*, 8> ProtocolRefs;
+ SmallVector<ObjCProtocolDecl *, 8> ProtocolRefs;
for (unsigned i = 0; i < ExtNum; i++) {
bool protocolExists = false;
ObjCProtocolDecl *ProtoInExtension = ExtList[i];
@@ -604,7 +605,7 @@ void ObjCInterfaceDecl::startDefinition() {
allocateDefinitionData();
// Update all of the declarations with a pointer to the definition.
- for (auto RD : redecls()) {
+ for (auto *RD : redecls()) {
if (RD != this)
RD->Data = Data;
}
@@ -710,9 +711,8 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
// Didn't find one yet - look through protocols.
const ObjCList<ObjCProtocolDecl> &Protocols =
Cat->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end(); I != E; ++I)
- if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ for (auto *Protocol : Protocols)
+ if ((MethodDecl = Protocol->lookupMethod(Sel, isInstance)))
if (C != Cat || !MethodDecl->isImplicit())
return MethodDecl;
}
@@ -854,7 +854,7 @@ void ObjCMethodDecl::setMethodParams(ASTContext &C,
setParamsAndSelLocs(C, Params, SelLocs);
}
-/// \brief A definition will return its interface declaration.
+/// A definition will return its interface declaration.
/// An interface declaration will return its definition.
/// Otherwise it will return itself.
ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
@@ -865,27 +865,25 @@ ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
if (Redecl)
return Redecl;
- Decl *CtxD = cast<Decl>(getDeclContext());
+ auto *CtxD = cast<Decl>(getDeclContext());
if (!CtxD->isInvalidDecl()) {
- if (ObjCInterfaceDecl *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) {
+ if (auto *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) {
if (ObjCImplementationDecl *ImplD = Ctx.getObjCImplementation(IFD))
if (!ImplD->isInvalidDecl())
Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
- } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) {
+ } else if (auto *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) {
if (ObjCCategoryImplDecl *ImplD = Ctx.getObjCImplementation(CD))
if (!ImplD->isInvalidDecl())
Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
- } else if (ObjCImplementationDecl *ImplD =
- dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ } else if (auto *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
if (!IFD->isInvalidDecl())
Redecl = IFD->getMethod(getSelector(), isInstanceMethod());
- } else if (ObjCCategoryImplDecl *CImplD =
- dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ } else if (auto *CImplD = dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
if (!CatD->isInvalidDecl())
Redecl = CatD->getMethod(getSelector(), isInstanceMethod());
@@ -908,15 +906,14 @@ ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
}
ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
- Decl *CtxD = cast<Decl>(getDeclContext());
+ auto *CtxD = cast<Decl>(getDeclContext());
- if (ObjCImplementationDecl *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (auto *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
if (ObjCMethodDecl *MD = IFD->getMethod(getSelector(),
isInstanceMethod()))
return MD;
- } else if (ObjCCategoryImplDecl *CImplD =
- dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ } else if (auto *CImplD = dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
if (ObjCMethodDecl *MD = CatD->getMethod(getSelector(),
isInstanceMethod()))
@@ -941,7 +938,7 @@ SourceLocation ObjCMethodDecl::getLocEnd() const {
}
ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
- ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
+ auto family = static_cast<ObjCMethodFamily>(Family);
if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
return family;
@@ -1099,11 +1096,11 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
}
ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
- if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
+ if (auto *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
return ID;
- if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
+ if (auto *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
return CD->getClassInterface();
- if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
+ if (auto *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
return IMD->getClassInterface();
if (isa<ObjCProtocolDecl>(getDeclContext()))
return nullptr;
@@ -1138,11 +1135,10 @@ static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container,
if (!Container)
return;
- // In categories look for overriden methods from protocols. A method from
- // category is not "overriden" since it is considered as the "same" method
+ // In categories look for overridden methods from protocols. A method from
+ // category is not "overridden" since it is considered as the "same" method
// (same USR) as the one from the interface.
- if (const ObjCCategoryDecl *
- Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
// Check whether we have a matching method at this category but only if we
// are at the super class level.
if (MovedToSuper)
@@ -1174,13 +1170,12 @@ static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container,
return;
}
- if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
+ if (const auto *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
for (const auto *P : Protocol->protocols())
CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
}
- if (const ObjCInterfaceDecl *
- Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ if (const auto *Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
for (const auto *P : Interface->protocols())
CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
@@ -1204,12 +1199,12 @@ static void collectOverriddenMethodsSlow(const ObjCMethodDecl *Method,
SmallVectorImpl<const ObjCMethodDecl *> &overridden) {
assert(Method->isOverriding());
- if (const ObjCProtocolDecl *
- ProtD = dyn_cast<ObjCProtocolDecl>(Method->getDeclContext())) {
+ if (const auto *ProtD =
+ dyn_cast<ObjCProtocolDecl>(Method->getDeclContext())) {
CollectOverriddenMethods(ProtD, Method, overridden);
- } else if (const ObjCImplDecl *
- IMD = dyn_cast<ObjCImplDecl>(Method->getDeclContext())) {
+ } else if (const auto *IMD =
+ dyn_cast<ObjCImplDecl>(Method->getDeclContext())) {
const ObjCInterfaceDecl *ID = IMD->getClassInterface();
if (!ID)
return;
@@ -1221,8 +1216,8 @@ static void collectOverriddenMethodsSlow(const ObjCMethodDecl *Method,
Method = IFaceMeth;
CollectOverriddenMethods(ID, Method, overridden);
- } else if (const ObjCCategoryDecl *
- CatD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) {
+ } else if (const auto *CatD =
+ dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) {
const ObjCInterfaceDecl *ID = CatD->getClassInterface();
if (!ID)
return;
@@ -1265,7 +1260,7 @@ ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
return nullptr;
if (isPropertyAccessor()) {
- const ObjCContainerDecl *Container = cast<ObjCContainerDecl>(getParent());
+ const auto *Container = cast<ObjCContainerDecl>(getParent());
bool IsGetter = (NumArgs == 0);
bool IsInstance = isInstanceMethod();
@@ -1328,11 +1323,9 @@ ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
OverridesTy Overrides;
getOverriddenMethods(Overrides);
- for (OverridesTy::const_iterator I = Overrides.begin(), E = Overrides.end();
- I != E; ++I) {
- if (const ObjCPropertyDecl *Prop = (*I)->findPropertyDecl(false))
+ for (const auto *Override : Overrides)
+ if (const ObjCPropertyDecl *Prop = Override->findPropertyDecl(false))
return Prop;
- }
return nullptr;
}
@@ -1422,7 +1415,7 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
ObjCInterfaceDecl *PrevDecl,
SourceLocation ClassLoc,
bool isInternal){
- ObjCInterfaceDecl *Result = new (C, DC)
+ auto *Result = new (C, DC)
ObjCInterfaceDecl(C, DC, atLoc, Id, typeParamList, ClassLoc, PrevDecl,
isInternal);
Result->Data.setInt(!C.getLangOpts().Modules);
@@ -1432,12 +1425,9 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C,
unsigned ID) {
- ObjCInterfaceDecl *Result = new (C, ID) ObjCInterfaceDecl(C, nullptr,
- SourceLocation(),
- nullptr,
- nullptr,
- SourceLocation(),
- nullptr, false);
+ auto *Result = new (C, ID)
+ ObjCInterfaceDecl(C, nullptr, SourceLocation(), nullptr, nullptr,
+ SourceLocation(), nullptr, false);
Result->Data.setInt(!C.getLangOpts().Modules);
return Result;
}
@@ -1495,7 +1485,7 @@ bool ObjCInterfaceDecl::hasDesignatedInitializers() const {
StringRef
ObjCInterfaceDecl::getObjCRuntimeNameAsString() const {
- if (ObjCRuntimeNameAttr *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
+ if (const auto *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
return ObjCRTName->getMetadataName();
return getName();
@@ -1731,9 +1721,9 @@ ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
"Invalid ivar decl context!");
// Once a new ivar is created in any of class/class-extension/implementation
// decl contexts, the previously built IvarList must be rebuilt.
- ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC);
+ auto *ID = dyn_cast<ObjCInterfaceDecl>(DC);
if (!ID) {
- if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC))
+ if (auto *IM = dyn_cast<ObjCImplementationDecl>(DC))
ID = IM->getClassInterface();
else
ID = cast<ObjCCategoryDecl>(DC)->getClassInterface();
@@ -1752,7 +1742,7 @@ ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
}
const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
- const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext());
+ const auto *DC = cast<ObjCContainerDecl>(getDeclContext());
switch (DC->getKind()) {
default:
@@ -1762,7 +1752,7 @@ const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
// Ivars can only appear in class extension categories.
case ObjCCategory: {
- const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
+ const auto *CD = cast<ObjCCategoryDecl>(DC);
assert(CD->IsClassExtension() && "invalid container for ivar!");
return CD->getClassInterface();
}
@@ -1822,7 +1812,7 @@ ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation nameLoc,
SourceLocation atStartLoc,
ObjCProtocolDecl *PrevDecl) {
- ObjCProtocolDecl *Result =
+ auto *Result =
new (C, DC) ObjCProtocolDecl(C, DC, Id, nameLoc, atStartLoc, PrevDecl);
Result->Data.setInt(!C.getLangOpts().Modules);
return Result;
@@ -1881,7 +1871,7 @@ void ObjCProtocolDecl::startDefinition() {
allocateDefinitionData();
// Update all of the declarations with a pointer to the definition.
- for (auto RD : redecls())
+ for (auto *RD : redecls())
RD->Data = this->Data;
}
@@ -1923,7 +1913,7 @@ void ObjCProtocolDecl::collectInheritedProtocolProperties(
StringRef
ObjCProtocolDecl::getObjCRuntimeNameAsString() const {
- if (ObjCRuntimeNameAttr *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
+ if (const auto *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
return ObjCRTName->getMetadataName();
return getName();
@@ -1957,7 +1947,7 @@ ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
ObjCTypeParamList *typeParamList,
SourceLocation IvarLBraceLoc,
SourceLocation IvarRBraceLoc) {
- ObjCCategoryDecl *CatDecl =
+ auto *CatDecl =
new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id,
IDecl, typeParamList, IvarLBraceLoc,
IvarRBraceLoc);
@@ -1995,11 +1985,10 @@ void ObjCCategoryDecl::setTypeParamList(ObjCTypeParamList *TPL) {
if (!TPL)
return;
// Set the declaration context of each of the type parameters.
- for (auto typeParam : *TypeParamList)
+ for (auto *typeParam : *TypeParamList)
typeParam->setDeclContext(this);
}
-
//===----------------------------------------------------------------------===//
// ObjCCategoryImplDecl
//===----------------------------------------------------------------------===//
@@ -2044,13 +2033,11 @@ void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) {
void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) {
ASTContext &Ctx = getASTContext();
- if (ObjCImplementationDecl *ImplD
- = dyn_cast_or_null<ObjCImplementationDecl>(this)) {
+ if (auto *ImplD = dyn_cast_or_null<ObjCImplementationDecl>(this)) {
if (IFace)
Ctx.setObjCImplementation(IFace, ImplD);
- } else if (ObjCCategoryImplDecl *ImplD =
- dyn_cast_or_null<ObjCCategoryImplDecl>(this)) {
+ } else if (auto *ImplD = dyn_cast_or_null<ObjCCategoryImplDecl>(this)) {
if (ObjCCategoryDecl *CD = IFace->FindCategoryDeclaration(getIdentifier()))
Ctx.setObjCImplementation(CD, ImplD);
}
@@ -2139,8 +2126,7 @@ void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
unsigned numInitializers) {
if (numInitializers > 0) {
NumIvarInitializers = numInitializers;
- CXXCtorInitializer **ivarInitializers =
- new (C) CXXCtorInitializer*[NumIvarInitializers];
+ auto **ivarInitializers = new (C) CXXCtorInitializer*[NumIvarInitializers];
memcpy(ivarInitializers, initializers,
numInitializers * sizeof(CXXCtorInitializer*));
IvarInitializers = ivarInitializers;
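The new (C) T*[N] above is ASTContext's placement array-new: storage comes from the context's arena and is never freed individually. A hedged sketch of the copy, with malloc standing in for the arena and an opaque stand-in type:

#include <cstdlib>
#include <cstring>

struct Init; // stand-in for CXXCtorInitializer

Init **copyInitializers(Init *const *Src, unsigned N) {
  auto **Dst = static_cast<Init **>(std::malloc(N * sizeof(Init *)));
  std::memcpy(Dst, Src, N * sizeof(Init *)); // shallow copy of pointers
  return Dst;                                // the arena owner frees, not us
}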
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index 95e44acca032..f5c3599ef6c6 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
/// \file
-/// \brief This file implements OMPThreadPrivateDecl, OMPCapturedExprDecl
+/// This file implements OMPThreadPrivateDecl, OMPCapturedExprDecl
/// classes.
///
//===----------------------------------------------------------------------===//
@@ -92,13 +92,14 @@ void OMPCapturedExprDecl::anchor() {}
OMPCapturedExprDecl *OMPCapturedExprDecl::Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id, QualType T,
SourceLocation StartLoc) {
- return new (C, DC) OMPCapturedExprDecl(C, DC, Id, T, StartLoc);
+ return new (C, DC) OMPCapturedExprDecl(
+ C, DC, Id, T, C.getTrivialTypeSourceInfo(T), StartLoc);
}
OMPCapturedExprDecl *OMPCapturedExprDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- return new (C, ID)
- OMPCapturedExprDecl(C, nullptr, nullptr, QualType(), SourceLocation());
+ return new (C, ID) OMPCapturedExprDecl(C, nullptr, nullptr, QualType(),
+ /*TInfo=*/nullptr, SourceLocation());
}
SourceRange OMPCapturedExprDecl::getSourceRange() const {
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index b792c5920a55..d3d9c23cca6e 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -128,9 +128,7 @@ static QualType GetBaseType(QualType T) {
// FIXME: This should be on the Type class!
QualType BaseType = T;
while (!BaseType->isSpecifierType()) {
- if (isa<TypedefType>(BaseType))
- break;
- else if (const PointerType* PTy = BaseType->getAs<PointerType>())
+ if (const PointerType *PTy = BaseType->getAs<PointerType>())
BaseType = PTy->getPointeeType();
else if (const BlockPointerType *BPy = BaseType->getAs<BlockPointerType>())
BaseType = BPy->getPointeeType();
@@ -144,8 +142,11 @@ static QualType GetBaseType(QualType T) {
BaseType = RTy->getPointeeType();
else if (const AutoType *ATy = BaseType->getAs<AutoType>())
BaseType = ATy->getDeducedType();
+ else if (const ParenType *PTy = BaseType->getAs<ParenType>())
+ BaseType = PTy->desugar();
else
- llvm_unreachable("Unknown declarator!");
+ // This must be a syntax error.
+ break;
}
return BaseType;
}
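In source terms, GetBaseType peels pointer, block-pointer, array, function, vector, reference, auto and now paren sugar until a specifier type remains. For both declarators below that base type is S; the new ParenType case is what covers fp's parenthesized declarator:

struct S { int x; };
S *p;         // Pointer -> S
S (*fp)(int); // Paren / Pointer / Function -> S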
@@ -214,6 +215,8 @@ void DeclPrinter::prettyPrintAttributes(Decl *D) {
if (D->hasAttrs()) {
AttrVec &Attrs = D->getAttrs();
for (auto *A : Attrs) {
+ if (A->isInherited() || A->isImplicit())
+ continue;
switch (A->getKind()) {
#define ATTR(X)
#define PRAGMA_SPELLING_ATTR(X) case attr::X:
@@ -372,21 +375,23 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
!isa<ClassTemplateSpecializationDecl>(DC))
continue;
- // The next bits of code handles stuff like "struct {int x;} a,b"; we're
+ // The next bits of code handle stuff like "struct {int x;} a,b"; we're
// forced to merge the declarations because there's no other way to
- // refer to the struct in question. This limited merging is safe without
- // a bunch of other checks because it only merges declarations directly
- // referring to the tag, not typedefs.
+ // refer to the struct in question. When that struct is named instead, we
+ // also need to merge to avoid splitting off a stand-alone struct
+ // declaration that produces the warning ext_no_declarators in some
+ // contexts.
+ //
+ // This limited merging is safe without a bunch of other checks because it
+ // only merges declarations directly referring to the tag, not typedefs.
//
// Check whether the current declaration should be grouped with a previous
- // unnamed struct.
+ // non-free-standing tag declaration.
QualType CurDeclType = getDeclType(*D);
if (!Decls.empty() && !CurDeclType.isNull()) {
QualType BaseType = GetBaseType(CurDeclType);
- if (!BaseType.isNull() && isa<ElaboratedType>(BaseType))
- BaseType = cast<ElaboratedType>(BaseType)->getNamedType();
- if (!BaseType.isNull() && isa<TagType>(BaseType) &&
- cast<TagType>(BaseType)->getDecl() == Decls[0]) {
+ if (!BaseType.isNull() && isa<ElaboratedType>(BaseType) &&
+ cast<ElaboratedType>(BaseType)->getOwnedTagDecl() == Decls[0]) {
Decls.push_back(*D);
continue;
}
@@ -396,9 +401,9 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
if (!Decls.empty())
ProcessDeclGroup(Decls);
- // If the current declaration is an unnamed tag type, save it
+ // If the current declaration is not a free standing declaration, save it
// so we can merge it with the subsequent declaration(s) using it.
- if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
+ if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->isFreeStanding()) {
Decls.push_back(*D);
continue;
}
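What the widened merging preserves when printing, in source terms:

struct { int x; } a, b; // unnamed tag: must stay in one group so the
                        // struct remains referable
struct T { int y; } c;  // named but not free-standing: now also grouped,
                        // so the printer never splits off a
                        // declarator-less "struct T { int y; };"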
@@ -495,14 +500,17 @@ void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
if (!Policy.SuppressSpecifiers && D->isModulePrivate())
Out << "__module_private__ ";
- Out << "enum ";
+ Out << "enum";
if (D->isScoped()) {
if (D->isScopedUsingClassTag())
- Out << "class ";
+ Out << " class";
else
- Out << "struct ";
+ Out << " struct";
}
- Out << *D;
+
+ prettyPrintAttributes(D);
+
+ Out << ' ' << *D;
if (D->isFixed() && D->getASTContext().getLangOpts().CPlusPlus11)
Out << " : " << D->getIntegerType().stream(Policy);
@@ -512,7 +520,6 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
VisitDeclContext(D);
Indent() << "}";
}
- prettyPrintAttributes(D);
}
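A hedged reading of the reordering above: enum attributes now print between the enum keyword(s) and the name rather than after the closing brace. For example:

enum class __attribute__((deprecated)) Color : int { Red, Green };
// now prints roughly as: enum class __attribute__((deprecated)) Color : int {...}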
void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
@@ -669,7 +676,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += ")";
} else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
Proto += " noexcept";
- if (FT->getExceptionSpecType() == EST_ComputedNoexcept) {
+ if (isComputedNoexcept(FT->getExceptionSpecType())) {
Proto += "(";
llvm::raw_string_ostream EOut(Proto);
FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy,
@@ -1526,7 +1533,7 @@ void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
E = D->varlist_end();
I != E; ++I) {
Out << (I == D->varlist_begin() ? '(' : ',');
- NamedDecl *ND = cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl());
+ NamedDecl *ND = cast<DeclRefExpr>(*I)->getDecl();
ND->printQualifiedName(Out);
}
Out << ")";
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index a7949b310cef..8854f7879ac6 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -56,11 +57,11 @@ TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
begin()[Idx] = P;
if (!P->isTemplateParameterPack()) {
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
if (NTTP->getType()->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
- if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
if (TTP->getTemplateParameters()->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
@@ -118,11 +119,9 @@ unsigned TemplateParameterList::getDepth() const {
return 0;
const NamedDecl *FirstParm = getParam(0);
- if (const TemplateTypeParmDecl *TTP
- = dyn_cast<TemplateTypeParmDecl>(FirstParm))
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(FirstParm))
return TTP->getDepth();
- else if (const NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(FirstParm))
+ else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(FirstParm))
return NTTP->getDepth();
else
return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth();
@@ -133,7 +132,7 @@ static void AdoptTemplateParameterList(TemplateParameterList *Params,
for (NamedDecl *P : *Params) {
P->setDeclContext(Owner);
- if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner);
}
}
@@ -258,7 +257,7 @@ FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
RedeclarableTemplateDecl::CommonBase *
FunctionTemplateDecl::newCommon(ASTContext &C) const {
- Common *CommonPtr = new (C) Common;
+ auto *CommonPtr = new (C) Common;
C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -318,8 +317,8 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
return new (C, DC) ClassTemplateDecl(C, DC, L, Name, Params, Decl);
}
- ConstrainedTemplateDeclInfo *const CTDI = new (C) ConstrainedTemplateDeclInfo;
- ClassTemplateDecl *const New =
+ auto *const CTDI = new (C) ConstrainedTemplateDeclInfo;
+ auto *const New =
new (C, DC) ClassTemplateDecl(CTDI, C, DC, L, Name, Params, Decl);
New->setAssociatedConstraints(AssociatedConstraints);
return New;
@@ -349,7 +348,7 @@ ClassTemplateDecl::getPartialSpecializations() {
RedeclarableTemplateDecl::CommonBase *
ClassTemplateDecl::newCommon(ASTContext &C) const {
- Common *CommonPtr = new (C) Common;
+ auto *CommonPtr = new (C) Common;
C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -453,8 +452,8 @@ TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation KeyLoc, SourceLocation NameLoc,
unsigned D, unsigned P, IdentifierInfo *Id,
bool Typename, bool ParameterPack) {
- TemplateTypeParmDecl *TTPDecl =
- new (C, DC) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
+ auto *TTPDecl =
+ new (C, DC) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
TTPDecl->setTypeForDecl(TTPType.getTypePtr());
return TTPDecl;
@@ -709,7 +708,7 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
ClassTemplateDecl *SpecializedTemplate,
ArrayRef<TemplateArgument> Args,
ClassTemplateSpecializationDecl *PrevDecl) {
- ClassTemplateSpecializationDecl *Result =
+ auto *Result =
new (Context, DC) ClassTemplateSpecializationDecl(
Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
SpecializedTemplate, Args, PrevDecl);
@@ -722,7 +721,7 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- ClassTemplateSpecializationDecl *Result =
+ auto *Result =
new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
Result->MayHaveOutOfDateDef = false;
return Result;
@@ -732,7 +731,7 @@ void ClassTemplateSpecializationDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
- auto *PS = dyn_cast<ClassTemplatePartialSpecializationDecl>(this);
+ const auto *PS = dyn_cast<ClassTemplatePartialSpecializationDecl>(this);
if (const ASTTemplateArgumentListInfo *ArgsAsWritten =
PS ? PS->getTemplateArgsAsWritten() : nullptr) {
printTemplateArgumentList(OS, ArgsAsWritten->arguments(), Policy);
@@ -744,8 +743,8 @@ void ClassTemplateSpecializationDecl::getNameForDiagnostic(
ClassTemplateDecl *
ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
- if (SpecializedPartialSpecialization *PartialSpec
- = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ if (const auto *PartialSpec =
+ SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
return PartialSpec->PartialSpecialization->getSpecializedTemplate();
return SpecializedTemplate.get<ClassTemplateDecl*>();
}
@@ -770,7 +769,7 @@ ClassTemplateSpecializationDecl::getSourceRange() const {
// uses ExplicitInfo to record the TypeAsWritten, but the source
// locations should be retrieved from the instantiation pattern.
using CTPSDecl = ClassTemplatePartialSpecializationDecl;
- CTPSDecl *ctpsd = const_cast<CTPSDecl*>(cast<CTPSDecl>(this));
+ auto *ctpsd = const_cast<CTPSDecl *>(cast<CTPSDecl>(this));
CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember();
assert(inst_from != nullptr);
return inst_from->getSourceRange();
@@ -782,9 +781,9 @@ ClassTemplateSpecializationDecl::getSourceRange() const {
inst_from = getInstantiatedFrom();
if (inst_from.isNull())
return getSpecializedTemplate()->getSourceRange();
- if (ClassTemplateDecl *ctd = inst_from.dyn_cast<ClassTemplateDecl*>())
+ if (const auto *ctd = inst_from.dyn_cast<ClassTemplateDecl *>())
return ctd->getSourceRange();
- return inst_from.get<ClassTemplatePartialSpecializationDecl*>()
+ return inst_from.get<ClassTemplatePartialSpecializationDecl *>()
->getSourceRange();
}
}
@@ -826,7 +825,7 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC,
const ASTTemplateArgumentListInfo *ASTArgInfos =
ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
- ClassTemplatePartialSpecializationDecl *Result = new (Context, DC)
+ auto *Result = new (Context, DC)
ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc,
Params, SpecializedTemplate, Args,
ASTArgInfos, PrevDecl);
@@ -840,8 +839,7 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC,
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- ClassTemplatePartialSpecializationDecl *Result =
- new (C, ID) ClassTemplatePartialSpecializationDecl(C);
+ auto *Result = new (C, ID) ClassTemplatePartialSpecializationDecl(C);
Result->MayHaveOutOfDateDef = false;
return Result;
}
@@ -887,7 +885,7 @@ TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
RedeclarableTemplateDecl::CommonBase *
TypeAliasTemplateDecl::newCommon(ASTContext &C) const {
- Common *CommonPtr = new (C) Common;
+ auto *CommonPtr = new (C) Common;
C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -950,7 +948,7 @@ VarTemplateDecl::getPartialSpecializations() {
RedeclarableTemplateDecl::CommonBase *
VarTemplateDecl::newCommon(ASTContext &C) const {
- Common *CommonPtr = new (C) Common;
+ auto *CommonPtr = new (C) Common;
C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -1048,7 +1046,7 @@ void VarTemplateSpecializationDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
- auto *PS = dyn_cast<VarTemplatePartialSpecializationDecl>(this);
+ const auto *PS = dyn_cast<VarTemplatePartialSpecializationDecl>(this);
if (const ASTTemplateArgumentListInfo *ArgsAsWritten =
PS ? PS->getTemplateArgsAsWritten() : nullptr) {
printTemplateArgumentList(OS, ArgsAsWritten->arguments(), Policy);
@@ -1059,7 +1057,7 @@ void VarTemplateSpecializationDecl::getNameForDiagnostic(
}
VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
- if (SpecializedPartialSpecialization *PartialSpec =
+ if (const auto *PartialSpec =
SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization *>())
return PartialSpec->PartialSpecialization->getSpecializedTemplate();
return SpecializedTemplate.get<VarTemplateDecl *>();
@@ -1104,7 +1102,7 @@ VarTemplatePartialSpecializationDecl::Create(
const ASTTemplateArgumentListInfo *ASTArgInfos
= ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
- VarTemplatePartialSpecializationDecl *Result =
+ auto *Result =
new (Context, DC) VarTemplatePartialSpecializationDecl(
Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo,
S, Args, ASTArgInfos);
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 7ddab9356b54..193efa4e097d 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -108,7 +108,7 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
}
}
} else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
- if (BO->isPtrMemOp()) {
+ if (BO->getOpcode() == BO_PtrMemD) {
assert(BO->getRHS()->isRValue());
E = BO->getLHS();
const MemberPointerType *MPT =
@@ -230,7 +230,7 @@ SourceLocation Expr::getExprLoc() const {
// Primary Expressions.
//===----------------------------------------------------------------------===//
-/// \brief Compute the type-, value-, and instantiation-dependence of a
+/// Compute the type-, value-, and instantiation-dependence of a
/// declaration reference
/// based on the declaration being referenced.
static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D,
@@ -484,6 +484,8 @@ StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
return "__PRETTY_FUNCTION__";
case FuncSig:
return "__FUNCSIG__";
+ case LFuncSig:
+ return "L__FUNCSIG__";
case PrettyFunctionNoVirtual:
break;
}
@@ -536,7 +538,8 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
return Out.str();
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
- if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual && IT != FuncSig)
+ if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual &&
+ IT != FuncSig && IT != LFuncSig)
return FD->getNameAsString();
SmallString<256> Name;
@@ -561,7 +564,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
- if (IT == FuncSig) {
+ if (IT == FuncSig || IT == LFuncSig) {
switch (AFT->getCallConv()) {
case CC_C: POut << "__cdecl "; break;
case CC_X86StdCall: POut << "__stdcall "; break;
@@ -586,7 +589,8 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FT->isVariadic()) {
if (FD->getNumParams()) POut << ", ";
POut << "...";
- } else if ((IT == FuncSig || !Context.getLangOpts().CPlusPlus) &&
+ } else if ((IT == FuncSig || IT == LFuncSig ||
+ !Context.getLangOpts().CPlusPlus) &&
!Decl->getNumParams()) {
POut << "void";
}
@@ -755,6 +759,36 @@ IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
return new (C) IntegerLiteral(Empty);
}
+FixedPointLiteral::FixedPointLiteral(const ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l,
+ unsigned Scale)
+ : Expr(FixedPointLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Loc(l), Scale(Scale) {
+ assert(type->isFixedPointType() && "Illegal type in FixedPointLiteral");
+ assert(V.getBitWidth() == C.getTypeInfo(type).Width &&
+ "Fixed point type is not the correct size for constant.");
+ setValue(C, V);
+}
+
+FixedPointLiteral *FixedPointLiteral::CreateFromRawInt(const ASTContext &C,
+ const llvm::APInt &V,
+ QualType type,
+ SourceLocation l,
+ unsigned Scale) {
+ return new (C) FixedPointLiteral(C, V, type, l, Scale);
+}
+
+std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
+ // Currently the longest decimal number that can be printed is the max for an
+ // unsigned long _Accum: 4294967295.99999999976716935634613037109375
+ // which is 43 characters.
+ SmallString<64> S;
+ FixedPointValueToString(
+ S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale, Radix);
+ return S.str();
+}
+
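The encoding behind the new literal class: the stored integer V together with a scale denotes V / 2^Scale. A small sketch of that arithmetic (assumption: Scale < 64; the real path goes through APSInt and FixedPointValueToString):

#include <cstdint>
#include <cstdio>

void printFixed(uint64_t Raw, unsigned Scale) {
  uint64_t Int  = Raw >> Scale;                // integral part
  uint64_t Frac = Raw & ((1ull << Scale) - 1); // fractional bits
  // e.g. Raw = 0b1011, Scale = 2  ->  2 + 3/4  =  2.75
  std::printf("%llu + %llu/2^%u\n", (unsigned long long)Int,
              (unsigned long long)Frac, Scale);
}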
FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
bool isexact, QualType Type, SourceLocation L)
: Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
@@ -881,7 +915,8 @@ StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
void *Mem =
C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
alignof(StringLiteral));
- StringLiteral *SL = new (Mem) StringLiteral(QualType());
+ StringLiteral *SL =
+ new (Mem) StringLiteral(C.adjustStringLiteralBaseType(QualType()));
SL->CharByteWidth = 0;
SL->Length = 0;
SL->NumConcatenated = NumStrs;
@@ -1633,8 +1668,8 @@ bool CastExpr::CastConsistency() const {
return true;
}
-const char *CastExpr::getCastKindName() const {
- switch (getCastKind()) {
+const char *CastExpr::getCastKindName(CastKind CK) {
+ switch (CK) {
#define CAST_OPERATION(Name) case CK_##Name: return #Name;
#include "clang/AST/OperationKinds.def"
}
@@ -1642,23 +1677,22 @@ const char *CastExpr::getCastKindName() const {
}
namespace {
- Expr *skipImplicitTemporary(Expr *expr) {
+ const Expr *skipImplicitTemporary(const Expr *E) {
// Skip through reference binding to temporary.
- if (MaterializeTemporaryExpr *Materialize
- = dyn_cast<MaterializeTemporaryExpr>(expr))
- expr = Materialize->GetTemporaryExpr();
+ if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = Materialize->GetTemporaryExpr();
// Skip any temporary bindings; they're implicit.
- if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(expr))
- expr = Binder->getSubExpr();
+ if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = Binder->getSubExpr();
- return expr;
+ return E;
}
}
Expr *CastExpr::getSubExprAsWritten() {
- Expr *SubExpr = nullptr;
- CastExpr *E = this;
+ const Expr *SubExpr = nullptr;
+ const CastExpr *E = this;
do {
SubExpr = skipImplicitTemporary(E->getSubExpr());
@@ -1671,15 +1705,33 @@ Expr *CastExpr::getSubExprAsWritten() {
assert((isa<CXXMemberCallExpr>(SubExpr) ||
isa<BlockExpr>(SubExpr)) &&
"Unexpected SubExpr for CK_UserDefinedConversion.");
- if (isa<CXXMemberCallExpr>(SubExpr))
- SubExpr = cast<CXXMemberCallExpr>(SubExpr)->getImplicitObjectArgument();
+ if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SubExpr))
+ SubExpr = MCE->getImplicitObjectArgument();
}
// If the subexpression we're left with is an implicit cast, look
// through that, too.
} while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));
- return SubExpr;
+ return const_cast<Expr*>(SubExpr);
+}
+
+NamedDecl *CastExpr::getConversionFunction() const {
+ const Expr *SubExpr = nullptr;
+
+ for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) {
+ SubExpr = skipImplicitTemporary(E->getSubExpr());
+
+ if (E->getCastKind() == CK_ConstructorConversion)
+ return cast<CXXConstructExpr>(SubExpr)->getConstructor();
+
+ if (E->getCastKind() == CK_UserDefinedConversion) {
+ if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SubExpr))
+ return MCE->getMethodDecl();
+ }
+ }
+
+ return nullptr;
}
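The two cast kinds the new getConversionFunction() recognizes, roughly, in source terms (illustrative type):

struct A {
  A(int) {}                        // found via CK_ConstructorConversion
  operator bool() { return true; } // found via CK_UserDefinedConversion
};

A a = 1;    // conversion function here is the constructor A::A(int)
bool b = a; // conversion function here is A::operator bool()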
CXXBaseSpecifier **CastExpr::path_buffer() {
@@ -2049,6 +2101,10 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
case GenericSelectionExprClass:
return cast<GenericSelectionExpr>(this)->getResultExpr()->
isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
+ case CoawaitExprClass:
+ case CoyieldExprClass:
+ return cast<CoroutineSuspendExpr>(this)->getResumeExpr()->
+ isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case ChooseExprClass:
return cast<ChooseExpr>(this)->getChosenSubExpr()->
isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
@@ -2628,7 +2684,7 @@ bool Expr::isDefaultArgument() const {
return isa<CXXDefaultArgExpr>(E);
}
-/// \brief Skip over any no-op casts and any temporary-binding
+/// Skip over any no-op casts and any temporary-binding
/// expressions.
static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
@@ -2917,8 +2973,20 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
return false;
}
+bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const {
+ const FunctionDecl* FD = getDirectCallee();
+ if (!FD || (FD->getBuiltinID() != Builtin::BI__assume &&
+ FD->getBuiltinID() != Builtin::BI__builtin_assume))
+ return false;
+
+ const Expr* Arg = getArg(0);
+ bool ArgVal;
+ return !Arg->isValueDependent() &&
+ Arg->EvaluateAsBooleanCondition(ArgVal, Ctx) && !ArgVal;
+}
+
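What the new isBuiltinAssumeFalse() detects in source terms: an __assume or __builtin_assume whose argument constant-folds to false, i.e. an unreachable marker. Illustrative:

int pick(int x) {
  switch (x & 1) {
  case 0: return 10;
  case 1: return 11;
  }
  __builtin_assume(false); // argument folds to false: never reached
  return 0;
}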
namespace {
- /// \brief Look for any side effects within a Stmt.
+ /// Look for any side effects within a Stmt.
class SideEffectFinder : public ConstEvaluatedExprVisitor<SideEffectFinder> {
typedef ConstEvaluatedExprVisitor<SideEffectFinder> Inherited;
const bool IncludePossibleEffects;
@@ -2974,6 +3042,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ObjCIvarRefExprClass:
case PredefinedExprClass:
case IntegerLiteralClass:
+ case FixedPointLiteralClass:
case FloatingLiteralClass:
case ImaginaryLiteralClass:
case StringLiteralClass:
@@ -3214,7 +3283,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
}
namespace {
- /// \brief Look for a call to a non-trivial function within an expression.
+ /// Look for a call to a non-trivial function within an expression.
class NonTrivialCallFinder : public ConstEvaluatedExprVisitor<NonTrivialCallFinder>
{
typedef ConstEvaluatedExprVisitor<NonTrivialCallFinder> Inherited;
@@ -3390,7 +3459,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
return NPCK_ZeroExpression;
}
-/// \brief If this expression is an l-value for an Objective C
+/// If this expression is an l-value for an Objective C
/// property, find the underlying property reference expression.
const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
const Expr *E = this;
@@ -3446,10 +3515,11 @@ FieldDecl *Expr::getSourceBitField() {
if (Field->isBitField())
return Field;
- if (ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E))
- if (FieldDecl *Ivar = dyn_cast<FieldDecl>(IvarRef->getDecl()))
- if (Ivar->isBitField())
- return Ivar;
+ if (ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+ FieldDecl *Ivar = IvarRef->getDecl();
+ if (Ivar->isBitField())
+ return Ivar;
+ }
if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E)) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
@@ -3813,7 +3883,7 @@ Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
return getSubExpr(D.ArrayOrRange.Index + 2);
}
-/// \brief Replaces the designator at index @p Idx with the series
+/// Replaces the designator at index @p Idx with the series
/// of designators in [First, Last).
void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx,
const Designator *First,
@@ -4034,6 +4104,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_or_fetch:
case AO__atomic_xor_fetch:
case AO__atomic_nand_fetch:
+ case AO__atomic_fetch_min:
+ case AO__atomic_fetch_max:
return 3;
case AO__opencl_atomic_store:
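The two builtins added above each take three subexpressions (pointer, value, memory order), matching the return 3 path. Illustrative use:

int fetchMin(int *P, int V) {
  // returns the previous value of *P; *P becomes min(*P, V) atomically
  return __atomic_fetch_min(P, V, __ATOMIC_SEQ_CST);
}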
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index a0d611381123..3a204c244f68 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -169,8 +169,8 @@ void CXXNewExpr::AllocateArgsArray(const ASTContext &C, bool isArray,
}
bool CXXNewExpr::shouldNullCheckAllocation(const ASTContext &Ctx) const {
- return getOperatorNew()->getType()->castAs<FunctionProtoType>()->isNothrow(
- Ctx) &&
+ return getOperatorNew()->getType()->castAs<FunctionProtoType>()
+ ->isNothrow() &&
!getOperatorNew()->isReservedGlobalPlacementOperator();
}
@@ -181,7 +181,7 @@ QualType CXXDeleteExpr::getDestroyedType() const {
// For a destroying operator delete, we may have implicitly converted the
// pointer type to the type of the parameter of the 'operator delete'
// function.
- while (auto *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ while (const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
if (ICE->getCastKind() == CK_DerivedToBase ||
ICE->getCastKind() == CK_UncheckedDerivedToBase ||
ICE->getCastKind() == CK_NoOp) {
@@ -290,7 +290,7 @@ UnresolvedLookupExpr::CreateEmpty(const ASTContext &C,
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
void *Mem = C.Allocate(Size, alignof(UnresolvedLookupExpr));
- UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
+ auto *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
@@ -442,8 +442,8 @@ DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &C,
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
void *Mem = C.Allocate(Size);
- DependentScopeDeclRefExpr *E
- = new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
+ auto *E =
+ new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
SourceLocation(),
DeclarationNameInfo(), nullptr);
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
@@ -504,9 +504,9 @@ SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
const Expr *Callee = getCallee()->IgnoreParens();
- if (const MemberExpr *MemExpr = dyn_cast<MemberExpr>(Callee))
+ if (const auto *MemExpr = dyn_cast<MemberExpr>(Callee))
return MemExpr->getBase();
- if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Callee))
+ if (const auto *BO = dyn_cast<BinaryOperator>(Callee))
if (BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI)
return BO->getLHS();
@@ -515,8 +515,7 @@ Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
}
CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
- if (const MemberExpr *MemExpr =
- dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ if (const auto *MemExpr = dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
return cast<CXXMethodDecl>(MemExpr->getMemberDecl());
// FIXME: Will eventually need to cope with member pointers.
@@ -561,9 +560,9 @@ CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- CXXStaticCastExpr *E =
- new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
- RParenLoc, AngleBrackets);
+ auto *E =
+ new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc, AngleBrackets);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -586,9 +585,9 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- CXXDynamicCastExpr *E =
- new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
- RParenLoc, AngleBrackets);
+ auto *E =
+ new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc, AngleBrackets);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -614,7 +613,7 @@ bool CXXDynamicCastExpr::isAlwaysNull() const
QualType SrcType = getSubExpr()->getType();
QualType DestType = getType();
- if (const PointerType *SrcPTy = SrcType->getAs<PointerType>()) {
+ if (const auto *SrcPTy = SrcType->getAs<PointerType>()) {
SrcType = SrcPTy->getPointeeType();
DestType = DestType->castAs<PointerType>()->getPointeeType();
}
@@ -622,14 +621,14 @@ bool CXXDynamicCastExpr::isAlwaysNull() const
if (DestType->isVoidType())
return false;
- const CXXRecordDecl *SrcRD =
- cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
+ const auto *SrcRD =
+ cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
if (!SrcRD->hasAttr<FinalAttr>())
return false;
- const CXXRecordDecl *DestRD =
- cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
+ const auto *DestRD =
+ cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
return !DestRD->isDerivedFrom(SrcRD);
}
@@ -643,9 +642,9 @@ CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- CXXReinterpretCastExpr *E =
- new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
- RParenLoc, AngleBrackets);
+ auto *E =
+ new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc, AngleBrackets);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -678,8 +677,8 @@ CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
- CXXFunctionalCastExpr *E =
- new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
+ auto *E =
+ new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
if (PathSize)
std::uninitialized_copy_n(BasePath->data(), BasePath->size(),
E->getTrailingObjects<CXXBaseSpecifier *>());
@@ -1079,7 +1078,7 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
true, true, Type->getType()->containsUnexpandedParameterPack()),
Type(Type), LParenLoc(LParenLoc), RParenLoc(RParenLoc),
NumArgs(Args.size()) {
- Expr **StoredArgs = getTrailingObjects<Expr *>();
+ auto **StoredArgs = getTrailingObjects<Expr *>();
for (unsigned I = 0; I != Args.size(); ++I) {
if (Args[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -1176,12 +1175,12 @@ CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C,
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
void *Mem = C.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
- CXXDependentScopeMemberExpr *E
- = new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
- false, SourceLocation(),
- NestedNameSpecifierLoc(),
- SourceLocation(), nullptr,
- DeclarationNameInfo(), nullptr);
+ auto *E =
+ new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
+ false, SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), nullptr,
+ DeclarationNameInfo(), nullptr);
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
@@ -1274,7 +1273,7 @@ UnresolvedMemberExpr::CreateEmpty(const ASTContext &C,
HasTemplateKWAndArgsInfo, NumTemplateArgs);
void *Mem = C.Allocate(Size, alignof(UnresolvedMemberExpr));
- UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
+ auto *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
@@ -1297,7 +1296,7 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
else {
QualType BaseType = getBaseType().getNonReferenceType();
if (isArrow()) {
- const PointerType *PT = BaseType->getAs<PointerType>();
+ const auto *PT = BaseType->getAs<PointerType>();
assert(PT && "base of arrow member access is not pointer");
BaseType = PT->getPointeeType();
}
@@ -1330,10 +1329,11 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
SubstNonTypeTemplateParmPackExpr::
SubstNonTypeTemplateParmPackExpr(QualType T,
+ ExprValueKind ValueKind,
NonTypeTemplateParmDecl *Param,
SourceLocation NameLoc,
const TemplateArgument &ArgPack)
- : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary,
true, true, true, true),
Param(Param), Arguments(ArgPack.pack_begin()),
NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) {}
@@ -1378,7 +1378,7 @@ void MaterializeTemporaryExpr::setExtendingDecl(const ValueDecl *ExtendedBy,
// We may need to allocate extra storage for the mangling number and the
// extended-by ValueDecl.
if (!State.is<ExtraState *>()) {
- auto ES = new (ExtendedBy->getASTContext()) ExtraState;
+ auto *ES = new (ExtendedBy->getASTContext()) ExtraState;
ES->Temporary = State.get<Stmt *>();
State = ES;
}
@@ -1402,7 +1402,7 @@ TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
TypeTraitExprBits.Value = Value;
TypeTraitExprBits.NumArgs = Args.size();
- TypeSourceInfo **ToArgs = getTrailingObjects<TypeSourceInfo *>();
+ auto **ToArgs = getTrailingObjects<TypeSourceInfo *>();
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
if (Args[I]->getType()->isDependentType())
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 3bb2b4eb5fc1..c5b3b361a0a5 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -1,4 +1,4 @@
-//===--- ExprClassification.cpp - Expression AST Node Implementation ------===//
+//===- ExprClassification.cpp - Expression AST Node Implementation --------===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,9 +19,10 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "llvm/Support/ErrorHandling.h"
+
using namespace clang;
-typedef Expr::Classification Cl;
+using Cl = Expr::Classification;
static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
@@ -160,6 +161,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ShuffleVectorExprClass:
case Expr::ConvertVectorExprClass:
case Expr::IntegerLiteralClass:
+ case Expr::FixedPointLiteralClass:
case Expr::CharacterLiteralClass:
case Expr::AddrLabelExprClass:
case Expr::CXXDeleteExprClass:
@@ -348,14 +350,14 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::BinaryConditionalOperatorClass: {
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
- const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
+ const auto *co = cast<BinaryConditionalOperator>(E);
return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
}
case Expr::ConditionalOperatorClass: {
// Once again, only C++ is interesting.
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
- const ConditionalOperator *co = cast<ConditionalOperator>(E);
+ const auto *co = cast<ConditionalOperator>(E);
return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
}
@@ -385,7 +387,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::StmtExprClass: {
const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
- if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
+ if (const auto *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
return ClassifyUnnamed(Ctx, LastExpr->getType());
return Cl::CL_PRValue;
}
@@ -434,8 +436,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
return Cl::CL_MemberFunction;
bool islvalue;
- if (const NonTypeTemplateParmDecl *NTTParm =
- dyn_cast<NonTypeTemplateParmDecl>(D))
+ if (const auto *NTTParm = dyn_cast<NonTypeTemplateParmDecl>(D))
islvalue = NTTParm->getType()->isReferenceType();
else
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
@@ -461,7 +462,7 @@ static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
// otherwise.
if (T->isLValueReferenceType())
return Cl::CL_LValue;
- const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
+ const auto *RV = T->getAs<RValueReferenceType>();
if (!RV) // Could still be a class temporary, though.
return ClassifyTemporary(T);
@@ -491,7 +492,7 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
// C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
// C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
// E1.E2 is an lvalue.
- if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (const auto *Value = dyn_cast<ValueDecl>(Member))
if (Value->getType()->isReferenceType())
return Cl::CL_LValue;
@@ -517,7 +518,7 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
// -- If it refers to a static member function [...], then E1.E2 is an
// lvalue; [...]
// -- Otherwise [...] E1.E2 is a prvalue.
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(Member))
return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
// -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
@@ -599,8 +600,7 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
if (Kind == Cl::CL_PRValue) {
// For the sake of better diagnostics, we want to specifically recognize
// use of the GCC cast-as-lvalue extension.
- if (const ExplicitCastExpr *CE =
- dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
+ if (const auto *CE = dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) {
Loc = CE->getExprLoc();
return Cl::CM_LValueCast;
@@ -617,7 +617,7 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// Assignment to a property in ObjC is an implicit setter access. But a
// setter might not exist.
- if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (const auto *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
if (Expr->isImplicitProperty() &&
Expr->getImplicitPropertySetter() == nullptr)
return Cl::CM_NoSetterProperty;
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 8d9b3c3bebc0..e69914f25da2 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -48,6 +48,8 @@
#include <cstring>
#include <functional>
+#define DEBUG_TYPE "exprconstant"
+
using namespace clang;
using llvm::APSInt;
using llvm::APFloat;
@@ -61,14 +63,22 @@ namespace {
static QualType getType(APValue::LValueBase B) {
if (!B) return QualType();
- if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>())
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
// FIXME: It's unclear where we're supposed to take the type from, and
- // this actually matters for arrays of unknown bound. Using the type of
- // the most recent declaration isn't clearly correct in general. Eg:
+ // this actually matters for arrays of unknown bound. E.g.:
//
// extern int arr[]; void f() { extern int arr[3]; };
// constexpr int *p = &arr[1]; // valid?
- return cast<ValueDecl>(D->getMostRecentDecl())->getType();
+ //
+ // For now, we take the array bound from the most recent declaration.
+ for (auto *Redecl = cast<ValueDecl>(D->getMostRecentDecl()); Redecl;
+ Redecl = cast_or_null<ValueDecl>(Redecl->getPreviousDecl())) {
+ QualType T = Redecl->getType();
+ if (!T->isIncompleteArrayType())
+ return T;
+ }
+ return D->getType();
+ }
const Expr *Base = B.get<const Expr*>();
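A minimal illustration of the walk above (illustrative source, not part of the patch): the bound may live on an earlier redeclaration, so incomplete array types are skipped until one that carries a bound is found.

  extern int arr[3];
  extern int arr[];           // most recent redeclaration: incomplete int[]
  constexpr int *p = &arr[1]; // the walk back recovers int[3], so the
                              // subscript can still be checked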
@@ -131,7 +141,11 @@ namespace {
E = E->IgnoreParens();
// If we're doing a variable assignment from e.g. malloc(N), there will
- // probably be a cast of some kind. Ignore it.
+ // probably be a cast of some kind. In exotic cases, we might also see a
+ // top-level ExprWithCleanups. Ignore them either way.
+ if (const auto *EC = dyn_cast<ExprWithCleanups>(E))
+ E = EC->getSubExpr()->IgnoreParens();
+
if (const auto *Cast = dyn_cast<CastExpr>(E))
E = Cast->getSubExpr()->IgnoreParens();
@@ -438,8 +452,8 @@ namespace {
// Note that we intentionally use std::map here so that references to
// values are stable.
- typedef std::map<const void*, APValue> MapTy;
- typedef MapTy::const_iterator temp_iterator;
+ typedef std::pair<const void *, unsigned> MapKeyTy;
+ typedef std::map<MapKeyTy, APValue> MapTy;
/// Temporaries - Temporary lvalues materialized within this stack frame.
MapTy Temporaries;
@@ -449,6 +463,20 @@ namespace {
/// Index - The call index of this call.
unsigned Index;
+ /// The stack of integers used to track version numbers for temporaries.
+ SmallVector<unsigned, 2> TempVersionStack = {1};
+ unsigned CurTempVersion = TempVersionStack.back();
+
+ unsigned getTempVersion() const { return TempVersionStack.back(); }
+
+ void pushTempVersion() {
+ TempVersionStack.push_back(++CurTempVersion);
+ }
+
+ void popTempVersion() {
+ TempVersionStack.pop_back();
+ }
+
// FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
// on the overall stack usage of deeply-recursing constexpr evaluations.
// (We should cache this map rather than recomputing it repeatedly.)
@@ -465,10 +493,36 @@ namespace {
APValue *Arguments);
~CallStackFrame();
- APValue *getTemporary(const void *Key) {
- MapTy::iterator I = Temporaries.find(Key);
- return I == Temporaries.end() ? nullptr : &I->second;
+ // Return the temporary for Key whose version number is Version.
+ APValue *getTemporary(const void *Key, unsigned Version) {
+ MapKeyTy KV(Key, Version);
+ auto LB = Temporaries.lower_bound(KV);
+ if (LB != Temporaries.end() && LB->first == KV)
+ return &LB->second;
+ // Pair (Key,Version) wasn't found in the map. Check that no elements
+ // in the map have 'Key' as their key.
+ assert((LB == Temporaries.end() || LB->first.first != Key) &&
+ (LB == Temporaries.begin() || std::prev(LB)->first.first != Key) &&
+ "Element with key 'Key' found in map");
+ return nullptr;
+ }
+
+ // Return the current temporary for Key in the map.
+ APValue *getCurrentTemporary(const void *Key) {
+ auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
+ if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
+ return &std::prev(UB)->second;
+ return nullptr;
+ }
+
+ // Return the version number of the current temporary for Key.
+ unsigned getCurrentTemporaryVersion(const void *Key) const {
+ auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
+ if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
+ return std::prev(UB)->first.second;
+ return 0;
}
+
APValue &createTemporary(const void *Key, bool IsLifetimeExtended);
};
@@ -598,7 +652,8 @@ namespace {
/// EvaluatingObject - Pair of the AST node that an lvalue represents and
/// the call index that that lvalue was allocated in.
- typedef std::pair<APValue::LValueBase, unsigned> EvaluatingObject;
+ typedef std::pair<APValue::LValueBase, std::pair<unsigned, unsigned>>
+ EvaluatingObject;
/// EvaluatingConstructors - Set of objects that are currently being
/// constructed.
@@ -617,8 +672,10 @@ namespace {
}
};
- bool isEvaluatingConstructor(APValue::LValueBase Decl, unsigned CallIndex) {
- return EvaluatingConstructors.count(EvaluatingObject(Decl, CallIndex));
+ bool isEvaluatingConstructor(APValue::LValueBase Decl, unsigned CallIndex,
+ unsigned Version) {
+ return EvaluatingConstructors.count(
+ EvaluatingObject(Decl, {CallIndex, Version}));
}
/// The current array initialization index, if we're performing array
@@ -629,11 +686,11 @@ namespace {
/// notes attached to it will also be stored, otherwise they will not be.
bool HasActiveDiagnostic;
- /// \brief Have we emitted a diagnostic explaining why we couldn't constant
+ /// Have we emitted a diagnostic explaining why we couldn't constant
/// fold (not just why it's not strictly a constant expression)?
bool HasFoldFailureDiagnostic;
- /// \brief Whether or not we're currently speculatively evaluating.
+ /// Whether or not we're currently speculatively evaluating.
bool IsSpeculativelyEvaluating;
enum EvaluationMode {
@@ -714,7 +771,7 @@ namespace {
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
EvaluatingDecl = Base;
EvaluatingDeclValue = &Value;
- EvaluatingConstructors.insert({Base, 0});
+ EvaluatingConstructors.insert({Base, {0, 0}});
}
const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
@@ -1078,11 +1135,16 @@ namespace {
unsigned OldStackSize;
public:
ScopeRAII(EvalInfo &Info)
- : Info(Info), OldStackSize(Info.CleanupStack.size()) {}
+ : Info(Info), OldStackSize(Info.CleanupStack.size()) {
+ // Push a new temporary version. This is needed to distinguish between
+ // temporaries created in different iterations of a loop.
+ Info.CurrentCall->pushTempVersion();
+ }
~ScopeRAII() {
// Body moved to a static method to encourage the compiler to inline away
// instances of this class.
cleanup(Info, OldStackSize);
+ Info.CurrentCall->popTempVersion();
}
private:
static void cleanup(EvalInfo &Info, unsigned OldStackSize) {
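Why a fresh version is pushed per scope (illustrative, compiles as C++14): each loop iteration materializes a new temporary from the same expression node, so the AST pointer alone cannot tell them apart.

  constexpr int sum3() {
    int total = 0;
    for (int i = 0; i != 3; ++i) {
      const int &r = i * 2; // same MaterializeTemporaryExpr node, but a
      total += r;           // distinct temporary on every iteration
    }
    return total;
  }
  static_assert(sum3() == 6, "");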
@@ -1162,7 +1224,8 @@ CallStackFrame::~CallStackFrame() {
APValue &CallStackFrame::createTemporary(const void *Key,
bool IsLifetimeExtended) {
- APValue &Result = Temporaries[Key];
+ unsigned Version = Info.CurrentCall->getTempVersion();
+ APValue &Result = Temporaries[MapKeyTy(Key, Version)];
assert(Result.isUninit() && "temporary created multiple times");
Info.CleanupStack.push_back(Cleanup(&Result, IsLifetimeExtended));
return Result;
@@ -1254,27 +1317,27 @@ namespace {
struct LValue {
APValue::LValueBase Base;
CharUnits Offset;
- unsigned InvalidBase : 1;
- unsigned CallIndex : 31;
SubobjectDesignator Designator;
- bool IsNullPtr;
+ bool IsNullPtr : 1;
+ bool InvalidBase : 1;
const APValue::LValueBase getLValueBase() const { return Base; }
CharUnits &getLValueOffset() { return Offset; }
const CharUnits &getLValueOffset() const { return Offset; }
- unsigned getLValueCallIndex() const { return CallIndex; }
SubobjectDesignator &getLValueDesignator() { return Designator; }
const SubobjectDesignator &getLValueDesignator() const { return Designator;}
bool isNullPointer() const { return IsNullPtr;}
+ unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
+ unsigned getLValueVersion() const { return Base.getVersion(); }
+
void moveInto(APValue &V) const {
if (Designator.Invalid)
- V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex,
- IsNullPtr);
+ V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
else {
assert(!InvalidBase && "APValues can't handle invalid LValue bases");
V = APValue(Base, Offset, Designator.Entries,
- Designator.IsOnePastTheEnd, CallIndex, IsNullPtr);
+ Designator.IsOnePastTheEnd, IsNullPtr);
}
}
void setFrom(ASTContext &Ctx, const APValue &V) {
@@ -1282,12 +1345,11 @@ namespace {
Base = V.getLValueBase();
Offset = V.getLValueOffset();
InvalidBase = false;
- CallIndex = V.getLValueCallIndex();
Designator = SubobjectDesignator(Ctx, V);
IsNullPtr = V.isNullPointer();
}
- void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false) {
+ void set(APValue::LValueBase B, bool BInvalid = false) {
#ifndef NDEBUG
// We only allow a few types of invalid bases. Enforce that here.
if (BInvalid) {
@@ -1300,7 +1362,6 @@ namespace {
Base = B;
Offset = CharUnits::fromQuantity(0);
InvalidBase = BInvalid;
- CallIndex = I;
Designator = SubobjectDesignator(getType(B));
IsNullPtr = false;
}
@@ -1309,13 +1370,12 @@ namespace {
Base = (Expr *)nullptr;
Offset = CharUnits::fromQuantity(TargetVal);
InvalidBase = false;
- CallIndex = 0;
Designator = SubobjectDesignator(PointerTy->getPointeeType());
IsNullPtr = true;
}
void setInvalid(APValue::LValueBase B, unsigned I = 0) {
- set(B, I, true);
+ set(B, true);
}
// Check that this LValue is not based on a null pointer. If it is, produce
@@ -1517,6 +1577,15 @@ static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
// Misc utilities
//===----------------------------------------------------------------------===//
+/// A helper function to create a temporary and set an LValue.
+template <class KeyTy>
+static APValue &createTemporary(const KeyTy *Key, bool IsLifetimeExtended,
+ LValue &LV, CallStackFrame &Frame) {
+ LV.set({Key, Frame.Info.CurrentCall->Index,
+ Frame.Info.CurrentCall->getTempVersion()});
+ return Frame.createTemporary(Key, IsLifetimeExtended);
+}
+
/// Negate an APSInt in place, converting it to a signed form if necessary, and
/// preserving its value (by extending by up to one bit as needed).
static void negateAsSigned(APSInt &Int) {
@@ -1651,7 +1720,8 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
/// value for an address or reference constant expression. Return true if we
/// can fold this expression, whether or not it's a constant expression.
static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
- QualType Type, const LValue &LVal) {
+ QualType Type, const LValue &LVal,
+ Expr::ConstExprUsage Usage) {
bool IsReferenceType = Type->isReferenceType();
APValue::LValueBase Base = LVal.getLValueBase();
@@ -1684,7 +1754,7 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
return false;
// A dllimport variable never acts like a constant.
- if (Var->hasAttr<DLLImportAttr>())
+ if (Usage == Expr::EvaluateForCodeGen && Var->hasAttr<DLLImportAttr>())
return false;
}
if (const auto *FD = dyn_cast<const FunctionDecl>(VD)) {
@@ -1698,7 +1768,8 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
// The C language has no notion of ODR; furthermore, it has no notion of
// dynamic initialization. This means that we are permitted to
// perform initialization with the address of the thunk.
- if (Info.getLangOpts().CPlusPlus && FD->hasAttr<DLLImportAttr>())
+ if (Info.getLangOpts().CPlusPlus && Usage == Expr::EvaluateForCodeGen &&
+ FD->hasAttr<DLLImportAttr>())
return false;
}
}
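A hedged sketch of the distinction Usage draws (assumes a Windows-style dllimport target; illustrative only):

  __declspec(dllimport) void f();
  template <void (*FP)()> struct S {};
  S<&f> s; // acceptable under Expr::EvaluateForMangling, even though &f,
           // which refers to the import thunk, is rejected for codegen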
@@ -1731,12 +1802,14 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
SourceLocation Loc,
QualType Type,
- const APValue &Value) {
+ const APValue &Value,
+ Expr::ConstExprUsage Usage) {
const ValueDecl *Member = Value.getMemberPointerDecl();
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
return true;
- return FD->isVirtual() || !FD->hasAttr<DLLImportAttr>();
+ return Usage == Expr::EvaluateForMangling || FD->isVirtual() ||
+ !FD->hasAttr<DLLImportAttr>();
}
/// Check that this core constant expression is of literal type, and if not,
@@ -1774,8 +1847,10 @@ static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
/// Check that this core constant expression value is a valid value for a
/// constant expression. If not, report an appropriate diagnostic. Does not
/// check that the expression is of literal type.
-static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
- QualType Type, const APValue &Value) {
+static bool
+CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, QualType Type,
+ const APValue &Value,
+ Expr::ConstExprUsage Usage = Expr::EvaluateForCodeGen) {
if (Value.isUninit()) {
Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
<< true << Type;
@@ -1794,28 +1869,28 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckConstantExpression(Info, DiagLoc, EltTy,
- Value.getArrayInitializedElt(I)))
+ Value.getArrayInitializedElt(I), Usage))
return false;
}
if (!Value.hasArrayFiller())
return true;
- return CheckConstantExpression(Info, DiagLoc, EltTy,
- Value.getArrayFiller());
+ return CheckConstantExpression(Info, DiagLoc, EltTy, Value.getArrayFiller(),
+ Usage);
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckConstantExpression(Info, DiagLoc,
Value.getUnionField()->getType(),
- Value.getUnionValue());
+ Value.getUnionValue(), Usage);
}
if (Value.isStruct()) {
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
- for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
- End = CD->bases_end(); I != End; ++I, ++BaseIndex) {
- if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
- Value.getStructBase(BaseIndex)))
+ for (const CXXBaseSpecifier &BS : CD->bases()) {
+ if (!CheckConstantExpression(Info, DiagLoc, BS.getType(),
+ Value.getStructBase(BaseIndex), Usage))
return false;
+ ++BaseIndex;
}
}
for (const auto *I : RD->fields()) {
@@ -1823,7 +1898,8 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
continue;
if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
- Value.getStructField(I->getFieldIndex())))
+ Value.getStructField(I->getFieldIndex()),
+ Usage))
return false;
}
}
@@ -1831,11 +1907,11 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
if (Value.isLValue()) {
LValue LVal;
LVal.setFrom(Info.Ctx, Value);
- return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal);
+ return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal, Usage);
}
if (Value.isMemberPointer())
- return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value);
+ return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Usage);
// Everything else is fine.
return true;
@@ -1846,7 +1922,7 @@ static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
}
static bool IsLiteralLValue(const LValue &Value) {
- if (Value.CallIndex)
+ if (Value.getLValueCallIndex())
return false;
const Expr *E = Value.Base.dyn_cast<const Expr*>();
return E && !isa<MaterializeTemporaryExpr>(E);
@@ -2173,6 +2249,8 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
case BO_GE: Result = LHS >= RHS; return true;
case BO_EQ: Result = LHS == RHS; return true;
case BO_NE: Result = LHS != RHS; return true;
+ case BO_Cmp:
+ llvm_unreachable("BO_Cmp should be handled elsewhere");
}
}
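For context, an illustrative C++2a snippet (assumes a <compare> implementation is available): BO_Cmp comes from the three-way comparison operator and is evaluated via the comparison-category machinery, not this integer fast path.

  #include <compare>
  constexpr auto r = 1 <=> 2; // BO_Cmp; yields std::strong_ordering::less
  static_assert(r < 0, "");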
@@ -2396,7 +2474,7 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
/// \param Result Filled in with a pointer to the value of the variable.
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
const VarDecl *VD, CallStackFrame *Frame,
- APValue *&Result) {
+ APValue *&Result, const LValue *LVal) {
// If this is a parameter to an active constexpr function call, perform
// argument substitution.
@@ -2415,7 +2493,8 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
// If this is a local variable, dig out its value.
if (Frame) {
- Result = Frame->getTemporary(VD);
+ Result = LVal ? Frame->getTemporary(VD, LVal->getLValueVersion())
+ : Frame->getCurrentTemporary(VD);
if (!Result) {
// Assume variables referenced within a lambda's call operator that were
// not declared within the call operator are captures and during checking
@@ -2644,10 +2723,13 @@ struct CompleteObject {
APValue *Value;
/// The type of the complete object.
QualType Type;
+ bool LifetimeStartedInEvaluation;
CompleteObject() : Value(nullptr) {}
- CompleteObject(APValue *Value, QualType Type)
- : Value(Value), Type(Type) {
+ CompleteObject(APValue *Value, QualType Type,
+ bool LifetimeStartedInEvaluation)
+ : Value(Value), Type(Type),
+ LifetimeStartedInEvaluation(LifetimeStartedInEvaluation) {
assert(Value && "missing value for complete object");
}
@@ -2677,6 +2759,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
APValue *O = Obj.Value;
QualType ObjType = Obj.Type;
const FieldDecl *LastField = nullptr;
+ const bool MayReadMutableMembers =
+ Obj.LifetimeStartedInEvaluation && Info.getLangOpts().CPlusPlus14;
// Walk the designator's path to find the subobject.
for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
@@ -2692,7 +2776,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
// cannot perform this read. (This only happens when performing a trivial
// copy or assignment.)
if (ObjType->isRecordType() && handler.AccessKind == AK_Read &&
- diagnoseUnreadableFields(Info, E, ObjType))
+ !MayReadMutableMembers && diagnoseUnreadableFields(Info, E, ObjType))
return handler.failed();
if (!handler.found(*O, ObjType))
@@ -2772,7 +2856,11 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
: O->getComplexFloatReal(), ObjType);
}
} else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
- if (Field->isMutable() && handler.AccessKind == AK_Read) {
+ // From C++14 onwards, it is permitted to read a mutable member whose
+ // lifetime began within the evaluation.
+ // FIXME: Should we also allow this in C++11?
+ if (Field->isMutable() && handler.AccessKind == AK_Read &&
+ !MayReadMutableMembers) {
Info.FFDiag(E, diag::note_constexpr_ltor_mutable, 1)
<< Field;
Info.Note(Field->getLocation(), diag::note_declared_at);
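A small example of the relaxed rule (illustrative, compiles as C++14): a mutable member may be read when the containing object's lifetime began within the evaluation itself.

  struct A { mutable int n = 1; };
  constexpr int get() {
    A a;        // lifetime starts inside this evaluation
    return a.n; // OK here; reading a mutable member of a pre-existing
                // constexpr object is still diagnosed
  }
  static_assert(get() == 1, "");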
@@ -2992,8 +3080,8 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
CallStackFrame *Frame = nullptr;
- if (LVal.CallIndex) {
- Frame = Info.getCallFrame(LVal.CallIndex);
+ if (LVal.getLValueCallIndex()) {
+ Frame = Info.getCallFrame(LVal.getLValueCallIndex());
if (!Frame) {
Info.FFDiag(E, diag::note_constexpr_lifetime_ended, 1)
<< AK << LVal.Base.is<const ValueDecl*>();
@@ -3018,6 +3106,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// Compute value storage location and type of base object.
APValue *BaseVal = nullptr;
QualType BaseType = getType(LVal.Base);
+ bool LifetimeStartedInEvaluation = Frame;
if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
// In C++98, const, non-volatile integers initialized with ICEs are ICEs.
@@ -3105,7 +3194,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
}
}
- if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal))
+ if (!evaluateVarDeclInit(Info, E, VD, Frame, BaseVal, &LVal))
return CompleteObject();
} else {
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
@@ -3129,7 +3218,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// int &&r = 1;
// int x = ++r;
// constexpr int k = r;
- // Therefore we use the C++1y rules in C++11 too.
+ // Therefore we use the C++14 rules in C++11 too.
const ValueDecl *VD = Info.EvaluatingDecl.dyn_cast<const ValueDecl*>();
const ValueDecl *ED = MTE->getExtendingDecl();
if (!(BaseType.isConstQualified() &&
@@ -3142,12 +3231,13 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
BaseVal = Info.Ctx.getMaterializedTemporaryValue(MTE, false);
assert(BaseVal && "got reference to unevaluated temporary");
+ LifetimeStartedInEvaluation = true;
} else {
Info.FFDiag(E);
return CompleteObject();
}
} else {
- BaseVal = Frame->getTemporary(Base);
+ BaseVal = Frame->getTemporary(Base, LVal.Base.getVersion());
assert(BaseVal && "missing value for temporary");
}
@@ -3167,12 +3257,15 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// During the construction of an object, it is not yet 'const'.
// FIXME: This doesn't do quite the right thing for const subobjects of the
// object under construction.
- if (Info.isEvaluatingConstructor(LVal.getLValueBase(), LVal.CallIndex)) {
+ if (Info.isEvaluatingConstructor(LVal.getLValueBase(),
+ LVal.getLValueCallIndex(),
+ LVal.getLValueVersion())) {
BaseType = Info.Ctx.getCanonicalType(BaseType);
BaseType.removeLocalConst();
+ LifetimeStartedInEvaluation = true;
}
- // In C++1y, we can't safely access any mutable state when we might be
+ // In C++14, we can't safely access any mutable state when we might be
// evaluating after an unmodeled side effect.
//
// FIXME: Not all local state is mutable. Allow local constant subobjects
@@ -3182,10 +3275,10 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
(AK != AK_Read && Info.IsSpeculativelyEvaluating))
return CompleteObject();
- return CompleteObject(BaseVal, BaseType);
+ return CompleteObject(BaseVal, BaseType, LifetimeStartedInEvaluation);
}
-/// \brief Perform an lvalue-to-rvalue conversion on the given glvalue. This
+/// Perform an lvalue-to-rvalue conversion on the given glvalue. This
/// can also be used for 'lvalue-to-lvalue' conversions for looking up the
/// glvalue referred to by an entity of reference type.
///
@@ -3204,7 +3297,7 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
// Check for special cases where there is no existing APValue to look at.
const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
- if (Base && !LVal.CallIndex && !Type.isVolatileQualified()) {
+ if (Base && !LVal.getLValueCallIndex() && !Type.isVolatileQualified()) {
if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Base)) {
// In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
// initializer until now for such expressions. Such an expression can't be
@@ -3216,14 +3309,14 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
APValue Lit;
if (!Evaluate(Lit, Info, CLE->getInitializer()))
return false;
- CompleteObject LitObj(&Lit, Base->getType());
+ CompleteObject LitObj(&Lit, Base->getType(), false);
return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal);
} else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
// We represent a string literal array as an lvalue pointing at the
// corresponding expression, rather than building an array of chars.
// FIXME: Support ObjCEncodeExpr, MakeStringConstant
APValue Str(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
- CompleteObject StrObj(&Str, Base->getType());
+ CompleteObject StrObj(&Str, Base->getType(), false);
return extractSubobject(Info, Conv, StrObj, LVal.Designator, RVal);
}
}
@@ -3247,11 +3340,6 @@ static bool handleAssignment(EvalInfo &Info, const Expr *E, const LValue &LVal,
return Obj && modifySubobject(Info, E, Obj, LVal.Designator, Val);
}
-static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) {
- return T->isSignedIntegerType() &&
- Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy);
-}
-
namespace {
struct CompoundAssignSubobjectHandler {
EvalInfo &Info;
@@ -3373,7 +3461,7 @@ static bool handleCompoundAssignment(
namespace {
struct IncDecSubobjectHandler {
EvalInfo &Info;
- const Expr *E;
+ const UnaryOperator *E;
AccessKinds AccessKind;
APValue *Old;
@@ -3445,16 +3533,14 @@ struct IncDecSubobjectHandler {
if (AccessKind == AK_Increment) {
++Value;
- if (!WasNegative && Value.isNegative() &&
- isOverflowingIntegerType(Info.Ctx, SubobjType)) {
+ if (!WasNegative && Value.isNegative() && E->canOverflow()) {
APSInt ActualValue(Value, /*IsUnsigned*/true);
return HandleOverflow(Info, E, ActualValue, SubobjType);
}
} else {
--Value;
- if (WasNegative && !Value.isNegative() &&
- isOverflowingIntegerType(Info.Ctx, SubobjType)) {
+ if (WasNegative && !Value.isNegative() && E->canOverflow()) {
unsigned BitWidth = Value.getBitWidth();
APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
ActualValue.setBit(BitWidth);
@@ -3515,7 +3601,7 @@ static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
AccessKinds AK = IsIncrement ? AK_Increment : AK_Decrement;
CompleteObject Obj = findCompleteObject(Info, E, AK, LVal, LValType);
- IncDecSubobjectHandler Handler = { Info, E, AK, Old };
+ IncDecSubobjectHandler Handler = {Info, cast<UnaryOperator>(E), AK, Old};
return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
}
@@ -3707,8 +3793,7 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
return true;
LValue Result;
- Result.set(VD, Info.CurrentCall->Index);
- APValue &Val = Info.CurrentCall->createTemporary(VD, true);
+ APValue &Val = createTemporary(VD, true, Result, *Info.CurrentCall);
const Expr *InitE = VD->getInit();
if (!InitE) {
@@ -3756,7 +3841,7 @@ static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
}
namespace {
-/// \brief A location where the result (returned value) of evaluating a
+/// A location where the result (returned value) of evaluating a
/// statement should be stored.
struct StmtResult {
/// The APValue that should be filled in with the returned value.
@@ -3764,6 +3849,19 @@ struct StmtResult {
/// The location containing the result, if any (used to support RVO).
const LValue *Slot;
};
+
+struct TempVersionRAII {
+ CallStackFrame &Frame;
+
+ TempVersionRAII(CallStackFrame &Frame) : Frame(Frame) {
+ Frame.pushTempVersion();
+ }
+
+ ~TempVersionRAII() {
+ Frame.popTempVersion();
+ }
+};
+
}
static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
@@ -4290,9 +4388,15 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
This->moveInto(Result);
return true;
} else if (MD && isLambdaCallOperator(MD)) {
- // We're in a lambda; determine the lambda capture field maps.
- MD->getParent()->getCaptureFields(Frame.LambdaCaptureFields,
- Frame.LambdaThisCaptureField);
+ // We're in a lambda; determine the lambda capture field maps unless we're
+ // just constexpr checking the lambda's call operator. The constexpr check
+ // runs before the captures have been added to the closure object (unless
+ // we're inferring constexpr-ness), so we don't have access to them in that
+ // case. Since the captures aren't needed to perform the check itself, we
+ // can simply ignore them.
+ if (!Info.checkingPotentialConstantExpression())
+ MD->getParent()->getCaptureFields(Frame.LambdaCaptureFields,
+ Frame.LambdaThisCaptureField);
}
StmtResult Ret = {Result, ResultSlot};
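An illustrative C++17 snippet for the case described above: the call operator is constexpr-checked before capture fields exist on the closure type, and the check succeeds without consulting them.

  constexpr auto l = [i = 2](int j) { return i + j; };
  static_assert(l(3) == 5, ""); // captures are only needed at evaluation
                                // time, not to decide constexpr-ness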
@@ -4321,7 +4425,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
}
EvalInfo::EvaluatingConstructorRAII EvalObj(
- Info, {This.getLValueBase(), This.CallIndex});
+ Info, {This.getLValueBase(),
+ {This.getLValueCallIndex(), This.getLValueVersion()}});
CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues);
// FIXME: Creating an APValue just to hold a nonexistent return value is
@@ -4376,6 +4481,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
#endif
for (const auto *I : Definition->inits()) {
LValue Subobject = This;
+ LValue SubobjectParent = This;
APValue *Value = &Result;
// Determine the subobject to initialize.
@@ -4406,7 +4512,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
} else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
// Walk the indirect field decl's chain to find the object to initialize,
// and make sure we've initialized every step along it.
- for (auto *C : IFD->chain()) {
+ auto IndirectFieldChain = IFD->chain();
+ for (auto *C : IndirectFieldChain) {
FD = cast<FieldDecl>(C);
CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
// Switch the union field if it differs. This happens if we had
@@ -4422,6 +4529,10 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
*Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
std::distance(CD->field_begin(), CD->field_end()));
}
+ // Store Subobject as its parent before updating it for the last element
+ // in the chain.
+ if (C == IndirectFieldChain.back())
+ SubobjectParent = Subobject;
if (!HandleLValueMember(Info, I->getInit(), Subobject, FD))
return false;
if (CD->isUnion())
@@ -4433,10 +4544,16 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
llvm_unreachable("unknown base initializer kind");
}
+ // We need to override This for implicit field initializers, since in that
+ // case This refers to the innermost anonymous struct/union containing the
+ // initializer, not to the currently constructed class.
+ const Expr *Init = I->getInit();
+ ThisOverrideRAII ThisOverride(*Info.CurrentCall, &SubobjectParent,
+ isa<CXXDefaultInitExpr>(Init));
FullExpressionRAII InitScope(Info);
- if (!EvaluateInPlace(*Value, Info, Subobject, I->getInit()) ||
- (FD && FD->isBitField() && !truncateBitfieldValue(Info, I->getInit(),
- *Value, FD))) {
+ if (!EvaluateInPlace(*Value, Info, Subobject, Init) ||
+ (FD && FD->isBitField() &&
+ !truncateBitfieldValue(Info, Init, *Value, FD))) {
// If we're checking for a potential constant expression, evaluate all
// initializers even if some of them fail.
if (!Info.noteFailure())
@@ -4570,9 +4687,12 @@ public:
{ return StmtVisitorTy::Visit(E->getResultExpr()); }
bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
{ return StmtVisitorTy::Visit(E->getReplacement()); }
- bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
- { return StmtVisitorTy::Visit(E->getExpr()); }
+ bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
+ TempVersionRAII RAII(*Info.CurrentCall);
+ return StmtVisitorTy::Visit(E->getExpr());
+ }
bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
+ TempVersionRAII RAII(*Info.CurrentCall);
// The initializer may not have been parsed yet, or might be erroneous.
if (!E->getExpr())
return Error(E);
@@ -4650,7 +4770,7 @@ public:
}
bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
- if (APValue *Value = Info.CurrentCall->getTemporary(E))
+ if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E))
return DerivedSuccess(*Value, E);
const Expr *Source = E->getSourceExpr();
@@ -4828,7 +4948,7 @@ public:
assert(BaseTy->castAs<RecordType>()->getDecl()->getCanonicalDecl() ==
FD->getParent()->getCanonicalDecl() && "record / field mismatch");
- CompleteObject Obj(&Val, BaseTy);
+ CompleteObject Obj(&Val, BaseTy, true);
SubobjectDesignator Designator(BaseTy);
Designator.addDeclUnchecked(FD);
@@ -4948,7 +5068,7 @@ public:
}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// Common base class for lvalue and temporary evaluation.
@@ -5170,10 +5290,17 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
// to within 'E' actually represents a lambda-capture that maps to a
// data-member/field within the closure object, and if so, evaluate to the
// field or what the field refers to.
- if (Info.CurrentCall && isLambdaCallOperator(Info.CurrentCall->Callee)) {
+ if (Info.CurrentCall && isLambdaCallOperator(Info.CurrentCall->Callee) &&
+ isa<DeclRefExpr>(E) &&
+ cast<DeclRefExpr>(E)->refersToEnclosingVariableOrCapture()) {
+ // We don't always have a complete capture map when checking or inferring
+ // whether the function call operator meets the requirements of a constexpr
+ // function, but we don't need to evaluate the captures to determine
+ // constexpr-ness ([dcl.constexpr] in C++17).
+ if (Info.checkingPotentialConstantExpression())
+ return false;
+
if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) {
- if (Info.checkingPotentialConstantExpression())
- return false;
// Start with 'Result' referring to the complete closure object...
Result = *Info.CurrentCall->This;
// ... then update it to refer to the field of the closure object
@@ -5208,14 +5335,15 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
if (!VD->getType()->isReferenceType()) {
if (Frame) {
- Result.set(VD, Frame->Index);
+ Result.set({VD, Frame->Index,
+ Info.CurrentCall->getCurrentTemporaryVersion(VD)});
return true;
}
return Success(VD);
}
APValue *V;
- if (!evaluateVarDeclInit(Info, E, VD, Frame, V))
+ if (!evaluateVarDeclInit(Info, E, VD, Frame, V, nullptr))
return false;
if (V->isUninit()) {
if (!Info.checkingPotentialConstantExpression())
@@ -5247,9 +5375,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
*Value = APValue();
Result.set(E);
} else {
- Value = &Info.CurrentCall->
- createTemporary(E, E->getStorageDuration() == SD_Automatic);
- Result.set(E, Info.CurrentCall->Index);
+ Value = &createTemporary(E, E->getStorageDuration() == SD_Automatic, Result,
+ *Info.CurrentCall);
}
QualType Type = Inner->getType();
@@ -5433,7 +5560,7 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
// Pointer Evaluation
//===----------------------------------------------------------------------===//
-/// \brief Attempts to compute the number of bytes available at the pointer
+/// Attempts to compute the number of bytes available at the pointer
/// returned by a function with the alloc_size attribute. Returns true if we
/// were successful. Places an unsigned number into `Result`.
///
@@ -5444,9 +5571,8 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
llvm::APInt &Result) {
const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call);
- // alloc_size args are 1-indexed, 0 means not present.
- assert(AllocSize && AllocSize->getElemSizeParam() != 0);
- unsigned SizeArgNo = AllocSize->getElemSizeParam() - 1;
+ assert(AllocSize && AllocSize->getElemSizeParam().isValid());
+ unsigned SizeArgNo = AllocSize->getElemSizeParam().getASTIndex();
unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
if (Call->getNumArgs() <= SizeArgNo)
return false;
@@ -5464,14 +5590,13 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem))
return false;
- if (!AllocSize->getNumElemsParam()) {
+ if (!AllocSize->getNumElemsParam().isValid()) {
Result = std::move(SizeOfElem);
return true;
}
APSInt NumberOfElems;
- // Argument numbers start at 1
- unsigned NumArgNo = AllocSize->getNumElemsParam() - 1;
+ unsigned NumArgNo = AllocSize->getNumElemsParam().getASTIndex();
if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems))
return false;
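For reference, declarations of the kind this code consumes (illustrative sketch; the attribute's source indices are 1-based, and ParamIdx::getASTIndex() converts them to the 0-based argument numbers used here):

  typedef unsigned long size_t_; // stand-in for size_t
  void *my_malloc(size_t_ n) __attribute__((alloc_size(1)));
  void *my_calloc(size_t_ n, size_t_ m) __attribute__((alloc_size(1, 2)));
  // __builtin_object_size(my_malloc(16), 0) folds to 16;
  // __builtin_object_size(my_calloc(4, 10), 0) folds to 40.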
@@ -5484,7 +5609,7 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
return true;
}
-/// \brief Convenience function. LVal's base must be a call to an alloc_size
+/// Convenience function. LVal's base must be a call to an alloc_size
/// function.
static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
const LValue &LVal,
@@ -5496,7 +5621,7 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
return getBytesReturnedByAllocSizeCall(Ctx, CE, Result);
}
-/// \brief Attempts to evaluate the given LValueBase as the result of a call to
+/// Attempts to evaluate the given LValueBase as the result of a call to
/// a function with the alloc_size attribute. If it was possible to do so, this
/// function will return true, make Result's Base point to said function call,
/// and mark Result's Base as invalid.
@@ -5662,8 +5787,8 @@ bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
return evaluateLValue(E->getSubExpr(), Result);
}
-bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
- const Expr* SubExpr = E->getSubExpr();
+bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
switch (E->getCastKind()) {
default:
@@ -5680,7 +5805,11 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
// permitted in constant expressions in C++11. Bitcasts from cv void* are
// also static_casts, but we disallow them as a resolution to DR1312.
if (!E->getType()->isVoidPointerType()) {
- Result.Designator.setInvalid();
+ // If we changed anything other than cvr-qualifiers, we can't use this
+ // value for constant folding. FIXME: Qualification conversions should
+ // always be CK_NoOp, but we get this wrong in C.
+ if (!Info.Ctx.hasCvrSimilarType(E->getType(), E->getSubExpr()->getType()))
+ Result.Designator.setInvalid();
if (SubExpr->getType()->isVoidPointerType())
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< 3 << SubExpr->getType();
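An illustrative folding consequence (file-scope C++, non-constant initializers): only casts confined to cvr-qualification changes keep the lvalue's designator usable.

  extern int n;
  int *p = (int *)(const int *)&n; // cvr-similar: designator survives and
                                   // the pointer can still fold
  int *q = (int *)(float *)&n;     // not cvr-similar: designator invalidated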
@@ -5728,7 +5857,6 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
Result.Base = (Expr*)nullptr;
Result.InvalidBase = false;
Result.Offset = CharUnits::fromQuantity(N);
- Result.CallIndex = 0;
Result.Designator.setInvalid();
Result.IsNullPtr = false;
return true;
@@ -5744,9 +5872,9 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
if (!evaluateLValue(SubExpr, Result))
return false;
} else {
- Result.set(SubExpr, Info.CurrentCall->Index);
- if (!EvaluateInPlace(Info.CurrentCall->createTemporary(SubExpr, false),
- Info, Result, SubExpr))
+ APValue &Value = createTemporary(SubExpr, false, Result,
+ *Info.CurrentCall);
+ if (!EvaluateInPlace(Value, Info, Result, SubExpr))
return false;
}
// The result is a pointer to the first element of the array.
@@ -6117,6 +6245,8 @@ namespace {
bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
+
+ bool VisitBinCmp(const BinaryOperator *E);
};
}
@@ -6512,9 +6642,8 @@ public:
/// Visit an expression which constructs the value of this temporary.
bool VisitConstructExpr(const Expr *E) {
- Result.set(E, Info.CurrentCall->Index);
- return EvaluateInPlace(Info.CurrentCall->createTemporary(E, false),
- Info, Result, E);
+ APValue &Value = createTemporary(E, false, Result, *Info.CurrentCall);
+ return EvaluateInPlace(Value, Info, Result, E);
}
bool VisitCastExpr(const CastExpr *E) {
@@ -6787,6 +6916,22 @@ static bool EvaluateArray(const Expr *E, const LValue &This,
return ArrayExprEvaluator(Info, This, Result).Visit(E);
}
+// Return true iff the given array filler may depend on the element index.
+static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
+ // For now, just whitelist non-class value-initialization and initialization
+ // lists composed of them.
+ if (isa<ImplicitValueInitExpr>(FillerExpr))
+ return false;
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(FillerExpr)) {
+ for (unsigned I = 0, E = ILE->getNumInits(); I != E; ++I) {
+ if (MaybeElementDependentArrayFiller(ILE->getInit(I)))
+ return true;
+ }
+ return false;
+ }
+ return true;
+}
+
bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
if (!CAT)
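Illustrative initializers for the helper above: both fillers below are index-independent, so they can be evaluated once and reused for every trailing element.

  constexpr int a[8] = {1};    // filler is an ImplicitValueInitExpr
  struct P { int x, y; };
  constexpr P b[8] = {{1, 2}}; // filler is an init list of implicit
                               // value-initializations: also whitelisted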
@@ -6816,10 +6961,13 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr;
// If the initializer might depend on the array index, run it for each
- // array element. For now, just whitelist non-class value-initialization.
- if (NumEltsToInit != NumElts && !isa<ImplicitValueInitExpr>(FillerExpr))
+ // array element.
+ if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(FillerExpr))
NumEltsToInit = NumElts;
+ LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: "
+ << NumEltsToInit << ".\n");
+
Result = APValue(APValue::UninitArray(), NumEltsToInit, NumElts);
// If the array was previously zero-initialized, preserve the
@@ -6939,11 +7087,11 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
namespace {
class IntExprEvaluator
- : public ExprEvaluatorBase<IntExprEvaluator> {
+ : public ExprEvaluatorBase<IntExprEvaluator> {
APValue &Result;
public:
IntExprEvaluator(EvalInfo &info, APValue &result)
- : ExprEvaluatorBaseTy(info), Result(result) {}
+ : ExprEvaluatorBaseTy(info), Result(result) {}
bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
@@ -6974,7 +7122,7 @@ public:
}
bool Success(uint64_t Value, const Expr *E, APValue &Result) {
- assert(E->getType()->isIntegralOrEnumerationType() &&
+ assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
return true;
@@ -7076,6 +7224,73 @@ public:
// FIXME: Missing: array subscript of vector, member of vector
};
+
+class FixedPointExprEvaluator
+ : public ExprEvaluatorBase<FixedPointExprEvaluator> {
+ APValue &Result;
+
+ public:
+ FixedPointExprEvaluator(EvalInfo &info, APValue &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
+ assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
+ assert(SI.isSigned() == E->getType()->isSignedFixedPointType() &&
+ "Invalid evaluation result.");
+ assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(SI);
+ return true;
+ }
+ bool Success(const llvm::APSInt &SI, const Expr *E) {
+ return Success(SI, E, Result);
+ }
+
+ bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
+ assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
+ assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(APSInt(I));
+ Result.getInt().setIsUnsigned(E->getType()->isUnsignedFixedPointType());
+ return true;
+ }
+ bool Success(const llvm::APInt &I, const Expr *E) {
+ return Success(I, E, Result);
+ }
+
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ assert(E->getType()->isFixedPointType() && "Invalid evaluation result.");
+ Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
+ return true;
+ }
+ bool Success(uint64_t Value, const Expr *E) {
+ return Success(Value, E, Result);
+ }
+
+ bool Success(CharUnits Size, const Expr *E) {
+ return Success(Size.getQuantity(), E);
+ }
+
+ bool Success(const APValue &V, const Expr *E) {
+ if (V.isLValue() || V.isAddrLabelDiff()) {
+ Result = V;
+ return true;
+ }
+ return Success(V.getInt(), E);
+ }
+
+ bool ZeroInitialization(const Expr *E) { return Success(0, E); }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitFixedPointLiteral(const FixedPointLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitUnaryOperator(const UnaryOperator *E);
+};
} // end anonymous namespace
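A sketch of what the new evaluator covers at this point (assumes C with -ffixed-point and the ISO/IEC TR 18037 fixed-point types; only literals and unary operators are wired up so far):

  _Accum a = 0.5k; // FixedPointLiteral -> VisitFixedPointLiteral
  _Accum b = -a;   // routed through VisitUnaryOperator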
/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
@@ -7133,30 +7348,43 @@ bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
return false;
}
+/// Values returned by __builtin_classify_type, chosen to match the values
+/// produced by GCC's builtin.
+enum class GCCTypeClass {
+ None = -1,
+ Void = 0,
+ Integer = 1,
+ // GCC reserves 2 for character types, but instead classifies them as
+ // integers.
+ Enum = 3,
+ Bool = 4,
+ Pointer = 5,
+ // GCC reserves 6 for references, but appears to never use it (because
+ // expressions never have reference type, presumably).
+ PointerToDataMember = 7,
+ RealFloat = 8,
+ Complex = 9,
+ // GCC reserves 10 for functions, but does not use it since GCC version 6 due
+ // to decay to pointer. (Prior to version 6 it was only used in C++ mode).
+ // GCC claims to reserve 11 for pointers to member functions, but *actually*
+ // uses 12 for that purpose, same as for a class or struct. Maybe it
+ // internally implements a pointer to member as a struct? Who knows.
+ PointerToMemberFunction = 12, // Not a bug, see above.
+ ClassOrStruct = 12,
+ Union = 13,
+ // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to
+ // decay to pointer. (Prior to version 6 it was only used in C++ mode).
+ // GCC reserves 15 for strings, but actually uses 5 (pointer) for string
+ // literals.
+};
+
/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// as GCC.
-static int EvaluateBuiltinClassifyType(const CallExpr *E,
- const LangOptions &LangOpts) {
- // The following enum mimics the values returned by GCC.
- // FIXME: Does GCC differ between lvalue and rvalue references here?
- enum gcc_type_class {
- no_type_class = -1,
- void_type_class, integer_type_class, char_type_class,
- enumeral_type_class, boolean_type_class,
- pointer_type_class, reference_type_class, offset_type_class,
- real_type_class, complex_type_class,
- function_type_class, method_type_class,
- record_type_class, union_type_class,
- array_type_class, string_type_class,
- lang_type_class
- };
+static GCCTypeClass
+EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
+ assert(!T->isDependentType() && "unexpected dependent type");
- // If no argument was supplied, default to "no_type_class". This isn't
- // ideal, however it is what gcc does.
- if (E->getNumArgs() == 0)
- return no_type_class;
-
- QualType CanTy = E->getArg(0)->getType().getCanonicalType();
+ QualType CanTy = T.getCanonicalType();
const BuiltinType *BT = dyn_cast<BuiltinType>(CanTy);
switch (CanTy->getTypeClass()) {
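Illustrative checks of the GCC-compatible numbering (these fold as integer constant expressions):

  static_assert(__builtin_classify_type(0) == 1, "");        // Integer
  static_assert(__builtin_classify_type(0.0) == 8, "");      // RealFloat
  static_assert(__builtin_classify_type((int *)0) == 5, ""); // Pointer
  struct SS {};
  static_assert(__builtin_classify_type(SS()) == 12, "");    // ClassOrStruct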
@@ -7165,36 +7393,55 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
#define NON_CANONICAL_TYPE(ID, BASE) case Type::ID:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(ID, BASE) case Type::ID:
#include "clang/AST/TypeNodes.def"
- llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
+ llvm_unreachable("unexpected non-canonical or dependent type");
case Type::Builtin:
switch (BT->getKind()) {
#define BUILTIN_TYPE(ID, SINGLETON_ID)
-#define SIGNED_TYPE(ID, SINGLETON_ID) case BuiltinType::ID: return integer_type_class;
-#define FLOATING_TYPE(ID, SINGLETON_ID) case BuiltinType::ID: return real_type_class;
-#define PLACEHOLDER_TYPE(ID, SINGLETON_ID) case BuiltinType::ID: break;
+#define SIGNED_TYPE(ID, SINGLETON_ID) \
+ case BuiltinType::ID: return GCCTypeClass::Integer;
+#define FLOATING_TYPE(ID, SINGLETON_ID) \
+ case BuiltinType::ID: return GCCTypeClass::RealFloat;
+#define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \
+ case BuiltinType::ID: break;
#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Void:
- return void_type_class;
+ return GCCTypeClass::Void;
case BuiltinType::Bool:
- return boolean_type_class;
+ return GCCTypeClass::Bool;
- case BuiltinType::Char_U: // gcc doesn't appear to use char_type_class
+ case BuiltinType::Char_U:
case BuiltinType::UChar:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char8:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
case BuiltinType::UShort:
case BuiltinType::UInt:
case BuiltinType::ULong:
case BuiltinType::ULongLong:
case BuiltinType::UInt128:
- return integer_type_class;
+ return GCCTypeClass::Integer;
+
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
+ return GCCTypeClass::None;
case BuiltinType::NullPtr:
- return pointer_type_class;
- case BuiltinType::WChar_U:
- case BuiltinType::Char16:
- case BuiltinType::Char32:
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
@@ -7206,74 +7453,73 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
case BuiltinType::OCLReserveID:
+ return GCCTypeClass::None;
+
case BuiltinType::Dependent:
- llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+ llvm_unreachable("unexpected dependent type");
};
- break;
+ llvm_unreachable("unexpected placeholder type");
case Type::Enum:
- return LangOpts.CPlusPlus ? enumeral_type_class : integer_type_class;
- break;
+ return LangOpts.CPlusPlus ? GCCTypeClass::Enum : GCCTypeClass::Integer;
case Type::Pointer:
- return pointer_type_class;
- break;
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ return GCCTypeClass::Pointer;
case Type::MemberPointer:
- if (CanTy->isMemberDataPointerType())
- return offset_type_class;
- else {
- // We expect member pointers to be either data or function pointers,
- // nothing else.
- assert(CanTy->isMemberFunctionPointerType());
- return method_type_class;
- }
+ return CanTy->isMemberDataPointerType()
+ ? GCCTypeClass::PointerToDataMember
+ : GCCTypeClass::PointerToMemberFunction;
case Type::Complex:
- return complex_type_class;
-
- case Type::FunctionNoProto:
- case Type::FunctionProto:
- return LangOpts.CPlusPlus ? function_type_class : pointer_type_class;
+ return GCCTypeClass::Complex;
case Type::Record:
- if (const RecordType *RT = CanTy->getAs<RecordType>()) {
- switch (RT->getDecl()->getTagKind()) {
- case TagTypeKind::TTK_Struct:
- case TagTypeKind::TTK_Class:
- case TagTypeKind::TTK_Interface:
- return record_type_class;
-
- case TagTypeKind::TTK_Enum:
- return LangOpts.CPlusPlus ? enumeral_type_class : integer_type_class;
-
- case TagTypeKind::TTK_Union:
- return union_type_class;
- }
- }
- llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+ return CanTy->isUnionType() ? GCCTypeClass::Union
+ : GCCTypeClass::ClassOrStruct;
- case Type::ConstantArray:
- case Type::VariableArray:
- case Type::IncompleteArray:
- return LangOpts.CPlusPlus ? array_type_class : pointer_type_class;
+ case Type::Atomic:
+ // GCC classifies _Atomic T the same as T.
+ return EvaluateBuiltinClassifyType(
+ CanTy->castAs<AtomicType>()->getValueType(), LangOpts);
case Type::BlockPointer:
- case Type::LValueReference:
- case Type::RValueReference:
case Type::Vector:
case Type::ExtVector:
- case Type::Auto:
- case Type::DeducedTemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
- case Type::Atomic:
- llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+ // GCC classifies vectors as None. We follow its lead and classify all
+ // other types that don't fit into the regular classification the same way.
+ return GCCTypeClass::None;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("invalid type for expression");
}
- llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+ llvm_unreachable("unexpected type class");
+}
+
+/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
+/// as GCC.
+static GCCTypeClass
+EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) {
+  // If no argument was supplied, default to None. This isn't ideal,
+  // but it is what GCC does.
+ if (E->getNumArgs() == 0)
+ return GCCTypeClass::None;
+
+ // FIXME: Bizarrely, GCC treats a call with more than one argument as not
+ // being an ICE, but still folds it to a constant using the type of the first
+ // argument.
+ return EvaluateBuiltinClassifyType(E->getArg(0)->getType(), LangOpts);
}
/// EvaluateBuiltinConstantPForLValue - Determine the result of
@@ -7592,7 +7838,7 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
return true;
}
-/// \brief Tries to evaluate the __builtin_object_size for @p E. If successful,
+/// Tries to evaluate the __builtin_object_size for @p E. If successful,
/// returns true and stores the result in @p Size.
///
/// If @p WasError is non-null, this will report whether the failure to evaluate
@@ -7697,7 +7943,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
case Builtin::BI__builtin_classify_type:
- return Success(EvaluateBuiltinClassifyType(E, Info.getLangOpts()), E);
+ return Success((int)EvaluateBuiltinClassifyType(E, Info.getLangOpts()), E);
// FIXME: BI__builtin_clrsb
// FIXME: BI__builtin_clrsbl
@@ -7913,14 +8159,24 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
BuiltinOp != Builtin::BIwmemcmp &&
BuiltinOp != Builtin::BI__builtin_memcmp &&
BuiltinOp != Builtin::BI__builtin_wmemcmp);
+ bool IsWide = BuiltinOp == Builtin::BIwcscmp ||
+ BuiltinOp == Builtin::BIwcsncmp ||
+ BuiltinOp == Builtin::BIwmemcmp ||
+ BuiltinOp == Builtin::BI__builtin_wcscmp ||
+ BuiltinOp == Builtin::BI__builtin_wcsncmp ||
+ BuiltinOp == Builtin::BI__builtin_wmemcmp;
for (; MaxLength; --MaxLength) {
APValue Char1, Char2;
if (!handleLValueToRValueConversion(Info, E, CharTy, String1, Char1) ||
!handleLValueToRValueConversion(Info, E, CharTy, String2, Char2) ||
!Char1.isInt() || !Char2.isInt())
return false;
- if (Char1.getInt() != Char2.getInt())
- return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
+ if (Char1.getInt() != Char2.getInt()) {
+ if (IsWide) // wmemcmp compares with wchar_t signedness.
+ return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
+ // memcmp always compares unsigned chars.
+ return Success(Char1.getInt().ult(Char2.getInt()) ? -1 : 1, E);
+ }
if (StopAtNull && !Char1.getInt())
return Success(0, E);
assert(!(StopAtNull && !Char2.getInt()));
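The signedness distinction above matters for bytes with the high bit set. A sketch of what this path folds (assuming these builtins are constant-evaluated as implemented here):

    // memcmp compares as unsigned char: 0x80 (128) > 0x01, so the result is
    // positive even on targets where plain char is signed.
    static_assert(__builtin_memcmp("\x80", "\x01", 1) > 0, "unsigned compare");
    // wcscmp/wmemcmp instead compare with wchar_t signedness, so the same
    // bit patterns may compare less where wchar_t is signed.
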
@@ -7979,6 +8235,125 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BIomp_is_initial_device:
// We can decide statically which value the runtime would return if called.
return Success(Info.getLangOpts().OpenMPIsDevice ? 0 : 1, E);
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_mul_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow: {
+ LValue ResultLValue;
+ APSInt LHS, RHS;
+
+ QualType ResultType = E->getArg(2)->getType()->getPointeeType();
+ if (!EvaluateInteger(E->getArg(0), LHS, Info) ||
+ !EvaluateInteger(E->getArg(1), RHS, Info) ||
+ !EvaluatePointer(E->getArg(2), ResultLValue, Info))
+ return false;
+
+ APSInt Result;
+ bool DidOverflow = false;
+
+ // If the types don't have to match, enlarge all 3 to the largest of them.
+ if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+ BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+ BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+ bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
+ ResultType->isSignedIntegerOrEnumerationType();
+ bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
+ ResultType->isSignedIntegerOrEnumerationType();
+ uint64_t LHSSize = LHS.getBitWidth();
+ uint64_t RHSSize = RHS.getBitWidth();
+ uint64_t ResultSize = Info.Ctx.getTypeSize(ResultType);
+ uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
+
+ // Add an additional bit if the signedness isn't uniformly agreed to. We
+ // could do this ONLY if there is a signed and an unsigned that both have
+ // MaxBits, but the code to check that is pretty nasty. The issue will be
+ // caught in the shrink-to-result later anyway.
+ if (IsSigned && !AllSigned)
+ ++MaxBits;
+
+ LHS = APSInt(IsSigned ? LHS.sextOrSelf(MaxBits) : LHS.zextOrSelf(MaxBits),
+ !IsSigned);
+ RHS = APSInt(IsSigned ? RHS.sextOrSelf(MaxBits) : RHS.zextOrSelf(MaxBits),
+ !IsSigned);
+ Result = APSInt(MaxBits, !IsSigned);
+ }
+
+  // Perform the operation, tracking overflow.
+ switch (BuiltinOp) {
+ default:
+ llvm_unreachable("Invalid value for BuiltinOp");
+ case Builtin::BI__builtin_add_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ Result = LHS.isSigned() ? LHS.sadd_ov(RHS, DidOverflow)
+ : LHS.uadd_ov(RHS, DidOverflow);
+ break;
+ case Builtin::BI__builtin_sub_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ Result = LHS.isSigned() ? LHS.ssub_ov(RHS, DidOverflow)
+ : LHS.usub_ov(RHS, DidOverflow);
+ break;
+ case Builtin::BI__builtin_mul_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ Result = LHS.isSigned() ? LHS.smul_ov(RHS, DidOverflow)
+ : LHS.umul_ov(RHS, DidOverflow);
+ break;
+ }
+
+ // In the case where multiple sizes are allowed, truncate and see if
+ // the values are the same.
+ if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+ BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+ BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
+    // since it gives the behavior of a TruncOrSelf whenever its parameter
+    // is <= its size. We previously set Result to be at least the
+    // type-size of the result, so getTypeSize(ResultType) <= Result.BitWidth
+    // will work exactly like TruncOrSelf.
+ APSInt Temp = Result.extOrTrunc(Info.Ctx.getTypeSize(ResultType));
+ Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
+
+ if (!APSInt::isSameValue(Temp, Result))
+ DidOverflow = true;
+ Result = Temp;
+ }
+
+ APValue APV{Result};
+ if (!handleAssignment(Info, E, ResultLValue, ResultType, APV))
+ return false;
+ return Success(DidOverflow, E);
+ }
}
}
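The net effect is that the checked-arithmetic builtins now fold in constant expressions. A minimal sketch (C++14; AddOverflows is a hypothetical helper):

    constexpr bool AddOverflows(int A, int B) {
      int Out = 0;
      // Returns true iff the mathematically exact result did not fit in Out.
      return __builtin_add_overflow(A, B, &Out);
    }
    static_assert(AddOverflows(__INT_MAX__, 1), "INT_MAX + 1 wraps");
    static_assert(!AddOverflows(1, 2), "fits");
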
@@ -7999,10 +8374,11 @@ static bool HasSameBase(const LValue &A, const LValue &B) {
}
return IsGlobalLValue(A.getLValueBase()) ||
- A.getLValueCallIndex() == B.getLValueCallIndex();
+ (A.getLValueCallIndex() == B.getLValueCallIndex() &&
+ A.getLValueVersion() == B.getLValueVersion());
}
-/// \brief Determine whether this is a pointer past the end of the complete
+/// Determine whether this is a pointer past the end of the complete
/// object referred to by the lvalue.
static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
const LValue &LV) {
@@ -8031,7 +8407,7 @@ static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx,
namespace {
-/// \brief Data recursive integer evaluator of certain binary operators.
+/// Data recursive integer evaluator of certain binary operators.
///
/// We use a data recursive algorithm for binary operators so that we are able
/// to handle extreme cases of chained binary operators without causing stack
@@ -8076,15 +8452,13 @@ public:
DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
: IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
- /// \brief True if \param E is a binary operator that we are going to handle
+ /// True if \param E is a binary operator that we are going to handle
/// data recursively.
/// We handle binary operators that are comma, logical, or that have operands
/// with integral or enumeration type.
static bool shouldEnqueue(const BinaryOperator *E) {
- return E->getOpcode() == BO_Comma ||
- E->isLogicalOp() ||
- (E->isRValue() &&
- E->getType()->isIntegralOrEnumerationType() &&
+ return E->getOpcode() == BO_Comma || E->isLogicalOp() ||
+ (E->isRValue() && E->getType()->isIntegralOrEnumerationType() &&
E->getLHS()->getType()->isIntegralOrEnumerationType() &&
E->getRHS()->getType()->isIntegralOrEnumerationType());
}
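For scale, an initializer like the hypothetical one below parses into a left-leaning tree with one node per term; recursing per node would need that many stack frames, while the work queue walks the tree in constant stack:

    // Generated code sometimes contains tens of thousands of terms.
    int K = 1 + 1 + 1 /* ... many more terms ... */ + 1;
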
@@ -8119,7 +8493,7 @@ private:
return Info.CCEDiag(E, D);
}
- // \brief Returns true if visiting the RHS is necessary, false otherwise.
+ // Returns true if visiting the RHS is necessary, false otherwise.
bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
bool &SuppressRHSDiags);
@@ -8363,19 +8737,47 @@ public:
};
}
-bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- // We don't call noteFailure immediately because the assignment happens after
- // we evaluate LHS and RHS.
- if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp())
- return Error(E);
+template <class SuccessCB, class AfterCB>
+static bool
+EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
+ SuccessCB &&Success, AfterCB &&DoAfter) {
+ assert(E->isComparisonOp() && "expected comparison operator");
+ assert((E->getOpcode() == BO_Cmp ||
+ E->getType()->isIntegralOrEnumerationType()) &&
+ "unsupported binary expression evaluation");
+ auto Error = [&](const Expr *E) {
+ Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ };
- DelayedNoteFailureRAII MaybeNoteFailureLater(Info, E->isAssignmentOp());
- if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
- return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
+ using CCR = ComparisonCategoryResult;
+ bool IsRelational = E->isRelationalOp();
+ bool IsEquality = E->isEqualityOp();
+ if (E->getOpcode() == BO_Cmp) {
+ const ComparisonCategoryInfo &CmpInfo =
+ Info.Ctx.CompCategories.getInfoForType(E->getType());
+ IsRelational = CmpInfo.isOrdered();
+ IsEquality = CmpInfo.isEquality();
+ }
QualType LHSTy = E->getLHS()->getType();
QualType RHSTy = E->getRHS()->getType();
+ if (LHSTy->isIntegralOrEnumerationType() &&
+ RHSTy->isIntegralOrEnumerationType()) {
+ APSInt LHS, RHS;
+ bool LHSOK = EvaluateInteger(E->getLHS(), LHS, Info);
+ if (!LHSOK && !Info.noteFailure())
+ return false;
+ if (!EvaluateInteger(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+ if (LHS < RHS)
+ return Success(CCR::Less, E);
+ if (LHS > RHS)
+ return Success(CCR::Greater, E);
+ return Success(CCR::Equal, E);
+ }
+
if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) {
ComplexValue LHS, RHS;
bool LHSOK;
@@ -8408,30 +8810,13 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
APFloat::cmpResult CR_i =
LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
-
- if (E->getOpcode() == BO_EQ)
- return Success((CR_r == APFloat::cmpEqual &&
- CR_i == APFloat::cmpEqual), E);
- else {
- assert(E->getOpcode() == BO_NE &&
- "Invalid complex comparison.");
- return Success(((CR_r == APFloat::cmpGreaterThan ||
- CR_r == APFloat::cmpLessThan ||
- CR_r == APFloat::cmpUnordered) ||
- (CR_i == APFloat::cmpGreaterThan ||
- CR_i == APFloat::cmpLessThan ||
- CR_i == APFloat::cmpUnordered)), E);
- }
+ bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual;
+ return Success(IsEqual ? CCR::Equal : CCR::Nonequal, E);
} else {
- if (E->getOpcode() == BO_EQ)
- return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
- LHS.getComplexIntImag() == RHS.getComplexIntImag()), E);
- else {
- assert(E->getOpcode() == BO_NE &&
- "Invalid compex comparison.");
- return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() ||
- LHS.getComplexIntImag() != RHS.getComplexIntImag()), E);
- }
+ assert(IsEquality && "invalid complex comparison");
+ bool IsEqual = LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
+ LHS.getComplexIntImag() == RHS.getComplexIntImag();
+ return Success(IsEqual ? CCR::Equal : CCR::Nonequal, E);
}
}
@@ -8446,246 +8831,161 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
return false;
- APFloat::cmpResult CR = LHS.compare(RHS);
-
- switch (E->getOpcode()) {
- default:
- llvm_unreachable("Invalid binary operator!");
- case BO_LT:
- return Success(CR == APFloat::cmpLessThan, E);
- case BO_GT:
- return Success(CR == APFloat::cmpGreaterThan, E);
- case BO_LE:
- return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E);
- case BO_GE:
- return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual,
- E);
- case BO_EQ:
- return Success(CR == APFloat::cmpEqual, E);
- case BO_NE:
- return Success(CR == APFloat::cmpGreaterThan
- || CR == APFloat::cmpLessThan
- || CR == APFloat::cmpUnordered, E);
- }
+ assert(E->isComparisonOp() && "Invalid binary operator!");
+ auto GetCmpRes = [&]() {
+ switch (LHS.compare(RHS)) {
+ case APFloat::cmpEqual:
+ return CCR::Equal;
+ case APFloat::cmpLessThan:
+ return CCR::Less;
+ case APFloat::cmpGreaterThan:
+ return CCR::Greater;
+ case APFloat::cmpUnordered:
+ return CCR::Unordered;
+ }
+ llvm_unreachable("Unrecognised APFloat::cmpResult enum");
+ };
+ return Success(GetCmpRes(), E);
}
if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
- if (E->getOpcode() == BO_Sub || E->isComparisonOp()) {
- LValue LHSValue, RHSValue;
-
- bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
- if (!LHSOK && !Info.noteFailure())
- return false;
-
- if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
- return false;
-
- // Reject differing bases from the normal codepath; we special-case
- // comparisons to null.
- if (!HasSameBase(LHSValue, RHSValue)) {
- if (E->getOpcode() == BO_Sub) {
- // Handle &&A - &&B.
- if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
- return Error(E);
- const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
- const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
- if (!LHSExpr || !RHSExpr)
- return Error(E);
- const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
- const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
- if (!LHSAddrExpr || !RHSAddrExpr)
- return Error(E);
- // Make sure both labels come from the same function.
- if (LHSAddrExpr->getLabel()->getDeclContext() !=
- RHSAddrExpr->getLabel()->getDeclContext())
- return Error(E);
- return Success(APValue(LHSAddrExpr, RHSAddrExpr), E);
- }
- // Inequalities and subtractions between unrelated pointers have
- // unspecified or undefined behavior.
- if (!E->isEqualityOp())
- return Error(E);
- // A constant address may compare equal to the address of a symbol.
- // The one exception is that address of an object cannot compare equal
- // to a null pointer constant.
- if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
- (!RHSValue.Base && !RHSValue.Offset.isZero()))
- return Error(E);
- // It's implementation-defined whether distinct literals will have
- // distinct addresses. In clang, the result of such a comparison is
- // unspecified, so it is not a constant expression. However, we do know
- // that the address of a literal will be non-null.
- if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
- LHSValue.Base && RHSValue.Base)
- return Error(E);
- // We can't tell whether weak symbols will end up pointing to the same
- // object.
- if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
- return Error(E);
- // We can't compare the address of the start of one object with the
- // past-the-end address of another object, per C++ DR1652.
- if ((LHSValue.Base && LHSValue.Offset.isZero() &&
- isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
- (RHSValue.Base && RHSValue.Offset.isZero() &&
- isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
- return Error(E);
- // We can't tell whether an object is at the same address as another
- // zero sized object.
- if ((RHSValue.Base && isZeroSized(LHSValue)) ||
- (LHSValue.Base && isZeroSized(RHSValue)))
- return Error(E);
- // Pointers with different bases cannot represent the same object.
- // (Note that clang defaults to -fmerge-all-constants, which can
- // lead to inconsistent results for comparisons involving the address
- // of a constant; this generally doesn't matter in practice.)
- return Success(E->getOpcode() == BO_NE, E);
- }
-
- const CharUnits &LHSOffset = LHSValue.getLValueOffset();
- const CharUnits &RHSOffset = RHSValue.getLValueOffset();
-
- SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
- SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
-
- if (E->getOpcode() == BO_Sub) {
- // C++11 [expr.add]p6:
- // Unless both pointers point to elements of the same array object, or
- // one past the last element of the array object, the behavior is
- // undefined.
- if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
- !AreElementsOfSameArray(getType(LHSValue.Base),
- LHSDesignator, RHSDesignator))
- CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
-
- QualType Type = E->getLHS()->getType();
- QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
+ LValue LHSValue, RHSValue;
- CharUnits ElementSize;
- if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
- return false;
-
- // As an extension, a type may have zero size (empty struct or union in
- // C, array of zero length). Pointer subtraction in such cases has
- // undefined behavior, so is not constant.
- if (ElementSize.isZero()) {
- Info.FFDiag(E, diag::note_constexpr_pointer_subtraction_zero_size)
- << ElementType;
- return false;
- }
+ bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
+ if (!LHSOK && !Info.noteFailure())
+ return false;
- // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
- // and produce incorrect results when it overflows. Such behavior
- // appears to be non-conforming, but is common, so perhaps we should
- // assume the standard intended for such cases to be undefined behavior
- // and check for them.
-
- // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
- // overflow in the final conversion to ptrdiff_t.
- APSInt LHS(
- llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
- APSInt RHS(
- llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
- APSInt ElemSize(
- llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false);
- APSInt TrueResult = (LHS - RHS) / ElemSize;
- APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
-
- if (Result.extend(65) != TrueResult &&
- !HandleOverflow(Info, E, TrueResult, E->getType()))
- return false;
- return Success(Result, E);
- }
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
- // C++11 [expr.rel]p3:
- // Pointers to void (after pointer conversions) can be compared, with a
- // result defined as follows: If both pointers represent the same
- // address or are both the null pointer value, the result is true if the
- // operator is <= or >= and false otherwise; otherwise the result is
- // unspecified.
- // We interpret this as applying to pointers to *cv* void.
- if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset &&
- E->isRelationalOp())
- CCEDiag(E, diag::note_constexpr_void_comparison);
-
- // C++11 [expr.rel]p2:
- // - If two pointers point to non-static data members of the same object,
- // or to subobjects or array elements fo such members, recursively, the
- // pointer to the later declared member compares greater provided the
- // two members have the same access control and provided their class is
- // not a union.
- // [...]
- // - Otherwise pointer comparisons are unspecified.
- if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
- E->isRelationalOp()) {
- bool WasArrayIndex;
- unsigned Mismatch =
- FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator,
- RHSDesignator, WasArrayIndex);
- // At the point where the designators diverge, the comparison has a
- // specified value if:
- // - we are comparing array indices
- // - we are comparing fields of a union, or fields with the same access
- // Otherwise, the result is unspecified and thus the comparison is not a
- // constant expression.
- if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
- Mismatch < RHSDesignator.Entries.size()) {
- const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
- const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
- if (!LF && !RF)
- CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
- else if (!LF)
- CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ // Reject differing bases from the normal codepath; we special-case
+ // comparisons to null.
+ if (!HasSameBase(LHSValue, RHSValue)) {
+ // Inequalities and subtractions between unrelated pointers have
+ // unspecified or undefined behavior.
+ if (!IsEquality)
+ return Error(E);
+ // A constant address may compare equal to the address of a symbol.
+ // The one exception is that address of an object cannot compare equal
+ // to a null pointer constant.
+ if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
+ (!RHSValue.Base && !RHSValue.Offset.isZero()))
+ return Error(E);
+ // It's implementation-defined whether distinct literals will have
+ // distinct addresses. In clang, the result of such a comparison is
+ // unspecified, so it is not a constant expression. However, we do know
+ // that the address of a literal will be non-null.
+ if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
+ LHSValue.Base && RHSValue.Base)
+ return Error(E);
+ // We can't tell whether weak symbols will end up pointing to the same
+ // object.
+ if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
+ return Error(E);
+ // We can't compare the address of the start of one object with the
+ // past-the-end address of another object, per C++ DR1652.
+ if ((LHSValue.Base && LHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
+ (RHSValue.Base && RHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
+ return Error(E);
+ // We can't tell whether an object is at the same address as another
+ // zero sized object.
+ if ((RHSValue.Base && isZeroSized(LHSValue)) ||
+ (LHSValue.Base && isZeroSized(RHSValue)))
+ return Error(E);
+ return Success(CCR::Nonequal, E);
+ }
+
+ const CharUnits &LHSOffset = LHSValue.getLValueOffset();
+ const CharUnits &RHSOffset = RHSValue.getLValueOffset();
+
+ SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
+ SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
+
+ // C++11 [expr.rel]p3:
+ // Pointers to void (after pointer conversions) can be compared, with a
+ // result defined as follows: If both pointers represent the same
+ // address or are both the null pointer value, the result is true if the
+ // operator is <= or >= and false otherwise; otherwise the result is
+ // unspecified.
+ // We interpret this as applying to pointers to *cv* void.
+ if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset && IsRelational)
+ Info.CCEDiag(E, diag::note_constexpr_void_comparison);
+
+ // C++11 [expr.rel]p2:
+ // - If two pointers point to non-static data members of the same object,
+  //   or to subobjects or array elements of such members, recursively, the
+ // pointer to the later declared member compares greater provided the
+ // two members have the same access control and provided their class is
+ // not a union.
+ // [...]
+ // - Otherwise pointer comparisons are unspecified.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
+ bool WasArrayIndex;
+ unsigned Mismatch = FindDesignatorMismatch(
+ getType(LHSValue.Base), LHSDesignator, RHSDesignator, WasArrayIndex);
+ // At the point where the designators diverge, the comparison has a
+ // specified value if:
+ // - we are comparing array indices
+ // - we are comparing fields of a union, or fields with the same access
+ // Otherwise, the result is unspecified and thus the comparison is not a
+ // constant expression.
+ if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
+ Mismatch < RHSDesignator.Entries.size()) {
+ const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
+ const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
+ if (!LF && !RF)
+ Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
+ else if (!LF)
+ Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
<< getAsBaseClass(LHSDesignator.Entries[Mismatch])
<< RF->getParent() << RF;
- else if (!RF)
- CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ else if (!RF)
+ Info.CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
<< getAsBaseClass(RHSDesignator.Entries[Mismatch])
<< LF->getParent() << LF;
- else if (!LF->getParent()->isUnion() &&
- LF->getAccess() != RF->getAccess())
- CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access)
+ else if (!LF->getParent()->isUnion() &&
+ LF->getAccess() != RF->getAccess())
+ Info.CCEDiag(E,
+ diag::note_constexpr_pointer_comparison_differing_access)
<< LF << LF->getAccess() << RF << RF->getAccess()
<< LF->getParent();
- }
- }
-
- // The comparison here must be unsigned, and performed with the same
- // width as the pointer.
- unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
- uint64_t CompareLHS = LHSOffset.getQuantity();
- uint64_t CompareRHS = RHSOffset.getQuantity();
- assert(PtrSize <= 64 && "Unexpected pointer width");
- uint64_t Mask = ~0ULL >> (64 - PtrSize);
- CompareLHS &= Mask;
- CompareRHS &= Mask;
-
- // If there is a base and this is a relational operator, we can only
- // compare pointers within the object in question; otherwise, the result
- // depends on where the object is located in memory.
- if (!LHSValue.Base.isNull() && E->isRelationalOp()) {
- QualType BaseTy = getType(LHSValue.Base);
- if (BaseTy->isIncompleteType())
- return Error(E);
- CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
- uint64_t OffsetLimit = Size.getQuantity();
- if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
- return Error(E);
}
+ }
- switch (E->getOpcode()) {
- default: llvm_unreachable("missing comparison operator");
- case BO_LT: return Success(CompareLHS < CompareRHS, E);
- case BO_GT: return Success(CompareLHS > CompareRHS, E);
- case BO_LE: return Success(CompareLHS <= CompareRHS, E);
- case BO_GE: return Success(CompareLHS >= CompareRHS, E);
- case BO_EQ: return Success(CompareLHS == CompareRHS, E);
- case BO_NE: return Success(CompareLHS != CompareRHS, E);
- }
+ // The comparison here must be unsigned, and performed with the same
+ // width as the pointer.
+ unsigned PtrSize = Info.Ctx.getTypeSize(LHSTy);
+ uint64_t CompareLHS = LHSOffset.getQuantity();
+ uint64_t CompareRHS = RHSOffset.getQuantity();
+ assert(PtrSize <= 64 && "Unexpected pointer width");
+ uint64_t Mask = ~0ULL >> (64 - PtrSize);
+ CompareLHS &= Mask;
+ CompareRHS &= Mask;
+
+ // If there is a base and this is a relational operator, we can only
+ // compare pointers within the object in question; otherwise, the result
+ // depends on where the object is located in memory.
+ if (!LHSValue.Base.isNull() && IsRelational) {
+ QualType BaseTy = getType(LHSValue.Base);
+ if (BaseTy->isIncompleteType())
+ return Error(E);
+ CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
+ uint64_t OffsetLimit = Size.getQuantity();
+ if (CompareLHS > OffsetLimit || CompareRHS > OffsetLimit)
+ return Error(E);
}
+
+ if (CompareLHS < CompareRHS)
+ return Success(CCR::Less, E);
+ if (CompareLHS > CompareRHS)
+ return Success(CCR::Greater, E);
+ return Success(CCR::Equal, E);
}
if (LHSTy->isMemberPointerType()) {
- assert(E->isEqualityOp() && "unexpected member pointer operation");
+ assert(IsEquality && "unexpected member pointer operation");
assert(RHSTy->isMemberPointerType() && "invalid comparison");
MemberPtr LHSValue, RHSValue;
@@ -8702,24 +9002,24 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
// null, they compare unequal.
if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
- return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ return Success(Equal ? CCR::Equal : CCR::Nonequal, E);
}
// Otherwise if either is a pointer to a virtual member function, the
// result is unspecified.
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
if (MD->isVirtual())
- CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ Info.CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
if (MD->isVirtual())
- CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ Info.CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
// Otherwise they compare equal if and only if they would refer to the
// same member of the same most derived object or the same subobject if
// they were dereferenced with a hypothetical object of the associated
// class type.
bool Equal = LHSValue == RHSValue;
- return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ return Success(Equal ? CCR::Equal : CCR::Nonequal, E);
}
if (LHSTy->isNullPtrType()) {
@@ -8728,14 +9028,163 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
// C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
  // are compared, the result is true if the operator is <=, >= or ==, and
// false otherwise.
- BinaryOperator::Opcode Opcode = E->getOpcode();
- return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E);
+ return Success(CCR::Equal, E);
}
- assert((!LHSTy->isIntegralOrEnumerationType() ||
- !RHSTy->isIntegralOrEnumerationType()) &&
+ return DoAfter();
+}
+
+bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
+ if (!CheckLiteralType(Info, E))
+ return false;
+
+ auto OnSuccess = [&](ComparisonCategoryResult ResKind,
+ const BinaryOperator *E) {
+ // Evaluation succeeded. Lookup the information for the comparison category
+ // type and fetch the VarDecl for the result.
+ const ComparisonCategoryInfo &CmpInfo =
+ Info.Ctx.CompCategories.getInfoForType(E->getType());
+ const VarDecl *VD =
+ CmpInfo.getValueInfo(CmpInfo.makeWeakResult(ResKind))->VD;
+ // Check and evaluate the result as a constant expression.
+ LValue LV;
+ LV.set(VD);
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
+ return false;
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
+ };
+ return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() {
+ return ExprEvaluatorBaseTy::VisitBinCmp(E);
+ });
+}
+
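In effect, a constant <=> expression now evaluates to the matching comparison-category constant. A sketch (C++2a, assuming <compare> declares std::strong_ordering):

    #include <compare>
    constexpr auto R = (1 <=> 2); // yields CCR::Less internally
    static_assert(R < 0, "reads std::strong_ordering::less as a constant");
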
+bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ // We don't call noteFailure immediately because the assignment happens after
+ // we evaluate LHS and RHS.
+ if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp())
+ return Error(E);
+
+ DelayedNoteFailureRAII MaybeNoteFailureLater(Info, E->isAssignmentOp());
+ if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
+ return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
+
+ assert((!E->getLHS()->getType()->isIntegralOrEnumerationType() ||
+ !E->getRHS()->getType()->isIntegralOrEnumerationType()) &&
"DataRecursiveIntBinOpEvaluator should have handled integral types");
- // We can't continue from here for non-integral types.
+
+ if (E->isComparisonOp()) {
+ // Evaluate builtin binary comparisons by evaluating them as C++2a three-way
+ // comparisons and then translating the result.
+ auto OnSuccess = [&](ComparisonCategoryResult ResKind,
+ const BinaryOperator *E) {
+ using CCR = ComparisonCategoryResult;
+ bool IsEqual = ResKind == CCR::Equal,
+ IsLess = ResKind == CCR::Less,
+ IsGreater = ResKind == CCR::Greater;
+ auto Op = E->getOpcode();
+ switch (Op) {
+ default:
+ llvm_unreachable("unsupported binary operator");
+ case BO_EQ:
+ case BO_NE:
+ return Success(IsEqual == (Op == BO_EQ), E);
+ case BO_LT: return Success(IsLess, E);
+ case BO_GT: return Success(IsGreater, E);
+ case BO_LE: return Success(IsEqual || IsLess, E);
+ case BO_GE: return Success(IsEqual || IsGreater, E);
+ }
+ };
+ return EvaluateComparisonBinaryOperator(Info, E, OnSuccess, [&]() {
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+ });
+ }
+
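The translation performed by OnSuccess, spelled out (an illustrative table, including the Unordered result that NaN operands produce):

    //  three-way result:  Less   Equal  Greater  Unordered
    //  BO_EQ              false  true   false    false
    //  BO_NE              true   false  true     true
    //  BO_LT              true   false  false    false
    //  BO_GT              false  false  true     false
    //  BO_LE              true   true   false    false
    //  BO_GE              false  true   true     false
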
+ QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
+
+ if (LHSTy->isPointerType() && RHSTy->isPointerType() &&
+ E->getOpcode() == BO_Sub) {
+ LValue LHSValue, RHSValue;
+
+ bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
+ if (!LHSOK && !Info.noteFailure())
+ return false;
+
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // Reject differing bases from the normal codepath; we special-case
+ // comparisons to null.
+ if (!HasSameBase(LHSValue, RHSValue)) {
+ // Handle &&A - &&B.
+ if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
+ return Error(E);
+ const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr *>();
+ const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr *>();
+ if (!LHSExpr || !RHSExpr)
+ return Error(E);
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return Error(E);
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return Error(E);
+ return Success(APValue(LHSAddrExpr, RHSAddrExpr), E);
+ }
+ const CharUnits &LHSOffset = LHSValue.getLValueOffset();
+ const CharUnits &RHSOffset = RHSValue.getLValueOffset();
+
+ SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
+ SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
+
+ // C++11 [expr.add]p6:
+ // Unless both pointers point to elements of the same array object, or
+ // one past the last element of the array object, the behavior is
+ // undefined.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ !AreElementsOfSameArray(getType(LHSValue.Base), LHSDesignator,
+ RHSDesignator))
+ Info.CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
+
+ QualType Type = E->getLHS()->getType();
+ QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
+
+ CharUnits ElementSize;
+ if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
+ return false;
+
+ // As an extension, a type may have zero size (empty struct or union in
+ // C, array of zero length). Pointer subtraction in such cases has
+ // undefined behavior, so is not constant.
+ if (ElementSize.isZero()) {
+ Info.FFDiag(E, diag::note_constexpr_pointer_subtraction_zero_size)
+ << ElementType;
+ return false;
+ }
+
+ // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
+ // and produce incorrect results when it overflows. Such behavior
+ // appears to be non-conforming, but is common, so perhaps we should
+ // assume the standard intended for such cases to be undefined behavior
+ // and check for them.
+
+ // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
+ // overflow in the final conversion to ptrdiff_t.
+ APSInt LHS(llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
+ APSInt RHS(llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
+ APSInt ElemSize(llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true),
+ false);
+ APSInt TrueResult = (LHS - RHS) / ElemSize;
+ APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
+
+ if (Result.extend(65) != TrueResult &&
+ !HandleOverflow(Info, E, TrueResult, E->getType()))
+ return false;
+ return Success(Result, E);
+ }
+
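A sketch of what the relocated subtraction path folds, and what it rejects:

    constexpr int Arr[10] = {};
    static_assert(&Arr[7] - &Arr[2] == 5, "same-array element difference");
    // Subtraction of pointers into unrelated objects, or of pointers to a
    // zero-sized type, is diagnosed as non-constant by the checks above.
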
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
}
@@ -8878,7 +9327,7 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!Result.isInt()) return Error(E);
const APSInt &Value = Result.getInt();
- if (Value.isSigned() && Value.isMinSignedValue() &&
+ if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
!HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
E->getType()))
return false;
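That is, only negations that can actually overflow are diagnosed now. A sketch:

    constexpr unsigned U = -1u; // fine: unsigned negation, canOverflow() is false
    // constexpr int M = -(-2147483647 - 1); // -INT_MIN: rejected as overflow
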
@@ -9083,6 +9532,37 @@ bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
return Success(E->getValue(), E);
}
+bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ // Invalid unary operators
+ return Error(E);
+ case UO_Plus:
+ // The result is just the value.
+ return Visit(E->getSubExpr());
+ case UO_Minus: {
+ if (!Visit(E->getSubExpr())) return false;
+ if (!Result.isInt()) return Error(E);
+ const APSInt &Value = Result.getInt();
+ if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) {
+ SmallString<64> S;
+ FixedPointValueToString(S, Value,
+ Info.Ctx.getTypeInfo(E->getType()).Width,
+ /*Radix=*/10);
+ Info.CCEDiag(E, diag::note_constexpr_overflow) << S << E->getType();
+ if (Info.noteUndefinedBehavior()) return false;
+ }
+ return Success(-Value, E);
+ }
+ case UO_LNot: {
+ bool bres;
+ if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
+ }
+}
+
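A sketch of the new coverage, assuming Embedded-C fixed-point support (-ffixed-point) and its literal suffixes ('hk' is short _Accum):

    short _Accum Neg = -2.5hk; // UO_Minus: file-scope initializer folded here
    int NotZero = !0.0hk;      // UO_LNot: folds to 1
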
//===----------------------------------------------------------------------===//
// Float Evaluation
//===----------------------------------------------------------------------===//
@@ -9170,9 +9650,11 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_huge_val:
case Builtin::BI__builtin_huge_valf:
case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_huge_valf128:
case Builtin::BI__builtin_inf:
case Builtin::BI__builtin_inff:
- case Builtin::BI__builtin_infl: {
+ case Builtin::BI__builtin_infl:
+ case Builtin::BI__builtin_inff128: {
const llvm::fltSemantics &Sem =
Info.Ctx.getFloatTypeSemantics(E->getType());
Result = llvm::APFloat::getInf(Sem);
@@ -9182,6 +9664,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nans:
case Builtin::BI__builtin_nansf:
case Builtin::BI__builtin_nansl:
+ case Builtin::BI__builtin_nansf128:
if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
true, Result))
return Error(E);
@@ -9190,6 +9673,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nan:
case Builtin::BI__builtin_nanf:
case Builtin::BI__builtin_nanl:
+ case Builtin::BI__builtin_nanf128:
// If this is __builtin_nan() turn this into a nan, otherwise we
// can't constant fold it.
if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
@@ -9200,6 +9684,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_fabsf128:
if (!EvaluateFloat(E->getArg(0), Result, Info))
return false;
@@ -9213,7 +9698,8 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_copysign:
case Builtin::BI__builtin_copysignf:
- case Builtin::BI__builtin_copysignl: {
+ case Builtin::BI__builtin_copysignl:
+ case Builtin::BI__builtin_copysignf128: {
APFloat RHS(0.);
if (!EvaluateFloat(E->getArg(0), Result, Info) ||
!EvaluateFloat(E->getArg(1), RHS, Info))
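The f128 variants fold like their narrower siblings. A sketch, assuming a target with __float128 and the 'q' literal suffix:

    __float128 Inf = __builtin_inff128();                 // quad-precision infinity
    __float128 Mag = __builtin_fabsf128(-3.0q);           // 3.0, sign bit cleared
    __float128 Neg = __builtin_copysignf128(1.0q, -2.0q); // -1.0
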
@@ -9928,6 +10414,8 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
if (!EvaluateComplex(E, C, Info))
return false;
C.moveInto(Result);
+ } else if (T->isFixedPointType()) {
+ if (!FixedPointExprEvaluator(Info, Result).Visit(E)) return false;
} else if (T->isMemberPointerType()) {
MemberPtr P;
if (!EvaluateMemberPointer(E, P, Info))
@@ -9936,15 +10424,13 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
return true;
} else if (T->isArrayType()) {
LValue LV;
- LV.set(E, Info.CurrentCall->Index);
- APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ APValue &Value = createTemporary(E, false, LV, *Info.CurrentCall);
if (!EvaluateArray(E, LV, Value, Info))
return false;
Result = Value;
} else if (T->isRecordType()) {
LValue LV;
- LV.set(E, Info.CurrentCall->Index);
- APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ APValue &Value = createTemporary(E, false, LV, *Info.CurrentCall);
if (!EvaluateRecord(E, LV, Value, Info))
return false;
Result = Value;
@@ -9958,8 +10444,7 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
QualType Unqual = T.getAtomicUnqualifiedType();
if (Unqual->isArrayType() || Unqual->isRecordType()) {
LValue LV;
- LV.set(E, Info.CurrentCall->Index);
- APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ APValue &Value = createTemporary(E, false, LV, *Info.CurrentCall);
if (!EvaluateAtomic(E, &LV, Value, Info))
return false;
} else {
@@ -10120,13 +10605,25 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
LValue LV;
if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
!CheckLValueConstantExpression(Info, getExprLoc(),
- Ctx.getLValueReferenceType(getType()), LV))
+ Ctx.getLValueReferenceType(getType()), LV,
+ Expr::EvaluateForCodeGen))
return false;
LV.moveInto(Result.Val);
return true;
}
+bool Expr::EvaluateAsConstantExpr(EvalResult &Result, ConstExprUsage Usage,
+ const ASTContext &Ctx) const {
+ EvalInfo::EvaluationMode EM = EvalInfo::EM_ConstantExpression;
+ EvalInfo Info(Ctx, Result, EM);
+ if (!::Evaluate(Result.Val, Info, this))
+ return false;
+
+ return CheckConstantExpression(Info, getExprLoc(), getType(), Result.Val,
+ Usage);
+}
+
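A sketch of a caller; E (const Expr *), Ctx (ASTContext &), and emitConstant are assumed:

    Expr::EvalResult Result;
    if (E->EvaluateAsConstantExpr(Result, Expr::EvaluateForCodeGen, Ctx))
      emitConstant(Result.Val); // hypothetical consumer
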
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
const VarDecl *VD,
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
@@ -10367,6 +10864,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::GenericSelectionExprClass:
return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
case Expr::IntegerLiteralClass:
+ case Expr::FixedPointLiteralClass:
case Expr::CharacterLiteralClass:
case Expr::ObjCBoolLiteralExprClass:
case Expr::CXXBoolLiteralExprClass:
@@ -10389,7 +10887,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::DeclRefExprClass: {
if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
return NoDiag();
- const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl());
+ const ValueDecl *D = cast<DeclRefExpr>(E)->getDecl();
if (Ctx.getLangOpts().CPlusPlus &&
D && IsConstNonVolatile(D->getType())) {
// Parameter variables are never constants. Without this check,
@@ -10475,7 +10973,6 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case BO_AndAssign:
case BO_XorAssign:
case BO_OrAssign:
- case BO_Cmp: // FIXME: Re-enable once we can evaluate this.
// C99 6.6/3 allows assignments within unevaluated subexpressions of
// constant expressions, but they can never be ICEs because an ICE cannot
// contain an lvalue operand.
@@ -10497,7 +10994,8 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case BO_And:
case BO_Xor:
case BO_Or:
- case BO_Comma: {
+ case BO_Comma:
+ case BO_Cmp: {
ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
if (Exp->getOpcode() == BO_Div ||
@@ -10644,7 +11142,7 @@ static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
const Expr *E,
llvm::APSInt *Value,
SourceLocation *Loc) {
- if (!E->getType()->isIntegralOrEnumerationType()) {
+ if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
if (Loc) *Loc = E->getExprLoc();
return false;
}
@@ -10781,7 +11279,7 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
// is a temporary being used as the 'this' pointer.
LValue This;
ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
- This.set(&VIE, Info.CurrentCall->Index);
+ This.set({&VIE, Info.CurrentCall->Index});
ArrayRef<const Expr*> Args;
diff --git a/lib/AST/ExternalASTMerger.cpp b/lib/AST/ExternalASTMerger.cpp
index 6b75c51c6420..ae28c588ca31 100644
--- a/lib/AST/ExternalASTMerger.cpp
+++ b/lib/AST/ExternalASTMerger.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExternalASTMerger.h"
using namespace clang;
@@ -153,7 +154,7 @@ public:
ToContainer->setMustBuildLookupTable();
assert(Parent.CanComplete(ToContainer));
}
- return ASTImporter::Imported(From, To);
+ return To;
}
ASTImporter &GetReverse() { return Reverse; }
};
@@ -228,7 +229,7 @@ void ExternalASTMerger::CompleteType(TagDecl *Tag) {
SourceTag->getASTContext().getExternalSource()->CompleteType(SourceTag);
if (!SourceTag->getDefinition())
return false;
- Forward.Imported(SourceTag, Tag);
+ Forward.MapImported(SourceTag, Tag);
Forward.ImportDefinition(SourceTag);
Tag->setCompleteDefinition(SourceTag->isCompleteDefinition());
return true;
@@ -247,7 +248,7 @@ void ExternalASTMerger::CompleteType(ObjCInterfaceDecl *Interface) {
SourceInterface);
if (!SourceInterface->getDefinition())
return false;
- Forward.Imported(SourceInterface, Interface);
+ Forward.MapImported(SourceInterface, Interface);
Forward.ImportDefinition(SourceInterface);
return true;
});
@@ -303,7 +304,7 @@ void ExternalASTMerger::ForceRecordOrigin(const DeclContext *ToDC,
void ExternalASTMerger::RecordOriginImpl(const DeclContext *ToDC, DCOrigin Origin,
ASTImporter &Importer) {
Origins[ToDC] = Origin;
- Importer.ASTImporter::Imported(cast<Decl>(Origin.DC), const_cast<Decl*>(cast<Decl>(ToDC)));
+ Importer.ASTImporter::MapImported(cast<Decl>(Origin.DC), const_cast<Decl*>(cast<Decl>(ToDC)));
}
ExternalASTMerger::ExternalASTMerger(const ImporterTarget &Target,
@@ -351,6 +352,27 @@ void ExternalASTMerger::RemoveSources(llvm::ArrayRef<ImporterSource> Sources) {
}
}
+template <typename DeclTy>
+static bool importSpecializations(DeclTy *D, ASTImporter *Importer) {
+ for (auto *Spec : D->specializations())
+ if (!Importer->Import(Spec))
+ return true;
+ return false;
+}
+
+/// Imports specializations from template declarations that can be specialized.
+static bool importSpecializationsIfNeeded(Decl *D, ASTImporter *Importer) {
+ if (!isa<TemplateDecl>(D))
+ return false;
+ if (auto *FunctionTD = dyn_cast<FunctionTemplateDecl>(D))
+ return importSpecializations(FunctionTD, Importer);
+ else if (auto *ClassTD = dyn_cast<ClassTemplateDecl>(D))
+ return importSpecializations(ClassTD, Importer);
+ else if (auto *VarTD = dyn_cast<VarTemplateDecl>(D))
+ return importSpecializations(VarTD, Importer);
+ return false;
+}
+
bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) {
llvm::SmallVector<NamedDecl *, 1> Decls;
@@ -376,9 +398,18 @@ bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC,
Decls.reserve(Candidates.size());
for (const Candidate &C : Candidates) {
- NamedDecl *d = cast<NamedDecl>(C.second->Import(C.first.get()));
- assert(d);
- Decls.push_back(d);
+ Decl *LookupRes = C.first.get();
+ ASTImporter *Importer = C.second;
+ NamedDecl *ND = cast_or_null<NamedDecl>(Importer->Import(LookupRes));
+ assert(ND);
+      // If we don't import specializations, they are not available via
+      // lookup, because the lookup result is the imported TemplateDecl, which
+      // does not reference its specializations until they are imported
+      // explicitly.
+ bool IsSpecImportFailed =
+ importSpecializationsIfNeeded(LookupRes, Importer);
+ assert(!IsSpecImportFailed);
+ (void)IsSpecImportFailed;
+ Decls.push_back(ND);
}
SetExternalVisibleDeclsForName(DC, Name, Decls);
return true;
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index d6bc16b6350f..a75ae14f9015 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/iterator.h"
using namespace clang;
@@ -50,12 +51,64 @@ static const IdentifierInfo *findAnonymousUnionVarDeclName(const VarDecl& VD) {
return nullptr;
}
-/// \brief Keeps track of the mangled names of lambda expressions and block
+/// The name of a decomposition declaration.
+struct DecompositionDeclName {
+ using BindingArray = ArrayRef<const BindingDecl*>;
+
+ /// Representative example of a set of bindings with these names.
+ BindingArray Bindings;
+
+ /// Iterators over the sequence of identifiers in the name.
+ struct Iterator
+ : llvm::iterator_adaptor_base<Iterator, BindingArray::const_iterator,
+ std::random_access_iterator_tag,
+ const IdentifierInfo *> {
+ Iterator(BindingArray::const_iterator It) : iterator_adaptor_base(It) {}
+ const IdentifierInfo *operator*() const {
+ return (*this->I)->getIdentifier();
+ }
+ };
+ Iterator begin() const { return Iterator(Bindings.begin()); }
+ Iterator end() const { return Iterator(Bindings.end()); }
+};
+}
+
+namespace llvm {
+template<>
+struct DenseMapInfo<DecompositionDeclName> {
+ using ArrayInfo = llvm::DenseMapInfo<ArrayRef<const BindingDecl*>>;
+ using IdentInfo = llvm::DenseMapInfo<const IdentifierInfo*>;
+ static DecompositionDeclName getEmptyKey() {
+ return {ArrayInfo::getEmptyKey()};
+ }
+ static DecompositionDeclName getTombstoneKey() {
+ return {ArrayInfo::getTombstoneKey()};
+ }
+ static unsigned getHashValue(DecompositionDeclName Key) {
+ assert(!isEqual(Key, getEmptyKey()) && !isEqual(Key, getTombstoneKey()));
+ return llvm::hash_combine_range(Key.begin(), Key.end());
+ }
+ static bool isEqual(DecompositionDeclName LHS, DecompositionDeclName RHS) {
+ if (ArrayInfo::isEqual(LHS.Bindings, ArrayInfo::getEmptyKey()))
+ return ArrayInfo::isEqual(RHS.Bindings, ArrayInfo::getEmptyKey());
+ if (ArrayInfo::isEqual(LHS.Bindings, ArrayInfo::getTombstoneKey()))
+ return ArrayInfo::isEqual(RHS.Bindings, ArrayInfo::getTombstoneKey());
+ return LHS.Bindings.size() == RHS.Bindings.size() &&
+ std::equal(LHS.begin(), LHS.end(), RHS.begin());
+ }
+};
+}
+
+namespace {
+
+/// Keeps track of the mangled names of lambda expressions and block
/// literals within a particular context.
class ItaniumNumberingContext : public MangleNumberingContext {
llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
llvm::DenseMap<const IdentifierInfo *, unsigned> VarManglingNumbers;
llvm::DenseMap<const IdentifierInfo *, unsigned> TagManglingNumbers;
+ llvm::DenseMap<DecompositionDeclName, unsigned>
+      DecompositionDeclManglingNumbers;
public:
unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
@@ -82,9 +135,15 @@ public:
/// Variable decls are numbered by identifier.
unsigned getManglingNumber(const VarDecl *VD, unsigned) override {
+ if (auto *DD = dyn_cast<DecompositionDecl>(VD)) {
+ DecompositionDeclName Name{DD->bindings()};
+      return ++DecompositionDeclManglingNumbers[Name];
+ }
+
const IdentifierInfo *Identifier = VD->getIdentifier();
if (!Identifier) {
- // VarDecl without an identifier represents an anonymous union declaration.
+ // VarDecl without an identifier represents an anonymous union
+ // declaration.
Identifier = findAnonymousUnionVarDeclName(*VD);
}
return ++VarManglingNumbers[Identifier];
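This gives otherwise-identical structured bindings distinct numbers. An illustrative sketch (C++17):

    #include <utility>
    inline void f() {
      { auto [a, b] = std::pair(1, 2); } // name sequence {a, b}: number 1
      { auto [a, b] = std::pair(3, 4); } // same sequence again: number 2
    }
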
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 3c7e26d41370..3b99a3d9afda 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -323,7 +323,7 @@ class CXXNameMangler {
AdditionalAbiTags->end());
}
- std::sort(TagList.begin(), TagList.end());
+ llvm::sort(TagList.begin(), TagList.end());
TagList.erase(std::unique(TagList.begin(), TagList.end()), TagList.end());
writeSortedUniqueAbiTags(Out, TagList);
@@ -339,7 +339,7 @@ class CXXNameMangler {
}
const AbiTagList &getSortedUniqueUsedAbiTags() {
- std::sort(UsedAbiTags.begin(), UsedAbiTags.end());
+ llvm::sort(UsedAbiTags.begin(), UsedAbiTags.end());
UsedAbiTags.erase(std::unique(UsedAbiTags.begin(), UsedAbiTags.end()),
UsedAbiTags.end());
return UsedAbiTags;
@@ -539,7 +539,9 @@ private:
void mangleBareFunctionType(const FunctionProtoType *T, bool MangleReturnType,
const FunctionDecl *FD = nullptr);
void mangleNeonVectorType(const VectorType *T);
+ void mangleNeonVectorType(const DependentVectorType *T);
void mangleAArch64NeonVectorType(const VectorType *T);
+ void mangleAArch64NeonVectorType(const DependentVectorType *T);
void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
void mangleMemberExprBase(const Expr *base, bool isArrow);
@@ -590,6 +592,18 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (FD->isMain())
return false;
+ // The Windows ABI expects that we would never mangle "typical"
+ // user-defined entry points regardless of visibility or freestanding-ness.
+ //
+ // N.B. This is distinct from asking about "main". "main" has a lot of
+ // special rules associated with it in the standard while these
+ // user-defined entry points are outside of the purview of the standard.
+  // For example, there can be only one definition of "main" in a
+  // standards-compliant program; however, nothing forbids the existence of
+  // wmain and WinMain in the same translation unit.
+ if (FD->isMSVCRTEntryPoint())
+ return false;
+
// C++ functions and those whose names are not a simple identifier need
// mangling.
if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage)
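A minimal illustration of the rule above (hypothetical TU): on a Windows target these user-defined entry points keep their plain names even when compiled as C++:

    int wmain(int argc, wchar_t *argv[]) { return 0; }   // not mangled
    // WinMain, wWinMain, and DllMain are treated the same way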
@@ -1324,8 +1338,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// We must have an anonymous union or struct declaration.
- const RecordDecl *RD =
- cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+ const RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
// Itanium C++ ABI 5.1.2:
//
@@ -1931,6 +1944,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::VariableArray:
case Type::DependentSizedArray:
case Type::DependentAddressSpace:
+ case Type::DependentVector:
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
@@ -2330,7 +2344,8 @@ void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
Context.mangleObjCMethodName(MD, Out);
}
-static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty) {
+static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty,
+ ASTContext &Ctx) {
if (Quals)
return true;
if (Ty->isSpecificBuiltinType(BuiltinType::ObjCSel))
@@ -2339,7 +2354,11 @@ static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty) {
return true;
if (Ty->isBuiltinType())
return false;
-
+ // Through to Clang 6.0, we accidentally treated undeduced auto types as
+ // substitution candidates.
+ if (Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver6 &&
+ isa<AutoType>(Ty))
+ return false;
return true;
}
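The pre-Clang-7 behaviour stays reachable through the existing ABI-compatibility flag, e.g.:

    clang++ -fclang-abi-compat=6.0 -c t.cpp   # keep treating undeduced auto as substitutable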
@@ -2400,7 +2419,8 @@ void CXXNameMangler::mangleType(QualType T) {
Qualifiers quals = split.Quals;
const Type *ty = split.Ty;
- bool isSubstitutable = isTypeSubstitutable(quals, ty);
+ bool isSubstitutable =
+ isTypeSubstitutable(quals, ty, Context.getASTContext());
if (isSubstitutable && mangleSubstitution(T))
return;
@@ -2520,6 +2540,9 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::WChar_U:
Out << 'w';
break;
+ case BuiltinType::Char8:
+ Out << "Du";
+ break;
case BuiltinType::Char16:
Out << "Ds";
break;
@@ -2544,6 +2567,31 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::Float16:
Out << "DF16_";
break;
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
+ llvm_unreachable("Fixed point types are disabled for c++");
case BuiltinType::Half:
Out << "Dh";
break;
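The new Du code can be exercised end to end (assuming a char8_t-enabled mode of this era, e.g. -std=c++2a with -fchar8_t):

    void f(char8_t);   // mangles as _Z1fDu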
@@ -2689,12 +2737,12 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
// Mangle CV-qualifiers, if present. These are 'this' qualifiers,
// e.g. "const" in "int (A::*)() const".
- mangleQualifiers(Qualifiers::fromCVRMask(T->getTypeQuals()));
+ mangleQualifiers(Qualifiers::fromCVRUMask(T->getTypeQuals()));
// Mangle instantiation-dependent exception-specification, if present,
// per cxx-abi-dev proposal on 2016-10-11.
if (T->hasInstantiationDependentExceptionSpec()) {
- if (T->getExceptionSpecType() == EST_ComputedNoexcept) {
+ if (isComputedNoexcept(T->getExceptionSpecType())) {
Out << "DO";
mangleExpression(T->getNoexceptExpr());
Out << "E";
@@ -2705,7 +2753,7 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
mangleType(ExceptTy);
Out << "E";
}
- } else if (T->isNothrow(getASTContext())) {
+ } else if (T->isNothrow()) {
Out << "Do";
}
@@ -2967,6 +3015,14 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
Out << BaseName << EltName;
}
+void CXXNameMangler::mangleNeonVectorType(const DependentVectorType *T) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle this dependent neon vector type yet");
+ Diags.Report(T->getAttributeLoc(), DiagID);
+}
+
static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
switch (EltType->getKind()) {
case BuiltinType::SChar:
@@ -3034,6 +3090,13 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
("__" + EltName + "x" + Twine(T->getNumElements()) + "_t").str();
Out << TypeName.length() << TypeName;
}
+void CXXNameMangler::mangleAArch64NeonVectorType(const DependentVectorType *T) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle this dependent neon vector type yet");
+ Diags.Report(T->getAttributeLoc(), DiagID);
+}
// GNU extension: vector types
// <type> ::= <vector-type>
@@ -3064,6 +3127,32 @@ void CXXNameMangler::mangleType(const VectorType *T) {
else
mangleType(T->getElementType());
}
+
+void CXXNameMangler::mangleType(const DependentVectorType *T) {
+ if ((T->getVectorKind() == VectorType::NeonVector ||
+ T->getVectorKind() == VectorType::NeonPolyVector)) {
+ llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
+ llvm::Triple::ArchType Arch =
+ getASTContext().getTargetInfo().getTriple().getArch();
+ if ((Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) &&
+ !Target.isOSDarwin())
+ mangleAArch64NeonVectorType(T);
+ else
+ mangleNeonVectorType(T);
+ return;
+ }
+
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ if (T->getVectorKind() == VectorType::AltiVecPixel)
+ Out << 'p';
+ else if (T->getVectorKind() == VectorType::AltiVecBool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
+}
+
void CXXNameMangler::mangleType(const ExtVectorType *T) {
mangleType(static_cast<const VectorType*>(T));
}
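A sketch of a type that reaches the new DependentVectorType path (hypothetical; the dependent size expression is what makes it a DependentVectorType):

    template <int N> struct S {
      using vec = int __attribute__((vector_size(N)));   // size depends on N
      // in a dependent signature this mangles as Dv <size-expr> _ <element-type>
    };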
@@ -3251,14 +3340,13 @@ void CXXNameMangler::mangleType(const UnaryTransformType *T) {
}
void CXXNameMangler::mangleType(const AutoType *T) {
- QualType D = T->getDeducedType();
- // <builtin-type> ::= Da # dependent auto
- if (D.isNull()) {
- assert(T->getKeyword() != AutoTypeKeyword::GNUAutoType &&
- "shouldn't need to mangle __auto_type!");
- Out << (T->isDecltypeAuto() ? "Dc" : "Da");
- } else
- mangleType(D);
+ assert(T->getDeducedType().isNull() &&
+ "Deduced AutoType shouldn't be handled here!");
+ assert(T->getKeyword() != AutoTypeKeyword::GNUAutoType &&
+ "shouldn't need to mangle __auto_type!");
+ // <builtin-type> ::= Da # auto
+ // ::= Dc # decltype(auto)
+ Out << (T->isDecltypeAuto() ? "Dc" : "Da");
}
void CXXNameMangler::mangleType(const DeducedTemplateSpecializationType *T) {
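For reference, the undeduced-auto manglings this now emits, on hypothetical declarations (expected manglings):

    template <typename T> auto f(T);             // f<int> -> _Z1fIiEDaT_
    template <typename T> decltype(auto) g(T);   // g<int> -> _Z1gIiEDcT_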
@@ -3471,6 +3559,7 @@ recurse:
case Expr::AsTypeExprClass:
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
+ case Expr::FixedPointLiteralClass:
{
if (!NullOut) {
// As bad as this diagnostic is, it's better than crashing.
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index b19491f31304..3b417c135285 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -25,7 +25,7 @@ using namespace clang;
namespace {
-/// \brief Numbers things which need to correspond across multiple TUs.
+/// Numbers things which need to correspond across multiple TUs.
/// Typically these are things like static locals, lambdas, or blocks.
class MicrosoftNumberingContext : public MangleNumberingContext {
llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
@@ -106,7 +106,7 @@ public:
void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
TypedefNameDecl *DD) override {
TD = TD->getCanonicalDecl();
- DD = cast<TypedefNameDecl>(DD->getCanonicalDecl());
+ DD = DD->getCanonicalDecl();
TypedefNameDecl *&I = UnnamedTagDeclToTypedefNameDecl[TD];
if (!I)
I = DD;
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 0c55c1a92287..e45f9f7902e2 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -76,7 +76,7 @@ getLambdaDefaultArgumentDeclContext(const Decl *D) {
return nullptr;
}
-/// \brief Retrieve the declaration context that should be used when mangling
+/// Retrieve the declaration context that should be used when mangling
/// the given declaration.
static const DeclContext *getEffectiveDeclContext(const Decl *D) {
// The ABI assumes that lambda closure types that occur within
@@ -135,7 +135,8 @@ public:
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
- raw_ostream &) override;
+ const MethodVFTableLocation &ML,
+ raw_ostream &Out) override;
void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
@@ -290,16 +291,15 @@ public:
raw_ostream &getStream() const { return Out; }
- void mangle(const NamedDecl *D, StringRef Prefix = "\01?");
+ void mangle(const NamedDecl *D, StringRef Prefix = "?");
void mangleName(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle);
void mangleVariableEncoding(const VarDecl *VD);
void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD);
void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD);
- void mangleVirtualMemPtrThunk(
- const CXXMethodDecl *MD,
- const MicrosoftVTableContext::MethodVFTableLocation &ML);
+ void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
+ const MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
void mangleTagTypeKind(TagTypeKind TK);
void mangleArtificalTagType(TagTypeKind TK, StringRef UnqualifiedName,
@@ -337,6 +337,8 @@ private:
void mangleArgumentType(QualType T, SourceRange Range);
void manglePassObjectSizeArg(const PassObjectSizeAttr *POSA);
+ bool isArtificialTagType(QualType T) const;
+
// Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT)
@@ -362,6 +364,10 @@ private:
const TemplateArgumentList &TemplateArgs);
void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA,
const NamedDecl *Parm);
+
+ void mangleObjCProtocol(const ObjCProtocolDecl *PD);
+ void mangleObjCLifetime(const QualType T, Qualifiers Quals,
+ SourceRange Range);
};
}
@@ -603,7 +609,7 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
if (MD->isVirtual()) {
MicrosoftVTableContext *VTContext =
cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
- const MicrosoftVTableContext::MethodVFTableLocation &ML =
+ MethodVFTableLocation ML =
VTContext->getMethodVFTableLocation(GlobalDecl(MD));
mangleVirtualMemPtrThunk(MD, ML);
NVOffset = ML.VFPtrOffset.getQuantity();
@@ -640,8 +646,7 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
}
void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
- const CXXMethodDecl *MD,
- const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+ const CXXMethodDecl *MD, const MethodVFTableLocation &ML) {
// Get the vftable offset.
CharUnits PointerWidth = getASTContext().toCharUnitsFromBits(
getASTContext().getTargetInfo().getPointerWidth(0));
@@ -881,11 +886,13 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// associate typedef mangled in if they have one.
Name += "<unnamed-type-";
Name += TND->getName();
- } else if (auto *ED = dyn_cast<EnumDecl>(TD)) {
- auto EnumeratorI = ED->enumerator_begin();
- assert(EnumeratorI != ED->enumerator_end());
+ } else if (isa<EnumDecl>(TD) &&
+ cast<EnumDecl>(TD)->enumerator_begin() !=
+ cast<EnumDecl>(TD)->enumerator_end()) {
+ // Anonymous non-empty enums mangle in the first enumerator.
+ auto *ED = cast<EnumDecl>(TD);
Name += "<unnamed-enum-";
- Name += EnumeratorI->getName();
+ Name += ED->enumerator_begin()->getName();
} else {
// Otherwise, number the types using a $S prefix.
Name += "<unnamed-type-$S";
@@ -950,11 +957,10 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
}
+// <postfix> ::= <unqualified-name> [<postfix>]
+// ::= <substitution> [<postfix>]
void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
- // <postfix> ::= <unqualified-name> [<postfix>]
- // ::= <substitution> [<postfix>]
const DeclContext *DC = getEffectiveDeclContext(ND);
-
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
unsigned Disc;
@@ -1007,7 +1013,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
if (const auto *ND = dyn_cast<NamedDecl>(MC))
mangleUnqualifiedName(ND);
// MS ABI and Itanium manglings are in inverted scopes. In the case of a
- // RecordDecl, mangle the entire scope hierachy at this point rather than
+ // RecordDecl, mangle the entire scope hierarchy at this point rather than
// just the unqualified name to get the ordering correct.
if (const auto *RD = dyn_cast<RecordDecl>(DC))
mangleName(RD);
@@ -1365,15 +1371,15 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
break;
}
case TemplateArgument::Declaration: {
- const NamedDecl *ND = cast<NamedDecl>(TA.getAsDecl());
+ const NamedDecl *ND = TA.getAsDecl();
if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
mangleMemberDataPointer(
- cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
+ cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentNonInjectedDecl(),
cast<ValueDecl>(ND));
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
- mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD);
+ mangleMemberFunctionPointer(MD->getParent()->getMostRecentNonInjectedDecl(), MD);
} else {
Out << "$1?";
mangleName(FD);
@@ -1457,6 +1463,47 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
}
}
+void MicrosoftCXXNameMangler::mangleObjCProtocol(const ObjCProtocolDecl *PD) {
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+
+ Stream << "?$";
+ Extra.mangleSourceName("Protocol");
+ Extra.mangleArtificalTagType(TTK_Struct, PD->getName());
+
+ mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+}
+
+void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
+ Qualifiers Quals,
+ SourceRange Range) {
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+
+ Stream << "?$";
+ switch (Quals.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+ case Qualifiers::OCL_Autoreleasing:
+ Extra.mangleSourceName("Autoreleasing");
+ break;
+ case Qualifiers::OCL_Strong:
+ Extra.mangleSourceName("Strong");
+ break;
+ case Qualifiers::OCL_Weak:
+ Extra.mangleSourceName("Weak");
+ break;
+ }
+ Extra.manglePointerCVQualifiers(Quals);
+ Extra.manglePointerExtQualifiers(Quals, Type);
+ Extra.mangleType(Type, Range);
+
+ mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+}
+
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
bool IsMember) {
// <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
@@ -1559,12 +1606,11 @@ MicrosoftCXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
void MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals,
QualType PointeeType) {
- bool HasRestrict = Quals.hasRestrict();
if (PointersAre64Bit &&
(PointeeType.isNull() || !PointeeType->isFunctionType()))
Out << 'E';
- if (HasRestrict)
+ if (Quals.hasRestrict())
Out << 'I';
if (Quals.hasUnaligned() ||
@@ -1685,6 +1731,8 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
switch (QMM) {
case QMM_Drop:
+ if (Quals.hasObjCLifetime())
+ Quals = Quals.withoutObjCLifetime();
break;
case QMM_Mangle:
if (const FunctionType *FT = dyn_cast<FunctionType>(T)) {
@@ -1703,7 +1751,9 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
case QMM_Result:
// Presence of __unaligned qualifier shouldn't affect mangling here.
Quals.removeUnaligned();
- if ((!IsPointer && Quals) || isa<TagType>(T)) {
+ if (Quals.hasObjCLifetime())
+ Quals = Quals.withoutObjCLifetime();
+ if ((!IsPointer && Quals) || isa<TagType>(T) || isArtificialTagType(T)) {
Out << '?';
mangleQualifiers(Quals, false);
}
@@ -1833,15 +1883,12 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
llvm_unreachable("placeholder types shouldn't get to name mangling");
case BuiltinType::ObjCId:
- Out << "PA";
mangleArtificalTagType(TTK_Struct, "objc_object");
break;
case BuiltinType::ObjCClass:
- Out << "PA";
mangleArtificalTagType(TTK_Struct, "objc_class");
break;
case BuiltinType::ObjCSel:
- Out << "PA";
mangleArtificalTagType(TTK_Struct, "objc_selector");
break;
@@ -1876,8 +1923,39 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
break;
case BuiltinType::Float16:
- case BuiltinType::Float128:
- case BuiltinType::Half: {
+ mangleArtificalTagType(TTK_Struct, "_Float16", {"__clang"});
+ break;
+
+ case BuiltinType::Half:
+ mangleArtificalTagType(TTK_Struct, "_Half", {"__clang"});
+ break;
+
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
+ case BuiltinType::Char8:
+ case BuiltinType::Float128: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "cannot mangle this built-in %0 type yet");
@@ -2140,6 +2218,8 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
case CC_X86StdCall: Out << 'G'; break;
case CC_X86FastCall: Out << 'I'; break;
case CC_X86VectorCall: Out << 'Q'; break;
+ case CC_Swift: Out << 'S'; break;
+ case CC_PreserveMost: Out << 'U'; break;
case CC_X86RegCall: Out << 'w'; break;
}
}
@@ -2202,6 +2282,8 @@ void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
mangleTagTypeKind(TD->getTagKind());
mangleName(TD);
}
+
+// If you add a call to this, consider updating isArtificialTagType() too.
void MicrosoftCXXNameMangler::mangleArtificalTagType(
TagTypeKind TK, StringRef UnqualifiedName, ArrayRef<StringRef> NestedNames) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
@@ -2337,10 +2419,16 @@ void MicrosoftCXXNameMangler::mangleType(const PointerType *T, Qualifiers Quals,
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
Qualifiers Quals, SourceRange Range) {
- if (T->isObjCIdType() || T->isObjCClassType())
- return mangleType(T->getPointeeType(), Range, QMM_Drop);
-
QualType PointeeType = T->getPointeeType();
+ switch (Quals.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return mangleObjCLifetime(PointeeType, Quals, Range);
+ }
manglePointerCVQualifiers(Quals);
manglePointerExtQualifiers(Quals, PointeeType);
mangleType(PointeeType, Range);
@@ -2384,6 +2472,26 @@ void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
+// Returns true for types for which mangleArtificalTagType() gets called with
+// TTK_Union, TTK_Struct, or TTK_Class, and where compatibility with MSVC's
+// mangling matters.
+// (It doesn't matter for Objective-C types and the like that cl.exe doesn't
+// support.)
+bool MicrosoftCXXNameMangler::isArtificialTagType(QualType T) const {
+ const Type *ty = T.getTypePtr();
+ switch (ty->getTypeClass()) {
+ default:
+ return false;
+
+ case Type::Vector: {
+ // For ABI compatibility only __m64, __m128(id), and __m256(id) matter,
+ // but since mangleType(VectorType*) always calls mangleArtificalTagType()
+ // just always return true (the other vector types are clang-only).
+ return true;
+ }
+ }
+}
+
void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
SourceRange Range) {
const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>();
@@ -2430,6 +2538,16 @@ void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T,
Qualifiers Quals, SourceRange Range) {
mangleType(static_cast<const VectorType *>(T), Quals, Range);
}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentVectorType *T,
+ Qualifiers, SourceRange Range) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle this dependent-sized vector type yet");
+ Diags.Report(Range.getBegin(), DiagID) << Range;
+}
+
void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -2457,9 +2575,33 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
SourceRange Range) {
- // We don't allow overloading by different protocol qualification,
- // so mangling them isn't necessary.
- mangleType(T->getBaseType(), Range, QMM_Drop);
+ if (T->qual_empty())
+ return mangleType(T->getBaseType(), Range, QMM_Drop);
+
+ ArgBackRefMap OuterArgsContext;
+ BackRefVec OuterTemplateContext;
+
+ TypeBackReferences.swap(OuterArgsContext);
+ NameBackReferences.swap(OuterTemplateContext);
+
+ mangleTagTypeKind(TTK_Struct);
+
+ Out << "?$";
+ if (T->isObjCId())
+ mangleSourceName("objc_object");
+ else if (T->isObjCClass())
+ mangleSourceName("objc_class");
+ else
+ mangleSourceName(T->getInterface()->getName());
+
+ for (const auto &Q : T->quals())
+ mangleObjCProtocol(Q);
+ Out << '@';
+
+ Out << '@';
+
+ TypeBackReferences.swap(OuterArgsContext);
+ NameBackReferences.swap(OuterTemplateContext);
}
void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T,
@@ -2700,17 +2842,12 @@ static void mangleThunkThisAdjustment(const CXXMethodDecl *MD,
}
}
-void
-MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
- raw_ostream &Out) {
- MicrosoftVTableContext *VTContext =
- cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
- const MicrosoftVTableContext::MethodVFTableLocation &ML =
- VTContext->getMethodVFTableLocation(GlobalDecl(MD));
-
+void MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(
+ const CXXMethodDecl *MD, const MethodVFTableLocation &ML,
+ raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01?";
+ Mangler.getStream() << '?';
Mangler.mangleVirtualMemPtrThunk(MD, ML);
}
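A recurring change in this file: the \01 escape, which tells LLVM not to apply any further prefixing to a symbol name, is no longer baked into the mangler output here (presumably it is now applied where the IR symbol is formed). The manglings themselves are unchanged; e.g. for a hypothetical type:

    struct Foo { virtual void f(); };
    // vftable symbol: ??_7Foo@@6B@   (formerly emitted as \01??_7Foo@@6B@)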
@@ -2719,7 +2856,7 @@ void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01?";
+ Mangler.getStream() << '?';
Mangler.mangleName(MD);
mangleThunkThisAdjustment(MD, Thunk.This, Mangler, MHO);
if (!Thunk.Return.isEmpty())
@@ -2740,7 +2877,7 @@ void MicrosoftMangleContextImpl::mangleCXXDtorThunk(
assert(Type == Dtor_Deleting);
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO, DD, Type);
- Mangler.getStream() << "\01??_E";
+ Mangler.getStream() << "??_E";
Mangler.mangleName(DD->getParent());
mangleThunkThisAdjustment(DD, Adjustment, Mangler, MHO);
Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD);
@@ -2756,9 +2893,9 @@ void MicrosoftMangleContextImpl::mangleCXXVFTable(
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
if (Derived->hasAttr<DLLImportAttr>())
- Mangler.getStream() << "\01??_S";
+ Mangler.getStream() << "??_S";
else
- Mangler.getStream() << "\01??_7";
+ Mangler.getStream() << "??_7";
Mangler.mangleName(Derived);
Mangler.getStream() << "6B"; // '6' for vftable, 'B' for const.
for (const CXXRecordDecl *RD : BasePath)
@@ -2775,7 +2912,7 @@ void MicrosoftMangleContextImpl::mangleCXXVBTable(
// is always '7' for vbtables.
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??_8";
+ Mangler.getStream() << "??_8";
Mangler.mangleName(Derived);
Mangler.getStream() << "7B"; // '7' for vbtable, 'B' for const.
for (const CXXRecordDecl *RD : BasePath)
@@ -2786,7 +2923,7 @@ void MicrosoftMangleContextImpl::mangleCXXVBTable(
void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??_R0";
+ Mangler.getStream() << "??_R0";
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
Mangler.getStream() << "@8";
}
@@ -2802,7 +2939,7 @@ void MicrosoftMangleContextImpl::mangleCXXVirtualDisplacementMap(
const CXXRecordDecl *SrcRD, const CXXRecordDecl *DstRD, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??_K";
+ Mangler.getStream() << "??_K";
Mangler.mangleName(SrcRD);
Mangler.getStream() << "$C";
Mangler.mangleName(DstRD);
@@ -2848,7 +2985,7 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType(
msvc_hashing_ostream MHO(Stream);
mangleCXXRTTI(T, MHO);
}
- Mangler.getStream() << RTTIMangling.substr(1);
+ Mangler.getStream() << RTTIMangling;
// VS2015 CTP6 omits the copy-constructor in the mangled name. This name is,
// in fact, superfluous but I'm not sure the change was made consciously.
@@ -2860,7 +2997,7 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType(
msvc_hashing_ostream MHO(Stream);
mangleCXXCtor(CD, CT, MHO);
}
- Mangler.getStream() << CopyCtorMangling.substr(1);
+ Mangler.getStream() << CopyCtorMangling;
Mangler.getStream() << Size;
if (VBPtrOffset == -1) {
@@ -2879,7 +3016,7 @@ void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassDescriptor(
uint32_t VBTableOffset, uint32_t Flags, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??_R1";
+ Mangler.getStream() << "??_R1";
Mangler.mangleNumber(NVOffset);
Mangler.mangleNumber(VBPtrOffset);
Mangler.mangleNumber(VBTableOffset);
@@ -2892,7 +3029,7 @@ void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassArray(
const CXXRecordDecl *Derived, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??_R2";
+ Mangler.getStream() << "??_R2";
Mangler.mangleName(Derived);
Mangler.getStream() << "8";
}
@@ -2901,7 +3038,7 @@ void MicrosoftMangleContextImpl::mangleCXXRTTIClassHierarchyDescriptor(
const CXXRecordDecl *Derived, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??_R3";
+ Mangler.getStream() << "??_R3";
Mangler.mangleName(Derived);
Mangler.getStream() << "8";
}
@@ -2917,16 +3054,16 @@ void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator(
llvm::raw_svector_ostream Stream(VFTableMangling);
mangleCXXVFTable(Derived, BasePath, Stream);
- if (VFTableMangling.startswith("\01??@")) {
+ if (VFTableMangling.startswith("??@")) {
assert(VFTableMangling.endswith("@"));
Out << VFTableMangling << "??_R4@";
return;
}
- assert(VFTableMangling.startswith("\01??_7") ||
- VFTableMangling.startswith("\01??_S"));
+ assert(VFTableMangling.startswith("??_7") ||
+ VFTableMangling.startswith("??_S"));
- Out << "\01??_R4" << StringRef(VFTableMangling).drop_front(5);
+ Out << "??_R4" << StringRef(VFTableMangling).drop_front(4);
}
void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
@@ -2937,7 +3074,7 @@ void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
// so the numbering here doesn't have to be the same across TUs.
//
// <mangled-name> ::= ?filt$ <filter-number> @0
- Mangler.getStream() << "\01?filt$" << SEHFilterIds[EnclosingDecl]++ << "@0@";
+ Mangler.getStream() << "?filt$" << SEHFilterIds[EnclosingDecl]++ << "@0@";
Mangler.mangleName(EnclosingDecl);
}
@@ -2949,7 +3086,7 @@ void MicrosoftMangleContextImpl::mangleSEHFinallyBlock(
// so the numbering here doesn't have to be the same across TUs.
//
// <mangled-name> ::= ?fin$ <filter-number> @0
- Mangler.getStream() << "\01?fin$" << SEHFinallyIds[EnclosingDecl]++ << "@0@";
+ Mangler.getStream() << "?fin$" << SEHFinallyIds[EnclosingDecl]++ << "@0@";
Mangler.mangleName(EnclosingDecl);
}
@@ -2982,7 +3119,7 @@ void MicrosoftMangleContextImpl::mangleReferenceTemporary(
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01?$RT" << ManglingNumber << '@';
+ Mangler.getStream() << "?$RT" << ManglingNumber << '@';
Mangler.mangle(VD, "");
}
@@ -2991,7 +3128,7 @@ void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable(
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01?$TSS" << GuardNum << '@';
+ Mangler.getStream() << "?$TSS" << GuardNum << '@';
Mangler.mangleNestedName(VD);
Mangler.getStream() << "@4HA";
}
@@ -3013,9 +3150,9 @@ void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD,
bool Visible = VD->isExternallyVisible();
if (Visible) {
- Mangler.getStream() << (VD->getTLSKind() ? "\01??__J" : "\01??_B");
+ Mangler.getStream() << (VD->getTLSKind() ? "??__J" : "??_B");
} else {
- Mangler.getStream() << "\01?$S1@";
+ Mangler.getStream() << "?$S1@";
}
unsigned ScopeDepth = 0;
if (Visible && !getNextDiscriminator(VD, ScopeDepth))
@@ -3035,7 +3172,7 @@ void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D,
raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
- Mangler.getStream() << "\01??__" << CharCode;
+ Mangler.getStream() << "??__" << CharCode;
Mangler.mangleName(D);
if (D->isStaticDataMember()) {
Mangler.mangleVariableEncoding(D);
@@ -3061,14 +3198,14 @@ MicrosoftMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
raw_ostream &Out) {
- // <char-type> ::= 0 # char
- // ::= 1 # wchar_t
- // ::= ??? # char16_t/char32_t will need a mangling too...
+ // <char-type> ::= 0 # char, char16_t, char32_t
+ // # (little endian char data in mangling)
+ // ::= 1 # wchar_t (big endian char data in mangling)
//
// <literal-length> ::= <non-negative integer> # the length of the literal
//
// <encoded-crc> ::= <hex digit>+ @ # crc of the literal including
- // # null-terminator
+ // # trailing null bytes
//
// <encoded-string> ::= <simple character> # uninteresting character
// ::= '?$' <hex digit> <hex digit> # these two nibbles
@@ -3081,7 +3218,19 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// <literal> ::= '??_C@_' <char-type> <literal-length> <encoded-crc>
// <encoded-string> '@'
MicrosoftCXXNameMangler Mangler(*this, Out);
- Mangler.getStream() << "\01??_C@_";
+ Mangler.getStream() << "??_C@_";
+
+ // The actual string length might be different from that of the string literal
+ // in cases like:
+ // char foo[3] = "foobar";
+ // char bar[42] = "foobar";
+  // where it is truncated or zero-padded to fit the array. That array length
+  // is used for mangling, and any trailing null bytes also need to be mangled.
+ unsigned StringLength = getASTContext()
+ .getAsConstantArrayType(SL->getType())
+ ->getSize()
+ .getZExtValue();
+ unsigned StringByteLength = StringLength * SL->getCharByteWidth();
// <char-type>: The "kind" of string literal is encoded into the mangled name.
if (SL->isWide())
@@ -3090,14 +3239,13 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
Mangler.getStream() << '0';
// <literal-length>: The next part of the mangled name consists of the length
- // of the string.
- // The StringLiteral does not consider the NUL terminator byte(s) but the
- // mangling does.
- // N.B. The length is in terms of bytes, not characters.
- Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
+ // of the string in bytes.
+ Mangler.mangleNumber(StringByteLength);
auto GetLittleEndianByte = [&SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
+ if (Index / CharByteWidth >= SL->getLength())
+ return static_cast<char>(0);
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
unsigned OffsetInCodeUnit = Index % CharByteWidth;
return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
@@ -3105,6 +3253,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
auto GetBigEndianByte = [&SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
+ if (Index / CharByteWidth >= SL->getLength())
+ return static_cast<char>(0);
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
unsigned OffsetInCodeUnit = (CharByteWidth - 1) - (Index % CharByteWidth);
return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
@@ -3112,21 +3262,15 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// CRC all the bytes of the StringLiteral.
llvm::JamCRC JC;
- for (unsigned I = 0, E = SL->getByteLength(); I != E; ++I)
+ for (unsigned I = 0, E = StringByteLength; I != E; ++I)
JC.update(GetLittleEndianByte(I));
- // The NUL terminator byte(s) were not present earlier,
- // we need to manually process those bytes into the CRC.
- for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
- ++NullTerminator)
- JC.update('\x00');
-
// <encoded-crc>: The CRC is encoded utilizing the standard number mangling
// scheme.
Mangler.mangleNumber(JC.getCRC());
- // <encoded-string>: The mangled name also contains the first 32 _characters_
- // (including null-terminator bytes) of the StringLiteral.
+ // <encoded-string>: The mangled name also contains the first 32 bytes
+ // (including null-terminator bytes) of the encoded StringLiteral.
// Each character is encoded by splitting them into bytes and then encoding
// the constituent bytes.
auto MangleByte = [&Mangler](char Byte) {
@@ -3155,20 +3299,15 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
}
};
- // Enforce our 32 character max.
- unsigned NumCharsToMangle = std::min(32U, SL->getLength());
- for (unsigned I = 0, E = NumCharsToMangle * SL->getCharByteWidth(); I != E;
- ++I)
+  // Enforce our 32-byte max, except wchar_t, which gets 32 chars instead.
+ unsigned MaxBytesToMangle = SL->isWide() ? 64U : 32U;
+ unsigned NumBytesToMangle = std::min(MaxBytesToMangle, StringByteLength);
+ for (unsigned I = 0; I != NumBytesToMangle; ++I) {
if (SL->isWide())
MangleByte(GetBigEndianByte(I));
else
MangleByte(GetLittleEndianByte(I));
-
- // Encode the NUL terminator if there is room.
- if (NumCharsToMangle < 32)
- for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
- ++NullTerminator)
- MangleByte(0);
+ }
Mangler.getStream() << '@';
}
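A short sketch of the new length rule (hypothetical declarations): the mangled <literal-length>, the CRC, and the encoded bytes all follow the declared array, not the literal token:

    char Padded[8] = "ab";     // length 8 is mangled; six trailing NULs feed the
                               // CRC and the encoded-string portion
    wchar_t Wide[3] = L"xy";   // wide data is taken big-endian, capped at
                               // 32 characters (64 bytes)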
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index 8adaef1fb640..536bf2c378fa 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -436,10 +436,35 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::Void:
case BuiltinType::WChar_U:
case BuiltinType::WChar_S:
+ case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
case BuiltinType::Int128:
case BuiltinType::LongDouble:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
case BuiltinType::UInt128:
case BuiltinType::Float16:
case BuiltinType::Float128:
@@ -470,15 +495,15 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
return None;
}
-/// \brief Returns true if \param T is a typedef of "BOOL" in objective-c.
+/// Returns true if \param T is a typedef of "BOOL" in objective-c.
bool NSAPI::isObjCBOOLType(QualType T) const {
return isObjCTypedef(T, "BOOL", BOOLId);
}
-/// \brief Returns true if \param T is a typedef of "NSInteger" in objective-c.
+/// Returns true if \param T is a typedef of "NSInteger" in objective-c.
bool NSAPI::isObjCNSIntegerType(QualType T) const {
return isObjCTypedef(T, "NSInteger", NSIntegerId);
}
-/// \brief Returns true if \param T is a typedef of "NSUInteger" in objective-c.
+/// Returns true if \param T is a typedef of "NSUInteger" in objective-c.
bool NSAPI::isObjCNSUIntegerType(QualType T) const {
return isObjCTypedef(T, "NSUInteger", NSUIntegerId);
}
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 889f8308a93c..503d0eb65e1e 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -164,7 +164,7 @@ NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// \brief Retrieve the namespace stored in this nested name specifier.
+/// Retrieve the namespace stored in this nested name specifier.
NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
@@ -172,7 +172,7 @@ NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
return nullptr;
}
-/// \brief Retrieve the namespace alias stored in this nested name specifier.
+/// Retrieve the namespace alias stored in this nested name specifier.
NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
@@ -180,7 +180,7 @@ NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
return nullptr;
}
-/// \brief Retrieve the record declaration stored in this nested name specifier.
+/// Retrieve the record declaration stored in this nested name specifier.
CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
switch (Prefix.getInt()) {
case StoredIdentifier:
@@ -197,7 +197,7 @@ CXXRecordDecl *NestedNameSpecifier::getAsRecordDecl() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// \brief Whether this nested name specifier refers to a dependent
+/// Whether this nested name specifier refers to a dependent
/// type or not.
bool NestedNameSpecifier::isDependent() const {
switch (getKind()) {
@@ -227,7 +227,7 @@ bool NestedNameSpecifier::isDependent() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// \brief Whether this nested name specifier refers to a dependent
+/// Whether this nested name specifier refers to a dependent
/// type or not.
bool NestedNameSpecifier::isInstantiationDependent() const {
switch (getKind()) {
@@ -268,7 +268,7 @@ bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
llvm_unreachable("Invalid NNS Kind!");
}
-/// \brief Print this nested name specifier to the given output
+/// Print this nested name specifier to the given output
/// stream.
void
NestedNameSpecifier::print(raw_ostream &OS,
@@ -387,7 +387,7 @@ NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
return Length;
}
-/// \brief Load a (possibly unaligned) source location from a given address
+/// Load a (possibly unaligned) source location from a given address
/// and offset.
static SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
unsigned Raw;
@@ -395,7 +395,7 @@ static SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
return SourceLocation::getFromRawEncoding(Raw);
}
-/// \brief Load a (possibly unaligned) pointer from a given address and
+/// Load a (possibly unaligned) pointer from a given address and
/// offset.
static void *LoadPointer(void *Data, unsigned Offset) {
void *Result;
@@ -466,7 +466,7 @@ static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
unsigned NewCapacity = std::max(
(unsigned)(BufferCapacity ? BufferCapacity * 2 : sizeof(void *) * 2),
(unsigned)(BufferSize + (End - Start)));
- char *NewBuffer = static_cast<char *>(malloc(NewCapacity));
+ char *NewBuffer = static_cast<char *>(llvm::safe_malloc(NewCapacity));
if (BufferCapacity) {
memcpy(NewBuffer, Buffer, BufferSize);
free(Buffer);
@@ -479,7 +479,7 @@ static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
BufferSize += End-Start;
}
-/// \brief Save a source location to the given buffer.
+/// Save a source location to the given buffer.
static void SaveSourceLocation(SourceLocation Loc, char *&Buffer,
unsigned &BufferSize, unsigned &BufferCapacity) {
unsigned Raw = Loc.getRawEncoding();
@@ -488,7 +488,7 @@ static void SaveSourceLocation(SourceLocation Loc, char *&Buffer,
Buffer, BufferSize, BufferCapacity);
}
-/// \brief Save a pointer to the given buffer.
+/// Save a pointer to the given buffer.
static void SavePointer(void *Ptr, char *&Buffer, unsigned &BufferSize,
unsigned &BufferCapacity) {
Append(reinterpret_cast<char *>(&Ptr),
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index 088d8bedd453..e710d3780337 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -33,6 +33,15 @@ void ODRHash::AddIdentifierInfo(const IdentifierInfo *II) {
}
void ODRHash::AddDeclarationName(DeclarationName Name) {
+ // Index all DeclarationName and use index numbers to refer to them.
+ auto Result = DeclNameMap.insert(std::make_pair(Name, DeclNameMap.size()));
+ ID.AddInteger(Result.first->second);
+ if (!Result.second) {
+    // If found in the map, the DeclarationName has previously been processed.
+ return;
+ }
+
+  // On the first encounter of a DeclarationName, also process its details.
AddBoolean(Name.isEmpty());
if (Name.isEmpty())
return;
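The insert-then-read idiom above mirrors mangling back-references; a minimal restatement of how it behaves:

    auto Result = DeclNameMap.insert(std::make_pair(Name, DeclNameMap.size()));
    unsigned Index = Result.first->second;   // stable index for this name
    // Result.second is true only on the first encounter, which is the one
    // time the name's details are hashed; later hits contribute just Index.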
@@ -139,6 +148,8 @@ void ODRHash::AddTemplateArgument(TemplateArgument TA) {
AddQualType(TA.getAsType());
break;
case TemplateArgument::Declaration:
+ AddDecl(TA.getAsDecl());
+ break;
case TemplateArgument::NullPtr:
case TemplateArgument::Integral:
break;
@@ -168,8 +179,7 @@ void ODRHash::AddTemplateParameterList(const TemplateParameterList *TPL) {
}
void ODRHash::clear() {
- DeclMap.clear();
- TypeMap.clear();
+ DeclNameMap.clear();
Bools.clear();
ID.clear();
}
@@ -307,26 +317,14 @@ public:
}
void VisitFunctionDecl(const FunctionDecl *D) {
- ID.AddInteger(D->getStorageClass());
- Hash.AddBoolean(D->isInlineSpecified());
- Hash.AddBoolean(D->isVirtualAsWritten());
- Hash.AddBoolean(D->isPure());
- Hash.AddBoolean(D->isDeletedAsWritten());
-
- ID.AddInteger(D->param_size());
-
- for (auto *Param : D->parameters()) {
- Hash.AddSubDecl(Param);
- }
-
- AddQualType(D->getReturnType());
+ // Handled by the ODRHash for FunctionDecl
+ ID.AddInteger(D->getODRHash());
Inherited::VisitFunctionDecl(D);
}
void VisitCXXMethodDecl(const CXXMethodDecl *D) {
- Hash.AddBoolean(D->isConst());
- Hash.AddBoolean(D->isVolatile());
+ // Handled by the ODRHash for FunctionDecl
Inherited::VisitCXXMethodDecl(D);
}
@@ -363,6 +361,7 @@ public:
if (hasDefaultArgument) {
AddTemplateArgument(D->getDefaultArgument());
}
+ Hash.AddBoolean(D->isParameterPack());
Inherited::VisitTemplateTypeParmDecl(D);
}
@@ -375,6 +374,7 @@ public:
if (hasDefaultArgument) {
AddStmt(D->getDefaultArgument());
}
+ Hash.AddBoolean(D->isParameterPack());
Inherited::VisitNonTypeTemplateParmDecl(D);
}
@@ -387,15 +387,37 @@ public:
if (hasDefaultArgument) {
AddTemplateArgument(D->getDefaultArgument().getArgument());
}
+ Hash.AddBoolean(D->isParameterPack());
Inherited::VisitTemplateTemplateParmDecl(D);
}
+
+ void VisitTemplateDecl(const TemplateDecl *D) {
+ Hash.AddTemplateParameterList(D->getTemplateParameters());
+
+ Inherited::VisitTemplateDecl(D);
+ }
+
+ void VisitRedeclarableTemplateDecl(const RedeclarableTemplateDecl *D) {
+ Hash.AddBoolean(D->isMemberSpecialization());
+ Inherited::VisitRedeclarableTemplateDecl(D);
+ }
+
+ void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
+ AddDecl(D->getTemplatedDecl());
+ Inherited::VisitFunctionTemplateDecl(D);
+ }
+
+ void VisitEnumConstantDecl(const EnumConstantDecl *D) {
+ AddStmt(D->getInitExpr());
+ Inherited::VisitEnumConstantDecl(D);
+ }
};
} // namespace
// Only allow a small portion of Decl's to be processed. Remove this once
// all Decl's can be handled.
-bool ODRHash::isWhitelistedDecl(const Decl *D, const CXXRecordDecl *Parent) {
+bool ODRHash::isWhitelistedDecl(const Decl *D, const DeclContext *Parent) {
if (D->isImplicit()) return false;
if (D->getDeclContext() != Parent) return false;
@@ -406,8 +428,10 @@ bool ODRHash::isWhitelistedDecl(const Decl *D, const CXXRecordDecl *Parent) {
case Decl::CXXConstructor:
case Decl::CXXDestructor:
case Decl::CXXMethod:
+ case Decl::EnumConstant: // Only found in EnumDecl's.
case Decl::Field:
case Decl::Friend:
+ case Decl::FunctionTemplate:
case Decl::StaticAssert:
case Decl::TypeAlias:
case Decl::Typedef:
@@ -418,7 +442,6 @@ bool ODRHash::isWhitelistedDecl(const Decl *D, const CXXRecordDecl *Parent) {
void ODRHash::AddSubDecl(const Decl *D) {
assert(D && "Expecting non-null pointer.");
- AddDecl(D);
ODRDeclVisitor(ID, *this).Visit(D);
}
@@ -440,9 +463,13 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
// Filter out sub-Decls which will not be processed in order to get an
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
- for (const Decl *SubDecl : Record->decls()) {
+ for (Decl *SubDecl : Record->decls()) {
if (isWhitelistedDecl(SubDecl, Record)) {
Decls.push_back(SubDecl);
+ if (auto *Function = dyn_cast<FunctionDecl>(SubDecl)) {
+ // Compute/Preload ODRHash into FunctionDecl.
+ Function->getODRHash();
+ }
}
}
@@ -466,28 +493,48 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
}
}
-void ODRHash::AddFunctionDecl(const FunctionDecl *Function) {
+void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
+ bool SkipBody) {
assert(Function && "Expecting non-null pointer.");
- // Skip hashing these kinds of function.
- if (Function->isImplicit()) return;
- if (Function->isDefaulted()) return;
- if (Function->isDeleted()) return;
- if (!Function->hasBody()) return;
- if (!Function->getBody()) return;
-
- // TODO: Fix hashing for class methods.
- if (isa<CXXMethodDecl>(Function)) return;
-
// Skip functions that are specializations or in specialization context.
const DeclContext *DC = Function;
while (DC) {
if (isa<ClassTemplateSpecializationDecl>(DC)) return;
- if (auto *F = dyn_cast<FunctionDecl>(DC))
- if (F->isFunctionTemplateSpecialization()) return;
+ if (auto *F = dyn_cast<FunctionDecl>(DC)) {
+ if (F->isFunctionTemplateSpecialization()) {
+ if (!isa<CXXMethodDecl>(DC)) return;
+ if (DC->getLexicalParent()->isFileContext()) return;
+ // Inline method specializations are the only supported
+ // specialization for now.
+ }
+ }
DC = DC->getParent();
}
+ ID.AddInteger(Function->getDeclKind());
+
+ const auto *SpecializationArgs = Function->getTemplateSpecializationArgs();
+ AddBoolean(SpecializationArgs);
+ if (SpecializationArgs) {
+ ID.AddInteger(SpecializationArgs->size());
+ for (const TemplateArgument &TA : SpecializationArgs->asArray()) {
+ AddTemplateArgument(TA);
+ }
+ }
+
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(Function)) {
+ AddBoolean(Method->isConst());
+ AddBoolean(Method->isVolatile());
+ }
+
+ ID.AddInteger(Function->getStorageClass());
+ AddBoolean(Function->isInlineSpecified());
+ AddBoolean(Function->isVirtualAsWritten());
+ AddBoolean(Function->isPure());
+ AddBoolean(Function->isDeletedAsWritten());
+ AddBoolean(Function->isExplicitlyDefaulted());
+
AddDecl(Function);
AddQualType(Function->getReturnType());
@@ -496,25 +543,62 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function) {
for (auto Param : Function->parameters())
AddSubDecl(Param);
- AddStmt(Function->getBody());
+ if (SkipBody) {
+ AddBoolean(false);
+ return;
+ }
+
+ const bool HasBody = Function->isThisDeclarationADefinition() &&
+ !Function->isDefaulted() && !Function->isDeleted() &&
+ !Function->isLateTemplateParsed();
+ AddBoolean(HasBody);
+ if (HasBody) {
+ auto *Body = Function->getBody();
+ AddBoolean(Body);
+ if (Body)
+ AddStmt(Body);
+ }
+}
+
+void ODRHash::AddEnumDecl(const EnumDecl *Enum) {
+ assert(Enum);
+ AddDeclarationName(Enum->getDeclName());
+
+ AddBoolean(Enum->isScoped());
+ if (Enum->isScoped())
+ AddBoolean(Enum->isScopedUsingClassTag());
+
+ if (Enum->getIntegerTypeSourceInfo())
+ AddQualType(Enum->getIntegerType());
+
+ // Filter out sub-Decls which will not be processed in order to get an
+ // accurate count of Decl's.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (Decl *SubDecl : Enum->decls()) {
+ if (isWhitelistedDecl(SubDecl, Enum)) {
+ assert(isa<EnumConstantDecl>(SubDecl) && "Unexpected Decl");
+ Decls.push_back(SubDecl);
+ }
+ }
+
+ ID.AddInteger(Decls.size());
+ for (auto SubDecl : Decls) {
+ AddSubDecl(SubDecl);
+ }
+
}
void ODRHash::AddDecl(const Decl *D) {
assert(D && "Expecting non-null pointer.");
D = D->getCanonicalDecl();
- auto Result = DeclMap.insert(std::make_pair(D, DeclMap.size()));
- ID.AddInteger(Result.first->second);
- // On first encounter of a Decl pointer, process it. Every time afterwards,
- // only the index value is needed.
- if (!Result.second) {
- return;
- }
-
- ID.AddInteger(D->getKind());
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
AddDeclarationName(ND->getDeclName());
+ return;
}
+
+ ID.AddInteger(D->getKind());
+ // TODO: Handle non-NamedDecl here.
}
namespace {
@@ -642,13 +726,41 @@ public:
VisitFunctionType(T);
}
+ void VisitPointerType(const PointerType *T) {
+ AddQualType(T->getPointeeType());
+ VisitType(T);
+ }
+
+ void VisitReferenceType(const ReferenceType *T) {
+ AddQualType(T->getPointeeTypeAsWritten());
+ VisitType(T);
+ }
+
+ void VisitLValueReferenceType(const LValueReferenceType *T) {
+ VisitReferenceType(T);
+ }
+
+ void VisitRValueReferenceType(const RValueReferenceType *T) {
+ VisitReferenceType(T);
+ }
+
void VisitTypedefType(const TypedefType *T) {
AddDecl(T->getDecl());
QualType UnderlyingType = T->getDecl()->getUnderlyingType();
VisitQualifiers(UnderlyingType.getQualifiers());
- while (const TypedefType *Underlying =
- dyn_cast<TypedefType>(UnderlyingType.getTypePtr())) {
- UnderlyingType = Underlying->getDecl()->getUnderlyingType();
+ while (true) {
+ if (const TypedefType *Underlying =
+ dyn_cast<TypedefType>(UnderlyingType.getTypePtr())) {
+ UnderlyingType = Underlying->getDecl()->getUnderlyingType();
+ continue;
+ }
+ if (const ElaboratedType *Underlying =
+ dyn_cast<ElaboratedType>(UnderlyingType.getTypePtr())) {
+ UnderlyingType = Underlying->getNamedType();
+ continue;
+ }
+
+ break;
}
AddType(UnderlyingType.getTypePtr());
VisitType(T);
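An example of the chains the extended loop now strips (hypothetical): elaborated sugar between typedefs no longer stops the desugaring:

    struct S {};
    typedef struct S A;   // underlying type carries ElaboratedType sugar
    typedef A B;          // hashing B now reaches the underlying RecordType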
@@ -710,14 +822,6 @@ public:
void ODRHash::AddType(const Type *T) {
assert(T && "Expecting non-null pointer.");
- auto Result = TypeMap.insert(std::make_pair(T, TypeMap.size()));
- ID.AddInteger(Result.first->second);
- // On first encounter of a Type pointer, process it. Every time afterwards,
- // only the index value is needed.
- if (!Result.second) {
- return;
- }
-
ODRTypeVisitor(ID, *this).Visit(T);
}
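Together with AddEnumDecl above, this lets the ODR checker hash enum bodies. The class of mismatch it can now detect, sketched across two hypothetical TUs:

    // TU 1:
    enum class E : int { A, B };
    // TU 2:
    enum class E : int { A, B, C };   // differing constants -> differing hash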
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index 4feac0cfd041..50729264bfe1 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -702,10 +702,10 @@ unsigned OMPClauseMappableExprCommon::getComponentsTotalNumber(
}
unsigned OMPClauseMappableExprCommon::getUniqueDeclarationsTotalNumber(
- ArrayRef<ValueDecl *> Declarations) {
+ ArrayRef<const ValueDecl *> Declarations) {
unsigned TotalNum = 0u;
llvm::SmallPtrSet<const ValueDecl *, 8> Cache;
- for (auto *D : Declarations) {
+ for (const ValueDecl *D : Declarations) {
const ValueDecl *VD = D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
if (Cache.count(VD))
continue;
diff --git a/lib/AST/ParentMap.cpp b/lib/AST/ParentMap.cpp
index d8882c9030b2..bc57b20790d9 100644
--- a/lib/AST/ParentMap.cpp
+++ b/lib/AST/ParentMap.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtObjC.h"
#include "llvm/ADT/DenseMap.h"
using namespace clang;
@@ -193,6 +194,8 @@ bool ParentMap::isConsumedExpr(Expr* E) const {
return DirectChild == cast<IndirectGotoStmt>(P)->getTarget();
case Stmt::SwitchStmtClass:
return DirectChild == cast<SwitchStmt>(P)->getCond();
+ case Stmt::ObjCForCollectionStmtClass:
+ return DirectChild == cast<ObjCForCollectionStmt>(P)->getCollection();
case Stmt::ReturnStmtClass:
return true;
}
diff --git a/lib/AST/QualTypeNames.cpp b/lib/AST/QualTypeNames.cpp
index 86c0eff9f78c..8b605ef295a0 100644
--- a/lib/AST/QualTypeNames.cpp
+++ b/lib/AST/QualTypeNames.cpp
@@ -22,7 +22,7 @@ namespace clang {
namespace TypeName {
-/// \brief Create a NestedNameSpecifier for Namesp and its enclosing
+/// Create a NestedNameSpecifier for Namesp and its enclosing
/// scopes.
///
/// \param[in] Ctx - the AST Context to be used.
@@ -35,7 +35,7 @@ static NestedNameSpecifier *createNestedNameSpecifier(
const NamespaceDecl *Namesp,
bool WithGlobalNsPrefix);
-/// \brief Create a NestedNameSpecifier for TagDecl and its enclosing
+/// Create a NestedNameSpecifier for TagDecl and its enclosing
/// scopes.
///
/// \param[in] Ctx - the AST Context to be used.
@@ -210,7 +210,7 @@ static NestedNameSpecifier *createOuterNNS(const ASTContext &Ctx, const Decl *D,
return nullptr; // no starting '::' if |WithGlobalNsPrefix| is false
}
-/// \brief Return a fully qualified version of this name specifier.
+/// Return a fully qualified version of this name specifier.
static NestedNameSpecifier *getFullyQualifiedNestedNameSpecifier(
const ASTContext &Ctx, NestedNameSpecifier *Scope,
bool WithGlobalNsPrefix) {
@@ -262,7 +262,7 @@ static NestedNameSpecifier *getFullyQualifiedNestedNameSpecifier(
llvm_unreachable("bad NNS kind");
}
-/// \brief Create a nested name specifier for the declaring context of
+/// Create a nested name specifier for the declaring context of
/// the type.
static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
const ASTContext &Ctx, const Decl *Decl,
@@ -314,7 +314,7 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
return nullptr;
}
-/// \brief Create a nested name specifier for the declaring context of
+/// Create a nested name specifier for the declaring context of
/// the type.
static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
const ASTContext &Ctx, const Type *TypePtr,
@@ -366,7 +366,7 @@ NestedNameSpecifier *createNestedNameSpecifier(const ASTContext &Ctx,
TD->getTypeForDecl());
}
-/// \brief Return the fully qualified type, including fully-qualified
+/// Return the fully qualified type, including fully-qualified
/// versions of any template parameters.
QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
bool WithGlobalNsPrefix) {
@@ -408,7 +408,7 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
// Get the qualifiers.
Qualifiers Quals = QT.getQualifiers();
- QT = dyn_cast<SubstTemplateTypeParmType>(QT.getTypePtr())->desugar();
+ QT = cast<SubstTemplateTypeParmType>(QT.getTypePtr())->desugar();
// Add back the qualifiers.
QT = Ctx.getQualifiedType(QT, Quals);
@@ -452,12 +452,8 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
std::string getFullyQualifiedName(QualType QT,
const ASTContext &Ctx,
+ const PrintingPolicy &Policy,
bool WithGlobalNsPrefix) {
- PrintingPolicy Policy(Ctx.getPrintingPolicy());
- Policy.SuppressScope = false;
- Policy.AnonymousTagLocations = false;
- Policy.PolishForDeclaration = true;
- Policy.SuppressUnwrittenScope = true;
QualType FQQT = getFullyQualifiedType(QT, Ctx, WithGlobalNsPrefix);
return FQQT.getAsString(Policy);
}
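With the defaults removed from getFullyQualifiedName, a caller that wants the old behaviour now builds the policy itself; a sketch, where the flag values are exactly those deleted above:

    PrintingPolicy Policy(Ctx.getPrintingPolicy());
    Policy.SuppressScope = false;
    Policy.AnonymousTagLocations = false;
    Policy.PolishForDeclaration = true;
    Policy.SuppressUnwrittenScope = true;
    std::string Name =
        TypeName::getFullyQualifiedName(QT, Ctx, Policy,
                                        /*WithGlobalNsPrefix=*/false);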
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 881a7d9c61be..95da9ed6d238 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -80,7 +80,7 @@ bool commentsStartOnSameColumn(const SourceManager &SM, const RawComment &R1,
}
} // unnamed namespace
-/// \brief Determines whether there is only whitespace in `Buffer` between `P`
+/// Determines whether there is only whitespace in `Buffer` between `P`
/// and the previous line.
/// \param Buffer The buffer to search in.
/// \param P The offset from the beginning of `Buffer` to start from.
@@ -107,10 +107,10 @@ static bool isOrdinaryKind(RawComment::CommentKind K) {
}
RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
- bool Merged, bool ParseAllComments) :
+ const CommentOptions &CommentOpts, bool Merged) :
Range(SR), RawTextValid(false), BriefTextValid(false),
- IsAttached(false), IsTrailingComment(false), IsAlmostTrailingComment(false),
- ParseAllComments(ParseAllComments) {
+ IsAttached(false), IsTrailingComment(false),
+ IsAlmostTrailingComment(false) {
// Extract raw comment text, if possible.
if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) {
Kind = RCK_Invalid;
@@ -118,10 +118,11 @@ RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
}
// Guess comment kind.
- std::pair<CommentKind, bool> K = getCommentKind(RawText, ParseAllComments);
+ std::pair<CommentKind, bool> K =
+ getCommentKind(RawText, CommentOpts.ParseAllComments);
// Guess whether an ordinary comment is trailing.
- if (ParseAllComments && isOrdinaryKind(K.first)) {
+ if (CommentOpts.ParseAllComments && isOrdinaryKind(K.first)) {
FileID BeginFileID;
unsigned BeginOffset;
std::tie(BeginFileID, BeginOffset) =
@@ -270,6 +271,7 @@ static bool onlyWhitespaceBetween(SourceManager &SM,
}
void RawCommentList::addComment(const RawComment &RC,
+ const CommentOptions &CommentOpts,
llvm::BumpPtrAllocator &Allocator) {
if (RC.isInvalid())
return;
@@ -284,7 +286,7 @@ void RawCommentList::addComment(const RawComment &RC,
}
// Ordinary comments are not interesting for us.
- if (RC.isOrdinary())
+ if (RC.isOrdinary() && !CommentOpts.ParseAllComments)
return;
// If this is the first Doxygen comment, save it (because there isn't
@@ -317,8 +319,7 @@ void RawCommentList::addComment(const RawComment &RC,
onlyWhitespaceBetween(SourceMgr, C1.getLocEnd(), C2.getLocStart(),
/*MaxNewlinesAllowed=*/1)) {
SourceRange MergedRange(C1.getLocStart(), C2.getLocEnd());
- *Comments.back() = RawComment(SourceMgr, MergedRange, true,
- RC.isParseAllComments());
+ *Comments.back() = RawComment(SourceMgr, MergedRange, CommentOpts, true);
} else {
Comments.push_back(new (Allocator) RawComment(RC));
}
@@ -334,3 +335,94 @@ void RawCommentList::addDeserializedComments(ArrayRef<RawComment *> Deserialized
BeforeThanCompare<RawComment>(SourceMgr));
std::swap(Comments, MergedComments);
}
+
+std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags) const {
+ llvm::StringRef CommentText = getRawText(SourceMgr);
+ if (CommentText.empty())
+ return "";
+
+ llvm::BumpPtrAllocator Allocator;
+ // We do not parse any commands, so CommentOptions are ignored by
+ // comments::Lexer. Therefore, we just use default-constructed options.
+ CommentOptions DefOpts;
+ comments::CommandTraits EmptyTraits(Allocator, DefOpts);
+ comments::Lexer L(Allocator, Diags, EmptyTraits, getSourceRange().getBegin(),
+ CommentText.begin(), CommentText.end(),
+ /*ParseCommands=*/false);
+
+ std::string Result;
+  // The column number of the first non-whitespace token in the comment text.
+  // We skip whitespace up to this column, but keep the whitespace after this
+  // column. IndentColumn is calculated when lexing the first line and reused
+  // for the rest of the lines.
+ unsigned IndentColumn = 0;
+
+ // Processes one line of the comment and adds it to the result.
+ // Handles skipping the indent at the start of the line.
+ // Returns false when eof is reached and true otherwise.
+ auto LexLine = [&](bool IsFirstLine) -> bool {
+ comments::Token Tok;
+    // Lex the first token on the line. We handle it separately, because we
+    // need to fix up its indentation.
+ L.lex(Tok);
+ if (Tok.is(comments::tok::eof))
+ return false;
+ if (Tok.is(comments::tok::newline)) {
+ Result += "\n";
+ return true;
+ }
+ llvm::StringRef TokText = L.getSpelling(Tok, SourceMgr);
+ bool LocInvalid = false;
+ unsigned TokColumn =
+ SourceMgr.getSpellingColumnNumber(Tok.getLocation(), &LocInvalid);
+ assert(!LocInvalid && "getFormattedText for invalid location");
+
+ // Amount of leading whitespace in TokText.
+ size_t WhitespaceLen = TokText.find_first_not_of(" \t");
+ if (WhitespaceLen == StringRef::npos)
+ WhitespaceLen = TokText.size();
+  // Remember the amount of whitespace we skipped in the first line so we can
+  // remove the indent up to that column in the following lines.
+ if (IsFirstLine)
+ IndentColumn = TokColumn + WhitespaceLen;
+
+ // Amount of leading whitespace we actually want to skip.
+ // For the first line we skip all the whitespace.
+ // For the rest of the lines, we skip whitespace up to IndentColumn.
+ unsigned SkipLen =
+ IsFirstLine
+ ? WhitespaceLen
+ : std::min<size_t>(
+ WhitespaceLen,
+ std::max<int>(static_cast<int>(IndentColumn) - TokColumn, 0));
+ llvm::StringRef Trimmed = TokText.drop_front(SkipLen);
+ Result += Trimmed;
+ // Lex all tokens in the rest of the line.
+ for (L.lex(Tok); Tok.isNot(comments::tok::eof); L.lex(Tok)) {
+ if (Tok.is(comments::tok::newline)) {
+ Result += "\n";
+ return true;
+ }
+ Result += L.getSpelling(Tok, SourceMgr);
+ }
+ // We've reached the end of file token.
+ return false;
+ };
+
+  auto DropTrailingNewLines = [](std::string &Str) {
+    // Guard against an empty string: calling back() on it is UB.
+    while (!Str.empty() && Str.back() == '\n')
+      Str.pop_back();
+  };
+
+  // Process the first line separately to remember the indent for the
+  // following lines.
+ if (!LexLine(/*IsFirstLine=*/true)) {
+ DropTrailingNewLines(Result);
+ return Result;
+ }
+ // Process the rest of the lines.
+ while (LexLine(/*IsFirstLine=*/false))
+ ;
+ DropTrailingNewLines(Result);
+ return Result;
+}
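As a worked illustration of the IndentColumn logic (a sketch; exact columns depend on the comment lexer's tokenization), formatting a raw comment such as:

    /// Brief line.
    ///    Extra-indented continuation.

yields roughly:

    Brief line.
       Extra-indented continuation.

The first line pins IndentColumn at the column of 'Brief'; subsequent lines drop leading whitespace only up to that column, so indentation beyond it survives, and DropTrailingNewLines strips any blank lines left at the end.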
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index a9d43dfa80c5..b4b09c7cecd7 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -54,25 +54,25 @@ struct BaseSubobjectInfo {
const BaseSubobjectInfo *Derived;
};
-/// \brief Externally provided layout. Typically used when the AST source, such
+/// Externally provided layout. Typically used when the AST source, such
/// as DWARF, lacks all the information that was available at compile time, such
/// as alignment attributes on fields and pragmas in effect.
struct ExternalLayout {
ExternalLayout() : Size(0), Align(0) {}
- /// \brief Overall record size in bits.
+ /// Overall record size in bits.
uint64_t Size;
- /// \brief Overall record alignment in bits.
+ /// Overall record alignment in bits.
uint64_t Align;
- /// \brief Record field offsets in bits.
+ /// Record field offsets in bits.
llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsets;
- /// \brief Direct, non-virtual base offsets.
+ /// Direct, non-virtual base offsets.
llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsets;
- /// \brief Virtual base offsets.
+ /// Virtual base offsets.
llvm::DenseMap<const CXXRecordDecl *, CharUnits> VirtualBaseOffsets;
/// Get the offset of the given field. The external source must provide
@@ -579,16 +579,16 @@ protected:
/// Alignment - The current alignment of the record layout.
CharUnits Alignment;
- /// \brief The alignment if attribute packed is not used.
+ /// The alignment if attribute packed is not used.
CharUnits UnpackedAlignment;
SmallVector<uint64_t, 16> FieldOffsets;
- /// \brief Whether the external AST source has provided a layout for this
+ /// Whether the external AST source has provided a layout for this
/// record.
unsigned UseExternalLayout : 1;
- /// \brief Whether we need to infer alignment, even when we have an
+ /// Whether we need to infer alignment, even when we have an
/// externally-provided layout.
unsigned InferAlignment : 1;
@@ -632,7 +632,7 @@ protected:
/// pointer, as opposed to inheriting one from a primary base class.
bool HasOwnVFPtr;
- /// \brief the flag of field offset changing due to packed attribute.
+  /// Whether the field offset has changed due to the packed attribute.
bool HasPackedField;
typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
@@ -749,7 +749,7 @@ protected:
UpdateAlignment(NewAlignment, NewAlignment);
}
- /// \brief Retrieve the externally-supplied field offset for the given
+ /// Retrieve the externally-supplied field offset for the given
/// field.
///
/// \param Field The field whose offset is being queried.
@@ -967,7 +967,7 @@ void ItaniumRecordLayoutBuilder::ComputeBaseSubobjectInfo(
void ItaniumRecordLayoutBuilder::EnsureVTablePointerAlignment(
CharUnits UnpackedBaseAlign) {
- CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+ CharUnits BaseAlign = Packed ? CharUnits::One() : UnpackedBaseAlign;
// The maximum field alignment overrides base align.
if (!MaxFieldAlignment.isZero()) {
@@ -1175,9 +1175,16 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
HasExternalLayout = External.getExternalVBaseOffset(Base->Class, Offset);
}
+ // Clang <= 6 incorrectly applied the 'packed' attribute to base classes.
+ // Per GCC's documentation, it only applies to non-static data members.
CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment();
- CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
-
+ CharUnits BaseAlign =
+ (Packed && ((Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver6) ||
+ Context.getTargetInfo().getTriple().isPS4()))
+ ? CharUnits::One()
+ : UnpackedBaseAlign;
+
// If we have an empty base class, try to place it at offset 0.
if (Base->Class->isEmpty() &&
(!HasExternalLayout || Offset == CharUnits::Zero()) &&
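An example of the compatibility switch above, using hypothetical types:

    struct B { int x; };
    struct __attribute__((packed)) D : B { char c; };
    // With Clang <= 6 (kept under -fclang-abi-compat=6 and on PS4 targets),
    // the B subobject was laid out with alignment 1. Current layout keeps
    // B's natural alignment, matching GCC's rule that 'packed' affects
    // only non-static data members.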
@@ -1504,9 +1511,10 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
FieldAlign = TypeSize;
// If the previous field was not a bitfield, or was a bitfield
- // with a different storage unit size, we're done with that
- // storage unit.
- if (LastBitfieldTypeSize != TypeSize) {
+ // with a different storage unit size, or if this field doesn't fit into
+ // the current storage unit, we're done with that storage unit.
+ if (LastBitfieldTypeSize != TypeSize ||
+ UnfilledBitsInLastUnit < FieldSize) {
// Also, ignore zero-length bitfields after non-bitfields.
if (!LastBitfieldTypeSize && !FieldSize)
FieldAlign = 1;
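To illustrate the added UnfilledBitsInLastUnit check (a sketch; this path handles ms_struct-style storage units, and exact layout is target-dependent):

    struct S {
      unsigned a : 24; // opens a 32-bit storage unit; 8 bits stay unfilled
      unsigned b : 16; // same unit size, but 16 > 8 remaining bits, so the
                       // current unit is closed and 'b' starts a new one
    };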
@@ -1751,7 +1759,34 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
QualType T = Context.getBaseElementType(D->getType());
if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
- if (TypeSize > FieldAlign)
+
+ if (!llvm::isPowerOf2_64(TypeSize.getQuantity())) {
+ assert(
+ !Context.getTargetInfo().getTriple().isWindowsMSVCEnvironment() &&
+ "Non PowerOf2 size in MSVC mode");
+ // Base types with sizes that aren't a power of two don't work
+ // with the layout rules for MS structs. This isn't an issue in
+ // MSVC itself since there are no such base data types there.
+ // On e.g. x86_32 mingw and linux, long double is 12 bytes though.
+ // Any structs involving that data type obviously can't be ABI
+ // compatible with MSVC regardless of how it is laid out.
+
+ // Since ms_struct can be mass enabled (via a pragma or via the
+ // -mms-bitfields command line parameter), this can trigger for
+ // structs that don't actually need MSVC compatibility, so we
+ // need to be able to sidestep the ms_struct layout for these types.
+
+ // Since the combination of -mms-bitfields together with structs
+ // like max_align_t (which contains a long double) for mingw is
+      // quite common (and GCC handles it silently), just handle it
+ // silently there. For other targets that have ms_struct enabled
+ // (most probably via a pragma or attribute), trigger a diagnostic
+ // that defaults to an error.
+ if (!Context.getTargetInfo().getTriple().isWindowsGNUEnvironment())
+ Diag(D->getLocation(), diag::warn_npot_ms_struct);
+ }
+ if (TypeSize > FieldAlign &&
+ llvm::isPowerOf2_64(TypeSize.getQuantity()))
FieldAlign = TypeSize;
}
}
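An example of the non-power-of-two case the new diagnostic targets (assuming an i686 target, where long double occupies 12 bytes):

    // Compiled with -mms-bitfields (or with ms_struct in effect):
    struct M {
      char c;
      long double ld; // 12-byte base type; MSVC has no such type, so the
                      // ms_struct layout rules cannot apply cleanly
    };
    // mingw accepts this silently, matching GCC; other targets emit
    // warn_npot_ms_struct, which defaults to an error.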
@@ -1929,7 +1964,7 @@ ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
return ExternalFieldOffset;
}
-/// \brief Get diagnostic %select index for tag kind for
+/// Get diagnostic %select index for tag kind for
/// field padding diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
@@ -2106,7 +2141,7 @@ static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) {
// mode; fortunately, that is true because we want to assign
// consistently semantics to the type-traits intrinsics (or at
// least as many of them as possible).
- return RD->isTrivial() && RD->isStandardLayout();
+ return RD->isTrivial() && RD->isCXX11StandardLayout();
}
llvm_unreachable("bad tail-padding use kind");
@@ -2220,9 +2255,9 @@ private:
public:
void layout(const RecordDecl *RD);
void cxxLayout(const CXXRecordDecl *RD);
- /// \brief Initializes size and alignment and honors some flags.
+ /// Initializes size and alignment and honors some flags.
void initializeLayout(const RecordDecl *RD);
- /// \brief Initialized C++ layout, compute alignment and virtual alignment and
+  /// Initializes C++ layout, computing alignment and virtual alignment and
/// existence of vfptrs and vbptrs. Alignment is needed before the vfptr is
/// laid out.
void initializeCXXLayout(const CXXRecordDecl *RD);
@@ -2233,93 +2268,93 @@ public:
const ASTRecordLayout *&PreviousBaseLayout);
void injectVFPtr(const CXXRecordDecl *RD);
void injectVBPtr(const CXXRecordDecl *RD);
- /// \brief Lays out the fields of the record. Also rounds size up to
+ /// Lays out the fields of the record. Also rounds size up to
/// alignment.
void layoutFields(const RecordDecl *RD);
void layoutField(const FieldDecl *FD);
void layoutBitField(const FieldDecl *FD);
- /// \brief Lays out a single zero-width bit-field in the record and handles
+ /// Lays out a single zero-width bit-field in the record and handles
/// special cases associated with zero-width bit-fields.
void layoutZeroWidthBitField(const FieldDecl *FD);
void layoutVirtualBases(const CXXRecordDecl *RD);
void finalizeLayout(const RecordDecl *RD);
- /// \brief Gets the size and alignment of a base taking pragma pack and
+ /// Gets the size and alignment of a base taking pragma pack and
/// __declspec(align) into account.
ElementInfo getAdjustedElementInfo(const ASTRecordLayout &Layout);
- /// \brief Gets the size and alignment of a field taking pragma pack and
+ /// Gets the size and alignment of a field taking pragma pack and
/// __declspec(align) into account. It also updates RequiredAlignment as a
/// side effect because it is most convenient to do so here.
ElementInfo getAdjustedElementInfo(const FieldDecl *FD);
- /// \brief Places a field at an offset in CharUnits.
+ /// Places a field at an offset in CharUnits.
void placeFieldAtOffset(CharUnits FieldOffset) {
FieldOffsets.push_back(Context.toBits(FieldOffset));
}
- /// \brief Places a bitfield at a bit offset.
+ /// Places a bitfield at a bit offset.
void placeFieldAtBitOffset(uint64_t FieldOffset) {
FieldOffsets.push_back(FieldOffset);
}
- /// \brief Compute the set of virtual bases for which vtordisps are required.
+ /// Compute the set of virtual bases for which vtordisps are required.
void computeVtorDispSet(
llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtorDispSet,
const CXXRecordDecl *RD) const;
const ASTContext &Context;
- /// \brief The size of the record being laid out.
+ /// The size of the record being laid out.
CharUnits Size;
- /// \brief The non-virtual size of the record layout.
+ /// The non-virtual size of the record layout.
CharUnits NonVirtualSize;
- /// \brief The data size of the record layout.
+ /// The data size of the record layout.
CharUnits DataSize;
- /// \brief The current alignment of the record layout.
+ /// The current alignment of the record layout.
CharUnits Alignment;
- /// \brief The maximum allowed field alignment. This is set by #pragma pack.
+ /// The maximum allowed field alignment. This is set by #pragma pack.
CharUnits MaxFieldAlignment;
- /// \brief The alignment that this record must obey. This is imposed by
+ /// The alignment that this record must obey. This is imposed by
/// __declspec(align()) on the record itself or one of its fields or bases.
CharUnits RequiredAlignment;
- /// \brief The size of the allocation of the currently active bitfield.
+ /// The size of the allocation of the currently active bitfield.
/// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield
/// is true.
CharUnits CurrentBitfieldSize;
- /// \brief Offset to the virtual base table pointer (if one exists).
+ /// Offset to the virtual base table pointer (if one exists).
CharUnits VBPtrOffset;
- /// \brief Minimum record size possible.
+ /// Minimum record size possible.
CharUnits MinEmptyStructSize;
- /// \brief The size and alignment info of a pointer.
+ /// The size and alignment info of a pointer.
ElementInfo PointerInfo;
- /// \brief The primary base class (if one exists).
+ /// The primary base class (if one exists).
const CXXRecordDecl *PrimaryBase;
- /// \brief The class we share our vb-pointer with.
+ /// The class we share our vb-pointer with.
const CXXRecordDecl *SharedVBPtrBase;
- /// \brief The collection of field offsets.
+ /// The collection of field offsets.
SmallVector<uint64_t, 16> FieldOffsets;
- /// \brief Base classes and their offsets in the record.
+ /// Base classes and their offsets in the record.
BaseOffsetsMapTy Bases;
- /// \brief virtual base classes and their offsets in the record.
+  /// Virtual base classes and their offsets in the record.
ASTRecordLayout::VBaseOffsetsMapTy VBases;
- /// \brief The number of remaining bits in our last bitfield allocation.
+ /// The number of remaining bits in our last bitfield allocation.
/// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
/// true.
unsigned RemainingBitsInField;
bool IsUnion : 1;
- /// \brief True if the last field laid out was a bitfield and was not 0
+ /// True if the last field laid out was a bitfield and was not 0
/// width.
bool LastFieldIsNonZeroWidthBitfield : 1;
- /// \brief True if the class has its own vftable pointer.
+ /// True if the class has its own vftable pointer.
bool HasOwnVFPtr : 1;
- /// \brief True if the class has a vbtable pointer.
+ /// True if the class has a vbtable pointer.
bool HasVBPtr : 1;
- /// \brief True if the last sub-object within the type is zero sized or the
+ /// True if the last sub-object within the type is zero sized or the
/// object itself is zero sized. This *does not* count members that are not
/// records. Only used for MS-ABI.
bool EndsWithZeroSizedObject : 1;
- /// \brief True if this class is zero sized or first base is zero sized or
+ /// True if this class is zero sized or first base is zero sized or
/// has this property. Only used for MS-ABI.
bool LeadsWithZeroSizedBase : 1;
- /// \brief True if the external AST source provided a layout for this record.
+ /// True if the external AST source provided a layout for this record.
bool UseExternalLayout : 1;
- /// \brief The layout provided by the external AST source. Only active if
+ /// The layout provided by the external AST source. Only active if
/// UseExternalLayout is true.
ExternalLayout External;
};
@@ -2584,8 +2619,8 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
}
if (!FoundBase) {
- if (MDCUsesEBO && BaseDecl->isEmpty() &&
- BaseLayout.getNonVirtualSize() == CharUnits::Zero()) {
+ if (MDCUsesEBO && BaseDecl->isEmpty()) {
+ assert(BaseLayout.getNonVirtualSize() == CharUnits::Zero());
BaseOffset = CharUnits::Zero();
} else {
// Otherwise, lay the base out at the end of the MDC.
@@ -2642,7 +2677,7 @@ void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
// Check to see if this bitfield fits into an existing allocation. Note:
// MSVC refuses to pack bitfields of formal types with different sizes
// into the same allocation.
- if (!IsUnion && LastFieldIsNonZeroWidthBitfield &&
+ if (!UseExternalLayout && !IsUnion && LastFieldIsNonZeroWidthBitfield &&
CurrentBitfieldSize == Info.Size && Width <= RemainingBitsInField) {
placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField);
RemainingBitsInField -= Width;
@@ -2654,6 +2689,14 @@ void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
placeFieldAtOffset(CharUnits::Zero());
Size = std::max(Size, Info.Size);
// TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+ } else if (UseExternalLayout) {
+ auto FieldBitOffset = External.getExternalFieldOffset(FD);
+ placeFieldAtBitOffset(FieldBitOffset);
+ auto NewSize = Context.toCharUnitsFromBits(
+ llvm::alignTo(FieldBitOffset + Width, Context.getCharWidth()));
+ assert(NewSize >= Size && "bit field offset already allocated");
+ Size = NewSize;
+ Alignment = std::max(Alignment, Info.Alignment);
} else {
// Allocate a new block of memory and place the bitfield in it.
CharUnits FieldOffset = Size.alignTo(Info.Alignment);
@@ -3010,7 +3053,7 @@ const CXXMethodDecl *ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD)
return nullptr;
assert(RD->getDefinition() && "Cannot get key function for forward decl!");
- RD = cast<CXXRecordDecl>(RD->getDefinition());
+ RD = RD->getDefinition();
// Beware:
// 1) computing the key function might trigger deserialization, which might
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 982fd458493f..a041006c905e 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -11,14 +11,15 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/Stmt.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
-#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
@@ -55,7 +56,7 @@ static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
if (Initialized)
return StmtClassInfo[E];
- // Intialize the table on the first use.
+ // Initialize the table on the first use.
Initialized = true;
#define ABSTRACT_STMT(STMT)
#define STMT(CLASS, PARENT) \
@@ -127,7 +128,7 @@ Stmt *Stmt::IgnoreImplicit() {
return s;
}
-/// \brief Skip no-op (attributed, compound) container stmts and skip captured
+/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
Stmt *S = this;
@@ -147,18 +148,18 @@ Stmt *Stmt::IgnoreContainers(bool IgnoreCaptured) {
return S;
}
-/// \brief Strip off all label-like statements.
+/// Strip off all label-like statements.
///
/// This will strip off label statements, case statements, attributed
/// statements and default statements recursively.
const Stmt *Stmt::stripLabelLikeStatements() const {
const Stmt *S = this;
while (true) {
- if (const LabelStmt *LS = dyn_cast<LabelStmt>(S))
+ if (const auto *LS = dyn_cast<LabelStmt>(S))
S = LS->getSubStmt();
- else if (const SwitchCase *SC = dyn_cast<SwitchCase>(S))
+ else if (const auto *SC = dyn_cast<SwitchCase>(S))
S = SC->getSubStmt();
- else if (const AttributedStmt *AS = dyn_cast<AttributedStmt>(S))
+ else if (const auto *AS = dyn_cast<AttributedStmt>(S))
S = AS->getSubStmt();
else
return S;
@@ -173,14 +174,14 @@ namespace {
// These silly little functions have to be static inline to suppress
// unused warnings, and they have to be defined to suppress other
// warnings.
- static inline good is_good(good) { return good(); }
+ static good is_good(good) { return good(); }
typedef Stmt::child_range children_t();
template <class T> good implements_children(children_t T::*) {
return good();
}
LLVM_ATTRIBUTE_UNUSED
- static inline bad implements_children(children_t Stmt::*) {
+ static bad implements_children(children_t Stmt::*) {
return bad();
}
@@ -189,7 +190,7 @@ namespace {
return good();
}
LLVM_ATTRIBUTE_UNUSED
- static inline bad implements_getLocStart(getLocStart_t Stmt::*) {
+ static bad implements_getLocStart(getLocStart_t Stmt::*) {
return bad();
}
@@ -198,7 +199,7 @@ namespace {
return good();
}
LLVM_ATTRIBUTE_UNUSED
- static inline bad implements_getLocEnd(getLocEnd_t Stmt::*) {
+ static bad implements_getLocEnd(getLocEnd_t Stmt::*) {
return bad();
}
@@ -351,49 +352,49 @@ AttributedStmt *AttributedStmt::CreateEmpty(const ASTContext &C,
}
std::string AsmStmt::generateAsmString(const ASTContext &C) const {
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->generateAsmString(C);
- if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ if (const auto *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->generateAsmString(C);
llvm_unreachable("unknown asm statement kind!");
}
StringRef AsmStmt::getOutputConstraint(unsigned i) const {
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getOutputConstraint(i);
- if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ if (const auto *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getOutputConstraint(i);
llvm_unreachable("unknown asm statement kind!");
}
const Expr *AsmStmt::getOutputExpr(unsigned i) const {
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getOutputExpr(i);
- if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ if (const auto *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getOutputExpr(i);
llvm_unreachable("unknown asm statement kind!");
}
StringRef AsmStmt::getInputConstraint(unsigned i) const {
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getInputConstraint(i);
- if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ if (const auto *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getInputConstraint(i);
llvm_unreachable("unknown asm statement kind!");
}
const Expr *AsmStmt::getInputExpr(unsigned i) const {
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getInputExpr(i);
- if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ if (const auto *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getInputExpr(i);
llvm_unreachable("unknown asm statement kind!");
}
StringRef AsmStmt::getClobber(unsigned i) const {
- if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
+ if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(this))
return gccAsmStmt->getClobber(i);
- if (const MSAsmStmt *msAsmStmt = dyn_cast<MSAsmStmt>(this))
+ if (const auto *msAsmStmt = dyn_cast<MSAsmStmt>(this))
return msAsmStmt->getClobber(i);
llvm_unreachable("unknown asm statement kind!");
}
@@ -681,14 +682,14 @@ std::string GCCAsmStmt::generateAsmString(const ASTContext &C) const {
AnalyzeAsmString(Pieces, C, DiagOffs);
std::string AsmString;
- for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
- if (Pieces[i].isString())
- AsmString += Pieces[i].getString();
- else if (Pieces[i].getModifier() == '\0')
- AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ for (const auto &Piece : Pieces) {
+ if (Piece.isString())
+ AsmString += Piece.getString();
+ else if (Piece.getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Piece.getOperandNo());
else
- AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
- Pieces[i].getModifier() + '}';
+ AsmString += "${" + llvm::utostr(Piece.getOperandNo()) + ':' +
+ Piece.getModifier() + '}';
}
return AsmString;
}
@@ -804,7 +805,7 @@ VarDecl *IfStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return nullptr;
- DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ auto *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -839,7 +840,7 @@ VarDecl *ForStmt::getConditionVariable() const {
if (!SubExprs[CONDVAR])
return nullptr;
- DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
+ auto *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -867,7 +868,7 @@ VarDecl *SwitchStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return nullptr;
- DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ auto *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -901,7 +902,7 @@ VarDecl *WhileStmt::getConditionVariable() const {
if (!SubExprs[VAR])
return nullptr;
- DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ auto *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
@@ -918,8 +919,7 @@ void WhileStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
// IndirectGotoStmt
LabelDecl *IndirectGotoStmt::getConstantTarget() {
- if (AddrLabelExpr *E =
- dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
+ if (auto *E = dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
return E->getLabel();
return nullptr;
}
@@ -1105,18 +1105,18 @@ const CapturedDecl *CapturedStmt::getCapturedDecl() const {
return CapDeclAndKind.getPointer();
}
-/// \brief Set the outlined function declaration.
+/// Set the outlined function declaration.
void CapturedStmt::setCapturedDecl(CapturedDecl *D) {
assert(D && "null CapturedDecl");
CapDeclAndKind.setPointer(D);
}
-/// \brief Retrieve the captured region kind.
+/// Retrieve the captured region kind.
CapturedRegionKind CapturedStmt::getCapturedRegionKind() const {
return CapDeclAndKind.getInt();
}
-/// \brief Set the captured region kind.
+/// Set the captured region kind.
void CapturedStmt::setCapturedRegionKind(CapturedRegionKind Kind) {
CapDeclAndKind.setInt(Kind);
}
diff --git a/lib/AST/StmtCXX.cpp b/lib/AST/StmtCXX.cpp
index 666f5dcc9d97..bf2d6a16fb5f 100644
--- a/lib/AST/StmtCXX.cpp
+++ b/lib/AST/StmtCXX.cpp
@@ -25,18 +25,14 @@ QualType CXXCatchStmt::getCaughtType() const {
CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
Stmt *tryBlock, ArrayRef<Stmt *> handlers) {
- std::size_t Size = sizeof(CXXTryStmt);
- Size += ((handlers.size() + 1) * sizeof(Stmt *));
-
+ const size_t Size = totalSizeToAlloc<Stmt *>(handlers.size() + 1);
void *Mem = C.Allocate(Size, alignof(CXXTryStmt));
return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
}
CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
unsigned numHandlers) {
- std::size_t Size = sizeof(CXXTryStmt);
- Size += ((numHandlers + 1) * sizeof(Stmt *));
-
+ const size_t Size = totalSizeToAlloc<Stmt *>(numHandlers + 1);
void *Mem = C.Allocate(Size, alignof(CXXTryStmt));
return new (Mem) CXXTryStmt(Empty, numHandlers);
}
@@ -44,7 +40,7 @@ CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
ArrayRef<Stmt *> handlers)
: Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
- Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
+ Stmt **Stmts = getStmts();
Stmts[0] = tryBlock;
std::copy(handlers.begin(), handlers.end(), Stmts + 1);
}
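The sizing change implies CXXTryStmt now uses llvm::TrailingObjects (getStmts() and totalSizeToAlloc are its idioms); in sketch form:

    // Old, hand-rolled allocation size:
    //   sizeof(CXXTryStmt) + (handlers.size() + 1) * sizeof(Stmt *)
    // New, via llvm::TrailingObjects<CXXTryStmt, Stmt *>:
    //   totalSizeToAlloc<Stmt *>(handlers.size() + 1)
    // The extra slot holds the try block ahead of the handlers, and
    // TrailingObjects also accounts for any padding between the object
    // and its trailing Stmt* array.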
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index d7e668a83280..dad57de8940b 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1,4 +1,4 @@
-//===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===//
+//===- StmtPrinter.cpp - Printing implementation for Stmt ASTs ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,30 +14,60 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/ExpressionTraits.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <string>
+
using namespace clang;
//===----------------------------------------------------------------------===//
// StmtPrinter Visitor
//===----------------------------------------------------------------------===//
-namespace {
+namespace {
+
class StmtPrinter : public StmtVisitor<StmtPrinter> {
raw_ostream &OS;
unsigned IndentLevel;
- clang::PrinterHelper* Helper;
+ PrinterHelper* Helper;
PrintingPolicy Policy;
const ASTContext *Context;
@@ -100,9 +130,11 @@ namespace {
void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
Indent() << "<<unknown stmt type>>\n";
}
+
void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
OS << "<<unknown expr type>>";
}
+
void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
#define ABSTRACT_STMT(CLASS)
@@ -110,7 +142,8 @@ namespace {
void Visit##CLASS(CLASS *Node);
#include "clang/AST/StmtNodes.inc"
};
-}
+
+} // namespace
//===----------------------------------------------------------------------===//
// Stmt printing methods.
@@ -131,7 +164,7 @@ void StmtPrinter::PrintRawDecl(Decl *D) {
}
void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
- SmallVector<Decl*, 2> Decls(S->decls());
+ SmallVector<Decl *, 2> Decls(S->decls());
Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
}
@@ -189,7 +222,7 @@ void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
PrintExpr(If->getCond());
OS << ')';
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
+ if (auto *CS = dyn_cast<CompoundStmt>(If->getThen())) {
OS << ' ';
PrintRawCompoundStmt(CS);
OS << (If->getElse() ? ' ' : '\n');
@@ -202,11 +235,11 @@ void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
if (Stmt *Else = If->getElse()) {
OS << "else";
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Else)) {
OS << ' ';
PrintRawCompoundStmt(CS);
OS << '\n';
- } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) {
+ } else if (auto *ElseIf = dyn_cast<IfStmt>(Else)) {
OS << ' ';
PrintRawIfStmt(ElseIf);
} else {
@@ -230,7 +263,7 @@ void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
OS << ")";
// Pretty print compoundstmt bodies (very common).
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
OS << " ";
PrintRawCompoundStmt(CS);
OS << "\n";
@@ -252,7 +285,7 @@ void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
void StmtPrinter::VisitDoStmt(DoStmt *Node) {
Indent() << "do ";
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
PrintRawCompoundStmt(CS);
OS << " ";
} else {
@@ -269,7 +302,7 @@ void StmtPrinter::VisitDoStmt(DoStmt *Node) {
void StmtPrinter::VisitForStmt(ForStmt *Node) {
Indent() << "for (";
if (Node->getInit()) {
- if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit()))
+ if (auto *DS = dyn_cast<DeclStmt>(Node->getInit()))
PrintRawDeclStmt(DS);
else
PrintExpr(cast<Expr>(Node->getInit()));
@@ -286,7 +319,7 @@ void StmtPrinter::VisitForStmt(ForStmt *Node) {
}
OS << ") ";
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
PrintRawCompoundStmt(CS);
OS << "\n";
} else {
@@ -297,7 +330,7 @@ void StmtPrinter::VisitForStmt(ForStmt *Node) {
void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
Indent() << "for (";
- if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement()))
+ if (auto *DS = dyn_cast<DeclStmt>(Node->getElement()))
PrintRawDeclStmt(DS);
else
PrintExpr(cast<Expr>(Node->getElement()));
@@ -305,7 +338,7 @@ void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
PrintExpr(Node->getCollection());
OS << ") ";
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
PrintRawCompoundStmt(CS);
OS << "\n";
} else {
@@ -365,7 +398,6 @@ void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
if (Policy.IncludeNewlines) OS << "\n";
}
-
void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
Indent() << "return";
if (Node->getRetValue()) {
@@ -376,7 +408,6 @@ void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
if (Policy.IncludeNewlines) OS << "\n";
}
-
void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
Indent() << "asm ";
@@ -458,7 +489,7 @@ void StmtPrinter::VisitCapturedStmt(CapturedStmt *Node) {
void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
Indent() << "@try";
- if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
+ if (auto *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
PrintRawCompoundStmt(TS);
OS << "\n";
}
@@ -471,14 +502,13 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
PrintRawDecl(DS);
}
OS << ")";
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
+ if (auto *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
PrintRawCompoundStmt(CS);
OS << "\n";
}
}
- if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>(
- Node->getFinallyStmt())) {
+ if (auto *FS = static_cast<ObjCAtFinallyStmt *>(Node->getFinallyStmt())) {
Indent() << "@finally";
PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
OS << "\n";
@@ -596,20 +626,26 @@ void StmtPrinter::VisitSEHLeaveStmt(SEHLeaveStmt *Node) {
//===----------------------------------------------------------------------===//
namespace {
+
class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
raw_ostream &OS;
const PrintingPolicy &Policy;
- /// \brief Process clauses with list of variables.
+
+ /// Process clauses with list of variables.
template <typename T>
void VisitOMPClauseList(T *Node, char StartSym);
+
public:
OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
- : OS(OS), Policy(Policy) { }
+ : OS(OS), Policy(Policy) {}
+
#define OPENMP_CLAUSE(Name, Class) \
void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
+} // namespace
+
void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
OS << "if(";
if (Node->getNameModifier() != OMPD_unknown)
@@ -776,7 +812,7 @@ void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
I != E; ++I) {
assert(*I && "Expected non-null Stmt");
OS << (I == Node->varlist_begin() ? StartSym : ',');
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*I)) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(*I)) {
if (isa<OMPCapturedExprDecl>(DRE->getDecl()))
DRE->printPretty(OS, nullptr, Policy, 0);
else
@@ -1017,7 +1053,6 @@ void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
OS << ")";
}
}
-}
//===----------------------------------------------------------------------===//
// OpenMP directives printing methods
@@ -1027,43 +1062,38 @@ void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S,
bool ForceNoStmt) {
OMPClausePrinter Printer(OS, Policy);
ArrayRef<OMPClause *> Clauses = S->clauses();
- for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
- I != E; ++I)
- if (*I && !(*I)->isImplicit()) {
- Printer.Visit(*I);
+ for (auto *Clause : Clauses)
+ if (Clause && !Clause->isImplicit()) {
OS << ' ';
+ Printer.Visit(Clause);
}
OS << "\n";
- if (S->hasAssociatedStmt() && S->getAssociatedStmt() && !ForceNoStmt) {
- assert(isa<CapturedStmt>(S->getAssociatedStmt()) &&
- "Expected captured statement!");
- Stmt *CS = cast<CapturedStmt>(S->getAssociatedStmt())->getCapturedStmt();
- PrintStmt(CS);
- }
+ if (!ForceNoStmt && S->hasAssociatedStmt())
+ PrintStmt(S->getInnermostCapturedStmt()->getCapturedStmt());
}
void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
- Indent() << "#pragma omp parallel ";
+ Indent() << "#pragma omp parallel";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPSimdDirective(OMPSimdDirective *Node) {
- Indent() << "#pragma omp simd ";
+ Indent() << "#pragma omp simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) {
- Indent() << "#pragma omp for ";
+ Indent() << "#pragma omp for";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPForSimdDirective(OMPForSimdDirective *Node) {
- Indent() << "#pragma omp for simd ";
+ Indent() << "#pragma omp for simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPSectionsDirective(OMPSectionsDirective *Node) {
- Indent() << "#pragma omp sections ";
+ Indent() << "#pragma omp sections";
PrintOMPExecutableDirective(Node);
}
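The separator now lives in the clause loop rather than at the end of each directive name, which drops the stray trailing space; a sketch of the printed output:

    // Before: "#pragma omp parallel private(a) "  (trailing space)
    // After:  "#pragma omp parallel private(a)"
    // A directive with no clauses also loses its trailing space:
    //          "#pragma omp parallel"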
@@ -1073,7 +1103,7 @@ void StmtPrinter::VisitOMPSectionDirective(OMPSectionDirective *Node) {
}
void StmtPrinter::VisitOMPSingleDirective(OMPSingleDirective *Node) {
- Indent() << "#pragma omp single ";
+ Indent() << "#pragma omp single";
PrintOMPExecutableDirective(Node);
}
@@ -1089,29 +1119,28 @@ void StmtPrinter::VisitOMPCriticalDirective(OMPCriticalDirective *Node) {
Node->getDirectiveName().printName(OS);
OS << ")";
}
- OS << " ";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPParallelForDirective(OMPParallelForDirective *Node) {
- Indent() << "#pragma omp parallel for ";
+ Indent() << "#pragma omp parallel for";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPParallelForSimdDirective(
OMPParallelForSimdDirective *Node) {
- Indent() << "#pragma omp parallel for simd ";
+ Indent() << "#pragma omp parallel for simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *Node) {
- Indent() << "#pragma omp parallel sections ";
+ Indent() << "#pragma omp parallel sections";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTaskDirective(OMPTaskDirective *Node) {
- Indent() << "#pragma omp task ";
+ Indent() << "#pragma omp task";
PrintOMPExecutableDirective(Node);
}
@@ -1131,61 +1160,61 @@ void StmtPrinter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *Node) {
}
void StmtPrinter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *Node) {
- Indent() << "#pragma omp taskgroup ";
+ Indent() << "#pragma omp taskgroup";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
- Indent() << "#pragma omp flush ";
+ Indent() << "#pragma omp flush";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
- Indent() << "#pragma omp ordered ";
- PrintOMPExecutableDirective(Node);
+ Indent() << "#pragma omp ordered";
+ PrintOMPExecutableDirective(Node, Node->hasClausesOfKind<OMPDependClause>());
}
void StmtPrinter::VisitOMPAtomicDirective(OMPAtomicDirective *Node) {
- Indent() << "#pragma omp atomic ";
+ Indent() << "#pragma omp atomic";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetDirective(OMPTargetDirective *Node) {
- Indent() << "#pragma omp target ";
+ Indent() << "#pragma omp target";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetDataDirective(OMPTargetDataDirective *Node) {
- Indent() << "#pragma omp target data ";
+ Indent() << "#pragma omp target data";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetEnterDataDirective(
OMPTargetEnterDataDirective *Node) {
- Indent() << "#pragma omp target enter data ";
+ Indent() << "#pragma omp target enter data";
PrintOMPExecutableDirective(Node, /*ForceNoStmt=*/true);
}
void StmtPrinter::VisitOMPTargetExitDataDirective(
OMPTargetExitDataDirective *Node) {
- Indent() << "#pragma omp target exit data ";
+ Indent() << "#pragma omp target exit data";
PrintOMPExecutableDirective(Node, /*ForceNoStmt=*/true);
}
void StmtPrinter::VisitOMPTargetParallelDirective(
OMPTargetParallelDirective *Node) {
- Indent() << "#pragma omp target parallel ";
+ Indent() << "#pragma omp target parallel";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *Node) {
- Indent() << "#pragma omp target parallel for ";
+ Indent() << "#pragma omp target parallel for";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTeamsDirective(OMPTeamsDirective *Node) {
- Indent() << "#pragma omp teams ";
+ Indent() << "#pragma omp teams";
PrintOMPExecutableDirective(Node);
}
@@ -1198,111 +1227,111 @@ void StmtPrinter::VisitOMPCancellationPointDirective(
void StmtPrinter::VisitOMPCancelDirective(OMPCancelDirective *Node) {
Indent() << "#pragma omp cancel "
- << getOpenMPDirectiveName(Node->getCancelRegion()) << " ";
+ << getOpenMPDirectiveName(Node->getCancelRegion());
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *Node) {
- Indent() << "#pragma omp taskloop ";
+ Indent() << "#pragma omp taskloop";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTaskLoopSimdDirective(
OMPTaskLoopSimdDirective *Node) {
- Indent() << "#pragma omp taskloop simd ";
+ Indent() << "#pragma omp taskloop simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPDistributeDirective(OMPDistributeDirective *Node) {
- Indent() << "#pragma omp distribute ";
+ Indent() << "#pragma omp distribute";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetUpdateDirective(
OMPTargetUpdateDirective *Node) {
- Indent() << "#pragma omp target update ";
+ Indent() << "#pragma omp target update";
PrintOMPExecutableDirective(Node, /*ForceNoStmt=*/true);
}
void StmtPrinter::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *Node) {
- Indent() << "#pragma omp distribute parallel for ";
+ Indent() << "#pragma omp distribute parallel for";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPDistributeParallelForSimdDirective(
OMPDistributeParallelForSimdDirective *Node) {
- Indent() << "#pragma omp distribute parallel for simd ";
+ Indent() << "#pragma omp distribute parallel for simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPDistributeSimdDirective(
OMPDistributeSimdDirective *Node) {
- Indent() << "#pragma omp distribute simd ";
+ Indent() << "#pragma omp distribute simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetParallelForSimdDirective(
OMPTargetParallelForSimdDirective *Node) {
- Indent() << "#pragma omp target parallel for simd ";
+ Indent() << "#pragma omp target parallel for simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetSimdDirective(OMPTargetSimdDirective *Node) {
- Indent() << "#pragma omp target simd ";
+ Indent() << "#pragma omp target simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTeamsDistributeDirective(
OMPTeamsDistributeDirective *Node) {
- Indent() << "#pragma omp teams distribute ";
+ Indent() << "#pragma omp teams distribute";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTeamsDistributeSimdDirective(
OMPTeamsDistributeSimdDirective *Node) {
- Indent() << "#pragma omp teams distribute simd ";
+ Indent() << "#pragma omp teams distribute simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTeamsDistributeParallelForSimdDirective(
OMPTeamsDistributeParallelForSimdDirective *Node) {
- Indent() << "#pragma omp teams distribute parallel for simd ";
+ Indent() << "#pragma omp teams distribute parallel for simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *Node) {
- Indent() << "#pragma omp teams distribute parallel for ";
+ Indent() << "#pragma omp teams distribute parallel for";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetTeamsDirective(OMPTargetTeamsDirective *Node) {
- Indent() << "#pragma omp target teams ";
+ Indent() << "#pragma omp target teams";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetTeamsDistributeDirective(
OMPTargetTeamsDistributeDirective *Node) {
- Indent() << "#pragma omp target teams distribute ";
+ Indent() << "#pragma omp target teams distribute";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetTeamsDistributeParallelForDirective(
OMPTargetTeamsDistributeParallelForDirective *Node) {
- Indent() << "#pragma omp target teams distribute parallel for ";
+ Indent() << "#pragma omp target teams distribute parallel for";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetTeamsDistributeParallelForSimdDirective(
OMPTargetTeamsDistributeParallelForSimdDirective *Node) {
- Indent() << "#pragma omp target teams distribute parallel for simd ";
+ Indent() << "#pragma omp target teams distribute parallel for simd";
PrintOMPExecutableDirective(Node);
}
void StmtPrinter::VisitOMPTargetTeamsDistributeSimdDirective(
OMPTargetTeamsDistributeSimdDirective *Node) {
- Indent() << "#pragma omp target teams distribute simd ";
+ Indent() << "#pragma omp target teams distribute simd";
PrintOMPExecutableDirective(Node);
}
@@ -1311,7 +1340,7 @@ void StmtPrinter::VisitOMPTargetTeamsDistributeSimdDirective(
//===----------------------------------------------------------------------===//
void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
- if (auto *OCED = dyn_cast<OMPCapturedExprDecl>(Node->getDecl())) {
+ if (const auto *OCED = dyn_cast<OMPCapturedExprDecl>(Node->getDecl())) {
OCED->getInit()->IgnoreImpCasts()->printPretty(OS, nullptr, Policy);
return;
}
@@ -1347,8 +1376,7 @@ void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
static bool isImplicitSelf(const Expr *E) {
if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
- if (const ImplicitParamDecl *PD =
- dyn_cast<ImplicitParamDecl>(DRE->getDecl())) {
+ if (const auto *PD = dyn_cast<ImplicitParamDecl>(DRE->getDecl())) {
if (PD->getParameterKind() == ImplicitParamDecl::ObjCSelf &&
DRE->getLocStart().isInvalid())
return true;
@@ -1378,14 +1406,17 @@ void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
OS << Node->getClassReceiver()->getName() << ".";
}
- if (Node->isImplicitProperty())
- Node->getImplicitPropertyGetter()->getSelector().print(OS);
- else
+ if (Node->isImplicitProperty()) {
+ if (const auto *Getter = Node->getImplicitPropertyGetter())
+ Getter->getSelector().print(OS);
+ else
+ OS << SelectorTable::getPropertyNameFromSetterSelector(
+ Node->getImplicitPropertySetter()->getSelector());
+ } else
OS << Node->getExplicitProperty()->getName();
}
void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
-
PrintExpr(Node->getBaseExpr());
OS << "[";
PrintExpr(Node->getKeyExpr());
@@ -1498,6 +1529,28 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
}
}
+void StmtPrinter::VisitFixedPointLiteral(FixedPointLiteral *Node) {
+ if (Policy.ConstantsAsWritten && printExprAsWritten(OS, Node, Context))
+ return;
+ OS << Node->getValueAsString(/*Radix=*/10);
+
+ switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unexpected type for fixed point literal!");
+ case BuiltinType::ShortFract: OS << "hr"; break;
+ case BuiltinType::ShortAccum: OS << "hk"; break;
+ case BuiltinType::UShortFract: OS << "uhr"; break;
+ case BuiltinType::UShortAccum: OS << "uhk"; break;
+ case BuiltinType::Fract: OS << "r"; break;
+ case BuiltinType::Accum: OS << "k"; break;
+ case BuiltinType::UFract: OS << "ur"; break;
+ case BuiltinType::UAccum: OS << "uk"; break;
+ case BuiltinType::LongFract: OS << "lr"; break;
+ case BuiltinType::LongAccum: OS << "lk"; break;
+ case BuiltinType::ULongFract: OS << "ulr"; break;
+ case BuiltinType::ULongAccum: OS << "ulk"; break;
+ }
+}
+
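For reference, the suffixes printed above are the Embedded-C fixed-point literal spellings; a sketch of the round trip (assuming fixed-point support is enabled, e.g. via -ffixed-point):

    _Fract f = 0.5r;                  // pretty-prints as 0.5r
    unsigned short _Accum a = 1.5uhk; // pretty-prints as 1.5uhk
    long _Fract lf = 0.25lr;          // pretty-prints as 0.25lr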
static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node,
bool PrintSuffix) {
SmallString<16> Str;
@@ -1535,11 +1588,13 @@ void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
Str->outputString(OS);
}
+
void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
OS << "(";
PrintExpr(Node->getSubExpr());
OS << ")";
}
+
void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
if (!Node->isPostfix()) {
OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
@@ -1695,7 +1750,7 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
if (!Policy.SuppressImplicitBase || !isImplicitThis(Node->getBase())) {
PrintExpr(Node->getBase());
- MemberExpr *ParentMember = dyn_cast<MemberExpr>(Node->getBase());
+ auto *ParentMember = dyn_cast<MemberExpr>(Node->getBase());
FieldDecl *ParentDecl =
ParentMember ? dyn_cast<FieldDecl>(ParentMember->getMemberDecl())
: nullptr;
@@ -1704,7 +1759,7 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
OS << (Node->isArrow() ? "->" : ".");
}
- if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
+ if (auto *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
if (FD->isAnonymousStructOrUnion())
return;
@@ -1716,6 +1771,7 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
if (Node->hasExplicitTemplateArgs())
printTemplateArgumentList(OS, Node->template_arguments(), Policy);
}
+
void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) {
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->isa" : ".isa");
@@ -1726,32 +1782,38 @@ void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
OS << ".";
OS << Node->getAccessor().getName();
}
+
void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
OS << '(';
Node->getTypeAsWritten().print(OS, Policy);
OS << ')';
PrintExpr(Node->getSubExpr());
}
+
void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
OS << '(';
Node->getType().print(OS, Policy);
OS << ')';
PrintExpr(Node->getInitializer());
}
+
void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
// No need to print anything, simply forward to the subexpression.
PrintExpr(Node->getSubExpr());
}
+
void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
PrintExpr(Node->getLHS());
OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
PrintExpr(Node->getRHS());
}
+
void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
PrintExpr(Node->getLHS());
OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
PrintExpr(Node->getRHS());
}
+
void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
PrintExpr(Node->getCond());
OS << " ? ";
@@ -1768,6 +1830,7 @@ StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) {
OS << " ?: ";
PrintExpr(Node->getFalseExpr());
}
+
void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
OS << "&&" << Node->getLabel()->getName();
}
@@ -2100,7 +2163,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString();
break;
case UserDefinedLiteral::LOK_Template: {
- DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
+ const auto *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
const TemplateArgumentList *Args =
cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
assert(Args);
@@ -2121,13 +2184,13 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
}
case UserDefinedLiteral::LOK_Integer: {
// Print integer literal without suffix.
- IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
+ const auto *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
OS << Int->getValue().toString(10, /*isSigned*/false);
break;
}
case UserDefinedLiteral::LOK_Floating: {
// Print floating literal without suffix.
- FloatingLiteral *Float = cast<FloatingLiteral>(Node->getCookedLiteral());
+ auto *Float = cast<FloatingLiteral>(Node->getCookedLiteral());
PrintFloatingLiteral(OS, Float, /*PrintSuffix=*/false);
break;
}
@@ -2240,9 +2303,11 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
case LCK_This:
OS << "this";
break;
+
case LCK_StarThis:
OS << "*this";
break;
+
case LCK_ByRef:
if (Node->getCaptureDefault() != LCD_ByRef || Node->isInitCapture(C))
OS << '&';
@@ -2252,6 +2317,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
case LCK_ByCopy:
OS << C->getCapturedVar()->getName();
break;
+
case LCK_VLAType:
llvm_unreachable("VLA type in explicit captures.");
}
@@ -2265,7 +2331,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
OS << " (";
CXXMethodDecl *Method = Node->getCallOperator();
NeedComma = false;
- for (auto P : Method->parameters()) {
+ for (const auto *P : Method->parameters()) {
if (NeedComma) {
OS << ", ";
} else {
@@ -2284,8 +2350,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
if (Node->isMutable())
OS << " mutable";
- const FunctionProtoType *Proto
- = Method->getType()->getAs<FunctionProtoType>();
+ auto *Proto = Method->getType()->getAs<FunctionProtoType>();
Proto->printExceptionSpecification(OS, Policy);
// FIXME: Attributes
@@ -2569,13 +2634,11 @@ void StmtPrinter::VisitCoawaitExpr(CoawaitExpr *S) {
PrintExpr(S->getOperand());
}
-
void StmtPrinter::VisitDependentCoawaitExpr(DependentCoawaitExpr *S) {
OS << "co_await ";
PrintExpr(S->getOperand());
}
-
void StmtPrinter::VisitCoyieldExpr(CoyieldExpr *S) {
OS << "co_yield ";
PrintExpr(S->getOperand());
@@ -2708,7 +2771,7 @@ void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
(*AI)->getType().print(OS, Policy, ParamStr);
}
- const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ const auto *FT = cast<FunctionProtoType>(AFT);
if (FT->isVariadic()) {
if (!BD->param_empty()) OS << ", ";
OS << "...";
@@ -2755,4 +2818,4 @@ void Stmt::printPretty(raw_ostream &OS, PrinterHelper *Helper,
//===----------------------------------------------------------------------===//
// Implement virtual destructor.
-PrinterHelper::~PrinterHelper() {}
+PrinterHelper::~PrinterHelper() = default;
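Only the spelling changes here: the destructor definition stays out of line, which is what matters, since under the Itanium ABI an out-of-line virtual function definition acts as the class's key function and anchors its vtable to one translation unit. A sketch of the pattern with a hypothetical class:

    struct Helper {
      virtual ~Helper();           // declared, not defined, in the header
    };
    Helper::~Helper() = default;   // single out-of-line definition: the
                                   // "key function" that emits the vtable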
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index 00ef0da18bbb..791ec569cc41 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -38,37 +38,39 @@ namespace {
void VisitStmt(const Stmt *S);
+ virtual void HandleStmtClass(Stmt::StmtClass SC) = 0;
+
#define STMT(Node, Base) void Visit##Node(const Node *S);
#include "clang/AST/StmtNodes.inc"
- /// \brief Visit a declaration that is referenced within an expression
+ /// Visit a declaration that is referenced within an expression
/// or statement.
virtual void VisitDecl(const Decl *D) = 0;
- /// \brief Visit a type that is referenced within an expression or
+ /// Visit a type that is referenced within an expression or
/// statement.
virtual void VisitType(QualType T) = 0;
- /// \brief Visit a name that occurs within an expression or statement.
- virtual void VisitName(DeclarationName Name) = 0;
+ /// Visit a name that occurs within an expression or statement.
+ virtual void VisitName(DeclarationName Name, bool TreatAsDecl = false) = 0;
- /// \brief Visit identifiers that are not in Decl's or Type's.
+ /// Visit identifiers that are not in Decl's or Type's.
virtual void VisitIdentifierInfo(IdentifierInfo *II) = 0;
- /// \brief Visit a nested-name-specifier that occurs within an expression
+ /// Visit a nested-name-specifier that occurs within an expression
/// or statement.
virtual void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) = 0;
- /// \brief Visit a template name that occurs within an expression or
+ /// Visit a template name that occurs within an expression or
/// statement.
virtual void VisitTemplateName(TemplateName Name) = 0;
- /// \brief Visit template arguments that occur within an expression or
+ /// Visit template arguments that occur within an expression or
/// statement.
void VisitTemplateArguments(const TemplateArgumentLoc *Args,
unsigned NumArgs);
- /// \brief Visit a single template argument.
+ /// Visit a single template argument.
void VisitTemplateArgument(const TemplateArgument &Arg);
};
@@ -80,6 +82,10 @@ namespace {
const ASTContext &Context, bool Canonical)
: StmtProfiler(ID, Canonical), Context(Context) {}
private:
+ void HandleStmtClass(Stmt::StmtClass SC) override {
+ ID.AddInteger(SC);
+ }
+
void VisitDecl(const Decl *D) override {
ID.AddInteger(D ? D->getKind() : 0);
@@ -134,7 +140,7 @@ namespace {
ID.AddPointer(T.getAsOpaquePtr());
}
- void VisitName(DeclarationName Name) override {
+ void VisitName(DeclarationName Name, bool /*TreatAsDecl*/) override {
ID.AddPointer(Name.getAsOpaquePtr());
}
@@ -163,11 +169,26 @@ namespace {
: StmtProfiler(ID, false), Hash(Hash) {}
private:
+ void HandleStmtClass(Stmt::StmtClass SC) override {
+ if (SC == Stmt::UnresolvedLookupExprClass) {
+ // Pretend that the name looked up is a Decl due to how templates
+ // handle some Decl lookups.
+ ID.AddInteger(Stmt::DeclRefExprClass);
+ } else {
+ ID.AddInteger(SC);
+ }
+ }
+
void VisitType(QualType T) override {
Hash.AddQualType(T);
}
- void VisitName(DeclarationName Name) override {
+ void VisitName(DeclarationName Name, bool TreatAsDecl) override {
+ if (TreatAsDecl) {
+ // A Decl can be null, so each Decl is preceded by a boolean to
+ // store its nullness. Add a boolean here to match.
+ ID.AddBoolean(true);
+ }
Hash.AddDeclarationName(Name);
}
void VisitIdentifierInfo(IdentifierInfo *II) override {
@@ -196,7 +217,9 @@ namespace {
void StmtProfiler::VisitStmt(const Stmt *S) {
assert(S && "Requires non-null Stmt pointer");
- ID.AddInteger(S->getStmtClass());
+
+ HandleStmtClass(S->getStmtClass());
+
for (const Stmt *SubStmt : S->children()) {
if (SubStmt)
Visit(SubStmt);
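VisitStmt no longer hashes the statement class directly; it defers to the new HandleStmtClass hook so the ODR hasher can canonicalize UnresolvedLookupExpr to DeclRefExpr while the FoldingSet profiler keeps the raw class. A minimal sketch of that dispatch, with simplified names and plain integers standing in for Stmt::StmtClass:

    struct ProfilerBase {
      virtual ~ProfilerBase() = default;
      virtual void HandleStmtClass(int SC) = 0;          // the hook
      void VisitStmt(int SC) { HandleStmtClass(SC); /* ...then children */ }
    };
    struct ODRLikeProfiler : ProfilerBase {
      int Last = 0;
      void HandleStmtClass(int SC) override {
        // Canonicalize one class to another before recording, as above.
        Last = (SC == /*UnresolvedLookup*/ 7) ? /*DeclRef*/ 3 : SC;
      }
    };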
@@ -382,7 +405,7 @@ StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) {
namespace {
class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
StmtProfiler *Profiler;
- /// \brief Process clauses with list of variables.
+ /// Process clauses with list of variables.
template <typename T>
void VisitOMPClauseList(T *Node);
@@ -966,8 +989,11 @@ void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
if (!Canonical)
VisitNestedNameSpecifier(S->getQualifier());
VisitDecl(S->getDecl());
- if (!Canonical)
- VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+ if (!Canonical) {
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+ }
}
void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
@@ -981,6 +1007,12 @@ void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
}
+void StmtProfiler::VisitFixedPointLiteral(const FixedPointLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+ ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
+}
+
void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
VisitExpr(S);
ID.AddInteger(S->getKind());
@@ -1659,7 +1691,7 @@ StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
VisitExpr(S);
VisitNestedNameSpecifier(S->getQualifier());
- VisitName(S->getName());
+ VisitName(S->getName(), /*TreatAsDecl*/ true);
ID.AddBoolean(S->hasExplicitTemplateArgs());
if (S->hasExplicitTemplateArgs())
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
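Both additions in this file apply the same hashing rule: an optional payload must be preceded by a presence bit, and anything hashed "as a Decl" must match the Decl encoding, which always leads with a nullness boolean. Without that, two different ASTs could emit identical ID streams. A sketch of the convention using llvm::FoldingSetNodeID (the encoding shown is an assumption spelled out here, not an API guarantee):

    #include "llvm/ADT/FoldingSet.h"
    void hashOptionalDecl(llvm::FoldingSetNodeID &ID, const void *D) {
      ID.AddBoolean(D != nullptr);   // presence/nullness bit comes first
      if (D)
        ID.AddPointer(D);            // payload only when present
    }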
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index e81c11a77825..394e9f38bcfd 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -43,7 +43,7 @@
using namespace clang;
-/// \brief Print a template integral argument value.
+/// Print a template integral argument value.
///
/// \param TemplArg the TemplateArgument instance to print.
///
@@ -406,7 +406,7 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
}
case Declaration: {
- NamedDecl *ND = cast<NamedDecl>(getAsDecl());
+ NamedDecl *ND = getAsDecl();
Out << '&';
if (ND->getDeclName()) {
// FIXME: distinguish between pointer and reference args?
diff --git a/lib/AST/TemplateName.cpp b/lib/AST/TemplateName.cpp
index bd04fd8366b3..548468ed17cd 100644
--- a/lib/AST/TemplateName.cpp
+++ b/lib/AST/TemplateName.cpp
@@ -185,6 +185,11 @@ bool TemplateName::isInstantiationDependent() const {
}
bool TemplateName::containsUnexpandedParameterPack() const {
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (QTN->getQualifier()->containsUnexpandedParameterPack())
+ return true;
+ }
+
if (TemplateDecl *Template = getAsTemplateDecl()) {
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Template))
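The new check covers the case where the unexpanded pack lives in the template name's qualifier rather than in the template itself. An illustrative (hypothetical) construct where the nested-name-specifier names a pack:

    #include <tuple>
    #include <type_traits>
    struct A { template <typename U> struct apply { using type = U*; }; };
    struct B { template <typename U> struct apply { using type = U&; }; };
    template <typename... Ts>
    struct Map {
      // "Ts::template apply" is a qualified template name; the pack sits
      // in its qualifier "Ts::", which the new check now inspects.
      using type = std::tuple<typename Ts::template apply<int>::type...>;
    };
    static_assert(std::is_same<Map<A, B>::type,
                               std::tuple<int*, int&>>::value, "");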
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 38f2a16fa16f..fad8c0d1c6b2 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -29,6 +29,7 @@
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
@@ -87,6 +88,16 @@ const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
return nullptr;
}
+bool QualType::mayBeDynamicClass() const {
+ const auto *ClassDecl = getTypePtr()->getPointeeCXXRecordDecl();
+ return ClassDecl && ClassDecl->mayBeDynamicClass();
+}
+
+bool QualType::mayBeNotDynamicClass() const {
+ const auto *ClassDecl = getTypePtr()->getPointeeCXXRecordDecl();
+ return !ClassDecl || ClassDecl->mayBeNonDynamicClass();
+}
+
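Note the deliberate asymmetry: both predicates express possibility, so they are not each other's negation and can both be true for the same type. A sketch of the logic with a hypothetical record query in place of CXXRecordDecl:

    struct RecordInfo { bool CouldBeDynamic; bool CouldBeNonDynamic; };
    // Unknown pointee (e.g. not a class type): we cannot promise "dynamic",
    // but "not dynamic" remains possible.
    bool mayBeDynamic(const RecordInfo *R) { return R && R->CouldBeDynamic; }
    bool mayBeNotDynamic(const RecordInfo *R) { return !R || R->CouldBeNonDynamic; }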
bool QualType::isConstant(QualType T, const ASTContext &Ctx) {
if (T.isConstQualified())
return true;
@@ -166,6 +177,27 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
E->Profile(ID, Context, true);
}
+DependentVectorType::DependentVectorType(
+ const ASTContext &Context, QualType ElementType, QualType CanonType,
+ Expr *SizeExpr, SourceLocation Loc, VectorType::VectorKind VecKind)
+ : Type(DependentVector, CanonType, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ ElementType->isVariablyModifiedType(),
+ ElementType->containsUnexpandedParameterPack() ||
+ (SizeExpr && SizeExpr->containsUnexpandedParameterPack())),
+ Context(Context), ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) {
+ VectorTypeBits.VecKind = VecKind;
+}
+
+void DependentVectorType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ElementType, const Expr *SizeExpr,
+ VectorType::VectorKind VecKind) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ ID.AddInteger(VecKind);
+ SizeExpr->Profile(ID, Context, true);
+}
+
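Profile must hash exactly the fields that determine a node's identity, because ASTContext uniques these types through a FoldingSet keyed on that ID. A simplified sketch of the lookup such a Profile enables (Set, Ctx, and the arguments are placeholders):

    llvm::FoldingSetNodeID ID;
    DependentVectorType::Profile(ID, Ctx, ElementType, SizeExpr, VecKind);
    void *InsertPos = nullptr;
    if (DependentVectorType *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
      return QualType(Existing, 0);   // reuse the canonical node
    // otherwise allocate a new node and call Set.InsertNode(New, InsertPos)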
DependentSizedExtVectorType::DependentSizedExtVectorType(const
ASTContext &Context,
QualType ElementType,
@@ -228,7 +260,7 @@ VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
/// This method should never be used when type qualifiers are meaningful.
const Type *Type::getArrayElementTypeNoTypeQual() const {
// If this is directly an array type, return it.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+ if (const auto *ATy = dyn_cast<ArrayType>(this))
return ATy->getElementType().getTypePtr();
// If the canonical form of this type isn't the right kind, reject it.
@@ -264,7 +296,7 @@ QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Type::Class: { \
- const Class##Type *ty = cast<Class##Type>(this); \
+ const auto *ty = cast<Class##Type>(this); \
if (!ty->isSugared()) return QualType(ty, 0); \
return ty->desugar(); \
}
@@ -283,7 +315,7 @@ SplitQualType QualType::getSplitDesugaredType(QualType T) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Type::Class: { \
- const Class##Type *Ty = cast<Class##Type>(CurTy); \
+ const auto *Ty = cast<Class##Type>(CurTy); \
if (!Ty->isSugared()) \
return SplitQualType(Ty, Qs); \
Cur = Ty->desugar(); \
@@ -312,7 +344,7 @@ SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Type::Class: { \
- const Class##Type *ty = cast<Class##Type>(split.Ty); \
+ const auto *ty = cast<Class##Type>(split.Ty); \
if (!ty->isSugared()) goto done; \
next = ty->desugar(); \
break; \
@@ -335,23 +367,23 @@ SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
QualType QualType::IgnoreParens(QualType T) {
// FIXME: this seems inherently un-qualifiers-safe.
- while (const ParenType *PT = T->getAs<ParenType>())
+ while (const auto *PT = T->getAs<ParenType>())
T = PT->getInnerType();
return T;
}
-/// \brief This will check for a T (which should be a Type which can act as
+/// This will check for a T (which should be a Type which can act as
/// sugar, such as a TypedefType) by removing any existing sugar until it
/// reaches a T or a non-sugared type.
template<typename T> static const T *getAsSugar(const Type *Cur) {
while (true) {
- if (const T *Sugar = dyn_cast<T>(Cur))
+ if (const auto *Sugar = dyn_cast<T>(Cur))
return Sugar;
switch (Cur->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Type::Class: { \
- const Class##Type *Ty = cast<Class##Type>(Cur); \
+ const auto *Ty = cast<Class##Type>(Cur); \
if (!Ty->isSugared()) return 0; \
Cur = Ty->desugar().getTypePtr(); \
break; \
@@ -384,7 +416,7 @@ const Type *Type::getUnqualifiedDesugaredType() const {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Class: { \
- const Class##Type *Ty = cast<Class##Type>(Cur); \
+ const auto *Ty = cast<Class##Type>(Cur); \
if (!Ty->isSugared()) return Cur; \
Cur = Ty->desugar().getTypePtr(); \
break; \
@@ -395,28 +427,31 @@ const Type *Type::getUnqualifiedDesugaredType() const {
}
bool Type::isClassType() const {
- if (const RecordType *RT = getAs<RecordType>())
+ if (const auto *RT = getAs<RecordType>())
return RT->getDecl()->isClass();
return false;
}
bool Type::isStructureType() const {
- if (const RecordType *RT = getAs<RecordType>())
+ if (const auto *RT = getAs<RecordType>())
return RT->getDecl()->isStruct();
return false;
}
+
bool Type::isObjCBoxableRecordType() const {
- if (const RecordType *RT = getAs<RecordType>())
+ if (const auto *RT = getAs<RecordType>())
return RT->getDecl()->hasAttr<ObjCBoxableAttr>();
return false;
}
+
bool Type::isInterfaceType() const {
- if (const RecordType *RT = getAs<RecordType>())
+ if (const auto *RT = getAs<RecordType>())
return RT->getDecl()->isInterface();
return false;
}
+
bool Type::isStructureOrClassType() const {
- if (const RecordType *RT = getAs<RecordType>()) {
+ if (const auto *RT = getAs<RecordType>()) {
RecordDecl *RD = RT->getDecl();
return RD->isStruct() || RD->isClass() || RD->isInterface();
}
@@ -424,19 +459,19 @@ bool Type::isStructureOrClassType() const {
}
bool Type::isVoidPointerType() const {
- if (const PointerType *PT = getAs<PointerType>())
+ if (const auto *PT = getAs<PointerType>())
return PT->getPointeeType()->isVoidType();
return false;
}
bool Type::isUnionType() const {
- if (const RecordType *RT = getAs<RecordType>())
+ if (const auto *RT = getAs<RecordType>())
return RT->getDecl()->isUnion();
return false;
}
bool Type::isComplexType() const {
- if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ if (const auto *CT = dyn_cast<ComplexType>(CanonicalType))
return CT->getElementType()->isFloatingType();
return false;
}
@@ -446,38 +481,44 @@ bool Type::isComplexIntegerType() const {
return getAsComplexIntegerType();
}
+bool Type::isScopedEnumeralType() const {
+ if (const auto *ET = getAs<EnumType>())
+ return ET->getDecl()->isScoped();
+ return false;
+}
+
const ComplexType *Type::getAsComplexIntegerType() const {
- if (const ComplexType *Complex = getAs<ComplexType>())
+ if (const auto *Complex = getAs<ComplexType>())
if (Complex->getElementType()->isIntegerType())
return Complex;
return nullptr;
}
QualType Type::getPointeeType() const {
- if (const PointerType *PT = getAs<PointerType>())
+ if (const auto *PT = getAs<PointerType>())
return PT->getPointeeType();
- if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ if (const auto *OPT = getAs<ObjCObjectPointerType>())
return OPT->getPointeeType();
- if (const BlockPointerType *BPT = getAs<BlockPointerType>())
+ if (const auto *BPT = getAs<BlockPointerType>())
return BPT->getPointeeType();
- if (const ReferenceType *RT = getAs<ReferenceType>())
+ if (const auto *RT = getAs<ReferenceType>())
return RT->getPointeeType();
- if (const MemberPointerType *MPT = getAs<MemberPointerType>())
+ if (const auto *MPT = getAs<MemberPointerType>())
return MPT->getPointeeType();
- if (const DecayedType *DT = getAs<DecayedType>())
+ if (const auto *DT = getAs<DecayedType>())
return DT->getPointeeType();
- return QualType();
+ return {};
}
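The repeated "return QualType()" to "return {};" changes throughout this file are behavior-preserving: a braced return value-initializes the function's return type. A minimal sketch of the equivalence with a hypothetical handle type:

    struct Handle { void *Ptr = nullptr; };
    Handle a() { return Handle(); }   // explicit null handle
    Handle b() { return {}; }         // value-initialized: same null handle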
const RecordType *Type::getAsStructureType() const {
// If this is directly a structure type, return it.
- if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (const auto *RT = dyn_cast<RecordType>(this)) {
if (RT->getDecl()->isStruct())
return RT;
}
// If the canonical form of this type isn't the right kind, reject it.
- if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
if (!RT->getDecl()->isStruct())
return nullptr;
@@ -490,13 +531,13 @@ const RecordType *Type::getAsStructureType() const {
const RecordType *Type::getAsUnionType() const {
// If this is directly a union type, return it.
- if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (const auto *RT = dyn_cast<RecordType>(this)) {
if (RT->getDecl()->isUnion())
return RT;
}
// If the canonical form of this type isn't the right kind, reject it.
- if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (const auto *RT = dyn_cast<RecordType>(CanonicalType)) {
if (!RT->getDecl()->isUnion())
return nullptr;
@@ -512,7 +553,7 @@ bool Type::isObjCIdOrObjectKindOfType(const ASTContext &ctx,
const ObjCObjectType *&bound) const {
bound = nullptr;
- const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
+ const auto *OPT = getAs<ObjCObjectPointerType>();
if (!OPT)
return false;
@@ -535,7 +576,7 @@ bool Type::isObjCIdOrObjectKindOfType(const ASTContext &ctx,
}
bool Type::isObjCClassOrClassKindOfType() const {
- const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
+ const auto *OPT = getAs<ObjCObjectPointerType>();
if (!OPT)
return false;
@@ -560,7 +601,7 @@ bool Type::isObjCClassOrClassKindOfType() const {
bool Type::isObjCInertUnsafeUnretainedType() const {
const Type *cur = this;
while (true) {
- if (auto attributed = dyn_cast<AttributedType>(cur)) {
+ if (const auto attributed = dyn_cast<AttributedType>(cur)) {
if (attributed->getAttrKind() ==
AttributedType::attr_objc_inert_unsafe_unretained)
return true;
@@ -622,7 +663,7 @@ bool ObjCObjectType::isSpecialized() const {
return true;
// Otherwise, check whether the base type is specialized.
- if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
+ if (const auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
// Terminate when we reach an interface type.
if (isa<ObjCInterfaceType>(objcObject))
return false;
@@ -640,7 +681,7 @@ ArrayRef<QualType> ObjCObjectType::getTypeArgs() const {
return getTypeArgsAsWritten();
// Look at the base type, which might have type arguments.
- if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
+ if (const auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
// Terminate when we reach an interface type.
if (isa<ObjCInterfaceType>(objcObject))
return {};
@@ -657,7 +698,7 @@ bool ObjCObjectType::isKindOfType() const {
return true;
// Look at the base type, which might have type arguments.
- if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
+ if (const auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
// Terminate when we reach an interface type.
if (isa<ObjCInterfaceType>(objcObject))
return false;
@@ -677,10 +718,8 @@ QualType ObjCObjectType::stripObjCKindOfTypeAndQuals(
// Recursively strip __kindof.
SplitQualType splitBaseType = getBaseType().split();
QualType baseType(splitBaseType.Ty, 0);
- if (const ObjCObjectType *baseObj
- = splitBaseType.Ty->getAs<ObjCObjectType>()) {
+ if (const auto *baseObj = splitBaseType.Ty->getAs<ObjCObjectType>())
baseType = baseObj->stripObjCKindOfTypeAndQuals(ctx);
- }
return ctx.getObjCObjectType(ctx.getQualifiedType(baseType,
splitBaseType.Quals),
@@ -733,7 +772,7 @@ public:
QualType VisitComplexType(const ComplexType *T) {
QualType elementType = recurse(T->getElementType());
if (elementType.isNull())
- return QualType();
+ return {};
if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
return QualType(T, 0);
@@ -744,7 +783,7 @@ public:
QualType VisitPointerType(const PointerType *T) {
QualType pointeeType = recurse(T->getPointeeType());
if (pointeeType.isNull())
- return QualType();
+ return {};
if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
return QualType(T, 0);
@@ -755,7 +794,7 @@ public:
QualType VisitBlockPointerType(const BlockPointerType *T) {
QualType pointeeType = recurse(T->getPointeeType());
if (pointeeType.isNull())
- return QualType();
+ return {};
if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
return QualType(T, 0);
@@ -766,7 +805,7 @@ public:
QualType VisitLValueReferenceType(const LValueReferenceType *T) {
QualType pointeeType = recurse(T->getPointeeTypeAsWritten());
if (pointeeType.isNull())
- return QualType();
+ return {};
if (pointeeType.getAsOpaquePtr()
== T->getPointeeTypeAsWritten().getAsOpaquePtr())
@@ -778,7 +817,7 @@ public:
QualType VisitRValueReferenceType(const RValueReferenceType *T) {
QualType pointeeType = recurse(T->getPointeeTypeAsWritten());
if (pointeeType.isNull())
- return QualType();
+ return {};
if (pointeeType.getAsOpaquePtr()
== T->getPointeeTypeAsWritten().getAsOpaquePtr())
@@ -790,7 +829,7 @@ public:
QualType VisitMemberPointerType(const MemberPointerType *T) {
QualType pointeeType = recurse(T->getPointeeType());
if (pointeeType.isNull())
- return QualType();
+ return {};
if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
return QualType(T, 0);
@@ -801,7 +840,7 @@ public:
QualType VisitConstantArrayType(const ConstantArrayType *T) {
QualType elementType = recurse(T->getElementType());
if (elementType.isNull())
- return QualType();
+ return {};
if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
return QualType(T, 0);
@@ -814,7 +853,7 @@ public:
QualType VisitVariableArrayType(const VariableArrayType *T) {
QualType elementType = recurse(T->getElementType());
if (elementType.isNull())
- return QualType();
+ return {};
if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
return QualType(T, 0);
@@ -828,7 +867,7 @@ public:
QualType VisitIncompleteArrayType(const IncompleteArrayType *T) {
QualType elementType = recurse(T->getElementType());
if (elementType.isNull())
- return QualType();
+ return {};
if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
return QualType(T, 0);
@@ -840,7 +879,7 @@ public:
QualType VisitVectorType(const VectorType *T) {
QualType elementType = recurse(T->getElementType());
if (elementType.isNull())
- return QualType();
+ return {};
if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
return QualType(T, 0);
@@ -852,7 +891,7 @@ public:
QualType VisitExtVectorType(const ExtVectorType *T) {
QualType elementType = recurse(T->getElementType());
if (elementType.isNull())
- return QualType();
+ return {};
if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
return QualType(T, 0);
@@ -863,7 +902,7 @@ public:
QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
QualType returnType = recurse(T->getReturnType());
if (returnType.isNull())
- return QualType();
+ return {};
if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr())
return QualType(T, 0);
@@ -874,7 +913,7 @@ public:
QualType VisitFunctionProtoType(const FunctionProtoType *T) {
QualType returnType = recurse(T->getReturnType());
if (returnType.isNull())
- return QualType();
+ return {};
// Transform parameter types.
SmallVector<QualType, 4> paramTypes;
@@ -882,7 +921,7 @@ public:
for (auto paramType : T->getParamTypes()) {
QualType newParamType = recurse(paramType);
if (newParamType.isNull())
- return QualType();
+ return {};
if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
paramChanged = true;
@@ -898,10 +937,9 @@ public:
for (auto exceptionType : info.ExceptionSpec.Exceptions) {
QualType newExceptionType = recurse(exceptionType);
if (newExceptionType.isNull())
- return QualType();
+ return {};
- if (newExceptionType.getAsOpaquePtr()
- != exceptionType.getAsOpaquePtr())
+ if (newExceptionType.getAsOpaquePtr() != exceptionType.getAsOpaquePtr())
exceptionChanged = true;
exceptionTypes.push_back(newExceptionType);
@@ -923,7 +961,7 @@ public:
QualType VisitParenType(const ParenType *T) {
QualType innerType = recurse(T->getInnerType());
if (innerType.isNull())
- return QualType();
+ return {};
if (innerType.getAsOpaquePtr() == T->getInnerType().getAsOpaquePtr())
return QualType(T, 0);
@@ -937,11 +975,11 @@ public:
QualType VisitAdjustedType(const AdjustedType *T) {
QualType originalType = recurse(T->getOriginalType());
if (originalType.isNull())
- return QualType();
+ return {};
QualType adjustedType = recurse(T->getAdjustedType());
if (adjustedType.isNull())
- return QualType();
+ return {};
if (originalType.getAsOpaquePtr()
== T->getOriginalType().getAsOpaquePtr() &&
@@ -954,7 +992,7 @@ public:
QualType VisitDecayedType(const DecayedType *T) {
QualType originalType = recurse(T->getOriginalType());
if (originalType.isNull())
- return QualType();
+ return {};
if (originalType.getAsOpaquePtr()
== T->getOriginalType().getAsOpaquePtr())
@@ -976,11 +1014,11 @@ public:
QualType VisitAttributedType(const AttributedType *T) {
QualType modifiedType = recurse(T->getModifiedType());
if (modifiedType.isNull())
- return QualType();
+ return {};
QualType equivalentType = recurse(T->getEquivalentType());
if (equivalentType.isNull())
- return QualType();
+ return {};
if (modifiedType.getAsOpaquePtr()
== T->getModifiedType().getAsOpaquePtr() &&
@@ -995,7 +1033,7 @@ public:
QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
QualType replacementType = recurse(T->getReplacementType());
if (replacementType.isNull())
- return QualType();
+ return {};
if (replacementType.getAsOpaquePtr()
== T->getReplacementType().getAsOpaquePtr())
@@ -1014,7 +1052,7 @@ public:
QualType deducedType = recurse(T->getDeducedType());
if (deducedType.isNull())
- return QualType();
+ return {};
if (deducedType.getAsOpaquePtr()
== T->getDeducedType().getAsOpaquePtr())
@@ -1030,7 +1068,7 @@ public:
QualType VisitObjCObjectType(const ObjCObjectType *T) {
QualType baseType = recurse(T->getBaseType());
if (baseType.isNull())
- return QualType();
+ return {};
// Transform type arguments.
bool typeArgChanged = false;
@@ -1038,7 +1076,7 @@ public:
for (auto typeArg : T->getTypeArgsAsWritten()) {
QualType newTypeArg = recurse(typeArg);
if (newTypeArg.isNull())
- return QualType();
+ return {};
if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr())
typeArgChanged = true;
@@ -1061,7 +1099,7 @@ public:
QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
QualType pointeeType = recurse(T->getPointeeType());
if (pointeeType.isNull())
- return QualType();
+ return {};
if (pointeeType.getAsOpaquePtr()
== T->getPointeeType().getAsOpaquePtr())
@@ -1073,7 +1111,7 @@ public:
QualType VisitAtomicType(const AtomicType *T) {
QualType valueType = recurse(T->getValueType());
if (valueType.isNull())
- return QualType();
+ return {};
if (valueType.getAsOpaquePtr()
== T->getValueType().getAsOpaquePtr())
@@ -1123,57 +1161,56 @@ QualType QualType::substObjCTypeArgs(
// Replace an Objective-C type parameter reference with the corresponding
// type argument.
if (const auto *OTPTy = dyn_cast<ObjCTypeParamType>(splitType.Ty)) {
- if (auto *typeParam = dyn_cast<ObjCTypeParamDecl>(OTPTy->getDecl())) {
- // If we have type arguments, use them.
- if (!typeArgs.empty()) {
- QualType argType = typeArgs[typeParam->getIndex()];
- if (OTPTy->qual_empty())
- return ctx.getQualifiedType(argType, splitType.Quals);
-
- // Apply protocol lists if exists.
- bool hasError;
- SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
- protocolsVec.append(OTPTy->qual_begin(),
- OTPTy->qual_end());
- ArrayRef<ObjCProtocolDecl *> protocolsToApply = protocolsVec;
- QualType resultTy = ctx.applyObjCProtocolQualifiers(argType,
- protocolsToApply, hasError, true/*allowOnPointerType*/);
-
- return ctx.getQualifiedType(resultTy, splitType.Quals);
- }
+ ObjCTypeParamDecl *typeParam = OTPTy->getDecl();
+ // If we have type arguments, use them.
+ if (!typeArgs.empty()) {
+ QualType argType = typeArgs[typeParam->getIndex()];
+ if (OTPTy->qual_empty())
+ return ctx.getQualifiedType(argType, splitType.Quals);
+
+ // Apply protocol lists if they exist.
+ bool hasError;
+ SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
+ protocolsVec.append(OTPTy->qual_begin(),
+ OTPTy->qual_end());
+ ArrayRef<ObjCProtocolDecl *> protocolsToApply = protocolsVec;
+ QualType resultTy = ctx.applyObjCProtocolQualifiers(argType,
+ protocolsToApply, hasError, true/*allowOnPointerType*/);
+
+ return ctx.getQualifiedType(resultTy, splitType.Quals);
+ }
- switch (context) {
- case ObjCSubstitutionContext::Ordinary:
- case ObjCSubstitutionContext::Parameter:
- case ObjCSubstitutionContext::Superclass:
- // Substitute the bound.
+ switch (context) {
+ case ObjCSubstitutionContext::Ordinary:
+ case ObjCSubstitutionContext::Parameter:
+ case ObjCSubstitutionContext::Superclass:
+ // Substitute the bound.
+ return ctx.getQualifiedType(typeParam->getUnderlyingType(),
+ splitType.Quals);
+
+ case ObjCSubstitutionContext::Result:
+ case ObjCSubstitutionContext::Property: {
+ // Substitute the __kindof form of the underlying type.
+ const auto *objPtr = typeParam->getUnderlyingType()
+ ->castAs<ObjCObjectPointerType>();
+
+ // __kindof types, id, and Class don't need an additional
+ // __kindof.
+ if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType())
return ctx.getQualifiedType(typeParam->getUnderlyingType(),
splitType.Quals);
- case ObjCSubstitutionContext::Result:
- case ObjCSubstitutionContext::Property: {
- // Substitute the __kindof form of the underlying type.
- const auto *objPtr = typeParam->getUnderlyingType()
- ->castAs<ObjCObjectPointerType>();
-
- // __kindof types, id, and Class don't need an additional
- // __kindof.
- if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType())
- return ctx.getQualifiedType(typeParam->getUnderlyingType(),
- splitType.Quals);
-
- // Add __kindof.
- const auto *obj = objPtr->getObjectType();
- QualType resultTy = ctx.getObjCObjectType(obj->getBaseType(),
- obj->getTypeArgsAsWritten(),
- obj->getProtocols(),
- /*isKindOf=*/true);
-
- // Rebuild object pointer type.
- resultTy = ctx.getObjCObjectPointerType(resultTy);
- return ctx.getQualifiedType(resultTy, splitType.Quals);
- }
- }
+ // Add __kindof.
+ const auto *obj = objPtr->getObjectType();
+ QualType resultTy = ctx.getObjCObjectType(obj->getBaseType(),
+ obj->getTypeArgsAsWritten(),
+ obj->getProtocols(),
+ /*isKindOf=*/true);
+
+ // Rebuild object pointer type.
+ resultTy = ctx.getObjCObjectPointerType(resultTy);
+ return ctx.getQualifiedType(resultTy, splitType.Quals);
+ }
}
}
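The restructuring above falls out of one observation: ObjCTypeParamType::getDecl() already returns an ObjCTypeParamDecl, so the old dyn_cast to that same type could never fail, and dropping it removes a level of nesting. A generic sketch of the pattern with hypothetical types:

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { int Index = 0; };
    // The accessor is already typed as Derived, so a downcast of its
    // result is tautological; use the value directly.
    Derived *get(Derived &D) { return &D; }
    int indexOf(Derived &D) {
      Derived *TP = get(D);   // was: dyn_cast<Derived>(get(D)) plus a branch
      return TP->Index;
    }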
@@ -1185,7 +1222,7 @@ QualType QualType::substObjCTypeArgs(
typeArgs,
ObjCSubstitutionContext::Result);
if (returnType.isNull())
- return QualType();
+ return {};
// Handle non-prototyped functions, which only substitute into the result
// type.
@@ -1210,7 +1247,7 @@ QualType QualType::substObjCTypeArgs(
typeArgs,
ObjCSubstitutionContext::Parameter);
if (newParamType.isNull())
- return QualType();
+ return {};
if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
paramChanged = true;
@@ -1229,7 +1266,7 @@ QualType QualType::substObjCTypeArgs(
typeArgs,
ObjCSubstitutionContext::Ordinary);
if (newExceptionType.isNull())
- return QualType();
+ return {};
if (newExceptionType.getAsOpaquePtr()
!= exceptionType.getAsOpaquePtr())
@@ -1263,7 +1300,7 @@ QualType QualType::substObjCTypeArgs(
ctx, typeArgs,
ObjCSubstitutionContext::Ordinary);
if (newTypeArg.isNull())
- return QualType();
+ return {};
if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) {
// If we're substituting based on an unspecialized context type,
@@ -1336,7 +1373,7 @@ QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const {
}
QualType QualType::getAtomicUnqualifiedType() const {
- if (auto AT = getTypePtr()->getAs<AtomicType>())
+ if (const auto AT = getTypePtr()->getAs<AtomicType>())
return AT->getValueType().getUnqualifiedType();
return getUnqualifiedType();
}
@@ -1344,12 +1381,12 @@ QualType QualType::getAtomicUnqualifiedType() const {
Optional<ArrayRef<QualType>> Type::getObjCSubstitutions(
const DeclContext *dc) const {
// Look through method scopes.
- if (auto method = dyn_cast<ObjCMethodDecl>(dc))
+ if (const auto method = dyn_cast<ObjCMethodDecl>(dc))
dc = method->getDeclContext();
// Find the class or category in which the type we're substituting
// was declared.
- const ObjCInterfaceDecl *dcClassDecl = dyn_cast<ObjCInterfaceDecl>(dc);
+ const auto *dcClassDecl = dyn_cast<ObjCInterfaceDecl>(dc);
const ObjCCategoryDecl *dcCategoryDecl = nullptr;
ObjCTypeParamList *dcTypeParams = nullptr;
if (dcClassDecl) {
@@ -1526,7 +1563,7 @@ const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
// There is no sugar for ObjCObjectType's, just return the canonical
// type pointer if it is the right class. There is no typedef information to
// return and these cannot be Address-space qualified.
- if (const ObjCObjectType *T = getAs<ObjCObjectType>())
+ if (const auto *T = getAs<ObjCObjectType>())
if (T->getNumProtocols() && T->getInterface())
return T;
return nullptr;
@@ -1539,7 +1576,7 @@ bool Type::isObjCQualifiedInterfaceType() const {
const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
// There is no sugar for ObjCQualifiedIdType's, just return the canonical
// type pointer if it is the right class.
- if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>()) {
if (OPT->isObjCQualifiedIdType())
return OPT;
}
@@ -1549,7 +1586,7 @@ const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
// There is no sugar for ObjCQualifiedClassType's, just return the canonical
// type pointer if it is the right class.
- if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>()) {
if (OPT->isObjCQualifiedClassType())
return OPT;
}
@@ -1557,7 +1594,7 @@ const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
}
const ObjCObjectType *Type::getAsObjCInterfaceType() const {
- if (const ObjCObjectType *OT = getAs<ObjCObjectType>()) {
+ if (const auto *OT = getAs<ObjCObjectType>()) {
if (OT->getInterface())
return OT;
}
@@ -1565,7 +1602,7 @@ const ObjCObjectType *Type::getAsObjCInterfaceType() const {
}
const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
- if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (const auto *OPT = getAs<ObjCObjectPointerType>()) {
if (OPT->getInterfaceType())
return OPT;
}
@@ -1574,14 +1611,14 @@ const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
QualType PointeeType;
- if (const PointerType *PT = getAs<PointerType>())
+ if (const auto *PT = getAs<PointerType>())
PointeeType = PT->getPointeeType();
- else if (const ReferenceType *RT = getAs<ReferenceType>())
+ else if (const auto *RT = getAs<ReferenceType>())
PointeeType = RT->getPointeeType();
else
return nullptr;
- if (const RecordType *RT = PointeeType->getAs<RecordType>())
+ if (const auto *RT = PointeeType->getAs<RecordType>())
return dyn_cast<CXXRecordDecl>(RT->getDecl());
return nullptr;
@@ -1593,7 +1630,7 @@ CXXRecordDecl *Type::getAsCXXRecordDecl() const {
TagDecl *Type::getAsTagDecl() const {
if (const auto *TT = getAs<TagType>())
- return cast<TagDecl>(TT->getDecl());
+ return TT->getDecl();
if (const auto *Injected = getAs<InjectedClassNameType>())
return Injected->getDecl();
@@ -1694,13 +1731,13 @@ bool Type::hasAutoForTrailingReturnType() const {
}
bool Type::hasIntegerRepresentation() const {
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isIntegerType();
else
return isIntegerType();
}
-/// \brief Determine whether this type is an integral type.
+/// Determine whether this type is an integral type.
///
/// This routine determines whether the given type is an integral type per
/// C++ [basic.fundamental]p7. Although the C standard does not define the
@@ -1720,20 +1757,20 @@ bool Type::hasIntegerRepresentation() const {
///
/// \returns true if the type is considered an integral type, false otherwise.
bool Type::isIntegralType(const ASTContext &Ctx) const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
// Complete enum types are integral in C.
if (!Ctx.getLangOpts().CPlusPlus)
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete();
return false;
}
bool Type::isIntegralOrUnscopedEnumerationType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
@@ -1741,14 +1778,14 @@ bool Type::isIntegralOrUnscopedEnumerationType() const {
// enumeration type in the sense required here.
// C++0x: However, if the underlying type of the enum is fixed, it is
// considered complete.
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
return false;
}
bool Type::isCharType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Char_U ||
BT->getKind() == BuiltinType::UChar ||
BT->getKind() == BuiltinType::Char_S ||
@@ -1757,34 +1794,41 @@ bool Type::isCharType() const {
}
bool Type::isWideCharType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::WChar_S ||
BT->getKind() == BuiltinType::WChar_U;
return false;
}
-bool Type::isChar16Type() const {
+bool Type::isChar8Type() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char8;
+ return false;
+}
+
+bool Type::isChar16Type() const {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Char16;
return false;
}
bool Type::isChar32Type() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Char32;
return false;
}
-/// \brief Determine whether this type is any of the built-in character
+/// Determine whether this type is any of the built-in character
/// types.
bool Type::isAnyCharacterType() const {
- const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType);
+ const auto *BT = dyn_cast<BuiltinType>(CanonicalType);
if (!BT) return false;
switch (BT->getKind()) {
default: return false;
case BuiltinType::Char_U:
case BuiltinType::UChar:
case BuiltinType::WChar_U:
+ case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
case BuiltinType::Char_S:
@@ -1798,7 +1842,7 @@ bool Type::isAnyCharacterType() const {
/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
/// an enum decl which has a signed representation
bool Type::isSignedIntegerType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
return BT->getKind() >= BuiltinType::Char_S &&
BT->getKind() <= BuiltinType::Int128;
}
@@ -1814,12 +1858,12 @@ bool Type::isSignedIntegerType() const {
}
bool Type::isSignedIntegerOrEnumerationType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
return BT->getKind() >= BuiltinType::Char_S &&
BT->getKind() <= BuiltinType::Int128;
}
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
if (ET->getDecl()->isComplete())
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
@@ -1828,7 +1872,7 @@ bool Type::isSignedIntegerOrEnumerationType() const {
}
bool Type::hasSignedIntegerRepresentation() const {
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isSignedIntegerOrEnumerationType();
else
return isSignedIntegerOrEnumerationType();
@@ -1838,12 +1882,12 @@ bool Type::hasSignedIntegerRepresentation() const {
/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], an enum
/// decl which has an unsigned representation
bool Type::isUnsignedIntegerType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::UInt128;
}
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
@@ -1854,12 +1898,12 @@ bool Type::isUnsignedIntegerType() const {
}
bool Type::isUnsignedIntegerOrEnumerationType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::UInt128;
}
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) {
if (ET->getDecl()->isComplete())
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
@@ -1868,48 +1912,48 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
}
bool Type::hasUnsignedIntegerRepresentation() const {
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
else
return isUnsignedIntegerOrEnumerationType();
}
bool Type::isFloatingType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Half &&
BT->getKind() <= BuiltinType::Float128;
- if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ if (const auto *CT = dyn_cast<ComplexType>(CanonicalType))
return CT->getElementType()->isFloatingType();
return false;
}
bool Type::hasFloatingRepresentation() const {
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isFloatingType();
else
return isFloatingType();
}
bool Type::isRealFloatingType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isFloatingPoint();
return false;
}
bool Type::isRealType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Float128;
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
return false;
}
bool Type::isArithmeticType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Float128;
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
// GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
//
@@ -1924,7 +1968,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
assert(isScalarType());
const Type *T = CanonicalType.getTypePtr();
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) {
+ if (const auto *BT = dyn_cast<BuiltinType>(T)) {
if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
if (BT->isInteger()) return STK_Integral;
@@ -1941,7 +1985,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
} else if (isa<EnumType>(T)) {
assert(cast<EnumType>(T)->getDecl()->isComplete());
return STK_Integral;
- } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) {
+ } else if (const auto *CT = dyn_cast<ComplexType>(T)) {
if (CT->getElementType()->isRealFloatingType())
return STK_FloatingComplex;
return STK_IntegralComplex;
@@ -1950,7 +1994,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
llvm_unreachable("unknown scalar type");
}
-/// \brief Determines whether the type is a C++ aggregate type or C
+/// Determines whether the type is a C++ aggregate type or C
/// aggregate or union type.
///
/// An aggregate type is an array or a class type (struct, union, or
@@ -1960,8 +2004,8 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also
/// includes union types.
bool Type::isAggregateType() const {
- if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) {
- if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ if (const auto *Record = dyn_cast<RecordType>(CanonicalType)) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
return ClassDecl->isAggregate();
return true;
@@ -1997,12 +2041,7 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
if (Def)
*Def = EnumD;
-
- // An enumeration with fixed underlying type is complete (C++0x 7.2p3).
- if (EnumD->isFixed())
- return false;
-
- return !EnumD->isCompleteDefinition();
+ return !EnumD->isComplete();
}
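The replacement folds the fixed-underlying-type special case into EnumDecl::isComplete(), which treats an enum as complete if it has a definition or a fixed underlying type. A sketch of the equivalence being relied on, with the two flags as plain booleans:

    // C++11 [dcl.enum]p5: an enumeration whose underlying type is fixed
    // is complete as soon as it is declared.
    bool isCompleteEnum(bool HasDefinition, bool HasFixedUnderlyingType) {
      return HasDefinition || HasFixedUnderlyingType;
    }
    // The removed code computed, for "is *in*complete":
    //   HasFixedUnderlyingType ? false : !HasDefinition
    // which is exactly the negation of the above.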
case Record: {
// A tagged type (struct/union/enum/class) is incomplete if the decl is a
@@ -2038,7 +2077,7 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
return false;
// The inheritance attribute might only be present on the most recent
// CXXRecordDecl, use that one.
- RD = RD->getMostRecentDecl();
+ RD = RD->getMostRecentNonInjectedDecl();
// Nothing interesting to do if the inheritance attribute is already set.
if (RD->hasAttr<MSInheritanceAttr>())
return false;
@@ -2105,8 +2144,8 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
return true;
case Type::Record:
- if (CXXRecordDecl *ClassDecl
- = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
+ if (const auto *ClassDecl =
+ dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
return ClassDecl->isPOD();
// C struct/union is POD.
@@ -2144,9 +2183,8 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
// As an extension, Clang treats vector types as Scalar types.
if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
return true;
- if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *RT = CanonicalType->getAs<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
// C++11 [class]p6:
// A trivial class is a class that has a default constructor,
// has no non-trivial default constructors, and is trivially
@@ -2188,9 +2226,8 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
return true;
- if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *RT = CanonicalType->getAs<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
if (!ClassDecl->isTriviallyCopyable()) return false;
}
@@ -2207,6 +2244,45 @@ bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
getObjCLifetime() != Qualifiers::OCL_Weak;
}
+QualType::PrimitiveDefaultInitializeKind
+QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
+ if (const auto *RT =
+ getTypePtr()->getBaseElementTypeUnsafe()->getAs<RecordType>())
+ if (RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize())
+ return PDIK_Struct;
+
+ switch (getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return PDIK_ARCStrong;
+ case Qualifiers::OCL_Weak:
+ return PDIK_ARCWeak;
+ default:
+ return PDIK_Trivial;
+ }
+}
+
+QualType::PrimitiveCopyKind QualType::isNonTrivialToPrimitiveCopy() const {
+ if (const auto *RT =
+ getTypePtr()->getBaseElementTypeUnsafe()->getAs<RecordType>())
+ if (RT->getDecl()->isNonTrivialToPrimitiveCopy())
+ return PCK_Struct;
+
+ Qualifiers Qs = getQualifiers();
+ switch (Qs.getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return PCK_ARCStrong;
+ case Qualifiers::OCL_Weak:
+ return PCK_ARCWeak;
+ default:
+ return Qs.hasVolatile() ? PCK_VolatileTrivial : PCK_Trivial;
+ }
+}
+
+QualType::PrimitiveCopyKind
+QualType::isNonTrivialToPrimitiveDestructiveMove() const {
+ return isNonTrivialToPrimitiveCopy();
+}
+
bool Type::isLiteralType(const ASTContext &Ctx) const {
if (isDependentType())
return false;
@@ -2243,7 +2319,7 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
if (BaseTy->isReferenceType())
return true;
// -- a class type that has all of the following properties:
- if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const auto *RT = BaseTy->getAs<RecordType>()) {
// -- a trivial destructor,
// -- every constructor call and full-expression in the
// brace-or-equal-initializers for non-static data members (if any)
@@ -2254,15 +2330,14 @@ bool Type::isLiteralType(const ASTContext &Ctx) const {
// -- all non-static data members and base classes of literal types
//
// We resolve DR1361 by ignoring the second bullet.
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
return ClassDecl->isLiteral();
return true;
}
// We treat _Atomic T as a literal type if T is a literal type.
- if (const AtomicType *AT = BaseTy->getAs<AtomicType>())
+ if (const auto *AT = BaseTy->getAs<AtomicType>())
return AT->getValueType()->isLiteralType(Ctx);
// If this type hasn't been deduced yet, then conservatively assume that
@@ -2291,9 +2366,8 @@ bool Type::isStandardLayoutType() const {
// As an extension, Clang treats vector types as Scalar types.
if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
- if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *RT = BaseTy->getAs<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
if (!ClassDecl->isStandardLayout())
return false;
@@ -2331,9 +2405,8 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
// As an extension, Clang treats vector types as Scalar types.
if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
- if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (const auto *RT = BaseTy->getAs<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
// C++11 [class]p10:
// A POD struct is a non-union class that is both a trivial class [...]
if (!ClassDecl->isTrivial()) return false;
@@ -2361,8 +2434,8 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
}
bool Type::isAlignValT() const {
- if (auto *ET = getAs<EnumType>()) {
- auto *II = ET->getDecl()->getIdentifier();
+ if (const auto *ET = getAs<EnumType>()) {
+ IdentifierInfo *II = ET->getDecl()->getIdentifier();
if (II && II->isStr("align_val_t") && ET->getDecl()->isInStdNamespace())
return true;
}
@@ -2370,8 +2443,8 @@ bool Type::isAlignValT() const {
}
bool Type::isStdByteType() const {
- if (auto *ET = getAs<EnumType>()) {
- auto *II = ET->getDecl()->getIdentifier();
+ if (const auto *ET = getAs<EnumType>()) {
+ IdentifierInfo *II = ET->getDecl()->getIdentifier();
if (II && II->isStr("byte") && ET->getDecl()->isInStdNamespace())
return true;
}
@@ -2379,7 +2452,7 @@ bool Type::isStdByteType() const {
}
bool Type::isPromotableIntegerType() const {
- if (const BuiltinType *BT = getAs<BuiltinType>())
+ if (const auto *BT = getAs<BuiltinType>())
switch (BT->getKind()) {
case BuiltinType::Bool:
case BuiltinType::Char_S:
@@ -2390,6 +2463,7 @@ bool Type::isPromotableIntegerType() const {
case BuiltinType::UShort:
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
+ case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
return true;
@@ -2399,7 +2473,7 @@ bool Type::isPromotableIntegerType() const {
// Enumerated types are promotable to their compatible integer types
// (C99 6.3.1.1), a.k.a. their underlying type (C++ [conv.prom]p2).
- if (const EnumType *ET = getAs<EnumType>()){
+ if (const auto *ET = getAs<EnumType>()){
if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
|| ET->getDecl()->isScoped())
return false;
@@ -2506,7 +2580,7 @@ TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
- case ETK_None: return "";
+ case ETK_None: return {};
case ETK_Typename: return "typename";
case ETK_Class: return "class";
case ETK_Struct: return "struct";
@@ -2554,12 +2628,12 @@ DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
bool Type::isElaboratedTypeSpecifier() const {
ElaboratedTypeKeyword Keyword;
- if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
+ if (const auto *Elab = dyn_cast<ElaboratedType>(this))
Keyword = Elab->getKeyword();
- else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
+ else if (const auto *DepName = dyn_cast<DependentNameType>(this))
Keyword = DepName->getKeyword();
- else if (const DependentTemplateSpecializationType *DepTST =
- dyn_cast<DependentTemplateSpecializationType>(this))
+ else if (const auto *DepTST =
+ dyn_cast<DependentTemplateSpecializationType>(this))
Keyword = DepTST->getKeyword();
else
return false;
@@ -2619,6 +2693,54 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "double";
case LongDouble:
return "long double";
+ case ShortAccum:
+ return "short _Accum";
+ case Accum:
+ return "_Accum";
+ case LongAccum:
+ return "long _Accum";
+ case UShortAccum:
+ return "unsigned short _Accum";
+ case UAccum:
+ return "unsigned _Accum";
+ case ULongAccum:
+ return "unsigned long _Accum";
+ case BuiltinType::ShortFract:
+ return "short _Fract";
+ case BuiltinType::Fract:
+ return "_Fract";
+ case BuiltinType::LongFract:
+ return "long _Fract";
+ case BuiltinType::UShortFract:
+ return "unsigned short _Fract";
+ case BuiltinType::UFract:
+ return "unsigned _Fract";
+ case BuiltinType::ULongFract:
+ return "unsigned long _Fract";
+ case BuiltinType::SatShortAccum:
+ return "_Sat short _Accum";
+ case BuiltinType::SatAccum:
+ return "_Sat _Accum";
+ case BuiltinType::SatLongAccum:
+ return "_Sat long _Accum";
+ case BuiltinType::SatUShortAccum:
+ return "_Sat unsigned short _Accum";
+ case BuiltinType::SatUAccum:
+ return "_Sat unsigned _Accum";
+ case BuiltinType::SatULongAccum:
+ return "_Sat unsigned long _Accum";
+ case BuiltinType::SatShortFract:
+ return "_Sat short _Fract";
+ case BuiltinType::SatFract:
+ return "_Sat _Fract";
+ case BuiltinType::SatLongFract:
+ return "_Sat long _Fract";
+ case BuiltinType::SatUShortFract:
+ return "_Sat unsigned short _Fract";
+ case BuiltinType::SatUFract:
+ return "_Sat unsigned _Fract";
+ case BuiltinType::SatULongFract:
+ return "_Sat unsigned long _Fract";
case Float16:
return "_Float16";
case Float128:
@@ -2626,6 +2748,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case WChar_S:
case WChar_U:
return Policy.MSWChar ? "__wchar_t" : "wchar_t";
+ case Char8:
+ return "char8_t";
case Char16:
return "char16_t";
case Char32:
@@ -2674,7 +2798,7 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
}
QualType QualType::getNonLValueExprType(const ASTContext &Context) const {
- if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
+ if (const auto *RefType = getTypePtr()->getAs<ReferenceType>())
return RefType->getPointeeType();
// C++0x [basic.lval]:
@@ -2732,7 +2856,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
FunctionTypeBits.RefQualifier = epi.RefQualifier;
// Fill in the trailing argument array.
- QualType *argSlot = reinterpret_cast<QualType*>(this+1);
+ auto *argSlot = reinterpret_cast<QualType *>(this+1);
for (unsigned i = 0; i != NumParams; ++i) {
if (params[i]->isDependentType())
setDependent();
@@ -2761,24 +2885,25 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
exnSlot[I++] = ExceptionType;
}
- } else if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ } else if (isComputedNoexcept(getExceptionSpecType())) {
+ assert(epi.ExceptionSpec.NoexceptExpr && "computed noexcept with no expr");
+ assert((getExceptionSpecType() == EST_DependentNoexcept) ==
+ epi.ExceptionSpec.NoexceptExpr->isValueDependent());
+
// Store the noexcept expression and context.
- Expr **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
+ auto **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
*noexSlot = epi.ExceptionSpec.NoexceptExpr;
- if (epi.ExceptionSpec.NoexceptExpr) {
- if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
- epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
- setInstantiationDependent();
+ if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
+ epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
+ setInstantiationDependent();
- if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
- setContainsUnexpandedParameterPack();
- }
+ if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
} else if (getExceptionSpecType() == EST_Uninstantiated) {
// Store the function decl from which we will resolve our
// exception specification.
- FunctionDecl **slot =
- reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ auto **slot = reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
slot[0] = epi.ExceptionSpec.SourceDecl;
slot[1] = epi.ExceptionSpec.SourceTemplate;
// This exception specification doesn't make the type dependent, because
@@ -2786,8 +2911,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
} else if (getExceptionSpecType() == EST_Unevaluated) {
// Store the function decl from which we will resolve our
// exception specification.
- FunctionDecl **slot =
- reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ auto **slot = reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
slot[0] = epi.ExceptionSpec.SourceDecl;
}
@@ -2795,7 +2919,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// then it's a dependent type. This only happens in C++17 onwards.
if (isCanonicalUnqualified()) {
if (getExceptionSpecType() == EST_Dynamic ||
- getExceptionSpecType() == EST_ComputedNoexcept) {
+ getExceptionSpecType() == EST_DependentNoexcept) {
assert(hasDependentExceptionSpec() && "type should not be canonical");
setDependent();
}
@@ -2805,7 +2929,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
}
if (epi.ExtParameterInfos) {
- ExtParameterInfo *extParamInfos =
+ auto *extParamInfos =
const_cast<ExtParameterInfo *>(getExtParameterInfosBuffer());
for (unsigned i = 0; i != NumParams; ++i)
extParamInfos[i] = epi.ExtParameterInfos[i];
@@ -2833,52 +2957,36 @@ bool FunctionProtoType::hasInstantiationDependentExceptionSpec() const {
return false;
}
-FunctionProtoType::NoexceptResult
-FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const {
- ExceptionSpecificationType est = getExceptionSpecType();
- if (est == EST_BasicNoexcept)
- return NR_Nothrow;
-
- if (est != EST_ComputedNoexcept)
- return NR_NoNoexcept;
-
- Expr *noexceptExpr = getNoexceptExpr();
- if (!noexceptExpr)
- return NR_BadNoexcept;
- if (noexceptExpr->isValueDependent())
- return NR_Dependent;
-
- llvm::APSInt value;
- bool isICE = noexceptExpr->isIntegerConstantExpr(value, ctx, nullptr,
- /*evaluated*/false);
- (void)isICE;
- assert(isICE && "AST should not contain bad noexcept expressions.");
-
- return value.getBoolValue() ? NR_Nothrow : NR_Throw;
-}
+CanThrowResult FunctionProtoType::canThrow() const {
+ switch (getExceptionSpecType()) {
+ case EST_Unparsed:
+ case EST_Unevaluated:
+ case EST_Uninstantiated:
+ llvm_unreachable("should not call this with unresolved exception specs");
-CanThrowResult FunctionProtoType::canThrow(const ASTContext &Ctx) const {
- ExceptionSpecificationType EST = getExceptionSpecType();
- assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
- if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
+ case EST_DynamicNone:
+ case EST_BasicNoexcept:
+ case EST_NoexceptTrue:
return CT_Cannot;
- if (EST == EST_Dynamic) {
+ case EST_None:
+ case EST_MSAny:
+ case EST_NoexceptFalse:
+ return CT_Can;
+
+ case EST_Dynamic:
// A dynamic exception specification is throwing unless every exception
// type is an (unexpanded) pack expansion type.
for (unsigned I = 0, N = NumExceptions; I != N; ++I)
if (!getExceptionType(I)->getAs<PackExpansionType>())
return CT_Can;
return CT_Dependent;
- }
- if (EST != EST_ComputedNoexcept)
- return CT_Can;
-
- NoexceptResult NR = getNoexceptSpec(Ctx);
- if (NR == NR_Dependent)
+ case EST_DependentNoexcept:
return CT_Dependent;
- return NR == NR_Nothrow ? CT_Cannot : CT_Can;
+ }
+
+ llvm_unreachable("unexpected exception specification kind");
}
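// Illustrative mapping for the split of the old EST_ComputedNoexcept kind
// (hypothetical declarations, not part of this patch):
//
//   void f() noexcept(true);       // EST_NoexceptTrue      -> CT_Cannot
//   void g() noexcept(false);      // EST_NoexceptFalse     -> CT_Can
//   template <typename T>
//   void h() noexcept(T::value);   // EST_DependentNoexcept -> CT_Dependent
//
// canThrow() no longer needs an ASTContext because the noexcept operand has
// already been folded into the exception specification kind itself.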
bool FunctionProtoType::isTemplateVariadic() const {
@@ -2928,8 +3036,7 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
if (epi.ExceptionSpec.Type == EST_Dynamic) {
for (QualType Ex : epi.ExceptionSpec.Exceptions)
ID.AddPointer(Ex.getAsOpaquePtr());
- } else if (epi.ExceptionSpec.Type == EST_ComputedNoexcept &&
- epi.ExceptionSpec.NoexceptExpr) {
+ } else if (isComputedNoexcept(epi.ExceptionSpec.Type)) {
epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, Canonical);
} else if (epi.ExceptionSpec.Type == EST_Uninstantiated ||
epi.ExceptionSpec.Type == EST_Unevaluated) {
@@ -3048,7 +3155,7 @@ bool RecordType::hasConstFields() const {
if (FieldTy.isConstQualified())
return true;
FieldTy = FieldTy.getCanonicalType();
- if (const RecordType *FieldRecTy = FieldTy->getAs<RecordType>())
+ if (const auto *FieldRecTy = FieldTy->getAs<RecordType>())
if (FieldRecTy->hasConstFields())
return true;
}
@@ -3097,6 +3204,7 @@ bool AttributedType::isQualifier() const {
case AttributedType::attr_uptr:
case AttributedType::attr_objc_kindof:
case AttributedType::attr_ns_returns_retained:
+ case AttributedType::attr_nocf_check:
return false;
}
llvm_unreachable("bad attributed type kind");
@@ -3134,6 +3242,7 @@ bool AttributedType::isCallingConv() const {
case attr_nullable:
case attr_null_unspecified:
case attr_objc_kindof:
+ case attr_nocf_check:
return false;
case attr_pcs:
@@ -3229,8 +3338,7 @@ TemplateSpecializationType(TemplateName T,
T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
"Unexpected template name for TemplateSpecializationType");
- TemplateArgument *TemplateArgs
- = reinterpret_cast<TemplateArgument *>(this + 1);
+ auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1);
for (const TemplateArgument &Arg : Args) {
// Update instantiation-dependent and variably-modified bits.
// If the canonical type exists and is non-dependent, the template
@@ -3252,7 +3360,7 @@ TemplateSpecializationType(TemplateName T,
// Store the aliased type if this is a type alias template specialization.
if (TypeAlias) {
- TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
+ auto *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
*reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
}
}
@@ -3320,7 +3428,7 @@ void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) {
namespace {
-/// \brief The cached properties of a type.
+/// The cached properties of a type.
class CachedProperties {
Linkage L;
bool local;
@@ -3450,7 +3558,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::RValueReference:
return Cache::get(cast<ReferenceType>(T)->getPointeeType());
case Type::MemberPointer: {
- const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ const auto *MPT = cast<MemberPointerType>(T);
return merge(Cache::get(MPT->getClass()),
Cache::get(MPT->getPointeeType()));
}
@@ -3464,7 +3572,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::FunctionNoProto:
return Cache::get(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
- const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ const auto *FPT = cast<FunctionProtoType>(T);
CachedProperties result = Cache::get(FPT->getReturnType());
for (const auto &ai : FPT->param_types())
result = merge(result, Cache::get(ai));
@@ -3487,7 +3595,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
llvm_unreachable("unhandled type class");
}
-/// \brief Determine the linkage of this type.
+/// Determine the linkage of this type.
Linkage Type::getLinkage() const {
Cache::ensure(this);
return TypeBits.getLinkage();
@@ -3534,7 +3642,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::RValueReference:
return computeTypeLinkageInfo(cast<ReferenceType>(T)->getPointeeType());
case Type::MemberPointer: {
- const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ const auto *MPT = cast<MemberPointerType>(T);
LinkageInfo LV = computeTypeLinkageInfo(MPT->getClass());
LV.merge(computeTypeLinkageInfo(MPT->getPointeeType()));
return LV;
@@ -3549,7 +3657,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
case Type::FunctionNoProto:
return computeTypeLinkageInfo(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
- const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ const auto *FPT = cast<FunctionProtoType>(T);
LinkageInfo LV = computeTypeLinkageInfo(FPT->getReturnType());
for (const auto &ai : FPT->param_types())
LV.merge(computeTypeLinkageInfo(ai));
@@ -3702,6 +3810,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::IncompleteArray:
case Type::VariableArray:
case Type::DependentSizedArray:
+ case Type::DependentVector:
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
@@ -3744,7 +3853,7 @@ Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) {
}
bool Type::isBlockCompatibleObjCPointerType(ASTContext &ctx) const {
- const ObjCObjectPointerType *objcPtr = getAs<ObjCObjectPointerType>();
+ const auto *objcPtr = getAs<ObjCObjectPointerType>();
if (!objcPtr)
return false;
@@ -3789,11 +3898,10 @@ bool Type::isObjCARCImplicitlyUnretainedType() const {
const Type *canon = getCanonicalTypeInternal().getTypePtr();
// Walk down to the base type. We don't care about qualifiers for this.
- while (const ArrayType *array = dyn_cast<ArrayType>(canon))
+ while (const auto *array = dyn_cast<ArrayType>(canon))
canon = array->getElementType().getTypePtr();
- if (const ObjCObjectPointerType *opt
- = dyn_cast<ObjCObjectPointerType>(canon)) {
+ if (const auto *opt = dyn_cast<ObjCObjectPointerType>(canon)) {
// Class and Class<Protocol> don't require retention.
if (opt->getObjectType()->isObjCClass())
return true;
@@ -3805,7 +3913,7 @@ bool Type::isObjCARCImplicitlyUnretainedType() const {
bool Type::isObjCNSObjectType() const {
const Type *cur = this;
while (true) {
- if (const TypedefType *typedefType = dyn_cast<TypedefType>(cur))
+ if (const auto *typedefType = dyn_cast<TypedefType>(cur))
return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
// Single-step desugar until we run out of sugar.
@@ -3816,7 +3924,7 @@ bool Type::isObjCNSObjectType() const {
}
bool Type::isObjCIndependentClassType() const {
- if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+ if (const auto *typedefType = dyn_cast<TypedefType>(this))
return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>();
return false;
}
@@ -3830,11 +3938,11 @@ bool Type::isObjCRetainableType() const {
bool Type::isObjCIndirectLifetimeType() const {
if (isObjCLifetimeType())
return true;
- if (const PointerType *OPT = getAs<PointerType>())
+ if (const auto *OPT = getAs<PointerType>())
return OPT->getPointeeType()->isObjCIndirectLifetimeType();
- if (const ReferenceType *Ref = getAs<ReferenceType>())
+ if (const auto *Ref = getAs<ReferenceType>())
return Ref->getPointeeType()->isObjCIndirectLifetimeType();
- if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
+ if (const auto *MemPtr = getAs<MemberPointerType>())
return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
return false;
}
@@ -3848,15 +3956,15 @@ bool Type::isObjCLifetimeType() const {
return type->isObjCRetainableType();
}
-/// \brief Determine whether the given type T is a "bridgable" Objective-C type,
+/// Determine whether the given type T is a "bridgeable" Objective-C type,
/// which is either an Objective-C object pointer type or a block pointer type.
bool Type::isObjCARCBridgableType() const {
return isObjCObjectPointerType() || isBlockPointerType();
}
-/// \brief Determine whether the given type T is a "bridgeable" C type.
+/// Determine whether the given type T is a "bridgeable" C type.
bool Type::isCARCBridgableType() const {
- const PointerType *Pointer = getAs<PointerType>();
+ const auto *Pointer = getAs<PointerType>();
if (!Pointer)
return false;
@@ -3867,9 +3975,9 @@ bool Type::isCARCBridgableType() const {
bool Type::hasSizedVLAType() const {
if (!isVariablyModifiedType()) return false;
- if (const PointerType *ptr = getAs<PointerType>())
+ if (const auto *ptr = getAs<PointerType>())
return ptr->getPointeeType()->hasSizedVLAType();
- if (const ReferenceType *ref = getAs<ReferenceType>())
+ if (const auto *ref = getAs<ReferenceType>())
return ref->getPointeeType()->hasSizedVLAType();
if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
if (isa<VariableArrayType>(arr) &&
@@ -3895,16 +4003,40 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
return DK_objc_weak_lifetime;
}
- /// Currently, the only destruction kind we recognize is C++ objects
- /// with non-trivial destructors.
- const CXXRecordDecl *record =
- type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
- if (record && record->hasDefinition() && !record->hasTrivialDestructor())
- return DK_cxx_destructor;
+ if (const auto *RT =
+ type->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      // Check if this is a C++ object with a non-trivial destructor.
+ if (CXXRD->hasDefinition() && !CXXRD->hasTrivialDestructor())
+ return DK_cxx_destructor;
+ } else {
+      // Check if this is a C struct that is non-trivial to destroy or an array
+      // that contains such a struct.
+ if (RD->isNonTrivialToPrimitiveDestroy())
+ return DK_nontrivial_c_struct;
+ }
+ }
return DK_none;
}
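// A hedged example of the new C-struct path (assuming ARC, which is what
// makes a plain C struct non-trivial to destroy):
//
//   struct S { __strong id Obj; };  // RD->isNonTrivialToPrimitiveDestroy()
//
// Both S and S[4] now report DK_nontrivial_c_struct, since the array case is
// handled by getBaseElementTypeUnsafe() above.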
CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
- return getClass()->getAsCXXRecordDecl()->getMostRecentDecl();
+ return getClass()->getAsCXXRecordDecl()->getMostRecentNonInjectedDecl();
+}
+
+void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
+ const llvm::APSInt &Val, unsigned Scale,
+ unsigned Radix) {
+ llvm::APSInt ScaleVal = llvm::APSInt::getUnsigned(1ULL << Scale);
+ llvm::APSInt IntPart = Val / ScaleVal;
+ llvm::APSInt FractPart = Val % ScaleVal;
+ llvm::APSInt RadixInt = llvm::APSInt::getUnsigned(Radix);
+
+ IntPart.toString(Str, Radix);
+ Str.push_back('.');
+ do {
+ (FractPart * RadixInt / ScaleVal).toString(Str, Radix);
+ FractPart = (FractPart * RadixInt) % ScaleVal;
+ } while (FractPart.getExtValue());
}
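// A minimal usage sketch for the helper above (hypothetical driver, not part
// of this patch): printing the raw value 5 with Scale = 1 in Radix = 10.
//
//   llvm::SmallString<16> Str;
//   clang::FixedPointValueToString(Str, llvm::APSInt::getUnsigned(5),
//                                  /*Scale=*/1, /*Radix=*/10);
//
// Str becomes "2.5": IntPart = 5/2 = 2, FractPart = 1, then 1*10/2 = 5 and
// (1*10) % 2 == 0 terminates the do/while loop.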
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index 0ac50b31acec..6fa76e14a590 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -68,7 +68,7 @@ public:
} // namespace
-/// \brief Returns the alignment of the type source info data block.
+/// Returns the alignment of the type source info data block.
unsigned TypeLoc::getLocalAlignmentForType(QualType Ty) {
if (Ty.isNull()) return 1;
return TypeAligner().Visit(TypeLoc(Ty, nullptr));
@@ -88,7 +88,7 @@ public:
} // namespace
-/// \brief Returns the size of the type source info data block.
+/// Returns the size of the type source info data block.
unsigned TypeLoc::getFullDataSizeForType(QualType Ty) {
unsigned Total = 0;
TypeLoc TyLoc(Ty, nullptr);
@@ -118,13 +118,13 @@ public:
} // namespace
-/// \brief Get the next TypeLoc pointed by this TypeLoc, e.g for "int*" the
+/// Get the next TypeLoc pointed to by this TypeLoc, e.g. for "int*" the
/// TypeLoc is a PointerLoc and the next TypeLoc is for "int".
TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
return NextLoc().Visit(TL);
}
-/// \brief Initializes a type location, and all of its children
+/// Initializes a type location, and all of its children
/// recursively, as if the entire tree had been written in the
/// given location.
void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
@@ -254,7 +254,7 @@ SourceLocation TypeLoc::getEndLoc() const {
case RValueReference:
case PackExpansion:
if (!Last)
- Last = Cur;
+ Last = Cur;
break;
case Qualified:
case Elaborated:
@@ -281,7 +281,7 @@ struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
} // namespace
-/// \brief Determines if the given type loc corresponds to a
+/// Determines if the given type loc corresponds to a
/// TypeSpecTypeLoc. Since there is not actually a TypeSpecType in
/// the type hierarchy, this is made somewhat complicated.
///
@@ -317,6 +317,8 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::Char_U:
case BuiltinType::Char_S:
return TST_char;
+ case BuiltinType::Char8:
+ return TST_char8;
case BuiltinType::Char16:
return TST_char16;
case BuiltinType::Char32:
@@ -342,6 +344,30 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::LongDouble:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
llvm_unreachable("Builtin type needs extra local data!");
// Fall through, if the impossible happens.
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index c28ada7dcb8b..c5e2244e26c5 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -1,4 +1,4 @@
-//===--- TypePrinter.cpp - Pretty-Print Clang Types -----------------------===//
+//===- TypePrinter.cpp - Pretty-Print Clang Types -------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,20 +14,40 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <string>
+
using namespace clang;
namespace {
- /// \brief RAII object that enables printing of the ARC __strong lifetime
+
+ /// RAII object that enables printing of the ARC __strong lifetime
/// qualifier.
class IncludeStrongLifetimeRAII {
PrintingPolicy &Policy;
@@ -35,7 +55,7 @@ namespace {
public:
explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
- : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+ : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
if (!Policy.SuppressLifetimeQualifiers)
Policy.SuppressStrongLifetime = false;
}
@@ -51,7 +71,7 @@ namespace {
public:
explicit ParamPolicyRAII(PrintingPolicy &Policy)
- : Policy(Policy), Old(Policy.SuppressSpecifiers) {
+ : Policy(Policy), Old(Policy.SuppressSpecifiers) {
Policy.SuppressSpecifiers = false;
}
@@ -82,13 +102,12 @@ namespace {
class TypePrinter {
PrintingPolicy Policy;
unsigned Indentation;
- bool HasEmptyPlaceHolder;
- bool InsideCCAttribute;
+ bool HasEmptyPlaceHolder = false;
+ bool InsideCCAttribute = false;
public:
explicit TypePrinter(const PrintingPolicy &Policy, unsigned Indentation = 0)
- : Policy(Policy), Indentation(Indentation),
- HasEmptyPlaceHolder(false), InsideCCAttribute(false) { }
+ : Policy(Policy), Indentation(Indentation) {}
void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
StringRef PlaceHolder);
@@ -111,7 +130,8 @@ namespace {
void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
#include "clang/AST/TypeNodes.def"
};
-}
+
+} // namespace
static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals,
bool HasRestrictKeyword) {
@@ -169,10 +189,9 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
bool CanPrefixQualifiers = false;
NeedARCStrongQualifier = false;
Type::TypeClass TC = T->getTypeClass();
- if (const AutoType *AT = dyn_cast<AutoType>(T))
+ if (const auto *AT = dyn_cast<AutoType>(T))
TC = AT->desugar()->getTypeClass();
- if (const SubstTemplateTypeParmType *Subst
- = dyn_cast<SubstTemplateTypeParmType>(T))
+ if (const auto *Subst = dyn_cast<SubstTemplateTypeParmType>(T))
TC = Subst->getReplacementType()->getTypeClass();
switch (TC) {
@@ -223,6 +242,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::RValueReference:
case Type::MemberPointer:
case Type::DependentAddressSpace:
+ case Type::DependentVector:
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
@@ -245,14 +265,13 @@ void TypePrinter::printBefore(QualType T, raw_ostream &OS) {
// If we have cv1 T, where T is substituted for cv2 U, only print cv1 - cv2
// at this level.
Qualifiers Quals = Split.Quals;
- if (const SubstTemplateTypeParmType *Subst =
- dyn_cast<SubstTemplateTypeParmType>(Split.Ty))
+ if (const auto *Subst = dyn_cast<SubstTemplateTypeParmType>(Split.Ty))
Quals -= QualType(Subst, 0).getQualifiers();
printBefore(Split.Ty, Quals, OS);
}
-/// \brief Prints the part of the type string before an identifier, e.g. for
+/// Prints the part of the type string before an identifier, e.g. for
/// "int foo[10]" it prints "int ".
void TypePrinter::printBefore(const Type *T,Qualifiers Quals, raw_ostream &OS) {
if (Policy.SuppressSpecifiers && T->isSpecifierType())
@@ -305,7 +324,7 @@ void TypePrinter::printAfter(QualType t, raw_ostream &OS) {
printAfter(split.Ty, split.Quals, OS);
}
-/// \brief Prints the part of the type string after an identifier, e.g. for
+/// Prints the part of the type string after an identifier, e.g. for
/// "int foo[10]" it prints "[10]".
void TypePrinter::printAfter(const Type *T, Qualifiers Quals, raw_ostream &OS) {
switch (T->getTypeClass()) {
@@ -321,12 +340,14 @@ void TypePrinter::printBuiltinBefore(const BuiltinType *T, raw_ostream &OS) {
OS << T->getName(Policy);
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printBuiltinAfter(const BuiltinType *T, raw_ostream &OS) { }
+
+void TypePrinter::printBuiltinAfter(const BuiltinType *T, raw_ostream &OS) {}
void TypePrinter::printComplexBefore(const ComplexType *T, raw_ostream &OS) {
OS << "_Complex ";
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) {
printAfter(T->getElementType(), OS);
}
@@ -341,6 +362,7 @@ void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
OS << '(';
OS << '*';
}
+
void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
@@ -357,54 +379,69 @@ void TypePrinter::printBlockPointerBefore(const BlockPointerType *T,
printBefore(T->getPointeeType(), OS);
OS << '^';
}
+
void TypePrinter::printBlockPointerAfter(const BlockPointerType *T,
raw_ostream &OS) {
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printAfter(T->getPointeeType(), OS);
}
+// When printing a reference, the referenced type might also be a reference.
+// If so, we want to skip that before printing the inner type.
+static QualType skipTopLevelReferences(QualType T) {
+ if (auto *Ref = T->getAs<ReferenceType>())
+ return skipTopLevelReferences(Ref->getPointeeTypeAsWritten());
+ return T;
+}
+
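// Why the recursion matters (hypothetical sugar, not from this patch): with
//
//   using IntRef = int &;
//   void f(IntRef &R);  // the pointee as written is IntRef, a reference
//
// drilling down to 'int' first lets the printer emit a single "int &" rather
// than wrapping reference markers around the sugared inner reference.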
void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
- printBefore(T->getPointeeTypeAsWritten(), OS);
+ QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
+ printBefore(Inner, OS);
// Handle things like 'int (&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
- if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ if (isa<ArrayType>(Inner))
OS << '(';
OS << '&';
}
+
void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
// Handle things like 'int (&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
- if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ if (isa<ArrayType>(Inner))
OS << ')';
- printAfter(T->getPointeeTypeAsWritten(), OS);
+ printAfter(Inner, OS);
}
void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
- printBefore(T->getPointeeTypeAsWritten(), OS);
+ QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
+ printBefore(Inner, OS);
// Handle things like 'int (&&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
- if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ if (isa<ArrayType>(Inner))
OS << '(';
OS << "&&";
}
+
void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
// Handle things like 'int (&&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
- if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ if (isa<ArrayType>(Inner))
OS << ')';
- printAfter(T->getPointeeTypeAsWritten(), OS);
+ printAfter(Inner, OS);
}
void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
@@ -423,6 +460,7 @@ void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
OS << "::*";
}
+
void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
@@ -440,6 +478,7 @@ void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T,
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
raw_ostream &OS) {
OS << '[';
@@ -462,6 +501,7 @@ void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T,
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T,
raw_ostream &OS) {
OS << "[]";
@@ -474,6 +514,7 @@ void TypePrinter::printVariableArrayBefore(const VariableArrayType *T,
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printVariableArrayAfter(const VariableArrayType *T,
raw_ostream &OS) {
OS << '[';
@@ -499,6 +540,7 @@ void TypePrinter::printAdjustedBefore(const AdjustedType *T, raw_ostream &OS) {
// invisible.
printBefore(T->getAdjustedType(), OS);
}
+
void TypePrinter::printAdjustedAfter(const AdjustedType *T, raw_ostream &OS) {
printAfter(T->getAdjustedType(), OS);
}
@@ -507,6 +549,7 @@ void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
// Print as though it's a pointer.
printAdjustedBefore(T, OS);
}
+
void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
printAdjustedAfter(T, OS);
}
@@ -518,6 +561,7 @@ void TypePrinter::printDependentSizedArrayBefore(
SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printDependentSizedArrayAfter(
const DependentSizedArrayType *T,
raw_ostream &OS) {
@@ -532,6 +576,7 @@ void TypePrinter::printDependentAddressSpaceBefore(
const DependentAddressSpaceType *T, raw_ostream &OS) {
printBefore(T->getPointeeType(), OS);
}
+
void TypePrinter::printDependentAddressSpaceAfter(
const DependentAddressSpaceType *T, raw_ostream &OS) {
OS << " __attribute__((address_space(";
@@ -546,6 +591,7 @@ void TypePrinter::printDependentSizedExtVectorBefore(
raw_ostream &OS) {
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printDependentSizedExtVectorAfter(
const DependentSizedExtVectorType *T,
raw_ostream &OS) {
@@ -592,14 +638,64 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
}
}
}
+
void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) {
printAfter(T->getElementType(), OS);
-}
+}
+
+void TypePrinter::printDependentVectorBefore(
+ const DependentVectorType *T, raw_ostream &OS) {
+ switch (T->getVectorKind()) {
+ case VectorType::AltiVecPixel:
+ OS << "__vector __pixel ";
+ break;
+ case VectorType::AltiVecBool:
+ OS << "__vector __bool ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::AltiVecVector:
+ OS << "__vector ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::NeonVector:
+ OS << "__attribute__((neon_vector_type(";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << "))) ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::NeonPolyVector:
+ OS << "__attribute__((neon_polyvector_type(";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << "))) ";
+ printBefore(T->getElementType(), OS);
+ break;
+ case VectorType::GenericVector: {
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__vector_size__(";
+ if (T->getSizeExpr())
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ OS << ")))) ";
+ printBefore(T->getElementType(), OS);
+ break;
+ }
+ }
+}
+
+void TypePrinter::printDependentVectorAfter(
+ const DependentVectorType *T, raw_ostream &OS) {
+ printAfter(T->getElementType(), OS);
+}
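// The GenericVector arm above prints a dependent vector back in its written
// form, e.g. (assuming a size expression N and element type T):
//
//   __attribute__((__vector_size__(N * sizeof(T)))) T
//
// since the element count cannot be computed before instantiation.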
void TypePrinter::printExtVectorBefore(const ExtVectorType *T,
raw_ostream &OS) {
printBefore(T->getElementType(), OS);
}
+
void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
printAfter(T->getElementType(), OS);
OS << " __attribute__((ext_vector_type(";
@@ -611,7 +707,6 @@ void
FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
const PrintingPolicy &Policy)
const {
-
if (hasDynamicExceptionSpec()) {
OS << " throw(";
if (getExceptionSpecType() == EST_MSAny)
@@ -626,7 +721,9 @@ FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
OS << ')';
} else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
OS << " noexcept";
- if (getExceptionSpecType() == EST_ComputedNoexcept) {
+    // FIXME: Is it useful to print out the expression for a non-dependent
+ // noexcept specification?
+ if (isComputedNoexcept(getExceptionSpecType())) {
OS << '(';
if (getNoexceptExpr())
getNoexceptExpr()->printPretty(OS, nullptr, Policy);
@@ -650,7 +747,7 @@ void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T,
}
}
-llvm::StringRef clang::getParameterABISpelling(ParameterABI ABI) {
+StringRef clang::getParameterABISpelling(ParameterABI ABI) {
switch (ABI) {
case ParameterABI::Ordinary:
llvm_unreachable("asking for spelling of ordinary parameter ABI");
@@ -801,6 +898,8 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
<< Info.getRegParm() << ")))";
if (Info.getNoCallerSavedRegs())
OS << " __attribute__((no_caller_saved_registers))";
+ if (Info.getNoCfCheck())
+ OS << " __attribute__((nocf_check))";
}
void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
@@ -811,6 +910,7 @@ void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
if (!PrevPHIsEmpty.get())
OS << '(';
}
+
void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T,
raw_ostream &OS) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
@@ -840,13 +940,15 @@ void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
raw_ostream &OS) {
printTypeSpec(T->getDecl(), OS);
}
+
void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
printTypeSpec(T->getDecl(), OS);
}
-void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) { }
+
+void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) {}
void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
raw_ostream &OS) {
@@ -855,8 +957,9 @@ void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy);
spaceBeforePlaceHolder(OS);
}
+
void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) {
OS << "typeof(";
@@ -864,7 +967,8 @@ void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) {
OS << ')';
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printTypeOfAfter(const TypeOfType *T, raw_ostream &OS) { }
+
+void TypePrinter::printTypeOfAfter(const TypeOfType *T, raw_ostream &OS) {}
void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) {
OS << "decltype(";
@@ -873,7 +977,8 @@ void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) {
OS << ')';
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) { }
+
+void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) {}
void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
raw_ostream &OS) {
@@ -890,6 +995,7 @@ void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
printBefore(T->getBaseType(), OS);
}
+
void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
@@ -915,6 +1021,7 @@ void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
spaceBeforePlaceHolder(OS);
}
}
+
void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
// If the type has been deduced, do not print 'auto'.
if (!T->getDeducedType().isNull())
@@ -932,6 +1039,7 @@ void TypePrinter::printDeducedTemplateSpecializationBefore(
spaceBeforePlaceHolder(OS);
}
}
+
void TypePrinter::printDeducedTemplateSpecializationAfter(
const DeducedTemplateSpecializationType *T, raw_ostream &OS) {
// If the type has been deduced, print the deduced type.
@@ -947,7 +1055,8 @@ void TypePrinter::printAtomicBefore(const AtomicType *T, raw_ostream &OS) {
OS << ')';
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { }
+
+void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) {}
void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
@@ -961,15 +1070,15 @@ void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {
-}
+void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {}
+
/// Appends the given scope to the end of a string.
void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
if (DC->isTranslationUnit()) return;
if (DC->isFunctionOrMethod()) return;
AppendScope(DC->getParent(), OS);
- if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
+ if (const auto *NS = dyn_cast<NamespaceDecl>(DC)) {
if (Policy.SuppressUnwrittenScope &&
(NS->isAnonymousNamespace() || NS->isInline()))
return;
@@ -977,14 +1086,13 @@ void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
OS << NS->getName() << "::";
else
OS << "(anonymous namespace)::";
- } else if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ } else if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
IncludeStrongLifetimeRAII Strong(Policy);
OS << Spec->getIdentifier()->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
printTemplateArgumentList(OS, TemplateArgs.asArray(), Policy);
OS << "::";
- } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ } else if (const auto *Tag = dyn_cast<TagDecl>(DC)) {
if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
OS << Typedef->getIdentifier()->getName() << "::";
else if (Tag->getIdentifier())
@@ -1057,8 +1165,7 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
// If this is a class template specialization, print the template
// arguments.
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
ArrayRef<TemplateArgument> Args;
if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
const TemplateSpecializationType *TST =
@@ -1078,12 +1185,14 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
printTag(T->getDecl(), OS);
}
-void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) { }
+
+void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) {}
void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) {
printTag(T->getDecl(), OS);
}
-void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) { }
+
+void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) {}
void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
raw_ostream &OS) {
@@ -1093,8 +1202,9 @@ void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
spaceBeforePlaceHolder(OS);
}
+
void TypePrinter::printTemplateTypeParmAfter(const TemplateTypeParmType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printSubstTemplateTypeParmBefore(
const SubstTemplateTypeParmType *T,
@@ -1102,6 +1212,7 @@ void TypePrinter::printSubstTemplateTypeParmBefore(
IncludeStrongLifetimeRAII Strong(Policy);
printBefore(T->getReplacementType(), OS);
}
+
void TypePrinter::printSubstTemplateTypeParmAfter(
const SubstTemplateTypeParmType *T,
raw_ostream &OS) {
@@ -1115,6 +1226,7 @@ void TypePrinter::printSubstTemplateTypeParmPackBefore(
IncludeStrongLifetimeRAII Strong(Policy);
printTemplateTypeParmBefore(T->getReplacedParameter(), OS);
}
+
void TypePrinter::printSubstTemplateTypeParmPackAfter(
const SubstTemplateTypeParmPackType *T,
raw_ostream &OS) {
@@ -1131,26 +1243,39 @@ void TypePrinter::printTemplateSpecializationBefore(
printTemplateArgumentList(OS, T->template_arguments(), Policy);
spaceBeforePlaceHolder(OS);
}
+
void TypePrinter::printTemplateSpecializationAfter(
const TemplateSpecializationType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
raw_ostream &OS) {
printTemplateSpecializationBefore(T->getInjectedTST(), OS);
}
+
void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
raw_ostream &OS) {
+ if (Policy.IncludeTagDefinition && T->getOwnedTagDecl()) {
+ TagDecl *OwnedTagDecl = T->getOwnedTagDecl();
+ assert(OwnedTagDecl->getTypeForDecl() == T->getNamedType().getTypePtr() &&
+ "OwnedTagDecl expected to be a declaration for the type");
+ PrintingPolicy SubPolicy = Policy;
+ SubPolicy.IncludeTagDefinition = false;
+ OwnedTagDecl->print(OS, SubPolicy, Indentation);
+ spaceBeforePlaceHolder(OS);
+ return;
+ }
+
// The tag definition will take care of these.
if (!Policy.IncludeTagDefinition)
{
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
if (T->getKeyword() != ETK_None)
OS << " ";
- NestedNameSpecifier* Qualifier = T->getQualifier();
+ NestedNameSpecifier *Qualifier = T->getQualifier();
if (Qualifier)
Qualifier->print(OS, Policy);
}
@@ -1158,8 +1283,11 @@ void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
ElaboratedTypePolicyRAII PolicyRAII(Policy);
printBefore(T->getNamedType(), OS);
}
+
void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
raw_ostream &OS) {
+ if (Policy.IncludeTagDefinition && T->getOwnedTagDecl())
+ return;
ElaboratedTypePolicyRAII PolicyRAII(Policy);
printAfter(T->getNamedType(), OS);
}
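// Sketch of the IncludeTagDefinition path (hypothetical input, not from this
// patch): for a declaration such as
//
//   struct S { int X; } *P;
//
// printElaboratedBefore emits the full "struct S { int X; }" definition via
// OwnedTagDecl->print() and returns early; the early return added above keeps
// printElaboratedAfter from printing anything further for the same type.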
@@ -1171,6 +1299,7 @@ void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) {
} else
printBefore(T->getInnerType(), OS);
}
+
void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) {
if (!HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType())) {
OS << ')';
@@ -1190,8 +1319,9 @@ void TypePrinter::printDependentNameBefore(const DependentNameType *T,
OS << T->getIdentifier()->getName();
spaceBeforePlaceHolder(OS);
}
+
void TypePrinter::printDependentNameAfter(const DependentNameType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printDependentTemplateSpecializationBefore(
const DependentTemplateSpecializationType *T, raw_ostream &OS) {
@@ -1209,12 +1339,13 @@ void TypePrinter::printDependentTemplateSpecializationBefore(
}
void TypePrinter::printDependentTemplateSpecializationAfter(
- const DependentTemplateSpecializationType *T, raw_ostream &OS) { }
+ const DependentTemplateSpecializationType *T, raw_ostream &OS) {}
void TypePrinter::printPackExpansionBefore(const PackExpansionType *T,
raw_ostream &OS) {
printBefore(T->getPattern(), OS);
}
+
void TypePrinter::printPackExpansionAfter(const PackExpansionType *T,
raw_ostream &OS) {
printAfter(T->getPattern(), OS);
@@ -1323,9 +1454,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
OS << ')';
break;
- case AttributedType::attr_vector_size: {
+ case AttributedType::attr_vector_size:
OS << "__vector_size__(";
- if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) {
+ if (const auto *vector = T->getEquivalentType()->getAs<VectorType>()) {
OS << vector->getNumElements();
OS << " * sizeof(";
print(vector->getElementType(), OS, StringRef());
@@ -1333,7 +1464,6 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
}
OS << ')';
break;
- }
case AttributedType::attr_neon_vector_type:
case AttributedType::attr_neon_polyvector_type: {
@@ -1341,7 +1471,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
OS << "neon_vector_type(";
else
OS << "neon_polyvector_type(";
- const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
+ const auto *vector = T->getEquivalentType()->getAs<VectorType>();
OS << vector->getNumElements();
OS << ')';
break;
@@ -1396,7 +1526,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
// FIXME: When Sema learns to form this AttributedType, avoid printing the
// attribute again in printFunctionProtoAfter.
case AttributedType::attr_noreturn: OS << "noreturn"; break;
-
+ case AttributedType::attr_nocf_check: OS << "nocf_check"; break;
case AttributedType::attr_cdecl: OS << "cdecl"; break;
case AttributedType::attr_fastcall: OS << "fastcall"; break;
case AttributedType::attr_stdcall: OS << "stdcall"; break;
@@ -1418,10 +1548,12 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
OS << ')';
break;
}
+
case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break;
case AttributedType::attr_preserve_most:
OS << "preserve_most";
break;
+
case AttributedType::attr_preserve_all:
OS << "preserve_all";
break;
@@ -1434,8 +1566,9 @@ void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
OS << T->getDecl()->getName();
spaceBeforePlaceHolder(OS);
}
+
void TypePrinter::printObjCInterfaceAfter(const ObjCInterfaceType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printObjCTypeParamBefore(const ObjCTypeParamType *T,
raw_ostream &OS) {
@@ -1457,7 +1590,7 @@ void TypePrinter::printObjCTypeParamBefore(const ObjCTypeParamType *T,
}
void TypePrinter::printObjCTypeParamAfter(const ObjCTypeParamType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T,
raw_ostream &OS) {
@@ -1499,6 +1632,7 @@ void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T,
spaceBeforePlaceHolder(OS);
}
+
void TypePrinter::printObjCObjectAfter(const ObjCObjectType *T,
raw_ostream &OS) {
if (T->qual_empty() && T->isUnspecializedAsWritten() &&
@@ -1520,7 +1654,7 @@ void TypePrinter::printObjCObjectPointerBefore(const ObjCObjectPointerType *T,
}
void TypePrinter::printObjCObjectPointerAfter(const ObjCObjectPointerType *T,
- raw_ostream &OS) { }
+ raw_ostream &OS) {}
static
const TemplateArgument &getArgument(const TemplateArgument &A) { return A; }
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index 347c516ef6a5..0a3da024f147 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -2105,8 +2105,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
const CXXMethodDecl *MD = I.second;
ThunkInfoVectorTy ThunksVector = Thunks[MD];
- std::sort(ThunksVector.begin(), ThunksVector.end(),
- [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ llvm::sort(ThunksVector.begin(), ThunksVector.end(),
+ [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
assert(LHS.Method == nullptr && RHS.Method == nullptr);
return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
});
@@ -2206,9 +2206,9 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
else
this->VTableIndices = OwningArrayRef<size_t>(VTableIndices);
- std::sort(this->VTableThunks.begin(), this->VTableThunks.end(),
- [](const VTableLayout::VTableThunkTy &LHS,
- const VTableLayout::VTableThunkTy &RHS) {
+ llvm::sort(this->VTableThunks.begin(), this->VTableThunks.end(),
+ [](const VTableLayout::VTableThunkTy &LHS,
+ const VTableLayout::VTableThunkTy &RHS) {
assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
"Different thunks should have unique indices!");
return LHS.first < RHS.first;
@@ -2223,6 +2223,7 @@ ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
ItaniumVTableContext::~ItaniumVTableContext() {}
uint64_t ItaniumVTableContext::getMethodVTableIndex(GlobalDecl GD) {
+ GD = GD.getCanonicalDecl();
MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
if (I != MethodVTableIndices.end())
return I->second;
@@ -2367,8 +2368,6 @@ namespace {
class VFTableBuilder {
public:
- typedef MicrosoftVTableContext::MethodVFTableLocation MethodVFTableLocation;
-
typedef llvm::DenseMap<GlobalDecl, MethodVFTableLocation>
MethodVFTableLocationsTy;
@@ -2398,7 +2397,7 @@ private:
MethodVFTableLocationsTy MethodVFTableLocations;
- /// \brief Does this class have an RTTI component?
+ /// Does this class have an RTTI component?
bool HasRTTIComponent = false;
/// MethodInfo - Contains information about a method in a vtable.
@@ -2505,6 +2504,8 @@ private:
for (const auto &I : MethodInfoMap) {
const CXXMethodDecl *MD = I.first;
const MethodInfo &MI = I.second;
+ assert(MD == MD->getCanonicalDecl());
+
// Skip the methods that the MostDerivedClass didn't override
// and the entries shadowed by return adjusting thunks.
if (MD->getParent() != MostDerivedClass || MI.Shadowed)
@@ -2997,7 +2998,7 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
}
// In case we need a return adjustment, we'll add a new slot for
- // the overrider. Mark the overriden method as shadowed by the new slot.
+ // the overrider. Mark the overridden method as shadowed by the new slot.
OverriddenMethodInfo.Shadowed = true;
// Force a special name mangling for a return-adjusting thunk
@@ -3344,8 +3345,8 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
PathsSorted.reserve(Paths.size());
for (auto& P : Paths)
PathsSorted.push_back(*P);
- std::sort(PathsSorted.begin(), PathsSorted.end(),
- [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
+ llvm::sort(PathsSorted.begin(), PathsSorted.end(),
+ [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
return LHS.MangledPath < RHS.MangledPath;
});
bool Changed = false;
@@ -3544,6 +3545,18 @@ static void computeFullPathsForVFTables(ASTContext &Context,
}
}
+static bool vfptrIsEarlierInMDC(const ASTRecordLayout &Layout,
+ const MethodVFTableLocation &LHS,
+ const MethodVFTableLocation &RHS) {
+ CharUnits L = LHS.VFPtrOffset;
+ CharUnits R = RHS.VFPtrOffset;
+ if (LHS.VBase)
+ L += Layout.getVBaseClassOffset(LHS.VBase);
+ if (RHS.VBase)
+ R += Layout.getVBaseClassOffset(RHS.VBase);
+ return L < R;
+}
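// Worked tie-break (hypothetical layout, not from this patch): a method
// reachable through a vfptr at offset 0 of a non-virtual base and through
// another at offset 8 in a virtual base placed at offset 16 compares as
// 0 < 8 + 16, so the non-virtual base's vfptr is "earlier in the MDC" and
// wins when locations are merged below.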
+
void MicrosoftVTableContext::computeVTableRelatedInformation(
const CXXRecordDecl *RD) {
assert(RD->isDynamicClass());
@@ -3555,14 +3568,14 @@ void MicrosoftVTableContext::computeVTableRelatedInformation(
const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap;
{
- VPtrInfoVector VFPtrs;
- computeVTablePaths(/*ForVBTables=*/false, RD, VFPtrs);
- computeFullPathsForVFTables(Context, RD, VFPtrs);
+ auto VFPtrs = llvm::make_unique<VPtrInfoVector>();
+ computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
+ computeFullPathsForVFTables(Context, RD, *VFPtrs);
VFPtrLocations[RD] = std::move(VFPtrs);
}
MethodVFTableLocationsTy NewMethodLocations;
- for (const std::unique_ptr<VPtrInfo> &VFPtr : VFPtrLocations[RD]) {
+ for (const std::unique_ptr<VPtrInfo> &VFPtr : *VFPtrLocations[RD]) {
VFTableBuilder Builder(*this, RD, *VFPtr);
VFTableIdTy id(RD, VFPtr->FullOffsetInMDC);
@@ -3574,12 +3587,15 @@ void MicrosoftVTableContext::computeVTableRelatedInformation(
EmptyAddressPointsMap);
Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
for (const auto &Loc : Builder.vtable_locations()) {
- GlobalDecl GD = Loc.first;
- MethodVFTableLocation NewLoc = Loc.second;
- auto M = NewMethodLocations.find(GD);
- if (M == NewMethodLocations.end() || NewLoc < M->second)
- NewMethodLocations[GD] = NewLoc;
+ auto Insert = NewMethodLocations.insert(Loc);
+ if (!Insert.second) {
+ const MethodVFTableLocation &NewLoc = Loc.second;
+ MethodVFTableLocation &OldLoc = Insert.first->second;
+ if (vfptrIsEarlierInMDC(Layout, NewLoc, OldLoc))
+ OldLoc = NewLoc;
+ }
}
}
@@ -3704,7 +3720,7 @@ MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) {
computeVTableRelatedInformation(RD);
assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations");
- return VFPtrLocations[RD];
+ return *VFPtrLocations[RD];
}
const VTableLayout &
@@ -3717,13 +3733,15 @@ MicrosoftVTableContext::getVFTableLayout(const CXXRecordDecl *RD,
return *VFTableLayouts[id];
}
-const MicrosoftVTableContext::MethodVFTableLocation &
+MethodVFTableLocation
MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) {
assert(cast<CXXMethodDecl>(GD.getDecl())->isVirtual() &&
"Only use this method for virtual methods or dtors");
if (isa<CXXDestructorDecl>(GD.getDecl()))
assert(GD.getDtorType() == Dtor_Deleting);
+ GD = GD.getCanonicalDecl();
+
MethodVFTableLocationsTy::iterator I = MethodVFTableLocations.find(GD);
if (I != MethodVFTableLocations.end())
return I->second;
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index 02aee4b46ddd..63f8395b8277 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -145,17 +145,22 @@ public:
ScopedIncrement ScopedDepth(&CurrentDepth);
return (DeclNode == nullptr) || traverse(*DeclNode);
}
- bool TraverseStmt(Stmt *StmtNode) {
+ bool TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue = nullptr) {
+ // If we need to keep track of the depth, we can't perform data recursion.
+ if (CurrentDepth == 0 || (CurrentDepth <= MaxDepth && MaxDepth < INT_MAX))
+ Queue = nullptr;
+
ScopedIncrement ScopedDepth(&CurrentDepth);
- const Stmt *StmtToTraverse = StmtNode;
- if (Traversal ==
- ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses) {
- const Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode);
- if (ExprNode) {
+ Stmt *StmtToTraverse = StmtNode;
+ if (Traversal == ASTMatchFinder::TK_IgnoreImplicitCastsAndParentheses) {
+ if (Expr *ExprNode = dyn_cast_or_null<Expr>(StmtNode))
StmtToTraverse = ExprNode->IgnoreParenImpCasts();
- }
}
- return (StmtToTraverse == nullptr) || traverse(*StmtToTraverse);
+ if (!StmtToTraverse)
+ return true;
+ if (!match(*StmtToTraverse))
+ return false;
+ return VisitorBase::TraverseStmt(StmtToTraverse, Queue);
}
// We assume that the QualType and the contained type are on the same
// hierarchy level. Thus, we try to match either of them.
@@ -378,7 +383,7 @@ public:
}
bool TraverseDecl(Decl *DeclNode);
- bool TraverseStmt(Stmt *StmtNode);
+ bool TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue = nullptr);
bool TraverseType(QualType TypeNode);
bool TraverseTypeLoc(TypeLoc TypeNode);
bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
@@ -506,7 +511,7 @@ private:
TimeBucketRegion() : Bucket(nullptr) {}
~TimeBucketRegion() { setBucket(nullptr); }
- /// \brief Start timing for \p NewBucket.
+ /// Start timing for \p NewBucket.
///
/// If there was a bucket already set, it will finish the timing for that
/// other bucket.
@@ -529,7 +534,7 @@ private:
llvm::TimeRecord *Bucket;
};
- /// \brief Runs all the \p Matchers on \p Node.
+ /// Runs all the \p Matchers on \p Node.
///
/// Used by \c matchDispatch() below.
template <typename T, typename MC>
@@ -585,7 +590,7 @@ private:
}
/// @{
- /// \brief Overloads to pair the different node types to their matchers.
+ /// Overloads to pair the different node types to their matchers.
void matchDispatch(const Decl *Node) {
return matchWithFilter(ast_type_traits::DynTypedNode::create(*Node));
}
@@ -747,14 +752,14 @@ private:
return false;
}
- /// \brief Bucket to record map.
+ /// Bucket to record map.
///
/// Used to get the appropriate bucket for each matcher.
llvm::StringMap<llvm::TimeRecord> TimeByBucket;
const MatchFinder::MatchersByType *Matchers;
- /// \brief Filtered list of matcher indices for each matcher kind.
+ /// Filtered list of matcher indices for each matcher kind.
///
/// \c Decl and \c Stmt toplevel matchers usually apply to a specific node
/// kind (and derived kinds) so it is a waste to try every matcher on every
@@ -841,12 +846,12 @@ bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
return RecursiveASTVisitor<MatchASTVisitor>::TraverseDecl(DeclNode);
}
-bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode) {
+bool MatchASTVisitor::TraverseStmt(Stmt *StmtNode, DataRecursionQueue *Queue) {
if (!StmtNode) {
return true;
}
match(*StmtNode);
- return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseStmt(StmtNode, Queue);
}
bool MatchASTVisitor::TraverseType(QualType TypeNode) {
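
The extra DataRecursionQueue parameter lets RecursiveASTVisitor traverse statements iteratively; the matcher clears Queue only when it must observe every recursion level to track depth. A minimal sketch (not the matcher code itself) of overriding the hook while keeping data recursion available; MyVisitor is ours:

#include "clang/AST/RecursiveASTVisitor.h"
using namespace clang;

class MyVisitor : public RecursiveASTVisitor<MyVisitor> {
public:
  bool TraverseStmt(Stmt *S, DataRecursionQueue *Queue = nullptr) {
    // Per-node work goes here; forwarding Queue lets the base class choose
    // between iterative (queued) and ordinary recursive traversal.
    return RecursiveASTVisitor<MyVisitor>::TraverseStmt(S, Queue);
  }
};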
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index 0bcdd8e32804..9cea2f5efc5b 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -38,6 +38,16 @@
namespace clang {
namespace ast_matchers {
+
+AST_MATCHER_P(ObjCMessageExpr, hasAnySelectorMatcher, std::vector<std::string>,
+ Matches) {
+ std::string SelString = Node.getSelector().getAsString();
+ for (const std::string &S : Matches)
+ if (S == SelString)
+ return true;
+ return false;
+}
+
namespace internal {
bool NotUnaryOperator(const ast_type_traits::DynTypedNode &DynNode,
@@ -108,7 +118,7 @@ private:
const IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher;
};
-/// \brief A matcher that always returns true.
+/// A matcher that always returns true.
///
/// We only ever need one instance of this matcher, so we create a global one
/// and reuse it to reduce the overhead of the matcher and increase the chance
@@ -315,12 +325,22 @@ bool AnyOfVariadicOperator(const ast_type_traits::DynTypedNode &DynNode,
return false;
}
-Matcher<NamedDecl> hasAnyNameFunc(ArrayRef<const StringRef *> NameRefs) {
+inline static
+std::vector<std::string> vectorFromRefs(ArrayRef<const StringRef *> NameRefs) {
std::vector<std::string> Names;
for (auto *Name : NameRefs)
Names.emplace_back(*Name);
- return internal::Matcher<NamedDecl>(
- new internal::HasNameMatcher(std::move(Names)));
+ return Names;
+}
+
+Matcher<NamedDecl> hasAnyNameFunc(ArrayRef<const StringRef *> NameRefs) {
+ std::vector<std::string> Names = vectorFromRefs(NameRefs);
+ return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
+}
+
+Matcher<ObjCMessageExpr> hasAnySelectorFunc(
+ ArrayRef<const StringRef *> NameRefs) {
+ return hasAnySelectorMatcher(vectorFromRefs(NameRefs));
}
HasNameMatcher::HasNameMatcher(std::vector<std::string> N)
@@ -393,7 +413,8 @@ public:
/// Return true if there are still any patterns left.
bool consumeNameSuffix(StringRef NodeName, bool CanSkip) {
for (size_t I = 0; I < Patterns.size();) {
- if (internal::consumeNameSuffix(Patterns[I].P, NodeName) ||
+ if (::clang::ast_matchers::internal::consumeNameSuffix(Patterns[I].P,
+ NodeName) ||
CanSkip) {
++I;
} else {
@@ -527,6 +548,8 @@ bool HasNameMatcher::matchesNode(const NamedDecl &Node) const {
} // end namespace internal
+const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt>
+ autoreleasePoolStmt;
const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl;
@@ -605,6 +628,8 @@ const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
+ blockDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
@@ -654,6 +679,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
@@ -739,6 +765,9 @@ const internal::VariadicOperatorMatcherFunc<
const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName = {};
+const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef,
+ internal::hasAnySelectorFunc>
+ hasAnySelector = {};
const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has = {};
const internal::ArgumentAdaptingMatcherFunc<internal::HasDescendantMatcher>
hasDescendant = {};
@@ -772,6 +801,7 @@ const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
const AstTypeMatcher<VariableArrayType> variableArrayType;
const AstTypeMatcher<AtomicType> atomicType;
const AstTypeMatcher<AutoType> autoType;
+const AstTypeMatcher<DecltypeType> decltypeType;
const AstTypeMatcher<FunctionType> functionType;
const AstTypeMatcher<FunctionProtoType> functionProtoType;
const AstTypeMatcher<ParenType> parenType;
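
hasAnySelector mirrors hasAnyName: the variadic string arguments are collected by vectorFromRefs and compared against the message's selector spelling. A hedged usage sketch; the selector names and the binding "msg" are ours:

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
using namespace clang::ast_matchers;

void addSelectorMatcher(MatchFinder &Finder, MatchFinder::MatchCallback *CB) {
  // Matches ObjC message sends whose selector is either of the given names.
  Finder.addMatcher(
      objcMessageExpr(hasAnySelector("methodA", "methodB:")).bind("msg"), CB);
}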
diff --git a/lib/ASTMatchers/Dynamic/Marshallers.h b/lib/ASTMatchers/Dynamic/Marshallers.h
index af90e2c7eca1..c6c89351afd3 100644
--- a/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
/// \file
-/// \brief Functions templates and classes to wrap matcher construct functions.
+/// Function templates and classes to wrap matcher construct functions.
///
/// A collection of template functions and classes that provide a generic
/// marshalling layer on top of matcher construct functions.
@@ -47,7 +47,7 @@ namespace ast_matchers {
namespace dynamic {
namespace internal {
-/// \brief Helper template class to just from argument type to the right is/get
+/// Helper template class to map from argument type to the right is/get
/// functions in VariantValue.
/// Used to verify and extract the matcher arguments below.
template <class T> struct ArgTypeTraits;
@@ -166,7 +166,7 @@ public:
}
};
-/// \brief Matcher descriptor interface.
+/// Matcher descriptor interface.
///
/// Provides a \c create() method that constructs the matcher from the provided
/// arguments, and various other methods for type introspection.
@@ -222,7 +222,7 @@ inline bool isRetKindConvertibleTo(
return false;
}
-/// \brief Simple callback implementation. Marshaller and function are provided.
+/// Simple callback implementation. Marshaller and function are provided.
///
/// This class wraps a function of arbitrary signature and a marshaller
/// function into a MatcherDescriptor.
@@ -279,7 +279,7 @@ private:
const std::vector<ArgKind> ArgKinds;
};
-/// \brief Helper methods to extract and merge all possible typed matchers
+/// Helper methods to extract and merge all possible typed matchers
/// out of the polymorphic object.
template <class PolyMatcher>
static void mergePolyMatchers(const PolyMatcher &Poly,
@@ -293,7 +293,7 @@ static void mergePolyMatchers(const PolyMatcher &Poly,
mergePolyMatchers(Poly, Out, typename TypeList::tail());
}
-/// \brief Convert the return values of the functions into a VariantMatcher.
+/// Convert the return values of the functions into a VariantMatcher.
///
/// There are 2 cases right now: The return value is a Matcher<T> or is a
/// polymorphic matcher. For the former, we just construct the VariantMatcher.
@@ -347,7 +347,7 @@ struct BuildReturnTypeVector<ast_matchers::internal::BindableMatcher<T>> {
}
};
-/// \brief Variadic marshaller function.
+/// Variadic marshaller function.
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
VariantMatcher
@@ -383,7 +383,7 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
return Out;
}
-/// \brief Matcher descriptor for variadic functions.
+/// Matcher descriptor for variadic functions.
///
/// This class simply wraps a VariadicFunction with the right signature to export
/// it as a MatcherDescriptor.
@@ -436,7 +436,7 @@ private:
const ArgKind ArgsKind;
};
-/// \brief Return CK_Trivial when appropriate for VariadicDynCastAllOfMatchers.
+/// Return CK_Trivial when appropriate for VariadicDynCastAllOfMatchers.
class DynCastAllOfMatcherDescriptor : public VariadicFuncMatcherDescriptor {
public:
template <typename BaseT, typename DerivedT>
@@ -470,7 +470,7 @@ private:
const ast_type_traits::ASTNodeKind DerivedKind;
};
-/// \brief Helper macros to check the arguments on all marshaller functions.
+/// Helper macros to check the arguments on all marshaller functions.
#define CHECK_ARG_COUNT(count) \
if (Args.size() != count) { \
Error->addError(NameRange, Error->ET_RegistryWrongArgCount) \
@@ -486,7 +486,7 @@ private:
return VariantMatcher(); \
}
-/// \brief 0-arg marshaller function.
+/// 0-arg marshaller function.
template <typename ReturnType>
static VariantMatcher matcherMarshall0(void (*Func)(), StringRef MatcherName,
SourceRange NameRange,
@@ -497,7 +497,7 @@ static VariantMatcher matcherMarshall0(void (*Func)(), StringRef MatcherName,
return outvalueToVariantMatcher(reinterpret_cast<FuncType>(Func)());
}
-/// \brief 1-arg marshaller function.
+/// 1-arg marshaller function.
template <typename ReturnType, typename ArgType1>
static VariantMatcher matcherMarshall1(void (*Func)(), StringRef MatcherName,
SourceRange NameRange,
@@ -510,7 +510,7 @@ static VariantMatcher matcherMarshall1(void (*Func)(), StringRef MatcherName,
ArgTypeTraits<ArgType1>::get(Args[0].Value)));
}
-/// \brief 2-arg marshaller function.
+/// 2-arg marshaller function.
template <typename ReturnType, typename ArgType1, typename ArgType2>
static VariantMatcher matcherMarshall2(void (*Func)(), StringRef MatcherName,
SourceRange NameRange,
@@ -528,7 +528,7 @@ static VariantMatcher matcherMarshall2(void (*Func)(), StringRef MatcherName,
#undef CHECK_ARG_COUNT
#undef CHECK_ARG_TYPE
-/// \brief Helper class used to collect all the possible overloads of an
+/// Helper class used to collect all the possible overloads of an
/// argument adaptative matcher function.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename FromTypes, typename ToTypes>
@@ -544,10 +544,10 @@ private:
using AdaptativeFunc = ast_matchers::internal::ArgumentAdaptingMatcherFunc<
ArgumentAdapterT, FromTypes, ToTypes>;
- /// \brief End case for the recursion
+ /// End case for the recursion
static void collect(ast_matchers::internal::EmptyTypeList) {}
- /// \brief Recursive case. Get the overload for the head of the list, and
+ /// Recursive case. Get the overload for the head of the list, and
/// recurse to the tail.
template <typename FromTypeList>
inline void collect(FromTypeList);
@@ -556,7 +556,7 @@ private:
std::vector<std::unique_ptr<MatcherDescriptor>> &Out;
};
-/// \brief MatcherDescriptor that wraps multiple "overloads" of the same
+/// MatcherDescriptor that wraps multiple "overloads" of the same
/// matcher.
///
/// It will try every overload and generate appropriate errors for when none or
@@ -635,7 +635,7 @@ private:
std::vector<std::unique_ptr<MatcherDescriptor>> Overloads;
};
-/// \brief Variadic operator marshaller function.
+/// Variadic operator marshaller function.
class VariadicOperatorMatcherDescriptor : public MatcherDescriptor {
public:
using VarOp = DynTypedMatcher::VariadicOperator;
@@ -701,7 +701,7 @@ private:
/// Helper functions to select the appropriate marshaller functions.
/// They detect the number of arguments, arguments types and return type.
-/// \brief 0-arg overload
+/// 0-arg overload
template <typename ReturnType>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
@@ -712,7 +712,7 @@ makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
MatcherName, RetTypes, None);
}
-/// \brief 1-arg overload
+/// 1-arg overload
template <typename ReturnType, typename ArgType1>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1), StringRef MatcherName) {
@@ -724,7 +724,7 @@ makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1), StringRef MatcherName) {
reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AK);
}
-/// \brief 2-arg overload
+/// 2-arg overload
template <typename ReturnType, typename ArgType1, typename ArgType2>
std::unique_ptr<MatcherDescriptor>
makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
@@ -738,7 +738,7 @@ makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AKs);
}
-/// \brief Variadic overload.
+/// Variadic overload.
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
@@ -747,7 +747,7 @@ std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
return llvm::make_unique<VariadicFuncMatcherDescriptor>(VarFunc, MatcherName);
}
-/// \brief Overload for VariadicDynCastAllOfMatchers.
+/// Overload for VariadicDynCastAllOfMatchers.
///
/// Not strictly necessary, but DynCastAllOfMatcherDescriptor gives us better
/// completion results for that type of matcher.
@@ -759,7 +759,7 @@ std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
return llvm::make_unique<DynCastAllOfMatcherDescriptor>(VarFunc, MatcherName);
}
-/// \brief Argument adaptative overload.
+/// Argument adaptative overload.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename FromTypes, typename ToTypes>
std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
@@ -782,7 +782,7 @@ inline void AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes,
collect(typename FromTypeList::tail());
}
-/// \brief Variadic operator overload.
+/// Variadic operator overload.
template <unsigned MinCount, unsigned MaxCount>
std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
ast_matchers::internal::VariadicOperatorMatcherFunc<MinCount, MaxCount>
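
The makeMatcherAutoMarshall overloads above pick a marshaller from nothing but the arity and parameter types of the factory's function-pointer type. A standalone sketch of that overload-dispatch pattern, with types of our own:

#include <string>
#include <utility>

struct Descriptor { std::string Name; int Arity; };

// 0-arg overload: chosen when the matcher factory takes no arguments.
template <typename R>
Descriptor makeDescriptor(R (*)(), std::string Name) {
  return Descriptor{std::move(Name), 0};
}

// 1-arg overload: chosen for unary factories, mirroring matcherMarshall1.
template <typename R, typename A1>
Descriptor makeDescriptor(R (*)(A1), std::string Name) {
  return Descriptor{std::move(Name), 1};
}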
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index 89e1a2695860..da8df907ba7f 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Recursive parser implementation for the matcher expression grammar.
+/// Recursive parser implementation for the matcher expression grammar.
///
//===----------------------------------------------------------------------===//
@@ -34,9 +34,9 @@ namespace clang {
namespace ast_matchers {
namespace dynamic {
-/// \brief Simple structure to hold information for one token from the parser.
+/// Simple structure to hold information for one token from the parser.
struct Parser::TokenInfo {
- /// \brief Different possible tokens.
+ /// Different possible tokens.
enum TokenKind {
TK_Eof,
TK_OpenParen,
@@ -50,7 +50,7 @@ struct Parser::TokenInfo {
TK_CodeCompletion
};
- /// \brief Some known identifiers.
+ /// Some known identifiers.
static const char* const ID_Bind;
TokenInfo() = default;
@@ -63,7 +63,7 @@ struct Parser::TokenInfo {
const char* const Parser::TokenInfo::ID_Bind = "bind";
-/// \brief Simple tokenizer for the parser.
+/// Simple tokenizer for the parser.
class Parser::CodeTokenizer {
public:
explicit CodeTokenizer(StringRef MatcherCode, Diagnostics *Error)
@@ -78,10 +78,10 @@ public:
NextToken = getNextToken();
}
- /// \brief Returns but doesn't consume the next token.
+ /// Returns but doesn't consume the next token.
const TokenInfo &peekNextToken() const { return NextToken; }
- /// \brief Consumes and returns the next token.
+ /// Consumes and returns the next token.
TokenInfo consumeNextToken() {
TokenInfo ThisToken = NextToken;
NextToken = getNextToken();
@@ -185,7 +185,7 @@ private:
return Result;
}
- /// \brief Consume an unsigned and float literal.
+ /// Consume an unsigned or float literal.
void consumeNumberLiteral(TokenInfo *Result) {
bool isFloatingLiteral = false;
unsigned Length = 1;
@@ -238,7 +238,7 @@ private:
Result->Kind = TokenInfo::TK_Error;
}
- /// \brief Consume a string literal.
+ /// Consume a string literal.
///
/// \c Code must be positioned at the start of the literal (the opening
/// quote). Consumed until it finds the same closing quote character.
@@ -272,7 +272,7 @@ private:
Result->Kind = TokenInfo::TK_Error;
}
- /// \brief Consume all leading whitespace from \c Code.
+ /// Consume all leading whitespace from \c Code.
void consumeWhitespace() {
while (!Code.empty() && isWhitespace(Code[0])) {
if (Code[0] == '\n') {
@@ -326,7 +326,7 @@ struct Parser::ScopedContextEntry {
}
};
-/// \brief Parse expressions that start with an identifier.
+/// Parse expressions that start with an identifier.
///
/// This function can parse named values and matchers.
/// In case of failure it will try to determine the user's intent to give
@@ -359,7 +359,7 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
return parseMatcherExpressionImpl(NameToken, Value);
}
-/// \brief Parse and validate a matcher expression.
+/// Parse and validate a matcher expression.
/// \return \c true on success, in which case \c Value has the matcher parsed.
/// If the input is malformed, or some argument has an error, it
/// returns \c false.
@@ -524,7 +524,7 @@ void Parser::addExpressionCompletions() {
}
}
-/// \brief Parse an <Expresssion>
+/// Parse an <Expression>
bool Parser::parseExpressionImpl(VariantValue *Value) {
switch (Tokenizer->nextTokenKind()) {
case TokenInfo::TK_Literal:
@@ -619,8 +619,8 @@ Parser::completeExpression(StringRef Code, unsigned CompletionOffset, Sema *S,
P.parseExpressionImpl(&Dummy);
// Sort by specificity, then by name.
- std::sort(P.Completions.begin(), P.Completions.end(),
- [](const MatcherCompletion &A, const MatcherCompletion &B) {
+ llvm::sort(P.Completions.begin(), P.Completions.end(),
+ [](const MatcherCompletion &A, const MatcherCompletion &B) {
if (A.Specificity != B.Specificity)
return A.Specificity > B.Specificity;
return A.TypedText < B.TypedText;
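
llvm::sort is a drop-in replacement for std::sort that, in EXPENSIVE_CHECKS builds, shuffles the range before sorting so strict-weak-ordering bugs and non-deterministic comparators surface early. The call shape, with a comparator of ours:

#include "llvm/ADT/STLExtras.h"
#include <vector>

void sortBySpecificity(std::vector<int> &Specificities) {
  llvm::sort(Specificities.begin(), Specificities.end(),
             [](int A, int B) { return A > B; }); // most specific first
}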
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index 2b7bb7a2120d..4d2d76f6a75b 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
/// \file
-/// \brief Registry map populated at static initialization time.
+/// Registry map populated at static initialization time.
//
//===----------------------------------------------------------------------===//
@@ -90,7 +90,7 @@ void RegistryMaps::registerMatcher(
REGISTER_MATCHER_OVERLOAD(name); \
} while (false)
-/// \brief Generate a registry map with all the known matchers.
+/// Generate a registry map with all the known matchers.
RegistryMaps::RegistryMaps() {
// TODO: Here is the list of the missing matchers, grouped by reason.
//
@@ -134,8 +134,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(atomicExpr);
REGISTER_MATCHER(atomicType);
REGISTER_MATCHER(autoType);
+ REGISTER_MATCHER(autoreleasePoolStmt);
REGISTER_MATCHER(binaryOperator);
REGISTER_MATCHER(binaryConditionalOperator);
+ REGISTER_MATCHER(blockDecl);
REGISTER_MATCHER(blockPointerType);
REGISTER_MATCHER(booleanType);
REGISTER_MATCHER(breakStmt);
@@ -186,6 +188,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(decayedType);
REGISTER_MATCHER(decl);
REGISTER_MATCHER(declaratorDecl);
+ REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
@@ -281,11 +284,13 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasParent);
REGISTER_MATCHER(hasQualifier);
REGISTER_MATCHER(hasRangeInit);
+ REGISTER_MATCHER(hasReceiver);
REGISTER_MATCHER(hasReceiverType);
REGISTER_MATCHER(hasReplacementType);
REGISTER_MATCHER(hasReturnValue);
REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasSelector);
+ REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
REGISTER_MATCHER(hasSizeExpr);
@@ -296,6 +301,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasTemplateArgument);
REGISTER_MATCHER(hasThen);
REGISTER_MATCHER(hasThreadStorageDuration);
+ REGISTER_MATCHER(hasTrailingReturn);
REGISTER_MATCHER(hasTrueExpression);
REGISTER_MATCHER(hasTypeLoc);
REGISTER_MATCHER(hasUnaryOperand);
@@ -321,6 +327,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isAnyPointer);
REGISTER_MATCHER(isArray);
REGISTER_MATCHER(isArrow);
+ REGISTER_MATCHER(isAssignmentOperator);
REGISTER_MATCHER(isBaseInitializer);
REGISTER_MATCHER(isBitField);
REGISTER_MATCHER(isCatchAll);
@@ -344,6 +351,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isImplicit);
REGISTER_MATCHER(isExpansionInFileMatching);
REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isInstanceMessage);
REGISTER_MATCHER(isInstantiated);
REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isInteger);
@@ -351,15 +359,18 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isInTemplateInstantiation);
REGISTER_MATCHER(isLambda);
REGISTER_MATCHER(isListInitialization);
+ REGISTER_MATCHER(isMain);
REGISTER_MATCHER(isMemberInitializer);
REGISTER_MATCHER(isMoveAssignmentOperator);
REGISTER_MATCHER(isMoveConstructor);
+ REGISTER_MATCHER(isNoReturn);
REGISTER_MATCHER(isNoThrow);
REGISTER_MATCHER(isOverride);
REGISTER_MATCHER(isPrivate);
REGISTER_MATCHER(isProtected);
REGISTER_MATCHER(isPublic);
REGISTER_MATCHER(isPure);
+ REGISTER_MATCHER(isScoped);
REGISTER_MATCHER(isSignedInteger);
REGISTER_MATCHER(isStaticStorageClass);
REGISTER_MATCHER(isStruct);
@@ -399,6 +410,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcImplementationDecl);
REGISTER_MATCHER(objcInterfaceDecl);
REGISTER_MATCHER(objcIvarDecl);
+ REGISTER_MATCHER(objcIvarRefExpr);
REGISTER_MATCHER(objcMessageExpr);
REGISTER_MATCHER(objcMethodDecl);
REGISTER_MATCHER(objcObjectPointerType);
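
Each REGISTER_MATCHER line makes that matcher name resolvable by the dynamic parser, and hence by tools such as clang-query. A hedged probe using the public parser entry point (canParse and the probe string are ours):

#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
#include "clang/ASTMatchers/Dynamic/Parser.h"
#include "llvm/ADT/StringRef.h"

// Returns true if every matcher name in Code is known to the registry,
// e.g. canParse("objcMessageExpr(hasAnySelector(\"methodA\"))").
static bool canParse(llvm::StringRef Code) {
  clang::ast_matchers::dynamic::Diagnostics Err;
  return clang::ast_matchers::dynamic::Parser::parseMatcherExpression(Code,
                                                                      &Err)
      .hasValue();
}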
diff --git a/lib/ASTMatchers/Dynamic/VariantValue.cpp b/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 57858d00acb4..06d95eaa7563 100644
--- a/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Polymorphic value type.
+/// Polymorphic value type.
///
//===----------------------------------------------------------------------===//
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 181edff0a03f..486fffbe1299 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -1,4 +1,4 @@
-//== AnalysisDeclContext.cpp - Analysis context for Path Sens analysis -*- C++ -*-//
+//===- AnalysisDeclContext.cpp - Analysis context for Path Sens analysis --===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,67 +7,72 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines AnalysisDeclContext, a class that manages the analysis context
-// data for path sensitive analysis.
+// This file defines AnalysisDeclContext, a class that manages the analysis
+// context data for path sensitive analysis.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/LambdaCapture.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
-#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
#include "clang/Analysis/BodyFarm.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <memory>
using namespace clang;
-typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
+using ManagedAnalysisMap = llvm::DenseMap<const void *, ManagedAnalysis *>;
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *d,
const CFG::BuildOptions &buildOptions)
- : Manager(Mgr),
- D(d),
- cfgBuildOptions(buildOptions),
- forcedBlkExprs(nullptr),
- builtCFG(false),
- builtCompleteCFG(false),
- ReferencedBlockVars(nullptr),
- ManagedAnalyses(nullptr)
-{
+ : Manager(Mgr), D(d), cfgBuildOptions(buildOptions) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *d)
-: Manager(Mgr),
- D(d),
- forcedBlkExprs(nullptr),
- builtCFG(false),
- builtCompleteCFG(false),
- ReferencedBlockVars(nullptr),
- ManagedAnalyses(nullptr)
-{
+ : Manager(Mgr), D(d) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
AnalysisDeclContextManager::AnalysisDeclContextManager(
ASTContext &ASTCtx, bool useUnoptimizedCFG, bool addImplicitDtors,
bool addInitializers, bool addTemporaryDtors, bool addLifetime,
- bool addLoopExit, bool synthesizeBodies, bool addStaticInitBranch,
- bool addCXXNewAllocator, CodeInjector *injector)
+ bool addLoopExit, bool addScopes, bool synthesizeBodies,
+ bool addStaticInitBranch, bool addCXXNewAllocator,
+ bool addRichCXXConstructors, bool markElidedCXXConstructors,
+ CodeInjector *injector)
: Injector(injector), FunctionBodyFarm(ASTCtx, injector),
SynthesizeBodies(synthesizeBodies) {
cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
@@ -76,15 +81,18 @@ AnalysisDeclContextManager::AnalysisDeclContextManager(
cfgBuildOptions.AddTemporaryDtors = addTemporaryDtors;
cfgBuildOptions.AddLifetime = addLifetime;
cfgBuildOptions.AddLoopExit = addLoopExit;
+ cfgBuildOptions.AddScopes = addScopes;
cfgBuildOptions.AddStaticInitBranches = addStaticInitBranch;
cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
+ cfgBuildOptions.AddRichCXXConstructors = addRichCXXConstructors;
+ cfgBuildOptions.MarkElidedCXXConstructors = markElidedCXXConstructors;
}
void AnalysisDeclContextManager::clear() { Contexts.clear(); }
Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
IsAutosynthesized = false;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
Stmt *Body = FD->getBody();
if (auto *CoroBody = dyn_cast_or_null<CoroutineBodyStmt>(Body))
Body = CoroBody->getBody();
@@ -97,7 +105,7 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
}
return Body;
}
- else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
Stmt *Body = MD->getBody();
if (Manager && Manager->synthesizeBodies()) {
Stmt *SynthesizedBody = Manager->getBodyFarm().getBody(MD);
@@ -107,10 +115,9 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
}
}
return Body;
- } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D))
return BD->getBody();
- else if (const FunctionTemplateDecl *FunTmpl
- = dyn_cast_or_null<FunctionTemplateDecl>(D))
+ else if (const auto *FunTmpl = dyn_cast_or_null<FunctionTemplateDecl>(D))
return FunTmpl->getTemplatedDecl()->getBody();
llvm_unreachable("unknown code decl");
@@ -139,9 +146,9 @@ static bool isSelfDecl(const VarDecl *VD) {
}
const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSelfDecl();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (const auto *BD = dyn_cast<BlockDecl>(D)) {
// See if 'self' was captured by the block.
for (const auto &I : BD->captures()) {
const VarDecl *VD = I.getVariable();
@@ -158,7 +165,7 @@ const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
if (!parent->isLambda())
return nullptr;
- for (const LambdaCapture &LC : parent->captures()) {
+ for (const auto &LC : parent->captures()) {
if (!LC.capturesVariable())
continue;
@@ -174,7 +181,7 @@ void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
if (!forcedBlkExprs)
forcedBlkExprs = new CFG::BuildOptions::ForcedBlkExprs();
// Default construct an entry for 'stmt'.
- if (const Expr *e = dyn_cast<Expr>(stmt))
+ if (const auto *e = dyn_cast<Expr>(stmt))
stmt = e->IgnoreParens();
(void) (*forcedBlkExprs)[stmt];
}
@@ -182,7 +189,7 @@ void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
const CFGBlock *
AnalysisDeclContext::getBlockForRegisteredExpression(const Stmt *stmt) {
assert(forcedBlkExprs);
- if (const Expr *e = dyn_cast<Expr>(stmt))
+ if (const auto *e = dyn_cast<Expr>(stmt))
stmt = e->IgnoreParens();
CFG::BuildOptions::ForcedBlkExprs::const_iterator itr =
forcedBlkExprs->find(stmt);
@@ -266,13 +273,13 @@ CFGReverseBlockReachabilityAnalysis *AnalysisDeclContext::getCFGReachablityAnaly
}
void AnalysisDeclContext::dumpCFG(bool ShowColors) {
- getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
+ getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
}
ParentMap &AnalysisDeclContext::getParentMap() {
if (!PM) {
PM.reset(new ParentMap(getBody()));
- if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
+ if (const auto *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
for (const auto *I : C->inits()) {
PM->addStmt(I->getInit());
}
@@ -292,7 +299,7 @@ PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
}
AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
// that has the body.
FD->hasBody(FD);
@@ -315,7 +322,7 @@ AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
const BlockInvocationContext *
AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
- const clang::BlockDecl *BD,
+ const BlockDecl *BD,
const void *ContextData) {
return getLocationContextManager().getBlockInvocationContext(this, parent,
BD, ContextData);
@@ -323,7 +330,7 @@ AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
const DeclContext *DC = D->getDeclContext()->getEnclosingNamespaceContext();
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ const auto *ND = dyn_cast<NamespaceDecl>(DC);
if (!ND)
return false;
@@ -336,7 +343,7 @@ bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
return ND->isStdNamespace();
}
-LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
+LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
assert(Manager &&
"Cannot create LocationContexts without an AnalysisDeclContextManager!");
return Manager->getLocationContextManager();
@@ -399,7 +406,7 @@ LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
llvm::FoldingSetNodeID ID;
StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
void *InsertPos;
- StackFrameContext *L =
+ auto *L =
cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
if (!L) {
L = new StackFrameContext(ctx, parent, s, blk, idx);
@@ -423,7 +430,7 @@ LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
llvm::FoldingSetNodeID ID;
BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
void *InsertPos;
- BlockInvocationContext *L =
+ auto *L =
cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
InsertPos));
if (!L) {
@@ -437,10 +444,10 @@ LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
// LocationContext methods.
//===----------------------------------------------------------------------===//
-const StackFrameContext *LocationContext::getCurrentStackFrame() const {
+const StackFrameContext *LocationContext::getStackFrame() const {
const LocationContext *LC = this;
while (LC) {
- if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC))
+ if (const auto *SFC = dyn_cast<StackFrameContext>(LC))
return SFC;
LC = LC->getParent();
}
@@ -448,7 +455,7 @@ const StackFrameContext *LocationContext::getCurrentStackFrame() const {
}
bool LocationContext::inTopFrame() const {
- return getCurrentStackFrame()->inTopFrame();
+ return getStackFrame()->inTopFrame();
}
bool LocationContext::isParentOf(const LocationContext *LC) const {
@@ -463,28 +470,53 @@ bool LocationContext::isParentOf(const LocationContext *LC) const {
return false;
}
-void LocationContext::dumpStack(raw_ostream &OS, StringRef Indent) const {
+static void printLocation(raw_ostream &OS, const SourceManager &SM,
+ SourceLocation SLoc) {
+ if (SLoc.isFileID() && SM.isInMainFile(SLoc))
+ OS << "line " << SM.getExpansionLineNumber(SLoc);
+ else
+ SLoc.print(OS, SM);
+}
+
+void LocationContext::dumpStack(
+ raw_ostream &OS, StringRef Indent, const char *NL, const char *Sep,
+ std::function<void(const LocationContext *)> printMoreInfoPerContext) const {
ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
PrintingPolicy PP(Ctx.getLangOpts());
PP.TerseOutput = 1;
+ const SourceManager &SM =
+ getAnalysisDeclContext()->getASTContext().getSourceManager();
+
unsigned Frame = 0;
for (const LocationContext *LCtx = this; LCtx; LCtx = LCtx->getParent()) {
switch (LCtx->getKind()) {
case StackFrame:
- OS << Indent << '#' << Frame++ << ' ';
- cast<StackFrameContext>(LCtx)->getDecl()->print(OS, PP);
- OS << '\n';
+ OS << Indent << '#' << Frame << ' ';
+ ++Frame;
+ if (const auto *D = dyn_cast<NamedDecl>(LCtx->getDecl()))
+ OS << "Calling " << D->getQualifiedNameAsString();
+ else
+ OS << "Calling anonymous code";
+ if (const Stmt *S = cast<StackFrameContext>(LCtx)->getCallSite()) {
+ OS << " at ";
+ printLocation(OS, SM, S->getLocStart());
+ }
break;
case Scope:
- OS << Indent << " (scope)\n";
+ OS << "Entering scope";
break;
case Block:
- OS << Indent << " (block context: "
- << cast<BlockInvocationContext>(LCtx)->getContextData()
- << ")\n";
+ OS << "Invoking block";
+ if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
+ OS << " defined at ";
+ printLocation(OS, SM, D->getLocStart());
+ }
break;
}
+ OS << NL;
+
+ printMoreInfoPerContext(LCtx);
}
}
@@ -497,25 +529,27 @@ LLVM_DUMP_METHOD void LocationContext::dumpStack() const {
//===----------------------------------------------------------------------===//
namespace {
+
class FindBlockDeclRefExprsVals : public StmtVisitor<FindBlockDeclRefExprsVals>{
- BumpVector<const VarDecl*> &BEVals;
+ BumpVector<const VarDecl *> &BEVals;
BumpVectorContext &BC;
- llvm::SmallPtrSet<const VarDecl*, 4> Visited;
- llvm::SmallPtrSet<const DeclContext*, 4> IgnoredContexts;
+ llvm::SmallPtrSet<const VarDecl *, 4> Visited;
+ llvm::SmallPtrSet<const DeclContext *, 4> IgnoredContexts;
+
public:
FindBlockDeclRefExprsVals(BumpVector<const VarDecl*> &bevals,
BumpVectorContext &bc)
- : BEVals(bevals), BC(bc) {}
+ : BEVals(bevals), BC(bc) {}
void VisitStmt(Stmt *S) {
- for (Stmt *Child : S->children())
+ for (auto *Child : S->children())
if (Child)
Visit(Child);
}
void VisitDeclRefExpr(DeclRefExpr *DR) {
// Non-local variables are also directly modified.
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
if (!VD->hasLocalStorage()) {
if (Visited.insert(VD).second)
BEVals.push_back(VD, BC);
@@ -533,15 +567,16 @@ public:
for (PseudoObjectExpr::semantics_iterator it = PE->semantics_begin(),
et = PE->semantics_end(); it != et; ++it) {
Expr *Semantic = *it;
- if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
Semantic = OVE->getSourceExpr();
Visit(Semantic);
}
}
};
-} // end anonymous namespace
-typedef BumpVector<const VarDecl*> DeclVec;
+} // namespace
+
+using DeclVec = BumpVector<const VarDecl *>;
static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
void *&Vec,
@@ -587,7 +622,7 @@ ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
// Cleanup.
//===----------------------------------------------------------------------===//
-ManagedAnalysis::~ManagedAnalysis() {}
+ManagedAnalysis::~ManagedAnalysis() = default;
AnalysisDeclContext::~AnalysisDeclContext() {
delete forcedBlkExprs;
@@ -600,7 +635,7 @@ AnalysisDeclContext::~AnalysisDeclContext() {
}
}
-LocationContext::~LocationContext() {}
+LocationContext::~LocationContext() = default;
LocationContextManager::~LocationContextManager() {
clear();
@@ -613,7 +648,5 @@ void LocationContextManager::clear() {
++I;
delete LC;
}
-
Contexts.clear();
}
-
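
The shortened constructors above rely on the removed entries (forcedBlkExprs, builtCFG, and friends) presumably becoming default member initializers in the class definition, which lives outside this diff. A minimal standalone illustration of that C++11 pattern, with types of our own:

struct Options { bool Pruned = false; };

class Context {
  Options Opts;
  bool BuiltCFG = false;       // default member initializer
  void *ForcedExprs = nullptr; // replaces repeated ctor-init-list entries
public:
  explicit Context(const Options &O) : Opts(O) {} // the rest stay defaulted
};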
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 89ca8484819d..b9fb15b2db25 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -149,7 +149,8 @@ DeclRefExpr *ASTMaker::makeDeclRefExpr(
UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
return new (C) UnaryOperator(const_cast<Expr*>(Arg), UO_Deref, Ty,
- VK_LValue, OK_Ordinary, SourceLocation());
+ VK_LValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow*/ false);
}
ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
@@ -253,21 +254,24 @@ static CallExpr *create_call_once_funcptr_call(ASTContext &C, ASTMaker M,
QualType Ty = Callback->getType();
DeclRefExpr *Call = M.makeDeclRefExpr(Callback);
- CastKind CK;
+ Expr *SubExpr;
if (Ty->isRValueReferenceType()) {
- CK = CK_LValueToRValue;
- } else {
- assert(Ty->isLValueReferenceType());
- CK = CK_FunctionToPointerDecay;
+ SubExpr = M.makeImplicitCast(
+ Call, Ty.getNonReferenceType(), CK_LValueToRValue);
+ } else if (Ty->isLValueReferenceType() &&
+ Call->getType()->isFunctionType()) {
Ty = C.getPointerType(Ty.getNonReferenceType());
+ SubExpr = M.makeImplicitCast(Call, Ty, CK_FunctionToPointerDecay);
+ } else if (Ty->isLValueReferenceType()
+ && Call->getType()->isPointerType()
+ && Call->getType()->getPointeeType()->isFunctionType()){
+ SubExpr = Call;
+ } else {
+ llvm_unreachable("Unexpected state");
}
return new (C)
- CallExpr(C, M.makeImplicitCast(Call, Ty.getNonReferenceType(), CK),
- /*args=*/CallArgs,
- /*QualType=*/C.VoidTy,
- /*ExprValueType=*/VK_RValue,
- /*SourceLocation=*/SourceLocation());
+ CallExpr(C, SubExpr, CallArgs, C.VoidTy, VK_RValue, SourceLocation());
}
static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
@@ -313,7 +317,7 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/// }
/// \endcode
static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
- DEBUG(llvm::dbgs() << "Generating body for call_once\n");
+ LLVM_DEBUG(llvm::dbgs() << "Generating body for call_once\n");
// We need at least two parameters.
if (D->param_size() < 2)
@@ -341,9 +345,9 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
auto *FlagRecordDecl = dyn_cast_or_null<RecordDecl>(FlagType->getAsTagDecl());
if (!FlagRecordDecl) {
- DEBUG(llvm::dbgs() << "Flag field is not a record: "
- << "unknown std::call_once implementation, "
- << "ignoring the call.\n");
+ LLVM_DEBUG(llvm::dbgs() << "Flag field is not a record: "
+ << "unknown std::call_once implementation, "
+ << "ignoring the call.\n");
return nullptr;
}
@@ -358,16 +362,17 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
}
if (!FlagFieldDecl) {
- DEBUG(llvm::dbgs() << "No field _M_once or __state_ found on "
- << "std::once_flag struct: unknown std::call_once "
- << "implementation, ignoring the call.");
+ LLVM_DEBUG(llvm::dbgs() << "No field _M_once or __state_ found on "
+ << "std::once_flag struct: unknown std::call_once "
+ << "implementation, ignoring the call.");
return nullptr;
}
bool isLambdaCall = CallbackRecordDecl && CallbackRecordDecl->isLambda();
if (CallbackRecordDecl && !isLambdaCall) {
- DEBUG(llvm::dbgs() << "Not supported: synthesizing body for functors when "
- << "body farming std::call_once, ignoring the call.");
+ LLVM_DEBUG(llvm::dbgs()
+ << "Not supported: synthesizing body for functors when "
+ << "body farming std::call_once, ignoring the call.");
return nullptr;
}
@@ -394,9 +399,9 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
// First two arguments are used for the flag and for the callback.
if (D->getNumParams() != CallbackFunctionType->getNumParams() + 2) {
- DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
- << "params passed to std::call_once, "
- << "ignoring the call\n");
+ LLVM_DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
+ << "params passed to std::call_once, "
+ << "ignoring the call\n");
return nullptr;
}
@@ -405,6 +410,16 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
// reference.
for (unsigned int ParamIdx = 2; ParamIdx < D->getNumParams(); ParamIdx++) {
const ParmVarDecl *PDecl = D->getParamDecl(ParamIdx);
+ if (PDecl &&
+ CallbackFunctionType->getParamType(ParamIdx - 2)
+ .getNonReferenceType()
+ .getCanonicalType() !=
+ PDecl->getType().getNonReferenceType().getCanonicalType()) {
+ LLVM_DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
+ << "params passed to std::call_once, "
+ << "ignoring the call\n");
+ return nullptr;
+ }
Expr *ParamExpr = M.makeDeclRefExpr(PDecl);
if (!CallbackFunctionType->getParamType(ParamIdx - 2)->isReferenceType()) {
QualType PTy = PDecl->getType().getNonReferenceType();
@@ -441,7 +456,8 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
/* opc=*/ UO_LNot,
/* QualType=*/ C.IntTy,
/* ExprValueKind=*/ VK_RValue,
- /* ExprObjectKind=*/ OK_Ordinary, SourceLocation());
+ /* ExprObjectKind=*/ OK_Ordinary, SourceLocation(),
+ /* CanOverflow*/ false);
// Create assignment.
BinaryOperator *FlagAssignment = M.makeAssignment(
@@ -505,7 +521,8 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
// (2) Create the assignment to the predicate.
Expr *DoneValue =
new (C) UnaryOperator(M.makeIntegerLiteral(0, C.LongTy), UO_Not, C.LongTy,
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow*/false);
BinaryOperator *B =
M.makeAssignment(
@@ -813,4 +830,3 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
return Val.getValue();
}
-
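
UnaryOperator's constructor now takes a trailing CanOverflow flag; the sites in this file pass false because logical and bitwise negation cannot overflow. A hedged helper showing the new call shape (the helper name is ours):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
using namespace clang;

// Builds !Operand; UO_LNot can never overflow, unlike e.g. UO_Minus on a
// signed operand, so CanOverflow is false here.
static UnaryOperator *makeLogicalNot(ASTContext &C, Expr *Operand) {
  return new (C) UnaryOperator(Operand, UO_LNot, C.IntTy, VK_RValue,
                               OK_Ordinary, SourceLocation(),
                               /*CanOverflow=*/false);
}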
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 714b85d3a9ff..8a3ab15458dd 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -29,6 +29,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Analysis/ConstructionContext.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/LLVM.h"
@@ -233,6 +234,13 @@ public:
assert(VarIter != 0 && "Iterator has invalid value of VarIter member");
return &Scope->Vars[VarIter - 1];
}
+
+ const VarDecl *getFirstVarInScope() const {
+ assert(Scope && "Dereferencing invalid iterator is not allowed");
+ assert(VarIter != 0 && "Iterator has invalid value of VarIter member");
+ return Scope->Vars[0];
+ }
+
VarDecl *operator*() const {
return *this->operator->();
}
@@ -266,6 +274,7 @@ public:
int distance(const_iterator L);
const_iterator shared_parent(const_iterator L);
+ bool pointsToFirstDeclaredVar() { return VarIter == 1; }
};
private:
@@ -472,6 +481,15 @@ class CFGBuilder {
using LabelSetTy = llvm::SmallSetVector<LabelDecl *, 8>;
LabelSetTy AddressTakenLabels;
+ // Information about the currently visited C++ object construction site.
+ // This is set in the construction trigger and read when the constructor
+ // or a function that returns an object by value is being visited.
+ llvm::DenseMap<Expr *, const ConstructionContextLayer *>
+ ConstructionContextMap;
+
+ using DeclsWithEndedScopeSetTy = llvm::SmallSetVector<VarDecl *, 16>;
+ DeclsWithEndedScopeSetTy DeclsWithEndedScope;
+
bool badCFG = false;
const CFG::BuildOptions &BuildOpts;
@@ -491,7 +509,8 @@ public:
explicit CFGBuilder(ASTContext *astContext,
const CFG::BuildOptions &buildOpts)
: Context(astContext), cfg(new CFG()), // create a new CFG
- BuildOpts(buildOpts) {}
+ ConstructionContextMap(), BuildOpts(buildOpts) {}
+
// buildCFG - Used by external clients to construct the CFG.
std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
@@ -541,6 +560,8 @@ private:
Stmt *Term,
CFGBlock *TrueBlock,
CFGBlock *FalseBlock);
+ CFGBlock *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *MTE,
+ AddStmtChoice asc);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
@@ -566,6 +587,12 @@ private:
CFGBlock *VisitChildren(Stmt *S);
CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
+ void maybeAddScopeBeginForVarDecl(CFGBlock *B, const VarDecl *VD,
+ const Stmt *S) {
+ if (ScopePos && (VD == ScopePos.getFirstVarInScope()))
+ appendScopeBegin(B, VD, S);
+ }
+
/// When creating the CFG for temporary destructors, we want to mirror the
/// branch structure of the corresponding constructor calls.
/// Thus, while visiting a statement for temporary destructors, we keep a
@@ -643,6 +670,24 @@ private:
return Block;
}
+ // Remember to apply the construction context based on the current \p Layer
+ // when constructing the CFG element for \p E.
+ void consumeConstructionContext(const ConstructionContextLayer *Layer,
+ Expr *E);
+
+ // Scan \p Child statement to find constructors in it, while keeping in mind
+ // that its parent statement is providing a partial construction context
+ // described by \p Layer. If a constructor is found, it would be assigned
+ // the context based on the layer. If an additional construction context layer
+ // is found, the function recurses into that.
+ void findConstructionContexts(const ConstructionContextLayer *Layer,
+ Stmt *Child);
+
+ // Unset the construction context after consuming it. This is done immediately
+ // after adding the CFGConstructor or CFGCXXRecordTypedCall element, so
+ // there's no need to do this manually in every Visit... function.
+ void cleanupConstructionContext(Expr *E);
+
void autoCreateBlock() { if (!Block) Block = createBlock(); }
CFGBlock *createBlock(bool add_successor = true);
CFGBlock *createNoReturnBlock();
@@ -660,6 +705,11 @@ private:
void addAutomaticObjHandling(LocalScope::const_iterator B,
LocalScope::const_iterator E, Stmt *S);
void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
+ void addScopesEnd(LocalScope::const_iterator B, LocalScope::const_iterator E,
+ Stmt *S);
+
+ void getDeclsWithEndedScope(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
// Local scopes creation.
LocalScope* createOrReuseLocalScope(LocalScope* Scope);
@@ -682,6 +732,45 @@ private:
B->appendStmt(const_cast<Stmt*>(S), cfg->getBumpVectorContext());
}
+ void appendConstructor(CFGBlock *B, CXXConstructExpr *CE) {
+ if (BuildOpts.AddRichCXXConstructors) {
+ if (const ConstructionContextLayer *Layer =
+ ConstructionContextMap.lookup(CE)) {
+ cleanupConstructionContext(CE);
+ if (const auto *CC = ConstructionContext::createFromLayers(
+ cfg->getBumpVectorContext(), Layer)) {
+ B->appendConstructor(CE, CC, cfg->getBumpVectorContext());
+ return;
+ }
+ }
+ }
+
+ // No valid construction context found. Fall back to statement.
+ B->appendStmt(CE, cfg->getBumpVectorContext());
+ }
+
+ void appendCall(CFGBlock *B, CallExpr *CE) {
+ if (alwaysAdd(CE) && cachedEntry)
+ cachedEntry->second = B;
+
+ if (BuildOpts.AddRichCXXConstructors) {
+ if (CFGCXXRecordTypedCall::isCXXRecordTypedCall(CE, *Context)) {
+ if (const ConstructionContextLayer *Layer =
+ ConstructionContextMap.lookup(CE)) {
+ cleanupConstructionContext(CE);
+ if (const auto *CC = ConstructionContext::createFromLayers(
+ cfg->getBumpVectorContext(), Layer)) {
+ B->appendCXXRecordTypedCall(CE, CC, cfg->getBumpVectorContext());
+ return;
+ }
+ }
+ }
+ }
+
+ // No valid construction context found. Fall back to statement.
+ B->appendStmt(CE, cfg->getBumpVectorContext());
+ }
+
void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
B->appendInitializer(I, cfg->getBumpVectorContext());
}
@@ -725,6 +814,11 @@ private:
LocalScope::const_iterator B,
LocalScope::const_iterator E);
+ const VarDecl *
+ prependAutomaticObjScopeEndWithTerminator(CFGBlock *Blk,
+ LocalScope::const_iterator B,
+ LocalScope::const_iterator E);
+
void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
cfg->getBumpVectorContext());
@@ -737,7 +831,27 @@ private:
cfg->getBumpVectorContext());
}
- /// \brief Find a relational comparison with an expression evaluating to a
+ void appendScopeBegin(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->appendScopeBegin(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void prependScopeBegin(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->prependScopeBegin(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void appendScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->appendScopeEnd(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void prependScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->prependScopeEnd(VD, S, cfg->getBumpVectorContext());
+ }
+
+ /// Find a relational comparison with an expression evaluating to a
/// boolean and a constant other than 0 and 1.
/// e.g. if ((x < y) == 10)
TryResult checkIncorrectRelationalOperator(const BinaryOperator *B) {
@@ -850,7 +964,7 @@ private:
}
}
- /// \brief Find a pair of comparison expressions with or without parentheses
+ /// Find a pair of comparison expressions with or without parentheses
/// with a shared variable and constants and a logical operator between them
/// that always evaluates to either true or false.
/// e.g. if (x != 3 || x != 4)
@@ -1006,7 +1120,7 @@ private:
return evaluateAsBooleanConditionNoCache(S);
}
- /// \brief Evaluate as boolean \param E without using the cache.
+ /// Evaluate as boolean \param E without using the cache.
TryResult evaluateAsBooleanConditionNoCache(Expr *E) {
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(E)) {
if (Bop->isLogicalOp()) {
@@ -1116,6 +1230,127 @@ static const VariableArrayType *FindVA(const Type *t) {
return nullptr;
}
+void CFGBuilder::consumeConstructionContext(
+ const ConstructionContextLayer *Layer, Expr *E) {
+ if (const ConstructionContextLayer *PreviouslyStoredLayer =
+ ConstructionContextMap.lookup(E)) {
+ (void)PreviouslyStoredLayer;
+ // We might have visited this child when we were finding construction
+ // contexts within its parents.
+ assert(PreviouslyStoredLayer->isStrictlyMoreSpecificThan(Layer) &&
+ "Already within a different construction context!");
+ } else {
+ ConstructionContextMap[E] = Layer;
+ }
+}
+
+void CFGBuilder::findConstructionContexts(
+ const ConstructionContextLayer *Layer, Stmt *Child) {
+ if (!BuildOpts.AddRichCXXConstructors)
+ return;
+
+ if (!Child)
+ return;
+
+ auto withExtraLayer = [this, Layer](Stmt *S) {
+ return ConstructionContextLayer::create(cfg->getBumpVectorContext(), S,
+ Layer);
+ };
+
+ switch(Child->getStmtClass()) {
+ case Stmt::CXXConstructExprClass:
+ case Stmt::CXXTemporaryObjectExprClass: {
+ // Support pre-C++17 copy elision AST.
+ auto *CE = cast<CXXConstructExpr>(Child);
+ if (BuildOpts.MarkElidedCXXConstructors && CE->isElidable()) {
+ findConstructionContexts(withExtraLayer(CE), CE->getArg(0));
+ }
+
+ consumeConstructionContext(Layer, CE);
+ break;
+ }
+ // FIXME: This, like the main visit, doesn't support CUDAKernelCallExpr.
+ // FIXME: An isa<> would look much better but this whole switch is a
+ // workaround for an internal compiler error in MSVC 2015 (see r326021).
+ case Stmt::CallExprClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::CXXOperatorCallExprClass:
+ case Stmt::UserDefinedLiteralClass: {
+ auto *CE = cast<CallExpr>(Child);
+ if (CFGCXXRecordTypedCall::isCXXRecordTypedCall(CE, *Context))
+ consumeConstructionContext(Layer, CE);
+ break;
+ }
+ case Stmt::ExprWithCleanupsClass: {
+ auto *Cleanups = cast<ExprWithCleanups>(Child);
+ findConstructionContexts(Layer, Cleanups->getSubExpr());
+ break;
+ }
+ case Stmt::CXXFunctionalCastExprClass: {
+ auto *Cast = cast<CXXFunctionalCastExpr>(Child);
+ findConstructionContexts(Layer, Cast->getSubExpr());
+ break;
+ }
+ case Stmt::ImplicitCastExprClass: {
+ auto *Cast = cast<ImplicitCastExpr>(Child);
+ // Should we support other implicit cast kinds?
+ switch (Cast->getCastKind()) {
+ case CK_NoOp:
+ case CK_ConstructorConversion:
+ findConstructionContexts(Layer, Cast->getSubExpr());
+ default:
+ break;
+ }
+ break;
+ }
+ case Stmt::CXXBindTemporaryExprClass: {
+ auto *BTE = cast<CXXBindTemporaryExpr>(Child);
+ findConstructionContexts(withExtraLayer(BTE), BTE->getSubExpr());
+ break;
+ }
+ case Stmt::MaterializeTemporaryExprClass: {
+ // Normally we don't want to search in MaterializeTemporaryExpr because
+ // it indicates the beginning of a temporary object construction context,
+ // so it shouldn't be found in the middle. However, if it is the beginning
+ // of an elidable copy or move construction context, we need to include it.
+ if (const auto *CE =
+ dyn_cast_or_null<CXXConstructExpr>(Layer->getTriggerStmt())) {
+ if (CE->isElidable()) {
+ auto *MTE = cast<MaterializeTemporaryExpr>(Child);
+ findConstructionContexts(withExtraLayer(MTE), MTE->GetTemporaryExpr());
+ }
+ }
+ break;
+ }
+ case Stmt::ConditionalOperatorClass: {
+ auto *CO = cast<ConditionalOperator>(Child);
+ if (!dyn_cast_or_null<MaterializeTemporaryExpr>(Layer->getTriggerStmt())) {
+ // If the object returned by the conditional operator is not going to be a
+ // temporary object that needs to be immediately materialized, then
+ // it must be C++17 with its mandatory copy elision. Do not yet promise
+ // to support this case.
+ assert(!CO->getType()->getAsCXXRecordDecl() || CO->isGLValue() ||
+ Context->getLangOpts().CPlusPlus17);
+ break;
+ }
+ findConstructionContexts(Layer, CO->getLHS());
+ findConstructionContexts(Layer, CO->getRHS());
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+void CFGBuilder::cleanupConstructionContext(Expr *E) {
+ assert(BuildOpts.AddRichCXXConstructors &&
+ "We should not be managing construction contexts!");
+ assert(ConstructionContextMap.count(E) &&
+ "Cannot exit construction context without the context!");
+ ConstructionContextMap.erase(E);
+}
+
/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
/// arbitrary statement. Examples include a single expression or a function
/// body (compound statement). The ownership of the returned CFG is
@@ -1176,6 +1411,9 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
JT.scopePosition);
prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
JT.scopePosition);
+ const VarDecl *VD = prependAutomaticObjScopeEndWithTerminator(
+ B, I->scopePosition, JT.scopePosition);
+ appendScopeBegin(JT.block, VD, G);
addSuccessor(B, JT.block);
}
@@ -1196,6 +1434,10 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
// Create an empty entry block that has no predecessors.
cfg->setEntry(createBlock());
+ if (BuildOpts.AddRichCXXConstructors)
+ assert(ConstructionContextMap.empty() &&
+ "Not all construction contexts were cleaned up!");
+
return std::move(cfg);
}
@@ -1243,6 +1485,10 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
appendInitializer(Block, I);
if (Init) {
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), I),
+ Init);
+
if (HasTemporaries) {
// For expression with temporaries go directly to subexpression to omit
// generating destructors for the second time.
@@ -1267,21 +1513,20 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
return Block;
}
-/// \brief Retrieve the type of the temporary object whose lifetime was
+/// Retrieve the type of the temporary object whose lifetime was
/// extended by a local reference with the given initializer.
-static QualType getReferenceInitTemporaryType(ASTContext &Context,
- const Expr *Init,
+static QualType getReferenceInitTemporaryType(const Expr *Init,
bool *FoundMTE = nullptr) {
while (true) {
// Skip parentheses.
Init = Init->IgnoreParens();
-
+
// Skip through cleanups.
if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init)) {
Init = EWC->getSubExpr();
continue;
}
-
+
// Skip through the temporary-materialization expression.
if (const MaterializeTemporaryExpr *MTE
= dyn_cast<MaterializeTemporaryExpr>(Init)) {
@@ -1290,26 +1535,17 @@ static QualType getReferenceInitTemporaryType(ASTContext &Context,
*FoundMTE = true;
continue;
}
-
- // Skip derived-to-base and no-op casts.
- if (const CastExpr *CE = dyn_cast<CastExpr>(Init)) {
- if ((CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) &&
- Init->getType()->isRecordType()) {
- Init = CE->getSubExpr();
- continue;
- }
- }
-
- // Skip member accesses into rvalues.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(Init)) {
- if (!ME->isArrow() && ME->getBase()->isRValue()) {
- Init = ME->getBase();
- continue;
- }
+
+ // Skip sub-object accesses into rvalues.
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *SkippedInit =
+ Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+ if (SkippedInit != Init) {
+ Init = SkippedInit;
+ continue;
}
-
+
break;
}
@@ -1325,9 +1561,34 @@ void CFGBuilder::addLoopExit(const Stmt *LoopStmt){
appendLoopExit(Block, LoopStmt);
}
+void CFGBuilder::getDeclsWithEndedScope(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ if (!BuildOpts.AddScopes)
+ return;
+
+ if (B == E)
+ return;
+
+ // To go from B to E, one first goes up the scopes from B to P,
+ // then sideways in one scope from P to P', and then down
+ // the scopes from P' to E.
+ // The lifetimes of all objects between B and P end here.
+ LocalScope::const_iterator P = B.shared_parent(E);
+ int Dist = B.distance(P);
+ if (Dist <= 0)
+ return;
+
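+ // Record the first declared variable of each scope that ends between
+ // B and P; these are the variables for which ScopeEnd markers are
+ // emitted later.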
+ for (LocalScope::const_iterator I = B; I != P; ++I)
+ if (I.pointsToFirstDeclaredVar())
+ DeclsWithEndedScope.insert(*I);
+}
+
void CFGBuilder::addAutomaticObjHandling(LocalScope::const_iterator B,
LocalScope::const_iterator E,
Stmt *S) {
+ getDeclsWithEndedScope(B, E, S);
+ if (BuildOpts.AddScopes)
+ addScopesEnd(B, E, S);
if (BuildOpts.AddImplicitDtors)
addAutomaticObjDtors(B, E, S);
if (BuildOpts.AddLifetime)
@@ -1379,6 +1640,23 @@ void CFGBuilder::addLifetimeEnds(LocalScope::const_iterator B,
appendLifetimeEnds(Block, *I, S);
}
+/// Add to current block markers for ending scopes.
+void CFGBuilder::addScopesEnd(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ // If implicit destructors are enabled, we'll add scope ends in
+ // addAutomaticObjDtors.
+ if (BuildOpts.AddImplicitDtors)
+ return;
+
+ autoCreateBlock();
+
+ for (auto I = DeclsWithEndedScope.rbegin(), E = DeclsWithEndedScope.rend();
+ I != E; ++I)
+ appendScopeEnd(Block, *I, S);
+}
+
/// addAutomaticObjDtors - Add to current block automatic objects destructors
/// for objects in range of local scope positions. Use S as trigger statement
/// for destructors.
@@ -1402,12 +1680,21 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(),
E = Decls.rend();
I != E; ++I) {
+ if (hasTrivialDestructor(*I)) {
+ // If AddScopes is enabled and *I is the first variable in a scope, add
+ // a ScopeEnd marker to the block.
+ if (BuildOpts.AddScopes && DeclsWithEndedScope.count(*I)) {
+ autoCreateBlock();
+ appendScopeEnd(Block, *I, S);
+ }
+ continue;
+ }
// If this destructor is marked as a no-return destructor, we need to
// create a new block for the destructor which does not have as a successor
// anything built thus far: control won't flow out of this block.
QualType Ty = (*I)->getType();
if (Ty->isReferenceType()) {
- Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
+ Ty = getReferenceInitTemporaryType((*I)->getInit());
}
Ty = Context->getBaseElementType(Ty);
@@ -1416,6 +1703,9 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
else
autoCreateBlock();
+ // Add a ScopeEnd marker just after the automatic object destructor.
+ if (BuildOpts.AddScopes && DeclsWithEndedScope.count(*I))
+ appendScopeEnd(Block, *I, S);
appendAutomaticObjDtor(Block, *I, S);
}
}
@@ -1478,7 +1768,8 @@ LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
/// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement
/// that should create implicit scope (e.g. if/else substatements).
void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
- if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes)
return;
LocalScope *Scope = nullptr;
@@ -1503,7 +1794,8 @@ void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
/// reuse Scope if not NULL.
LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
LocalScope* Scope) {
- if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes)
return Scope;
for (auto *DI : DS->decls())
@@ -1515,7 +1807,7 @@ LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
// Check for const references bound to temporary. Set type to pointee.
QualType QT = VD->getType();
- if (QT.getTypePtr()->isReferenceType()) {
+ if (QT->isReferenceType()) {
// Attempt to determine whether this declaration lifetime-extends a
// temporary.
//
@@ -1525,12 +1817,16 @@ bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
// MaterializeTemporaryExpr instead.
const Expr *Init = VD->getInit();
- if (!Init)
+ if (!Init) {
+ // Probably an exception catch-by-reference variable.
+ // FIXME: It doesn't really mean that the object has a trivial destructor.
+ // Also, are there other cases?
return true;
+ }
- // Lifetime-extending a temporary.
+ // Lifetime-extending a temporary?
bool FoundMTE = false;
- QT = getReferenceInitTemporaryType(*Context, Init, &FoundMTE);
+ QT = getReferenceInitTemporaryType(Init, &FoundMTE);
if (!FoundMTE)
return true;
}
@@ -1555,7 +1851,8 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
LocalScope* Scope) {
assert(!(BuildOpts.AddImplicitDtors && BuildOpts.AddLifetime) &&
"AddImplicitDtors and AddLifetime cannot be used at the same time");
- if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes)
return Scope;
// Check if variable is local.
@@ -1568,7 +1865,7 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
}
if (BuildOpts.AddImplicitDtors) {
- if (!hasTrivialDestructor(VD)) {
+ if (!hasTrivialDestructor(VD) || BuildOpts.AddScopes) {
// Add the variable to scope
Scope = createOrReuseLocalScope(Scope);
Scope->addVar(VD);
@@ -1628,6 +1925,26 @@ void CFGBuilder::prependAutomaticObjLifetimeWithTerminator(
InsertPos = Blk->insertLifetimeEnds(InsertPos, *I, Blk->getTerminator());
}
+/// prependAutomaticObjScopeEndWithTerminator - Prepend scope-end CFGElements
+/// for variables with automatic storage duration to the CFGBlock's elements
+/// vector. Elements are prepended to the physical beginning of the vector,
+/// which happens to be its logical end. Use the block's terminator as the
+/// statement that specifies where the scope ends.
+const VarDecl *
+CFGBuilder::prependAutomaticObjScopeEndWithTerminator(
+ CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) {
+ if (!BuildOpts.AddScopes)
+ return nullptr;
+ BumpVectorContext &C = cfg->getBumpVectorContext();
+ CFGBlock::iterator InsertPos =
+ Blk->beginScopeEndInsert(Blk->end(), 1, C);
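+ // Find the last variable in [B, E); a single ScopeEnd marker for it
+ // represents leaving the whole range of scopes.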
+ LocalScope::const_iterator PlaceToInsert = B;
+ for (LocalScope::const_iterator I = B; I != E; ++I)
+ PlaceToInsert = I;
+ Blk->insertScopeEnd(InsertPos, *PlaceToInsert, Blk->getTerminator());
+ return *PlaceToInsert;
+}
+
/// Visit - Walk the subtree of a statement and add extra
/// blocks for ternary operators, &&, and ||. We also process "," and
/// DeclStmts (which may contain nested control-flow).
@@ -1756,6 +2073,10 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::LambdaExprClass:
return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
+ case Stmt::MaterializeTemporaryExprClass:
+ return VisitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(S),
+ asc);
+
case Stmt::MemberExprClass:
return VisitMemberExpr(cast<MemberExpr>(S), asc);
@@ -2045,7 +2366,7 @@ static bool CanThrow(Expr *E, ASTContext &Ctx) {
if (FT) {
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
if (!isUnresolvedExceptionSpec(Proto->getExceptionSpecType()) &&
- Proto->isNothrow(Ctx))
+ Proto->isNothrow())
return false;
}
return true;
@@ -2062,6 +2383,13 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!boundType.isNull()) calleeType = boundType;
}
+ // FIXME: Once actually implemented, this construction context layer should
+ // include the number of the argument as well.
+ for (auto Arg : C->arguments()) {
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), C), Arg);
+ }
+
// If this is a call to a no-return function, this stops the block here.
bool NoReturn = getFunctionExtInfo(*calleeType).getNoReturn();
@@ -2078,7 +2406,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
bool OmitArguments = false;
if (FunctionDecl *FD = C->getDirectCallee()) {
- if (FD->isNoReturn())
+ if (FD->isNoReturn() || C->isBuiltinAssumeFalse(*Context))
NoReturn = true;
if (FD->hasAttr<NoThrowAttr>())
AddEHEdge = false;
@@ -2098,7 +2426,10 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
}
if (!NoReturn && !AddEHEdge) {
- return VisitStmt(C, asc.withAlwaysAdd(true));
+ autoCreateBlock();
+ appendCall(Block, C);
+
+ return VisitChildren(C);
}
if (Block) {
@@ -2112,7 +2443,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
else
Block = createBlock();
- appendStmt(Block, C);
+ appendCall(Block, C);
if (AddEHEdge) {
// Add exceptional edges.
@@ -2326,7 +2657,11 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
autoCreateBlock();
appendStmt(Block, DS);
-
+
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), DS),
+ Init);
+
// Keep track of the last non-null block, as 'Block' can be nulled out
// if the initializer expression is something like a 'while' in a
// statement-expression.
@@ -2353,6 +2688,8 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
LastBlock = newBlock;
}
+ maybeAddScopeBeginForVarDecl(Block, VD, DS);
+
// Remove variable from local scope.
if (ScopePos && VD == *ScopePos)
++ScopePos;
@@ -2517,6 +2854,10 @@ CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
addAutomaticObjHandling(ScopePos, LocalScope::const_iterator(), R);
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), R),
+ R->getRetValue());
+
// If the one of the destructors does not return, we already have the Exit
// block as a successor.
if (!Block->hasNoReturnElement())
@@ -2813,6 +3154,7 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
do {
Expr *C = F->getCond();
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
// Specially handle logical operators, which have a slightly
// more optimal CFG representation.
@@ -2843,9 +3185,16 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
if (VarDecl *VD = F->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- appendStmt(Block, F->getConditionVariableDeclStmt());
+ const DeclStmt *DS = F->getConditionVariableDeclStmt();
+ assert(DS->isSingleDecl());
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(),
+ const_cast<DeclStmt *>(DS)),
+ Init);
+ appendStmt(Block, DS);
EntryConditionBlock = addStmt(Init);
assert(Block == EntryConditionBlock);
+ maybeAddScopeBeginForVarDecl(EntryConditionBlock, VD, C);
}
}
@@ -2872,6 +3221,8 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// If the loop contains initialization, create a new block for those
// statements. This block can also contain statements that precede the loop.
if (Stmt *I = F->getInit()) {
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ ScopePos = LoopBeginScopePos;
Block = createBlock();
return addStmt(I);
}
@@ -2883,6 +3234,16 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
return EntryConditionBlock;
}
+CFGBlock *
+CFGBuilder::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *MTE,
+ AddStmtChoice asc) {
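+ // A materialized temporary starts a temporary-object construction
+ // context for the expression it materializes.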
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), MTE),
+ MTE->getTemporary());
+
+ return VisitStmt(MTE, asc);
+}
+
CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) {
if (asc.alwaysAdd(*this, M)) {
autoCreateBlock();
@@ -3155,9 +3516,16 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
if (VarDecl *VD = W->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- appendStmt(Block, W->getConditionVariableDeclStmt());
+ const DeclStmt *DS = W->getConditionVariableDeclStmt();
+ assert(DS->isSingleDecl());
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(),
+ const_cast<DeclStmt *>(DS)),
+ Init);
+ appendStmt(Block, DS);
EntryConditionBlock = addStmt(Init);
assert(Block == EntryConditionBlock);
+ maybeAddScopeBeginForVarDecl(EntryConditionBlock, VD, C);
}
}
@@ -3483,6 +3851,7 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
autoCreateBlock();
appendStmt(Block, Terminator->getConditionVariableDeclStmt());
LastBlock = addStmt(Init);
+ maybeAddScopeBeginForVarDecl(LastBlock, VD, Init);
}
}
@@ -3863,6 +4232,10 @@ CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
autoCreateBlock();
appendStmt(Block, E);
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), E),
+ E->getSubExpr());
+
// We do not want to propagate the AlwaysAdd property.
asc = asc.withAlwaysAdd(false);
}
@@ -3872,7 +4245,7 @@ CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
AddStmtChoice asc) {
autoCreateBlock();
- appendStmt(Block, C);
+ appendConstructor(Block, C);
return VisitChildren(C);
}
@@ -3882,15 +4255,23 @@ CFGBlock *CFGBuilder::VisitCXXNewExpr(CXXNewExpr *NE,
autoCreateBlock();
appendStmt(Block, NE);
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), NE),
+ const_cast<CXXConstructExpr *>(NE->getConstructExpr()));
+
if (NE->getInitializer())
Block = Visit(NE->getInitializer());
+
if (BuildOpts.AddCXXNewAllocator)
appendNewAllocator(Block, NE);
+
if (NE->isArray())
Block = Visit(NE->getArraySize());
+
for (CXXNewExpr::arg_iterator I = NE->placement_arg_begin(),
E = NE->placement_arg_end(); I != E; ++I)
Block = Visit(*I);
+
return Block;
}
@@ -3925,7 +4306,7 @@ CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
AddStmtChoice asc) {
autoCreateBlock();
- appendStmt(Block, C);
+ appendConstructor(Block, C);
return VisitChildren(C);
}
@@ -4027,9 +4408,11 @@ tryAgain:
auto *LE = cast<LambdaExpr>(E);
CFGBlock *B = Block;
for (Expr *Init : LE->capture_inits()) {
- if (CFGBlock *R = VisitForTemporaryDtors(
- Init, /*BindToTemporary=*/false, Context))
- B = R;
+ if (Init) {
+ if (CFGBlock *R = VisitForTemporaryDtors(
+ Init, /*BindToTemporary=*/false, Context))
+ B = R;
+ }
}
return B;
}
@@ -4210,11 +4593,15 @@ std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
const CXXDestructorDecl *
CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
switch (getKind()) {
- case CFGElement::Statement:
case CFGElement::Initializer:
case CFGElement::NewAllocator:
case CFGElement::LoopExit:
case CFGElement::LifetimeEnds:
+ case CFGElement::Statement:
+ case CFGElement::Constructor:
+ case CFGElement::CXXRecordTypedCall:
+ case CFGElement::ScopeBegin:
+ case CFGElement::ScopeEnd:
llvm_unreachable("getDestructorDecl should only be used with "
"ImplicitDtors");
case CFGElement::AutomaticObjectDtor: {
@@ -4227,7 +4614,7 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
// temporary in an initializer expression.
if (ty->isReferenceType()) {
if (const Expr *Init = var->getInit()) {
- ty = getReferenceInitTemporaryType(astContext, Init);
+ ty = getReferenceInitTemporaryType(Init);
}
}
@@ -4343,8 +4730,8 @@ public:
switch (stmt->getStmtClass()) {
case Stmt::DeclStmtClass:
- DeclMap[cast<DeclStmt>(stmt)->getSingleDecl()] = P;
- break;
+ DeclMap[cast<DeclStmt>(stmt)->getSingleDecl()] = P;
+ break;
case Stmt::IfStmtClass: {
const VarDecl *var = cast<IfStmt>(stmt)->getConditionVariable();
if (var)
@@ -4544,6 +4931,95 @@ public:
} // namespace
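+// Print a constructor initializer the way CFG dumps display it, e.g.
+// "field(init-expr) (Member initializer)".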
+static void print_initializer(raw_ostream &OS, StmtPrinterHelper &Helper,
+ const CXXCtorInitializer *I) {
+ if (I->isBaseInitializer())
+ OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
+ else if (I->isDelegatingInitializer())
+ OS << I->getTypeSourceInfo()->getType()->getAsCXXRecordDecl()->getName();
+ else
+ OS << I->getAnyMember()->getName();
+ OS << "(";
+ if (Expr *IE = I->getInit())
+ IE->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
+ OS << ")";
+
+ if (I->isBaseInitializer())
+ OS << " (Base initializer)";
+ else if (I->isDelegatingInitializer())
+ OS << " (Delegating initializer)";
+ else
+ OS << " (Member initializer)";
+}
+
+static void print_construction_context(raw_ostream &OS,
+ StmtPrinterHelper &Helper,
+ const ConstructionContext *CC) {
+ SmallVector<const Stmt *, 3> Stmts;
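+ // Statements that participate in this construction context; they are
+ // printed comma-separated after the switch.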
+ switch (CC->getKind()) {
+ case ConstructionContext::SimpleConstructorInitializerKind: {
+ OS << ", ";
+ const auto *SICC = cast<SimpleConstructorInitializerConstructionContext>(CC);
+ print_initializer(OS, Helper, SICC->getCXXCtorInitializer());
+ break;
+ }
+ case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind: {
+ OS << ", ";
+ const auto *CICC =
+ cast<CXX17ElidedCopyConstructorInitializerConstructionContext>(CC);
+ print_initializer(OS, Helper, CICC->getCXXCtorInitializer());
+ Stmts.push_back(CICC->getCXXBindTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::SimpleVariableKind: {
+ const auto *SDSCC = cast<SimpleVariableConstructionContext>(CC);
+ Stmts.push_back(SDSCC->getDeclStmt());
+ break;
+ }
+ case ConstructionContext::CXX17ElidedCopyVariableKind: {
+ const auto *CDSCC = cast<CXX17ElidedCopyVariableConstructionContext>(CC);
+ Stmts.push_back(CDSCC->getDeclStmt());
+ Stmts.push_back(CDSCC->getCXXBindTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::NewAllocatedObjectKind: {
+ const auto *NECC = cast<NewAllocatedObjectConstructionContext>(CC);
+ Stmts.push_back(NECC->getCXXNewExpr());
+ break;
+ }
+ case ConstructionContext::SimpleReturnedValueKind: {
+ const auto *RSCC = cast<SimpleReturnedValueConstructionContext>(CC);
+ Stmts.push_back(RSCC->getReturnStmt());
+ break;
+ }
+ case ConstructionContext::CXX17ElidedCopyReturnedValueKind: {
+ const auto *RSCC =
+ cast<CXX17ElidedCopyReturnedValueConstructionContext>(CC);
+ Stmts.push_back(RSCC->getReturnStmt());
+ Stmts.push_back(RSCC->getCXXBindTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::SimpleTemporaryObjectKind: {
+ const auto *TOCC = cast<SimpleTemporaryObjectConstructionContext>(CC);
+ Stmts.push_back(TOCC->getCXXBindTemporaryExpr());
+ Stmts.push_back(TOCC->getMaterializedTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::ElidedTemporaryObjectKind: {
+ const auto *TOCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
+ Stmts.push_back(TOCC->getCXXBindTemporaryExpr());
+ Stmts.push_back(TOCC->getMaterializedTemporaryExpr());
+ Stmts.push_back(TOCC->getConstructorAfterElision());
+ break;
+ }
+ }
+ for (auto I : Stmts)
+ if (I) {
+ OS << ", ";
+ Helper.handledStmt(const_cast<Stmt *>(I), OS);
+ }
+}
+
static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
const CFGElement &E) {
if (Optional<CFGStmt> CS = E.getAs<CFGStmt>()) {
@@ -4573,16 +5049,23 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
}
S->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
- if (isa<CXXOperatorCallExpr>(S)) {
+ if (auto VTC = E.getAs<CFGCXXRecordTypedCall>()) {
+ if (isa<CXXOperatorCallExpr>(S))
+ OS << " (OperatorCall)";
+ OS << " (CXXRecordTypedCall";
+ print_construction_context(OS, Helper, VTC->getConstructionContext());
+ OS << ")";
+ } else if (isa<CXXOperatorCallExpr>(S)) {
OS << " (OperatorCall)";
- }
- else if (isa<CXXBindTemporaryExpr>(S)) {
+ } else if (isa<CXXBindTemporaryExpr>(S)) {
OS << " (BindTemporary)";
- }
- else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
- OS << " (CXXConstructExpr, " << CCE->getType().getAsString() << ")";
- }
- else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
+ } else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
+ OS << " (CXXConstructExpr";
+ if (Optional<CFGConstructor> CE = E.getAs<CFGConstructor>()) {
+ print_construction_context(OS, Helper, CE->getConstructionContext());
+ }
+ OS << ", " << CCE->getType().getAsString() << ")";
+ } else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
OS << " (" << CE->getStmtClassName() << ", "
<< CE->getCastKindName()
<< ", " << CE->getType().getAsString()
@@ -4593,32 +5076,19 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
if (isa<Expr>(S))
OS << '\n';
} else if (Optional<CFGInitializer> IE = E.getAs<CFGInitializer>()) {
- const CXXCtorInitializer *I = IE->getInitializer();
- if (I->isBaseInitializer())
- OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
- else if (I->isDelegatingInitializer())
- OS << I->getTypeSourceInfo()->getType()->getAsCXXRecordDecl()->getName();
- else OS << I->getAnyMember()->getName();
-
- OS << "(";
- if (Expr *IE = I->getInit())
- IE->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
- OS << ")";
-
- if (I->isBaseInitializer())
- OS << " (Base initializer)\n";
- else if (I->isDelegatingInitializer())
- OS << " (Delegating initializer)\n";
- else OS << " (Member initializer)\n";
+ print_initializer(OS, Helper, IE->getInitializer());
+ OS << '\n';
} else if (Optional<CFGAutomaticObjDtor> DE =
E.getAs<CFGAutomaticObjDtor>()) {
const VarDecl *VD = DE->getVarDecl();
Helper.handleDecl(VD, OS);
- const Type* T = VD->getType().getTypePtr();
- if (const ReferenceType* RT = T->getAs<ReferenceType>())
- T = RT->getPointeeType().getTypePtr();
- T = T->getBaseElementTypeUnsafe();
+ ASTContext &ACtx = VD->getASTContext();
+ QualType T = VD->getType();
+ if (T->isReferenceType())
+ T = getReferenceInitTemporaryType(VD->getInit(), nullptr);
+ if (const ArrayType *AT = ACtx.getAsArrayType(T))
+ T = ACtx.getBaseElementType(AT);
OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
OS << " (Implicit destructor)\n";
@@ -4630,6 +5100,16 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
} else if (Optional<CFGLoopExit> LE = E.getAs<CFGLoopExit>()) {
const Stmt *LoopStmt = LE->getLoopStmt();
OS << LoopStmt->getStmtClassName() << " (LoopExit)\n";
+ } else if (Optional<CFGScopeBegin> SB = E.getAs<CFGScopeBegin>()) {
+ OS << "CFGScopeBegin(";
+ if (const VarDecl *VD = SB->getVarDecl())
+ OS << VD->getQualifiedNameAsString();
+ OS << ")\n";
+ } else if (Optional<CFGScopeEnd> SE = E.getAs<CFGScopeEnd>()) {
+ OS << "CFGScopeEnd(";
+ if (const VarDecl *VD = SE->getVarDecl())
+ OS << VD->getQualifiedNameAsString();
+ OS << ")\n";
} else if (Optional<CFGNewAllocator> NE = E.getAs<CFGNewAllocator>()) {
OS << "CFGNewAllocator(";
if (const CXXNewExpr *AllocExpr = NE->getAllocatorExpr())
diff --git a/lib/Analysis/CFGReachabilityAnalysis.cpp b/lib/Analysis/CFGReachabilityAnalysis.cpp
index 4ae135f1ea77..6f557e092fd7 100644
--- a/lib/Analysis/CFGReachabilityAnalysis.cpp
+++ b/lib/Analysis/CFGReachabilityAnalysis.cpp
@@ -1,4 +1,4 @@
-//==- CFGReachabilityAnalysis.cpp - Basic reachability analysis --*- C++ -*-==//
+//===- CFGReachabilityAnalysis.cpp - Basic reachability analysis ----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,18 +13,19 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/SmallVector.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
using namespace clang;
-CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(const CFG &cfg)
- : analyzed(cfg.getNumBlockIDs(), false) {}
+CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(
+ const CFG &cfg)
+ : analyzed(cfg.getNumBlockIDs(), false) {}
bool CFGReverseBlockReachabilityAnalysis::isReachable(const CFGBlock *Src,
const CFGBlock *Dst) {
-
const unsigned DstBlockID = Dst->getBlockID();
// If we haven't analyzed the destination node, run the analysis now
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index fdc9e6cee8e1..432067d98157 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangAnalysis
CallGraph.cpp
CloneDetection.cpp
CocoaConventions.cpp
+ ConstructionContext.cpp
Consumed.cpp
CodeInjector.cpp
Dominators.cpp
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
index 098803f9a417..8912b3b76751 100644
--- a/lib/Analysis/CloneDetection.cpp
+++ b/lib/Analysis/CloneDetection.cpp
@@ -381,7 +381,7 @@ void RecursiveCloneTypeIIHashConstraint::constrain(
for (unsigned i = 0; i < StmtsByHash.size() - 1; ++i) {
const auto Current = StmtsByHash[i];
- // It's likely that we just found an sequence of StmtSequences that
+ // It's likely that we just found a sequence of StmtSequences that
// represent a CloneGroup, so we create a new group and start checking and
// adding the StmtSequences in this sequence.
CloneDetector::CloneGroup NewGroup;
@@ -534,14 +534,14 @@ void VariablePattern::addVariableOccurence(const VarDecl *VarDecl,
// First check if we already reference this variable
for (size_t KindIndex = 0; KindIndex < Variables.size(); ++KindIndex) {
if (Variables[KindIndex] == VarDecl) {
- // If yes, add a new occurence that points to the existing entry in
+ // If yes, add a new occurrence that points to the existing entry in
// the Variables vector.
Occurences.emplace_back(KindIndex, Mention);
return;
}
}
// If this variable wasn't already referenced, add it to the list of
+ // referenced variables and add an occurrence that points to this new entry.
+ // referenced variables and add a occurrence that points to this new entry.
Occurences.emplace_back(Variables.size(), Mention);
Variables.push_back(VarDecl);
}
diff --git a/lib/Analysis/ConstructionContext.cpp b/lib/Analysis/ConstructionContext.cpp
new file mode 100644
index 000000000000..ed1e63243217
--- /dev/null
+++ b/lib/Analysis/ConstructionContext.cpp
@@ -0,0 +1,184 @@
+//===- ConstructionContext.cpp - CFG constructor information --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ConstructionContext class and its sub-classes,
+// which represent the various ways of constructing C++ objects, together
+// with any additional information that users may want to know about
+// the constructor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/ConstructionContext.h"
+
+using namespace clang;
+
+const ConstructionContextLayer *
+ConstructionContextLayer::create(BumpVectorContext &C, TriggerTy Trigger,
+ const ConstructionContextLayer *Parent) {
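+ // Layers are allocated in the CFG's bump allocator and are never freed
+ // individually.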
+ ConstructionContextLayer *CC =
+ C.getAllocator().Allocate<ConstructionContextLayer>();
+ return new (CC) ConstructionContextLayer(Trigger, Parent);
+}
+
+bool ConstructionContextLayer::isStrictlyMoreSpecificThan(
+ const ConstructionContextLayer *Other) const {
+ const ConstructionContextLayer *Self = this;
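+ // 'this' is strictly more specific than Other iff both chains match
+ // layer by layer from the top and 'this' still has extra ancestor
+ // layers left when Other runs out.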
+ while (true) {
+ if (!Other)
+ return Self;
+ if (!Self || !Self->isSameLayer(Other))
+ return false;
+ Self = Self->getParent();
+ Other = Other->getParent();
+ }
+ llvm_unreachable("The above loop can only be terminated via return!");
+}
+
+const ConstructionContext *ConstructionContext::createFromLayers(
+ BumpVectorContext &C, const ConstructionContextLayer *TopLayer) {
+ // Before this point all we've had was a stockpile of arbitrary layers.
+ // Now validate that it is shaped as one of the finite number of expected
+ // patterns.
+ if (const Stmt *S = TopLayer->getTriggerStmt()) {
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ assert(TopLayer->isLast());
+ return create<SimpleVariableConstructionContext>(C, DS);
+ }
+ if (const auto *NE = dyn_cast<CXXNewExpr>(S)) {
+ assert(TopLayer->isLast());
+ return create<NewAllocatedObjectConstructionContext>(C, NE);
+ }
+ if (const auto *BTE = dyn_cast<CXXBindTemporaryExpr>(S)) {
+ const MaterializeTemporaryExpr *MTE = nullptr;
+ assert(BTE->getType().getCanonicalType()
+ ->getAsCXXRecordDecl()->hasNonTrivialDestructor());
+ // For temporaries with destructors, there may or may not be
+ // lifetime extension on the parent layer.
+ if (const ConstructionContextLayer *ParentLayer = TopLayer->getParent()) {
+ // C++17 *requires* elision of the constructor at the return site
+ // and at the variable/member initialization site, while previous
+ // standards allowed an optional elidable constructor.
+ // This is the C++17 copy-elided construction into a ctor initializer.
+ if (const CXXCtorInitializer *I = ParentLayer->getTriggerInit()) {
+ return create<
+ CXX17ElidedCopyConstructorInitializerConstructionContext>(C,
+ I, BTE);
+ }
+ assert(ParentLayer->getTriggerStmt() &&
+ "Non-statement-based layers have been handled above!");
+ // This is the normal, non-C++17 case: a temporary object which has
+ // both destruction and materialization info attached to it in the AST.
+ if ((MTE = dyn_cast<MaterializeTemporaryExpr>(
+ ParentLayer->getTriggerStmt()))) {
+ if (MTE->getStorageDuration() != SD_FullExpression) {
+ // If the temporary is lifetime-extended, don't save the BTE,
+ // because we don't need a temporary destructor, but an automatic
+ // destructor.
+ BTE = nullptr;
+ }
+
+ // Handle pre-C++17 copy and move elision.
+ const CXXConstructExpr *ElidedCE = nullptr;
+ const ConstructionContext *ElidedCC = nullptr;
+ if (const ConstructionContextLayer *ElidedLayer =
+ ParentLayer->getParent()) {
+ ElidedCE = cast<CXXConstructExpr>(ElidedLayer->getTriggerStmt());
+ assert(ElidedCE->isElidable());
+ // We're creating a construction context that might have already
+ // been created elsewhere. Maybe we should unique our construction
+ // contexts. That's what we often do, but in this case it's unlikely
+ // to bring any benefits.
+ ElidedCC = createFromLayers(C, ElidedLayer->getParent());
+ if (!ElidedCC) {
+ // We may fail to create the elided construction context.
+ // In this case, skip copy elision entirely.
+ return create<SimpleTemporaryObjectConstructionContext>(C, BTE,
+ MTE);
+ } else {
+ return create<ElidedTemporaryObjectConstructionContext>(
+ C, BTE, MTE, ElidedCE, ElidedCC);
+ }
+ }
+ assert(ParentLayer->isLast());
+ return create<SimpleTemporaryObjectConstructionContext>(C, BTE, MTE);
+ }
+ assert(ParentLayer->isLast());
+
+ // This is a constructor into a function argument. Not implemented yet.
+ if (isa<CallExpr>(ParentLayer->getTriggerStmt()))
+ return nullptr;
+ // This is C++17 copy-elided construction into return statement.
+ if (auto *RS = dyn_cast<ReturnStmt>(ParentLayer->getTriggerStmt())) {
+ assert(!RS->getRetValue()->getType().getCanonicalType()
+ ->getAsCXXRecordDecl()->hasTrivialDestructor());
+ return create<CXX17ElidedCopyReturnedValueConstructionContext>(C,
+ RS, BTE);
+ }
+ // This is C++17 copy-elided construction into a simple variable.
+ if (auto *DS = dyn_cast<DeclStmt>(ParentLayer->getTriggerStmt())) {
+ assert(!cast<VarDecl>(DS->getSingleDecl())->getType()
+ .getCanonicalType()->getAsCXXRecordDecl()
+ ->hasTrivialDestructor());
+ return create<CXX17ElidedCopyVariableConstructionContext>(C, DS, BTE);
+ }
+ llvm_unreachable("Unexpected construction context with destructor!");
+ }
+ // A temporary object that doesn't require materialization.
+ // In particular, it shouldn't require copy elision, because
+ // copy/move constructors take a reference, which requires
+ // materialization to obtain the glvalue.
+ return create<SimpleTemporaryObjectConstructionContext>(C, BTE,
+ /*MTE=*/nullptr);
+ }
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(S)) {
+ // If the object requires destruction and is not lifetime-extended,
+ // then it must have a BTE within its MTE.
+ // FIXME: This should be an assertion.
+ if (!(MTE->getType().getCanonicalType()
+ ->getAsCXXRecordDecl()->hasTrivialDestructor() ||
+ MTE->getStorageDuration() != SD_FullExpression))
+ return nullptr;
+
+ // Handle pre-C++17 copy and move elision.
+ const CXXConstructExpr *ElidedCE = nullptr;
+ const ConstructionContext *ElidedCC = nullptr;
+ if (const ConstructionContextLayer *ElidedLayer = TopLayer->getParent()) {
+ ElidedCE = cast<CXXConstructExpr>(ElidedLayer->getTriggerStmt());
+ assert(ElidedCE->isElidable());
+ // We're creating a construction context that might have already
+ // been created elsewhere. Maybe we should unique our construction
+ // contexts. That's what we often do, but in this case it's unlikely
+ // to bring any benefits.
+ ElidedCC = createFromLayers(C, ElidedLayer->getParent());
+ if (!ElidedCC) {
+ // We may fail to create the elided construction context.
+ // In this case, skip copy elision entirely.
+ return create<SimpleTemporaryObjectConstructionContext>(C, nullptr,
+ MTE);
+ }
+ return create<ElidedTemporaryObjectConstructionContext>(
+ C, nullptr, MTE, ElidedCE, ElidedCC);
+ }
+ assert(TopLayer->isLast());
+ return create<SimpleTemporaryObjectConstructionContext>(C, nullptr, MTE);
+ }
+ if (const auto *RS = dyn_cast<ReturnStmt>(S)) {
+ assert(TopLayer->isLast());
+ return create<SimpleReturnedValueConstructionContext>(C, RS);
+ }
+ // This is a constructor into a function argument. Not implemented yet.
+ if (isa<CallExpr>(TopLayer->getTriggerStmt()))
+ return nullptr;
+ llvm_unreachable("Unexpected construction context with statement!");
+ } else if (const CXXCtorInitializer *I = TopLayer->getTriggerInit()) {
+ assert(TopLayer->isLast());
+ return create<SimpleConstructorInitializerConstructionContext>(C, I);
+ }
+ llvm_unreachable("Unexpected construction context!");
+}
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index 96edad0c3019..a46386e2d13d 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -1,4 +1,4 @@
-//===- Consumed.cpp --------------------------------------------*- C++ --*-===//
+//===- Consumed.cpp -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,21 +13,29 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/Consumed.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/RecursiveASTVisitor.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
#include <memory>
+#include <utility>
// TODO: Adjust states of args to constructors in the same way that arguments to
// function calls are handled.
@@ -49,7 +57,7 @@ using namespace clang;
using namespace consumed;
// Key method definition
-ConsumedWarningsHandlerBase::~ConsumedWarningsHandlerBase() {}
+ConsumedWarningsHandlerBase::~ConsumedWarningsHandlerBase() = default;
static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
// Find the source location of the first statement in the block, if the block
@@ -63,7 +71,7 @@ static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
if (Block->succ_size() == 1 && *Block->succ_begin())
return getFirstStmtLoc(*Block->succ_begin());
- return SourceLocation();
+ return {};
}
static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
@@ -109,7 +117,6 @@ static ConsumedState invertConsumedUnconsumed(ConsumedState State) {
static bool isCallableInState(const CallableWhenAttr *CWAttr,
ConsumedState State) {
-
for (const auto &S : CWAttr->callableStates()) {
ConsumedState MappedAttrState = CS_None;
@@ -134,7 +141,6 @@ static bool isCallableInState(const CallableWhenAttr *CWAttr,
return false;
}
-
static bool isConsumableType(const QualType &QT) {
if (QT->isPointerType() || QT->isReferenceType())
return false;
@@ -161,7 +167,6 @@ static bool isSetOnReadPtrType(const QualType &QT) {
return false;
}
-
static bool isKnownState(ConsumedState State) {
switch (State) {
case CS_Unconsumed:
@@ -270,11 +275,13 @@ static ConsumedState testsFor(const FunctionDecl *FunDecl) {
}
namespace {
+
struct VarTestResult {
const VarDecl *Var;
ConsumedState TestsFor;
};
-} // end anonymous::VarTestResult
+
+} // namespace
namespace clang {
namespace consumed {
@@ -292,7 +299,7 @@ class PropagationInfo {
IT_BinTest,
IT_Var,
IT_Tmp
- } InfoType;
+ } InfoType = IT_None;
struct BinTestTy {
const BinaryOperator *Source;
@@ -310,22 +317,19 @@ class PropagationInfo {
};
public:
- PropagationInfo() : InfoType(IT_None) {}
-
+ PropagationInfo() = default;
PropagationInfo(const VarTestResult &VarTest)
- : InfoType(IT_VarTest), VarTest(VarTest) {}
-
+ : InfoType(IT_VarTest), VarTest(VarTest) {}
+
PropagationInfo(const VarDecl *Var, ConsumedState TestsFor)
- : InfoType(IT_VarTest) {
-
+ : InfoType(IT_VarTest) {
VarTest.Var = Var;
VarTest.TestsFor = TestsFor;
}
PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
const VarTestResult &LTest, const VarTestResult &RTest)
- : InfoType(IT_BinTest) {
-
+ : InfoType(IT_BinTest) {
BinTest.Source = Source;
BinTest.EOp = EOp;
BinTest.LTest = LTest;
@@ -335,8 +339,7 @@ public:
PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
const VarDecl *LVar, ConsumedState LTestsFor,
const VarDecl *RVar, ConsumedState RTestsFor)
- : InfoType(IT_BinTest) {
-
+ : InfoType(IT_BinTest) {
BinTest.Source = Source;
BinTest.EOp = EOp;
BinTest.LTest.Var = LVar;
@@ -346,38 +349,37 @@ public:
}
PropagationInfo(ConsumedState State)
- : InfoType(IT_State), State(State) {}
-
+ : InfoType(IT_State), State(State) {}
PropagationInfo(const VarDecl *Var) : InfoType(IT_Var), Var(Var) {}
PropagationInfo(const CXXBindTemporaryExpr *Tmp)
- : InfoType(IT_Tmp), Tmp(Tmp) {}
+ : InfoType(IT_Tmp), Tmp(Tmp) {}
- const ConsumedState & getState() const {
+ const ConsumedState &getState() const {
assert(InfoType == IT_State);
return State;
}
- const VarTestResult & getVarTest() const {
+ const VarTestResult &getVarTest() const {
assert(InfoType == IT_VarTest);
return VarTest;
}
- const VarTestResult & getLTest() const {
+ const VarTestResult &getLTest() const {
assert(InfoType == IT_BinTest);
return BinTest.LTest;
}
- const VarTestResult & getRTest() const {
+ const VarTestResult &getRTest() const {
assert(InfoType == IT_BinTest);
return BinTest.RTest;
}
- const VarDecl * getVar() const {
+ const VarDecl *getVar() const {
assert(InfoType == IT_Var);
return Var;
}
- const CXXBindTemporaryExpr * getTmp() const {
+ const CXXBindTemporaryExpr *getTmp() const {
assert(InfoType == IT_Tmp);
return Tmp;
}
@@ -405,12 +407,12 @@ public:
return BinTest.Source;
}
- inline bool isValid() const { return InfoType != IT_None; }
- inline bool isState() const { return InfoType == IT_State; }
- inline bool isVarTest() const { return InfoType == IT_VarTest; }
- inline bool isBinTest() const { return InfoType == IT_BinTest; }
- inline bool isVar() const { return InfoType == IT_Var; }
- inline bool isTmp() const { return InfoType == IT_Tmp; }
+ bool isValid() const { return InfoType != IT_None; }
+ bool isState() const { return InfoType == IT_State; }
+ bool isVarTest() const { return InfoType == IT_VarTest; }
+ bool isBinTest() const { return InfoType == IT_BinTest; }
+ bool isVar() const { return InfoType == IT_Var; }
+ bool isTmp() const { return InfoType == IT_Tmp; }
bool isTest() const {
return InfoType == IT_VarTest || InfoType == IT_BinTest;
@@ -433,15 +435,17 @@ public:
BinTest.LTest.Var, invertConsumedUnconsumed(BinTest.LTest.TestsFor),
BinTest.RTest.Var, invertConsumedUnconsumed(BinTest.RTest.TestsFor));
} else {
- return PropagationInfo();
+ return {};
}
}
};
-static inline void
+} // namespace consumed
+} // namespace clang
+
+static void
setStateForVarOrTmp(ConsumedStateMap *StateMap, const PropagationInfo &PInfo,
ConsumedState State) {
-
assert(PInfo.isVar() || PInfo.isTmp());
if (PInfo.isVar())
@@ -450,12 +454,14 @@ setStateForVarOrTmp(ConsumedStateMap *StateMap, const PropagationInfo &PInfo,
StateMap->setState(PInfo.getTmp(), State);
}
+namespace clang {
+namespace consumed {
+
class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
-
- typedef llvm::DenseMap<const Stmt *, PropagationInfo> MapType;
- typedef std::pair<const Stmt *, PropagationInfo> PairType;
- typedef MapType::iterator InfoEntry;
- typedef MapType::const_iterator ConstInfoEntry;
+ using MapType = llvm::DenseMap<const Stmt *, PropagationInfo>;
+ using PairType = std::pair<const Stmt *, PropagationInfo>;
+ using InfoEntry = MapType::iterator;
+ using ConstInfoEntry = MapType::const_iterator;
AnalysisDeclContext &AC;
ConsumedAnalyzer &Analyzer;
@@ -463,17 +469,19 @@ class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
MapType PropagationMap;
InfoEntry findInfo(const Expr *E) {
- if (auto Cleanups = dyn_cast<ExprWithCleanups>(E))
+ if (const auto Cleanups = dyn_cast<ExprWithCleanups>(E))
if (!Cleanups->cleanupsHaveSideEffects())
E = Cleanups->getSubExpr();
return PropagationMap.find(E->IgnoreParens());
}
+
ConstInfoEntry findInfo(const Expr *E) const {
- if (auto Cleanups = dyn_cast<ExprWithCleanups>(E))
+ if (const auto Cleanups = dyn_cast<ExprWithCleanups>(E))
if (!Cleanups->cleanupsHaveSideEffects())
E = Cleanups->getSubExpr();
return PropagationMap.find(E->IgnoreParens());
}
+
void insertInfo(const Expr *E, const PropagationInfo &PI) {
PropagationMap.insert(PairType(E->IgnoreParens(), PI));
}
@@ -517,7 +525,7 @@ public:
if (Entry != PropagationMap.end())
return Entry->second;
else
- return PropagationInfo();
+ return {};
}
void reset(ConsumedStateMap *NewStateMap) {
@@ -525,6 +533,8 @@ public:
}
};
+} // namespace consumed
+} // namespace clang
void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
InfoEntry Entry = findInfo(From);
@@ -532,7 +542,6 @@ void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
insertInfo(To, Entry->second);
}
-
// Create a new state for To, which is initialized to the state of From.
// If NS is not CS_None, sets the state of From to NS.
void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
@@ -548,7 +557,6 @@ void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
}
}
-
// Get the ConsumedState for From
ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
InfoEntry Entry = findInfo(From);
@@ -559,7 +567,6 @@ ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
return CS_None;
}
-
// If we already have info for To then update it, otherwise create a new entry.
void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
InfoEntry Entry = findInfo(To);
@@ -572,8 +579,6 @@ void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
}
}
-
-
void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
const FunctionDecl *FunDecl,
SourceLocation BlameLoc) {
@@ -592,7 +597,6 @@ void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
Analyzer.WarningsHandler.warnUseInInvalidState(
FunDecl->getNameAsString(), PInfo.getVar()->getNameAsString(),
stateToString(VarState), BlameLoc);
-
} else {
ConsumedState TmpState = PInfo.getAsState(StateMap);
@@ -604,7 +608,6 @@ void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
}
}
-
// Factors out common behavior for function, method, and operator calls.
// Check parameters and set parameter state if necessary.
// Returns true if the state of ObjArg is set, or false otherwise.
@@ -681,7 +684,6 @@ bool ConsumedStmtVisitor::handleCall(const CallExpr *Call, const Expr *ObjArg,
return false;
}
-
void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
const FunctionDecl *Fun) {
QualType RetType = Fun->getCallResultType();
@@ -699,7 +701,6 @@ void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
}
}
-
void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
switch (BinOp->getOpcode()) {
case BO_LAnd:
@@ -711,7 +712,6 @@ void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
if (LEntry != PropagationMap.end() && LEntry->second.isVarTest()) {
LTest = LEntry->second.getVarTest();
-
} else {
LTest.Var = nullptr;
LTest.TestsFor = CS_None;
@@ -719,7 +719,6 @@ void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
if (REntry != PropagationMap.end() && REntry->second.isVarTest()) {
RTest = REntry->second.getVarTest();
-
} else {
RTest.Var = nullptr;
RTest.TestsFor = CS_None;
@@ -728,7 +727,6 @@ void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
if (!(LTest.Var == nullptr && RTest.Var == nullptr))
PropagationMap.insert(PairType(BinOp, PropagationInfo(BinOp,
static_cast<EffectiveOp>(BinOp->getOpcode() == BO_LOr), LTest, RTest)));
-
break;
}
@@ -805,7 +803,6 @@ void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
}
}
-
void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
const CXXMemberCallExpr *Call) {
CXXMethodDecl* MD = Call->getMethodDecl();
@@ -816,12 +813,9 @@ void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
propagateReturnType(Call, MD);
}
-
void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
const CXXOperatorCallExpr *Call) {
-
- const FunctionDecl *FunDecl =
- dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
+ const auto *FunDecl = dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
if (!FunDecl) return;
if (Call->getOperator() == OO_Equal) {
@@ -831,7 +825,7 @@ void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
return;
}
- if (const CXXMemberCallExpr *MCall = dyn_cast<CXXMemberCallExpr>(Call))
+ if (const auto *MCall = dyn_cast<CXXMemberCallExpr>(Call))
handleCall(MCall, MCall->getImplicitObjectArgument(), FunDecl);
else
handleCall(Call, Call->getArg(0), FunDecl);
@@ -840,7 +834,7 @@ void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
}
void ConsumedStmtVisitor::VisitDeclRefExpr(const DeclRefExpr *DeclRef) {
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclRef->getDecl()))
+ if (const auto *Var = dyn_cast_or_null<VarDecl>(DeclRef->getDecl()))
if (StateMap->getState(Var) != consumed::CS_None)
PropagationMap.insert(PairType(DeclRef, PropagationInfo(Var)));
}
@@ -851,13 +845,12 @@ void ConsumedStmtVisitor::VisitDeclStmt(const DeclStmt *DeclS) {
VisitVarDecl(cast<VarDecl>(DI));
if (DeclS->isSingleDecl())
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
+ if (const auto *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
PropagationMap.insert(PairType(DeclS, PropagationInfo(Var)));
}
void ConsumedStmtVisitor::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *Temp) {
-
forwardInfo(Temp->GetTemporaryExpr(), Temp);
}
@@ -865,7 +858,6 @@ void ConsumedStmtVisitor::VisitMemberExpr(const MemberExpr *MExpr) {
forwardInfo(MExpr->getBase(), MExpr);
}
-
void ConsumedStmtVisitor::VisitParmVarDecl(const ParmVarDecl *Param) {
QualType ParamType = Param->getType();
ConsumedState ParamState = consumed::CS_None;
@@ -943,10 +935,6 @@ void ConsumedStmtVisitor::VisitVarDecl(const VarDecl *Var) {
StateMap->setState(Var, consumed::CS_Unknown);
}
}
-}} // end clang::consumed::ConsumedStmtVisitor
-
-namespace clang {
-namespace consumed {
static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test,
ConsumedStateMap *ThenStates,
@@ -956,10 +944,8 @@ static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test,
if (VarState == CS_Unknown) {
ThenStates->setState(Test.Var, Test.TestsFor);
ElseStates->setState(Test.Var, invertConsumedUnconsumed(Test.TestsFor));
-
} else if (VarState == invertConsumedUnconsumed(Test.TestsFor)) {
ThenStates->markUnreachable();
-
} else if (VarState == Test.TestsFor) {
ElseStates->markUnreachable();
}
@@ -978,28 +964,22 @@ static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
if (PInfo.testEffectiveOp() == EO_And) {
if (LState == CS_Unknown) {
ThenStates->setState(LTest.Var, LTest.TestsFor);
-
} else if (LState == invertConsumedUnconsumed(LTest.TestsFor)) {
ThenStates->markUnreachable();
-
} else if (LState == LTest.TestsFor && isKnownState(RState)) {
if (RState == RTest.TestsFor)
ElseStates->markUnreachable();
else
ThenStates->markUnreachable();
}
-
} else {
if (LState == CS_Unknown) {
ElseStates->setState(LTest.Var,
invertConsumedUnconsumed(LTest.TestsFor));
-
} else if (LState == LTest.TestsFor) {
ElseStates->markUnreachable();
-
} else if (LState == invertConsumedUnconsumed(LTest.TestsFor) &&
isKnownState(RState)) {
-
if (RState == RTest.TestsFor)
ElseStates->markUnreachable();
else
@@ -1014,7 +994,6 @@ static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
ThenStates->setState(RTest.Var, RTest.TestsFor);
else if (RState == invertConsumedUnconsumed(RTest.TestsFor))
ThenStates->markUnreachable();
-
} else {
if (RState == CS_Unknown)
ElseStates->setState(RTest.Var,
@@ -1027,7 +1006,6 @@ static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
const CFGBlock *TargetBlock) {
-
assert(CurrBlock && "Block pointer must not be NULL");
assert(TargetBlock && "TargetBlock pointer must not be NULL");
@@ -1043,7 +1021,6 @@ bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
void ConsumedBlockInfo::addInfo(
const CFGBlock *Block, ConsumedStateMap *StateMap,
std::unique_ptr<ConsumedStateMap> &OwnedStateMap) {
-
assert(Block && "Block pointer must not be NULL");
auto &Entry = StateMapsArray[Block->getBlockID()];
@@ -1058,7 +1035,6 @@ void ConsumedBlockInfo::addInfo(
void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
std::unique_ptr<ConsumedStateMap> StateMap) {
-
assert(Block && "Block pointer must not be NULL");
auto &Entry = StateMapsArray[Block->getBlockID()];
@@ -1119,7 +1095,7 @@ void ConsumedStateMap::checkParamsForReturnTypestate(SourceLocation BlameLoc,
for (const auto &DM : VarMap) {
if (isa<ParmVarDecl>(DM.first)) {
- const ParmVarDecl *Param = cast<ParmVarDecl>(DM.first);
+ const auto *Param = cast<ParmVarDecl>(DM.first);
const ReturnTypestateAttr *RTA = Param->getAttr<ReturnTypestateAttr>();
if (!RTA)
@@ -1226,7 +1202,7 @@ bool ConsumedStateMap::operator!=(const ConsumedStateMap *Other) const {
void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
const FunctionDecl *D) {
QualType ReturnType;
- if (const CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
ASTContext &CurrContext = AC.getASTContext();
ReturnType = Constructor->getThisType(CurrContext)->getPointeeType();
} else
@@ -1256,14 +1232,12 @@ void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
const ConsumedStmtVisitor &Visitor) {
-
std::unique_ptr<ConsumedStateMap> FalseStates(
new ConsumedStateMap(*CurrStates));
PropagationInfo PInfo;
- if (const IfStmt *IfNode =
- dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
-
+ if (const auto *IfNode =
+ dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
const Expr *Cond = IfNode->getCond();
PInfo = Visitor.getInfo(Cond);
@@ -1275,19 +1249,15 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
FalseStates->setSource(Cond);
splitVarStateForIf(IfNode, PInfo.getVarTest(), CurrStates.get(),
FalseStates.get());
-
} else if (PInfo.isBinTest()) {
CurrStates->setSource(PInfo.testSourceNode());
FalseStates->setSource(PInfo.testSourceNode());
splitVarStateForIfBinOp(PInfo, CurrStates.get(), FalseStates.get());
-
} else {
return false;
}
-
- } else if (const BinaryOperator *BinOp =
- dyn_cast_or_null<BinaryOperator>(CurrBlock->getTerminator().getStmt())) {
-
+ } else if (const auto *BinOp =
+ dyn_cast_or_null<BinaryOperator>(CurrBlock->getTerminator().getStmt())) {
PInfo = Visitor.getInfo(BinOp->getLHS());
if (!PInfo.isVarTest()) {
if ((BinOp = dyn_cast_or_null<BinaryOperator>(BinOp->getLHS()))) {
@@ -1295,7 +1265,6 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
if (!PInfo.isVarTest())
return false;
-
} else {
return false;
}
@@ -1320,7 +1289,6 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
else if (VarState == Test.TestsFor)
FalseStates->markUnreachable();
}
-
} else {
return false;
}
@@ -1339,7 +1307,7 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
}
void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
- const FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(AC.getDecl());
+ const auto *D = dyn_cast_or_null<FunctionDecl>(AC.getDecl());
if (!D)
return;
@@ -1368,7 +1336,6 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
if (!CurrStates) {
continue;
-
} else if (!CurrStates->isReachable()) {
CurrStates = nullptr;
continue;
@@ -1423,7 +1390,6 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
-
if (*SI == nullptr) continue;
if (BlockInfo.isBackEdge(CurrBlock, *SI)) {
@@ -1452,4 +1418,3 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
WarningsHandler.emitDiagnostics();
}
-}} // end namespace clang::consumed
diff --git a/lib/Analysis/Dominators.cpp b/lib/Analysis/Dominators.cpp
index 0e02c6d7174a..1b7dd8c804e1 100644
--- a/lib/Analysis/Dominators.cpp
+++ b/lib/Analysis/Dominators.cpp
@@ -1,4 +1,4 @@
-//=- Dominators.cpp - Implementation of dominators tree for Clang CFG C++ -*-=//
+//===- Dominators.cpp - Implementation of dominators tree for Clang CFG ---===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,4 +11,4 @@
using namespace clang;
-void DominatorTree::anchor() { }
+void DominatorTree::anchor() {}
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 4752c2b020ae..b8ea1e960095 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -77,6 +77,7 @@ public:
AnalysisDeclContext &analysisContext;
llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
+ llvm::ImmutableSet<const BindingDecl *>::Factory BSetFact;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness;
llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness;
@@ -97,6 +98,7 @@ public:
: analysisContext(ac),
SSetFact(false), // Do not canonicalize ImmutableSets by default.
DSetFact(false), // This is a *major* performance win.
+ BSetFact(false),
killAtAssign(KillAtAssign) {}
};
}
@@ -114,6 +116,12 @@ bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
}
bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
+ if (const auto *DD = dyn_cast<DecompositionDecl>(D)) {
+ bool alive = false;
+ for (const BindingDecl *BD : DD->bindings())
+ alive |= liveBindings.contains(BD);
+ return alive;
+ }
return liveDecls.contains(D);
}
@@ -145,14 +153,19 @@ LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory());
+ llvm::ImmutableSetRef<const BindingDecl *>
+ BSetRefA(valsA.liveBindings.getRootWithoutRetain(), BSetFact.getTreeFactory()),
+ BSetRefB(valsB.liveBindings.getRootWithoutRetain(), BSetFact.getTreeFactory());
SSetRefA = mergeSets(SSetRefA, SSetRefB);
DSetRefA = mergeSets(DSetRefA, DSetRefB);
+ BSetRefA = mergeSets(BSetRefA, BSetRefB);
// asImmutableSet() canonicalizes the tree, allowing us to do an easy
// comparison afterwards.
return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(),
- DSetRefA.asImmutableSet());
+ DSetRefA.asImmutableSet(),
+ BSetRefA.asImmutableSet());
}
bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
@@ -322,6 +335,11 @@ void TransferFunctions::Visit(Stmt *S) {
}
}
+static bool writeShouldKill(const VarDecl *VD) {
+ return VD && !VD->getType()->isReferenceType() &&
+ !isAlwaysAlive(VD);
+}
+
void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
if (B->isAssignmentOp()) {
if (!LV.killAtAssign)
@@ -329,21 +347,25 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
// Assigning to a variable?
Expr *LHS = B->getLHS()->IgnoreParens();
-
- if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- // Assignments to references don't kill the ref's address
- if (VD->getType()->isReferenceType())
- return;
-
- if (!isAlwaysAlive(VD)) {
- // The variable is now dead.
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
+ const Decl *D = DR->getDecl();
+ bool Killed = false;
+
+ if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ Killed = !BD->getType()->isReferenceType();
+ if (Killed)
+ val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ Killed = writeShouldKill(VD);
+ if (Killed)
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
- }
- if (observer)
- observer->observerKill(DR);
}
+
+ if (Killed && observer)
+ observer->observerKill(DR);
+ }
}
}
@@ -357,17 +379,27 @@ void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
}
void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
- if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
- if (!isAlwaysAlive(D) && LV.inAssignment.find(DR) == LV.inAssignment.end())
- val.liveDecls = LV.DSetFact.add(val.liveDecls, D);
+ const Decl *D = DR->getDecl();
+ bool InAssignment = LV.inAssignment[DR];
+ if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ if (!InAssignment)
+ val.liveBindings = LV.BSetFact.add(val.liveBindings, BD);
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!InAssignment && !isAlwaysAlive(VD))
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
+ }
}
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
- for (const auto *DI : DS->decls())
- if (const auto *VD = dyn_cast<VarDecl>(DI)) {
+ for (const auto *DI : DS->decls()) {
+ if (const auto *DD = dyn_cast<DecompositionDecl>(DI)) {
+ for (const auto *BD : DD->bindings())
+ val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ } else if (const auto *VD = dyn_cast<VarDecl>(DI)) {
if (!isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
}
+ }
}
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) {
@@ -422,12 +454,14 @@ void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) {
case UO_PreDec:
break;
}
-
- if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens()))
- if (isa<VarDecl>(DR->getDecl())) {
+
+ if (auto *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens())) {
+ const Decl *D = DR->getDecl();
+ if (isa<VarDecl>(D) || isa<BindingDecl>(D)) {
// Treat ++/-- as a kill.
observer->observerKill(DR);
}
+ }
}
LiveVariables::LivenessValues
@@ -508,10 +542,10 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
bi != be; ++bi) {
if (Optional<CFGStmt> cs = bi->getAs<CFGStmt>()) {
- if (const BinaryOperator *BO =
- dyn_cast<BinaryOperator>(cs->getStmt())) {
+ const Stmt *stmt = cs->getStmt();
+ if (const auto *BO = dyn_cast<BinaryOperator>(stmt)) {
if (BO->getOpcode() == BO_Assign) {
- if (const DeclRefExpr *DR =
+ if (const auto *DR =
dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
LV->inAssignment[DR] = 1;
}
@@ -563,7 +597,7 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
it != ei; ++it) {
vec.push_back(it->first);
}
- std::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
+ llvm::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
return A->getBlockID() < B->getBlockID();
});
@@ -583,7 +617,8 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
declVec.push_back(*si);
}
- std::sort(declVec.begin(), declVec.end(), [](const Decl *A, const Decl *B) {
+ llvm::sort(declVec.begin(), declVec.end(),
+ [](const Decl *A, const Decl *B) {
return A->getLocStart() < B->getLocStart();
});
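For context, a hedged sketch of the structured-binding liveness modeled above; makePair and use are illustrative stand-ins:

#include <utility>

std::pair<int, int> makePair();
void use(int);

void f() {
  auto [a, b] = makePair();  // DecompositionDecl with BindingDecls 'a', 'b'.
  use(a);                    // 'a' is live, so isLive() on the decomposition
                             // now reports true and the initializer is not
                             // treated as a dead store.
}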
diff --git a/lib/Analysis/PostOrderCFGView.cpp b/lib/Analysis/PostOrderCFGView.cpp
index 5a3c8182a140..124424bf2567 100644
--- a/lib/Analysis/PostOrderCFGView.cpp
+++ b/lib/Analysis/PostOrderCFGView.cpp
@@ -1,4 +1,4 @@
-//===- PostOrderCFGView.cpp - Post order view of CFG blocks -------*- C++ --*-//
+//===- PostOrderCFGView.cpp - Post order view of CFG blocks ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,10 +12,12 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
using namespace clang;
-void PostOrderCFGView::anchor() { }
+void PostOrderCFGView::anchor() {}
PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
Blocks.reserve(cfg->getNumBlockIDs());
@@ -46,4 +48,3 @@ bool PostOrderCFGView::BlockOrderCompare::operator()(const CFGBlock *b1,
unsigned b2V = (b2It == POV.BlockOrder.end()) ? 0 : b2It->second;
return b1V > b2V;
}
-
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index dfaed26564e6..00591ab2b048 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/Analysis/Analyses/OSLog.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
@@ -119,36 +120,56 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
return true;
}
- const char *OSLogVisibilityFlagsStart = nullptr,
- *OSLogVisibilityFlagsEnd = nullptr;
if (*I == '{') {
- OSLogVisibilityFlagsStart = I++;
- // Find the end of the modifier.
- while (I != E && *I != '}') {
- I++;
- }
- if (I == E) {
- if (Warn)
- H.HandleIncompleteSpecifier(Start, E - Start);
- return true;
- }
- assert(*I == '}');
- OSLogVisibilityFlagsEnd = I++;
-
- // Just see if 'private' or 'public' is the first word. os_log itself will
- // do any further parsing.
- const char *P = OSLogVisibilityFlagsStart + 1;
- while (P < OSLogVisibilityFlagsEnd && isspace(*P))
- P++;
- const char *WordStart = P;
- while (P < OSLogVisibilityFlagsEnd && (isalnum(*P) || *P == '_'))
- P++;
- const char *WordEnd = P;
- StringRef Word(WordStart, WordEnd - WordStart);
- if (Word == "private") {
- FS.setIsPrivate(WordStart);
- } else if (Word == "public") {
- FS.setIsPublic(WordStart);
+ ++I;
+ unsigned char PrivacyFlags = 0;
+ StringRef MatchedStr;
+
+ do {
+ StringRef Str(I, E - I);
+ std::string Match = "^[\t\n\v\f\r ]*(private|public)[\t\n\v\f\r ]*(,|})";
+ llvm::Regex R(Match);
+ SmallVector<StringRef, 2> Matches;
+
+ if (R.match(Str, &Matches)) {
+ MatchedStr = Matches[1];
+ I += Matches[0].size();
+
+ // Set the privacy flag if the privacy annotation in the
+ // comma-delimited segment is at least as strict as the privacy
+ // annotations in previous comma-delimited segments.
+ if (MatchedStr.equals("private"))
+ PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPrivate;
+ else if (PrivacyFlags == 0 && MatchedStr.equals("public"))
+ PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPublic;
+ } else {
+ size_t CommaOrBracePos =
+ Str.find_if([](char c) { return c == ',' || c == '}'; });
+
+ if (CommaOrBracePos == StringRef::npos) {
+ // Neither a comma nor the closing brace was found.
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ I += CommaOrBracePos + 1;
+ }
+ // Continue until the closing brace is found.
+ } while (*(I - 1) == ',');
+
+ // Set the privacy flag.
+ switch (PrivacyFlags) {
+ case 0:
+ break;
+ case clang::analyze_os_log::OSLogBufferItem::IsPrivate:
+ FS.setIsPrivate(MatchedStr.data());
+ break;
+ case clang::analyze_os_log::OSLogBufferItem::IsPublic:
+ FS.setIsPublic(MatchedStr.data());
+ break;
+ default:
+ llvm_unreachable("Unexpected privacy flag value");
}
}
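For reference, how comma-delimited annotation lists resolve under the loop above, as inferred from this hunk (format strings follow the os_log convention):

// "%{public}s"          -> IsPublic
// "%{private}s"         -> IsPrivate
// "%{public, private}s" -> IsPrivate: "private" always overrides.
// "%{private, public}s" -> IsPrivate: "public" only applies when no earlier
//                          segment set a privacy flag.
// "%{foo, public}s"     -> IsPublic: unmatched segments are skipped up to
//                          the next ',' or '}'.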
@@ -466,13 +487,14 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsIntMax:
return ArgType(Ctx.getIntMaxType(), "intmax_t");
case LengthModifier::AsSizeT:
- return ArgType(Ctx.getSignedSizeType(), "ssize_t");
+ return ArgType::makeSizeT(ArgType(Ctx.getSignedSizeType(), "ssize_t"));
case LengthModifier::AsInt3264:
return Ctx.getTargetInfo().getTriple().isArch64Bit()
? ArgType(Ctx.LongLongTy, "__int64")
: ArgType(Ctx.IntTy, "__int32");
case LengthModifier::AsPtrDiff:
- return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
+ return ArgType::makePtrdiffT(
+ ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
case LengthModifier::AsWide:
@@ -499,13 +521,14 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsIntMax:
return ArgType(Ctx.getUIntMaxType(), "uintmax_t");
case LengthModifier::AsSizeT:
- return ArgType(Ctx.getSizeType(), "size_t");
+ return ArgType::makeSizeT(ArgType(Ctx.getSizeType(), "size_t"));
case LengthModifier::AsInt3264:
return Ctx.getTargetInfo().getTriple().isArch64Bit()
? ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64")
: ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
case LengthModifier::AsPtrDiff:
- return ArgType(Ctx.getUnsignedPointerDiffType(), "unsigned ptrdiff_t");
+ return ArgType::makePtrdiffT(
+ ArgType(Ctx.getUnsignedPointerDiffType(), "unsigned ptrdiff_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
case LengthModifier::AsWide:
@@ -647,6 +670,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::Bool:
case BuiltinType::WChar_U:
case BuiltinType::WChar_S:
+ case BuiltinType::Char8: // FIXME: Treat like 'char'?
case BuiltinType::Char16:
case BuiltinType::Char32:
case BuiltinType::UInt128:
@@ -654,6 +678,30 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::Half:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
// Various types which are non-trivial to correct.
return false;
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index 7e72795a47f6..f644d503dc49 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -66,6 +66,21 @@ static bool isBuiltinUnreachable(const Stmt *S) {
return false;
}
+static bool isBuiltinAssumeFalse(const CFGBlock *B, const Stmt *S,
+ ASTContext &C) {
+ if (B->empty()) {
+ // Happens if S is B's terminator and B contains nothing else
+ // (e.g. a CFGBlock containing only a goto).
+ return false;
+ }
+ if (Optional<CFGStmt> CS = B->back().getAs<CFGStmt>()) {
+ if (const auto *CE = dyn_cast<CallExpr>(CS->getStmt())) {
+ return CE->getCallee()->IgnoreCasts() == S && CE->isBuiltinAssumeFalse(C);
+ }
+ }
+ return false;
+}
+
static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
// Look to see if the current control flow ends with a 'return', and see if
// 'S' is a substatement. The 'return' may not be the last element in the
@@ -372,6 +387,7 @@ namespace {
llvm::BitVector &Reachable;
SmallVector<const CFGBlock *, 10> WorkList;
Preprocessor &PP;
+ ASTContext &C;
typedef SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
DeferredLocsTy;
@@ -379,10 +395,10 @@ namespace {
DeferredLocsTy DeferredLocs;
public:
- DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP)
+ DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP, ASTContext &C)
: Visited(reachable.size()),
Reachable(reachable),
- PP(PP) {}
+ PP(PP), C(C) {}
void enqueue(const CFGBlock *block);
unsigned scanBackwards(const CFGBlock *Start,
@@ -600,7 +616,8 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
if (isa<BreakStmt>(S)) {
UK = reachable_code::UK_Break;
- } else if (isTrivialDoWhile(B, S) || isBuiltinUnreachable(S)) {
+ } else if (isTrivialDoWhile(B, S) || isBuiltinUnreachable(S) ||
+ isBuiltinAssumeFalse(B, S, C)) {
return;
}
else if (isDeadReturn(B, S)) {
@@ -693,7 +710,7 @@ void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
if (reachable[block->getBlockID()])
continue;
- DeadCodeScan DS(reachable, PP);
+ DeadCodeScan DS(reachable, PP, AC.getASTContext());
numReachable += DS.scanBackwards(block, CB);
if (numReachable == cfg->getNumBlockIDs())
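For context, a hedged sketch of the pattern the new check recognizes: an intentional __builtin_assume(false) marker in provably dead code is now treated like __builtin_unreachable() and not itself reported under -Wunreachable-code:

int classify(int x) {
  if (x >= 0)
    return 1;
  if (x < 0)
    return -1;
  __builtin_assume(false);  // Unreachable by construction; previously this
                            // call itself triggered -Wunreachable-code.
}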
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 6a9c9a04c55d..03cc234dce5c 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
+//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,41 +17,59 @@
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
-#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
-#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <ostream>
-#include <sstream>
+#include <cassert>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <type_traits>
#include <utility>
#include <vector>
+
using namespace clang;
using namespace threadSafety;
// Key method definition
-ThreadSafetyHandler::~ThreadSafetyHandler() {}
+ThreadSafetyHandler::~ThreadSafetyHandler() = default;
namespace {
+
class TILPrinter :
- public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
+ public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
+} // namespace
/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
@@ -66,11 +84,13 @@ static void warnInvalidLock(ThreadSafetyHandler &Handler,
Handler.handleInvalidLockExp(Kind, Loc);
}
-/// \brief A set of CapabilityInfo objects, which are compiled from the
+namespace {
+
+/// A set of CapabilityInfo objects, which are compiled from the
/// requires attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
- /// \brief Push M onto list, but discard duplicates.
+ /// Push M onto list, but discard duplicates.
void push_back_nodup(const CapabilityExpr &CapE) {
iterator It = std::find_if(begin(), end(),
[=](const CapabilityExpr &CapE2) {
@@ -84,33 +104,37 @@ public:
class FactManager;
class FactSet;
-/// \brief This is a helper class that stores a fact that is known at a
+/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
-/// FIXME: this analysis does not currently support either re-entrant
-/// locking or lock "upgrading" and "downgrading" between exclusive and
-/// shared.
+/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
private:
- LockKind LKind; ///< exclusive or shared
- SourceLocation AcquireLoc; ///< where it was acquired.
- bool Asserted; ///< true if the lock was asserted
- bool Declared; ///< true if the lock was declared
+ /// Exclusive or shared.
+ LockKind LKind;
+
+ /// Where it was acquired.
+ SourceLocation AcquireLoc;
+
+ /// True if the lock was asserted.
+ bool Asserted;
+
+ /// True if the lock was declared.
+ bool Declared;
public:
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
bool Asrt, bool Declrd = false)
: CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
Declared(Declrd) {}
+ virtual ~FactEntry() = default;
- virtual ~FactEntry() {}
-
- LockKind kind() const { return LKind; }
- SourceLocation loc() const { return AcquireLoc; }
- bool asserted() const { return Asserted; }
- bool declared() const { return Declared; }
+ LockKind kind() const { return LKind; }
+ SourceLocation loc() const { return AcquireLoc; }
+ bool asserted() const { return Asserted; }
+ bool declared() const { return Declared; }
void setDeclared(bool D) { Declared = D; }
@@ -129,10 +153,9 @@ public:
}
};
+using FactID = unsigned short;
-typedef unsigned short FactID;
-
-/// \brief FactManager manages the memory for all facts that are created during
+/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
@@ -148,8 +171,7 @@ public:
FactEntry &operator[](FactID F) { return *Facts[F]; }
};
-
-/// \brief A FactSet is the set of facts that are known to be true at a
+/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
@@ -158,25 +180,25 @@ public:
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
- typedef SmallVector<FactID, 4> FactVec;
+ using FactVec = SmallVector<FactID, 4>;
FactVec FactIDs;
public:
- typedef FactVec::iterator iterator;
- typedef FactVec::const_iterator const_iterator;
+ using iterator = FactVec::iterator;
+ using const_iterator = FactVec::const_iterator;
- iterator begin() { return FactIDs.begin(); }
+ iterator begin() { return FactIDs.begin(); }
const_iterator begin() const { return FactIDs.begin(); }
- iterator end() { return FactIDs.end(); }
+ iterator end() { return FactIDs.end(); }
const_iterator end() const { return FactIDs.end(); }
bool isEmpty() const { return FactIDs.size() == 0; }
// Return true if the set contains only negative facts
bool isEmpty(FactManager &FactMan) const {
- for (FactID FID : *this) {
+ for (const auto FID : *this) {
if (!FactMan[FID].negative())
return false;
}
@@ -247,28 +269,30 @@ public:
};
class ThreadSafetyAnalyzer;
+
} // namespace
namespace clang {
namespace threadSafety {
+
class BeforeSet {
private:
- typedef SmallVector<const ValueDecl*, 4> BeforeVect;
+ using BeforeVect = SmallVector<const ValueDecl *, 4>;
struct BeforeInfo {
- BeforeInfo() : Visited(0) {}
- BeforeInfo(BeforeInfo &&) = default;
-
BeforeVect Vect;
- int Visited;
+ int Visited = 0;
+
+ BeforeInfo() = default;
+ BeforeInfo(BeforeInfo &&) = default;
};
- typedef llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>
- BeforeMap;
- typedef llvm::DenseMap<const ValueDecl*, bool> CycleMap;
+ using BeforeMap =
+ llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
+ using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
public:
- BeforeSet() { }
+ BeforeSet() = default;
BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
ThreadSafetyAnalyzer& Analyzer);
@@ -283,15 +307,18 @@ public:
private:
BeforeMap BMap;
- CycleMap CycMap;
+ CycleMap CycMap;
};
-} // end namespace threadSafety
-} // end namespace clang
+
+} // namespace threadSafety
+} // namespace clang
namespace {
-typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
+
class LocalVariableMap;
+using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
+
/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };
@@ -299,33 +326,46 @@ enum CFGBlockSide { CBS_Entry, CBS_Exit };
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
- FactSet EntrySet; // Lockset held at entry to block
- FactSet ExitSet; // Lockset held at exit from block
- LocalVarContext EntryContext; // Context held at entry to block
- LocalVarContext ExitContext; // Context held at exit from block
- SourceLocation EntryLoc; // Location of first statement in block
- SourceLocation ExitLoc; // Location of last statement in block.
- unsigned EntryIndex; // Used to replay contexts later
- bool Reachable; // Is this block reachable?
+ // Lockset held at entry to block
+ FactSet EntrySet;
+
+ // Lockset held at exit from block
+ FactSet ExitSet;
+
+ // Context held at entry to block
+ LocalVarContext EntryContext;
+
+ // Context held at exit from block
+ LocalVarContext ExitContext;
+
+ // Location of first statement in block
+ SourceLocation EntryLoc;
+
+ // Location of last statement in block.
+ SourceLocation ExitLoc;
+
+ // Used to replay contexts later
+ unsigned EntryIndex;
+
+ // Is this block reachable?
+ bool Reachable = false;
const FactSet &getSet(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntrySet : ExitSet;
}
+
SourceLocation getLocation(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntryLoc : ExitLoc;
}
private:
CFGBlockInfo(LocalVarContext EmptyCtx)
- : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false)
- { }
+ : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}
public:
static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
-
-
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
@@ -341,7 +381,7 @@ public:
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
- typedef LocalVarContext Context;
+ using Context = LocalVarContext;
/// A VarDefinition consists of an expression, representing the value of the
/// variable, along with the context in which that expression should be
@@ -351,30 +391,35 @@ public:
public:
friend class LocalVariableMap;
- const NamedDecl *Dec; // The original declaration for this variable.
- const Expr *Exp; // The expression for this variable, OR
- unsigned Ref; // Reference to another VarDefinition
- Context Ctx; // The map with which Exp should be interpreted.
+ // The original declaration for this variable.
+ const NamedDecl *Dec;
+
+ // The expression for this variable, OR
+ const Expr *Exp = nullptr;
+
+ // Reference to another VarDefinition
+ unsigned Ref = 0;
+
+ // The map with which Exp should be interpreted.
+ Context Ctx;
bool isReference() { return !Exp; }
private:
// Create ordinary variable definition
VarDefinition(const NamedDecl *D, const Expr *E, Context C)
- : Dec(D), Exp(E), Ref(0), Ctx(C)
- { }
+ : Dec(D), Exp(E), Ctx(C) {}
// Create reference to previous definition
VarDefinition(const NamedDecl *D, unsigned R, Context C)
- : Dec(D), Exp(nullptr), Ref(R), Ctx(C)
- { }
+ : Dec(D), Ref(R), Ctx(C) {}
};
private:
Context::Factory ContextFactory;
std::vector<VarDefinition> VarDefinitions;
std::vector<unsigned> CtxIndices;
- std::vector<std::pair<Stmt*, Context> > SavedContexts;
+ std::vector<std::pair<Stmt *, Context>> SavedContexts;
public:
LocalVariableMap() {
@@ -471,12 +516,14 @@ public:
std::vector<CFGBlockInfo> &BlockInfo);
protected:
+ friend class VarMapBuilder;
+
// Get the current context index
unsigned getContextIndex() { return SavedContexts.size()-1; }
// Save the current context for later replay
void saveContext(Stmt *S, Context C) {
- SavedContexts.push_back(std::make_pair(S,C));
+ SavedContexts.push_back(std::make_pair(S, C));
}
// Adds a new definition to the given context, and returns a new context.
@@ -533,16 +580,16 @@ protected:
Context intersectContexts(Context C1, Context C2);
Context createReferenceContext(Context C);
void intersectBackEdge(Context C1, Context C2);
-
- friend class VarMapBuilder;
};
+} // namespace
// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
return CFGBlockInfo(M.getEmptyContext());
}
+namespace {
/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
@@ -551,12 +598,13 @@ public:
LocalVariableMap::Context Ctx;
VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
- : VMap(VM), Ctx(C) {}
+ : VMap(VM), Ctx(C) {}
void VisitDeclStmt(DeclStmt *S);
void VisitBinaryOperator(BinaryOperator *BO);
};
+} // namespace
// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
@@ -586,8 +634,8 @@ void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
// Update the variable map and current context.
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
- ValueDecl *VDec = DRE->getDecl();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
+ const ValueDecl *VDec = DRE->getDecl();
if (Ctx.lookup(VDec)) {
if (BO->getOpcode() == BO_Assign)
Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
@@ -599,7 +647,6 @@ void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
}
}
-
// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
@@ -642,7 +689,6 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
}
}
-
// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
@@ -680,7 +726,6 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
// ... { y -> y1 | x3 = 2, x2 = 1, ... }
-//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
const PostOrderCFGView *SortedGraph,
std::vector<CFGBlockInfo> &BlockInfo) {
@@ -731,12 +776,11 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
// Visit all the statements in the basic block.
VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
- for (CFGBlock::const_iterator BI = CurrBlock->begin(),
- BE = CurrBlock->end(); BI != BE; ++BI) {
- switch (BI->getKind()) {
+ for (const auto &BI : *CurrBlock) {
+ switch (BI.getKind()) {
case CFGElement::Statement: {
- CFGStmt CS = BI->castAs<CFGStmt>();
- VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
+ CFGStmt CS = BI.castAs<CFGStmt>();
+ VMapBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
break;
}
default:
@@ -790,10 +834,9 @@ static void findBlockLocations(CFG *CFGraph,
if (CurrBlockInfo->ExitLoc.isValid()) {
// This block contains at least one statement. Find the source location
// of the first statement in the block.
- for (CFGBlock::const_iterator BI = CurrBlock->begin(),
- BE = CurrBlock->end(); BI != BE; ++BI) {
+ for (const auto &BI : *CurrBlock) {
// FIXME: Handle other CFGElement kinds.
- if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
+ if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
break;
}
@@ -808,9 +851,12 @@ static void findBlockLocations(CFG *CFGraph,
}
}
+namespace {
+
class LockableFactEntry : public FactEntry {
private:
- bool Managed; ///< managed by ScopedLockable object
+ /// Managed by a ScopedLockable object.
+ bool Managed;
public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
@@ -857,7 +903,7 @@ public:
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
- for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto *UnderlyingMutex : UnderlyingMutexes) {
if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
// If this scoped lock manages another mutex, and if the underlying
// mutex is still held, then warn about the underlying mutex.
@@ -872,7 +918,7 @@ public:
bool FullyRemove, ThreadSafetyHandler &Handler,
StringRef DiagKind) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
- for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto *UnderlyingMutex : UnderlyingMutexes) {
CapabilityExpr UnderCp(UnderlyingMutex, false);
auto UnderEntry = llvm::make_unique<LockableFactEntry>(
!UnderCp, LK_Exclusive, UnlockLoc);
@@ -900,7 +946,7 @@ public:
}
};
-/// \brief Class which implements the core thread safety analysis routines.
+/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
friend class BuildLockset;
friend class threadSafety::BeforeSet;
@@ -909,17 +955,17 @@ class ThreadSafetyAnalyzer {
threadSafety::til::MemRegionRef Arena;
threadSafety::SExprBuilder SxBuilder;
- ThreadSafetyHandler &Handler;
- const CXXMethodDecl *CurrentMethod;
- LocalVariableMap LocalVarMap;
- FactManager FactMan;
+ ThreadSafetyHandler &Handler;
+ const CXXMethodDecl *CurrentMethod;
+ LocalVariableMap LocalVarMap;
+ FactManager FactMan;
std::vector<CFGBlockInfo> BlockInfo;
- BeforeSet* GlobalBeforeSet;
+ BeforeSet *GlobalBeforeSet;
public:
ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
- : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
+ : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
bool inCurrentScope(const CapabilityExpr &CapE);
@@ -959,6 +1005,7 @@ public:
void runAnalysis(AnalysisDeclContext &AC);
};
+
} // namespace
/// Process acquired_before and acquired_after attributes on Vd.
@@ -975,10 +1022,10 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
Info = InfoPtr.get();
}
- for (Attr* At : Vd->attrs()) {
+ for (const auto *At : Vd->attrs()) {
switch (At->getKind()) {
case attr::AcquiredBefore: {
- auto *A = cast<AcquiredBeforeAttr>(At);
+ const auto *A = cast<AcquiredBeforeAttr>(At);
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
@@ -986,7 +1033,7 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
if (const ValueDecl *Cpvd = Cp.valueDecl()) {
Info->Vect.push_back(Cpvd);
- auto It = BMap.find(Cpvd);
+ const auto It = BMap.find(Cpvd);
if (It == BMap.end())
insertAttrExprs(Cpvd, Analyzer);
}
@@ -994,7 +1041,7 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
break;
}
case attr::AcquiredAfter: {
- auto *A = cast<AcquiredAfterAttr>(At);
+ const auto *A = cast<AcquiredAfterAttr>(At);
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
@@ -1055,7 +1102,7 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
InfoVect.push_back(Info);
Info->Visited = 1;
- for (auto *Vdb : Info->Vect) {
+ for (const auto *Vdb : Info->Vect) {
// Exclude mutexes in our immediate before set.
if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
StringRef L1 = StartVd->getName();
@@ -1077,13 +1124,11 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
traverse(StartVd);
- for (auto* Info : InfoVect)
+ for (auto *Info : InfoVect)
Info->Visited = 0;
}
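For context, a sketch of the lock-ordering declarations this traversal checks; attribute spellings follow the Clang thread safety docs, Mutex is a stand-in for an annotated lockable type, and the diagnostic wording is approximate:

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu1;
Mutex mu2 __attribute__((acquired_after(mu1)));

void bad() {
  mu2.Lock();
  mu1.Lock();    // warning: mu1 must be acquired before mu2
  mu1.Unlock();
  mu2.Unlock();
}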
-
-
-/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
+/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
return getValueDecl(CE->getSubExpr());
@@ -1098,10 +1143,11 @@ static const ValueDecl *getValueDecl(const Expr *Exp) {
}
namespace {
+
template <typename Ty>
class has_arg_iterator_range {
- typedef char yes[1];
- typedef char no[2];
+ using yes = char[1];
+ using no = char[2];
template <typename Inner>
static yes& test(Inner *I, decltype(I->args()) * = nullptr);
@@ -1112,6 +1158,7 @@ class has_arg_iterator_range {
public:
static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};
+
} // namespace
static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
@@ -1163,20 +1210,18 @@ ClassifyDiagnostic(const AttrTy *A) {
return "mutex";
}
-
-inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
+bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
if (!CurrentMethod)
return false;
- if (auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
- auto *VD = P->clangDecl();
+ if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
+ const auto *VD = P->clangDecl();
if (VD)
return VD->getDeclContext() == CurrentMethod->getDeclContext();
}
return false;
}
-
-/// \brief Add a new lock to the lockset, warning if the lock is already there.
+/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
std::unique_ptr<FactEntry> Entry,
@@ -1214,8 +1259,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
}
}
-
-/// \brief Remove a lock from the lockset, warning if the lock is not there.
+/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation UnlockLoc,
@@ -1241,8 +1285,7 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
DiagKind);
}
-
-/// \brief Extract the list of mutexIDs from the attribute on an expression,
+/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
@@ -1273,8 +1316,7 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
}
}
-
-/// \brief Extract the list of mutexIDs from a trylock attribute. If the
+/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
@@ -1285,9 +1327,9 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
Expr *BrE, bool Neg) {
// Find out which branch has the lock
bool branch = false;
- if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
+ if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
branch = BLE->getValue();
- else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
+ else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
branch = ILE->getValue().getBoolValue();
int branchnum = branch ? 0 : 1;
@@ -1307,19 +1349,17 @@ static bool getStaticBooleanValue(Expr *E, bool &TCond) {
if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
TCond = false;
return true;
- } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
+ } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
TCond = BLE->getValue();
return true;
- } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
+ } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
TCond = ILE->getValue().getBoolValue();
return true;
- } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
return getStaticBooleanValue(CE->getSubExpr(), TCond);
- }
return false;
}
-
// If Cond can be traced back to a function call, return the call expression.
// The negate argument should be passed in as false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
@@ -1329,30 +1369,26 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
if (!Cond)
return nullptr;
- if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
return CallExp;
- }
- else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
+ else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
- }
- else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
- }
- else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
+ else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
- }
- else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
return getTrylockCallExpr(E, C, Negate);
}
- else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
if (UOP->getOpcode() == UO_LNot) {
Negate = !Negate;
return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
}
return nullptr;
}
- else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
+ else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
if (BOP->getOpcode() == BO_NE)
Negate = !Negate;
@@ -1373,16 +1409,14 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
// LHS must have been evaluated in a different block.
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
}
- if (BOP->getOpcode() == BO_LOr) {
+ if (BOP->getOpcode() == BO_LOr)
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
- }
return nullptr;
}
return nullptr;
}
-
-/// \brief Find the lockset that holds on the edge between PredBlock
+/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
@@ -1400,12 +1434,11 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
StringRef CapDiagKind = "mutex";
- CallExpr *Exp =
- const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
+ auto *Exp = const_cast<CallExpr *>(getTrylockCallExpr(Cond, LVarCtx, Negate));
if (!Exp)
return;
- NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
if(!FunDecl || !FunDecl->hasAttrs())
return;
@@ -1413,19 +1446,25 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
CapExprSet SharedLocksToAdd;
// If the condition is a call to a Trylock function, then grab the attributes
- for (auto *Attr : FunDecl->attrs()) {
+ for (const auto *Attr : FunDecl->attrs()) {
switch (Attr->getKind()) {
+ case attr::TryAcquireCapability: {
+ auto *A = cast<TryAcquireCapabilityAttr>(Attr);
+ getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
+ Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
+ Negate);
+ CapDiagKind = ClassifyDiagnostic(A);
+ break;
+ }
case attr::ExclusiveTrylockFunction: {
- ExclusiveTrylockFunctionAttr *A =
- cast<ExclusiveTrylockFunctionAttr>(Attr);
+ const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
CapDiagKind = ClassifyDiagnostic(A);
break;
}
case attr::SharedTrylockFunction: {
- SharedTrylockFunctionAttr *A =
- cast<SharedTrylockFunctionAttr>(Attr);
+ const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
CapDiagKind = ClassifyDiagnostic(A);
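For context, a sketch of what the new TryAcquireCapability case handles: the capability joins the lockset only on the branch where the try-lock reported success. Spellings follow the thread safety docs; Mutex is illustrative:

struct __attribute__((capability("mutex"))) Mutex {
  bool TryLock() __attribute__((try_acquire_capability(true)));
  void Unlock() __attribute__((release_capability()));
};

void f(Mutex &mu, int &data) {
  if (mu.TryLock()) {  // The true edge gains 'mu' in its lockset...
    ++data;
    mu.Unlock();
  }                    // ...while the false edge keeps the original lockset.
}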
@@ -1449,7 +1488,8 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
}
namespace {
-/// \brief We use this class to visit different types of expressions in
+
+/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
@@ -1478,12 +1518,8 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
public:
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
- : StmtVisitor<BuildLockset>(),
- Analyzer(Anlzr),
- FSet(Info.EntrySet),
- LVarCtx(Info.EntryContext),
- CtxIndex(Info.EntryIndex)
- {}
+ : StmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
+ LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
void VisitUnaryOperator(UnaryOperator *UO);
void VisitBinaryOperator(BinaryOperator *BO);
@@ -1492,9 +1528,10 @@ public:
void VisitCXXConstructExpr(CXXConstructExpr *Exp);
void VisitDeclStmt(DeclStmt *S);
};
+
} // namespace
-/// \brief Warn if the LSet does not contain a lock sufficient to protect access
+/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
AccessKind AK, Expr *MutexExp,
@@ -1558,7 +1595,7 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
}
}
-/// \brief Warn if the LSet contains the given lock.
+/// Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
Expr *MutexExp, StringRef DiagKind) {
CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
@@ -1576,7 +1613,7 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
}
}
-/// \brief Checks guarded_by and pt_guarded_by attributes.
+/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
@@ -1600,19 +1637,19 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
break;
}
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
+ if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
// For dereferences
- if (UO->getOpcode() == clang::UO_Deref)
+ if (UO->getOpcode() == UO_Deref)
checkPtAccess(UO->getSubExpr(), AK, POK);
return;
}
- if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
+ if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
checkPtAccess(AE->getLHS(), AK, POK);
return;
}
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
+ if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
if (ME->isArrow())
checkPtAccess(ME->getBase(), AK, POK);
else
@@ -1632,17 +1669,16 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
ClassifyDiagnostic(I), Loc);
}
-
-/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
+/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK) {
while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
+ if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
Exp = PE->getSubExpr();
continue;
}
- if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
+ if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
if (CE->getCastKind() == CK_ArrayToPointerDecay) {
// If it's an actual array, and not a pointer, then its elements
// are protected by GUARDED_BY, not PT_GUARDED_BY;
@@ -1672,7 +1708,7 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
ClassifyDiagnostic(I), Exp->getExprLoc());
}
-/// \brief Process a function call, method call, constructor call,
+/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
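For context, a sketch of declarations whose attribute sets handleCall processes; spellings per the thread safety docs, names illustrative:

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex mu;
int counter __attribute__((guarded_by(mu)));
void bump() __attribute__((requires_capability(mu)));

void ok() {
  mu.Lock();   // AcquireCapability: 'mu' enters the lockset.
  bump();      // RequiresCapability: satisfied here.
  ++counter;   // guarded_by: satisfied here.
  mu.Unlock(); // ReleaseCapability: 'mu' leaves the lockset.
}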
@@ -1689,23 +1725,22 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
StringRef CapDiagKind = "mutex";
- // Figure out if we're calling the constructor of scoped lockable class
+ // Figure out if we're constructing an object of a scoped lockable class
bool isScopedVar = false;
if (VD) {
- if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
+ if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
const CXXRecordDecl* PD = CD->getParent();
if (PD && PD->hasAttr<ScopedLockableAttr>())
isScopedVar = true;
}
}
- for(Attr *Atconst : D->attrs()) {
- Attr* At = const_cast<Attr*>(Atconst);
+ for (const Attr *At : D->attrs()) {
switch (At->getKind()) {
// When we encounter a lock function, we need to add the lock to our
// lockset.
case attr::AcquireCapability: {
- auto *A = cast<AcquireCapabilityAttr>(At);
+ const auto *A = cast<AcquireCapabilityAttr>(At);
Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
: ExclusiveLocksToAdd,
A, Exp, D, VD);
@@ -1718,7 +1753,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// a warning if it is already there, and will not generate a warning
// if it is not removed.
case attr::AssertExclusiveLock: {
- AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
+ const auto *A = cast<AssertExclusiveLockAttr>(At);
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
@@ -1730,7 +1765,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
break;
}
case attr::AssertSharedLock: {
- AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
+ const auto *A = cast<AssertSharedLockAttr>(At);
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
@@ -1743,7 +1778,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
case attr::AssertCapability: {
- AssertCapabilityAttr *A = cast<AssertCapabilityAttr>(At);
+ const auto *A = cast<AssertCapabilityAttr>(At);
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
@@ -1759,7 +1794,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// When we encounter an unlock function, we need to remove unlocked
// mutexes from the lockset, and flag a warning if they are not there.
case attr::ReleaseCapability: {
- auto *A = cast<ReleaseCapabilityAttr>(At);
+ const auto *A = cast<ReleaseCapabilityAttr>(At);
if (A->isGeneric())
Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
else if (A->isShared())
@@ -1772,7 +1807,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
case attr::RequiresCapability: {
- RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
+ const auto *A = cast<RequiresCapabilityAttr>(At);
for (auto *Arg : A->args()) {
warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
POK_FunctionCall, ClassifyDiagnostic(A),
@@ -1788,7 +1823,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
case attr::LocksExcluded: {
- LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
+ const auto *A = cast<LocksExcludedAttr>(At);
for (auto *Arg : A->args())
warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
break;
@@ -1800,6 +1835,16 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
}
+ // Remove locks first to allow lock upgrading/downgrading.
+ // FIXME -- should only fully remove if the attribute refers to 'this'.
+ bool Dtor = isa<CXXDestructorDecl>(D);
+ for (const auto &M : ExclusiveLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
+ for (const auto &M : SharedLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
+ for (const auto &M : GenericLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
+
// Add locks.
for (const auto &M : ExclusiveLocksToAdd)
Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
@@ -1826,31 +1871,19 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
CapDiagKind);
}
-
- // Remove locks.
- // FIXME -- should only fully remove if the attribute refers to 'this'.
- bool Dtor = isa<CXXDestructorDecl>(D);
- for (const auto &M : ExclusiveLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
- for (const auto &M : SharedLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
- for (const auto &M : GenericLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
}
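With removals processed before additions, a single call can now release one mode and acquire the other, i.e. upgrade or downgrade a lock. A hedged sketch, with illustrative spellings:

struct __attribute__((capability("mutex"))) Mutex {
  void ReaderLock() __attribute__((acquire_shared_capability()));
  void Upgrade() __attribute__((release_shared_capability(),
                                acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

void f(Mutex &mu) {
  mu.ReaderLock();  // Lockset: 'mu' held shared.
  mu.Upgrade();     // Shared fact removed first, exclusive fact added next,
                    // so this no longer warns about acquiring a held lock.
  mu.Unlock();
}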
-
-/// \brief For unary operations which read and write a variable, we need to
+/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
switch (UO->getOpcode()) {
- case clang::UO_PostDec:
- case clang::UO_PostInc:
- case clang::UO_PreDec:
- case clang::UO_PreInc: {
+ case UO_PostDec:
+ case UO_PostInc:
+ case UO_PreDec:
+ case UO_PreInc:
checkAccess(UO->getSubExpr(), AK_Written);
break;
- }
default:
break;
}
@@ -1869,7 +1902,6 @@ void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
checkAccess(BO->getLHS(), AK_Written);
}
-
/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
@@ -1879,23 +1911,21 @@ void BuildLockset::VisitCastExpr(CastExpr *CE) {
checkAccess(CE->getSubExpr(), AK_Read);
}
-
void BuildLockset::VisitCallExpr(CallExpr *Exp) {
bool ExamineArgs = true;
bool OperatorFun = false;
- if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
- MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
+ if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
+ const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
// ME can be null when the callee is a pointer to member function
- CXXMethodDecl *MD = CE->getMethodDecl();
+ const CXXMethodDecl *MD = CE->getMethodDecl();
if (ME && MD) {
if (ME->isArrow()) {
- if (MD->isConst()) {
+ if (MD->isConst())
checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
- } else { // FIXME -- should be AK_Written
+ else // FIXME -- should be AK_Written
checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
- }
} else {
if (MD->isConst())
checkAccess(CE->getImplicitObjectArgument(), AK_Read);
@@ -1903,7 +1933,7 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
checkAccess(CE->getImplicitObjectArgument(), AK_Read);
}
}
- } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
+ } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
OperatorFun = true;
auto OEop = OE->getOperator();
@@ -1938,13 +1968,11 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
if (ExamineArgs) {
if (FunctionDecl *FD = Exp->getDirectCallee()) {
-
// NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
// only turns off checking within the body of a function, but we also
// use it to turn off checking in arguments to the function. This
// could result in some false negatives, but the alternative is to
// create yet another attribute.
- //
if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
unsigned Fn = FD->getNumParams();
unsigned Cn = Exp->getNumArgs();
@@ -1976,7 +2004,7 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
}
}
- NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
if(!D || !D->hasAttrs())
return;
handleCall(Exp, D);
@@ -1991,30 +2019,74 @@ void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
// FIXME -- only handles constructors in DeclStmt below.
}
+static CXXConstructorDecl *
+findConstructorForByValueReturn(const CXXRecordDecl *RD) {
+ // Prefer a move constructor over a copy constructor. If there's more than
+ // one copy constructor or more than one move constructor, we arbitrarily
+ // pick the first declared such constructor rather than trying to guess which
+ // one is more appropriate.
+ CXXConstructorDecl *CopyCtor = nullptr;
+ for (auto *Ctor : RD->ctors()) {
+ if (Ctor->isDeleted())
+ continue;
+ if (Ctor->isMoveConstructor())
+ return Ctor;
+ if (!CopyCtor && Ctor->isCopyConstructor())
+ CopyCtor = Ctor;
+ }
+ return CopyCtor;
+}
+
+static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
+ SourceLocation Loc) {
+ ASTContext &Ctx = CD->getASTContext();
+ return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
+ CD, true, Args, false, false, false, false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange(Loc, Loc));
+}
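
An illustrative sketch (not part of the patch) of the pattern the two helpers
above exist to handle: a scoped lockable returned by value, where the lockset
effect comes from the copy/move constructor's attributes. Standard
thread-safety attribute spellings; all names are made up.

    struct __attribute__((capability("mutex"))) Mutex {
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    class __attribute__((scoped_lockable)) MutexLock {
    public:
      explicit MutexLock(Mutex *Mu) __attribute__((exclusive_lock_function(Mu)));
      ~MutexLock() __attribute__((unlock_function()));
    };

    MutexLock lockFor(Mutex *Mu);     // returns a scoped lockable by value

    void f(Mutex *Mu) {
      MutexLock Scope = lockFor(Mu);  // lockset updated via the fake ctor call
    }
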
+
void BuildLockset::VisitDeclStmt(DeclStmt *S) {
// adjust the context
LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
for (auto *D : S->getDeclGroup()) {
- if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
+ if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
Expr *E = VD->getInit();
+ if (!E)
+ continue;
+ E = E->IgnoreParens();
+
// handle constructors that involve temporaries
- if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
E = EWC->getSubExpr();
+ if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = BTE->getSubExpr();
- if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
- NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
+ if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
+ const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
if (!CtorD || !CtorD->hasAttrs())
- return;
- handleCall(CE, CtorD, VD);
+ continue;
+ handleCall(E, CtorD, VD);
+ } else if (isa<CallExpr>(E) && E->isRValue()) {
+ // If the object is initialized by a function call that returns a
+ // scoped lockable by value, use the attributes on the copy or move
+ // constructor to figure out what effect that should have on the
+ // lockset.
+ // FIXME: Is this really the best way to handle this situation?
+ auto *RD = E->getType()->getAsCXXRecordDecl();
+ if (!RD || !RD->hasAttr<ScopedLockableAttr>())
+ continue;
+ CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
+ if (!CtorD || !CtorD->hasAttrs())
+ continue;
+ handleCall(buildFakeCtorCall(CtorD, {E}, E->getLocStart()), CtorD, VD);
}
}
}
}
-
-
-/// \brief Compute the intersection of two locksets and issue warnings for any
+/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
@@ -2076,7 +2148,6 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
}
}
-
// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
if (B->hasNoReturnElement())
@@ -2092,8 +2163,7 @@ static bool neverReturns(const CFGBlock *B) {
return false;
}
-
-/// \brief Check a function's CFG for thread-safety violations.
+/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
@@ -2110,7 +2180,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFG *CFGraph = walker.getGraph();
const NamedDecl *D = walker.getDecl();
- const FunctionDecl *CurrentFunction = dyn_cast<FunctionDecl>(D);
+ const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
CurrentMethod = dyn_cast<CXXMethodDecl>(D);
if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
@@ -2184,10 +2254,13 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
A, nullptr, D);
CapDiagKind = ClassifyDiagnostic(A);
} else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
- // Don't try to check trylock functions for now
+ // Don't try to check trylock functions for now.
return;
} else if (isa<SharedTrylockFunctionAttr>(Attr)) {
- // Don't try to check trylock functions for now
+ // Don't try to check trylock functions for now.
+ return;
+ } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
+ // Don't try to check trylock functions for now.
return;
}
}
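
An illustrative sketch (not part of the patch) of a trylock function of the
kind all three branches above skip, assuming the standard attribute spelling:

    struct __attribute__((capability("mutex"))) Mutex {
      // When analyzing the body of TryLock itself, runAnalysis() sees the
      // try-acquire attribute and returns without checking it.
      bool TryLock() __attribute__((try_acquire_capability(true)));
    };
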
@@ -2229,7 +2302,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
SmallVector<CFGBlock *, 8> SpecialBlocks;
for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
PE = CurrBlock->pred_end(); PI != PE; ++PI) {
-
// if *PI -> CurrBlock is a back edge
if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
continue;
@@ -2306,24 +2378,23 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
BuildLockset LocksetBuilder(this, *CurrBlockInfo);
// Visit all the statements in the basic block.
- for (CFGBlock::const_iterator BI = CurrBlock->begin(),
- BE = CurrBlock->end(); BI != BE; ++BI) {
- switch (BI->getKind()) {
+ for (const auto &BI : *CurrBlock) {
+ switch (BI.getKind()) {
case CFGElement::Statement: {
- CFGStmt CS = BI->castAs<CFGStmt>();
- LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
+ CFGStmt CS = BI.castAs<CFGStmt>();
+ LocksetBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
break;
}
// Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
case CFGElement::AutomaticObjectDtor: {
- CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>();
- CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>(
+ CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
+ auto *DD = const_cast<CXXDestructorDecl *>(
AD.getDestructorDecl(AC.getASTContext()));
if (!DD->hasAttrs())
break;
// Create a dummy expression.
- VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl());
+ auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
VK_LValue, AD.getTriggerStmt()->getLocEnd());
LocksetBuilder.handleCall(&DRE, DD);
@@ -2341,7 +2412,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
-
// if CurrBlock -> *SI is *not* a back edge
if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
continue;
@@ -2389,8 +2459,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
Handler.leaveFunction(CurrentFunction);
}
-
-/// \brief Check a function's CFG for thread-safety violations.
+/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
@@ -2406,7 +2475,7 @@ void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
-/// \brief Helper function that returns a LockKind required for the given level
+/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
switch (AK) {
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index 99284f07b45b..fced17ff9197 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafetyCommon.cpp -----------------------------------*- C++ -*-===//
+//===- ThreadSafetyCommon.cpp ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,24 +13,32 @@
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
-#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
-#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
-#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
#include <algorithm>
+#include <cassert>
+#include <string>
+#include <utility>
using namespace clang;
using namespace threadSafety;
// From ThreadSafetyUtil.h
-std::string threadSafety::getSourceLiteralString(const clang::Expr *CE) {
+std::string threadSafety::getSourceLiteralString(const Expr *CE) {
switch (CE->getStmtClass()) {
case Stmt::IntegerLiteralClass:
return cast<IntegerLiteral>(CE)->getValue().toString(10, true);
@@ -59,7 +67,7 @@ static bool isIncompletePhi(const til::SExpr *E) {
return false;
}
-typedef SExprBuilder::CallingContext CallingContext;
+using CallingContext = SExprBuilder::CallingContext;
til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
auto It = SMap.find(S);
@@ -74,11 +82,11 @@ til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
}
static bool isCalleeArrow(const Expr *E) {
- const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
+ const auto *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
return ME ? ME->isArrow() : false;
}
-/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
/// \param AttrExp The expression to translate.
@@ -97,20 +105,18 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
// for formal parameters when we call buildMutexID later.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ if (const auto *ME = dyn_cast<MemberExpr>(DeclExp)) {
Ctx.SelfArg = ME->getBase();
Ctx.SelfArrow = ME->isArrow();
- } else if (const CXXMemberCallExpr *CE =
- dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+ } else if (const auto *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
Ctx.SelfArg = CE->getImplicitObjectArgument();
Ctx.SelfArrow = isCalleeArrow(CE->getCallee());
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
- } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+ } else if (const auto *CE = dyn_cast<CallExpr>(DeclExp)) {
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
- } else if (const CXXConstructExpr *CE =
- dyn_cast<CXXConstructExpr>(DeclExp)) {
+ } else if (const auto *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
Ctx.SelfArg = nullptr; // Will be set below
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
@@ -140,14 +146,14 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return translateAttrExpr(AttrExp, &Ctx);
}
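
An illustrative sketch (not part of the patch) of why the CallingContext
captures SelfArg and FunArgs. Given a declaration such as

    struct Container {
      Mutex Mu;
      int Data __attribute__((guarded_by(Mu)));  // 'Mu' is relative to 'this'
    };

the attribute expression 'Mu' is written against the declaration; at a use
site like 'A.Data' or 'B->Data' it must be re-translated with SelfArg bound to
the object expression, so that A.Mu and B->Mu are required respectively.
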
-/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Translate a clang expression in an attribute to a til::SExpr.
// This assumes a CallingContext has already been created.
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
CallingContext *Ctx) {
if (!AttrExp)
return CapabilityExpr(nullptr, false);
- if (auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
+ if (const auto *SLit = dyn_cast<StringLiteral>(AttrExp)) {
if (SLit->getString() == StringRef("*"))
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
@@ -158,13 +164,13 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
}
bool Neg = false;
- if (auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
+ if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
if (OE->getOperator() == OO_Exclaim) {
Neg = true;
AttrExp = OE->getArg(0);
}
}
- else if (auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
+ else if (const auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
if (UO->getOpcode() == UO_LNot) {
Neg = true;
AttrExp = UO->getSubExpr();
@@ -179,7 +185,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return CapabilityExpr(nullptr, false);
// Hack to deal with smart pointers -- strip off top-level pointer casts.
- if (auto *CE = dyn_cast_or_null<til::Cast>(E)) {
+ if (const auto *CE = dyn_cast_or_null<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
return CapabilityExpr(CE->expr(), Neg);
}
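
Illustrative examples (not part of the patch) of the two special forms handled
above, assuming a capability-annotated Mutex type and standard spellings:

    Mutex Mu;
    void needsNotHeld() __attribute__((requires_capability(!Mu)));  // negation
    void unchecked()    __attribute__((requires_capability("*")));  // universal
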
@@ -254,7 +260,7 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
default:
break;
}
- if (const CastExpr *CE = dyn_cast<CastExpr>(S))
+ if (const auto *CE = dyn_cast<CastExpr>(S))
return translateCastExpr(CE, Ctx);
return new (Arena) til::Undefined(S);
@@ -262,11 +268,11 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
CallingContext *Ctx) {
- const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
+ const auto *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
// Function parameters require substitution and/or renaming.
- if (const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
- const FunctionDecl *FD =
+ if (const auto *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
+ const auto *FD =
cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
unsigned I = PV->getFunctionScopeIndex();
@@ -294,13 +300,13 @@ til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
}
static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
- if (auto *V = dyn_cast<til::Variable>(E))
+ if (const auto *V = dyn_cast<til::Variable>(E))
return V->clangDecl();
- if (auto *Ph = dyn_cast<til::Phi>(E))
+ if (const auto *Ph = dyn_cast<til::Phi>(E))
return Ph->clangDecl();
- if (auto *P = dyn_cast<til::Project>(E))
+ if (const auto *P = dyn_cast<til::Project>(E))
return P->clangDecl();
- if (auto *L = dyn_cast<til::LiteralPtr>(E))
+ if (const auto *L = dyn_cast<til::LiteralPtr>(E))
return L->clangDecl();
return nullptr;
}
@@ -309,7 +315,7 @@ static bool hasCppPointerType(const til::SExpr *E) {
auto *VD = getValueDeclFromSExpr(E);
if (VD && VD->getType()->isPointerType())
return true;
- if (auto *C = dyn_cast<til::Cast>(E))
+ if (const auto *C = dyn_cast<til::Cast>(E))
return C->castOpcode() == til::CAST_objToPtr;
return false;
@@ -333,9 +339,8 @@ til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
til::SExpr *BE = translate(ME->getBase(), Ctx);
til::SExpr *E = new (Arena) til::SApply(BE);
- const ValueDecl *D =
- cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
- if (auto *VD = dyn_cast<CXXMethodDecl>(D))
+ const auto *D = cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
+ if (const auto *VD = dyn_cast<CXXMethodDecl>(D))
D = getFirstVirtualDecl(VD);
til::Project *P = new (Arena) til::Project(E, D);
@@ -356,7 +361,7 @@ til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
LRCallCtx.SelfArg = SelfE;
LRCallCtx.NumArgs = CE->getNumArgs();
LRCallCtx.FunArgs = CE->getArgs();
- return const_cast<til::SExpr*>(
+ return const_cast<til::SExpr *>(
translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
}
}
@@ -407,10 +412,10 @@ til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
case UO_PreDec:
return new (Arena) til::Undefined(UO);
- case UO_AddrOf: {
+ case UO_AddrOf:
if (CapabilityExprMode) {
// interpret &Graph::mu_ as an existential.
- if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
if (DRE->getDecl()->isCXXInstanceMember()) {
// This is a pointer-to-member expression, e.g. &MyClass::mu_.
// We interpret this syntax specially, as a wildcard.
@@ -421,7 +426,6 @@ til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
}
// otherwise, & is a no-op
return translate(UO->getSubExpr(), Ctx);
- }
// We treat these as no-ops
case UO_Deref:
@@ -470,7 +474,7 @@ til::SExpr *SExprBuilder::translateBinAssign(til::TIL_BinaryOpcode Op,
const ValueDecl *VD = nullptr;
til::SExpr *CV = nullptr;
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHS)) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
VD = DRE->getDecl();
CV = lookupVarDecl(VD);
}
@@ -533,10 +537,10 @@ til::SExpr *SExprBuilder::translateBinaryOperator(const BinaryOperator *BO,
til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
CallingContext *Ctx) {
- clang::CastKind K = CE->getCastKind();
+ CastKind K = CE->getCastKind();
switch (K) {
case CK_LValueToRValue: {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
til::SExpr *E0 = lookupVarDecl(DRE->getDecl());
if (E0)
return E0;
@@ -584,16 +588,15 @@ SExprBuilder::translateAbstractConditionalOperator(
til::SExpr *
SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
DeclGroupRef DGrp = S->getDeclGroup();
- for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
- if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
+ for (auto I : DGrp) {
+ if (auto *VD = dyn_cast_or_null<VarDecl>(I)) {
Expr *E = VD->getInit();
til::SExpr* SE = translate(E, Ctx);
// Add local variables with trivial type to the variable map
QualType T = VD->getType();
- if (T.isTrivialType(VD->getASTContext())) {
+ if (T.isTrivialType(VD->getASTContext()))
return addVarDecl(VD, SE);
- }
else {
// TODO: add alloca
}
@@ -632,7 +635,7 @@ til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) {
static void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) {
if (!E)
return;
- if (til::Variable *V = dyn_cast<til::Variable>(E)) {
+ if (auto *V = dyn_cast<til::Variable>(E)) {
if (!V->clangDecl())
V->setClangDecl(VD);
}
@@ -672,7 +675,7 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
if (CurrE->block() == CurrentBB) {
// We already have a Phi node in the current block,
// so just add the new variable to the Phi node.
- til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
+ auto *Ph = dyn_cast<til::Phi>(CurrE);
assert(Ph && "Expecting Phi node.");
if (E)
Ph->values()[ArgIndex] = E;
@@ -690,9 +693,8 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
Ph->setClangDecl(CurrentLVarMap[i].first);
// If E is from a back-edge, or either E or CurrE is incomplete, then
// mark this node as incomplete; we may need to remove it later.
- if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
+ if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE))
Ph->setStatus(til::Phi::PH_Incomplete);
- }
// Add Phi node to current block, and update CurrentLVarMap[i]
CurrentArguments.push_back(Ph);
@@ -721,7 +723,7 @@ void SExprBuilder::mergeEntryMap(LVarDefinitionMap Map) {
unsigned MSz = Map.size();
unsigned Sz = std::min(ESz, MSz);
- for (unsigned i=0; i<Sz; ++i) {
+ for (unsigned i = 0; i < Sz; ++i) {
if (CurrentLVarMap[i].first != Map[i].first) {
// We've reached the end of variables in common.
CurrentLVarMap.makeWritable();
@@ -758,9 +760,8 @@ void SExprBuilder::mergeEntryMapBackEdge() {
unsigned Sz = CurrentLVarMap.size();
unsigned NPreds = CurrentBB->numPredecessors();
- for (unsigned i=0; i < Sz; ++i) {
+ for (unsigned i = 0; i < Sz; ++i)
makePhiNodeVar(i, NPreds, nullptr);
- }
}
// Update the phi nodes that were initially created for a back edge
@@ -772,7 +773,7 @@ void SExprBuilder::mergePhiNodesBackEdge(const CFGBlock *Blk) {
assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
for (til::SExpr *PE : BB->arguments()) {
- til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
+ auto *Ph = dyn_cast_or_null<til::Phi>(PE);
assert(Ph && "Expecting Phi Node.");
assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
@@ -816,7 +817,7 @@ void SExprBuilder::enterCFG(CFG *Cfg, const NamedDecl *D,
}
void SExprBuilder::enterCFGBlock(const CFGBlock *B) {
- // Intialize TIL basic block and add it to the CFG.
+ // Initialize TIL basic block and add it to the CFG.
CurrentBB = lookupBlock(B);
CurrentBB->reservePredecessors(B->pred_size());
Scfg->add(CurrentBB);
@@ -891,7 +892,7 @@ void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
++It;
til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
- // FIXME: make sure these arent' critical edges.
+ // FIXME: make sure these aren't critical edges.
auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
CurrentBB->setTerminator(Tm);
}
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index cd7cdc69ab73..798bbfb29d7b 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafetyTIL.cpp -------------------------------------*- C++ --*-===//
+//===- ThreadSafetyTIL.cpp ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,7 +8,11 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
-#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstddef>
+
using namespace clang;
using namespace threadSafety;
using namespace til;
@@ -19,7 +23,7 @@ StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
case UOP_BitNot: return "~";
case UOP_LogicNot: return "!";
}
- return "";
+ return {};
}
StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
@@ -42,10 +46,9 @@ StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
case BOP_LogicAnd: return "&&";
case BOP_LogicOr: return "||";
}
- return "";
+ return {};
}
-
SExpr* Future::force() {
Status = FS_evaluating;
Result = compute();
@@ -53,13 +56,12 @@ SExpr* Future::force() {
return Result;
}
-
unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
unsigned Idx = Predecessors.size();
Predecessors.reserveCheck(1, Arena);
Predecessors.push_back(Pred);
- for (SExpr *E : Args) {
- if (Phi* Ph = dyn_cast<Phi>(E)) {
+ for (auto *E : Args) {
+ if (auto *Ph = dyn_cast<Phi>(E)) {
Ph->values().reserveCheck(1, Arena);
Ph->values().push_back(nullptr);
}
@@ -67,28 +69,26 @@ unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
return Idx;
}
-
void BasicBlock::reservePredecessors(unsigned NumPreds) {
Predecessors.reserve(NumPreds, Arena);
- for (SExpr *E : Args) {
- if (Phi* Ph = dyn_cast<Phi>(E)) {
+ for (auto *E : Args) {
+ if (auto *Ph = dyn_cast<Phi>(E)) {
Ph->values().reserve(NumPreds, Arena);
}
}
}
-
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
const SExpr *til::getCanonicalVal(const SExpr *E) {
while (true) {
- if (auto *V = dyn_cast<Variable>(E)) {
+ if (const auto *V = dyn_cast<Variable>(E)) {
if (V->kind() == Variable::VK_Let) {
E = V->definition();
continue;
}
}
- if (const Phi *Ph = dyn_cast<Phi>(E)) {
+ if (const auto *Ph = dyn_cast<Phi>(E)) {
if (Ph->status() == Phi::PH_SingleVal) {
E = Ph->values()[0];
continue;
@@ -99,7 +99,6 @@ const SExpr *til::getCanonicalVal(const SExpr *E) {
return E;
}
-
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
@@ -129,7 +128,6 @@ SExpr *til::simplifyToCanonicalVal(SExpr *E) {
}
}
-
// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// getCanonicalVal() will recursively call simplifyIncompletePhi().
@@ -140,7 +138,7 @@ void til::simplifyIncompleteArg(til::Phi *Ph) {
Ph->setStatus(Phi::PH_MultiVal);
SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
- for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
+ for (unsigned i = 1, n = Ph->values().size(); i < n; ++i) {
SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
if (Ei == Ph)
continue; // Recursive reference to itself. Don't count.
@@ -151,7 +149,6 @@ void til::simplifyIncompleteArg(til::Phi *Ph) {
Ph->setStatus(Phi::PH_SingleVal);
}
-
// Renumbers the arguments and instructions to have unique, sequential IDs.
int BasicBlock::renumberInstrs(int ID) {
for (auto *Arg : Args)
@@ -166,7 +163,7 @@ int BasicBlock::renumberInstrs(int ID) {
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array. Sorting should start from the entry
// block, and ID should be the total number of blocks.
-int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+int BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks, int ID) {
if (Visited) return ID;
Visited = true;
for (auto *Block : successors())
@@ -258,7 +255,6 @@ void BasicBlock::computePostDominator() {
PostDominatorNode.SizeOfSubTree = 1;
}
-
// Renumber instructions in all blocks
void SCFG::renumberInstrs() {
int InstrID = 0;
@@ -266,7 +262,6 @@ void SCFG::renumberInstrs() {
InstrID = Block->renumberInstrs(InstrID);
}
-
static inline void computeNodeSize(BasicBlock *B,
BasicBlock::TopologyNode BasicBlock::*TN) {
BasicBlock::TopologyNode *N = &(B->*TN);
@@ -287,7 +282,6 @@ static inline void computeNodeID(BasicBlock *B,
}
}
-
// Normalizes a CFG. Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index 5f11d8a2a36b..63353292349b 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -1,4 +1,4 @@
-//==- UninitializedValues.cpp - Find Uninitialized Values -------*- C++ --*-==//
+//===- UninitializedValues.cpp - Find Uninitialized Values ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,23 +11,31 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
-#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/SaveAndRestore.h"
-#include <utility>
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
using namespace clang;
@@ -48,10 +56,12 @@ static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
//====------------------------------------------------------------------------//
namespace {
+
class DeclToIndex {
llvm::DenseMap<const VarDecl *, unsigned> map;
+
public:
- DeclToIndex() {}
+ DeclToIndex() = default;
/// Compute the actual mapping from declarations to bits.
void computeMap(const DeclContext &dc);
@@ -62,7 +72,8 @@ public:
/// Returns the bit vector index for a given declaration.
Optional<unsigned> getValueIndex(const VarDecl *d) const;
};
-}
+
+} // namespace
void DeclToIndex::computeMap(const DeclContext &dc) {
unsigned count = 0;
@@ -96,25 +107,28 @@ enum Value { Unknown = 0x0, /* 00 */
static bool isUninitialized(const Value v) {
return v >= Uninitialized;
}
+
static bool isAlwaysUninit(const Value v) {
return v == Uninitialized;
}
namespace {
-typedef llvm::PackedVector<Value, 2, llvm::SmallBitVector> ValueVector;
+using ValueVector = llvm::PackedVector<Value, 2, llvm::SmallBitVector>;
class CFGBlockValues {
const CFG &cfg;
SmallVector<ValueVector, 8> vals;
ValueVector scratch;
DeclToIndex declToIndex;
+
public:
CFGBlockValues(const CFG &cfg);
unsigned getNumEntries() const { return declToIndex.size(); }
void computeSetOfDeclarations(const DeclContext &dc);
+
ValueVector &getValueVector(const CFGBlock *block) {
return vals[block->getBlockID()];
}
@@ -138,7 +152,8 @@ public:
return getValueVector(block)[idx.getValue()];
}
};
-} // end anonymous namespace
+
+} // namespace
CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
@@ -150,17 +165,16 @@ void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
if (!n)
return;
vals.resize(n);
- for (unsigned i = 0; i < n; ++i)
- vals[i].resize(decls);
+ for (auto &val : vals)
+ val.resize(decls);
}
#if DEBUG_LOGGING
static void printVector(const CFGBlock *block, ValueVector &bv,
unsigned num) {
llvm::errs() << block->getBlockID() << " :";
- for (unsigned i = 0; i < bv.size(); ++i) {
- llvm::errs() << ' ' << bv[i];
- }
+ for (const auto &i : bv)
+ llvm::errs() << ' ' << i;
llvm::errs() << " : " << num << '\n';
}
#endif
@@ -204,28 +218,31 @@ ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
//====------------------------------------------------------------------------//
namespace {
+
class DataflowWorklist {
PostOrderCFGView::iterator PO_I, PO_E;
SmallVector<const CFGBlock *, 20> worklist;
llvm::BitVector enqueuedBlocks;
+
public:
DataflowWorklist(const CFG &cfg, PostOrderCFGView &view)
- : PO_I(view.begin()), PO_E(view.end()),
- enqueuedBlocks(cfg.getNumBlockIDs(), true) {
- // Treat the first block as already analyzed.
- if (PO_I != PO_E) {
- assert(*PO_I == &cfg.getEntry());
- enqueuedBlocks[(*PO_I)->getBlockID()] = false;
- ++PO_I;
- }
- }
+ : PO_I(view.begin()), PO_E(view.end()),
+ enqueuedBlocks(cfg.getNumBlockIDs(), true) {
+ // Treat the first block as already analyzed.
+ if (PO_I != PO_E) {
+ assert(*PO_I == &cfg.getEntry());
+ enqueuedBlocks[(*PO_I)->getBlockID()] = false;
+ ++PO_I;
+ }
+ }
void enqueueSuccessors(const CFGBlock *block);
const CFGBlock *dequeue();
};
-}
-void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+} // namespace
+
+void DataflowWorklist::enqueueSuccessors(const CFGBlock *block) {
for (CFGBlock::const_succ_iterator I = block->succ_begin(),
E = block->succ_end(); I != E; ++I) {
const CFGBlock *Successor = *I;
@@ -250,9 +267,8 @@ const CFGBlock *DataflowWorklist::dequeue() {
B = *PO_I;
++PO_I;
}
- else {
+ else
return nullptr;
- }
assert(enqueuedBlocks[B->getBlockID()] == true);
enqueuedBlocks[B->getBlockID()] = false;
@@ -264,9 +280,11 @@ const CFGBlock *DataflowWorklist::dequeue() {
//====------------------------------------------------------------------------//
namespace {
+
class FindVarResult {
const VarDecl *vd;
const DeclRefExpr *dr;
+
public:
FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}
@@ -274,10 +292,12 @@ public:
const VarDecl *getDecl() const { return vd; }
};
+} // namespace
+
static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
while (Ex) {
Ex = Ex->IgnoreParenNoopCasts(C);
- if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ if (const auto *CE = dyn_cast<CastExpr>(Ex)) {
if (CE->getCastKind() == CK_LValueBitCast) {
Ex = CE->getSubExpr();
continue;
@@ -291,15 +311,17 @@ static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
/// If E is an expression comprising a reference to a single variable, find that
/// variable.
static FindVarResult findVar(const Expr *E, const DeclContext *DC) {
- if (const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (isTrackedVar(VD, DC))
return FindVarResult(VD, DRE);
return FindVarResult(nullptr, nullptr);
}
-/// \brief Classify each DeclRefExpr as an initialization or a use. Any
+namespace {
+
+/// Classify each DeclRefExpr as an initialization or a use. Any
/// DeclRefExpr which isn't explicitly classified will be assumed to have
/// escaped the analysis and will be treated as an initialization.
class ClassifyRefs : public StmtVisitor<ClassifyRefs> {
@@ -313,7 +335,7 @@ public:
private:
const DeclContext *DC;
- llvm::DenseMap<const DeclRefExpr*, Class> Classification;
+ llvm::DenseMap<const DeclRefExpr *, Class> Classification;
bool isTrackedVar(const VarDecl *VD) const {
return ::isTrackedVar(VD, DC);
@@ -338,21 +360,22 @@ public:
if (I != Classification.end())
return I->second;
- const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
if (!VD || !isTrackedVar(VD))
return Ignore;
return Init;
}
};
-}
+
+} // namespace
static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
if (VD->getType()->isRecordType())
return nullptr;
if (Expr *Init = VD->getInit()) {
- const DeclRefExpr *DRE
- = dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
+ const auto *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
if (DRE && DRE->getDecl() == VD)
return DRE;
}
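
Illustrative (not part of the patch): what getSelfInitExpr() matches is an
initializer that, modulo casts, refers to the variable being declared:

    int X = X;        // matched: returns the DeclRefExpr for 'X'
    int Y = X;        // not matched: refers to a different variable
    SomeRecord R = R; // not matched: record types are rejected above
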
@@ -362,32 +385,31 @@ static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
void ClassifyRefs::classify(const Expr *E, Class C) {
// The result of a ?: could also be an lvalue.
E = E->IgnoreParens();
- if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
classify(CO->getTrueExpr(), C);
classify(CO->getFalseExpr(), C);
return;
}
- if (const BinaryConditionalOperator *BCO =
- dyn_cast<BinaryConditionalOperator>(E)) {
+ if (const auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) {
classify(BCO->getFalseExpr(), C);
return;
}
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) {
classify(OVE->getSourceExpr(), C);
return;
}
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (VarDecl *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
if (!VD->isStaticDataMember())
classify(ME->getBase(), C);
}
return;
}
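
Illustrative (not part of the patch): because classify() walks through
conditionals and other lvalue-producing forms above, a classification applied
to the result of a '?:' reaches both arms:

    void h(bool B) {
      int X, Y;
      (B ? X : Y) = 1;  // the LHS classification is applied to both X and Y
    }
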
- if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
switch (BO->getOpcode()) {
case BO_PtrMemD:
case BO_PtrMemI:
@@ -408,7 +430,7 @@ void ClassifyRefs::classify(const Expr *E, Class C) {
void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
for (auto *DI : DS->decls()) {
- VarDecl *VD = dyn_cast<VarDecl>(DI);
+ auto *VD = dyn_cast<VarDecl>(DI);
if (VD && isTrackedVar(VD))
if (const DeclRefExpr *DRE = getSelfInitExpr(VD))
Classification[DRE] = SelfInit;
@@ -457,7 +479,7 @@ void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
classify((*I), Ignore);
} else if (isPointerToConst((*I)->getType())) {
const Expr *Ex = stripCasts(DC->getParentASTContext(), *I);
- const UnaryOperator *UO = dyn_cast<UnaryOperator>(Ex);
+ const auto *UO = dyn_cast<UnaryOperator>(Ex);
if (UO && UO->getOpcode() == UO_AddrOf)
Ex = UO->getSubExpr();
classify(Ex, Ignore);
@@ -468,7 +490,7 @@ void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
if (CE->getCastKind() == CK_LValueToRValue)
classify(CE->getSubExpr(), Use);
- else if (CStyleCastExpr *CSE = dyn_cast<CStyleCastExpr>(CE)) {
+ else if (const auto *CSE = dyn_cast<CStyleCastExpr>(CE)) {
if (CSE->getType()->isVoidType()) {
// Squelch any detected load of an uninitialized value if
// we cast it to void.
@@ -483,6 +505,7 @@ void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
//====------------------------------------------------------------------------//
namespace {
+
class TransferFunctions : public StmtVisitor<TransferFunctions> {
CFGBlockValues &vals;
const CFG &cfg;
@@ -497,9 +520,9 @@ public:
const CFGBlock *block, AnalysisDeclContext &ac,
const ClassifyRefs &classification,
UninitVariablesHandler &handler)
- : vals(vals), cfg(cfg), block(block), ac(ac),
- classification(classification), objCNoRet(ac.getASTContext()),
- handler(handler) {}
+ : vals(vals), cfg(cfg), block(block), ac(ac),
+ classification(classification), objCNoRet(ac.getASTContext()),
+ handler(handler) {}
void reportUse(const Expr *ex, const VarDecl *vd);
@@ -627,8 +650,7 @@ public:
// Scan the frontier, looking for blocks where the variable was
// uninitialized.
- for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
- const CFGBlock *Block = *BI;
+ for (const auto *Block : cfg) {
unsigned BlockID = Block->getBlockID();
const Stmt *Term = Block->getTerminator();
if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
@@ -668,7 +690,8 @@ public:
return Use;
}
};
-}
+
+} // namespace
void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
Value v = vals[vd];
@@ -678,8 +701,8 @@ void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
// This represents an initialization of the 'element' value.
- if (DeclStmt *DS = dyn_cast<DeclStmt>(FS->getElement())) {
- const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (const auto *DS = dyn_cast<DeclStmt>(FS->getElement())) {
+ const auto *VD = cast<VarDecl>(DS->getSingleDecl());
if (isTrackedVar(VD))
vals[VD] = Initialized;
}
@@ -748,7 +771,7 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *BO) {
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
for (auto *DI : DS->decls()) {
- VarDecl *VD = dyn_cast<VarDecl>(DI);
+ auto *VD = dyn_cast<VarDecl>(DI);
if (VD && isTrackedVar(VD)) {
if (getSelfInitExpr(VD)) {
// If the initializer consists solely of a reference to itself, we
@@ -815,34 +838,32 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
}
// Apply the transfer function.
TransferFunctions tf(vals, cfg, block, ac, classification, handler);
- for (CFGBlock::const_iterator I = block->begin(), E = block->end();
- I != E; ++I) {
- if (Optional<CFGStmt> cs = I->getAs<CFGStmt>())
- tf.Visit(const_cast<Stmt*>(cs->getStmt()));
+ for (const auto &I : *block) {
+ if (Optional<CFGStmt> cs = I.getAs<CFGStmt>())
+ tf.Visit(const_cast<Stmt *>(cs->getStmt()));
}
return vals.updateValueVectorWithScratch(block);
}
+namespace {
+
/// PruneBlocksHandler is a special UninitVariablesHandler that is used
/// to detect when a CFGBlock has any *potential* use of an uninitialized
/// variable. It is mainly used to prune out work during the final
/// reporting pass.
-namespace {
struct PruneBlocksHandler : public UninitVariablesHandler {
- PruneBlocksHandler(unsigned numBlocks)
- : hadUse(numBlocks, false), hadAnyUse(false),
- currentBlock(0) {}
-
- ~PruneBlocksHandler() override {}
-
/// Records if a CFGBlock had a potential use of an uninitialized variable.
llvm::BitVector hadUse;
/// Records if any CFGBlock had a potential use of an uninitialized variable.
- bool hadAnyUse;
+ bool hadAnyUse = false;
/// The current block in which to scribble use information.
- unsigned currentBlock;
+ unsigned currentBlock = 0;
+
+ PruneBlocksHandler(unsigned numBlocks) : hadUse(numBlocks, false) {}
+
+ ~PruneBlocksHandler() override = default;
void handleUseOfUninitVariable(const VarDecl *vd,
const UninitUse &use) override {
@@ -858,7 +879,8 @@ struct PruneBlocksHandler : public UninitVariablesHandler {
hadAnyUse = true;
}
};
-}
+
+} // namespace
void clang::runUninitializedVariablesAnalysis(
const DeclContext &dc,
@@ -881,7 +903,7 @@ void clang::runUninitializedVariablesAnalysis(
const CFGBlock &entry = cfg.getEntry();
ValueVector &vec = vals.getValueVector(&entry);
const unsigned n = vals.getNumEntries();
- for (unsigned j = 0; j < n ; ++j) {
+ for (unsigned j = 0; j < n; ++j) {
vec[j] = Uninitialized;
}
@@ -909,13 +931,11 @@ void clang::runUninitializedVariablesAnalysis(
return;
// Run through the blocks one more time, and report uninitialized variables.
- for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
- const CFGBlock *block = *BI;
+ for (const auto *block : cfg)
if (PBH.hadUse[block->getBlockID()]) {
runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, handler);
++stats.NumBlockVisits;
}
- }
}
-UninitVariablesHandler::~UninitVariablesHandler() {}
+UninitVariablesHandler::~UninitVariablesHandler() = default;
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index ed7f87c9b95c..a3210ba09068 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -107,6 +107,22 @@ void Builtin::Context::forgetBuiltin(unsigned ID, IdentifierTable &Table) {
Table.get(getRecord(ID).Name).setBuiltinID(0);
}
+unsigned Builtin::Context::getRequiredVectorWidth(unsigned ID) const {
+ const char *WidthPos = ::strchr(getRecord(ID).Attributes, 'V');
+ if (!WidthPos)
+ return 0;
+
+ ++WidthPos;
+ assert(*WidthPos == ':' &&
+ "Vector width specifier must be followed by a ':'");
+ ++WidthPos;
+
+ char *EndPos;
+ unsigned Width = ::strtol(WidthPos, &EndPos, 10);
+ assert(*EndPos == ':' && "Vector width specifier must end with a ':'");
+ return Width;
+}
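
Illustrative (not part of the patch): the 'V:<width>:' marker parsed above is
part of a builtin's attribute string in Builtins.def. A hypothetical entry --
the name, prototype, and feature string are made up:

    TARGET_BUILTIN(__builtin_example_add4f, "V4fV4fV4f", "ncV:128:", "sse")

getRequiredVectorWidth() returns 128 for such a record, and 0 for any builtin
whose attribute string carries no 'V' marker.
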
+
bool Builtin::Context::isLike(unsigned ID, unsigned &FormatIdx,
bool &HasVAListArg, const char *Fmt) const {
assert(Fmt && "Not passed a format string");
@@ -139,3 +155,10 @@ bool Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
bool &HasVAListArg) {
return isLike(ID, FormatIdx, HasVAListArg, "sS");
}
+
+bool Builtin::Context::canBeRedeclared(unsigned ID) const {
+ return ID == Builtin::NotBuiltin ||
+ ID == Builtin::BI__va_start ||
+ (!hasReferenceArgsOrResult(ID) &&
+ !hasCustomTypechecking(ID));
+}
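
Illustrative (not part of the patch): this permits ordinary user
redeclarations of library builtins, for example:

    extern "C" void *memcpy(void *, const void *, __SIZE_TYPE__);

Builtins with reference arguments or results, or with custom type checking,
are excluded; Builtin::NotBuiltin and __va_start are always redeclarable.
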
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index d0c9b902f67e..e82f451dea17 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -83,6 +83,7 @@ add_clang_library(clangBasic
Targets/OSTargets.cpp
Targets/PNaCl.cpp
Targets/PPC.cpp
+ Targets/RISCV.cpp
Targets/SPIR.cpp
Targets/Sparc.cpp
Targets/SystemZ.cpp
@@ -92,9 +93,9 @@ add_clang_library(clangBasic
Targets/XCore.cpp
TokenKinds.cpp
Version.cpp
- VersionTuple.cpp
VirtualFileSystem.cpp
Warnings.cpp
+ XRayInstr.cpp
XRayLists.cpp
${version_inc}
)
diff --git a/lib/Basic/Cuda.cpp b/lib/Basic/Cuda.cpp
index 58b99a3b58cb..dc7e61c02b24 100644
--- a/lib/Basic/Cuda.cpp
+++ b/lib/Basic/Cuda.cpp
@@ -18,12 +18,18 @@ const char *CudaVersionToString(CudaVersion V) {
return "8.0";
case CudaVersion::CUDA_90:
return "9.0";
+ case CudaVersion::CUDA_91:
+ return "9.1";
+ case CudaVersion::CUDA_92:
+ return "9.2";
}
llvm_unreachable("invalid enum");
}
const char *CudaArchToString(CudaArch A) {
switch (A) {
+ case CudaArch::LAST:
+ break;
case CudaArch::UNKNOWN:
return "unknown";
case CudaArch::SM_20:
@@ -52,6 +58,34 @@ const char *CudaArchToString(CudaArch A) {
return "sm_62";
case CudaArch::SM_70:
return "sm_70";
+ case CudaArch::SM_72:
+ return "sm_72";
+ case CudaArch::GFX600: // tahiti
+ return "gfx600";
+ case CudaArch::GFX601: // pitcairn, verde, oland, hainan
+ return "gfx601";
+ case CudaArch::GFX700: // kaveri
+ return "gfx700";
+ case CudaArch::GFX701: // hawaii
+ return "gfx701";
+ case CudaArch::GFX702: // 290, 290x, R390, R390x
+ return "gfx702";
+ case CudaArch::GFX703: // kabini, mullins
+ return "gfx703";
+ case CudaArch::GFX704: // bonaire
+ return "gfx704";
+ case CudaArch::GFX801: // carrizo
+ return "gfx801";
+ case CudaArch::GFX802: // tonga,iceland
+ return "gfx802";
+ case CudaArch::GFX803: // fiji,polaris10
+ return "gfx803";
+ case CudaArch::GFX810: // stoney
+ return "gfx810";
+ case CudaArch::GFX900: // vega, instinct
+ return "gfx900";
+ case CudaArch::GFX902: // TBA
+ return "gfx902";
}
llvm_unreachable("invalid enum");
}
@@ -71,6 +105,20 @@ CudaArch StringToCudaArch(llvm::StringRef S) {
.Case("sm_61", CudaArch::SM_61)
.Case("sm_62", CudaArch::SM_62)
.Case("sm_70", CudaArch::SM_70)
+ .Case("sm_72", CudaArch::SM_72)
+ .Case("gfx600", CudaArch::GFX600)
+ .Case("gfx601", CudaArch::GFX601)
+ .Case("gfx700", CudaArch::GFX700)
+ .Case("gfx701", CudaArch::GFX701)
+ .Case("gfx702", CudaArch::GFX702)
+ .Case("gfx703", CudaArch::GFX703)
+ .Case("gfx704", CudaArch::GFX704)
+ .Case("gfx801", CudaArch::GFX801)
+ .Case("gfx802", CudaArch::GFX802)
+ .Case("gfx803", CudaArch::GFX803)
+ .Case("gfx810", CudaArch::GFX810)
+ .Case("gfx900", CudaArch::GFX900)
+ .Case("gfx902", CudaArch::GFX902)
.Default(CudaArch::UNKNOWN);
}
@@ -102,6 +150,10 @@ const char *CudaVirtualArchToString(CudaVirtualArch A) {
return "compute_62";
case CudaVirtualArch::COMPUTE_70:
return "compute_70";
+ case CudaVirtualArch::COMPUTE_72:
+ return "compute_72";
+ case CudaVirtualArch::COMPUTE_AMDGCN:
+ return "compute_amdgcn";
}
llvm_unreachable("invalid enum");
}
@@ -120,11 +172,15 @@ CudaVirtualArch StringToCudaVirtualArch(llvm::StringRef S) {
.Case("compute_61", CudaVirtualArch::COMPUTE_61)
.Case("compute_62", CudaVirtualArch::COMPUTE_62)
.Case("compute_70", CudaVirtualArch::COMPUTE_70)
+ .Case("compute_72", CudaVirtualArch::COMPUTE_72)
+ .Case("compute_amdgcn", CudaVirtualArch::COMPUTE_AMDGCN)
.Default(CudaVirtualArch::UNKNOWN);
}
CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
switch (A) {
+ case CudaArch::LAST:
+ break;
case CudaArch::UNKNOWN:
return CudaVirtualArch::UNKNOWN;
case CudaArch::SM_20:
@@ -152,12 +208,30 @@ CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
return CudaVirtualArch::COMPUTE_62;
case CudaArch::SM_70:
return CudaVirtualArch::COMPUTE_70;
+ case CudaArch::SM_72:
+ return CudaVirtualArch::COMPUTE_72;
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ return CudaVirtualArch::COMPUTE_AMDGCN;
}
llvm_unreachable("invalid enum");
}
CudaVersion MinVersionForCudaArch(CudaArch A) {
switch (A) {
+ case CudaArch::LAST:
+ break;
case CudaArch::UNKNOWN:
return CudaVersion::UNKNOWN;
case CudaArch::SM_20:
@@ -176,6 +250,22 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
return CudaVersion::CUDA_80;
case CudaArch::SM_70:
return CudaVersion::CUDA_90;
+ case CudaArch::SM_72:
+ return CudaVersion::CUDA_91;
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ return CudaVersion::CUDA_70;
}
llvm_unreachable("invalid enum");
}
@@ -186,6 +276,19 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) {
return CudaVersion::UNKNOWN;
case CudaArch::SM_20:
case CudaArch::SM_21:
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
return CudaVersion::CUDA_80;
default:
return CudaVersion::LATEST;
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 26baa838f8c6..519e835e32a2 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -1,4 +1,4 @@
-//===--- Diagnostic.cpp - C Language Family Diagnostic Handling -----------===//
+//===- Diagnostic.cpp - C Language Family Diagnostic Handling -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,15 +14,30 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/DiagnosticError.h"
+#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <utility>
+#include <vector>
using namespace clang;
@@ -57,27 +72,13 @@ static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
Output.append(Str.begin(), Str.end());
}
-DiagnosticsEngine::DiagnosticsEngine(IntrusiveRefCntPtr<DiagnosticIDs> diags,
- DiagnosticOptions *DiagOpts,
- DiagnosticConsumer *client,
- bool ShouldOwnClient)
- : Diags(std::move(diags)), DiagOpts(DiagOpts), Client(nullptr),
- SourceMgr(nullptr) {
+DiagnosticsEngine::DiagnosticsEngine(
+ IntrusiveRefCntPtr<DiagnosticIDs> diags,
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, DiagnosticConsumer *client,
+ bool ShouldOwnClient)
+ : Diags(std::move(diags)), DiagOpts(std::move(DiagOpts)) {
setClient(client, ShouldOwnClient);
ArgToStringFn = DummyArgToStringFn;
- ArgToStringCookie = nullptr;
-
- AllExtensionsSilenced = 0;
- SuppressAfterFatalError = true;
- SuppressAllDiagnostics = false;
- ElideType = true;
- PrintTemplateTree = false;
- ShowColors = false;
- ShowOverloads = Ovl_All;
-
- ErrorLimit = 0;
- TemplateBacktraceLimit = 0;
- ConstexprBacktraceLimit = 0;
Reset();
}
@@ -121,7 +122,7 @@ void DiagnosticsEngine::Reset() {
TrapNumErrorsOccurred = 0;
TrapNumUnrecoverableErrorsOccurred = 0;
- CurDiagID = ~0U;
+ CurDiagID = std::numeric_limits<unsigned>::max();
LastDiagLevel = DiagnosticIDs::Ignored;
DelayedDiagID = 0;
@@ -152,8 +153,7 @@ void DiagnosticsEngine::ReportDelayed() {
Report(ID) << DelayedDiagArg1 << DelayedDiagArg2;
}
-void DiagnosticsEngine::DiagStateMap::appendFirst(
- DiagState *State) {
+void DiagnosticsEngine::DiagStateMap::appendFirst(DiagState *State) {
assert(Files.empty() && "not first");
FirstDiagState = CurDiagState = State;
CurDiagStateLoc = SourceLocation();
@@ -236,6 +236,96 @@ DiagnosticsEngine::DiagStateMap::getFile(SourceManager &SrcMgr,
return &F;
}
+void DiagnosticsEngine::DiagStateMap::dump(SourceManager &SrcMgr,
+ StringRef DiagName) const {
+ llvm::errs() << "diagnostic state at ";
+ CurDiagStateLoc.dump(SrcMgr);
+ llvm::errs() << ": " << CurDiagState << "\n";
+
+ for (auto &F : Files) {
+ FileID ID = F.first;
+ File &File = F.second;
+
+ bool PrintedOuterHeading = false;
+ auto PrintOuterHeading = [&] {
+ if (PrintedOuterHeading) return;
+ PrintedOuterHeading = true;
+
+ llvm::errs() << "File " << &File << " <FileID " << ID.getHashValue()
+ << ">: " << SrcMgr.getBuffer(ID)->getBufferIdentifier();
+ if (F.second.Parent) {
+ std::pair<FileID, unsigned> Decomp =
+ SrcMgr.getDecomposedIncludedLoc(ID);
+ assert(File.ParentOffset == Decomp.second);
+ llvm::errs() << " parent " << File.Parent << " <FileID "
+ << Decomp.first.getHashValue() << "> ";
+ SrcMgr.getLocForStartOfFile(Decomp.first)
+ .getLocWithOffset(Decomp.second)
+ .dump(SrcMgr);
+ }
+ if (File.HasLocalTransitions)
+ llvm::errs() << " has_local_transitions";
+ llvm::errs() << "\n";
+ };
+
+ if (DiagName.empty())
+ PrintOuterHeading();
+
+ for (DiagStatePoint &Transition : File.StateTransitions) {
+ bool PrintedInnerHeading = false;
+ auto PrintInnerHeading = [&] {
+ if (PrintedInnerHeading) return;
+ PrintedInnerHeading = true;
+
+ PrintOuterHeading();
+ llvm::errs() << " ";
+ SrcMgr.getLocForStartOfFile(ID)
+ .getLocWithOffset(Transition.Offset)
+ .dump(SrcMgr);
+ llvm::errs() << ": state " << Transition.State << ":\n";
+ };
+
+ if (DiagName.empty())
+ PrintInnerHeading();
+
+ for (auto &Mapping : *Transition.State) {
+ StringRef Option =
+ DiagnosticIDs::getWarningOptionForDiag(Mapping.first);
+ if (!DiagName.empty() && DiagName != Option)
+ continue;
+
+ PrintInnerHeading();
+ llvm::errs() << " ";
+ if (Option.empty())
+ llvm::errs() << "<unknown " << Mapping.first << ">";
+ else
+ llvm::errs() << Option;
+ llvm::errs() << ": ";
+
+ switch (Mapping.second.getSeverity()) {
+ case diag::Severity::Ignored: llvm::errs() << "ignored"; break;
+ case diag::Severity::Remark: llvm::errs() << "remark"; break;
+ case diag::Severity::Warning: llvm::errs() << "warning"; break;
+ case diag::Severity::Error: llvm::errs() << "error"; break;
+ case diag::Severity::Fatal: llvm::errs() << "fatal"; break;
+ }
+
+ if (!Mapping.second.isUser())
+ llvm::errs() << " default";
+ if (Mapping.second.isPragma())
+ llvm::errs() << " pragma";
+ if (Mapping.second.hasNoWarningAsError())
+ llvm::errs() << " no-error";
+ if (Mapping.second.hasNoErrorAsFatal())
+ llvm::errs() << " no-fatal";
+ if (Mapping.second.wasUpgradedFromWarning())
+ llvm::errs() << " overruled";
+ llvm::errs() << "\n";
+ }
+ }
+ }
+}
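
Illustrative (not part of the patch), assuming DiagnosticsEngine grows a
dump() wrapper elsewhere in this patch that forwards here with its
SourceManager; the filter name omits the '-W' prefix, matching
getWarningOptionForDiag():

    Diags.dump();                   // every file, every state transition
    Diags.dump("unused-variable");  // only the -Wunused-variable mappings
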
+
void DiagnosticsEngine::PushDiagStatePoint(DiagState *State,
SourceLocation Loc) {
assert(Loc.isValid() && "Adding invalid loc point");
@@ -373,7 +463,8 @@ void DiagnosticsEngine::setSeverityForAll(diag::Flavor Flavor,
}
void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
- assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
+ assert(CurDiagID == std::numeric_limits<unsigned>::max() &&
+ "Multiple diagnostics in flight at once!");
CurDiagLoc = storedDiag.getLocation();
CurDiagID = storedDiag.getID();
@@ -394,7 +485,7 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
++NumWarnings;
}
- CurDiagID = ~0U;
+ CurDiagID = std::numeric_limits<unsigned>::max();
}
bool DiagnosticsEngine::EmitCurrentDiagnostic(bool Force) {
@@ -429,8 +520,7 @@ bool DiagnosticsEngine::EmitCurrentDiagnostic(bool Force) {
return Emitted;
}
-
-DiagnosticConsumer::~DiagnosticConsumer() {}
+DiagnosticConsumer::~DiagnosticConsumer() = default;
void DiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) {
@@ -447,7 +537,7 @@ void DiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
template <std::size_t StrLen>
static bool ModifierIs(const char *Modifier, unsigned ModifierLen,
const char (&Str)[StrLen]) {
- return StrLen-1 == ModifierLen && !memcmp(Modifier, Str, StrLen-1);
+ return StrLen-1 == ModifierLen && memcmp(Modifier, Str, StrLen-1) == 0;
}
/// ScanForward - Scans forward, looking for the given character, skipping
@@ -527,7 +617,6 @@ static void HandleOrdinalModifier(unsigned ValNo,
Out << ValNo << llvm::getOrdinalSuffix(ValNo);
}
-
/// PluralNumber - Parse an unsigned integer and advance Start.
static unsigned PluralNumber(const char *&Start, const char *End) {
// Programming 101: Parse a decimal number :-)
@@ -563,7 +652,7 @@ static bool EvalPluralExpr(unsigned ValNo, const char *Start, const char *End) {
if (*Start == ':')
return true;
- while (1) {
+ while (true) {
char C = *Start;
if (C == '%') {
// Modulo expression
@@ -628,7 +717,7 @@ static void HandlePluralModifier(const Diagnostic &DInfo, unsigned ValNo,
const char *Argument, unsigned ArgumentLen,
SmallVectorImpl<char> &OutStr) {
const char *ArgumentEnd = Argument + ArgumentLen;
- while (1) {
+ while (true) {
assert(Argument < ArgumentEnd && "Plural expression didn't match.");
const char *ExprEnd = Argument;
while (*ExprEnd != ':') {
@@ -648,7 +737,7 @@ static void HandlePluralModifier(const Diagnostic &DInfo, unsigned ValNo,
}
}
-/// \brief Returns the friendly description for a token kind that will appear
+/// Returns the friendly description for a token kind that will appear
/// without quotes in diagnostic messages. These strings may be translatable in
/// future.
static const char *getTokenDescForDiagnostic(tok::TokenKind Kind) {
@@ -679,7 +768,6 @@ FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
void Diagnostic::
FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
SmallVectorImpl<char> &OutStr) const {
-
// When the diagnostic string is only "%0", the entire string is being given
// by an outside source. Remove unprintable characters from this string
// and skip all the other string processing.
@@ -899,7 +987,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
FormattedArgs,
OutStr, QualTypeVals);
break;
- case DiagnosticsEngine::ak_qualtype_pair:
+ case DiagnosticsEngine::ak_qualtype_pair: {
// Create a struct with all the info needed for printing.
TemplateDiffTypes TDT;
TDT.FromType = getRawArg(ArgNo);
@@ -967,6 +1055,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
FormatDiagnostic(SecondDollar + 1, Pipe, OutStr);
break;
}
+ }
// Remember this argument info for subsequent formatting operations. Turn
// std::strings into a null-terminated string to make it the same case as
@@ -978,7 +1067,6 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
else
FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_c_string,
(intptr_t)getArgStdStr(ArgNo).c_str()));
-
}
// Append the type tree to the end of the diagnostics.
@@ -987,12 +1075,11 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
StringRef Message)
- : ID(ID), Level(Level), Loc(), Message(Message) { }
+ : ID(ID), Level(Level), Message(Message) {}
StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level,
const Diagnostic &Info)
- : ID(Info.getID()), Level(Level)
-{
+ : ID(Info.getID()), Level(Level) {
assert((Info.getLocation().isInvalid() || Info.hasSourceManager()) &&
"Valid source location without setting a source manager for diagnostic");
if (Info.getLocation().isValid())
@@ -1008,8 +1095,8 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
StringRef Message, FullSourceLoc Loc,
ArrayRef<CharSourceRange> Ranges,
ArrayRef<FixItHint> FixIts)
- : ID(ID), Level(Level), Loc(Loc), Message(Message),
- Ranges(Ranges.begin(), Ranges.end()), FixIts(FixIts.begin(), FixIts.end())
+ : ID(ID), Level(Level), Loc(Loc), Message(Message),
+ Ranges(Ranges.begin(), Ranges.end()), FixIts(FixIts.begin(), FixIts.end())
{
}
@@ -1019,9 +1106,9 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
/// reported by DiagnosticsEngine.
bool DiagnosticConsumer::IncludeInDiagnosticCounts() const { return true; }
-void IgnoringDiagConsumer::anchor() { }
+void IgnoringDiagConsumer::anchor() {}
-ForwardingDiagnosticConsumer::~ForwardingDiagnosticConsumer() {}
+ForwardingDiagnosticConsumer::~ForwardingDiagnosticConsumer() = default;
void ForwardingDiagnosticConsumer::HandleDiagnostic(
DiagnosticsEngine::Level DiagLevel,
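Note on the ak_qualtype_pair hunk above: the case body declares locals (TemplateDiffTypes TDT and the dollar-delimited parsing state), and C++ forbids a later case label from jumping past a non-trivially-initialized local, so the body needs its own braces. A minimal standalone sketch of the pattern, with hypothetical names:

  #include <iostream>
  #include <string>

  enum class ArgKind { QualTypePair, Other };

  struct DiffInfo { std::string FromType, ToType; }; // stand-in for TemplateDiffTypes

  void format(ArgKind Kind, std::ostream &Out) {
    switch (Kind) {
    case ArgKind::QualTypePair: { // braces open a scope for the locals below
      DiffInfo TDT;               // non-trivial init must not be jumped over
      TDT.FromType = "int";
      TDT.ToType = "long";
      Out << TDT.FromType << " -> " << TDT.ToType << '\n';
      break;
    }                             // scope closes before the next label
    case ArgKind::Other:
      Out << "other\n";
      break;
    }
  }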
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index c4c425d9eb1d..697de68a5afb 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -340,7 +340,7 @@ bool DiagnosticIDs::isBuiltinWarningOrExtension(unsigned DiagID) {
getBuiltinDiagClass(DiagID) != CLASS_ERROR;
}
-/// \brief Determine whether the given built-in diagnostic ID is a
+/// Determine whether the given built-in diagnostic ID is a
/// Note.
bool DiagnosticIDs::isBuiltinNote(unsigned DiagID) {
return DiagID < diag::DIAG_UPPER_LIMIT &&
@@ -412,7 +412,7 @@ DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
return toLevel(getDiagnosticSeverity(DiagID, Loc, Diag));
}
-/// \brief Based on the way the client configured the Diagnostic
+/// Based on the way the client configured the Diagnostic
/// object, classify the specified diagnostic ID into a Level, consumable by
/// the DiagnosticClient.
///
@@ -470,7 +470,7 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
Result = diag::Severity::Error;
}
- // If -Wfatal-errors is enabled, map errors to fatal unless explicity
+ // If -Wfatal-errors is enabled, map errors to fatal unless explicitly
// disabled.
if (Result == diag::Severity::Error) {
if (State->ErrorsAsFatal && !Mapping.hasNoErrorAsFatal())
diff --git a/lib/Basic/DiagnosticOptions.cpp b/lib/Basic/DiagnosticOptions.cpp
index 93c2196ca979..ebd9bb45f380 100644
--- a/lib/Basic/DiagnosticOptions.cpp
+++ b/lib/Basic/DiagnosticOptions.cpp
@@ -1,4 +1,4 @@
-//===--- DiagnosticOptions.cpp - C Language Family Diagnostic Handling ----===//
+//===- DiagnosticOptions.cpp - C Language Family Diagnostic Handling ------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,6 +13,7 @@
#include "clang/Basic/DiagnosticOptions.h"
#include "llvm/Support/raw_ostream.h"
+#include <type_traits>
namespace clang {
@@ -21,4 +22,4 @@ raw_ostream &operator<<(raw_ostream &Out, DiagnosticLevelMask M) {
return Out << static_cast<UT>(M);
}
-} // end namespace clang
+} // namespace clang
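Note: the new <type_traits> include supports the static_cast<UT>(M) in the operator<< above; UT is presumably the underlying type of the scoped DiagnosticLevelMask enum, which does not convert to an integer implicitly. A minimal sketch with a hypothetical mask type:

  #include <iostream>
  #include <type_traits>

  enum class LevelMask : unsigned { None = 0, Note = 1, Warning = 2 };

  // A scoped enum needs an explicit cast to print as a number.
  std::ostream &operator<<(std::ostream &Out, LevelMask M) {
    using UT = std::underlying_type_t<LevelMask>;
    return Out << static_cast<UT>(M);
  }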
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index a3e226d6cc96..7e2d01c4981d 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -102,7 +102,7 @@ void FileManager::clearStatCaches() {
StatCache.reset();
}
-/// \brief Retrieve the directory that the given file name resides in.
+/// Retrieve the directory that the given file name resides in.
/// Filename can point to either a real file or a virtual file.
static const DirectoryEntry *getDirectoryFromFile(FileManager &FileMgr,
StringRef Filename,
@@ -157,7 +157,7 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
DirName != llvm::sys::path::root_path(DirName) &&
llvm::sys::path::is_separator(DirName.back()))
DirName = DirName.substr(0, DirName.size()-1);
-#ifdef LLVM_ON_WIN32
+#ifdef _WIN32
// Fixing a problem with "clang C:test.c" on Windows.
// Stat("C:") does not recognize "C:" as a valid directory
std::string DirNameStr;
@@ -450,13 +450,13 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
-FileManager::getBufferForFile(StringRef Filename) {
+FileManager::getBufferForFile(StringRef Filename, bool isVolatile) {
if (FileSystemOpts.WorkingDir.empty())
- return FS->getBufferForFile(Filename);
+ return FS->getBufferForFile(Filename, -1, true, isVolatile);
SmallString<128> FilePath(Filename);
FixupRelativePath(FilePath);
- return FS->getBufferForFile(FilePath.c_str());
+ return FS->getBufferForFile(FilePath.c_str(), -1, true, isVolatile);
}
/// getStatValue - Get the 'stat' information for the specified path,
@@ -534,23 +534,9 @@ StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
StringRef CanonicalName(Dir->getName());
-#ifdef LLVM_ON_UNIX
- char CanonicalNameBuf[PATH_MAX];
- if (realpath(Dir->getName().str().c_str(), CanonicalNameBuf))
+ SmallString<4096> CanonicalNameBuf;
+ if (!FS->getRealPath(Dir->getName(), CanonicalNameBuf))
CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
-#else
- SmallString<256> CanonicalNameBuf(CanonicalName);
- llvm::sys::fs::make_absolute(CanonicalNameBuf);
- llvm::sys::path::native(CanonicalNameBuf);
- // We've run into needing to remove '..' here in the wild though, so
- // remove it.
- // On Windows, symlinks are significantly less prevalent, so removing
- // '..' is pretty safe.
- // Ideally we'd have an equivalent of `realpath` and could implement
- // sys::fs::canonical across all the platforms.
- llvm::sys::path::remove_dots(CanonicalNameBuf, /* remove_dot_dot */ true);
- CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
-#endif
CanonicalDirNames.insert(std::make_pair(Dir, CanonicalName));
return CanonicalName;
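Note on the FileManager changes: getBufferForFile(StringRef) gains an isVolatile flag forwarded to the VFS, and getCanonicalName drops the Unix-only realpath(3) path in favor of the portable vfs::FileSystem::getRealPath. A hypothetical caller of the new overload; isVolatile=true requests a copied rather than memory-mapped buffer, so a file being rewritten underneath the compiler cannot mutate it:

  #include "clang/Basic/FileManager.h"
  #include "llvm/Support/MemoryBuffer.h"

  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
  readPossiblyChangingFile(clang::FileManager &FileMgr, llvm::StringRef Path) {
    return FileMgr.getBufferForFile(Path, /*isVolatile=*/true);
  }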
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index 799df1d3c3a6..ebee32670e0a 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -1,4 +1,4 @@
-//===--- FileSystemStatCache.cpp - Caching for 'stat' calls ---------------===//
+//===- FileSystemStatCache.cpp - Caching for 'stat' calls -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,11 +13,14 @@
#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/VirtualFileSystem.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Path.h"
+#include <utility>
using namespace clang;
-void FileSystemStatCache::anchor() { }
+void FileSystemStatCache::anchor() {}
static void copyStatusToFileData(const vfs::Status &Status,
FileData &Data) {
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 2bed531ae3d7..2fef481ae2c5 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -65,7 +65,7 @@ IdentifierInfoLookup::~IdentifierInfoLookup() = default;
namespace {
-/// \brief A simple identifier lookup iterator that represents an
+/// A simple identifier lookup iterator that represents an
/// empty sequence of identifiers.
class EmptyLookupIterator : public IdentifierIterator
{
@@ -79,16 +79,16 @@ IdentifierIterator *IdentifierInfoLookup::getIdentifiers() {
return new EmptyLookupIterator();
}
+IdentifierTable::IdentifierTable(IdentifierInfoLookup *ExternalLookup)
+ : HashTable(8192), // Start with space for 8K identifiers.
+ ExternalLookup(ExternalLookup) {}
+
IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
- IdentifierInfoLookup* externalLookup)
- : HashTable(8192), // Start with space for 8K identifiers.
- ExternalLookup(externalLookup) {
+ IdentifierInfoLookup *ExternalLookup)
+ : IdentifierTable(ExternalLookup) {
// Populate the identifier table with info about keywords for the current
// language.
AddKeywords(LangOpts);
-
- // Add the '_experimental_modules_import' contextual keyword.
- get("import").setModulesImport(true);
}
//===----------------------------------------------------------------------===//
@@ -108,25 +108,27 @@ namespace {
KEYALTIVEC = 0x40,
KEYNOCXX = 0x80,
KEYBORLAND = 0x100,
- KEYOPENCL = 0x200,
+ KEYOPENCLC = 0x200,
KEYC11 = 0x400,
KEYARC = 0x800,
KEYNOMS18 = 0x01000,
KEYNOOPENCL = 0x02000,
WCHARSUPPORT = 0x04000,
HALFSUPPORT = 0x08000,
- KEYCONCEPTS = 0x10000,
- KEYOBJC2 = 0x20000,
- KEYZVECTOR = 0x40000,
- KEYCOROUTINES = 0x80000,
- KEYMODULES = 0x100000,
- KEYCXX2A = 0x200000,
+ CHAR8SUPPORT = 0x10000,
+ KEYCONCEPTS = 0x20000,
+ KEYOBJC2 = 0x40000,
+ KEYZVECTOR = 0x80000,
+ KEYCOROUTINES = 0x100000,
+ KEYMODULES = 0x200000,
+ KEYCXX2A = 0x400000,
+ KEYOPENCLCXX = 0x800000,
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX2A,
- KEYALL = (0x3fffff & ~KEYNOMS18 &
+ KEYALL = (0xffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
- /// \brief How a keyword is treated in the selected standard.
+ /// How a keyword is treated in the selected standard.
enum KeywordStatus {
KS_Disabled, // Disabled
KS_Extension, // Is an extension
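Note on the keyword-flag hunk above: inserting CHAR8SUPPORT and KEYOPENCLCXX shifts every later flag one bit left, so the flags now occupy bits 0..23 and the KEYALL literal must grow from 0x3fffff to 0xffffff. A sketch of deriving the mask instead of hand-editing the literal (hypothetical flag names):

  enum KeywordFlags : unsigned {
    KEYCXX       = 0x1,
    // ... intermediate flags elided in this sketch ...
    KEYOPENCLCXX = 0x800000,          // currently the highest bit (bit 23)
    KEYMAX       = KEYOPENCLCXX,      // update when a new flag is appended
    KEYALLMASK   = (KEYMAX << 1) - 1  // 0xffffff: covers bits 0..23
  };
  static_assert(KEYALLMASK == 0xffffff, "mask must cover every keyword flag");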
@@ -136,7 +138,7 @@ namespace {
} // namespace
-/// \brief Translates flags as specified in TokenKinds.def into keyword status
+/// Translates flags as specified in TokenKinds.def into keyword status
/// in the given language standard.
static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
unsigned Flags) {
@@ -151,8 +153,11 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.Bool && (Flags & BOOLSUPPORT)) return KS_Enabled;
if (LangOpts.Half && (Flags & HALFSUPPORT)) return KS_Enabled;
if (LangOpts.WChar && (Flags & WCHARSUPPORT)) return KS_Enabled;
+ if (LangOpts.Char8 && (Flags & CHAR8SUPPORT)) return KS_Enabled;
if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) return KS_Enabled;
- if (LangOpts.OpenCL && (Flags & KEYOPENCL)) return KS_Enabled;
+ if (LangOpts.OpenCL && !LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLC))
+ return KS_Enabled;
+ if (LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLCXX)) return KS_Enabled;
if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) return KS_Enabled;
if (LangOpts.C11 && (Flags & KEYC11)) return KS_Enabled;
// We treat bridge casts as objective-C keywords so we can warn on them
@@ -237,9 +242,12 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
if (LangOpts.DeclSpecKeyword)
AddKeyword("__declspec", tok::kw___declspec, KEYALL, LangOpts, *this);
+
+ // Add the '_experimental_modules_import' contextual keyword.
+ get("import").setModulesImport(true);
}
-/// \brief Checks if the specified token kind represents a keyword in the
+/// Checks if the specified token kind represents a keyword in the
/// specified language.
/// \returns Status of the keyword in the language.
static KeywordStatus getTokenKwStatus(const LangOptions &LangOpts,
@@ -252,7 +260,7 @@ static KeywordStatus getTokenKwStatus(const LangOptions &LangOpts,
}
}
-/// \brief Returns true if the identifier represents a keyword in the
+/// Returns true if the identifier represents a keyword in the
/// specified language.
bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) const {
switch (getTokenKwStatus(LangOpts, getTokenID())) {
@@ -264,7 +272,7 @@ bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) const {
}
}
-/// \brief Returns true if the identifier represents a C++ keyword in the
+/// Returns true if the identifier represents a C++ keyword in the
/// specified language.
bool IdentifierInfo::isCPlusPlusKeyword(const LangOptions &LangOpts) const {
if (!LangOpts.CPlusPlus || !isKeyword(LangOpts))
@@ -496,6 +504,8 @@ void Selector::print(llvm::raw_ostream &OS) const {
OS << getAsString();
}
+LLVM_DUMP_METHOD void Selector::dump() const { print(llvm::errs()); }
+
/// Interpreting the given string using the normal CamelCase
/// conventions, determine whether the given string starts with the
/// given "word", which is assumed to end in a lowercase letter.
@@ -637,6 +647,12 @@ SelectorTable::constructSetterSelector(IdentifierTable &Idents,
return SelTable.getUnarySelector(SetterName);
}
+std::string SelectorTable::getPropertyNameFromSetterSelector(Selector Sel) {
+ StringRef Name = Sel.getNameForSlot(0);
+ assert(Name.startswith("set") && "invalid setter name");
+ return (Twine(toLowercase(Name[3])) + Name.drop_front(4)).str();
+}
+
size_t SelectorTable::getTotalMemory() const {
SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
return SelTabImpl.Allocator.getTotalMemory();
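Note: getPropertyNameFromSetterSelector inverts constructSetterSelector; it maps a setter such as setEnabled: back to the property name enabled by dropping the "set" prefix and lowercasing the next character. A standalone equivalent without the LLVM string utilities:

  #include <cassert>
  #include <cctype>
  #include <string>

  // "setEnabled" -> "enabled"; mirrors the Twine-based version above.
  std::string propertyNameFromSetter(const std::string &Name) {
    assert(Name.size() > 3 && Name.compare(0, 3, "set") == 0 && "invalid setter");
    std::string Prop = Name.substr(3); // drop "set"
    Prop[0] = static_cast<char>(
        std::tolower(static_cast<unsigned char>(Prop[0])));
    return Prop;
  }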
diff --git a/lib/Basic/LangOptions.cpp b/lib/Basic/LangOptions.cpp
index db81507aa209..763ba33683bc 100644
--- a/lib/Basic/LangOptions.cpp
+++ b/lib/Basic/LangOptions.cpp
@@ -1,4 +1,4 @@
-//===--- LangOptions.cpp - C Language Family Language Options ---*- C++ -*-===//
+//===- LangOptions.cpp - C Language Family Language Options ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,13 +10,12 @@
// This file defines the LangOptions class.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/LangOptions.h"
-#include "llvm/ADT/StringRef.h"
using namespace clang;
-LangOptions::LangOptions()
- : IsHeaderFile(false) {
+LangOptions::LangOptions() {
#define LANGOPT(Name, Bits, Default, Description) Name = Default;
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) set##Name(Default);
#include "clang/Basic/LangOptions.def"
@@ -44,3 +43,8 @@ bool LangOptions::isNoBuiltinFunc(StringRef FuncName) const {
return true;
return false;
}
+
+VersionTuple LangOptions::getOpenCLVersionTuple() const {
+ const int Ver = OpenCLCPlusPlus ? OpenCLCPlusPlusVersion : OpenCLVersion;
+ return VersionTuple(Ver / 100, (Ver % 100) / 10);
+}
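Note: getOpenCLVersionTuple decodes the scaled integer the language options store, e.g.:

  // Ver = 120 (OpenCL 1.2): 120 / 100 == 1, (120 % 100) / 10 == 2 -> VersionTuple(1, 2)
  // Ver = 200 (OpenCL 2.0): 200 / 100 == 2, (200 % 100) / 10 == 0 -> VersionTuple(2, 0)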
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 7124184865c6..2714b98120c0 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -44,7 +44,8 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
IsSystem(false), IsExternC(false), IsInferred(false),
InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), ConfigMacrosExhaustive(false),
- NoUndeclaredIncludes(false), NameVisibility(Hidden) {
+ NoUndeclaredIncludes(false), ModuleMapIsPrivate(false),
+ NameVisibility(Hidden) {
if (Parent) {
if (!Parent->isAvailable())
IsAvailable = false;
@@ -54,6 +55,8 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
IsExternC = true;
if (Parent->NoUndeclaredIncludes)
NoUndeclaredIncludes = true;
+ if (Parent->ModuleMapIsPrivate)
+ ModuleMapIsPrivate = true;
IsMissingRequirement = Parent->IsMissingRequirement;
Parent->SubModuleIndex[Name] = Parent->SubModules.size();
@@ -68,7 +71,7 @@ Module::~Module() {
}
}
-/// \brief Determine whether a translation unit built using the current
+/// Determine whether a translation unit built using the current
/// language options has the given feature.
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
const TargetInfo &Target) {
@@ -78,6 +81,11 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("coroutines", LangOpts.CoroutinesTS)
.Case("cplusplus", LangOpts.CPlusPlus)
.Case("cplusplus11", LangOpts.CPlusPlus11)
+ .Case("cplusplus14", LangOpts.CPlusPlus14)
+ .Case("cplusplus17", LangOpts.CPlusPlus17)
+ .Case("c99", LangOpts.C99)
+ .Case("c11", LangOpts.C11)
+ .Case("c17", LangOpts.C17)
.Case("freestanding", LangOpts.Freestanding)
.Case("gnuinlineasm", LangOpts.GNUAsm)
.Case("objc", LangOpts.ObjC1)
@@ -95,11 +103,16 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
Requirement &Req,
- UnresolvedHeaderDirective &MissingHeader) const {
+ UnresolvedHeaderDirective &MissingHeader,
+ Module *&ShadowingModule) const {
if (IsAvailable)
return true;
for (const Module *Current = this; Current; Current = Current->Parent) {
+ if (Current->ShadowingModule) {
+ ShadowingModule = Current->ShadowingModule;
+ return false;
+ }
for (unsigned I = 0, N = Current->Requirements.size(); I != N; ++I) {
if (hasFeature(Current->Requirements[I].first, LangOpts, Target) !=
Current->Requirements[I].second) {
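Note on the Module changes: ModuleMapIsPrivate is a new inherited bit (presumably marking modules declared in private module maps, propagated from the parent like IsExternC), and isAvailable now reports which module shadows this one through an out-parameter checked on every ancestor before the feature requirements. A hypothetical caller using the new signature:

  #include "clang/Basic/Module.h"
  #include "llvm/Support/raw_ostream.h"

  void reportAvailability(clang::Module *M, const clang::LangOptions &LangOpts,
                          const clang::TargetInfo &Target) {
    clang::Module::Requirement Req;
    clang::Module::UnresolvedHeaderDirective MissingHeader;
    clang::Module *Shadowing = nullptr;
    if (!M->isAvailable(LangOpts, Target, Req, MissingHeader, Shadowing) &&
        Shadowing)
      llvm::errs() << M->Name << " is shadowed by " << Shadowing->Name << "\n";
  }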
diff --git a/lib/Basic/ObjCRuntime.cpp b/lib/Basic/ObjCRuntime.cpp
index 133c66945dde..8fa0afbe03f2 100644
--- a/lib/Basic/ObjCRuntime.cpp
+++ b/lib/Basic/ObjCRuntime.cpp
@@ -1,4 +1,4 @@
-//===- ObjCRuntime.cpp - Objective-C Runtime Handling -----------*- C++ -*-===//
+//===- ObjCRuntime.cpp - Objective-C Runtime Handling ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,13 @@
// target Objective-C runtime.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/ObjCRuntime.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
+#include <string>
using namespace clang;
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 09c919e2b072..67b7d91e6292 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
/// \file
-/// \brief This file implements the OpenMP enum and support functions.
+/// This file implements the OpenMP enum and support functions.
///
//===----------------------------------------------------------------------===//
@@ -891,6 +891,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
+ CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
CaptureRegions.push_back(OMPD_teams);
break;
@@ -901,6 +902,7 @@ void clang::getOpenMPCaptureRegions(
break;
case OMPD_target:
case OMPD_target_simd:
+ CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
break;
case OMPD_teams_distribute_parallel_for:
@@ -911,6 +913,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
CaptureRegions.push_back(OMPD_parallel);
break;
@@ -924,6 +927,13 @@ void clang::getOpenMPCaptureRegions(
case OMPD_taskloop_simd:
CaptureRegions.push_back(OMPD_taskloop);
break;
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ CaptureRegions.push_back(OMPD_task);
+ CaptureRegions.push_back(OMPD_target);
+ CaptureRegions.push_back(OMPD_teams);
+ CaptureRegions.push_back(OMPD_parallel);
+ break;
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
@@ -938,8 +948,6 @@ void clang::getOpenMPCaptureRegions(
case OMPD_atomic:
case OMPD_target_data:
case OMPD_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
CaptureRegions.push_back(OMPD_unknown);
break;
case OMPD_threadprivate:
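Note on the capture-region changes: every target-based directive now pushes OMPD_task ahead of OMPD_target, modeling the implicit task a target region may execute as, and the combined target teams distribute parallel for forms get an explicit task/target/teams/parallel nesting instead of falling through to OMPD_unknown. An illustrative check (not from the patch itself):

  #include "clang/Basic/OpenMPKinds.h"
  #include "llvm/ADT/SmallVector.h"
  #include <cassert>

  void checkCaptureRegions() {
    llvm::SmallVector<clang::OpenMPDirectiveKind, 4> Regions;
    clang::getOpenMPCaptureRegions(
        Regions, clang::OMPD_target_teams_distribute_parallel_for);
    // Expected nesting after this change:
    //   OMPD_task, OMPD_target, OMPD_teams, OMPD_parallel
    assert(Regions.size() == 4 && Regions.front() == clang::OMPD_task);
  }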
diff --git a/lib/Basic/OperatorPrecedence.cpp b/lib/Basic/OperatorPrecedence.cpp
index 3743b6ad5fef..bf805fc7deb1 100644
--- a/lib/Basic/OperatorPrecedence.cpp
+++ b/lib/Basic/OperatorPrecedence.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Defines and computes precedence levels for binary/ternary operators.
+/// Defines and computes precedence levels for binary/ternary operators.
///
//===----------------------------------------------------------------------===//
#include "clang/Basic/OperatorPrecedence.h"
diff --git a/lib/Basic/Sanitizers.cpp b/lib/Basic/Sanitizers.cpp
index 91b6b2dc74eb..8faf17b8f22e 100644
--- a/lib/Basic/Sanitizers.cpp
+++ b/lib/Basic/Sanitizers.cpp
@@ -1,4 +1,4 @@
-//===--- Sanitizers.cpp - C Language Family Language Options ----*- C++ -*-===//
+//===- Sanitizers.cpp - C Language Family Language Options ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,9 +10,8 @@
// This file defines the classes from Sanitizers.h
//
//===----------------------------------------------------------------------===//
+
#include "clang/Basic/Sanitizers.h"
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
index 89ddbc946a49..fef1f44fc8a5 100644
--- a/lib/Basic/SourceLocation.cpp
+++ b/lib/Basic/SourceLocation.cpp
@@ -1,4 +1,4 @@
-//==--- SourceLocation.cpp - Compact identifier for Source Files -*- C++ -*-==//
+//===- SourceLocation.cpp - Compact identifier for Source Files -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,10 +12,17 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include <cstdio>
+#include <cassert>
+#include <string>
+#include <utility>
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -81,7 +88,6 @@ FileID FullSourceLoc::getFileID() const {
return SrcMgr->getFileID(*this);
}
-
FullSourceLoc FullSourceLoc::getExpansionLoc() const {
assert(isValid());
return FullSourceLoc(SrcMgr->getExpansionLoc(*this), *SrcMgr);
@@ -97,15 +103,6 @@ FullSourceLoc FullSourceLoc::getFileLoc() const {
return FullSourceLoc(SrcMgr->getFileLoc(*this), *SrcMgr);
}
-std::pair<FullSourceLoc, FullSourceLoc>
-FullSourceLoc::getImmediateExpansionRange() const {
- assert(isValid());
- std::pair<SourceLocation, SourceLocation> Range =
- SrcMgr->getImmediateExpansionRange(*this);
- return std::make_pair(FullSourceLoc(Range.first, *SrcMgr),
- FullSourceLoc(Range.second, *SrcMgr));
-}
-
PresumedLoc FullSourceLoc::getPresumedLoc(bool UseLineDirectives) const {
if (!isValid())
return PresumedLoc();
@@ -148,15 +145,6 @@ unsigned FullSourceLoc::getColumnNumber(bool *Invalid) const {
return SrcMgr->getColumnNumber(getFileID(), getFileOffset(), Invalid);
}
-std::pair<FullSourceLoc, FullSourceLoc>
-FullSourceLoc::getExpansionRange() const {
- assert(isValid());
- std::pair<SourceLocation, SourceLocation> Range =
- SrcMgr->getExpansionRange(*this);
- return std::make_pair(FullSourceLoc(Range.first, *SrcMgr),
- FullSourceLoc(Range.second, *SrcMgr));
-}
-
const FileEntry *FullSourceLoc::getFileEntry() const {
assert(isValid());
return SrcMgr->getFileEntryForID(getFileID());
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 0a51985614c8..ae76817826e1 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -170,8 +170,10 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
const char *InvalidBOM = llvm::StringSwitch<const char *>(BufStr)
.StartsWith("\xFE\xFF", "UTF-16 (BE)")
.StartsWith("\xFF\xFE", "UTF-16 (LE)")
- .StartsWith("\x00\x00\xFE\xFF", "UTF-32 (BE)")
- .StartsWith("\xFF\xFE\x00\x00", "UTF-32 (LE)")
+ .StartsWith(llvm::StringLiteral::withInnerNUL("\x00\x00\xFE\xFF"),
+ "UTF-32 (BE)")
+ .StartsWith(llvm::StringLiteral::withInnerNUL("\xFF\xFE\x00\x00"),
+ "UTF-32 (LE)")
.StartsWith("\x2B\x2F\x76", "UTF-7")
.StartsWith("\xF7\x64\x4C", "UTF-1")
.StartsWith("\xDD\x73\x66\x73", "UTF-EBCDIC")
@@ -258,7 +260,7 @@ const LineEntry *LineTableInfo::FindNearestLineEntry(FileID FID,
return &*--I;
}
-/// \brief Add a new line entry that has already been encoded into
+/// Add a new line entry that has already been encoded into
/// the internal representation of the line table.
void LineTableInfo::AddEntry(FileID FID,
const std::vector<LineEntry> &Entries) {
@@ -466,7 +468,7 @@ SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
return std::make_pair(-ID - 1, CurrentLoadedOffset);
}
-/// \brief As part of recovering from missing or changed content, produce a
+/// As part of recovering from missing or changed content, produce a
/// fake, non-empty buffer.
llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
if (!FakeBufferForRecovery)
@@ -476,7 +478,7 @@ llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
return FakeBufferForRecovery.get();
}
-/// \brief As part of recovering from missing or changed content, produce a
+/// As part of recovering from missing or changed content, produce a
/// fake content cache.
const SrcMgr::ContentCache *
SourceManager::getFakeContentCacheForRecovery() const {
@@ -488,7 +490,7 @@ SourceManager::getFakeContentCacheForRecovery() const {
return FakeContentCacheForRecovery.get();
}
-/// \brief Returns the previous in-order FileID or an invalid FileID if there
+/// Returns the previous in-order FileID or an invalid FileID if there
/// is no previous one.
FileID SourceManager::getPreviousFileID(FileID FID) const {
if (FID.isInvalid())
@@ -508,7 +510,7 @@ FileID SourceManager::getPreviousFileID(FileID FID) const {
return FileID::get(ID-1);
}
-/// \brief Returns the next in-order FileID or an invalid FileID if there is
+/// Returns the next in-order FileID or an invalid FileID if there is
/// no next one.
FileID SourceManager::getNextFileID(FileID FID) const {
if (FID.isInvalid())
@@ -577,13 +579,24 @@ SourceManager::createExpansionLoc(SourceLocation SpellingLoc,
SourceLocation ExpansionLocStart,
SourceLocation ExpansionLocEnd,
unsigned TokLength,
+ bool ExpansionIsTokenRange,
int LoadedID,
unsigned LoadedOffset) {
- ExpansionInfo Info = ExpansionInfo::create(SpellingLoc, ExpansionLocStart,
- ExpansionLocEnd);
+ ExpansionInfo Info = ExpansionInfo::create(
+ SpellingLoc, ExpansionLocStart, ExpansionLocEnd, ExpansionIsTokenRange);
return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
}
+SourceLocation SourceManager::createTokenSplitLoc(SourceLocation Spelling,
+ SourceLocation TokenStart,
+ SourceLocation TokenEnd) {
+ assert(getFileID(TokenStart) == getFileID(TokenEnd) &&
+ "token spans multiple files");
+ return createExpansionLocImpl(
+ ExpansionInfo::createForTokenSplit(Spelling, TokenStart, TokenEnd),
+ TokenEnd.getOffset() - TokenStart.getOffset());
+}
+
SourceLocation
SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
unsigned TokLength,
@@ -679,7 +692,7 @@ StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
// SourceLocation manipulation methods.
//===----------------------------------------------------------------------===//
-/// \brief Return the FileID for a SourceLocation.
+/// Return the FileID for a SourceLocation.
///
/// This is the cache-miss path of getFileID. Not as hot as that function, but
/// still very important. It is responsible for finding the entry in the
@@ -695,7 +708,7 @@ FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
return getFileIDLoaded(SLocOffset);
}
-/// \brief Return the FileID for a SourceLocation with a low offset.
+/// Return the FileID for a SourceLocation with a low offset.
///
/// This function knows that the SourceLocation is in a local buffer, not a
/// loaded one.
@@ -786,7 +799,7 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
}
}
-/// \brief Return the FileID for a SourceLocation with a high offset.
+/// Return the FileID for a SourceLocation with a high offset.
///
/// This function knows that the SourceLocation is in a loaded buffer, not a
/// local one.
@@ -893,7 +906,7 @@ SourceLocation SourceManager::getFileLocSlowCase(SourceLocation Loc) const {
if (isMacroArgExpansion(Loc))
Loc = getImmediateSpellingLoc(Loc);
else
- Loc = getImmediateExpansionRange(Loc).first;
+ Loc = getImmediateExpansionRange(Loc).getBegin();
} while (!Loc.isFileID());
return Loc;
}
@@ -948,28 +961,36 @@ SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
/// getImmediateExpansionRange - Loc is required to be an expansion location.
/// Return the start/end of the expansion information.
-std::pair<SourceLocation,SourceLocation>
+CharSourceRange
SourceManager::getImmediateExpansionRange(SourceLocation Loc) const {
assert(Loc.isMacroID() && "Not a macro expansion loc!");
const ExpansionInfo &Expansion = getSLocEntry(getFileID(Loc)).getExpansion();
return Expansion.getExpansionLocRange();
}
+SourceLocation SourceManager::getTopMacroCallerLoc(SourceLocation Loc) const {
+ while (isMacroArgExpansion(Loc))
+ Loc = getImmediateSpellingLoc(Loc);
+ return Loc;
+}
+
/// getExpansionRange - Given a SourceLocation object, return the range of
/// tokens covered by the expansion in the ultimate file.
-std::pair<SourceLocation,SourceLocation>
-SourceManager::getExpansionRange(SourceLocation Loc) const {
- if (Loc.isFileID()) return std::make_pair(Loc, Loc);
+CharSourceRange SourceManager::getExpansionRange(SourceLocation Loc) const {
+ if (Loc.isFileID())
+ return CharSourceRange(SourceRange(Loc, Loc), true);
- std::pair<SourceLocation,SourceLocation> Res =
- getImmediateExpansionRange(Loc);
+ CharSourceRange Res = getImmediateExpansionRange(Loc);
// Fully resolve the start and end locations to their ultimate expansion
// points.
- while (!Res.first.isFileID())
- Res.first = getImmediateExpansionRange(Res.first).first;
- while (!Res.second.isFileID())
- Res.second = getImmediateExpansionRange(Res.second).second;
+ while (!Res.getBegin().isFileID())
+ Res.setBegin(getImmediateExpansionRange(Res.getBegin()).getBegin());
+ while (!Res.getEnd().isFileID()) {
+ CharSourceRange EndRange = getImmediateExpansionRange(Res.getEnd());
+ Res.setEnd(EndRange.getEnd());
+ Res.setTokenRange(EndRange.isTokenRange());
+ }
return Res;
}
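Note: getExpansionRange (and getImmediateExpansionRange above it) now return CharSourceRange rather than a location pair, so the token-range versus char-range distinction survives resolution; the loop propagates isTokenRange from the last end-range it resolves. Caller migration sketch:

  #include "clang/Basic/SourceManager.h"

  // was: std::pair<SourceLocation, SourceLocation> R = SM.getExpansionRange(Loc);
  void useRange(const clang::SourceManager &SM, clang::SourceLocation Loc) {
    clang::CharSourceRange R = SM.getExpansionRange(Loc);
    clang::SourceLocation B = R.getBegin();   // was R.first
    clang::SourceLocation E = R.getEnd();     // was R.second
    bool EndsOnTokenStart = R.isTokenRange(); // new: previously lost
    (void)B; (void)E; (void)EndsOnTokenStart;
  }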
@@ -1498,7 +1519,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
return PresumedLoc(Filename.data(), LineNo, ColNo, IncludeLoc);
}
-/// \brief Returns whether the PresumedLoc for a given SourceLocation is
+/// Returns whether the PresumedLoc for a given SourceLocation is
/// in the main file.
///
/// This computes the "presumed" location for a SourceLocation, then checks
@@ -1528,7 +1549,7 @@ bool SourceManager::isInMainFile(SourceLocation Loc) const {
return FI.getIncludeLoc().isInvalid();
}
-/// \brief The size of the SLocEntry that \p FID represents.
+/// The size of the SLocEntry that \p FID represents.
unsigned SourceManager::getFileIDSize(FileID FID) const {
bool Invalid = false;
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
@@ -1551,7 +1572,7 @@ unsigned SourceManager::getFileIDSize(FileID FID) const {
// Other miscellaneous methods.
//===----------------------------------------------------------------------===//
-/// \brief Retrieve the inode for the given file entry, if possible.
+/// Retrieve the inode for the given file entry, if possible.
///
/// This routine involves a system call, and therefore should only be used
/// in non-performance-critical code.
@@ -1567,7 +1588,7 @@ getActualFileUID(const FileEntry *File) {
return ID;
}
-/// \brief Get the source location for the given file:line:col triplet.
+/// Get the source location for the given file:line:col triplet.
///
/// If the source file is included multiple times, the source location will
/// be based upon an arbitrary inclusion.
@@ -1581,7 +1602,7 @@ SourceLocation SourceManager::translateFileLineCol(const FileEntry *SourceFile,
return translateLineCol(FirstFID, Line, Col);
}
-/// \brief Get the FileID for the given file.
+/// Get the FileID for the given file.
///
/// If the source file is included multiple times, the FileID will be the
/// first inclusion.
@@ -1698,7 +1719,7 @@ FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
return FirstFID;
}
-/// \brief Get the source location in \arg FID for the given line:col.
+/// Get the source location in \arg FID for the given line:col.
/// Returns null location if \arg FID is not a file SLocEntry.
SourceLocation SourceManager::translateLineCol(FileID FID,
unsigned Line,
@@ -1759,7 +1780,7 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
return FileLoc.getLocWithOffset(FilePos + i);
}
-/// \brief Compute a map of macro argument chunks to their expanded source
+/// Compute a map of macro argument chunks to their expanded source
/// location. Chunks that are not part of a macro argument will map to an
/// invalid source location. e.g. if a file contains one macro argument at
/// offset 100 with length 10, this is how the map will be formed:
@@ -1879,7 +1900,7 @@ void SourceManager::associateFileChunkWithMacroArgExp(
// 0 -> SourceLocation()
// 100 -> Expanded loc #1
// 110 -> SourceLocation()
- // and we found a new macro FileID that lexed from offet 105 with length 3,
+ // and we found a new macro FileID that lexed from offset 105 with length 3,
// the new map will be:
// 0 -> SourceLocation()
// 100 -> Expanded loc #1
@@ -1898,7 +1919,7 @@ void SourceManager::associateFileChunkWithMacroArgExp(
MacroArgsCache[EndOffs] = EndOffsMappedLoc;
}
-/// \brief If \arg Loc points inside a function macro argument, the returned
+/// If \arg Loc points inside a function macro argument, the returned
/// location will be the macro location in which the argument was expanded.
/// If a macro argument is used multiple times, the expanded location will
/// be at the first expansion of the argument.
@@ -2007,7 +2028,7 @@ InBeforeInTUCacheEntry &SourceManager::getInBeforeInTUCache(FileID LFID,
return IBTUCacheOverflow;
}
-/// \brief Determines the order of 2 source locations in the translation unit.
+/// Determines the order of 2 source locations in the translation unit.
///
/// \returns true if LHS source location comes before RHS, false otherwise.
bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
@@ -2237,3 +2258,29 @@ size_t SourceManager::getDataStructureSizes() const {
return size;
}
+
+SourceManagerForFile::SourceManagerForFile(StringRef FileName,
+ StringRef Content) {
+ // This is referenced by `FileMgr` and will be released by `FileMgr` when it
+ // is deleted.
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ InMemoryFileSystem->addFile(
+ FileName, 0,
+ llvm::MemoryBuffer::getMemBuffer(Content, FileName,
+ /*RequiresNullTerminator=*/false));
+ // This is passed to `SM` as reference, so the pointer has to be referenced
+ // in `Environment` so that `FileMgr` can out-live this function scope.
+ FileMgr =
+ llvm::make_unique<FileManager>(FileSystemOptions(), InMemoryFileSystem);
+ // This is passed to `SM` as reference, so the pointer has to be referenced
+ // by `Environment` due to the same reason above.
+ Diagnostics = llvm::make_unique<DiagnosticsEngine>(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
+ new DiagnosticOptions);
+ SourceMgr = llvm::make_unique<SourceManager>(*Diagnostics, *FileMgr);
+ FileID ID = SourceMgr->createFileID(FileMgr->getFile(FileName),
+ SourceLocation(), clang::SrcMgr::C_User);
+ assert(ID.isValid());
+ SourceMgr->setMainFileID(ID);
+}
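Note: SourceManagerForFile bundles an in-memory VFS, FileManager, DiagnosticsEngine, and SourceManager around a single named buffer; the member ordering keeps those dependencies alive for the SourceManager's whole lifetime. Hypothetical usage, assuming the accessor declared in the header is named get():

  #include "clang/Basic/SourceManager.h"

  void demo() {
    clang::SourceManagerForFile VirtualFile("input.cc", "int x;\n");
    clang::SourceManager &SM = VirtualFile.get();
    (void)SM.getMainFileID(); // FileID of the in-memory "input.cc"
  }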
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index ddd292c1b743..3400c8721f7a 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -14,6 +14,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
@@ -32,12 +33,31 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
TLSSupported = true;
VLASupported = true;
NoAsmVariants = false;
+ HasLegalHalfType = false;
HasFloat128 = false;
PointerWidth = PointerAlign = 32;
BoolWidth = BoolAlign = 8;
IntWidth = IntAlign = 32;
LongWidth = LongAlign = 32;
LongLongWidth = LongLongAlign = 64;
+
+ // Fixed point default bit widths
+ ShortAccumWidth = ShortAccumAlign = 16;
+ AccumWidth = AccumAlign = 32;
+ LongAccumWidth = LongAccumAlign = 64;
+ ShortFractWidth = ShortFractAlign = 8;
+ FractWidth = FractAlign = 16;
+ LongFractWidth = LongFractAlign = 32;
+
+ // Fixed point default integral and fractional bit sizes
+ // We give the _Accum 1 fewer fractional bits than their corresponding _Fract
+ // types by default to have the same number of fractional bits between _Accum
+ // and _Fract types.
+ PaddingOnUnsignedFixedPoint = false;
+ ShortAccumScale = 7;
+ AccumScale = 15;
+ LongAccumScale = 31;
+
SuitableAlign = 64;
DefaultAlignForAttributeAligned = 128;
MinGlobalAlign = 0;
@@ -114,6 +134,18 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// Out of line virtual dtor for TargetInfo.
TargetInfo::~TargetInfo() {}
+bool
+TargetInfo::checkCFProtectionBranchSupported(DiagnosticsEngine &Diags) const {
+ Diags.Report(diag::err_opt_not_valid_on_target) << "cf-protection=branch";
+ return false;
+}
+
+bool
+TargetInfo::checkCFProtectionReturnSupported(DiagnosticsEngine &Diags) const {
+ Diags.Report(diag::err_opt_not_valid_on_target) << "cf-protection=return";
+ return false;
+}
+
/// getTypeName - Return the user string for the specified integer type enum.
/// For example, SignedShort -> "short".
const char *TargetInfo::getTypeName(IntType T) {
@@ -342,6 +374,11 @@ void TargetInfo::adjust(LangOptions &Opts) {
if (Opts.NewAlignOverride)
NewAlign = Opts.NewAlignOverride * getCharWidth();
+
+ // Each unsigned fixed point type has the same number of fractional bits as
+ // its corresponding signed type.
+ PaddingOnUnsignedFixedPoint |= Opts.PaddingOnUnsignedFixedPoint;
+ CheckFixedPointBits();
}
bool TargetInfo::initFeatureMap(
@@ -356,6 +393,14 @@ bool TargetInfo::initFeatureMap(
return true;
}
+TargetInfo::CallingConvKind
+TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
+ if (getCXXABI() != TargetCXXABI::Microsoft &&
+ (ClangABICompat4 || getTriple().getOS() == llvm::Triple::PS4))
+ return CCK_ClangABI4OrPS4;
+ return CCK_Default;
+}
+
LangAS TargetInfo::getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const {
switch (TK) {
case OCLTK_Image:
@@ -688,3 +733,63 @@ bool TargetInfo::validateInputConstraint(
return true;
}
+
+void TargetInfo::CheckFixedPointBits() const {
+ // Check that the number of fractional and integral bits (and maybe sign) can
+ // fit into the bits given for a fixed point type.
+ assert(ShortAccumScale + getShortAccumIBits() + 1 <= ShortAccumWidth);
+ assert(AccumScale + getAccumIBits() + 1 <= AccumWidth);
+ assert(LongAccumScale + getLongAccumIBits() + 1 <= LongAccumWidth);
+ assert(getUnsignedShortAccumScale() + getUnsignedShortAccumIBits() <=
+ ShortAccumWidth);
+ assert(getUnsignedAccumScale() + getUnsignedAccumIBits() <= AccumWidth);
+ assert(getUnsignedLongAccumScale() + getUnsignedLongAccumIBits() <=
+ LongAccumWidth);
+
+ assert(getShortFractScale() + 1 <= ShortFractWidth);
+ assert(getFractScale() + 1 <= FractWidth);
+ assert(getLongFractScale() + 1 <= LongFractWidth);
+ assert(getUnsignedShortFractScale() <= ShortFractWidth);
+ assert(getUnsignedFractScale() <= FractWidth);
+ assert(getUnsignedLongFractScale() <= LongFractWidth);
+
+ // Each unsigned fract type has either the same number of fractional bits
+ // as, or one more fractional bit than, its corresponding signed fract type.
+ assert(getShortFractScale() == getUnsignedShortFractScale() ||
+ getShortFractScale() == getUnsignedShortFractScale() - 1);
+ assert(getFractScale() == getUnsignedFractScale() ||
+ getFractScale() == getUnsignedFractScale() - 1);
+ assert(getLongFractScale() == getUnsignedLongFractScale() ||
+ getLongFractScale() == getUnsignedLongFractScale() - 1);
+
+ // When arranged in order of increasing rank (see 6.3.1.3a), the number of
+ // fractional bits is nondecreasing for each of the following sets of
+ // fixed-point types:
+ // - signed fract types
+ // - unsigned fract types
+ // - signed accum types
+ // - unsigned accum types.
+ assert(getLongFractScale() >= getFractScale() &&
+ getFractScale() >= getShortFractScale());
+ assert(getUnsignedLongFractScale() >= getUnsignedFractScale() &&
+ getUnsignedFractScale() >= getUnsignedShortFractScale());
+ assert(LongAccumScale >= AccumScale && AccumScale >= ShortAccumScale);
+ assert(getUnsignedLongAccumScale() >= getUnsignedAccumScale() &&
+ getUnsignedAccumScale() >= getUnsignedShortAccumScale());
+
+ // When arranged in order of increasing rank (see 6.3.1.3a), the number of
+ // integral bits is nondecreasing for each of the following sets of
+ // fixed-point types:
+ // - signed accum types
+ // - unsigned accum types
+ assert(getLongAccumIBits() >= getAccumIBits() &&
+ getAccumIBits() >= getShortAccumIBits());
+ assert(getUnsignedLongAccumIBits() >= getUnsignedAccumIBits() &&
+ getUnsignedAccumIBits() >= getUnsignedShortAccumIBits());
+
+ // Each signed accum type has at least as many integral bits as its
+ // corresponding unsigned accum type.
+ assert(getShortAccumIBits() >= getUnsignedShortAccumIBits());
+ assert(getAccumIBits() >= getUnsignedAccumIBits());
+ assert(getLongAccumIBits() >= getUnsignedLongAccumIBits());
+}
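Note: with the constructor defaults set earlier in this file, and assuming getAccumIBits() derives integral bits as width minus scale minus sign (as in TargetInfo.h), the signed-type asserts work out as follows; PaddingOnUnsignedFixedPoint defaults to false, so each unsigned type gains one fractional bit instead of a padding bit:

  // _Accum:        AccumWidth = 32, AccumScale = 15
  //   getAccumIBits() = 32 - 15 - 1 = 16 integral bits
  //   check: 15 (fract) + 16 (integral) + 1 (sign) <= 32           OK
  // unsigned _Accum (no padding): scale = 15 + 1 = 16, ibits = 16
  //   check: 16 (fract) + 16 (integral) <= 32                      OK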
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 7deebc06c3ef..1ef2fe3b8141 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -29,6 +29,7 @@
#include "Targets/OSTargets.h"
#include "Targets/PNaCl.h"
#include "Targets/PPC.h"
+#include "Targets/RISCV.h"
#include "Targets/SPIR.h"
#include "Targets/Sparc.h"
#include "Targets/SystemZ.h"
@@ -37,6 +38,7 @@
#include "Targets/X86.h"
#include "Targets/XCore.h"
#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
using namespace clang;
@@ -370,6 +372,17 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
case llvm::Triple::r600:
return new AMDGPUTargetInfo(Triple, Opts);
+ case llvm::Triple::riscv32:
+ // TODO: add cases for FreeBSD, NetBSD, RTEMS once tested.
+ if (os == llvm::Triple::Linux)
+ return new LinuxTargetInfo<RISCV32TargetInfo>(Triple, Opts);
+ return new RISCV32TargetInfo(Triple, Opts);
+ case llvm::Triple::riscv64:
+ // TODO: add cases for FreeBSD, NetBSD, RTEMS once tested.
+ if (os == llvm::Triple::Linux)
+ return new LinuxTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return new RISCV64TargetInfo(Triple, Opts);
+
case llvm::Triple::sparc:
switch (os) {
case llvm::Triple::Linux:
@@ -595,6 +608,10 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
// Set the target CPU if specified.
if (!Opts->CPU.empty() && !Target->setCPU(Opts->CPU)) {
Diags.Report(diag::err_target_unknown_cpu) << Opts->CPU;
+ SmallVector<StringRef, 32> ValidList;
+ Target->fillValidCPUList(ValidList);
+ if (!ValidList.empty())
+ Diags.Report(diag::note_valid_options) << llvm::join(ValidList, ", ");
return nullptr;
}
@@ -621,6 +638,9 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Opts->Features.clear();
for (const auto &F : Features)
Opts->Features.push_back((F.getValue() ? "+" : "-") + F.getKey().str());
+ // Sort here, so we handle the features in a predictable order. (This matters
+ // when we're dealing with features that overlap.)
+ llvm::sort(Opts->Features.begin(), Opts->Features.end());
if (!Target->handleTargetFeatures(Opts->Features, Diags))
return nullptr;
@@ -632,5 +652,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
if (!Target->validateTarget(Diags))
return nullptr;
+ Target->CheckFixedPointBits();
+
return Target.release();
}
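Note on the Targets.cpp changes: RISC-V 32/64 targets are registered (with a Linux specialization), target features are sorted before handleTargetFeatures so overlapping features are processed in a stable order, and an unknown -target-cpu now gets a follow-up note listing every valid name via fillValidCPUList. Illustrative output (CPU names here are examples only):

  error: unknown target CPU 'foo'
  note: valid target CPU values are: generic, cortex-a53, cortex-a57, ...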
diff --git a/lib/Basic/Targets.h b/lib/Basic/Targets.h
index 6fc967ddabee..d450aa3f37ed 100644
--- a/lib/Basic/Targets.h
+++ b/lib/Basic/Targets.h
@@ -1,4 +1,4 @@
-//===------- Targets.h - Declare target feature support -------------------===//
+//===------- Targets.h - Declare target feature support ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 4d3cd121f705..3444591ac593 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -29,26 +29,27 @@ const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
{#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
+#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
+ {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), ABI("aapcs") {
- if (getTriple().getOS() == llvm::Triple::NetBSD ||
- getTriple().getOS() == llvm::Triple::OpenBSD) {
- // NetBSD apparently prefers consistency across ARM targets to
- // consistency across 64-bit targets.
+ if (getTriple().getOS() == llvm::Triple::OpenBSD) {
Int64Type = SignedLongLong;
IntMaxType = SignedLongLong;
} else {
- if (!getTriple().isOSDarwin())
+ if (!getTriple().isOSDarwin() && getTriple().getOS() != llvm::Triple::NetBSD)
WCharType = UnsignedInt;
Int64Type = SignedLong;
IntMaxType = SignedLong;
}
+ // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
+ HasLegalHalfType = true;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
MaxVectorAlign = 128;
@@ -101,6 +102,11 @@ bool AArch64TargetInfo::setCPU(const std::string &Name) {
return isValidCPUName(Name);
}
+void AArch64TargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ llvm::AArch64::fillValidCPUArchList(Values);
+}
+
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
@@ -183,6 +189,11 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if ((FPU & NeonMode) && HasFullFP16)
Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
+ if (HasFullFP16)
+ Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
+
+ if (HasDotProd)
+ Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
switch (ArchKind) {
default:
@@ -220,6 +231,7 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
Crypto = 0;
Unaligned = 1;
HasFullFP16 = 0;
+ HasDotProd = 0;
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
@@ -239,6 +251,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
if (Feature == "+fullfp16")
HasFullFP16 = 1;
+ if (Feature == "+dotprod")
+ HasDotProd = 1;
}
setDataLayout();
@@ -299,7 +313,40 @@ ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
}
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
- {{"w31"}, "wsp"}, {{"x29"}, "fp"}, {{"x30"}, "lr"}, {{"x31"}, "sp"},
+ {{"w31"}, "wsp"},
+ {{"x31"}, "sp"},
+ // GCC rN registers are aliases of xN registers.
+ {{"r0"}, "x0"},
+ {{"r1"}, "x1"},
+ {{"r2"}, "x2"},
+ {{"r3"}, "x3"},
+ {{"r4"}, "x4"},
+ {{"r5"}, "x5"},
+ {{"r6"}, "x6"},
+ {{"r7"}, "x7"},
+ {{"r8"}, "x8"},
+ {{"r9"}, "x9"},
+ {{"r10"}, "x10"},
+ {{"r11"}, "x11"},
+ {{"r12"}, "x12"},
+ {{"r13"}, "x13"},
+ {{"r14"}, "x14"},
+ {{"r15"}, "x15"},
+ {{"r16"}, "x16"},
+ {{"r17"}, "x17"},
+ {{"r18"}, "x18"},
+ {{"r19"}, "x19"},
+ {{"r20"}, "x20"},
+ {{"r21"}, "x21"},
+ {{"r22"}, "x22"},
+ {{"r23"}, "x23"},
+ {{"r24"}, "x24"},
+ {{"r25"}, "x25"},
+ {{"r26"}, "x26"},
+ {{"r27"}, "x27"},
+ {{"r28"}, "x28"},
+ {{"r29", "x29"}, "fp"},
+ {{"r30", "x30"}, "lr"},
// The S/D/Q and W/X registers overlap, but aren't really aliases; we
// don't want to substitute one of these for a different-sized one.
};
@@ -486,6 +533,11 @@ void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
getVisualStudioDefines(Opts, Builder);
}
+TargetInfo::CallingConvKind
+MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
+ return CCK_MicrosoftWin64;
+}
+
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: WindowsARM64TargetInfo(Triple, Opts) {
diff --git a/lib/Basic/Targets/AArch64.h b/lib/Basic/Targets/AArch64.h
index 33268f0f8d99..a9df895e4dad 100644
--- a/lib/Basic/Targets/AArch64.h
+++ b/lib/Basic/Targets/AArch64.h
@@ -33,6 +33,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
unsigned Crypto;
unsigned Unaligned;
unsigned HasFullFP16;
+ unsigned HasDotProd;
llvm::AArch64::ArchKind ArchKind;
static const Builtin::Info BuiltinInfo[];
@@ -46,6 +47,7 @@ public:
bool setABI(const std::string &Name) override;
bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override;
bool useFP16ConversionIntrinsics() const override {
@@ -80,6 +82,11 @@ public:
std::string &SuggestedModifier) const override;
const char *getClobbers() const override;
+ StringRef getConstraintRegister(StringRef Constraint,
+ StringRef Expression) const override {
+ return Expression;
+ }
+
int getEHDataRegisterNumber(unsigned RegNo) const override;
};
@@ -119,6 +126,8 @@ public:
MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
+ TargetInfo::CallingConvKind
+ getCallingConvKind(bool ClangABICompat4) const override;
};
// ARM64 MinGW target
diff --git a/lib/Basic/Targets/AMDGPU.cpp b/lib/Basic/Targets/AMDGPU.cpp
index 4c510e47379f..b6b9aa2f1244 100644
--- a/lib/Basic/Targets/AMDGPU.cpp
+++ b/lib/Basic/Targets/AMDGPU.cpp
@@ -30,64 +30,35 @@ namespace targets {
static const char *const DataLayoutStringR600 =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
- "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
-static const char *const DataLayoutStringSIPrivateIsZero =
- "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
+static const char *const DataLayoutStringAMDGCN =
+ "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
"-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
- "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
-
-static const char *const DataLayoutStringSIGenericIsZero =
- "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32"
- "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
- "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
-
-static const LangASMap AMDGPUPrivIsZeroDefIsGenMap = {
- 4, // Default
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 0, // opencl_private
- 4, // opencl_generic
- 1, // cuda_device
- 2, // cuda_constant
- 3 // cuda_shared
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
+
+const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
+ Generic, // Default
+ Global, // opencl_global
+ Local, // opencl_local
+ Constant, // opencl_constant
+ Private, // opencl_private
+ Generic, // opencl_generic
+ Global, // cuda_device
+ Constant, // cuda_constant
+ Local // cuda_shared
};
-static const LangASMap AMDGPUGenIsZeroDefIsGenMap = {
- 0, // Default
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 5, // opencl_private
- 0, // opencl_generic
- 1, // cuda_device
- 2, // cuda_constant
- 3 // cuda_shared
-};
-
-static const LangASMap AMDGPUPrivIsZeroDefIsPrivMap = {
- 0, // Default
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 0, // opencl_private
- 4, // opencl_generic
- 1, // cuda_device
- 2, // cuda_constant
- 3 // cuda_shared
-};
-
-static const LangASMap AMDGPUGenIsZeroDefIsPrivMap = {
- 5, // Default
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 5, // opencl_private
- 0, // opencl_generic
- 1, // cuda_device
- 2, // cuda_constant
- 3 // cuda_shared
+const LangASMap AMDGPUTargetInfo::AMDGPUDefIsPrivMap = {
+ Private, // Default
+ Global, // opencl_global
+ Local, // opencl_local
+ Constant, // opencl_constant
+ Private, // opencl_private
+ Generic, // opencl_generic
+ Global, // cuda_device
+ Constant, // cuda_constant
+ Local // cuda_shared
};
} // namespace targets
} // namespace clang
@@ -144,7 +115,7 @@ const char *const AMDGPUTargetInfo::GCCRegNames[] = {
"s104", "s105", "s106", "s107", "s108", "s109", "s110", "s111", "s112",
"s113", "s114", "s115", "s116", "s117", "s118", "s119", "s120", "s121",
"s122", "s123", "s124", "s125", "s126", "s127", "exec", "vcc", "scc",
- "m0", "flat_scratch", "exec_lo", "exec_hi", "vcc_lo", "vcc_hi",
+ "m0", "flat_scratch", "exec_lo", "exec_hi", "vcc_lo", "vcc_hi",
"flat_scratch_lo", "flat_scratch_hi"
};
@@ -157,49 +128,66 @@ bool AMDGPUTargetInfo::initFeatureMap(
const std::vector<std::string> &FeatureVec) const {
// XXX - What does the member GPU mean if device name string passed here?
- if (getTriple().getArch() == llvm::Triple::amdgcn) {
+ if (isAMDGCN(getTriple())) {
if (CPU.empty())
- CPU = "tahiti";
-
- switch (parseAMDGCNName(CPU)) {
- case GK_GFX6:
- case GK_GFX7:
- break;
+ CPU = "gfx600";
- case GK_GFX9:
+ switch (parseAMDGCNName(CPU).Kind) {
+ case GK_GFX906:
+ Features["dl-insts"] = true;
+ LLVM_FALLTHROUGH;
+ case GK_GFX904:
+ case GK_GFX902:
+ case GK_GFX900:
Features["gfx9-insts"] = true;
LLVM_FALLTHROUGH;
- case GK_GFX8:
- Features["s-memrealtime"] = true;
+ case GK_GFX810:
+ case GK_GFX803:
+ case GK_GFX802:
+ case GK_GFX801:
Features["16-bit-insts"] = true;
Features["dpp"] = true;
+ Features["s-memrealtime"] = true;
+ break;
+ case GK_GFX704:
+ case GK_GFX703:
+ case GK_GFX702:
+ case GK_GFX701:
+ case GK_GFX700:
+ case GK_GFX601:
+ case GK_GFX600:
break;
-
case GK_NONE:
return false;
default:
- llvm_unreachable("unhandled subtarget");
+ llvm_unreachable("Unhandled GPU!");
}
} else {
if (CPU.empty())
CPU = "r600";
- switch (parseR600Name(CPU)) {
- case GK_R600:
- case GK_R700:
- case GK_EVERGREEN:
- case GK_NORTHERN_ISLANDS:
- break;
- case GK_R600_DOUBLE_OPS:
- case GK_R700_DOUBLE_OPS:
- case GK_EVERGREEN_DOUBLE_OPS:
+ switch (parseR600Name(CPU).Kind) {
case GK_CAYMAN:
+ case GK_CYPRESS:
+ case GK_RV770:
+ case GK_RV670:
// TODO: Add fp64 when implemented.
break;
- case GK_NONE:
- return false;
+ case GK_TURKS:
+ case GK_CAICOS:
+ case GK_BARTS:
+ case GK_SUMO:
+ case GK_REDWOOD:
+ case GK_JUNIPER:
+ case GK_CEDAR:
+ case GK_RV730:
+ case GK_RV710:
+ case GK_RS880:
+ case GK_R630:
+ case GK_R600:
+ break;
default:
- llvm_unreachable("unhandled subtarget");
+ llvm_unreachable("Unhandled GPU!");
}
}
@@ -210,6 +198,7 @@ void AMDGPUTargetInfo::adjustTargetOptions(const CodeGenOptions &CGOpts,
TargetOptions &TargetOpts) const {
bool hasFP32Denormals = false;
bool hasFP64Denormals = false;
+ GPUInfo CGOptsGPU = parseGPUName(TargetOpts.CPU);
for (auto &I : TargetOpts.FeaturesAsWritten) {
if (I == "+fp32-denormals" || I == "-fp32-denormals")
hasFP32Denormals = true;
@@ -218,120 +207,68 @@ void AMDGPUTargetInfo::adjustTargetOptions(const CodeGenOptions &CGOpts,
}
if (!hasFP32Denormals)
TargetOpts.Features.push_back(
- (Twine(hasFullSpeedFMAF32(TargetOpts.CPU) && !CGOpts.FlushDenorm
+ (Twine(CGOptsGPU.HasFastFMAF && !CGOpts.FlushDenorm
? '+'
: '-') +
Twine("fp32-denormals"))
.str());
// Always do not flush fp64 or fp16 denorms.
- if (!hasFP64Denormals && hasFP64)
+ if (!hasFP64Denormals && CGOptsGPU.HasFP64)
TargetOpts.Features.push_back("+fp64-fp16-denormals");
}
-AMDGPUTargetInfo::GPUKind AMDGPUTargetInfo::parseR600Name(StringRef Name) {
- return llvm::StringSwitch<GPUKind>(Name)
- .Case("r600", GK_R600)
- .Case("rv610", GK_R600)
- .Case("rv620", GK_R600)
- .Case("rv630", GK_R600)
- .Case("rv635", GK_R600)
- .Case("rs780", GK_R600)
- .Case("rs880", GK_R600)
- .Case("rv670", GK_R600_DOUBLE_OPS)
- .Case("rv710", GK_R700)
- .Case("rv730", GK_R700)
- .Case("rv740", GK_R700_DOUBLE_OPS)
- .Case("rv770", GK_R700_DOUBLE_OPS)
- .Case("palm", GK_EVERGREEN)
- .Case("cedar", GK_EVERGREEN)
- .Case("sumo", GK_EVERGREEN)
- .Case("sumo2", GK_EVERGREEN)
- .Case("redwood", GK_EVERGREEN)
- .Case("juniper", GK_EVERGREEN)
- .Case("hemlock", GK_EVERGREEN_DOUBLE_OPS)
- .Case("cypress", GK_EVERGREEN_DOUBLE_OPS)
- .Case("barts", GK_NORTHERN_ISLANDS)
- .Case("turks", GK_NORTHERN_ISLANDS)
- .Case("caicos", GK_NORTHERN_ISLANDS)
- .Case("cayman", GK_CAYMAN)
- .Case("aruba", GK_CAYMAN)
- .Default(GK_NONE);
+constexpr AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::InvalidGPU;
+constexpr AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::R600GPUs[];
+constexpr AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::AMDGCNGPUs[];
+
+AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::parseR600Name(StringRef Name) {
+ const auto *Result = llvm::find_if(
+ R600GPUs, [Name](const GPUInfo &GPU) { return GPU.Name == Name; });
+
+ if (Result == std::end(R600GPUs))
+ return InvalidGPU;
+ return *Result;
}
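Note: the StringSwitch over GPU names is replaced by a lookup into constexpr GPUInfo tables, so each entry carries its name, generation, and feature bits in one row, and fillValidCPUList below can walk the very same arrays. A minimal sketch of the pattern with a hypothetical table:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/StringRef.h"
  #include <iterator>

  struct GPUInfo { llvm::StringLiteral Name; bool HasFP64; };
  static constexpr GPUInfo GPUs[] = {
      {{"cedar"}, false}, {{"cypress"}, true}, {{"cayman"}, true}};

  static GPUInfo lookup(llvm::StringRef Name) {
    const auto *It = llvm::find_if(
        GPUs, [Name](const GPUInfo &G) { return G.Name == Name; });
    return It == std::end(GPUs) ? GPUInfo{{""}, false} : *It;
  }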
-AMDGPUTargetInfo::GPUKind AMDGPUTargetInfo::parseAMDGCNName(StringRef Name) {
- return llvm::StringSwitch<GPUKind>(Name)
- .Case("gfx600", GK_GFX6)
- .Case("tahiti", GK_GFX6)
- .Case("gfx601", GK_GFX6)
- .Case("pitcairn", GK_GFX6)
- .Case("verde", GK_GFX6)
- .Case("oland", GK_GFX6)
- .Case("hainan", GK_GFX6)
- .Case("gfx700", GK_GFX7)
- .Case("bonaire", GK_GFX7)
- .Case("kaveri", GK_GFX7)
- .Case("gfx701", GK_GFX7)
- .Case("hawaii", GK_GFX7)
- .Case("gfx702", GK_GFX7)
- .Case("gfx703", GK_GFX7)
- .Case("kabini", GK_GFX7)
- .Case("mullins", GK_GFX7)
- .Case("gfx800", GK_GFX8)
- .Case("iceland", GK_GFX8)
- .Case("gfx801", GK_GFX8)
- .Case("carrizo", GK_GFX8)
- .Case("gfx802", GK_GFX8)
- .Case("tonga", GK_GFX8)
- .Case("gfx803", GK_GFX8)
- .Case("fiji", GK_GFX8)
- .Case("polaris10", GK_GFX8)
- .Case("polaris11", GK_GFX8)
- .Case("gfx804", GK_GFX8)
- .Case("gfx810", GK_GFX8)
- .Case("stoney", GK_GFX8)
- .Case("gfx900", GK_GFX9)
- .Case("gfx901", GK_GFX9)
- .Case("gfx902", GK_GFX9)
- .Case("gfx903", GK_GFX9)
- .Default(GK_NONE);
+AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::parseAMDGCNName(StringRef Name) {
+ const auto *Result = llvm::find_if(
+ AMDGCNGPUs, [Name](const GPUInfo &GPU) { return GPU.Name == Name; });
+
+ if (Result == std::end(AMDGCNGPUs))
+ return InvalidGPU;
+ return *Result;
+}
+
+AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::parseGPUName(StringRef Name) const {
+ if (isAMDGCN(getTriple()))
+ return parseAMDGCNName(Name);
+ else
+ return parseR600Name(Name);
+}
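+// e.g. parseGPUName("gfx900") returns the gfx900 row of AMDGCNGPUs, whose
+// HasFastFMAF flag lets adjustTargetOptions default to "+fp32-denormals"
+// unless denormal flushing was requested.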
+
+void AMDGPUTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ if (isAMDGCN(getTriple()))
+ llvm::for_each(AMDGCNGPUs, [&Values](const GPUInfo &GPU) {
+ Values.emplace_back(GPU.Name);});
+ else
+ llvm::for_each(R600GPUs, [&Values](const GPUInfo &GPU) {
+ Values.emplace_back(GPU.Name);});
}
void AMDGPUTargetInfo::setAddressSpaceMap(bool DefaultIsPrivate) {
- if (isGenericZero(getTriple())) {
- AddrSpaceMap = DefaultIsPrivate ? &AMDGPUGenIsZeroDefIsPrivMap
- : &AMDGPUGenIsZeroDefIsGenMap;
- } else {
- AddrSpaceMap = DefaultIsPrivate ? &AMDGPUPrivIsZeroDefIsPrivMap
- : &AMDGPUPrivIsZeroDefIsGenMap;
- }
+ AddrSpaceMap = DefaultIsPrivate ? &AMDGPUDefIsPrivMap : &AMDGPUDefIsGenMap;
}
AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple),
- GPU(isAMDGCN(Triple) ? GK_GFX6 : parseR600Name(Opts.CPU)),
- hasFP64(false), hasFMAF(false), hasLDEXPF(false),
- AS(isGenericZero(Triple)) {
- if (getTriple().getArch() == llvm::Triple::amdgcn) {
- hasFP64 = true;
- hasFMAF = true;
- hasLDEXPF = true;
- }
- if (getTriple().getArch() == llvm::Triple::r600) {
- if (GPU == GK_EVERGREEN_DOUBLE_OPS || GPU == GK_CAYMAN) {
- hasFMAF = true;
- }
- }
- auto IsGenericZero = isGenericZero(Triple);
- resetDataLayout(getTriple().getArch() == llvm::Triple::amdgcn
- ? (IsGenericZero ? DataLayoutStringSIGenericIsZero
- : DataLayoutStringSIPrivateIsZero)
- : DataLayoutStringR600);
- assert(DataLayout->getAllocaAddrSpace() == AS.Private);
+ GPU(isAMDGCN(Triple) ? AMDGCNGPUs[0] : parseR600Name(Opts.CPU)) {
+ resetDataLayout(isAMDGCN(getTriple()) ? DataLayoutStringAMDGCN
+ : DataLayoutStringR600);
+ assert(DataLayout->getAllocaAddrSpace() == Private);
setAddressSpaceMap(Triple.getOS() == llvm::Triple::Mesa3D ||
- Triple.getEnvironment() == llvm::Triple::OpenCL ||
- Triple.getEnvironmentName() == "amdgizcl" ||
!isAMDGCN(Triple));
UseAddrSpaceMapMangling = true;
@@ -349,7 +286,11 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
void AMDGPUTargetInfo::adjust(LangOptions &Opts) {
TargetInfo::adjust(Opts);
- setAddressSpaceMap(Opts.OpenCL || !isAMDGCN(getTriple()));
+  // TODO: A few places still use the default address space as the private
+  // address space in OpenCL; once those are cleaned up, Opts.OpenCL can be
+  // removed from the following line.
+ setAddressSpaceMap(/*DefaultIsPrivate=*/Opts.OpenCL ||
+ !isAMDGCN(getTriple()));
}
ArrayRef<Builtin::Info> AMDGPUTargetInfo::getTargetBuiltins() const {
@@ -359,15 +300,27 @@ ArrayRef<Builtin::Info> AMDGPUTargetInfo::getTargetBuiltins() const {
void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- if (getTriple().getArch() == llvm::Triple::amdgcn)
+ Builder.defineMacro("__AMD__");
+ Builder.defineMacro("__AMDGPU__");
+
+ if (isAMDGCN(getTriple()))
Builder.defineMacro("__AMDGCN__");
else
Builder.defineMacro("__R600__");
- if (hasFMAF)
+ if (GPU.Kind != GK_NONE)
+ Builder.defineMacro(Twine("__") + Twine(GPU.CanonicalName) + Twine("__"));
+
+ // TODO: __HAS_FMAF__, __HAS_LDEXPF__, __HAS_FP64__ are deprecated and will be
+ // removed in the near future.
+ if (GPU.HasFMAF)
Builder.defineMacro("__HAS_FMAF__");
- if (hasLDEXPF)
+ if (GPU.HasFastFMAF)
+ Builder.defineMacro("FP_FAST_FMAF");
+ if (GPU.HasLDEXPF)
Builder.defineMacro("__HAS_LDEXPF__");
- if (hasFP64)
+ if (GPU.HasFP64)
Builder.defineMacro("__HAS_FP64__");
+ if (GPU.HasFastFMA)
+ Builder.defineMacro("FP_FAST_FMA");
}
diff --git a/lib/Basic/Targets/AMDGPU.h b/lib/Basic/Targets/AMDGPU.h
index a4e070f1cb12..b0221031addf 100644
--- a/lib/Basic/Targets/AMDGPU.h
+++ b/lib/Basic/Targets/AMDGPU.h
@@ -28,60 +28,157 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
- struct LLVM_LIBRARY_VISIBILITY AddrSpace {
- unsigned Generic, Global, Local, Constant, Private;
- AddrSpace(bool IsGenericZero_ = false) {
- if (IsGenericZero_) {
- Generic = 0;
- Global = 1;
- Local = 3;
- Constant = 2;
- Private = 5;
- } else {
- Generic = 4;
- Global = 1;
- Local = 3;
- Constant = 2;
- Private = 0;
- }
- }
+ enum AddrSpace {
+ Generic = 0,
+ Global = 1,
+ Local = 3,
+ Constant = 4,
+ Private = 5
};
+ static const LangASMap AMDGPUDefIsGenMap;
+ static const LangASMap AMDGPUDefIsPrivMap;
+
+ /// GPU kinds supported by the AMDGPU target.
+ enum GPUKind : uint32_t {
+    // No processor specified.
+ GK_NONE = 0,
- /// \brief The GPU profiles supported by the AMDGPU target.
- enum GPUKind {
- GK_NONE,
+ // R600-based processors.
GK_R600,
- GK_R600_DOUBLE_OPS,
- GK_R700,
- GK_R700_DOUBLE_OPS,
- GK_EVERGREEN,
- GK_EVERGREEN_DOUBLE_OPS,
- GK_NORTHERN_ISLANDS,
+ GK_R630,
+ GK_RS880,
+ GK_RV670,
+ GK_RV710,
+ GK_RV730,
+ GK_RV770,
+ GK_CEDAR,
+ GK_CYPRESS,
+ GK_JUNIPER,
+ GK_REDWOOD,
+ GK_SUMO,
+ GK_BARTS,
+ GK_CAICOS,
GK_CAYMAN,
- GK_GFX6,
- GK_GFX7,
- GK_GFX8,
- GK_GFX9
- } GPU;
-
- bool hasFP64 : 1;
- bool hasFMAF : 1;
- bool hasLDEXPF : 1;
- const AddrSpace AS;
-
- static bool hasFullSpeedFMAF32(StringRef GPUName) {
- return parseAMDGCNName(GPUName) >= GK_GFX9;
- }
+ GK_TURKS,
+
+ GK_R600_FIRST = GK_R600,
+ GK_R600_LAST = GK_TURKS,
+
+ // AMDGCN-based processors.
+ GK_GFX600,
+ GK_GFX601,
+ GK_GFX700,
+ GK_GFX701,
+ GK_GFX702,
+ GK_GFX703,
+ GK_GFX704,
+ GK_GFX801,
+ GK_GFX802,
+ GK_GFX803,
+ GK_GFX810,
+ GK_GFX900,
+ GK_GFX902,
+ GK_GFX904,
+ GK_GFX906,
+
+ GK_AMDGCN_FIRST = GK_GFX600,
+ GK_AMDGCN_LAST = GK_GFX906,
+ };
+
+ struct GPUInfo {
+ llvm::StringLiteral Name;
+ llvm::StringLiteral CanonicalName;
+ AMDGPUTargetInfo::GPUKind Kind;
+ bool HasFMAF;
+ bool HasFastFMAF;
+ bool HasLDEXPF;
+ bool HasFP64;
+ bool HasFastFMA;
+ };
+
+ static constexpr GPUInfo InvalidGPU =
+ {{""}, {""}, GK_NONE, false, false, false, false, false};
+ static constexpr GPUInfo R600GPUs[26] = {
+ // Name Canonical Kind Has Has Has Has Has
+ // Name FMAF Fast LDEXPF FP64 Fast
+ // FMAF FMA
+ {{"r600"}, {"r600"}, GK_R600, false, false, false, false, false},
+ {{"rv630"}, {"r600"}, GK_R600, false, false, false, false, false},
+ {{"rv635"}, {"r600"}, GK_R600, false, false, false, false, false},
+ {{"r630"}, {"r630"}, GK_R630, false, false, false, false, false},
+ {{"rs780"}, {"rs880"}, GK_RS880, false, false, false, false, false},
+ {{"rs880"}, {"rs880"}, GK_RS880, false, false, false, false, false},
+ {{"rv610"}, {"rs880"}, GK_RS880, false, false, false, false, false},
+ {{"rv620"}, {"rs880"}, GK_RS880, false, false, false, false, false},
+ {{"rv670"}, {"rv670"}, GK_RV670, false, false, false, false, false},
+ {{"rv710"}, {"rv710"}, GK_RV710, false, false, false, false, false},
+ {{"rv730"}, {"rv730"}, GK_RV730, false, false, false, false, false},
+ {{"rv740"}, {"rv770"}, GK_RV770, false, false, false, false, false},
+ {{"rv770"}, {"rv770"}, GK_RV770, false, false, false, false, false},
+ {{"cedar"}, {"cedar"}, GK_CEDAR, false, false, false, false, false},
+ {{"palm"}, {"cedar"}, GK_CEDAR, false, false, false, false, false},
+ {{"cypress"}, {"cypress"}, GK_CYPRESS, true, false, false, false, false},
+ {{"hemlock"}, {"cypress"}, GK_CYPRESS, true, false, false, false, false},
+ {{"juniper"}, {"juniper"}, GK_JUNIPER, false, false, false, false, false},
+ {{"redwood"}, {"redwood"}, GK_REDWOOD, false, false, false, false, false},
+ {{"sumo"}, {"sumo"}, GK_SUMO, false, false, false, false, false},
+ {{"sumo2"}, {"sumo"}, GK_SUMO, false, false, false, false, false},
+ {{"barts"}, {"barts"}, GK_BARTS, false, false, false, false, false},
+ {{"caicos"}, {"caicos"}, GK_BARTS, false, false, false, false, false},
+ {{"aruba"}, {"cayman"}, GK_CAYMAN, true, false, false, false, false},
+ {{"cayman"}, {"cayman"}, GK_CAYMAN, true, false, false, false, false},
+ {{"turks"}, {"turks"}, GK_TURKS, false, false, false, false, false},
+ };
+ static constexpr GPUInfo AMDGCNGPUs[32] = {
+ // Name Canonical Kind Has Has Has Has Has
+ // Name FMAF Fast LDEXPF FP64 Fast
+ // FMAF FMA
+ {{"gfx600"}, {"gfx600"}, GK_GFX600, true, true, true, true, true},
+ {{"tahiti"}, {"gfx600"}, GK_GFX600, true, true, true, true, true},
+ {{"gfx601"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
+ {{"hainan"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
+ {{"oland"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
+ {{"pitcairn"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
+ {{"verde"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
+ {{"gfx700"}, {"gfx700"}, GK_GFX700, true, false, true, true, true},
+ {{"kaveri"}, {"gfx700"}, GK_GFX700, true, false, true, true, true},
+ {{"gfx701"}, {"gfx701"}, GK_GFX701, true, true, true, true, true},
+ {{"hawaii"}, {"gfx701"}, GK_GFX701, true, true, true, true, true},
+ {{"gfx702"}, {"gfx702"}, GK_GFX702, true, true, true, true, true},
+ {{"gfx703"}, {"gfx703"}, GK_GFX703, true, false, true, true, true},
+ {{"kabini"}, {"gfx703"}, GK_GFX703, true, false, true, true, true},
+ {{"mullins"}, {"gfx703"}, GK_GFX703, true, false, true, true, true},
+ {{"gfx704"}, {"gfx704"}, GK_GFX704, true, false, true, true, true},
+ {{"bonaire"}, {"gfx704"}, GK_GFX704, true, false, true, true, true},
+ {{"gfx801"}, {"gfx801"}, GK_GFX801, true, true, true, true, true},
+ {{"carrizo"}, {"gfx801"}, GK_GFX801, true, true, true, true, true},
+ {{"gfx802"}, {"gfx802"}, GK_GFX802, true, false, true, true, true},
+ {{"iceland"}, {"gfx802"}, GK_GFX802, true, false, true, true, true},
+ {{"tonga"}, {"gfx802"}, GK_GFX802, true, false, true, true, true},
+ {{"gfx803"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
+ {{"fiji"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
+ {{"polaris10"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
+ {{"polaris11"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
+ {{"gfx810"}, {"gfx810"}, GK_GFX810, true, false, true, true, true},
+ {{"stoney"}, {"gfx810"}, GK_GFX810, true, false, true, true, true},
+ {{"gfx900"}, {"gfx900"}, GK_GFX900, true, true, true, true, true},
+ {{"gfx902"}, {"gfx902"}, GK_GFX900, true, true, true, true, true},
+ {{"gfx904"}, {"gfx904"}, GK_GFX904, true, true, true, true, true},
+ {{"gfx906"}, {"gfx906"}, GK_GFX906, true, true, true, true, true},
+ };
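+  // Note that getTargetDefines macro-izes the canonical name rather than the
+  // spelling the user passed: e.g. both "fiji" and "polaris10" end up
+  // defining __gfx803__.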
+
+ static GPUInfo parseR600Name(StringRef Name);
+
+ static GPUInfo parseAMDGCNName(StringRef Name);
+
+ GPUInfo parseGPUName(StringRef Name) const;
+
+ GPUInfo GPU;
static bool isAMDGCN(const llvm::Triple &TT) {
return TT.getArch() == llvm::Triple::amdgcn;
}
- static bool isGenericZero(const llvm::Triple &TT) {
- return TT.getEnvironmentName() == "amdgiz" ||
- TT.getEnvironmentName() == "amdgizcl";
- }
-
public:
AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);
@@ -90,12 +187,10 @@ public:
void adjust(LangOptions &Opts) override;
uint64_t getPointerWidthV(unsigned AddrSpace) const override {
- if (GPU <= GK_CAYMAN)
+ if (GPU.Kind <= GK_R600_LAST)
return 32;
-
- if (AddrSpace == AS.Private || AddrSpace == AS.Local) {
+ if (AddrSpace == Private || AddrSpace == Local)
return 32;
- }
return 64;
}
@@ -194,6 +289,19 @@ public:
return true;
}
+ // \p Constraint will be left pointing at the last character of
+ // the constraint. In practice, it won't be changed unless the
+ // constraint is longer than one character.
+ std::string convertConstraint(const char *&Constraint) const override {
+ const char *Begin = Constraint;
+ TargetInfo::ConstraintInfo Info("", "");
+ if (validateAsmConstraint(Constraint, Info))
+ return std::string(Begin).substr(0, Constraint - Begin + 1);
+
+ Constraint = Begin;
+ return std::string(1, *Constraint);
+ }
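+  // e.g. a single-character constraint such as "v" (VGPR) is returned
+  // unchanged with Constraint left in place; only multi-character
+  // constraints accepted by validateAsmConstraint advance the pointer.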
+
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
StringRef CPU,
@@ -211,24 +319,22 @@ public:
return TargetInfo::CharPtrBuiltinVaList;
}
- static GPUKind parseR600Name(StringRef Name);
-
- static GPUKind parseAMDGCNName(StringRef Name);
-
bool isValidCPUName(StringRef Name) const override {
if (getTriple().getArch() == llvm::Triple::amdgcn)
- return GK_NONE != parseAMDGCNName(Name);
+ return GK_NONE != parseAMDGCNName(Name).Kind;
else
- return GK_NONE != parseR600Name(Name);
+ return GK_NONE != parseR600Name(Name).Kind;
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override {
if (getTriple().getArch() == llvm::Triple::amdgcn)
GPU = parseAMDGCNName(Name);
else
GPU = parseR600Name(Name);
- return GPU != GK_NONE;
+ return GK_NONE != GPU.Kind;
}
void setSupportedOpenCLOpts() override {
@@ -236,16 +342,16 @@ public:
Opts.support("cl_clang_storage_class_specifiers");
Opts.support("cl_khr_icd");
- if (hasFP64)
+ if (GPU.HasFP64)
Opts.support("cl_khr_fp64");
- if (GPU >= GK_EVERGREEN) {
+ if (GPU.Kind >= GK_CEDAR) {
Opts.support("cl_khr_byte_addressable_store");
Opts.support("cl_khr_global_int32_base_atomics");
Opts.support("cl_khr_global_int32_extended_atomics");
Opts.support("cl_khr_local_int32_base_atomics");
Opts.support("cl_khr_local_int32_extended_atomics");
}
- if (GPU >= GK_GFX6) {
+ if (GPU.Kind >= GK_AMDGCN_FIRST) {
Opts.support("cl_khr_fp16");
Opts.support("cl_khr_int64_base_atomics");
Opts.support("cl_khr_int64_extended_atomics");
@@ -273,11 +379,13 @@ public:
}
llvm::Optional<LangAS> getConstantAddressSpace() const override {
- return getLangASFromTargetAS(AS.Constant);
+ return getLangASFromTargetAS(Constant);
}
/// \returns Target specific vtbl ptr address space.
- unsigned getVtblPtrAddressSpace() const override { return AS.Constant; }
+ unsigned getVtblPtrAddressSpace() const override {
+ return static_cast<unsigned>(Constant);
+ }
/// \returns If a target requires an address within a target specific address
/// space \p AddressSpace to be converted in order to be used, then return the
@@ -289,9 +397,9 @@ public:
getDWARFAddressSpace(unsigned AddressSpace) const override {
const unsigned DWARF_Private = 1;
const unsigned DWARF_Local = 2;
- if (AddressSpace == AS.Private) {
+ if (AddressSpace == Private) {
return DWARF_Private;
- } else if (AddressSpace == AS.Local) {
+ } else if (AddressSpace == Local) {
return DWARF_Local;
} else {
return None;
diff --git a/lib/Basic/Targets/ARM.cpp b/lib/Basic/Targets/ARM.cpp
index 6fb0ab41ff5b..efed9b096d56 100644
--- a/lib/Basic/Targets/ARM.cpp
+++ b/lib/Basic/Targets/ARM.cpp
@@ -334,9 +334,20 @@ bool ARMTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
+ std::string ArchFeature;
std::vector<StringRef> TargetFeatures;
llvm::ARM::ArchKind Arch = llvm::ARM::parseArch(getTriple().getArchName());
+ // Map the base architecture to an appropriate target feature, so we don't
+ // rely on the target triple.
+ llvm::ARM::ArchKind CPUArch = llvm::ARM::parseCPUArch(CPU);
+ if (CPUArch == llvm::ARM::ArchKind::INVALID)
+ CPUArch = Arch;
+ if (CPUArch != llvm::ARM::ArchKind::INVALID) {
+ ArchFeature = ("+" + llvm::ARM::getArchName(CPUArch)).str();
+ TargetFeatures.push_back(ArchFeature);
+ }
+
// get default FPU features
unsigned FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
@@ -379,6 +390,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
Unaligned = 1;
SoftFloat = SoftFloatABI = false;
HWDiv = 0;
+ DotProd = 0;
// This does not diagnose illegal cases like having both
// "+vfpv2" and "+vfpv3" or having "+neon" and "+fp-only-sp".
@@ -419,6 +431,10 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
Unaligned = 0;
} else if (Feature == "+fp16") {
HW_FP |= HW_FP_HP;
+ } else if (Feature == "+fullfp16") {
+ HasLegalHalfType = true;
+ } else if (Feature == "+dotprod") {
+ DotProd = true;
}
}
HW_FP &= ~HW_FP_remove;
@@ -478,6 +494,10 @@ bool ARMTargetInfo::isValidCPUName(StringRef Name) const {
llvm::ARM::parseCPUArch(Name) != llvm::ARM::ArchKind::INVALID;
}
+void ARMTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
+ llvm::ARM::fillValidCPUArchList(Values);
+}
+
bool ARMTargetInfo::setCPU(const std::string &Name) {
if (Name != "generic")
setArchInfo(llvm::ARM::parseCPUArch(Name));
@@ -706,6 +726,18 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.UnsafeFPMath)
Builder.defineMacro("__ARM_FP_FAST", "1");
+  // Armv8.2-A FP16 vector intrinsics
+ if ((FPU & NeonFPU) && HasLegalHalfType)
+ Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
+
+ // Armv8.2-A FP16 scalar intrinsics
+ if (HasLegalHalfType)
+ Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
+
+ // Armv8.2-A dot product intrinsics
+ if (DotProd)
+ Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+
switch (ArchKind) {
default:
break;
@@ -956,6 +988,8 @@ WindowsARMTargetInfo::checkCallingConvention(CallingConv CC) const {
return CCCR_Ignore;
case CC_C:
case CC_OpenCLKernel:
+ case CC_PreserveMost:
+ case CC_PreserveAll:
return CCCR_OK;
default:
return CCCR_Warning;
diff --git a/lib/Basic/Targets/ARM.h b/lib/Basic/Targets/ARM.h
index fb0e7e66bea3..9c72c3387f7a 100644
--- a/lib/Basic/Targets/ARM.h
+++ b/lib/Basic/Targets/ARM.h
@@ -69,6 +69,7 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned Crypto : 1;
unsigned DSP : 1;
unsigned Unaligned : 1;
+ unsigned DotProd : 1;
enum {
LDREX_B = (1 << 0), /// byte (8-bit)
@@ -122,6 +123,8 @@ public:
bool hasFeature(StringRef Feature) const override;
bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override;
bool setFPMath(StringRef Name) override;
@@ -153,6 +156,11 @@ public:
std::string &SuggestedModifier) const override;
const char *getClobbers() const override;
+ StringRef getConstraintRegister(StringRef Constraint,
+ StringRef Expression) const override {
+ return Expression;
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
int getEHDataRegisterNumber(unsigned RegNo) const override;
diff --git a/lib/Basic/Targets/AVR.cpp b/lib/Basic/Targets/AVR.cpp
index 3022fe33d76c..9b66449cbca6 100644
--- a/lib/Basic/Targets/AVR.cpp
+++ b/lib/Basic/Targets/AVR.cpp
@@ -28,7 +28,7 @@ struct LLVM_LIBRARY_VISIBILITY MCUInfo {
};
// This list should be kept up-to-date with AVRDevices.td in LLVM.
-static ArrayRef<MCUInfo> AVRMcus = {
+static MCUInfo AVRMcus[] = {
{"at90s1200", "__AVR_AT90S1200__"},
{"attiny11", "__AVR_ATtiny11__"},
{"attiny12", "__AVR_ATtiny12__"},
@@ -273,35 +273,29 @@ static ArrayRef<MCUInfo> AVRMcus = {
} // namespace targets
} // namespace clang
+static constexpr llvm::StringLiteral ValidFamilyNames[] = {
+ "avr1", "avr2", "avr25", "avr3", "avr31",
+ "avr35", "avr4", "avr5", "avr51", "avr6",
+ "avrxmega1", "avrxmega2", "avrxmega3", "avrxmega4", "avrxmega5",
+ "avrxmega6", "avrxmega7", "avrtiny"};
+
bool AVRTargetInfo::isValidCPUName(StringRef Name) const {
- bool IsFamily = llvm::StringSwitch<bool>(Name)
- .Case("avr1", true)
- .Case("avr2", true)
- .Case("avr25", true)
- .Case("avr3", true)
- .Case("avr31", true)
- .Case("avr35", true)
- .Case("avr4", true)
- .Case("avr5", true)
- .Case("avr51", true)
- .Case("avr6", true)
- .Case("avrxmega1", true)
- .Case("avrxmega2", true)
- .Case("avrxmega3", true)
- .Case("avrxmega4", true)
- .Case("avrxmega5", true)
- .Case("avrxmega6", true)
- .Case("avrxmega7", true)
- .Case("avrtiny", true)
- .Default(false);
+ bool IsFamily =
+ llvm::find(ValidFamilyNames, Name) != std::end(ValidFamilyNames);
bool IsMCU =
- std::find_if(AVRMcus.begin(), AVRMcus.end(), [&](const MCUInfo &Info) {
+ llvm::find_if(AVRMcus, [&](const MCUInfo &Info) {
return Info.Name == Name;
- }) != AVRMcus.end();
+ }) != std::end(AVRMcus);
return IsFamily || IsMCU;
}
+void AVRTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
+ Values.append(std::begin(ValidFamilyNames), std::end(ValidFamilyNames));
+ for (const MCUInfo &Info : AVRMcus)
+ Values.push_back(Info.Name);
+}
+
void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("AVR");
@@ -309,12 +303,10 @@ void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVR__");
if (!this->CPU.empty()) {
- auto It =
- std::find_if(AVRMcus.begin(), AVRMcus.end(), [&](const MCUInfo &Info) {
- return Info.Name == this->CPU;
- });
+ auto It = llvm::find_if(
+ AVRMcus, [&](const MCUInfo &Info) { return Info.Name == this->CPU; });
- if (It != AVRMcus.end())
+ if (It != std::end(AVRMcus))
Builder.defineMacro(It->DefineName);
}
}
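+// e.g. a CPU of "attiny11" causes __AVR_ATtiny11__ to be defined alongside
+// the generic AVR macros above.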
diff --git a/lib/Basic/Targets/AVR.h b/lib/Basic/Targets/AVR.h
index 3dfb84f75668..d595f48e8ef7 100644
--- a/lib/Basic/Targets/AVR.h
+++ b/lib/Basic/Targets/AVR.h
@@ -55,7 +55,7 @@ public:
WIntType = SignedInt;
Char32Type = UnsignedLong;
SigAtomicType = SignedChar;
- resetDataLayout("e-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8");
+ resetDataLayout("e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8");
}
void getTargetDefines(const LangOptions &Opts,
@@ -167,6 +167,7 @@ public:
}
bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
bool isValid = isValidCPUName(Name);
if (isValid)
diff --git a/lib/Basic/Targets/BPF.cpp b/lib/Basic/Targets/BPF.cpp
index 54e34f15532d..cf41a09d76f5 100644
--- a/lib/Basic/Targets/BPF.cpp
+++ b/lib/Basic/Targets/BPF.cpp
@@ -14,6 +14,7 @@
#include "BPF.h"
#include "Targets.h"
#include "clang/Basic/MacroBuilder.h"
+#include "llvm/ADT/StringRef.h"
using namespace clang;
using namespace clang::targets;
@@ -23,3 +24,14 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
DefineStd(Builder, "bpf", Opts);
Builder.defineMacro("__BPF__");
}
+
+static constexpr llvm::StringLiteral ValidCPUNames[] = {"generic", "v1", "v2",
+ "probe"};
+
+bool BPFTargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+}
+
+void BPFTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
+ Values.append(std::begin(ValidCPUNames), std::end(ValidCPUNames));
+}
diff --git a/lib/Basic/Targets/BPF.h b/lib/Basic/Targets/BPF.h
index 4dd9cbd9d221..7f97f8189145 100644
--- a/lib/Basic/Targets/BPF.h
+++ b/lib/Basic/Targets/BPF.h
@@ -46,7 +46,14 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- bool hasFeature(StringRef Feature) const override { return Feature == "bpf"; }
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "bpf" || Feature == "alu32" || Feature == "dwarfris";
+ }
+
+ void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
+ bool Enabled) const override {
+ Features[Name] = Enabled;
+ }
ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
@@ -56,6 +63,7 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
+ bool isValidGCCRegisterName(StringRef Name) const override { return true; }
ArrayRef<const char *> getGCCRegNames() const override { return None; }
bool validateAsmConstraint(const char *&Name,
@@ -77,12 +85,9 @@ public:
}
}
- bool isValidCPUName(StringRef Name) const override {
- if (Name == "generic" || Name == "v1" ||
- Name == "v2" || Name == "probe")
- return true;
- return false;
- }
+ bool isValidCPUName(StringRef Name) const override;
+
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
StringRef CPUName(Name);
diff --git a/lib/Basic/Targets/Hexagon.cpp b/lib/Basic/Targets/Hexagon.cpp
index 71d4c1e0f161..0ef1f6db281e 100644
--- a/lib/Basic/Targets/Hexagon.cpp
+++ b/lib/Basic/Targets/Hexagon.cpp
@@ -75,7 +75,6 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
bool HexagonTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
- Features["hvx-double"] = false;
Features["long-calls"] = false;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
@@ -132,6 +131,10 @@ const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
};
bool HexagonTargetInfo::hasFeature(StringRef Feature) const {
+ std::string VS = "hvxv" + HVXVersion;
+ if (Feature == VS)
+ return true;
+
return llvm::StringSwitch<bool>(Feature)
.Case("hexagon", true)
.Case("hvx", HasHVX)
@@ -141,15 +144,29 @@ bool HexagonTargetInfo::hasFeature(StringRef Feature) const {
.Default(false);
}
+struct CPUSuffix {
+ llvm::StringLiteral Name;
+ llvm::StringLiteral Suffix;
+};
+
+static constexpr CPUSuffix Suffixes[] = {
+ {{"hexagonv4"}, {"4"}}, {{"hexagonv5"}, {"5"}},
+ {{"hexagonv55"}, {"55"}}, {{"hexagonv60"}, {"60"}},
+ {{"hexagonv62"}, {"62"}}, {{"hexagonv65"}, {"65"}},
+};
+
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
- return llvm::StringSwitch<const char *>(Name)
- .Case("hexagonv4", "4")
- .Case("hexagonv5", "5")
- .Case("hexagonv55", "55")
- .Case("hexagonv60", "60")
- .Case("hexagonv62", "62")
- .Case("hexagonv65", "65")
- .Default(nullptr);
+ const CPUSuffix *Item = llvm::find_if(
+ Suffixes, [Name](const CPUSuffix &S) { return S.Name == Name; });
+ if (Item == std::end(Suffixes))
+ return nullptr;
+ return Item->Suffix.data();
+}
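+// e.g. getHexagonCPUSuffix("hexagonv62") returns "62"; an unrecognized name
+// returns nullptr, which callers treat as an invalid CPU.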
+
+void HexagonTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ for (const CPUSuffix &Suffix : Suffixes)
+ Values.push_back(Suffix.Name);
}
ArrayRef<Builtin::Info> HexagonTargetInfo::getTargetBuiltins() const {
diff --git a/lib/Basic/Targets/Hexagon.h b/lib/Basic/Targets/Hexagon.h
index 7b0966457c4b..fb4956a9e53d 100644
--- a/lib/Basic/Targets/Hexagon.h
+++ b/lib/Basic/Targets/Hexagon.h
@@ -112,6 +112,8 @@ public:
return getHexagonCPUSuffix(Name);
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override {
if (!isValidCPUName(Name))
return false;
diff --git a/lib/Basic/Targets/Lanai.cpp b/lib/Basic/Targets/Lanai.cpp
index 1d8314af99fb..0e8030c04e5c 100644
--- a/lib/Basic/Targets/Lanai.cpp
+++ b/lib/Basic/Targets/Lanai.cpp
@@ -40,6 +40,10 @@ ArrayRef<TargetInfo::GCCRegAlias> LanaiTargetInfo::getGCCRegAliases() const {
bool LanaiTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::StringSwitch<bool>(Name).Case("v11", true).Default(false);
}
+void LanaiTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ Values.emplace_back("v11");
+}
bool LanaiTargetInfo::setCPU(const std::string &Name) {
CPU = llvm::StringSwitch<CPUKind>(Name).Case("v11", CK_V11).Default(CK_NONE);
diff --git a/lib/Basic/Targets/Lanai.h b/lib/Basic/Targets/Lanai.h
index 5f99c17a5344..b9e6dbe04433 100644
--- a/lib/Basic/Targets/Lanai.h
+++ b/lib/Basic/Targets/Lanai.h
@@ -65,6 +65,8 @@ public:
bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override;
bool hasFeature(StringRef Feature) const override;
diff --git a/lib/Basic/Targets/Mips.cpp b/lib/Basic/Targets/Mips.cpp
index a8a1bcc36361..cbd5a01c3da8 100644
--- a/lib/Basic/Targets/Mips.cpp
+++ b/lib/Basic/Targets/Mips.cpp
@@ -44,26 +44,19 @@ bool MipsTargetInfo::processorSupportsGPR64() const {
return false;
}
+static constexpr llvm::StringLiteral ValidCPUNames[] = {
+ {"mips1"}, {"mips2"}, {"mips3"}, {"mips4"}, {"mips5"},
+ {"mips32"}, {"mips32r2"}, {"mips32r3"}, {"mips32r5"}, {"mips32r6"},
+ {"mips64"}, {"mips64r2"}, {"mips64r3"}, {"mips64r5"}, {"mips64r6"},
+ {"octeon"}, {"p5600"}};
+
bool MipsTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
- .Case("mips1", true)
- .Case("mips2", true)
- .Case("mips3", true)
- .Case("mips4", true)
- .Case("mips5", true)
- .Case("mips32", true)
- .Case("mips32r2", true)
- .Case("mips32r3", true)
- .Case("mips32r5", true)
- .Case("mips32r6", true)
- .Case("mips64", true)
- .Case("mips64r2", true)
- .Case("mips64r3", true)
- .Case("mips64r5", true)
- .Case("mips64r6", true)
- .Case("octeon", true)
- .Case("p5600", true)
- .Default(false);
+ return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+}
+
+void MipsTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ Values.append(std::begin(ValidCPUNames), std::end(ValidCPUNames));
}
void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -207,9 +200,7 @@ ArrayRef<Builtin::Info> MipsTargetInfo::getTargetBuiltins() const {
bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
// microMIPS64R6 backend was removed.
- if ((getTriple().getArch() == llvm::Triple::mips64 ||
- getTriple().getArch() == llvm::Triple::mips64el) &&
- IsMicromips && (ABI == "n32" || ABI == "n64")) {
+ if (getTriple().isMIPS64() && IsMicromips && (ABI == "n32" || ABI == "n64")) {
Diags.Report(diag::err_target_unsupported_cpu_for_micromips) << CPU;
return false;
}
@@ -229,9 +220,7 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
// FIXME: It's valid to use O32 on a mips64/mips64el triple but the backend
// can't handle this yet. It's better to fail here than on the
// backend assertion.
- if ((getTriple().getArch() == llvm::Triple::mips64 ||
- getTriple().getArch() == llvm::Triple::mips64el) &&
- ABI == "o32") {
+ if (getTriple().isMIPS64() && ABI == "o32") {
Diags.Report(diag::err_target_unsupported_abi_for_triple)
<< ABI << getTriple().str();
return false;
@@ -240,9 +229,7 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
// FIXME: It's valid to use N32/N64 on a mips/mipsel triple but the backend
// can't handle this yet. It's better to fail here than on the
// backend assertion.
- if ((getTriple().getArch() == llvm::Triple::mips ||
- getTriple().getArch() == llvm::Triple::mipsel) &&
- (ABI == "n32" || ABI == "n64")) {
+ if (getTriple().isMIPS32() && (ABI == "n32" || ABI == "n64")) {
Diags.Report(diag::err_target_unsupported_abi_for_triple)
<< ABI << getTriple().str();
return false;
diff --git a/lib/Basic/Targets/Mips.h b/lib/Basic/Targets/Mips.h
index 28900f21f86b..11e9ac914430 100644
--- a/lib/Basic/Targets/Mips.h
+++ b/lib/Basic/Targets/Mips.h
@@ -54,6 +54,7 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
enum DspRevEnum { NoDSP, DSP1, DSP2 } DspRev;
bool HasMSA;
bool DisableMadd4;
+ bool UseIndirectJumpHazard;
protected:
bool HasFP64;
@@ -64,13 +65,11 @@ public:
: TargetInfo(Triple), IsMips16(false), IsMicromips(false),
IsNan2008(false), IsAbs2008(false), IsSingleFloat(false),
IsNoABICalls(false), CanUseBSDABICalls(false), FloatABI(HardFloat),
- DspRev(NoDSP), HasMSA(false), DisableMadd4(false), HasFP64(false) {
+ DspRev(NoDSP), HasMSA(false), DisableMadd4(false),
+ UseIndirectJumpHazard(false), HasFP64(false) {
TheCXXABI.set(TargetCXXABI::GenericMIPS);
- setABI((getTriple().getArch() == llvm::Triple::mips ||
- getTriple().getArch() == llvm::Triple::mipsel)
- ? "o32"
- : "n64");
+ setABI(getTriple().isMIPS32() ? "o32" : "n64");
CPU = ABI == "o32" ? "mips32r2" : "mips64r2";
@@ -161,6 +160,7 @@ public:
}
bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
CPU = Name;
@@ -338,6 +338,8 @@ public:
IsAbs2008 = false;
else if (Feature == "+noabicalls")
IsNoABICalls = true;
+ else if (Feature == "+use-indirect-jump-hazard")
+ UseIndirectJumpHazard = true;
}
setDataLayout();
@@ -387,7 +389,9 @@ public:
return llvm::makeArrayRef(NewABIRegAliases);
}
- bool hasInt128Type() const override { return ABI == "n32" || ABI == "n64"; }
+ bool hasInt128Type() const override {
+ return (ABI == "n32" || ABI == "n64") || getTargetOpts().ForceEnableInt128;
+ }
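+  // e.g. passing -fforce-enable-int128 makes __int128 usable even under the
+  // o32 ABI, where it is otherwise unavailable.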
bool validateTarget(DiagnosticsEngine &Diags) const override;
};
diff --git a/lib/Basic/Targets/NVPTX.cpp b/lib/Basic/Targets/NVPTX.cpp
index add3b318aeb6..fd4ee1606061 100644
--- a/lib/Basic/Targets/NVPTX.cpp
+++ b/lib/Basic/Targets/NVPTX.cpp
@@ -40,6 +40,22 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
assert((TargetPointerWidth == 32 || TargetPointerWidth == 64) &&
"NVPTX only supports 32- and 64-bit modes.");
+ PTXVersion = 32;
+ for (const StringRef Feature : Opts.FeaturesAsWritten) {
+ if (!Feature.startswith("+ptx"))
+ continue;
+ PTXVersion = llvm::StringSwitch<unsigned>(Feature)
+ .Case("+ptx61", 61)
+ .Case("+ptx60", 60)
+ .Case("+ptx50", 50)
+ .Case("+ptx43", 43)
+ .Case("+ptx42", 42)
+ .Case("+ptx41", 41)
+ .Case("+ptx40", 40)
+ .Case("+ptx32", 32)
+ .Default(32);
+ }
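+  // e.g. a feature string of "+ptx60" selects PTX ISA 6.0 (PTXVersion = 60);
+  // unrecognized "+ptx*" spellings fall back to the 3.2 default.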
+
TLSSupported = false;
VLASupported = false;
AddrSpaceMap = &NVPTXAddrSpaceMap;
@@ -52,6 +68,9 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (TargetPointerWidth == 32)
resetDataLayout("e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
+ else if (Opts.NVPTXUseShortPointers)
+ resetDataLayout(
+ "e-p3:32:32-p4:32:32-p5:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
else
resetDataLayout("e-i64:64-i128:128-v16:16-v32:32-n16:32:64");
@@ -145,7 +164,6 @@ ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
bool NVPTXTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Cases("ptx", "nvptx", true)
- .Case("satom", GPU >= CudaArch::SM_60) // Atomics w/ scope.
.Default(false);
}
@@ -157,6 +175,21 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
// Set __CUDA_ARCH__ for the GPU specified.
std::string CUDAArchCode = [this] {
switch (GPU) {
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::LAST:
+ break;
case CudaArch::UNKNOWN:
assert(false && "No GPU arch when compiling CUDA device code.");
return "";
@@ -186,6 +219,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "620";
case CudaArch::SM_70:
return "700";
+ case CudaArch::SM_72:
+ return "720";
}
llvm_unreachable("unhandled CudaArch");
}();
diff --git a/lib/Basic/Targets/NVPTX.h b/lib/Basic/Targets/NVPTX.h
index a84870763f54..84d466d2f49f 100644
--- a/lib/Basic/Targets/NVPTX.h
+++ b/lib/Basic/Targets/NVPTX.h
@@ -40,6 +40,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
static const Builtin::Info BuiltinInfo[];
CudaArch GPU;
+ uint32_t PTXVersion;
std::unique_ptr<TargetInfo> HostTarget;
public:
@@ -55,7 +56,8 @@ public:
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override {
- Features["satom"] = GPU >= CudaArch::SM_60;
+ Features[CudaArchToString(GPU)] = true;
+ Features["ptx" + std::to_string(PTXVersion)] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -98,6 +100,12 @@ public:
return StringToCudaArch(Name) != CudaArch::UNKNOWN;
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
+ for (int i = static_cast<int>(CudaArch::SM_20);
+ i < static_cast<int>(CudaArch::LAST); ++i)
+ Values.emplace_back(CudaArchToString(static_cast<CudaArch>(i)));
+ }
+
bool setCPU(const std::string &Name) override {
GPU = StringToCudaArch(Name);
return GPU != CudaArch::UNKNOWN;
diff --git a/lib/Basic/Targets/Nios2.h b/lib/Basic/Targets/Nios2.h
index aa02f8f6262f..ffeb414d4778 100644
--- a/lib/Basic/Targets/Nios2.h
+++ b/lib/Basic/Targets/Nios2.h
@@ -56,6 +56,10 @@ public:
return Name == "nios2r1" || Name == "nios2r2";
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
+ Values.append({"nios2r1", "nios2r2"});
+ }
+
bool setCPU(const std::string &Name) override {
if (isValidCPUName(Name)) {
CPU = Name;
diff --git a/lib/Basic/Targets/OSTargets.h b/lib/Basic/Targets/OSTargets.h
index 5af63615dc5e..d0354784acf9 100644
--- a/lib/Basic/Targets/OSTargets.h
+++ b/lib/Basic/Targets/OSTargets.h
@@ -95,16 +95,22 @@ public:
if (Triple.isMacOSX())
this->TLSSupported = !Triple.isMacOSXVersionLT(10, 7);
else if (Triple.isiOS()) {
- // 64-bit iOS supported it from 8 onwards, 32-bit from 9 onwards.
- if (Triple.getArch() == llvm::Triple::x86_64 ||
- Triple.getArch() == llvm::Triple::aarch64)
+ // 64-bit iOS supported it from 8 onwards, 32-bit device from 9 onwards,
+ // 32-bit simulator from 10 onwards.
+ if (Triple.isArch64Bit())
this->TLSSupported = !Triple.isOSVersionLT(8);
- else if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::arm ||
- Triple.getArch() == llvm::Triple::thumb)
- this->TLSSupported = !Triple.isOSVersionLT(9);
- } else if (Triple.isWatchOS())
- this->TLSSupported = !Triple.isOSVersionLT(2);
+ else if (Triple.isArch32Bit()) {
+ if (!Triple.isSimulatorEnvironment())
+ this->TLSSupported = !Triple.isOSVersionLT(9);
+ else
+ this->TLSSupported = !Triple.isOSVersionLT(10);
+ }
+ } else if (Triple.isWatchOS()) {
+ if (!Triple.isSimulatorEnvironment())
+ this->TLSSupported = !Triple.isOSVersionLT(2);
+ else
+ this->TLSSupported = !Triple.isOSVersionLT(3);
+ }
this->MCountName = "\01mcount";
}
@@ -363,7 +369,7 @@ protected:
public:
NetBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
- this->MCountName = "_mcount";
+ this->MCountName = "__mcount";
}
};
@@ -479,6 +485,7 @@ public:
default:
case llvm::Triple::x86_64:
this->MCountName = ".mcount";
+ this->NewAlign = 256;
break;
}
}
@@ -544,13 +551,24 @@ protected:
Builder.defineMacro("_LARGEFILE_SOURCE");
Builder.defineMacro("_LARGEFILE64_SOURCE");
Builder.defineMacro("__EXTENSIONS__");
- Builder.defineMacro("_REENTRANT");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
SolarisTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
// FIXME: WIntType should be SignedLong
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ break;
+ }
}
};
@@ -599,8 +617,10 @@ protected:
Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
- if (Opts.CPlusPlus17)
- Builder.defineMacro("_MSVC_LANG", "201403L");
+ if (Opts.CPlusPlus2a)
+ Builder.defineMacro("_MSVC_LANG", "201704L");
+ else if (Opts.CPlusPlus17)
+ Builder.defineMacro("_MSVC_LANG", "201703L");
else if (Opts.CPlusPlus14)
Builder.defineMacro("_MSVC_LANG", "201402L");
}
diff --git a/lib/Basic/Targets/PPC.cpp b/lib/Basic/Targets/PPC.cpp
index a44aa0cd96f0..b4eb3b1b97b7 100644
--- a/lib/Basic/Targets/PPC.cpp
+++ b/lib/Basic/Targets/PPC.cpp
@@ -15,7 +15,6 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/ADT/StringSwitch.h"
using namespace clang;
using namespace clang::targets;
@@ -96,7 +95,7 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_CALL_ELF", "2");
// This typically is only for a new enough linker (bfd >= 2.16.2 or gold), but
- // our suppport post-dates this and it should work on all 64-bit ppc linux
+ // our support post-dates this and it should work on all 64-bit ppc linux
// platforms. It is guaranteed to work on all elfv2 platforms.
if (getTriple().getOS() == llvm::Triple::Linux && PointerWidth == 64)
Builder.defineMacro("_CALL_LINUX", "1");
@@ -116,111 +115,37 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
(getTriple().getOS() == llvm::Triple::Darwin && PointerWidth == 64))
Builder.defineMacro("__STRUCT_PARM_ALIGN__", "16");
- // CPU identification.
- ArchDefineTypes defs =
- (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
- .Case("440", ArchDefineName)
- .Case("450", ArchDefineName | ArchDefine440)
- .Case("601", ArchDefineName)
- .Case("602", ArchDefineName | ArchDefinePpcgr)
- .Case("603", ArchDefineName | ArchDefinePpcgr)
- .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
- .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
- .Case("604", ArchDefineName | ArchDefinePpcgr)
- .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
- .Case("620", ArchDefineName | ArchDefinePpcgr)
- .Case("630", ArchDefineName | ArchDefinePpcgr)
- .Case("7400", ArchDefineName | ArchDefinePpcgr)
- .Case("7450", ArchDefineName | ArchDefinePpcgr)
- .Case("750", ArchDefineName | ArchDefinePpcgr)
- .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("a2", ArchDefineA2)
- .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
- .Case("pwr3", ArchDefinePpcgr)
- .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6 |
- ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x |
- ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr9", ArchDefineName | ArchDefinePwr8 | ArchDefinePwr7 |
- ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("power3", ArchDefinePpcgr)
- .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 |
- ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
- ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power9", ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
- ArchDefinePwr6x | ArchDefinePwr6 |
- ArchDefinePwr5x | ArchDefinePwr5 |
- ArchDefinePwr4 | ArchDefinePpcgr |
- ArchDefinePpcsq)
- // powerpc64le automatically defaults to at least power8.
- .Case("ppc64le", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
- ArchDefinePwr6 | ArchDefinePwr5x |
- ArchDefinePwr5 | ArchDefinePwr4 |
- ArchDefinePpcgr | ArchDefinePpcsq)
- .Default(ArchDefineNone);
-
- if (defs & ArchDefineName)
+ if (ArchDefs & ArchDefineName)
Builder.defineMacro(Twine("_ARCH_", StringRef(CPU).upper()));
- if (defs & ArchDefinePpcgr)
+ if (ArchDefs & ArchDefinePpcgr)
Builder.defineMacro("_ARCH_PPCGR");
- if (defs & ArchDefinePpcsq)
+ if (ArchDefs & ArchDefinePpcsq)
Builder.defineMacro("_ARCH_PPCSQ");
- if (defs & ArchDefine440)
+ if (ArchDefs & ArchDefine440)
Builder.defineMacro("_ARCH_440");
- if (defs & ArchDefine603)
+ if (ArchDefs & ArchDefine603)
Builder.defineMacro("_ARCH_603");
- if (defs & ArchDefine604)
+ if (ArchDefs & ArchDefine604)
Builder.defineMacro("_ARCH_604");
- if (defs & ArchDefinePwr4)
+ if (ArchDefs & ArchDefinePwr4)
Builder.defineMacro("_ARCH_PWR4");
- if (defs & ArchDefinePwr5)
+ if (ArchDefs & ArchDefinePwr5)
Builder.defineMacro("_ARCH_PWR5");
- if (defs & ArchDefinePwr5x)
+ if (ArchDefs & ArchDefinePwr5x)
Builder.defineMacro("_ARCH_PWR5X");
- if (defs & ArchDefinePwr6)
+ if (ArchDefs & ArchDefinePwr6)
Builder.defineMacro("_ARCH_PWR6");
- if (defs & ArchDefinePwr6x)
+ if (ArchDefs & ArchDefinePwr6x)
Builder.defineMacro("_ARCH_PWR6X");
- if (defs & ArchDefinePwr7)
+ if (ArchDefs & ArchDefinePwr7)
Builder.defineMacro("_ARCH_PWR7");
- if (defs & ArchDefinePwr8)
+ if (ArchDefs & ArchDefinePwr8)
Builder.defineMacro("_ARCH_PWR8");
- if (defs & ArchDefinePwr9)
+ if (ArchDefs & ArchDefinePwr9)
Builder.defineMacro("_ARCH_PWR9");
- if (defs & ArchDefineA2)
+ if (ArchDefs & ArchDefineA2)
Builder.defineMacro("_ARCH_A2");
- if (defs & ArchDefineA2q) {
+ if (ArchDefs & ArchDefineA2q) {
Builder.defineMacro("_ARCH_A2Q");
Builder.defineMacro("_ARCH_QP");
}
@@ -384,6 +309,14 @@ bool PPCTargetInfo::initFeatureMap(
if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
return false;
+ if (!(ArchDefs & ArchDefinePwr9) && (ArchDefs & ArchDefinePpcgr) &&
+ std::find(FeaturesVec.begin(), FeaturesVec.end(), "+float128") !=
+ FeaturesVec.end()) {
+    // __float128 is only supported on POWER9 and later CPUs.
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128" << CPU;
+ return false;
+ }
+
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -479,57 +412,25 @@ ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
return llvm::makeArrayRef(GCCRegAliases);
}
+static constexpr llvm::StringLiteral ValidCPUNames[] = {
+ {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
+ {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
+ {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
+ {"7450"}, {"g4+"}, {"750"}, {"970"}, {"g5"},
+ {"a2"}, {"a2q"}, {"e500mc"}, {"e5500"}, {"power3"},
+ {"pwr3"}, {"power4"}, {"pwr4"}, {"power5"}, {"pwr5"},
+ {"power5x"}, {"pwr5x"}, {"power6"}, {"pwr6"}, {"power6x"},
+ {"pwr6x"}, {"power7"}, {"pwr7"}, {"power8"}, {"pwr8"},
+ {"power9"}, {"pwr9"}, {"powerpc"}, {"ppc"}, {"powerpc64"},
+ {"ppc64"}, {"powerpc64le"}, {"ppc64le"},
+};
+
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
- .Case("generic", true)
- .Case("440", true)
- .Case("450", true)
- .Case("601", true)
- .Case("602", true)
- .Case("603", true)
- .Case("603e", true)
- .Case("603ev", true)
- .Case("604", true)
- .Case("604e", true)
- .Case("620", true)
- .Case("630", true)
- .Case("g3", true)
- .Case("7400", true)
- .Case("g4", true)
- .Case("7450", true)
- .Case("g4+", true)
- .Case("750", true)
- .Case("970", true)
- .Case("g5", true)
- .Case("a2", true)
- .Case("a2q", true)
- .Case("e500mc", true)
- .Case("e5500", true)
- .Case("power3", true)
- .Case("pwr3", true)
- .Case("power4", true)
- .Case("pwr4", true)
- .Case("power5", true)
- .Case("pwr5", true)
- .Case("power5x", true)
- .Case("pwr5x", true)
- .Case("power6", true)
- .Case("pwr6", true)
- .Case("power6x", true)
- .Case("pwr6x", true)
- .Case("power7", true)
- .Case("pwr7", true)
- .Case("power8", true)
- .Case("pwr8", true)
- .Case("power9", true)
- .Case("pwr9", true)
- .Case("powerpc", true)
- .Case("ppc", true)
- .Case("powerpc64", true)
- .Case("ppc64", true)
- .Case("powerpc64le", true)
- .Case("ppc64le", true)
- .Default(false);
+ return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+}
+
+void PPCTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
+ Values.append(std::begin(ValidCPUNames), std::end(ValidCPUNames));
}
void PPCTargetInfo::adjust(LangOptions &Opts) {
diff --git a/lib/Basic/Targets/PPC.h b/lib/Basic/Targets/PPC.h
index 04bef258e386..439c73a0e326 100644
--- a/lib/Basic/Targets/PPC.h
+++ b/lib/Basic/Targets/PPC.h
@@ -18,6 +18,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
namespace clang {
@@ -25,39 +26,8 @@ namespace targets {
// PPC abstract base class
class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
- static const char *const GCCRegNames[];
- static const TargetInfo::GCCRegAlias GCCRegAliases[];
- std::string CPU;
-
- // Target cpu features.
- bool HasAltivec;
- bool HasVSX;
- bool HasP8Vector;
- bool HasP8Crypto;
- bool HasDirectMove;
- bool HasQPX;
- bool HasHTM;
- bool HasBPERMD;
- bool HasExtDiv;
- bool HasP9Vector;
-
-protected:
- std::string ABI;
-
-public:
- PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
- : TargetInfo(Triple), HasAltivec(false), HasVSX(false),
- HasP8Vector(false), HasP8Crypto(false), HasDirectMove(false),
- HasQPX(false), HasHTM(false), HasBPERMD(false), HasExtDiv(false),
- HasP9Vector(false) {
- SuitableAlign = 128;
- SimdDefaultAlign = 128;
- LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
- }
- /// \brief Flags for architecture specific defines.
+ /// Flags for architecture specific defines.
typedef enum {
ArchDefineNone = 0,
ArchDefineName = 1 << 0, // <name> is substituted for arch name.
@@ -78,6 +48,37 @@ public:
ArchDefineA2q = 1 << 15
} ArchDefineTypes;
+
+ ArchDefineTypes ArchDefs = ArchDefineNone;
+ static const Builtin::Info BuiltinInfo[];
+ static const char *const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ std::string CPU;
+
+ // Target cpu features.
+ bool HasAltivec = false;
+ bool HasVSX = false;
+ bool HasP8Vector = false;
+ bool HasP8Crypto = false;
+ bool HasDirectMove = false;
+ bool HasQPX = false;
+ bool HasHTM = false;
+ bool HasBPERMD = false;
+ bool HasExtDiv = false;
+ bool HasP9Vector = false;
+
+protected:
+ std::string ABI;
+
+public:
+ PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ SuitableAlign = 128;
+ SimdDefaultAlign = 128;
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
+ }
+
// Set the language option for altivec based on our value.
void adjust(LangOptions &Opts) override;
@@ -86,11 +87,66 @@ public:
// 821, 823, 8540, 8548, e300c2, e300c3, e500mc64, e6500, 860, cell,
// titan, rs64.
bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
bool CPUKnown = isValidCPUName(Name);
- if (CPUKnown)
+ if (CPUKnown) {
CPU = Name;
+
+ // CPU identification.
+ ArchDefs =
+ (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
+ .Case("440", ArchDefineName)
+ .Case("450", ArchDefineName | ArchDefine440)
+ .Case("601", ArchDefineName)
+ .Case("602", ArchDefineName | ArchDefinePpcgr)
+ .Case("603", ArchDefineName | ArchDefinePpcgr)
+ .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("604", ArchDefineName | ArchDefinePpcgr)
+ .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
+ .Case("620", ArchDefineName | ArchDefinePpcgr)
+ .Case("630", ArchDefineName | ArchDefinePpcgr)
+ .Case("7400", ArchDefineName | ArchDefinePpcgr)
+ .Case("7450", ArchDefineName | ArchDefinePpcgr)
+ .Case("750", ArchDefineName | ArchDefinePpcgr)
+ .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("a2", ArchDefineA2)
+ .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
+ .Cases("power3", "pwr3", ArchDefinePpcgr)
+ .Cases("power4", "pwr4",
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases("power5", "pwr5",
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Cases("power5x", "pwr5x",
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases("power6", "pwr6",
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases("power6x", "pwr6x",
+ ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Cases("power7", "pwr7",
+ ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ // powerpc64le automatically defaults to at least power8.
+ .Cases("power8", "pwr8", "ppc64le",
+ ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Cases("power9", "pwr9",
+ ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Default(ArchDefineNone);
+ }
return CPUKnown;
}
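+  // e.g. setCPU("pwr8") records ArchDefinePwr8 through ArchDefinePwr4 plus
+  // ArchDefinePpcgr and ArchDefinePpcsq, so getTargetDefines later emits
+  // _ARCH_PWR8 down through _ARCH_PWR4, _ARCH_PPCGR and _ARCH_PPCSQ.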
@@ -310,10 +366,6 @@ public:
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
break;
- case llvm::Triple::NetBSD:
- IntMaxType = SignedLongLong;
- Int64Type = SignedLongLong;
- break;
default:
break;
}
@@ -334,6 +386,15 @@ public:
}
return false;
}
+
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ case CC_Swift:
+ return CCCR_OK;
+ default:
+ return CCCR_Warning;
+ }
+ }
};
class LLVM_LIBRARY_VISIBILITY DarwinPPC32TargetInfo
diff --git a/lib/Basic/Targets/RISCV.cpp b/lib/Basic/Targets/RISCV.cpp
new file mode 100644
index 000000000000..7eb5e6a686a9
--- /dev/null
+++ b/lib/Basic/Targets/RISCV.cpp
@@ -0,0 +1,104 @@
+//===--- RISCV.cpp - Implement RISCV target feature support ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements RISCV TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+ArrayRef<const char *> RISCVTargetInfo::getGCCRegNames() const {
+ static const char *const GCCRegNames[] = {
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+ "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x31"};
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+ArrayRef<TargetInfo::GCCRegAlias> RISCVTargetInfo::getGCCRegAliases() const {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"zero"}, "x0"}, {{"ra"}, "x1"}, {{"sp"}, "x2"}, {{"gp"}, "x3"},
+ {{"tp"}, "x4"}, {{"t0"}, "x5"}, {{"t1"}, "x6"}, {{"t2"}, "x7"},
+ {{"s0"}, "x8"}, {{"s1"}, "x9"}, {{"a0"}, "x10"}, {{"a1"}, "x11"},
+ {{"a2"}, "x12"}, {{"a3"}, "x13"}, {{"a4"}, "x15"}, {{"a5"}, "x15"},
+ {{"a6"}, "x16"}, {{"a7"}, "x17"}, {{"s2"}, "x18"}, {{"s3"}, "x19"},
+ {{"s4"}, "x20"}, {{"s5"}, "x21"}, {{"s6"}, "x22"}, {{"s7"}, "x23"},
+ {{"s8"}, "x24"}, {{"s9"}, "x25"}, {{"s10"}, "x26"}, {{"s11"}, "x27"},
+ {{"t3"}, "x28"}, {{"t4"}, "x29"}, {{"t5"}, "x30"}, {{"t6"}, "x31"}};
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__riscv");
+ bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;
+ Builder.defineMacro("__riscv_xlen", Is64Bit ? "64" : "32");
+ // TODO: modify when more code models and ABIs are supported.
+ Builder.defineMacro("__riscv_cmodel_medlow");
+ Builder.defineMacro("__riscv_float_abi_soft");
+
+ if (HasM) {
+ Builder.defineMacro("__riscv_mul");
+ Builder.defineMacro("__riscv_div");
+ Builder.defineMacro("__riscv_muldiv");
+ }
+
+ if (HasA)
+ Builder.defineMacro("__riscv_atomic");
+
+ if (HasF || HasD) {
+ Builder.defineMacro("__riscv_flen", HasD ? "64" : "32");
+ Builder.defineMacro("__riscv_fdiv");
+ Builder.defineMacro("__riscv_fsqrt");
+ }
+
+ if (HasC)
+ Builder.defineMacro("__riscv_compressed");
+}
+
+/// Return true if the target has this feature; keep in sync with
+/// handleTargetFeatures.
+bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
+ bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("riscv", true)
+ .Case("riscv32", !Is64Bit)
+ .Case("riscv64", Is64Bit)
+ .Case("m", HasM)
+ .Case("a", HasA)
+ .Case("f", HasF)
+ .Case("d", HasD)
+ .Case("c", HasC)
+ .Default(false);
+}
+
+/// Perform initialization based on the user-configured set of features.
+bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature == "+m")
+ HasM = true;
+ else if (Feature == "+a")
+ HasA = true;
+ else if (Feature == "+f")
+ HasF = true;
+ else if (Feature == "+d")
+ HasD = true;
+ else if (Feature == "+c")
+ HasC = true;
+ }
+
+ return true;
+}
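
A quick way to sanity-check the macro block in getTargetDefines above is from
user code; a sketch assuming an rv32 build with the M and A extensions enabled:

    #if defined(__riscv) && __riscv_xlen == 32
    #if defined(__riscv_mul) && defined(__riscv_atomic)
    int fast_mul(int a, int b) { return a * b; } /* M extension present */
    #endif
    #endif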
diff --git a/lib/Basic/Targets/RISCV.h b/lib/Basic/Targets/RISCV.h
new file mode 100644
index 000000000000..f83aae539391
--- /dev/null
+++ b/lib/Basic/Targets/RISCV.h
@@ -0,0 +1,114 @@
+//===--- RISCV.h - Declare RISCV target feature support ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares RISCV TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_RISCV_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_RISCV_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+// RISC-V Target
+class RISCVTargetInfo : public TargetInfo {
+protected:
+ std::string ABI;
+ bool HasM;
+ bool HasA;
+ bool HasF;
+ bool HasD;
+ bool HasC;
+
+public:
+ RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple), HasM(false), HasA(false), HasF(false),
+ HasD(false), HasC(false) {
+ TLSSupported = false;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ SuitableAlign = 128;
+ WCharType = SignedInt;
+ WIntType = UnsignedInt;
+ }
+
+ StringRef getABI() const override { return ABI; }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+
+ bool hasFeature(StringRef Feature) const override;
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+};
+class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
+public:
+ RISCV32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : RISCVTargetInfo(Triple, Opts) {
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ SizeType = UnsignedInt;
+ resetDataLayout("e-m:e-p:32:32-i64:64-n32-S128");
+ }
+
+ bool setABI(const std::string &Name) override {
+ // TODO: support ilp32f and ilp32d ABIs.
+ if (Name == "ilp32") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+};
+class LLVM_LIBRARY_VISIBILITY RISCV64TargetInfo : public RISCVTargetInfo {
+public:
+ RISCV64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : RISCVTargetInfo(Triple, Opts) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ IntMaxType = Int64Type = SignedLong;
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ }
+
+ bool setABI(const std::string &Name) override {
+ // TODO: support lp64f and lp64d ABIs.
+ if (Name == "lp64") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+};
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_RISCV_H
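
For illustration only, a sketch of how the new classes behave once wired into
the target registry (real callers construct targets through the factory in
lib/Basic/Targets.cpp rather than directly):

    llvm::Triple T("riscv64-unknown-elf");
    clang::TargetOptions Opts;
    clang::targets::RISCV64TargetInfo TI(T, Opts);
    assert(TI.setABI("lp64"));   // accepted: the only ABI supported so far
    assert(!TI.setABI("lp64d")); // rejected: lp64f/lp64d are still TODO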
diff --git a/lib/Basic/Targets/SPIR.h b/lib/Basic/Targets/SPIR.h
index c384d4260ca9..9815292fc276 100644
--- a/lib/Basic/Targets/SPIR.h
+++ b/lib/Basic/Targets/SPIR.h
@@ -47,6 +47,7 @@ public:
LongWidth = LongAlign = 64;
AddrSpaceMap = &SPIRAddrSpaceMap;
UseAddrSpaceMapMangling = true;
+ HasLegalHalfType = true;
// Define available target features
// These must be defined in sorted order!
NoAsmVariants = true;
@@ -59,6 +60,10 @@ public:
return Feature == "spir";
}
+  // SPIR supports the half type, and the only LLVM intrinsic allowed in SPIR
+  // is memcpy (as per section 3 of the SPIR spec).
+ bool useFP16ConversionIntrinsics() const override { return false; }
+
ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
const char *getClobbers() const override { return ""; }
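
Since HasLegalHalfType is now set and FP16 conversion intrinsics are disabled,
half arithmetic on SPIR can lower to native half operations instead of float
round-trips. An illustrative OpenCL kernel (assuming cl_khr_fp16 is available):

    #pragma OPENCL EXTENSION cl_khr_fp16 : enable
    __kernel void add_h(__global half *d, half a, half b) {
      d[0] = a + b; // computed directly in half
    }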
diff --git a/lib/Basic/Targets/Sparc.cpp b/lib/Basic/Targets/Sparc.cpp
index 429c1ee3a23c..ee4f309363af 100644
--- a/lib/Basic/Targets/Sparc.cpp
+++ b/lib/Basic/Targets/Sparc.cpp
@@ -20,9 +20,17 @@ using namespace clang;
using namespace clang::targets;
const char *const SparcTargetInfo::GCCRegNames[] = {
+ // Integer registers
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
- "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+ "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+
+ // Floating-point registers
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", "f32",
+ "f34", "f36", "f38", "f40", "f42", "f44", "f46", "f48", "f50", "f52", "f54",
+ "f56", "f58", "f60", "f62",
};
ArrayRef<const char *> SparcTargetInfo::getGCCRegNames() const {
@@ -51,49 +59,81 @@ bool SparcTargetInfo::hasFeature(StringRef Feature) const {
.Default(false);
}
+struct SparcCPUInfo {
+ llvm::StringLiteral Name;
+ SparcTargetInfo::CPUKind Kind;
+ SparcTargetInfo::CPUGeneration Generation;
+};
+
+static constexpr SparcCPUInfo CPUInfo[] = {
+ {{"v8"}, SparcTargetInfo::CK_V8, SparcTargetInfo::CG_V8},
+ {{"supersparc"}, SparcTargetInfo::CK_SUPERSPARC, SparcTargetInfo::CG_V8},
+ {{"sparclite"}, SparcTargetInfo::CK_SPARCLITE, SparcTargetInfo::CG_V8},
+ {{"f934"}, SparcTargetInfo::CK_F934, SparcTargetInfo::CG_V8},
+ {{"hypersparc"}, SparcTargetInfo::CK_HYPERSPARC, SparcTargetInfo::CG_V8},
+ {{"sparclite86x"},
+ SparcTargetInfo::CK_SPARCLITE86X,
+ SparcTargetInfo::CG_V8},
+ {{"sparclet"}, SparcTargetInfo::CK_SPARCLET, SparcTargetInfo::CG_V8},
+ {{"tsc701"}, SparcTargetInfo::CK_TSC701, SparcTargetInfo::CG_V8},
+ {{"v9"}, SparcTargetInfo::CK_V9, SparcTargetInfo::CG_V9},
+ {{"ultrasparc"}, SparcTargetInfo::CK_ULTRASPARC, SparcTargetInfo::CG_V9},
+ {{"ultrasparc3"}, SparcTargetInfo::CK_ULTRASPARC3, SparcTargetInfo::CG_V9},
+ {{"niagara"}, SparcTargetInfo::CK_NIAGARA, SparcTargetInfo::CG_V9},
+ {{"niagara2"}, SparcTargetInfo::CK_NIAGARA2, SparcTargetInfo::CG_V9},
+ {{"niagara3"}, SparcTargetInfo::CK_NIAGARA3, SparcTargetInfo::CG_V9},
+ {{"niagara4"}, SparcTargetInfo::CK_NIAGARA4, SparcTargetInfo::CG_V9},
+ {{"ma2100"}, SparcTargetInfo::CK_MYRIAD2100, SparcTargetInfo::CG_V8},
+ {{"ma2150"}, SparcTargetInfo::CK_MYRIAD2150, SparcTargetInfo::CG_V8},
+ {{"ma2155"}, SparcTargetInfo::CK_MYRIAD2155, SparcTargetInfo::CG_V8},
+ {{"ma2450"}, SparcTargetInfo::CK_MYRIAD2450, SparcTargetInfo::CG_V8},
+ {{"ma2455"}, SparcTargetInfo::CK_MYRIAD2455, SparcTargetInfo::CG_V8},
+ {{"ma2x5x"}, SparcTargetInfo::CK_MYRIAD2x5x, SparcTargetInfo::CG_V8},
+ {{"ma2080"}, SparcTargetInfo::CK_MYRIAD2080, SparcTargetInfo::CG_V8},
+ {{"ma2085"}, SparcTargetInfo::CK_MYRIAD2085, SparcTargetInfo::CG_V8},
+ {{"ma2480"}, SparcTargetInfo::CK_MYRIAD2480, SparcTargetInfo::CG_V8},
+ {{"ma2485"}, SparcTargetInfo::CK_MYRIAD2485, SparcTargetInfo::CG_V8},
+ {{"ma2x8x"}, SparcTargetInfo::CK_MYRIAD2x8x, SparcTargetInfo::CG_V8},
+ // FIXME: the myriad2[.n] spellings are obsolete,
+ // but a grace period is needed to allow updating dependent builds.
+ {{"myriad2"}, SparcTargetInfo::CK_MYRIAD2x5x, SparcTargetInfo::CG_V8},
+ {{"myriad2.1"}, SparcTargetInfo::CK_MYRIAD2100, SparcTargetInfo::CG_V8},
+ {{"myriad2.2"}, SparcTargetInfo::CK_MYRIAD2x5x, SparcTargetInfo::CG_V8},
+ {{"myriad2.3"}, SparcTargetInfo::CK_MYRIAD2x8x, SparcTargetInfo::CG_V8},
+ {{"leon2"}, SparcTargetInfo::CK_LEON2, SparcTargetInfo::CG_V8},
+ {{"at697e"}, SparcTargetInfo::CK_LEON2_AT697E, SparcTargetInfo::CG_V8},
+ {{"at697f"}, SparcTargetInfo::CK_LEON2_AT697F, SparcTargetInfo::CG_V8},
+ {{"leon3"}, SparcTargetInfo::CK_LEON3, SparcTargetInfo::CG_V8},
+ {{"ut699"}, SparcTargetInfo::CK_LEON3_UT699, SparcTargetInfo::CG_V8},
+ {{"gr712rc"}, SparcTargetInfo::CK_LEON3_GR712RC, SparcTargetInfo::CG_V8},
+ {{"leon4"}, SparcTargetInfo::CK_LEON4, SparcTargetInfo::CG_V8},
+ {{"gr740"}, SparcTargetInfo::CK_LEON4_GR740, SparcTargetInfo::CG_V8},
+};
+
+SparcTargetInfo::CPUGeneration
+SparcTargetInfo::getCPUGeneration(CPUKind Kind) const {
+ if (Kind == CK_GENERIC)
+ return CG_V8;
+ const SparcCPUInfo *Item = llvm::find_if(
+ CPUInfo, [Kind](const SparcCPUInfo &Info) { return Info.Kind == Kind; });
+ if (Item == std::end(CPUInfo))
+ llvm_unreachable("Unexpected CPU kind");
+ return Item->Generation;
+}
+
SparcTargetInfo::CPUKind SparcTargetInfo::getCPUKind(StringRef Name) const {
- return llvm::StringSwitch<CPUKind>(Name)
- .Case("v8", CK_V8)
- .Case("supersparc", CK_SUPERSPARC)
- .Case("sparclite", CK_SPARCLITE)
- .Case("f934", CK_F934)
- .Case("hypersparc", CK_HYPERSPARC)
- .Case("sparclite86x", CK_SPARCLITE86X)
- .Case("sparclet", CK_SPARCLET)
- .Case("tsc701", CK_TSC701)
- .Case("v9", CK_V9)
- .Case("ultrasparc", CK_ULTRASPARC)
- .Case("ultrasparc3", CK_ULTRASPARC3)
- .Case("niagara", CK_NIAGARA)
- .Case("niagara2", CK_NIAGARA2)
- .Case("niagara3", CK_NIAGARA3)
- .Case("niagara4", CK_NIAGARA4)
- .Case("ma2100", CK_MYRIAD2100)
- .Case("ma2150", CK_MYRIAD2150)
- .Case("ma2155", CK_MYRIAD2155)
- .Case("ma2450", CK_MYRIAD2450)
- .Case("ma2455", CK_MYRIAD2455)
- .Case("ma2x5x", CK_MYRIAD2x5x)
- .Case("ma2080", CK_MYRIAD2080)
- .Case("ma2085", CK_MYRIAD2085)
- .Case("ma2480", CK_MYRIAD2480)
- .Case("ma2485", CK_MYRIAD2485)
- .Case("ma2x8x", CK_MYRIAD2x8x)
- // FIXME: the myriad2[.n] spellings are obsolete,
- // but a grace period is needed to allow updating dependent builds.
- .Case("myriad2", CK_MYRIAD2x5x)
- .Case("myriad2.1", CK_MYRIAD2100)
- .Case("myriad2.2", CK_MYRIAD2x5x)
- .Case("myriad2.3", CK_MYRIAD2x8x)
- .Case("leon2", CK_LEON2)
- .Case("at697e", CK_LEON2_AT697E)
- .Case("at697f", CK_LEON2_AT697F)
- .Case("leon3", CK_LEON3)
- .Case("ut699", CK_LEON3_UT699)
- .Case("gr712rc", CK_LEON3_GR712RC)
- .Case("leon4", CK_LEON4)
- .Case("gr740", CK_LEON4_GR740)
- .Default(CK_GENERIC);
+ const SparcCPUInfo *Item = llvm::find_if(
+ CPUInfo, [Name](const SparcCPUInfo &Info) { return Info.Name == Name; });
+
+ if (Item == std::end(CPUInfo))
+ return CK_GENERIC;
+ return Item->Kind;
+}
+
+void SparcTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ for (const SparcCPUInfo &Info : CPUInfo)
+ Values.push_back(Info.Name);
}
void SparcTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -178,6 +218,13 @@ void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro(MyriadArchValue, "1");
Builder.defineMacro(MyriadArchValue + "__", "1");
}
+ if (Myriad2Value == "2") {
+ Builder.defineMacro("__ma2x5x", "1");
+ Builder.defineMacro("__ma2x5x__", "1");
+ } else if (Myriad2Value == "3") {
+ Builder.defineMacro("__ma2x8x", "1");
+ Builder.defineMacro("__ma2x8x__", "1");
+ }
Builder.defineMacro("__myriad2__", Myriad2Value);
Builder.defineMacro("__myriad2", Myriad2Value);
}
@@ -195,3 +242,10 @@ void SparcV9TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__sparcv9__");
}
}
+
+void SparcV9TargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ for (const SparcCPUInfo &Info : CPUInfo)
+ if (Info.Generation == CG_V9)
+ Values.push_back(Info.Name);
+}
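
With the table-driven rewrite, name lookup, generation lookup, and CPU-list
filling all share the CPUInfo table as a single source of truth. Expected
behavior, with TI being any SparcTargetInfo (values read off the table above):

    SparcTargetInfo::CPUKind K = TI.getCPUKind("niagara2"); // CK_NIAGARA2
    TI.getCPUGeneration(K);                                 // CG_V9
    TI.getCPUKind("no-such-cpu");                           // CK_GENERIC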
diff --git a/lib/Basic/Targets/Sparc.h b/lib/Basic/Targets/Sparc.h
index aacc26119dfb..af2189f21468 100644
--- a/lib/Basic/Targets/Sparc.h
+++ b/lib/Basic/Targets/Sparc.h
@@ -1,4 +1,4 @@
-//===--- Sparc.h - Declare Sparc target feature support -------------------===//
+//===--- Sparc.h - Declare Sparc target feature support ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -131,48 +131,7 @@ public:
CG_V9,
};
- CPUGeneration getCPUGeneration(CPUKind Kind) const {
- switch (Kind) {
- case CK_GENERIC:
- case CK_V8:
- case CK_SUPERSPARC:
- case CK_SPARCLITE:
- case CK_F934:
- case CK_HYPERSPARC:
- case CK_SPARCLITE86X:
- case CK_SPARCLET:
- case CK_TSC701:
- case CK_MYRIAD2100:
- case CK_MYRIAD2150:
- case CK_MYRIAD2155:
- case CK_MYRIAD2450:
- case CK_MYRIAD2455:
- case CK_MYRIAD2x5x:
- case CK_MYRIAD2080:
- case CK_MYRIAD2085:
- case CK_MYRIAD2480:
- case CK_MYRIAD2485:
- case CK_MYRIAD2x8x:
- case CK_LEON2:
- case CK_LEON2_AT697E:
- case CK_LEON2_AT697F:
- case CK_LEON3:
- case CK_LEON3_UT699:
- case CK_LEON3_GR712RC:
- case CK_LEON4:
- case CK_LEON4_GR740:
- return CG_V8;
- case CK_V9:
- case CK_ULTRASPARC:
- case CK_ULTRASPARC3:
- case CK_NIAGARA:
- case CK_NIAGARA2:
- case CK_NIAGARA3:
- case CK_NIAGARA4:
- return CG_V9;
- }
- llvm_unreachable("Unexpected CPU kind");
- }
+ CPUGeneration getCPUGeneration(CPUKind Kind) const;
CPUKind getCPUKind(StringRef Name) const;
@@ -180,6 +139,8 @@ public:
return getCPUKind(Name) != CK_GENERIC;
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override {
CPU = getCPUKind(Name);
return CPU != CK_GENERIC;
@@ -259,6 +220,8 @@ public:
return getCPUGeneration(SparcTargetInfo::getCPUKind(Name)) == CG_V9;
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override {
if (!SparcTargetInfo::setCPU(Name))
return false;
diff --git a/lib/Basic/Targets/SystemZ.cpp b/lib/Basic/Targets/SystemZ.cpp
index 98f3ae2f72b4..6f06f1fc760c 100644
--- a/lib/Basic/Targets/SystemZ.cpp
+++ b/lib/Basic/Targets/SystemZ.cpp
@@ -30,15 +30,30 @@ const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = {
};
const char *const SystemZTargetInfo::GCCRegNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12", "r13", "r14", "r15", "f0", "f2", "f4", "f6", "f1", "f3",
- "f5", "f7", "f8", "f10", "f12", "f14", "f9", "f11", "f13", "f15"
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "f0", "f2", "f4", "f6", "f1", "f3", "f5", "f7",
+ "f8", "f10", "f12", "f14", "f9", "f11", "f13", "f15",
+ /*ap*/"", "cc", /*fp*/"", /*rp*/"", "a0", "a1",
+ "v16", "v18", "v20", "v22", "v17", "v19", "v21", "v23",
+ "v24", "v26", "v28", "v30", "v25", "v27", "v29", "v31"
+};
+
+const TargetInfo::AddlRegName GCCAddlRegNames[] = {
+ {{"v0"}, 16}, {{"v2"}, 17}, {{"v4"}, 18}, {{"v6"}, 19},
+ {{"v1"}, 20}, {{"v3"}, 21}, {{"v5"}, 22}, {{"v7"}, 23},
+ {{"v8"}, 24}, {{"v10"}, 25}, {{"v12"}, 26}, {{"v14"}, 27},
+ {{"v9"}, 28}, {{"v11"}, 29}, {{"v13"}, 30}, {{"v15"}, 31}
};
ArrayRef<const char *> SystemZTargetInfo::getGCCRegNames() const {
return llvm::makeArrayRef(GCCRegNames);
}
+ArrayRef<TargetInfo::AddlRegName> SystemZTargetInfo::getGCCAddlRegNames() const {
+ return llvm::makeArrayRef(GCCAddlRegNames);
+}
+
bool SystemZTargetInfo::validateAsmConstraint(
const char *&Name, TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
@@ -48,6 +63,7 @@ bool SystemZTargetInfo::validateAsmConstraint(
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
case 'f': // Floating-point register
+ case 'v': // Vector register
Info.setAllowsRegister();
return true;
@@ -67,14 +83,32 @@ bool SystemZTargetInfo::validateAsmConstraint(
}
}
-int SystemZTargetInfo::getISARevision(const StringRef &Name) const {
- return llvm::StringSwitch<int>(Name)
- .Cases("arch8", "z10", 8)
- .Cases("arch9", "z196", 9)
- .Cases("arch10", "zEC12", 10)
- .Cases("arch11", "z13", 11)
- .Cases("arch12", "z14", 12)
- .Default(-1);
+struct ISANameRevision {
+ llvm::StringLiteral Name;
+ int ISARevisionID;
+};
+static constexpr ISANameRevision ISARevisions[] = {
+ {{"arch8"}, 8}, {{"z10"}, 8},
+ {{"arch9"}, 9}, {{"z196"}, 9},
+ {{"arch10"}, 10}, {{"zEC12"}, 10},
+ {{"arch11"}, 11}, {{"z13"}, 11},
+ {{"arch12"}, 12}, {{"z14"}, 12}
+};
+
+int SystemZTargetInfo::getISARevision(StringRef Name) const {
+ const auto Rev =
+ llvm::find_if(ISARevisions, [Name](const ISANameRevision &CR) {
+ return CR.Name == Name;
+ });
+ if (Rev == std::end(ISARevisions))
+ return -1;
+ return Rev->ISARevisionID;
+}
+
+void SystemZTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ for (const ISANameRevision &Rev : ISARevisions)
+ Values.push_back(Rev.Name);
}
bool SystemZTargetInfo::hasFeature(StringRef Feature) const {
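
getISARevision and fillValidCPUList now both walk the ISARevisions table, so
adding a new architecture revision is a one-line change. A sketch of the
lookups, with TI being a SystemZTargetInfo:

    TI.getISARevision("z13");     // 11, same revision as "arch11"
    TI.getISARevision("unknown"); // -1, which isValidCPUName treats as invalid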
diff --git a/lib/Basic/Targets/SystemZ.h b/lib/Basic/Targets/SystemZ.h
index 3023c1d2ea26..842316005ed9 100644
--- a/lib/Basic/Targets/SystemZ.h
+++ b/lib/Basic/Targets/SystemZ.h
@@ -62,6 +62,8 @@ public:
return None;
}
+ ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -74,12 +76,14 @@ public:
return TargetInfo::SystemZBuiltinVaList;
}
- int getISARevision(const StringRef &Name) const;
+ int getISARevision(StringRef Name) const;
bool isValidCPUName(StringRef Name) const override {
return getISARevision(Name) != -1;
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override {
CPU = Name;
ISARevision = getISARevision(CPU);
diff --git a/lib/Basic/Targets/WebAssembly.cpp b/lib/Basic/Targets/WebAssembly.cpp
index 915aad4b563b..b8a2a092aff4 100644
--- a/lib/Basic/Targets/WebAssembly.cpp
+++ b/lib/Basic/Targets/WebAssembly.cpp
@@ -29,19 +29,25 @@ const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
#include "clang/Basic/BuiltinsWebAssembly.def"
};
+static constexpr llvm::StringLiteral ValidCPUNames[] = {
+ {"mvp"}, {"bleeding-edge"}, {"generic"}};
+
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("simd128", SIMDLevel >= SIMD128)
.Case("nontrapping-fptoint", HasNontrappingFPToInt)
+ .Case("sign-ext", HasSignExt)
+ .Case("exception-handling", HasExceptionHandling)
.Default(false);
}
bool WebAssemblyTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
- .Case("mvp", true)
- .Case("bleeding-edge", true)
- .Case("generic", true)
- .Default(false);
+ return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+}
+
+void WebAssemblyTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ Values.append(std::begin(ValidCPUNames), std::end(ValidCPUNames));
}
void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -70,6 +76,22 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasNontrappingFPToInt = false;
continue;
}
+ if (Feature == "+sign-ext") {
+ HasSignExt = true;
+ continue;
+ }
+ if (Feature == "-sign-ext") {
+ HasSignExt = false;
+ continue;
+ }
+ if (Feature == "+exception-handling") {
+ HasExceptionHandling = true;
+ continue;
+ }
+ if (Feature == "-exception-handling") {
+ HasExceptionHandling = false;
+ continue;
+ }
Diags.Report(diag::err_opt_not_valid_with_opt)
<< Feature << "-target-feature";
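
For reference, the new flags are driven through -target-feature as usual; a
cc1 invocation exercising them might look like:

    clang -cc1 -triple wasm32-unknown-unknown \
        -target-feature +sign-ext -target-feature +exception-handling t.c

Any feature string not matched by the handlers above still falls through to
the err_opt_not_valid_with_opt diagnostic.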
diff --git a/lib/Basic/Targets/WebAssembly.h b/lib/Basic/Targets/WebAssembly.h
index ee0073d081e0..c04c5cb6fb3a 100644
--- a/lib/Basic/Targets/WebAssembly.h
+++ b/lib/Basic/Targets/WebAssembly.h
@@ -31,10 +31,13 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
} SIMDLevel;
bool HasNontrappingFPToInt;
+ bool HasSignExt;
+ bool HasExceptionHandling;
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
- : TargetInfo(T), SIMDLevel(NoSIMD), HasNontrappingFPToInt(false) {
+ : TargetInfo(T), SIMDLevel(NoSIMD), HasNontrappingFPToInt(false),
+ HasSignExt(false), HasExceptionHandling(false) {
NoAsmVariants = true;
SuitableAlign = 128;
LargeArrayMinWidth = 128;
@@ -43,9 +46,12 @@ public:
SigAtomicType = SignedLong;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
- SizeType = UnsignedInt;
- PtrDiffType = SignedInt;
- IntPtrType = SignedInt;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ // size_t being unsigned long for both wasm32 and wasm64 makes mangled names
+ // more consistent between the two.
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
}
protected:
@@ -60,6 +66,7 @@ private:
if (CPU == "bleeding-edge") {
Features["simd128"] = true;
Features["nontrapping-fptoint"] = true;
+ Features["sign-ext"] = true;
}
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -70,6 +77,7 @@ private:
DiagnosticsEngine &Diags) final;
bool isValidCPUName(StringRef Name) const final;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const final;
bool setCPU(const std::string &Name) final { return isValidCPUName(Name); }
@@ -115,7 +123,6 @@ public:
explicit WebAssembly32TargetInfo(const llvm::Triple &T,
const TargetOptions &Opts)
: WebAssemblyTargetInfo(T, Opts) {
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128");
}
@@ -132,7 +139,6 @@ public:
: WebAssemblyTargetInfo(T, Opts) {
LongAlign = LongWidth = 64;
PointerAlign = PointerWidth = 64;
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
diff --git a/lib/Basic/Targets/X86.cpp b/lib/Basic/Targets/X86.cpp
index 3efba26a8373..7ae0696ce7e7 100644
--- a/lib/Basic/Targets/X86.cpp
+++ b/lib/Basic/Targets/X86.cpp
@@ -15,8 +15,10 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/TargetParser.h"
namespace clang {
namespace targets {
@@ -131,7 +133,11 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "mmx", true);
break;
- case CK_Icelake:
+ case CK_IcelakeServer:
+ setFeatureEnabledImpl(Features, "pconfig", true);
+ setFeatureEnabledImpl(Features, "wbnoinvd", true);
+ LLVM_FALLTHROUGH;
+ case CK_IcelakeClient:
setFeatureEnabledImpl(Features, "vaes", true);
setFeatureEnabledImpl(Features, "gfni", true);
setFeatureEnabledImpl(Features, "vpclmulqdq", true);
@@ -139,7 +145,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512vnni", true);
setFeatureEnabledImpl(Features, "avx512vbmi2", true);
setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
- setFeatureEnabledImpl(Features, "clwb", true);
+ setFeatureEnabledImpl(Features, "rdpid", true);
LLVM_FALLTHROUGH;
case CK_Cannonlake:
setFeatureEnabledImpl(Features, "avx512ifma", true);
@@ -152,16 +158,16 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512dq", true);
setFeatureEnabledImpl(Features, "avx512bw", true);
setFeatureEnabledImpl(Features, "avx512vl", true);
- if (Kind == CK_SkylakeServer) {
- setFeatureEnabledImpl(Features, "pku", true);
+ setFeatureEnabledImpl(Features, "pku", true);
+ if (Kind != CK_Cannonlake) // CNL inherits all SKX features, except CLWB
setFeatureEnabledImpl(Features, "clwb", true);
- }
LLVM_FALLTHROUGH;
case CK_SkylakeClient:
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
setFeatureEnabledImpl(Features, "mpx", true);
- setFeatureEnabledImpl(Features, "sgx", true);
+ if (Kind != CK_SkylakeServer) // SKX inherits all SKL features, except SGX
+ setFeatureEnabledImpl(Features, "sgx", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
setFeatureEnabledImpl(Features, "rtm", true);
LLVM_FALLTHROUGH;
@@ -176,6 +182,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "fma", true);
+ setFeatureEnabledImpl(Features, "invpcid", true);
setFeatureEnabledImpl(Features, "movbe", true);
LLVM_FALLTHROUGH;
case CK_IvyBridge:
@@ -200,6 +207,7 @@ bool X86TargetInfo::initFeatureMap(
LLVM_FALLTHROUGH;
case CK_Core2:
setFeatureEnabledImpl(Features, "ssse3", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
LLVM_FALLTHROUGH;
case CK_Yonah:
case CK_Prescott:
@@ -218,9 +226,20 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "fxsr", true);
break;
+ case CK_Tremont:
+ setFeatureEnabledImpl(Features, "cldemote", true);
+ setFeatureEnabledImpl(Features, "movdiri", true);
+ setFeatureEnabledImpl(Features, "movdir64b", true);
+ setFeatureEnabledImpl(Features, "gfni", true);
+ setFeatureEnabledImpl(Features, "waitpkg", true);
+ LLVM_FALLTHROUGH;
+ case CK_GoldmontPlus:
+ setFeatureEnabledImpl(Features, "ptwrite", true);
+ setFeatureEnabledImpl(Features, "rdpid", true);
+ setFeatureEnabledImpl(Features, "sgx", true);
+ LLVM_FALLTHROUGH;
case CK_Goldmont:
setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "rdseed", true);
setFeatureEnabledImpl(Features, "xsave", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
@@ -231,6 +250,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "fsgsbase", true);
LLVM_FALLTHROUGH;
case CK_Silvermont:
+ setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "sse4.2", true);
@@ -241,6 +261,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "ssse3", true);
setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
break;
case CK_KNM:
@@ -271,6 +292,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "xsaveopt", true);
setFeatureEnabledImpl(Features, "xsave", true);
setFeatureEnabledImpl(Features, "movbe", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
break;
case CK_K6_2:
@@ -284,6 +306,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "sse4a", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "popcnt", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
LLVM_FALLTHROUGH;
case CK_K8SSE3:
setFeatureEnabledImpl(Features, "sse3", true);
@@ -317,6 +340,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
break;
case CK_ZNVER1:
@@ -340,6 +364,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "prfchw", true);
setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "rdseed", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
setFeatureEnabledImpl(Features, "sha", true);
setFeatureEnabledImpl(Features, "sse4a", true);
setFeatureEnabledImpl(Features, "xsave", true);
@@ -374,6 +399,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "xsave", true);
+ setFeatureEnabledImpl(Features, "sahf", true);
break;
}
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
@@ -412,7 +438,7 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
if (Enabled) {
switch (Level) {
case AVX512F:
- Features["avx512f"] = true;
+ Features["avx512f"] = Features["fma"] = Features["f16c"] = true;
LLVM_FALLTHROUGH;
case AVX2:
Features["avx2"] = true;
@@ -626,6 +652,8 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
} else if (Name == "fma") {
if (Enabled)
setSSELevel(Features, AVX, Enabled);
+ else
+ setSSELevel(Features, AVX512F, Enabled);
} else if (Name == "fma4") {
setXOPLevel(Features, FMA4, Enabled);
} else if (Name == "xop") {
@@ -635,6 +663,8 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
} else if (Name == "f16c") {
if (Enabled)
setSSELevel(Features, AVX, Enabled);
+ else
+ setSSELevel(Features, AVX512F, Enabled);
} else if (Name == "sha") {
if (Enabled)
setSSELevel(Features, SSE2, Enabled);
@@ -732,8 +762,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMPX = true;
} else if (Feature == "+shstk") {
HasSHSTK = true;
- } else if (Feature == "+ibt") {
- HasIBT = true;
} else if (Feature == "+movbe") {
HasMOVBE = true;
} else if (Feature == "+sgx") {
@@ -758,10 +786,34 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasCLFLUSHOPT = true;
} else if (Feature == "+clwb") {
HasCLWB = true;
+ } else if (Feature == "+wbnoinvd") {
+ HasWBNOINVD = true;
} else if (Feature == "+prefetchwt1") {
HasPREFETCHWT1 = true;
} else if (Feature == "+clzero") {
HasCLZERO = true;
+ } else if (Feature == "+cldemote") {
+ HasCLDEMOTE = true;
+ } else if (Feature == "+rdpid") {
+ HasRDPID = true;
+ } else if (Feature == "+retpoline") {
+ HasRetpoline = true;
+ } else if (Feature == "+retpoline-external-thunk") {
+ HasRetpolineExternalThunk = true;
+ } else if (Feature == "+sahf") {
+ HasLAHFSAHF = true;
+ } else if (Feature == "+waitpkg") {
+ HasWAITPKG = true;
+ } else if (Feature == "+movdiri") {
+ HasMOVDIRI = true;
+ } else if (Feature == "+movdir64b") {
+ HasMOVDIR64B = true;
+ } else if (Feature == "+pconfig") {
+ HasPCONFIG = true;
+ } else if (Feature == "+ptwrite") {
+ HasPTWRITE = true;
+ } else if (Feature == "+invpcid") {
+ HasINVPCID = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -882,6 +934,12 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Goldmont:
defineCPUMacros(Builder, "goldmont");
break;
+ case CK_GoldmontPlus:
+ defineCPUMacros(Builder, "goldmont_plus");
+ break;
+ case CK_Tremont:
+ defineCPUMacros(Builder, "tremont");
+ break;
case CK_Nehalem:
case CK_Westmere:
case CK_SandyBridge:
@@ -891,7 +949,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_SkylakeClient:
case CK_SkylakeServer:
case CK_Cannonlake:
- case CK_Icelake:
+ case CK_IcelakeClient:
+ case CK_IcelakeServer:
// FIXME: Historically, we defined this legacy name, it would be nice to
// remove it at some point. We've never exposed fine-grained names for
// recent primary x86 CPUs, and we should keep it that way.
@@ -1087,12 +1146,12 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__XSAVES__");
if (HasPKU)
Builder.defineMacro("__PKU__");
- if (HasCX16)
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
if (HasCLFLUSHOPT)
Builder.defineMacro("__CLFLUSHOPT__");
if (HasCLWB)
Builder.defineMacro("__CLWB__");
+ if (HasWBNOINVD)
+ Builder.defineMacro("__WBNOINVD__");
if (HasMPX)
Builder.defineMacro("__MPX__");
if (HasSHSTK)
@@ -1103,6 +1162,22 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__PREFETCHWT1__");
if (HasCLZERO)
Builder.defineMacro("__CLZERO__");
+ if (HasRDPID)
+ Builder.defineMacro("__RDPID__");
+ if (HasCLDEMOTE)
+ Builder.defineMacro("__CLDEMOTE__");
+ if (HasWAITPKG)
+ Builder.defineMacro("__WAITPKG__");
+ if (HasMOVDIRI)
+ Builder.defineMacro("__MOVDIRI__");
+ if (HasMOVDIR64B)
+ Builder.defineMacro("__MOVDIR64B__");
+ if (HasPCONFIG)
+ Builder.defineMacro("__PCONFIG__");
+ if (HasPTWRITE)
+ Builder.defineMacro("__PTWRITE__");
+ if (HasINVPCID)
+ Builder.defineMacro("__INVPCID__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -1182,6 +1257,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
}
if (CPU >= CK_i586)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ if (HasCX16)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
if (HasFloat128)
Builder.defineMacro("__SIZEOF_FLOAT128__", "16");
@@ -1210,6 +1287,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512ifma", true)
.Case("bmi", true)
.Case("bmi2", true)
+ .Case("cldemote", true)
.Case("clflushopt", true)
.Case("clwb", true)
.Case("clzero", true)
@@ -1220,20 +1298,27 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("fsgsbase", true)
.Case("fxsr", true)
.Case("gfni", true)
+ .Case("invpcid", true)
.Case("lwp", true)
.Case("lzcnt", true)
.Case("mmx", true)
.Case("movbe", true)
+ .Case("movdiri", true)
+ .Case("movdir64b", true)
.Case("mpx", true)
.Case("mwaitx", true)
.Case("pclmul", true)
+ .Case("pconfig", true)
.Case("pku", true)
.Case("popcnt", true)
.Case("prefetchwt1", true)
.Case("prfchw", true)
+ .Case("ptwrite", true)
+ .Case("rdpid", true)
.Case("rdrnd", true)
.Case("rdseed", true)
.Case("rtm", true)
+ .Case("sahf", true)
.Case("sgx", true)
.Case("sha", true)
.Case("shstk", true)
@@ -1248,6 +1333,8 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("tbm", true)
.Case("vaes", true)
.Case("vpclmulqdq", true)
+ .Case("wbnoinvd", true)
+ .Case("waitpkg", true)
.Case("x87", true)
.Case("xop", true)
.Case("xsave", true)
@@ -1278,6 +1365,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512ifma", HasAVX512IFMA)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
+ .Case("cldemote", HasCLDEMOTE)
.Case("clflushopt", HasCLFLUSHOPT)
.Case("clwb", HasCLWB)
.Case("clzero", HasCLZERO)
@@ -1288,23 +1376,31 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("fsgsbase", HasFSGSBASE)
.Case("fxsr", HasFXSR)
.Case("gfni", HasGFNI)
- .Case("ibt", HasIBT)
+ .Case("invpcid", HasINVPCID)
.Case("lwp", HasLWP)
.Case("lzcnt", HasLZCNT)
.Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
.Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
.Case("mmx", MMX3DNowLevel >= MMX)
.Case("movbe", HasMOVBE)
+ .Case("movdiri", HasMOVDIRI)
+ .Case("movdir64b", HasMOVDIR64B)
.Case("mpx", HasMPX)
.Case("mwaitx", HasMWAITX)
.Case("pclmul", HasPCLMUL)
+ .Case("pconfig", HasPCONFIG)
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
.Case("prefetchwt1", HasPREFETCHWT1)
.Case("prfchw", HasPRFCHW)
+ .Case("ptwrite", HasPTWRITE)
+ .Case("rdpid", HasRDPID)
.Case("rdrnd", HasRDRND)
.Case("rdseed", HasRDSEED)
+ .Case("retpoline", HasRetpoline)
+ .Case("retpoline-external-thunk", HasRetpolineExternalThunk)
.Case("rtm", HasRTM)
+ .Case("sahf", HasLAHFSAHF)
.Case("sgx", HasSGX)
.Case("sha", HasSHA)
.Case("shstk", HasSHSTK)
@@ -1318,6 +1414,8 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("tbm", HasTBM)
.Case("vaes", HasVAES)
.Case("vpclmulqdq", HasVPCLMULQDQ)
+ .Case("wbnoinvd", HasWBNOINVD)
+ .Case("waitpkg", HasWAITPKG)
.Case("x86", true)
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
@@ -1341,6 +1439,95 @@ bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
.Default(false);
}
+static llvm::X86::ProcessorFeatures getFeature(StringRef Name) {
+ return llvm::StringSwitch<llvm::X86::ProcessorFeatures>(Name)
+#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, llvm::X86::ENUM)
+#include "llvm/Support/X86TargetParser.def"
+ ;
+  // Note: this function should only be used after ensuring the value is
+  // correct; it asserts if the value is out of range.
+}
+
+static unsigned getFeaturePriority(llvm::X86::ProcessorFeatures Feat) {
+ enum class FeatPriority {
+#define FEATURE(FEAT) FEAT,
+#include "clang/Basic/X86Target.def"
+ };
+ switch (Feat) {
+#define FEATURE(FEAT) \
+ case llvm::X86::FEAT: \
+ return static_cast<unsigned>(FeatPriority::FEAT);
+#include "clang/Basic/X86Target.def"
+ default:
+ llvm_unreachable("No Feature Priority for non-CPUSupports Features");
+ }
+}
+
+unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
+  // Valid CPUs have a 'key feature'; the CPU's priority compares just better
+  // than the priority of that feature on its own.
+ CPUKind Kind = getCPUKind(Name);
+ if (Kind != CK_Generic) {
+ switch (Kind) {
+ default:
+ llvm_unreachable(
+ "CPU Type without a key feature used in 'target' attribute");
+#define PROC_WITH_FEAT(ENUM, STR, IS64, KEY_FEAT) \
+ case CK_##ENUM: \
+ return (getFeaturePriority(llvm::X86::KEY_FEAT) << 1) + 1;
+#include "clang/Basic/X86Target.def"
+ }
+ }
+
+ // Now we know we have a feature, so get its priority and shift it a few so
+ // that we have sufficient room for the CPUs (above).
+ return getFeaturePriority(getFeature(Name)) << 1;
+}
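
The encoding gives each CPU a sort slot immediately above its key feature: a
bare feature named in the attribute sorts at P << 1, while a CPU keyed on that
feature sorts at (P << 1) + 1. A worked sketch with an assumed priority P = 5:

    // target("avx2")             -> 5 << 1       == 10
    // CPU whose KEY_FEAT is AVX2 -> (5 << 1) + 1 == 11, sorts just above it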
+
+bool X86TargetInfo::validateCPUSpecificCPUDispatch(StringRef Name) const {
+ return llvm::StringSwitch<bool>(Name)
+#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, true)
+#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME) .Case(NEW_NAME, true)
+#include "clang/Basic/X86Target.def"
+ .Default(false);
+}
+
+static StringRef CPUSpecificCPUDispatchNameDealias(StringRef Name) {
+ return llvm::StringSwitch<StringRef>(Name)
+#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME) .Case(NEW_NAME, NAME)
+#include "clang/Basic/X86Target.def"
+ .Default(Name);
+}
+
+char X86TargetInfo::CPUSpecificManglingCharacter(StringRef Name) const {
+ return llvm::StringSwitch<char>(CPUSpecificCPUDispatchNameDealias(Name))
+#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, MANGLING)
+#include "clang/Basic/X86Target.def"
+ .Default(0);
+}
+
+void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
+ StringRef Name, llvm::SmallVectorImpl<StringRef> &Features) const {
+ StringRef WholeList =
+ llvm::StringSwitch<StringRef>(CPUSpecificCPUDispatchNameDealias(Name))
+#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, FEATURES)
+#include "clang/Basic/X86Target.def"
+ .Default("");
+ WholeList.split(Features, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+}
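
These hooks back the ICC-compatible cpu_specific/cpu_dispatch multiversioning
attributes; a short usage sketch:

    __attribute__((cpu_specific(ivybridge))) void work(void) { /* IVB body */ }
    __attribute__((cpu_specific(generic)))   void work(void) { /* fallback  */ }
    __attribute__((cpu_dispatch(ivybridge, generic))) void work(void);

CPUSpecificManglingCharacter supplies the per-version mangling suffix, and
getCPUSpecificCPUDispatchFeatures expands each version's FEATURES list from
X86Target.def into target features.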
+
+std::string X86TargetInfo::getCPUKindCanonicalName(CPUKind Kind) const {
+ switch (Kind) {
+ case CK_Generic:
+ return "";
+#define PROC(ENUM, STRING, IS64BIT) \
+ case CK_##ENUM: \
+ return STRING;
+#include "clang/Basic/X86Target.def"
+ }
+ llvm_unreachable("Invalid CPUKind");
+}
+
// We can't use a generic validation scheme for the cpus accepted here
// versus subtarget cpus accepted in the target attribute because the
// variables initialized by the runtime only support the below currently
@@ -1426,7 +1613,7 @@ bool X86TargetInfo::validateAsmConstraint(
case 'y': // Any MMX register.
case 'v': // Any {X,Y,Z}MM register (Arch & context dependent)
case 'x': // Any SSE register.
- case 'k': // Any AVX512 mask register (same as Yk, additionaly allows k0
+ case 'k': // Any AVX512 mask register (same as Yk, additionally allows k0
// for intermediate k reg operations).
case 'Q': // Any register accessible as [r]h: a, b, c, and d.
case 'R': // "Legacy" registers: ax, bx, cx, dx, di, si, sp, bp.
@@ -1554,8 +1741,6 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
bool X86TargetInfo::checkCPUKind(CPUKind Kind) const {
// Perform any per-CPU checks necessary to determine if this CPU is
// acceptable.
- // FIXME: This results in terrible diagnostics. Clang just says the CPU is
- // invalid without explaining *why*.
switch (Kind) {
case CK_Generic:
// No processor selected!
@@ -1568,6 +1753,18 @@ bool X86TargetInfo::checkCPUKind(CPUKind Kind) const {
llvm_unreachable("Unhandled CPU kind");
}
+void X86TargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
+#define PROC(ENUM, STRING, IS64BIT) \
+ if (IS64BIT || getTriple().getArch() == llvm::Triple::x86) \
+ Values.emplace_back(STRING);
+  // Go through CPUKind checking to ensure that the alias is de-aliased and
+  // the 64-bit requirement is enforced.
+#define PROC_ALIAS(ENUM, ALIAS) \
+ if (checkCPUKind(getCPUKind(ALIAS))) \
+ Values.emplace_back(ALIAS);
+#include "clang/Basic/X86Target.def"
+}
+
X86TargetInfo::CPUKind X86TargetInfo::getCPUKind(StringRef CPU) const {
return llvm::StringSwitch<CPUKind>(CPU)
#define PROC(ENUM, STRING, IS64BIT) .Case(STRING, CK_##ENUM)
diff --git a/lib/Basic/Targets/X86.h b/lib/Basic/Targets/X86.h
index cbd6a2d24fb5..b6cb27977b69 100644
--- a/lib/Basic/Targets/X86.h
+++ b/lib/Basic/Targets/X86.h
@@ -81,7 +81,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasSHA = false;
bool HasMPX = false;
bool HasSHSTK = false;
- bool HasIBT = false;
bool HasSGX = false;
bool HasCX16 = false;
bool HasFXSR = false;
@@ -91,13 +90,26 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasXSAVES = false;
bool HasMWAITX = false;
bool HasCLZERO = false;
+ bool HasCLDEMOTE = false;
+ bool HasPCONFIG = false;
bool HasPKU = false;
bool HasCLFLUSHOPT = false;
bool HasCLWB = false;
bool HasMOVBE = false;
bool HasPREFETCHWT1 = false;
-
- /// \brief Enumeration of all of the X86 CPUs supported by Clang.
+ bool HasRDPID = false;
+ bool HasRetpoline = false;
+ bool HasRetpolineExternalThunk = false;
+ bool HasLAHFSAHF = false;
+ bool HasWBNOINVD = false;
+ bool HasWAITPKG = false;
+ bool HasMOVDIRI = false;
+ bool HasMOVDIR64B = false;
+ bool HasPTWRITE = false;
+ bool HasINVPCID = false;
+
+protected:
+ /// Enumeration of all of the X86 CPUs supported by Clang.
///
/// Each enumeration represents a particular CPU supported by Clang. These
/// loosely correspond to the options passed to '-march' or '-mtune' flags.
@@ -111,6 +123,8 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
CPUKind getCPUKind(StringRef CPU) const;
+ std::string getCPUKindCanonicalName(CPUKind Kind) const;
+
enum FPMathKind { FP_Default, FP_SSE, FP_387 } FPMath = FP_Default;
public:
@@ -136,6 +150,14 @@ public:
bool validateCpuIs(StringRef Name) const override;
+ bool validateCPUSpecificCPUDispatch(StringRef Name) const override;
+
+ char CPUSpecificManglingCharacter(StringRef Name) const override;
+
+ void getCPUSpecificCPUDispatchFeatures(
+ StringRef Name,
+ llvm::SmallVectorImpl<StringRef> &Features) const override;
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -156,6 +178,17 @@ public:
bool validateInputSize(StringRef Constraint, unsigned Size) const override;
+  bool
+  checkCFProtectionReturnSupported(DiagnosticsEngine &Diags) const override {
+    return true;
+  }
+
+  bool
+  checkCFProtectionBranchSupported(DiagnosticsEngine &Diags) const override {
+    return true;
+  }
+
virtual bool validateOperandSize(StringRef Constraint, unsigned Size) const;
std::string convertConstraint(const char *&Constraint) const override;
@@ -163,8 +196,8 @@ public:
return "~{dirflag},~{fpsr},~{flags}";
}
- StringRef getConstraintRegister(const StringRef &Constraint,
- const StringRef &Expression) const override {
+ StringRef getConstraintRegister(StringRef Constraint,
+ StringRef Expression) const override {
StringRef::iterator I, E;
for (I = Constraint.begin(), E = Constraint.end(); I != E; ++I) {
if (isalpha(*I))
@@ -252,10 +285,17 @@ public:
return checkCPUKind(getCPUKind(Name));
}
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+
bool setCPU(const std::string &Name) override {
return checkCPUKind(CPU = getCPUKind(Name));
}
+ bool supportsMultiVersioning() const override {
+ return getTriple().isOSBinFormatELF();
+ }
+ unsigned multiVersionSortPriority(StringRef Name) const override;
+
bool setFPMath(StringRef Name) override;
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
@@ -267,6 +307,7 @@ public:
case CC_X86VectorCall:
case CC_X86RegCall:
case CC_C:
+ case CC_PreserveMost:
case CC_Swift:
case CC_X86Pascal:
case CC_IntelOclBicc:
@@ -309,9 +350,11 @@ public:
(1 << TargetInfo::LongDouble));
// x86-32 has atomics up to 8 bytes
- // FIXME: Check that we actually have cmpxchg8b before setting
- // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ CPUKind Kind = getCPUKind(Opts.CPU);
+ if (Kind >= CK_i586 || Kind == CK_Generic)
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ else if (Kind >= CK_i486)
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
}
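
A sketch of the effect: cmpxchg8b first appears on the i586, so the gate above
means

    // -m32 -march=i486  : MaxAtomicInlineWidth == 32; 64-bit atomics lower to
    //                     __atomic_* libcalls
    // -m32 -march=i586+ : MaxAtomicInlineWidth == 64; 64-bit atomics inline
    //                     via cmpxchg8b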
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -706,6 +749,11 @@ public:
Builder.defineMacro("_M_X64", "100");
Builder.defineMacro("_M_AMD64", "100");
}
+
+ TargetInfo::CallingConvKind
+ getCallingConvKind(bool ClangABICompat4) const override {
+ return CCK_MicrosoftWin64;
+ }
};
// x86-64 MinGW target
diff --git a/lib/Basic/VersionTuple.cpp b/lib/Basic/VersionTuple.cpp
deleted file mode 100644
index 9c73fd98a174..000000000000
--- a/lib/Basic/VersionTuple.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-//===- VersionTuple.cpp - Version Number Handling ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the VersionTuple class, which represents a version in
-// the form major[.minor[.subminor]].
-//
-//===----------------------------------------------------------------------===//
-#include "clang/Basic/VersionTuple.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace clang;
-
-std::string VersionTuple::getAsString() const {
- std::string Result;
- {
- llvm::raw_string_ostream Out(Result);
- Out << *this;
- }
- return Result;
-}
-
-raw_ostream& clang::operator<<(raw_ostream &Out,
- const VersionTuple &V) {
- Out << V.getMajor();
- if (Optional<unsigned> Minor = V.getMinor())
- Out << (V.usesUnderscores() ? '_' : '.') << *Minor;
- if (Optional<unsigned> Subminor = V.getSubminor())
- Out << (V.usesUnderscores() ? '_' : '.') << *Subminor;
- if (Optional<unsigned> Build = V.getBuild())
- Out << (V.usesUnderscores() ? '_' : '.') << *Build;
- return Out;
-}
-
-static bool parseInt(StringRef &input, unsigned &value) {
- assert(value == 0);
- if (input.empty()) return true;
-
- char next = input[0];
- input = input.substr(1);
- if (next < '0' || next > '9') return true;
- value = (unsigned) (next - '0');
-
- while (!input.empty()) {
- next = input[0];
- if (next < '0' || next > '9') return false;
- input = input.substr(1);
- value = value * 10 + (unsigned) (next - '0');
- }
-
- return false;
-}
-
-bool VersionTuple::tryParse(StringRef input) {
- unsigned major = 0, minor = 0, micro = 0, build = 0;
-
- // Parse the major version, [0-9]+
- if (parseInt(input, major)) return true;
-
- if (input.empty()) {
- *this = VersionTuple(major);
- return false;
- }
-
- // If we're not done, parse the minor version, \.[0-9]+
- if (input[0] != '.') return true;
- input = input.substr(1);
- if (parseInt(input, minor)) return true;
-
- if (input.empty()) {
- *this = VersionTuple(major, minor);
- return false;
- }
-
- // If we're not done, parse the micro version, \.[0-9]+
- if (input[0] != '.') return true;
- input = input.substr(1);
- if (parseInt(input, micro)) return true;
-
- if (input.empty()) {
- *this = VersionTuple(major, minor, micro);
- return false;
- }
-
- // If we're not done, parse the micro version, \.[0-9]+
- if (input[0] != '.') return true;
- input = input.substr(1);
- if (parseInt(input, build)) return true;
-
- // If we have characters left over, it's an error.
- if (!input.empty()) return true;
-
- *this = VersionTuple(major, minor, micro, build);
- return false;
-}
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
index 9d44597dc3fb..bcfcbdbb9014 100644
--- a/lib/Basic/VirtualFileSystem.cpp
+++ b/lib/Basic/VirtualFileSystem.cpp
@@ -1,4 +1,4 @@
-//===- VirtualFileSystem.cpp - Virtual File System Layer --------*- C++ -*-===//
+//===- VirtualFileSystem.cpp - Virtual File System Layer ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,30 +6,57 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
// This file implements the VirtualFileSystem interface.
+//
//===----------------------------------------------------------------------===//
#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Chrono.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errc.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <limits>
+#include <map>
#include <memory>
+#include <string>
+#include <system_error>
#include <utility>
+#include <vector>
using namespace clang;
-using namespace clang::vfs;
+using namespace vfs;
using namespace llvm;
+
using llvm::sys::fs::file_status;
using llvm::sys::fs::file_type;
using llvm::sys::fs::perms;
@@ -38,13 +65,13 @@ using llvm::sys::fs::UniqueID;
Status::Status(const file_status &Status)
: UID(Status.getUniqueID()), MTime(Status.getLastModificationTime()),
User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
- Type(Status.type()), Perms(Status.permissions()), IsVFSMapped(false) {}
+ Type(Status.type()), Perms(Status.permissions()) {}
Status::Status(StringRef Name, UniqueID UID, sys::TimePoint<> MTime,
uint32_t User, uint32_t Group, uint64_t Size, file_type Type,
perms Perms)
: Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
- Type(Type), Perms(Perms), IsVFSMapped(false) {}
+ Type(Type), Perms(Perms) {}
Status Status::copyWithNewName(const Status &In, StringRef NewName) {
return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
@@ -62,28 +89,34 @@ bool Status::equivalent(const Status &Other) const {
assert(isStatusKnown() && Other.isStatusKnown());
return getUniqueID() == Other.getUniqueID();
}
+
bool Status::isDirectory() const {
return Type == file_type::directory_file;
}
+
bool Status::isRegularFile() const {
return Type == file_type::regular_file;
}
+
bool Status::isOther() const {
return exists() && !isRegularFile() && !isDirectory() && !isSymlink();
}
+
bool Status::isSymlink() const {
return Type == file_type::symlink_file;
}
+
bool Status::isStatusKnown() const {
return Type != file_type::status_error;
}
+
bool Status::exists() const {
return isStatusKnown() && Type != file_type::file_not_found;
}
-File::~File() {}
+File::~File() = default;
-FileSystem::~FileSystem() {}
+FileSystem::~FileSystem() = default;
ErrorOr<std::unique_ptr<MemoryBuffer>>
FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
@@ -97,7 +130,7 @@ FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
if (llvm::sys::path::is_absolute(Path))
- return std::error_code();
+ return {};
auto WorkingDir = getCurrentWorkingDirectory();
if (!WorkingDir)
@@ -106,6 +139,11 @@ std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
return llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
}
+std::error_code FileSystem::getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const {
+ return errc::operation_not_permitted;
+}
+
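
The base-class hook defaults to operation_not_permitted so existing file
systems are unaffected; RealFileSystem and OverlayFileSystem override it
below. A usage sketch:

    IntrusiveRefCntPtr<vfs::FileSystem> FS = vfs::getRealFileSystem();
    SmallString<256> Real;
    if (std::error_code EC = FS->getRealPath("/tmp/./x", Real))
      llvm::errs() << "getRealPath: " << EC.message() << "\n";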
bool FileSystem::exists(const Twine &Path) {
auto Status = status(Path);
return Status && Status->exists();
@@ -118,6 +156,7 @@ static bool isTraversalComponent(StringRef Component) {
static bool pathHasTraversal(StringRef Path) {
using namespace llvm::sys;
+
for (StringRef Comp : llvm::make_range(path::begin(Path), path::end(Path)))
if (isTraversalComponent(Comp))
return true;
@@ -130,12 +169,15 @@ static bool pathHasTraversal(StringRef Path) {
//===-----------------------------------------------------------------------===/
namespace {
-/// \brief Wrapper around a raw file descriptor.
+
+/// Wrapper around a raw file descriptor.
class RealFile : public File {
+ friend class RealFileSystem;
+
int FD;
Status S;
std::string RealName;
- friend class RealFileSystem;
+
RealFile(int FD, StringRef NewName, StringRef NewRealPathName)
: FD(FD), S(NewName, {}, {}, {}, {}, {},
llvm::sys::fs::file_type::status_error, {}),
@@ -145,6 +187,7 @@ class RealFile : public File {
public:
~RealFile() override;
+
ErrorOr<Status> status() override;
ErrorOr<std::string> getName() override;
ErrorOr<std::unique_ptr<MemoryBuffer>> getBuffer(const Twine &Name,
@@ -153,7 +196,9 @@ public:
bool IsVolatile) override;
std::error_code close() override;
};
-} // end anonymous namespace
+
+} // namespace
+
RealFile::~RealFile() { close(); }
ErrorOr<Status> RealFile::status() {
@@ -186,7 +231,8 @@ std::error_code RealFile::close() {
}
namespace {
-/// \brief The file system according to your operating system.
+
+/// The file system according to your operating system.
class RealFileSystem : public FileSystem {
public:
ErrorOr<Status> status(const Twine &Path) override;
@@ -195,8 +241,11 @@ public:
llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+ std::error_code getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const override;
};
-} // end anonymous namespace
+
+} // namespace
ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
sys::fs::file_status RealStatus;
@@ -209,7 +258,8 @@ ErrorOr<std::unique_ptr<File>>
RealFileSystem::openFileForRead(const Twine &Name) {
int FD;
SmallString<256> RealName;
- if (std::error_code EC = sys::fs::openFileForRead(Name, FD, &RealName))
+ if (std::error_code EC =
+ sys::fs::openFileForRead(Name, FD, sys::fs::OF_None, &RealName))
return EC;
return std::unique_ptr<File>(new RealFile(FD, Name.str(), RealName.str()));
}
@@ -232,39 +282,50 @@ std::error_code RealFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
return llvm::sys::fs::set_current_path(Path);
}
+std::error_code
+RealFileSystem::getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const {
+ return llvm::sys::fs::real_path(Path, Output);
+}
+
IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
static IntrusiveRefCntPtr<FileSystem> FS = new RealFileSystem();
return FS;
}
namespace {
+
class RealFSDirIter : public clang::vfs::detail::DirIterImpl {
llvm::sys::fs::directory_iterator Iter;
+
public:
RealFSDirIter(const Twine &Path, std::error_code &EC) : Iter(Path, EC) {
- if (!EC && Iter != llvm::sys::fs::directory_iterator()) {
+ if (Iter != llvm::sys::fs::directory_iterator()) {
llvm::sys::fs::file_status S;
- EC = llvm::sys::fs::status(Iter->path(), S, true);
+ std::error_code ErrorCode = llvm::sys::fs::status(Iter->path(), S, true);
CurrentEntry = Status::copyWithNewName(S, Iter->path());
+ if (!EC)
+ EC = ErrorCode;
}
}
std::error_code increment() override {
std::error_code EC;
Iter.increment(EC);
- if (EC) {
- return EC;
- } else if (Iter == llvm::sys::fs::directory_iterator()) {
+ if (Iter == llvm::sys::fs::directory_iterator()) {
CurrentEntry = Status();
} else {
llvm::sys::fs::file_status S;
- EC = llvm::sys::fs::status(Iter->path(), S, true);
+ std::error_code ErrorCode = llvm::sys::fs::status(Iter->path(), S, true);
CurrentEntry = Status::copyWithNewName(S, Iter->path());
+ if (!EC)
+ EC = ErrorCode;
}
return EC;
}
};
-}
+
+} // namespace
directory_iterator RealFileSystem::dir_begin(const Twine &Dir,
std::error_code &EC) {
@@ -274,6 +335,7 @@ directory_iterator RealFileSystem::dir_begin(const Twine &Dir,
//===-----------------------------------------------------------------------===/
// OverlayFileSystem implementation
//===-----------------------------------------------------------------------===/
+
OverlayFileSystem::OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> BaseFS) {
FSList.push_back(std::move(BaseFS));
}
@@ -311,17 +373,28 @@ OverlayFileSystem::getCurrentWorkingDirectory() const {
// All file systems are synchronized, just take the first working directory.
return FSList.front()->getCurrentWorkingDirectory();
}
+
std::error_code
OverlayFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
for (auto &FS : FSList)
if (std::error_code EC = FS->setCurrentWorkingDirectory(Path))
return EC;
- return std::error_code();
+ return {};
+}
+
+std::error_code
+OverlayFileSystem::getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const {
+ for (auto &FS : FSList)
+ if (FS->exists(Path))
+ return FS->getRealPath(Path, Output);
+ return errc::no_such_file_or_directory;
}
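
A short sketch of the overlay behavior introduced here: the query is forwarded to the first file system in the list that reports the path as existing, and a miss in every layer surfaces as ENOENT. The in-memory handle below is an assumed, pre-built overlay:

    IntrusiveRefCntPtr<vfs::OverlayFileSystem> OFS(
        new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
    OFS->pushOverlay(MemFS); // assumed IntrusiveRefCntPtr<InMemoryFileSystem>
    SmallString<128> Out;
    // Returns errc::no_such_file_or_directory when no layer has the path.
    std::error_code EC = OFS->getRealPath("include/a.h", Out);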
-clang::vfs::detail::DirIterImpl::~DirIterImpl() { }
+clang::vfs::detail::DirIterImpl::~DirIterImpl() = default;
namespace {
+
class OverlayFSDirIterImpl : public clang::vfs::detail::DirIterImpl {
OverlayFileSystem &Overlays;
std::string Path;
@@ -340,7 +413,7 @@ class OverlayFSDirIterImpl : public clang::vfs::detail::DirIterImpl {
if (CurrentDirIter != directory_iterator())
break; // found
}
- return std::error_code();
+ return {};
}
std::error_code incrementDirIter(bool IsFirstTime) {
@@ -379,7 +452,8 @@ public:
std::error_code increment() override { return incrementImpl(false); }
};
-} // end anonymous namespace
+
+} // namespace
directory_iterator OverlayFileSystem::dir_begin(const Twine &Dir,
std::error_code &EC) {
@@ -389,6 +463,7 @@ directory_iterator OverlayFileSystem::dir_begin(const Twine &Dir,
namespace clang {
namespace vfs {
+
namespace detail {
enum InMemoryNodeKind { IME_File, IME_Directory };
@@ -402,13 +477,15 @@ class InMemoryNode {
public:
InMemoryNode(Status Stat, InMemoryNodeKind Kind)
: Stat(std::move(Stat)), Kind(Kind) {}
- virtual ~InMemoryNode() {}
+ virtual ~InMemoryNode() = default;
+
const Status &getStatus() const { return Stat; }
InMemoryNodeKind getKind() const { return Kind; }
virtual std::string toString(unsigned Indent) const = 0;
};
namespace {
+
class InMemoryFile : public InMemoryNode {
std::unique_ptr<llvm::MemoryBuffer> Buffer;
@@ -417,9 +494,11 @@ public:
: InMemoryNode(std::move(Stat), IME_File), Buffer(std::move(Buffer)) {}
llvm::MemoryBuffer *getBuffer() { return Buffer.get(); }
+
std::string toString(unsigned Indent) const override {
return (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
}
+
static bool classof(const InMemoryNode *N) {
return N->getKind() == IME_File;
}
@@ -433,6 +512,7 @@ public:
explicit InMemoryFileAdaptor(InMemoryFile &Node) : Node(Node) {}
llvm::ErrorOr<Status> status() override { return Node.getStatus(); }
+
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
bool IsVolatile) override {
@@ -440,9 +520,11 @@ public:
return llvm::MemoryBuffer::getMemBuffer(
Buf->getBuffer(), Buf->getBufferIdentifier(), RequiresNullTerminator);
}
- std::error_code close() override { return std::error_code(); }
+
+ std::error_code close() override { return {}; }
};
-} // end anonymous namespace
+
+} // namespace
class InMemoryDirectory : public InMemoryNode {
std::map<std::string, std::unique_ptr<InMemoryNode>> Entries;
@@ -450,34 +532,38 @@ class InMemoryDirectory : public InMemoryNode {
public:
InMemoryDirectory(Status Stat)
: InMemoryNode(std::move(Stat), IME_Directory) {}
+
InMemoryNode *getChild(StringRef Name) {
auto I = Entries.find(Name);
if (I != Entries.end())
return I->second.get();
return nullptr;
}
+
InMemoryNode *addChild(StringRef Name, std::unique_ptr<InMemoryNode> Child) {
return Entries.insert(make_pair(Name, std::move(Child)))
.first->second.get();
}
- typedef decltype(Entries)::const_iterator const_iterator;
+ using const_iterator = decltype(Entries)::const_iterator;
+
const_iterator begin() const { return Entries.begin(); }
const_iterator end() const { return Entries.end(); }
std::string toString(unsigned Indent) const override {
std::string Result =
(std::string(Indent, ' ') + getStatus().getName() + "\n").str();
- for (const auto &Entry : Entries) {
+ for (const auto &Entry : Entries)
Result += Entry.second->toString(Indent + 2);
- }
return Result;
}
+
static bool classof(const InMemoryNode *N) {
return N->getKind() == IME_Directory;
}
};
-}
+
+} // namespace detail
InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
: Root(new detail::InMemoryDirectory(
@@ -486,7 +572,7 @@ InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
llvm::sys::fs::perms::all_all))),
UseNormalizedPaths(UseNormalizedPaths) {}
-InMemoryFileSystem::~InMemoryFileSystem() {}
+InMemoryFileSystem::~InMemoryFileSystem() = default;
std::string InMemoryFileSystem::toString() const {
return Root->toString(/*Indent=*/0);
@@ -645,13 +731,15 @@ InMemoryFileSystem::openFileForRead(const Twine &Path) {
}
namespace {
+
/// Adaptor from InMemoryDir::iterator to directory_iterator.
class InMemoryDirIterator : public clang::vfs::detail::DirIterImpl {
detail::InMemoryDirectory::const_iterator I;
detail::InMemoryDirectory::const_iterator E;
public:
- InMemoryDirIterator() {}
+ InMemoryDirIterator() = default;
+
explicit InMemoryDirIterator(detail::InMemoryDirectory &Dir)
: I(Dir.begin()), E(Dir.end()) {
if (I != E)
@@ -663,10 +751,11 @@ public:
// When we're at the end, make CurrentEntry invalid and DirIterImpl will do
// the rest.
CurrentEntry = I != E ? I->second->getStatus() : Status();
- return std::error_code();
+ return {};
}
};
-} // end anonymous namespace
+
+} // namespace
directory_iterator InMemoryFileSystem::dir_begin(const Twine &Dir,
std::error_code &EC) {
@@ -697,11 +786,25 @@ std::error_code InMemoryFileSystem::setCurrentWorkingDirectory(const Twine &P) {
if (!Path.empty())
WorkingDirectory = Path.str();
- return std::error_code();
-}
+ return {};
}
+
+std::error_code
+InMemoryFileSystem::getRealPath(const Twine &Path,
+ SmallVectorImpl<char> &Output) const {
+ auto CWD = getCurrentWorkingDirectory();
+ if (!CWD || CWD->empty())
+ return errc::operation_not_permitted;
+ Path.toVector(Output);
+ if (auto EC = makeAbsolute(Output))
+ return EC;
+ llvm::sys::path::remove_dots(Output, /*remove_dot_dot=*/true);
+ return {};
}
+} // namespace vfs
+} // namespace clang
+
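The in-memory variant only canonicalizes lexically; a sketch of its contract, with illustrative values:

    vfs::InMemoryFileSystem MemFS;
    MemFS.setCurrentWorkingDirectory("/work");
    SmallString<128> Real;
    // "./x.h" -> "/work/x.h": makeAbsolute() plus remove_dots(). No symlink
    // resolution happens; the whole tree is synthetic.
    std::error_code EC = MemFS.getRealPath("./x.h", Real);
    // Without a working directory the call returns
    // errc::operation_not_permitted, as coded above.
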
//===-----------------------------------------------------------------------===/
// RedirectingFileSystem implementation
//===-----------------------------------------------------------------------===/
@@ -713,14 +816,15 @@ enum EntryKind {
EK_File
};
-/// \brief A single file or directory in the VFS.
+/// A single file or directory in the VFS.
class Entry {
EntryKind Kind;
std::string Name;
public:
- virtual ~Entry();
Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
+ virtual ~Entry() = default;
+
StringRef getName() const { return Name; }
EntryKind getKind() const { return Kind; }
};
@@ -737,14 +841,20 @@ public:
S(std::move(S)) {}
RedirectingDirectoryEntry(StringRef Name, Status S)
: Entry(EK_Directory, Name), S(std::move(S)) {}
+
Status getStatus() { return S; }
+
void addContent(std::unique_ptr<Entry> Content) {
Contents.push_back(std::move(Content));
}
+
Entry *getLastContent() const { return Contents.back().get(); }
- typedef decltype(Contents)::iterator iterator;
+
+ using iterator = decltype(Contents)::iterator;
+
iterator contents_begin() { return Contents.begin(); }
iterator contents_end() { return Contents.end(); }
+
static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
};
@@ -755,21 +865,27 @@ public:
NK_External,
NK_Virtual
};
+
private:
std::string ExternalContentsPath;
NameKind UseName;
+
public:
RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
NameKind UseName)
: Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
UseName(UseName) {}
+
StringRef getExternalContentsPath() const { return ExternalContentsPath; }
- /// \brief whether to use the external path as the name for this file.
+
+  /// Whether to use the external path as the name for this file.
bool useExternalName(bool GlobalUseExternalName) const {
return UseName == NK_NotSet ? GlobalUseExternalName
: (UseName == NK_External);
}
+
NameKind getUseName() const { return UseName; }
+
static bool classof(const Entry *E) { return E->getKind() == EK_File; }
};
@@ -785,10 +901,11 @@ public:
RedirectingDirectoryEntry::iterator Begin,
RedirectingDirectoryEntry::iterator End,
std::error_code &EC);
+
std::error_code increment() override;
};
-/// \brief A virtual file system parsed from a YAML file.
+/// A virtual file system parsed from a YAML file.
///
/// Currently, this class allows creating virtual directories and mapping
/// virtual file paths to existing external files, available in \c ExternalFS.
@@ -844,10 +961,14 @@ public:
/// /path/to/file). However, any directory that contains more than one child
/// must be uniquely represented by a directory entry.
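
For context, a minimal overlay description that the parser further down accepts, wrapped in a MemoryBuffer the way RedirectingFileSystem::create() expects; all paths here are illustrative:

    auto Buffer = llvm::MemoryBuffer::getMemBuffer(
        "{ 'version': 0,\n"
        "  'roots': [{ 'name': '/vdir', 'type': 'directory',\n"
        "    'contents': [{ 'name': 'f.h', 'type': 'file',\n"
        "      'external-contents': '/real/f.h' }]}]}\n");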
class RedirectingFileSystem : public vfs::FileSystem {
+ friend class RedirectingFileSystemParser;
+
/// The root(s) of the virtual file system.
std::vector<std::unique_ptr<Entry>> Roots;
- /// \brief The file system to use for external references.
+
+ /// The file system to use for external references.
IntrusiveRefCntPtr<FileSystem> ExternalFS;
+
/// If IsRelativeOverlay is set, this represents the directory
/// path that should be prefixed to each 'external-contents' entry
/// when reading from YAML files.
@@ -856,7 +977,7 @@ class RedirectingFileSystem : public vfs::FileSystem {
/// @name Configuration
/// @{
- /// \brief Whether to perform case-sensitive comparisons.
+ /// Whether to perform case-sensitive comparisons.
///
/// Currently, case-insensitive matching only works correctly with ASCII.
bool CaseSensitive = true;
@@ -865,11 +986,11 @@ class RedirectingFileSystem : public vfs::FileSystem {
/// be prefixed in every 'external-contents' when reading from YAML files.
bool IsRelativeOverlay = false;
- /// \brief Whether to use to use the value of 'external-contents' for the
+  /// Whether to use the value of 'external-contents' for the
/// names of files. This global value is overridable on a per-file basis.
bool UseExternalNames = true;
- /// \brief Whether an invalid path obtained via 'external-contents' should
+ /// Whether an invalid path obtained via 'external-contents' should
/// cause iteration on the VFS to stop. If 'true', the VFS should ignore
/// the entry and continue with the next. Allows YAML files to be shared
/// across multiple compiler invocations regardless of prior existent
@@ -882,31 +1003,29 @@ class RedirectingFileSystem : public vfs::FileSystem {
/// "." and "./" in their paths. FIXME: some unittests currently fail on
/// win32 when using remove_dots and remove_leading_dotslash on paths.
bool UseCanonicalizedPaths =
-#ifdef LLVM_ON_WIN32
+#ifdef _WIN32
false;
#else
true;
#endif
- friend class RedirectingFileSystemParser;
-
private:
RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
: ExternalFS(std::move(ExternalFS)) {}
- /// \brief Looks up the path <tt>[Start, End)</tt> in \p From, possibly
+ /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly
/// recursing into the contents of \p From if it is a directory.
ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
sys::path::const_iterator End, Entry *From);
- /// \brief Get the status of a given an \c Entry.
+  /// Get the status of a given \c Entry.
ErrorOr<Status> status(const Twine &Path, Entry *E);
public:
- /// \brief Looks up \p Path in \c Roots.
+ /// Looks up \p Path in \c Roots.
ErrorOr<Entry *> lookupPath(const Twine &Path);
- /// \brief Parses \p Buffer, which is expected to be in YAML format and
+ /// Parses \p Buffer, which is expected to be in YAML format and
/// returns a virtual file system representing its contents.
static RedirectingFileSystem *
create(std::unique_ptr<MemoryBuffer> Buffer,
@@ -919,6 +1038,7 @@ public:
llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
return ExternalFS->getCurrentWorkingDirectory();
}
+
std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
return ExternalFS->setCurrentWorkingDirectory(Path);
}
@@ -927,17 +1047,17 @@ public:
ErrorOr<Entry *> E = lookupPath(Dir);
if (!E) {
EC = E.getError();
- return directory_iterator();
+ return {};
}
ErrorOr<Status> S = status(Dir, *E);
if (!S) {
EC = S.getError();
- return directory_iterator();
+ return {};
}
if (!S->isDirectory()) {
EC = std::error_code(static_cast<int>(errc::not_a_directory),
std::system_category());
- return directory_iterator();
+ return {};
}
auto *D = cast<RedirectingDirectoryEntry>(*E);
@@ -959,7 +1079,7 @@ public:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump() const {
- for (const std::unique_ptr<Entry> &Root : Roots)
+ for (const auto &Root : Roots)
dumpEntry(Root.get());
}
@@ -979,10 +1099,9 @@ LLVM_DUMP_METHOD void dumpEntry(Entry *E, int NumSpaces = 0) const {
}
}
#endif
-
};
-/// \brief A helper class to hold the common YAML parsing state.
+/// A helper class to hold the common YAML parsing state.
class RedirectingFileSystemParser {
yaml::Stream &Stream;
@@ -993,7 +1112,8 @@ class RedirectingFileSystemParser {
// false on error
bool parseScalarString(yaml::Node *N, StringRef &Result,
SmallVectorImpl<char> &Storage) {
- yaml::ScalarNode *S = dyn_cast<yaml::ScalarNode>(N);
+ const auto *S = dyn_cast<yaml::ScalarNode>(N);
+
if (!S) {
error(N, "expected string");
return false;
@@ -1024,11 +1144,13 @@ class RedirectingFileSystemParser {
}
struct KeyStatus {
- KeyStatus(bool Required=false) : Required(Required), Seen(false) {}
bool Required;
- bool Seen;
+ bool Seen = false;
+
+ KeyStatus(bool Required = false) : Required(Required) {}
};
- typedef std::pair<StringRef, KeyStatus> KeyStatusPair;
+
+ using KeyStatusPair = std::pair<StringRef, KeyStatus>;
// false on error
bool checkDuplicateOrUnknownKey(yaml::Node *KeyNode, StringRef Key,
@@ -1048,11 +1170,9 @@ class RedirectingFileSystemParser {
// false on error
bool checkMissingKeys(yaml::Node *Obj, DenseMap<StringRef, KeyStatus> &Keys) {
- for (DenseMap<StringRef, KeyStatus>::iterator I = Keys.begin(),
- E = Keys.end();
- I != E; ++I) {
- if (I->second.Required && !I->second.Seen) {
- error(Obj, Twine("missing key '") + I->first + "'");
+ for (const auto &I : Keys) {
+ if (I.second.Required && !I.second.Seen) {
+ error(Obj, Twine("missing key '") + I.first + "'");
return false;
}
}
@@ -1062,7 +1182,7 @@ class RedirectingFileSystemParser {
Entry *lookupOrCreateEntry(RedirectingFileSystem *FS, StringRef Name,
Entry *ParentEntry = nullptr) {
    if (!ParentEntry) { // Look for an existing root
- for (const std::unique_ptr<Entry> &Root : FS->Roots) {
+ for (const auto &Root : FS->Roots) {
if (Name.equals(Root->getName())) {
ParentEntry = Root.get();
return ParentEntry;
@@ -1125,7 +1245,7 @@ class RedirectingFileSystemParser {
}
std::unique_ptr<Entry> parseEntry(yaml::Node *N, RedirectingFileSystem *FS) {
- yaml::MappingNode *M = dyn_cast<yaml::MappingNode>(N);
+ auto *M = dyn_cast<yaml::MappingNode>(N);
if (!M) {
error(N, "expected mapping node for file or directory entry");
return nullptr;
@@ -1148,21 +1268,20 @@ class RedirectingFileSystemParser {
auto UseExternalName = RedirectingFileEntry::NK_NotSet;
EntryKind Kind;
- for (yaml::MappingNode::iterator I = M->begin(), E = M->end(); I != E;
- ++I) {
+ for (auto &I : *M) {
StringRef Key;
// Reuse the buffer for key and value, since we don't look at key after
// parsing value.
SmallString<256> Buffer;
- if (!parseScalarString(I->getKey(), Key, Buffer))
+ if (!parseScalarString(I.getKey(), Key, Buffer))
return nullptr;
- if (!checkDuplicateOrUnknownKey(I->getKey(), Key, Keys))
+ if (!checkDuplicateOrUnknownKey(I.getKey(), Key, Keys))
return nullptr;
StringRef Value;
if (Key == "name") {
- if (!parseScalarString(I->getValue(), Value, Buffer))
+ if (!parseScalarString(I.getValue(), Value, Buffer))
return nullptr;
if (FS->UseCanonicalizedPaths) {
@@ -1176,47 +1295,44 @@ class RedirectingFileSystemParser {
Name = Value;
}
} else if (Key == "type") {
- if (!parseScalarString(I->getValue(), Value, Buffer))
+ if (!parseScalarString(I.getValue(), Value, Buffer))
return nullptr;
if (Value == "file")
Kind = EK_File;
else if (Value == "directory")
Kind = EK_Directory;
else {
- error(I->getValue(), "unknown value for 'type'");
+ error(I.getValue(), "unknown value for 'type'");
return nullptr;
}
} else if (Key == "contents") {
if (HasContents) {
- error(I->getKey(),
+ error(I.getKey(),
"entry already has 'contents' or 'external-contents'");
return nullptr;
}
HasContents = true;
- yaml::SequenceNode *Contents =
- dyn_cast<yaml::SequenceNode>(I->getValue());
+ auto *Contents = dyn_cast<yaml::SequenceNode>(I.getValue());
if (!Contents) {
// FIXME: this is only for directories, what about files?
- error(I->getValue(), "expected array");
+ error(I.getValue(), "expected array");
return nullptr;
}
- for (yaml::SequenceNode::iterator I = Contents->begin(),
- E = Contents->end();
- I != E; ++I) {
- if (std::unique_ptr<Entry> E = parseEntry(&*I, FS))
+ for (auto &I : *Contents) {
+ if (std::unique_ptr<Entry> E = parseEntry(&I, FS))
EntryArrayContents.push_back(std::move(E));
else
return nullptr;
}
} else if (Key == "external-contents") {
if (HasContents) {
- error(I->getKey(),
+ error(I.getKey(),
"entry already has 'contents' or 'external-contents'");
return nullptr;
}
HasContents = true;
- if (!parseScalarString(I->getValue(), Value, Buffer))
+ if (!parseScalarString(I.getValue(), Value, Buffer))
return nullptr;
SmallString<256> FullPath;
@@ -1238,7 +1354,7 @@ class RedirectingFileSystemParser {
ExternalContentsPath = FullPath.str();
} else if (Key == "use-external-name") {
bool Val;
- if (!parseScalarBool(I->getValue(), Val))
+ if (!parseScalarBool(I.getValue(), Val))
return nullptr;
UseExternalName = Val ? RedirectingFileEntry::NK_External
: RedirectingFileEntry::NK_Virtual;
@@ -1311,7 +1427,7 @@ public:
// false on error
bool parse(yaml::Node *Root, RedirectingFileSystem *FS) {
- yaml::MappingNode *Top = dyn_cast<yaml::MappingNode>(Root);
+ auto *Top = dyn_cast<yaml::MappingNode>(Root);
if (!Top) {
error(Root, "expected mapping node");
return false;
@@ -1330,26 +1446,24 @@ public:
std::vector<std::unique_ptr<Entry>> RootEntries;
// Parse configuration and 'roots'
- for (yaml::MappingNode::iterator I = Top->begin(), E = Top->end(); I != E;
- ++I) {
+ for (auto &I : *Top) {
SmallString<10> KeyBuffer;
StringRef Key;
- if (!parseScalarString(I->getKey(), Key, KeyBuffer))
+ if (!parseScalarString(I.getKey(), Key, KeyBuffer))
return false;
- if (!checkDuplicateOrUnknownKey(I->getKey(), Key, Keys))
+ if (!checkDuplicateOrUnknownKey(I.getKey(), Key, Keys))
return false;
if (Key == "roots") {
- yaml::SequenceNode *Roots = dyn_cast<yaml::SequenceNode>(I->getValue());
+ auto *Roots = dyn_cast<yaml::SequenceNode>(I.getValue());
if (!Roots) {
- error(I->getValue(), "expected array");
+ error(I.getValue(), "expected array");
return false;
}
- for (yaml::SequenceNode::iterator I = Roots->begin(), E = Roots->end();
- I != E; ++I) {
- if (std::unique_ptr<Entry> E = parseEntry(&*I, FS))
+ for (auto &I : *Roots) {
+ if (std::unique_ptr<Entry> E = parseEntry(&I, FS))
RootEntries.push_back(std::move(E));
else
return false;
@@ -1357,32 +1471,32 @@ public:
} else if (Key == "version") {
StringRef VersionString;
SmallString<4> Storage;
- if (!parseScalarString(I->getValue(), VersionString, Storage))
+ if (!parseScalarString(I.getValue(), VersionString, Storage))
return false;
int Version;
if (VersionString.getAsInteger<int>(10, Version)) {
- error(I->getValue(), "expected integer");
+ error(I.getValue(), "expected integer");
return false;
}
if (Version < 0) {
- error(I->getValue(), "invalid version number");
+ error(I.getValue(), "invalid version number");
return false;
}
if (Version != 0) {
- error(I->getValue(), "version mismatch, expected 0");
+ error(I.getValue(), "version mismatch, expected 0");
return false;
}
} else if (Key == "case-sensitive") {
- if (!parseScalarBool(I->getValue(), FS->CaseSensitive))
+ if (!parseScalarBool(I.getValue(), FS->CaseSensitive))
return false;
} else if (Key == "overlay-relative") {
- if (!parseScalarBool(I->getValue(), FS->IsRelativeOverlay))
+ if (!parseScalarBool(I.getValue(), FS->IsRelativeOverlay))
return false;
} else if (Key == "use-external-names") {
- if (!parseScalarBool(I->getValue(), FS->UseExternalNames))
+ if (!parseScalarBool(I.getValue(), FS->UseExternalNames))
return false;
} else if (Key == "ignore-non-existent-contents") {
- if (!parseScalarBool(I->getValue(), FS->IgnoreNonExistentContents))
+ if (!parseScalarBool(I.getValue(), FS->IgnoreNonExistentContents))
return false;
} else {
llvm_unreachable("key missing from Keys");
@@ -1398,22 +1512,20 @@ public:
  // Now that we successfully parsed the YAML file, canonicalize the internal
// representation to a proper directory tree so that we can search faster
// inside the VFS.
- for (std::unique_ptr<Entry> &E : RootEntries)
+ for (auto &E : RootEntries)
uniqueOverlayTree(FS, E.get());
return true;
}
};
-} // end of anonymous namespace
-Entry::~Entry() = default;
+} // namespace
RedirectingFileSystem *
RedirectingFileSystem::create(std::unique_ptr<MemoryBuffer> Buffer,
SourceMgr::DiagHandlerTy DiagHandler,
StringRef YAMLFilePath, void *DiagContext,
IntrusiveRefCntPtr<FileSystem> ExternalFS) {
-
SourceMgr SM;
yaml::Stream Stream(Buffer->getMemBufferRef(), SM);
@@ -1473,7 +1585,7 @@ ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) {
sys::path::const_iterator Start = sys::path::begin(Path);
sys::path::const_iterator End = sys::path::end(Path);
- for (const std::unique_ptr<Entry> &Root : Roots) {
+ for (const auto &Root : Roots) {
ErrorOr<Entry *> Result = lookupPath(Start, End, Root.get());
if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
return Result;
@@ -1484,7 +1596,7 @@ ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) {
ErrorOr<Entry *>
RedirectingFileSystem::lookupPath(sys::path::const_iterator Start,
sys::path::const_iterator End, Entry *From) {
-#ifndef LLVM_ON_WIN32
+#ifndef _WIN32
assert(!isTraversalComponent(*Start) &&
!isTraversalComponent(From->getName()) &&
"Paths should not contain traversal components");
@@ -1557,6 +1669,7 @@ ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path) {
}
namespace {
+
/// Provide a file wrapper with an overridden status.
class FileWithFixedStatus : public File {
std::unique_ptr<File> InnerFile;
@@ -1568,14 +1681,17 @@ public:
ErrorOr<Status> status() override { return S; }
ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+
getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
bool IsVolatile) override {
return InnerFile->getBuffer(Name, FileSize, RequiresNullTerminator,
IsVolatile);
}
+
std::error_code close() override { return InnerFile->close(); }
};
-} // end anonymous namespace
+
+} // namespace
ErrorOr<std::unique_ptr<File>>
RedirectingFileSystem::openFileForRead(const Twine &Path) {
@@ -1670,11 +1786,13 @@ void YAMLVFSWriter::addFileMapping(StringRef VirtualPath, StringRef RealPath) {
}
namespace {
+
class JSONWriter {
llvm::raw_ostream &OS;
SmallVector<StringRef, 16> DirStack;
- inline unsigned getDirIndent() { return 4 * DirStack.size(); }
- inline unsigned getFileIndent() { return 4 * (DirStack.size() + 1); }
+
+ unsigned getDirIndent() { return 4 * DirStack.size(); }
+ unsigned getFileIndent() { return 4 * (DirStack.size() + 1); }
bool containedIn(StringRef Parent, StringRef Path);
StringRef containedPart(StringRef Parent, StringRef Path);
void startDirectory(StringRef Path);
@@ -1683,14 +1801,17 @@ class JSONWriter {
public:
JSONWriter(llvm::raw_ostream &OS) : OS(OS) {}
+
void write(ArrayRef<YAMLVFSEntry> Entries, Optional<bool> UseExternalNames,
Optional<bool> IsCaseSensitive, Optional<bool> IsOverlayRelative,
Optional<bool> IgnoreNonExistentContents, StringRef OverlayDir);
};
-}
+
+} // namespace
bool JSONWriter::containedIn(StringRef Parent, StringRef Path) {
using namespace llvm::sys;
+
// Compare each path component.
auto IParent = path::begin(Parent), EParent = path::end(Parent);
for (auto IChild = path::begin(Path), EChild = path::end(Path);
@@ -1812,8 +1933,8 @@ void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
}
void YAMLVFSWriter::write(llvm::raw_ostream &OS) {
- std::sort(Mappings.begin(), Mappings.end(),
- [](const YAMLVFSEntry &LHS, const YAMLVFSEntry &RHS) {
+ llvm::sort(Mappings.begin(), Mappings.end(),
+ [](const YAMLVFSEntry &LHS, const YAMLVFSEntry &RHS) {
return LHS.VPath < RHS.VPath;
});
@@ -1868,7 +1989,7 @@ std::error_code VFSFromYamlDirIterImpl::increment() {
if (Current == End)
CurrentEntry = Status();
- return std::error_code();
+ return {};
}
vfs::recursive_directory_iterator::recursive_directory_iterator(FileSystem &FS_,
diff --git a/lib/Basic/XRayInstr.cpp b/lib/Basic/XRayInstr.cpp
new file mode 100644
index 000000000000..8cc36df79468
--- /dev/null
+++ b/lib/Basic/XRayInstr.cpp
@@ -0,0 +1,30 @@
+//===--- XRayInstr.cpp ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is part of XRay, a function call instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/XRayInstr.h"
+#include "llvm/ADT/StringSwitch.h"
+
+namespace clang {
+
+XRayInstrMask parseXRayInstrValue(StringRef Value) {
+ XRayInstrMask ParsedKind = llvm::StringSwitch<XRayInstrMask>(Value)
+ .Case("all", XRayInstrKind::All)
+ .Case("custom", XRayInstrKind::Custom)
+ .Case("function", XRayInstrKind::Function)
+ .Case("typed", XRayInstrKind::Typed)
+ .Case("none", XRayInstrKind::None)
+ .Default(XRayInstrKind::None);
+ return ParsedKind;
+}
+
+} // namespace clang
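
An illustrative call into the new helper (the input string is assumed):

    clang::XRayInstrMask M = clang::parseXRayInstrValue("custom");
    // Anything outside the five recognized spellings falls through the
    // StringSwitch default and comes back as XRayInstrKind::None, the
    // same value an explicit "none" yields.
    bool Unrecognized = (M == clang::XRayInstrKind::None);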
diff --git a/lib/Basic/XRayLists.cpp b/lib/Basic/XRayLists.cpp
index 462777d53400..ad331899d2e2 100644
--- a/lib/Basic/XRayLists.cpp
+++ b/lib/Basic/XRayLists.cpp
@@ -16,24 +16,32 @@ using namespace clang;
XRayFunctionFilter::XRayFunctionFilter(
ArrayRef<std::string> AlwaysInstrumentPaths,
- ArrayRef<std::string> NeverInstrumentPaths, SourceManager &SM)
+ ArrayRef<std::string> NeverInstrumentPaths,
+ ArrayRef<std::string> AttrListPaths, SourceManager &SM)
: AlwaysInstrument(
llvm::SpecialCaseList::createOrDie(AlwaysInstrumentPaths)),
NeverInstrument(llvm::SpecialCaseList::createOrDie(NeverInstrumentPaths)),
- SM(SM) {}
+ AttrList(llvm::SpecialCaseList::createOrDie(AttrListPaths)), SM(SM) {}
XRayFunctionFilter::ImbueAttribute
XRayFunctionFilter::shouldImbueFunction(StringRef FunctionName) const {
  // First apply the always instrument list, then if it isn't an "always" see
// whether it's treated as a "never" instrument function.
+ // TODO: Remove these as they're deprecated; use the AttrList exclusively.
if (AlwaysInstrument->inSection("xray_always_instrument", "fun", FunctionName,
- "arg1"))
+ "arg1") ||
+ AttrList->inSection("always", "fun", FunctionName, "arg1"))
return ImbueAttribute::ALWAYS_ARG1;
if (AlwaysInstrument->inSection("xray_always_instrument", "fun",
- FunctionName))
+ FunctionName) ||
+ AttrList->inSection("always", "fun", FunctionName))
return ImbueAttribute::ALWAYS;
- if (NeverInstrument->inSection("xray_never_instrument", "fun", FunctionName))
+
+ if (NeverInstrument->inSection("xray_never_instrument", "fun",
+ FunctionName) ||
+ AttrList->inSection("never", "fun", FunctionName))
return ImbueAttribute::NEVER;
+
return ImbueAttribute::NONE;
}
@@ -41,10 +49,12 @@ XRayFunctionFilter::ImbueAttribute
XRayFunctionFilter::shouldImbueFunctionsInFile(StringRef Filename,
StringRef Category) const {
if (AlwaysInstrument->inSection("xray_always_instrument", "src", Filename,
- Category))
+ Category) ||
+ AttrList->inSection("always", "src", Filename, Category))
return ImbueAttribute::ALWAYS;
if (NeverInstrument->inSection("xray_never_instrument", "src", Filename,
- Category))
+ Category) ||
+ AttrList->inSection("never", "src", Filename, Category))
return ImbueAttribute::NEVER;
return ImbueAttribute::NONE;
}
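
A sketch of an attribute-list file the new AttrList member consumes; the section names ("always"/"never") and entry prefixes ("fun"/"src") are the ones queried above, while the globs themselves are illustrative:

    # xray-attr-list.txt (illustrative)
    [always]
    fun:*must_trace*
    fun:log_entry=arg1      # category "arg1" -> ImbueAttribute::ALWAYS_ARG1
    [never]
    fun:*no_instrument*
    src:third_party/*       # matched by shouldImbueFunctionsInFile()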
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 575506da84d4..feed3833f24a 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -53,12 +53,9 @@ namespace swiftcall {
CodeGen::CodeGenTypes &CGT;
protected:
llvm::CallingConv::ID RuntimeCC;
- llvm::CallingConv::ID BuiltinCC;
public:
ABIInfo(CodeGen::CodeGenTypes &cgt)
- : CGT(cgt),
- RuntimeCC(llvm::CallingConv::C),
- BuiltinCC(llvm::CallingConv::C) {}
+ : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
virtual ~ABIInfo();
@@ -77,11 +74,6 @@ namespace swiftcall {
return RuntimeCC;
}
- /// Return the calling convention to use for compiler builtins
- llvm::CallingConv::ID getBuiltinCC() const {
- return BuiltinCC;
- }
-
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
@@ -108,8 +100,6 @@ namespace swiftcall {
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const;
- virtual bool shouldSignExtUnsignedType(QualType Ty) const;
-
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
@@ -137,8 +127,7 @@ namespace swiftcall {
bool supportsSwift() const final override { return true; }
- virtual bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type*> types,
+ virtual bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> types,
bool asReturnValue) const = 0;
virtual bool isLegalVectorTypeForSwift(CharUnits totalSize,
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index e2349da5f0a4..415bd9626220 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -26,6 +26,7 @@
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
@@ -44,17 +45,19 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
+#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
+#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
@@ -101,7 +104,18 @@ class EmitAssemblyHelper {
///
/// \return True on success.
bool AddEmitPasses(legacy::PassManager &CodeGenPasses, BackendAction Action,
- raw_pwrite_stream &OS);
+ raw_pwrite_stream &OS, raw_pwrite_stream *DwoOS);
+
+ std::unique_ptr<llvm::ToolOutputFile> openOutputFile(StringRef Path) {
+ std::error_code EC;
+ auto F = llvm::make_unique<llvm::ToolOutputFile>(Path, EC,
+ llvm::sys::fs::F_None);
+ if (EC) {
+ Diags.Report(diag::err_fe_unable_to_open_output) << Path << EC.message();
+ F.reset();
+ }
+ return F;
+ }
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
@@ -231,10 +245,9 @@ static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
PM.add(createAddressSanitizerFunctionPass(
- /*CompileKernel*/ true,
- /*Recover*/ true, /*UseAfterScope*/ false));
- PM.add(createAddressSanitizerModulePass(/*CompileKernel*/true,
- /*Recover*/true));
+ /*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false));
+ PM.add(createAddressSanitizerModulePass(
+ /*CompileKernel*/ true, /*Recover*/ true));
}
static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
@@ -243,7 +256,13 @@ static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
static_cast<const PassManagerBuilderWrapper &>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
- PM.add(createHWAddressSanitizerPass(Recover));
+ PM.add(createHWAddressSanitizerPass(/*CompileKernel*/ false, Recover));
+}
+
+static void addKernelHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createHWAddressSanitizerPass(
+ /*CompileKernel*/ true, /*Recover*/ true));
}
static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
@@ -361,21 +380,6 @@ getCodeModel(const CodeGenOptions &CodeGenOpts) {
return static_cast<llvm::CodeModel::Model>(CodeModel);
}
-static llvm::Reloc::Model getRelocModel(const CodeGenOptions &CodeGenOpts) {
- // Keep this synced with the equivalent code in
- // lib/Frontend/CompilerInvocation.cpp
- llvm::Optional<llvm::Reloc::Model> RM;
- RM = llvm::StringSwitch<llvm::Reloc::Model>(CodeGenOpts.RelocationModel)
- .Case("static", llvm::Reloc::Static)
- .Case("pic", llvm::Reloc::PIC_)
- .Case("ropi", llvm::Reloc::ROPI)
- .Case("rwpi", llvm::Reloc::RWPI)
- .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
- .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC);
- assert(RM.hasValue() && "invalid PIC model!");
- return *RM;
-}
-
static TargetMachine::CodeGenFileType getCodeGenFileType(BackendAction Action) {
if (Action == Backend_EmitObj)
return TargetMachine::CGFT_ObjectFile;
@@ -447,7 +451,10 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Options.DataSections = CodeGenOpts.DataSections;
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
+ Options.ExplicitEmulatedTLS = CodeGenOpts.ExplicitEmulatedTLS;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
+ Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
+ Options.EmitAddrsig = CodeGenOpts.Addrsig;
if (CodeGenOpts.EnableSplitDwarf)
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
@@ -470,6 +477,23 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Options.MCOptions.IASSearchPaths.push_back(
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
}
+static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
+ if (CodeGenOpts.DisableGCov)
+ return None;
+ if (!CodeGenOpts.EmitGcovArcs && !CodeGenOpts.EmitGcovNotes)
+ return None;
+ // Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
+ // LLVM's -default-gcov-version flag is set to something invalid.
+ GCOVOptions Options;
+ Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
+ Options.EmitData = CodeGenOpts.EmitGcovArcs;
+ llvm::copy(CodeGenOpts.CoverageVersion, std::begin(Options.Version));
+ Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData;
+ Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
+ return Options;
+}
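
A sketch of what the hoisted helper returns for a given flag combination; the fields are the existing CodeGenOptions members and the scenario is illustrative:

    CodeGenOptions CGO;
    CGO.EmitGcovNotes = true;  // e.g. from -ftest-coverage
    CGO.EmitGcovArcs = false;
    // DisableGCov unset and at least one Emit* flag set => populated value;
    // otherwise None, and no GCOV pass is scheduled by either pass manager.
    Optional<GCOVOptions> Opts = getGCOVOptions(CGO);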
void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
legacy::FunctionPassManager &FPM) {
@@ -501,7 +525,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.Inliner = createFunctionInliningPass(
CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize,
(!CodeGenOpts.SampleProfileFile.empty() &&
- CodeGenOpts.EmitSummaryIndex));
+ CodeGenOpts.PrepareForThinLTO));
}
PMBuilder.OptLevel = CodeGenOpts.OptimizationLevel;
@@ -511,7 +535,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
- PMBuilder.PrepareForThinLTO = CodeGenOpts.EmitSummaryIndex;
+ PMBuilder.PrepareForThinLTO = CodeGenOpts.PrepareForThinLTO;
PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
@@ -535,6 +559,9 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addObjCARCOptPass);
}
+ if (LangOpts.CoroutinesTS)
+ addCoroutinePassesToExtensionPoints(PMBuilder);
+
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addBoundsCheckingPass);
@@ -572,6 +599,13 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addHWAddressSanitizerPasses);
}
+ if (LangOpts.Sanitize.has(SanitizerKind::KernelHWAddress)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addKernelHWAddressSanitizerPasses);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addKernelHWAddressSanitizerPasses);
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addMemorySanitizerPass);
@@ -593,9 +627,6 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addDataFlowSanitizerPass);
}
- if (LangOpts.CoroutinesTS)
- addCoroutinePassesToExtensionPoints(PMBuilder);
-
if (LangOpts.Sanitize.hasOneOf(SanitizerKind::Efficiency)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addEfficiencySanitizerPass);
@@ -612,20 +643,8 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.RewriteMapFiles.empty())
addSymbolRewriterPass(CodeGenOpts, &MPM);
- if (!CodeGenOpts.DisableGCov &&
- (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)) {
- // Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
- // LLVM's -default-gcov-version flag is set to something invalid.
- GCOVOptions Options;
- Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
- Options.EmitData = CodeGenOpts.EmitGcovArcs;
- memcpy(Options.Version, CodeGenOpts.CoverageVersion, 4);
- Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
- Options.NoRedZone = CodeGenOpts.DisableRedZone;
- Options.FunctionNamesInData =
- !CodeGenOpts.CoverageNoFunctionNamesInData;
- Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
- MPM.add(createGCOVProfilerPass(Options));
+ if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts)) {
+ MPM.add(createGCOVProfilerPass(*Options));
if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
MPM.add(createStripSymbolsPass(true));
}
@@ -664,8 +683,6 @@ static void setCommandLineOpts(const CodeGenOptions &CodeGenOpts) {
BackendArgs.push_back("-limit-float-precision");
BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
}
- for (const std::string &BackendOption : CodeGenOpts.BackendOptions)
- BackendArgs.push_back(BackendOption.c_str());
BackendArgs.push_back(nullptr);
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
BackendArgs.data());
@@ -685,7 +702,7 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Optional<llvm::CodeModel::Model> CM = getCodeModel(CodeGenOpts);
std::string FeaturesStr =
llvm::join(TargetOpts.Features.begin(), TargetOpts.Features.end(), ",");
- llvm::Reloc::Model RM = getRelocModel(CodeGenOpts);
+ llvm::Reloc::Model RM = CodeGenOpts.RelocationModel;
CodeGenOpt::Level OptLevel = getCGOptLevel(CodeGenOpts);
llvm::TargetOptions Options;
@@ -696,7 +713,8 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
BackendAction Action,
- raw_pwrite_stream &OS) {
+ raw_pwrite_stream &OS,
+ raw_pwrite_stream *DwoOS) {
// Add LibraryInfo.
llvm::Triple TargetTriple(TheModule->getTargetTriple());
std::unique_ptr<TargetLibraryInfoImpl> TLII(
@@ -713,7 +731,7 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
if (CodeGenOpts.OptimizationLevel > 0)
CodeGenPasses.add(createObjCARCContractPass());
- if (TM->addPassesToEmitFile(CodeGenPasses, OS, CGFT,
+ if (TM->addPassesToEmitFile(CodeGenPasses, OS, DwoOS, CGFT,
/*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
Diags.Report(diag::err_fe_unable_to_interface_with_target);
return false;
@@ -724,7 +742,7 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
+ TimeRegion Region(FrontendTimesIsEnabled ? &CodeGenerationTime : nullptr);
setCommandLineOpts(CodeGenOpts);
@@ -752,31 +770,35 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
- std::unique_ptr<raw_fd_ostream> ThinLinkOS;
+ std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
switch (Action) {
case Backend_EmitNothing:
break;
case Backend_EmitBC:
- if (CodeGenOpts.EmitSummaryIndex) {
+ if (CodeGenOpts.PrepareForThinLTO) {
if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
- std::error_code EC;
- ThinLinkOS.reset(new llvm::raw_fd_ostream(
- CodeGenOpts.ThinLinkBitcodeFile, EC,
- llvm::sys::fs::F_None));
- if (EC) {
- Diags.Report(diag::err_fe_unable_to_open_output) << CodeGenOpts.ThinLinkBitcodeFile
- << EC.message();
+ ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
+ if (!ThinLinkOS)
return;
- }
}
+ PerModulePasses.add(createWriteThinLTOBitcodePass(
+ *OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
+ } else {
+ // Emit a module summary by default for Regular LTO except for ld64
+ // targets
+ bool EmitLTOSummary =
+ (CodeGenOpts.PrepareForLTO &&
+ llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
+ llvm::Triple::Apple);
+ if (EmitLTOSummary && !TheModule->getModuleFlag("ThinLTO"))
+ TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
+
PerModulePasses.add(
- createWriteThinLTOBitcodePass(*OS, ThinLinkOS.get()));
+ createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
+ EmitLTOSummary));
}
- else
- PerModulePasses.add(
- createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists));
break;
case Backend_EmitLL:
@@ -785,7 +807,13 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
break;
default:
- if (!AddEmitPasses(CodeGenPasses, Action, *OS))
+ if (!CodeGenOpts.SplitDwarfFile.empty()) {
+ DwoOS = openOutputFile(CodeGenOpts.SplitDwarfFile);
+ if (!DwoOS)
+ return;
+ }
+ if (!AddEmitPasses(CodeGenPasses, Action, *OS,
+ DwoOS ? &DwoOS->os() : nullptr))
return;
}
@@ -814,6 +842,11 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
PrettyStackTraceString CrashInfo("Code generation");
CodeGenPasses.run(*TheModule);
}
+
+ if (ThinLinkOS)
+ ThinLinkOS->keep();
+ if (DwoOS)
+ DwoOS->keep();
}
static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
@@ -827,7 +860,7 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
case 2:
switch (Opts.OptimizeSize) {
default:
- llvm_unreachable("Invalide optimization level for size!");
+ llvm_unreachable("Invalid optimization level for size!");
case 0:
return PassBuilder::O2;
@@ -854,7 +887,7 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
/// `EmitAssembly` at some point in the future when the default switches.
void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
+ TimeRegion Region(FrontendTimesIsEnabled ? &CodeGenerationTime : nullptr);
setCommandLineOpts(CodeGenOpts);
// The new pass manager always makes a target machine available to passes
@@ -913,10 +946,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
ModulePassManager MPM(CodeGenOpts.DebugPassManager);
if (!CodeGenOpts.DisableLLVMPasses) {
- bool IsThinLTO = CodeGenOpts.EmitSummaryIndex;
+ bool IsThinLTO = CodeGenOpts.PrepareForThinLTO;
bool IsLTO = CodeGenOpts.PrepareForLTO;
if (CodeGenOpts.OptimizationLevel == 0) {
+ if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
+ MPM.addPass(GCOVProfilerPass(*Options));
+
// Build a minimal pipeline based on the semantics required by Clang,
// which is just that always inlining occurs.
MPM.addPass(AlwaysInlinerPass());
@@ -925,8 +961,8 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
MPM.addPass(createModuleToFunctionPassAdaptor(BoundsCheckingPass()));
- // Lastly, add a semantically necessary pass for ThinLTO.
- if (IsThinLTO)
+ // Lastly, add a semantically necessary pass for LTO.
+ if (IsLTO || IsThinLTO)
MPM.addPass(NameAnonGlobalPass());
} else {
// Map our optimization levels into one of the distinct levels used to
@@ -940,6 +976,10 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
[](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
FPM.addPass(BoundsCheckingPass());
});
+ if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts))
+ PB.registerPipelineStartEPCallback([Options](ModulePassManager &MPM) {
+ MPM.addPass(GCOVProfilerPass(*Options));
+ });
if (IsThinLTO) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(
@@ -948,6 +988,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
} else if (IsLTO) {
MPM = PB.buildLTOPreLinkDefaultPipeline(Level,
CodeGenOpts.DebugPassManager);
+ MPM.addPass(NameAnonGlobalPass());
} else {
MPM = PB.buildPerModuleDefaultPipeline(Level,
CodeGenOpts.DebugPassManager);
@@ -959,7 +1000,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// create that pass manager here and use it as needed below.
legacy::PassManager CodeGenPasses;
bool NeedCodeGen = false;
- Optional<raw_fd_ostream> ThinLinkOS;
+ std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
// Append any output we need to the pass manager.
switch (Action) {
@@ -967,23 +1008,26 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
break;
case Backend_EmitBC:
- if (CodeGenOpts.EmitSummaryIndex) {
+ if (CodeGenOpts.PrepareForThinLTO) {
if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
- std::error_code EC;
- ThinLinkOS.emplace(CodeGenOpts.ThinLinkBitcodeFile, EC,
- llvm::sys::fs::F_None);
- if (EC) {
- Diags.Report(diag::err_fe_unable_to_open_output)
- << CodeGenOpts.ThinLinkBitcodeFile << EC.message();
+ ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
+ if (!ThinLinkOS)
return;
- }
}
- MPM.addPass(
- ThinLTOBitcodeWriterPass(*OS, ThinLinkOS ? &*ThinLinkOS : nullptr));
+ MPM.addPass(ThinLTOBitcodeWriterPass(*OS, ThinLinkOS ? &ThinLinkOS->os()
+ : nullptr));
} else {
+ // Emit a module summary by default for Regular LTO except for ld64
+ // targets
+ bool EmitLTOSummary =
+ (CodeGenOpts.PrepareForLTO &&
+ llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
+ llvm::Triple::Apple);
+ if (EmitLTOSummary && !TheModule->getModuleFlag("ThinLTO"))
+ TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
+
MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
- CodeGenOpts.EmitSummaryIndex,
- CodeGenOpts.EmitSummaryIndex));
+ EmitLTOSummary));
}
break;
@@ -997,7 +1041,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
NeedCodeGen = true;
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
- if (!AddEmitPasses(CodeGenPasses, Action, *OS))
+ if (!CodeGenOpts.SplitDwarfFile.empty()) {
+ DwoOS = openOutputFile(CodeGenOpts.SplitDwarfFile);
+ if (!DwoOS)
+ return;
+ }
+ if (!AddEmitPasses(CodeGenPasses, Action, *OS,
+ DwoOS ? &DwoOS->os() : nullptr))
// FIXME: Should we handle this error differently?
return;
break;
@@ -1017,6 +1067,11 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PrettyStackTraceString CrashInfo("Code generation");
CodeGenPasses.run(*TheModule);
}
+
+ if (ThinLinkOS)
+ ThinLinkOS->keep();
+ if (DwoOS)
+ DwoOS->keep();
}
Expected<BitcodeModule> clang::FindThinLTOModule(MemoryBufferRef MBRef) {
@@ -1026,16 +1081,22 @@ Expected<BitcodeModule> clang::FindThinLTOModule(MemoryBufferRef MBRef) {
// The bitcode file may contain multiple modules, we want the one that is
// marked as being the ThinLTO module.
- for (BitcodeModule &BM : *BMsOrErr) {
- Expected<BitcodeLTOInfo> LTOInfo = BM.getLTOInfo();
- if (LTOInfo && LTOInfo->IsThinLTO)
- return BM;
- }
+ if (const BitcodeModule *Bm = FindThinLTOModule(*BMsOrErr))
+ return *Bm;
return make_error<StringError>("Could not find module summary",
inconvertibleErrorCode());
}
+BitcodeModule *clang::FindThinLTOModule(MutableArrayRef<BitcodeModule> BMs) {
+ for (BitcodeModule &BM : BMs) {
+ Expected<BitcodeLTOInfo> LTOInfo = BM.getLTOInfo();
+ if (LTOInfo && LTOInfo->IsThinLTO)
+ return &BM;
+ }
+ return nullptr;
+}
+
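Illustrative use of the new array-based overload, assuming the module list comes from llvm::getBitcodeModuleList():

    Expected<std::vector<BitcodeModule>> BMs = getBitcodeModuleList(MBRef);
    if (BMs)
      if (BitcodeModule *BM = clang::FindThinLTOModule(*BMs)) {
        // BM is the module whose LTO info is marked IsThinLTO.
      }
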
static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
const HeaderSearchOptions &HeaderOpts,
const CodeGenOptions &CGOpts,
@@ -1067,9 +1128,8 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
// e.g. record required linkage changes.
if (Summary->modulePath() == M->getModuleIdentifier())
continue;
- // Doesn't matter what value we plug in to the map, just needs an entry
- // to provoke importing by thinBackend.
- ImportList[Summary->modulePath()][GUID] = 1;
+ // Add an entry to provoke importing by thinBackend.
+ ImportList[Summary->modulePath()].insert(GUID);
}
std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
@@ -1100,15 +1160,27 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
};
lto::Config Conf;
+ if (CGOpts.SaveTempsFilePrefix != "") {
+ if (Error E = Conf.addSaveTemps(CGOpts.SaveTempsFilePrefix + ".",
+ /* UseInputModulePath */ false)) {
+ handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
+ errs() << "Error setting up ThinLTO save-temps: " << EIB.message()
+ << '\n';
+ });
+ }
+ }
Conf.CPU = TOpts.CPU;
Conf.CodeModel = getCodeModel(CGOpts);
Conf.MAttrs = TOpts.Features;
- Conf.RelocModel = getRelocModel(CGOpts);
+ Conf.RelocModel = CGOpts.RelocationModel;
Conf.CGOptLevel = getCGOptLevel(CGOpts);
initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
+ Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
+ Conf.RemarksFilename = CGOpts.OptRecordFile;
+ Conf.DwoPath = CGOpts.SplitDwarfFile;
switch (Action) {
case Backend_EmitNothing:
Conf.PreCodeGenModuleHook = [](size_t Task, const Module &Mod) {
@@ -1123,7 +1195,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
break;
case Backend_EmitBC:
Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
- WriteBitcodeToFile(M, *OS, CGOpts.EmitLLVMUseLists);
+ WriteBitcodeToFile(*M, *OS, CGOpts.EmitLLVMUseLists);
return false;
};
break;
@@ -1132,7 +1204,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
break;
}
if (Error E = thinBackend(
- Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
+ Conf, -1, AddStream, *M, *CombinedIndex, ImportList,
ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
@@ -1148,6 +1220,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const llvm::DataLayout &TDesc, Module *M,
BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
+ std::unique_ptr<llvm::Module> EmptyModule;
if (!CGOpts.ThinLTOIndexFile.empty()) {
// If we are performing a ThinLTO importing compile, load the function index
// into memory and pass it into runThinLTOBackend, which will run the
@@ -1165,11 +1238,22 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// A null CombinedIndex means we should skip ThinLTO compilation
// (LLVM will optionally ignore empty index files, returning null instead
// of an error).
- bool DoThinLTOBackend = CombinedIndex != nullptr;
- if (DoThinLTOBackend) {
- runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
- LOpts, std::move(OS), CGOpts.SampleProfileFile, Action);
- return;
+ if (CombinedIndex) {
+ if (!CombinedIndex->skipModuleByDistributedBackend()) {
+ runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
+ LOpts, std::move(OS), CGOpts.SampleProfileFile,
+ Action);
+ return;
+ }
+ // Distributed indexing detected that nothing from the module is needed
+      // for the final linking. So we can skip the compilation. We still need
+      // to
+ // output an empty object file to make sure that a linker does not fail
+ // trying to read it. Also for some features, like CFI, we must skip
+ // the compilation as CombinedIndex does not contain all required
+ // information.
+ EmptyModule = llvm::make_unique<llvm::Module>("empty", M->getContext());
+ EmptyModule->setTargetTriple(M->getTargetTriple());
+ M = EmptyModule.get();
}
}
@@ -1228,7 +1312,7 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
  // Save llvm.compiler.used and remove it.
SmallVector<Constant*, 2> UsedArray;
- SmallSet<GlobalValue*, 4> UsedGlobals;
+ SmallPtrSet<GlobalValue*, 4> UsedGlobals;
Type *UsedElementType = Type::getInt8Ty(M->getContext())->getPointerTo(0);
GlobalVariable *Used = collectUsedGlobalVariables(*M, UsedGlobals, true);
for (auto *GV : UsedGlobals) {
@@ -1253,7 +1337,7 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
// If the input is LLVM Assembly, bitcode is produced by serializing
    // the module. Use-list order needs to be preserved in this case.
llvm::raw_string_ostream OS(Data);
- llvm::WriteBitcodeToFile(M, OS, /* ShouldPreserveUseListOrder */ true);
+ llvm::WriteBitcodeToFile(*M, OS, /* ShouldPreserveUseListOrder */ true);
ModuleData =
ArrayRef<uint8_t>((const uint8_t *)OS.str().data(), OS.str().size());
} else
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 6862fd811186..b34bcdc1fc38 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -18,6 +18,7 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
@@ -186,7 +187,7 @@ namespace {
RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
SourceLocation loc, bool AsValue) const;
- /// \brief Converts a rvalue to integer value.
+ /// Converts a rvalue to integer value.
llvm::Value *convertRValueToInt(RValue RVal) const;
RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
@@ -207,13 +208,13 @@ namespace {
LVal.getBaseInfo(), LVal.getTBAAInfo());
}
- /// \brief Emits atomic load.
+ /// Emits atomic load.
/// \returns Loaded value.
RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
bool AsValue, llvm::AtomicOrdering AO,
bool IsVolatile);
- /// \brief Emits atomic compare-and-exchange sequence.
+ /// Emits atomic compare-and-exchange sequence.
/// \param Expected Expected value.
/// \param Desired Desired value.
/// \param Success Atomic ordering for success operation.
@@ -229,13 +230,13 @@ namespace {
llvm::AtomicOrdering::SequentiallyConsistent,
bool IsWeak = false);
- /// \brief Emits atomic update.
+ /// Emits atomic update.
/// \param AO Atomic ordering.
/// \param UpdateOp Update operation for the current lvalue.
void EmitAtomicUpdate(llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
- /// \brief Emits atomic update.
+ /// Emits atomic update.
/// \param AO Atomic ordering.
void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
bool IsVolatile);
@@ -243,25 +244,25 @@ namespace {
/// Materialize an atomic r-value in atomic-layout memory.
Address materializeRValue(RValue rvalue) const;
- /// \brief Creates temp alloca for intermediate operations on atomic value.
+ /// Creates temp alloca for intermediate operations on atomic value.
Address CreateTempAlloca() const;
private:
bool requiresMemSetZero(llvm::Type *type) const;
- /// \brief Emits atomic load as a libcall.
+ /// Emits atomic load as a libcall.
void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
llvm::AtomicOrdering AO, bool IsVolatile);
- /// \brief Emits atomic load as LLVM instruction.
+ /// Emits atomic load as LLVM instruction.
llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
- /// \brief Emits atomic compare-and-exchange op as a libcall.
+ /// Emits atomic compare-and-exchange op as a libcall.
llvm::Value *EmitAtomicCompareExchangeLibcall(
llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
llvm::AtomicOrdering Success =
llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering Failure =
llvm::AtomicOrdering::SequentiallyConsistent);
- /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
+ /// Emits atomic compare-and-exchange op as LLVM instruction.
std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
llvm::AtomicOrdering Success =
@@ -269,19 +270,19 @@ namespace {
llvm::AtomicOrdering Failure =
llvm::AtomicOrdering::SequentiallyConsistent,
bool IsWeak = false);
- /// \brief Emit atomic update as libcalls.
+ /// Emit atomic update as libcalls.
void
EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
- /// \brief Emit atomic update as LLVM instructions.
+ /// Emit atomic update as LLVM instructions.
void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
- /// \brief Emit atomic update as libcalls.
+ /// Emit atomic update as libcalls.
void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
bool IsVolatile);
- /// \brief Emit atomic update as LLVM instructions.
+ /// Emit atomic update as LLVM instructions.
void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
bool IsVolatile);
};
@@ -590,11 +591,13 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
break;
case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
: llvm::AtomicRMWInst::UMin;
break;
case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
: llvm::AtomicRMWInst::UMax;
break;
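The two new cases lower the GNU-style builtins this patch introduces; whether the signed or unsigned atomicrmw operation is chosen follows the value type. An illustrative use (assuming a clang with this patch applied):

    // Lowers to 'atomicrmw min' because int is signed...
    int fetch_min_s(int *p, int v) {
      return __atomic_fetch_min(p, v, __ATOMIC_SEQ_CST);
    }
    // ...and to 'atomicrmw umax' because unsigned is unsigned.
    unsigned fetch_max_u(unsigned *p, unsigned v) {
      return __atomic_fetch_max(p, v, __ATOMIC_SEQ_CST);
    }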
@@ -751,6 +754,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
Address Dest = Address::invalid();
Address Ptr = EmitPointerWithAlignment(E->getPtr());
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
+ E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
+ LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
+ EmitAtomicInit(E->getVal1(), lvalue);
+ return RValue::get(nullptr);
+ }
+
CharUnits sizeChars, alignChars;
std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
@@ -758,12 +768,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
getContext().toBits(sizeChars) > MaxInlineWidthInBits);
- if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
- E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
- LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
- EmitAtomicInit(E->getVal1(), lvalue);
- return RValue::get(nullptr);
- }
+ if (UseLibcall)
+ CGM.getDiags().Report(E->getLocStart(), diag::warn_atomic_op_misaligned);
llvm::Value *Order = EmitScalarExpr(E->getOrder());
llvm::Value *Scope =
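With the init handling hoisted above, misaligned or oversized atomics now warn when they take the libcall path. A sketch of source that would trigger this (hypothetical example):

    // 'x' is byte-aligned inside a packed struct, so an 8-byte atomic access
    // cannot use an inline instruction and goes through __atomic_* libcalls,
    // which now emits warn_atomic_op_misaligned.
    struct __attribute__((packed)) P {
      char c;
      long long x;
    };
    long long load(P *p) {
      long long r;
      __atomic_load(&p->x, &r, __ATOMIC_SEQ_CST);
      return r;
    }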
@@ -855,6 +861,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_max:
Val1 = EmitValToTemp(*this, E->getVal1());
break;
}
@@ -909,6 +917,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_max:
// For these, only library calls for certain sizes exist.
UseOptimizedLibcall = true;
break;
@@ -1091,6 +1101,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), sizeChars);
break;
+ case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_min"
@@ -1098,6 +1109,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), sizeChars);
break;
+ case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_max"
@@ -1160,7 +1172,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (UseOptimizedLibcall && Res.getScalarVal()) {
llvm::Value *ResVal = Res.getScalarVal();
if (PostOp) {
- llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
+ llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
}
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
@@ -1508,11 +1520,13 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
// which means that the caller is responsible for having zeroed
// any padding. Just do an aggregate copy of that type.
if (rvalue.isAggregate()) {
- CGF.EmitAggregateCopy(getAtomicAddress(),
- rvalue.getAggregateAddress(),
- getAtomicType(),
- (rvalue.isVolatileQualified()
- || LVal.isVolatileQualified()));
+ LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
+ LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
+ getAtomicType());
+ bool IsVolatile = rvalue.isVolatileQualified() ||
+ LVal.isVolatileQualified();
+ CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
+ AggValueSlot::DoesNotOverlap, IsVolatile);
return;
}
@@ -2007,6 +2021,7 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap,
Zeroed ? AggValueSlot::IsZeroed :
AggValueSlot::IsNotZeroed);
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 5f73d4cf7913..617856a7b43e 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -66,7 +66,7 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
/// buildBlockDescriptor - Build the block descriptor meta-data for a block.
/// buildBlockDescriptor is accessed from the 5th field of the Block_literal
/// meta-data and contains stationary information about the block literal.
-/// Its definition will have 4 (or optinally 6) words.
+/// Its definition will have 4 (or optionally 6) words.
/// \code
/// struct Block_descriptor {
/// unsigned long reserved;
@@ -104,7 +104,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
elements.addInt(ulong, blockInfo.BlockSize.getQuantity());
// Optional copy/dispose helpers.
- if (blockInfo.NeedsCopyDispose) {
+ if (blockInfo.needsCopyDisposeHelpers()) {
// copy_func_helper_decl
elements.add(buildCopyHelper(CGM, blockInfo));
@@ -159,6 +159,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
/// These are the flags (with corresponding bit number) that the
/// compiler is actually supposed to know about.
+ /// 23. BLOCK_IS_NOESCAPE - indicates that the block is non-escaping
/// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block
/// descriptor provides copy and dispose helper functions
/// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured
@@ -307,25 +308,12 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
assert(elementTypes.empty());
if (CGM.getLangOpts().OpenCL) {
- // The header is basically 'struct { int; int; generic void *;
+ // The header is basically 'struct { int; int;
// custom_fields; }'. Assert that the struct is packed.
- auto GenericAS =
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic);
- auto GenPtrAlign =
- CharUnits::fromQuantity(CGM.getTarget().getPointerAlign(GenericAS) / 8);
- auto GenPtrSize =
- CharUnits::fromQuantity(CGM.getTarget().getPointerWidth(GenericAS) / 8);
- assert(CGM.getIntSize() <= GenPtrSize);
- assert(CGM.getIntAlign() <= GenPtrAlign);
- assert((2 * CGM.getIntSize()).isMultipleOf(GenPtrAlign));
elementTypes.push_back(CGM.IntTy); /* total size */
elementTypes.push_back(CGM.IntTy); /* align */
- elementTypes.push_back(
- CGM.getOpenCLRuntime()
- .getGenericVoidPointerType()); /* invoke function */
- unsigned Offset =
- 2 * CGM.getIntSize().getQuantity() + GenPtrSize.getQuantity();
- unsigned BlockAlign = GenPtrAlign.getQuantity();
+ unsigned Offset = 2 * CGM.getIntSize().getQuantity();
+ unsigned BlockAlign = CGM.getIntAlign().getQuantity();
if (auto *Helper =
CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto I : Helper->getCustomFieldTypes()) /* custom fields */ {
@@ -343,7 +331,7 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
info.BlockSize = CharUnits::fromQuantity(Offset);
} else {
// The header is basically 'struct { void *; int; int; void *; void *; }'.
- // Assert that that struct is packed.
+ // Assert that the struct is packed.
assert(CGM.getIntSize() <= CGM.getPointerSize());
assert(CGM.getIntAlign() <= CGM.getPointerAlign());
assert((2 * CGM.getIntSize()).isMultipleOf(CGM.getPointerAlign()));
@@ -477,6 +465,14 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.NeedsCopyDispose = true;
info.HasCXXObject = true;
+ // So do C structs that require non-trivial copy construction or
+ // destruction.
+ } else if (variable->getType().isNonTrivialToPrimitiveCopy() ==
+ QualType::PCK_Struct ||
+ variable->getType().isDestructedType() ==
+ QualType::DK_nontrivial_c_struct) {
+ info.NeedsCopyDispose = true;
+
// And so do types with destructors.
} else if (CGM.getLangOpts().CPlusPlus) {
if (const CXXRecordDecl *record =
@@ -705,11 +701,8 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
/// kind of cleanup object is a BlockDecl*.
void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
assert(E->getNumObjects() != 0);
- ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects();
- for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator
- i = cleanups.begin(), e = cleanups.end(); i != e; ++i) {
- enterBlockScope(*this, *i);
- }
+ for (const ExprWithCleanups::CleanupObject &C : E->getObjects())
+ enterBlockScope(*this, C);
}
/// Find the layout for the given block in a linked list and remove it.
@@ -740,27 +733,19 @@ void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
}
/// Emit a block literal expression in the current function.
-llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr,
- llvm::Function **InvokeF) {
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If the block has no captures, we won't have a pre-computed
// layout for it.
if (!blockExpr->getBlockDecl()->hasCaptures()) {
// The block literal is emitted as a global variable, and the block invoke
// function has to be extracted from its initializer.
if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr)) {
- if (InvokeF) {
- auto *GV = cast<llvm::GlobalVariable>(
- cast<llvm::Constant>(Block)->stripPointerCasts());
- auto *BlockInit = cast<llvm::ConstantStruct>(GV->getInitializer());
- *InvokeF = cast<llvm::Function>(
- BlockInit->getAggregateElement(2)->stripPointerCasts());
- }
return Block;
}
CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
computeBlockInfo(CGM, this, blockInfo);
blockInfo.BlockExpression = blockExpr;
- return EmitBlockLiteral(blockInfo, InvokeF);
+ return EmitBlockLiteral(blockInfo);
}
// Find the block info for this block and take ownership of it.
@@ -769,28 +754,17 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr,
blockExpr->getBlockDecl()));
blockInfo->BlockExpression = blockExpr;
- return EmitBlockLiteral(*blockInfo, InvokeF);
+ return EmitBlockLiteral(*blockInfo);
}
-llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
- llvm::Function **InvokeF) {
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
bool IsOpenCL = CGM.getContext().getLangOpts().OpenCL;
- auto GenVoidPtrTy =
- IsOpenCL ? CGM.getOpenCLRuntime().getGenericVoidPointerType() : VoidPtrTy;
- LangAS GenVoidPtrAddr = IsOpenCL ? LangAS::opencl_generic : LangAS::Default;
- auto GenVoidPtrSize = CharUnits::fromQuantity(
- CGM.getTarget().getPointerWidth(
- CGM.getContext().getTargetAddressSpace(GenVoidPtrAddr)) /
- 8);
// Using the computed layout, generate the actual block function.
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
CodeGenFunction BlockCGF{CGM, true};
BlockCGF.SanOpts = SanOpts;
auto *InvokeFn = BlockCGF.GenerateBlockFunction(
CurGD, blockInfo, LocalDeclMap, isLambdaConv, blockInfo.CanBeGlobal);
- if (InvokeF)
- *InvokeF = InvokeFn;
- auto *blockFn = llvm::ConstantExpr::getPointerCast(InvokeFn, GenVoidPtrTy);
// If there is nothing to capture, we can emit this as a global block.
if (blockInfo.CanBeGlobal)
@@ -805,8 +779,13 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
llvm::Constant *descriptor;
BlockFlags flags;
if (!IsOpenCL) {
- isa = llvm::ConstantExpr::getBitCast(CGM.getNSConcreteStackBlock(),
- VoidPtrTy);
+ // If the block is non-escaping, set field 'isa' to NSConcreteGlobalBlock
+ // and set the BLOCK_IS_GLOBAL bit of field 'flags'. Copying a non-escaping
+ // block just returns the original block and releasing it is a no-op.
+ llvm::Constant *blockISA = blockInfo.getBlockDecl()->doesNotEscape()
+ ? CGM.getNSConcreteGlobalBlock()
+ : CGM.getNSConcreteStackBlock();
+ isa = llvm::ConstantExpr::getBitCast(blockISA, VoidPtrTy);
// Build the block descriptor.
descriptor = buildBlockDescriptor(CGM, blockInfo);
@@ -815,12 +794,14 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.HasCapturedVariableLayout)
flags |= BLOCK_HAS_EXTENDED_LAYOUT;
- if (blockInfo.NeedsCopyDispose)
+ if (blockInfo.needsCopyDisposeHelpers())
flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject)
flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret)
flags |= BLOCK_USE_STRET;
+ if (blockInfo.getBlockDecl()->doesNotEscape())
+ flags |= BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL;
}
auto projectField =
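A source-level illustration of the non-escaping case handled above (compiled with -fblocks; attribute placement is the usual clang spelling):

    // Because the parameter is marked noescape, the block literal below keeps
    // its stack storage but carries BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL and a
    // global-block isa, so Block_copy/Block_release on it are no-ops.
    void callee(__attribute__((noescape)) void (^blk)(void));

    void caller(int i) {
      callee(^{ (void)i; });
    }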
@@ -859,11 +840,12 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
llvm::ConstantInt::get(IntTy, blockInfo.BlockAlign.getQuantity()),
getIntSize(), "block.align");
}
- addHeaderField(blockFn, GenVoidPtrSize, "block.invoke");
- if (!IsOpenCL)
+ if (!IsOpenCL) {
+ addHeaderField(llvm::ConstantExpr::getBitCast(InvokeFn, VoidPtrTy),
+ getPointerSize(), "block.invoke");
addHeaderField(descriptor, getPointerSize(), "block.descriptor");
- else if (auto *Helper =
- CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+ } else if (auto *Helper =
+ CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto I : Helper->getCustomFieldValues(*this, blockInfo)) {
addHeaderField(
I.first,
@@ -913,7 +895,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
const CGBlockInfo::Capture &enclosingCapture =
BlockInfo->getCapture(variable);
- // This is a [[type]]*, except that a byref entry wil just be an i8**.
+ // This is a [[type]]*, except that a byref entry will just be an i8**.
src = Builder.CreateStructGEP(LoadBlockStruct(),
enclosingCapture.getIndex(),
enclosingCapture.getOffset(),
@@ -955,7 +937,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
AggValueSlot::forAddr(blockField, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap);
EmitAggExpr(copyExpr, Slot);
} else {
EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
@@ -1024,6 +1007,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo,
llvm::Value *result = Builder.CreatePointerCast(
blockAddr.getPointer(), ConvertType(blockInfo.getBlockExpr()->getType()));
+ if (IsOpenCL) {
+ CGM.getOpenCLRuntime().recordBlockInfo(blockInfo.BlockExpression, InvokeFn,
+ result);
+ }
+
return result;
}
@@ -1061,38 +1049,23 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() {
}
llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
+ assert(!getLangOpts().OpenCL && "OpenCL does not need this");
+
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
- if (getLangOpts().OpenCL) {
- // struct __opencl_block_literal_generic {
- // int __size;
- // int __align;
- // __generic void *__invoke;
- // /* custom fields */
- // };
- SmallVector<llvm::Type *, 8> StructFields(
- {IntTy, IntTy, getOpenCLRuntime().getGenericVoidPointerType()});
- if (auto *Helper = getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
- for (auto I : Helper->getCustomFieldTypes())
- StructFields.push_back(I);
- }
- GenericBlockLiteralType = llvm::StructType::create(
- StructFields, "struct.__opencl_block_literal_generic");
- } else {
- // struct __block_literal_generic {
- // void *__isa;
- // int __flags;
- // int __reserved;
- // void (*__invoke)(void *);
- // struct __block_descriptor *__descriptor;
- // };
- GenericBlockLiteralType =
- llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy,
- IntTy, IntTy, VoidPtrTy, BlockDescPtrTy);
- }
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType =
+ llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy,
+ IntTy, IntTy, VoidPtrTy, BlockDescPtrTy);
return GenericBlockLiteralType;
}
@@ -1103,27 +1076,21 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
E->getCallee()->getType()->getAs<BlockPointerType>();
llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
+ llvm::Value *FuncPtr;
- // Get a pointer to the generic block literal.
- // For OpenCL we generate generic AS void ptr to be able to reuse the same
- // block definition for blocks with captures generated as private AS local
- // variables and without captures generated as global AS program scope
- // variables.
- unsigned AddrSpace = 0;
- if (getLangOpts().OpenCL)
- AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_generic);
-
- llvm::Type *BlockLiteralTy =
- llvm::PointerType::get(CGM.getGenericBlockLiteralType(), AddrSpace);
+ if (!CGM.getLangOpts().OpenCL) {
+ // Get a pointer to the generic block literal.
+ llvm::Type *BlockLiteralTy =
+ llvm::PointerType::get(CGM.getGenericBlockLiteralType(), 0);
- // Bitcast the callee to a block literal.
- BlockPtr =
- Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
+ // Bitcast the callee to a block literal.
+ BlockPtr =
+ Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
- // Get the function pointer from the literal.
- llvm::Value *FuncPtr =
- Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr,
- CGM.getLangOpts().OpenCL ? 2 : 3);
+ // Get the function pointer from the literal.
+ FuncPtr =
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3);
+ }
// Add the block literal.
CallArgList Args;
@@ -1146,7 +1113,11 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
// Load the function.
- llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ llvm::Value *Func;
+ if (CGM.getLangOpts().OpenCL)
+ Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee());
+ else
+ Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
@@ -1255,14 +1226,14 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
// Reserved
fields.addInt(CGM.IntTy, 0);
+
+ // Function
+ fields.add(blockFn);
} else {
fields.addInt(CGM.IntTy, blockInfo.BlockSize.getQuantity());
fields.addInt(CGM.IntTy, blockInfo.BlockAlign.getQuantity());
}
- // Function
- fields.add(blockFn);
-
if (!IsOpenCL) {
// Descriptor
fields.add(buildBlockDescriptor(CGM, blockInfo));
@@ -1287,6 +1258,10 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::Constant *Result =
llvm::ConstantExpr::getPointerCast(literal, RequiredType);
CGM.setAddrOfGlobalBlock(blockInfo.BlockExpression, Result);
+ if (CGM.getContext().getLangOpts().OpenCL)
+ CGM.getOpenCLRuntime().recordBlockInfo(
+ blockInfo.BlockExpression,
+ cast<llvm::Function>(blockFn->stripPointerCasts()), Result);
return Result;
}
@@ -1479,8 +1454,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
auto addr = LocalDeclMap.find(variable)->second;
- DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
- Builder);
+ (void)DI->EmitDeclareOfAutoVariable(variable, addr.getPointer(),
+ Builder);
continue;
}
@@ -1513,6 +1488,7 @@ enum class BlockCaptureEntityKind {
CXXRecord, // Copy or destroy
ARCWeak,
ARCStrong,
+ NonTrivialCStruct,
BlockObject, // Assign or release
None
};
@@ -1548,39 +1524,46 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
Flags |= BLOCK_FIELD_IS_WEAK;
return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
}
- if (!T->isObjCRetainableType())
- // For all other types, the memcpy is fine.
- return std::make_pair(BlockCaptureEntityKind::None, Flags);
Flags = BLOCK_FIELD_IS_OBJECT;
bool isBlockPointer = T->isBlockPointerType();
if (isBlockPointer)
Flags = BLOCK_FIELD_IS_BLOCK;
- // Special rules for ARC captures:
- Qualifiers QS = T.getQualifiers();
-
- // We need to register __weak direct captures with the runtime.
- if (QS.getObjCLifetime() == Qualifiers::OCL_Weak)
+ switch (T.isNonTrivialToPrimitiveCopy()) {
+ case QualType::PCK_Struct:
+ return std::make_pair(BlockCaptureEntityKind::NonTrivialCStruct,
+ BlockFieldFlags());
+ case QualType::PCK_ARCWeak:
+ // We need to register __weak direct captures with the runtime.
return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags);
-
- // We need to retain the copied value for __strong direct captures.
- if (QS.getObjCLifetime() == Qualifiers::OCL_Strong) {
- // If it's a block pointer, we have to copy the block and
- // assign that to the destination pointer, so we might as
- // well use _Block_object_assign. Otherwise we can avoid that.
+ case QualType::PCK_ARCStrong:
+ // We need to retain the copied value for __strong direct captures.
+ // If it's a block pointer, we have to copy the block and assign that to
+ // the destination pointer, so we might as well use _Block_object_assign.
+ // Otherwise we can avoid that.
return std::make_pair(!isBlockPointer ? BlockCaptureEntityKind::ARCStrong
: BlockCaptureEntityKind::BlockObject,
Flags);
- }
+ case QualType::PCK_Trivial:
+ case QualType::PCK_VolatileTrivial: {
+ if (!T->isObjCRetainableType())
+ // For all other types, the memcpy is fine.
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
- // Non-ARC captures of retainable pointers are strong and
- // therefore require a call to _Block_object_assign.
- if (!QS.getObjCLifetime() && !LangOpts.ObjCAutoRefCount)
- return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
+ // Special rules for ARC captures:
+ Qualifiers QS = T.getQualifiers();
- // Otherwise the memcpy is fine.
- return std::make_pair(BlockCaptureEntityKind::None, Flags);
+ // Non-ARC captures of retainable pointers are strong and
+ // therefore require a call to _Block_object_assign.
+ if (!QS.getObjCLifetime() && !LangOpts.ObjCAutoRefCount)
+ return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
+
+ // Otherwise the memcpy is fine.
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ }
+ }
+ llvm_unreachable("after exhaustive PrimitiveCopyKind switch");
}
/// Find the set of block captures that need to be explicitly copied or destroyed.
@@ -1602,6 +1585,64 @@ static void findBlockCapturedManagedEntities(
}
}
+namespace {
+/// Release a __block variable.
+struct CallBlockRelease final : EHScopeStack::Cleanup {
+ Address Addr;
+ BlockFieldFlags FieldFlags;
+ bool LoadBlockVarAddr;
+
+ CallBlockRelease(Address Addr, BlockFieldFlags Flags, bool LoadValue)
+ : Addr(Addr), FieldFlags(Flags), LoadBlockVarAddr(LoadValue) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::Value *BlockVarAddr;
+ if (LoadBlockVarAddr) {
+ BlockVarAddr = CGF.Builder.CreateLoad(Addr);
+ BlockVarAddr = CGF.Builder.CreateBitCast(BlockVarAddr, CGF.VoidPtrTy);
+ } else {
+ BlockVarAddr = Addr.getPointer();
+ }
+
+ CGF.BuildBlockRelease(BlockVarAddr, FieldFlags);
+ }
+};
+} // end anonymous namespace
+
+static void pushCaptureCleanup(BlockCaptureEntityKind CaptureKind,
+ Address Field, QualType CaptureType,
+ BlockFieldFlags Flags, bool EHOnly,
+ CodeGenFunction &CGF) {
+ switch (CaptureKind) {
+ case BlockCaptureEntityKind::CXXRecord:
+ case BlockCaptureEntityKind::ARCWeak:
+ case BlockCaptureEntityKind::NonTrivialCStruct:
+ case BlockCaptureEntityKind::ARCStrong: {
+ if (CaptureType.isDestructedType() &&
+ (!EHOnly || CGF.needsEHCleanup(CaptureType.isDestructedType()))) {
+ CodeGenFunction::Destroyer *Destroyer =
+ CaptureKind == BlockCaptureEntityKind::ARCStrong
+ ? CodeGenFunction::destroyARCStrongImprecise
+ : CGF.getDestroyer(CaptureType.isDestructedType());
+ CleanupKind Kind =
+ EHOnly ? EHCleanup
+ : CGF.getCleanupKind(CaptureType.isDestructedType());
+ CGF.pushDestroy(Kind, Field, CaptureType, Destroyer, Kind & EHCleanup);
+ }
+ break;
+ }
+ case BlockCaptureEntityKind::BlockObject: {
+ if (!EHOnly || CGF.getLangOpts().Exceptions) {
+ CleanupKind Kind = EHOnly ? EHCleanup : NormalAndEHCleanup;
+ CGF.enterByrefCleanup(Kind, Field, Flags, /*LoadBlockVarAddr*/ true);
+ }
+ break;
+ }
+ case BlockCaptureEntityKind::None:
+ llvm_unreachable("unexpected BlockCaptureEntityKind");
+ }
+}
+
/// Generate the copy-helper function for a block closure object:
/// static void block_copy_helper(block_t *dst, block_t *src);
/// The runtime will have previously initialized 'dst' by doing a
@@ -1644,7 +1685,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
false,
false);
- CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
StartFunction(FD, C.VoidTy, Fn, FI, args);
ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getLocStart()};
@@ -1665,6 +1706,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
for (const auto &CopiedCapture : CopiedCaptures) {
const BlockDecl::Capture &CI = CopiedCapture.CI;
const CGBlockInfo::Capture &capture = CopiedCapture.Capture;
+ QualType captureType = CI.getVariable()->getType();
BlockFieldFlags flags = CopiedCapture.Flags;
unsigned index = capture.getIndex();
@@ -1677,6 +1719,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
EmitSynthesizedCXXCopyCtor(dstField, srcField, CI.getCopyExpr());
} else if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCWeak) {
EmitARCCopyWeak(dstField, srcField);
+ // If this is a C struct that requires non-trivial copy construction, emit a
+ // call to its copy constructor.
+ } else if (CopiedCapture.Kind ==
+ BlockCaptureEntityKind::NonTrivialCStruct) {
+ QualType varType = CI.getVariable()->getType();
+ callCStructCopyConstructor(MakeAddrLValue(dstField, varType),
+ MakeAddrLValue(srcField, varType));
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCStrong) {
@@ -1695,9 +1744,11 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
} else {
EmitARCRetainNonBlock(srcValue);
- // We don't need this anymore, so kill it. It's not quite
- // worth the annoyance to avoid creating it in the first place.
- cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
+ // Unless EH cleanup is required, we don't need this anymore, so kill
+ // it. It's not quite worth the annoyance to avoid creating it in the
+ // first place.
+ if (!needsEHCleanup(captureType.isDestructedType()))
+ cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
}
} else {
assert(CopiedCapture.Kind == BlockCaptureEntityKind::BlockObject);
@@ -1725,6 +1776,11 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
}
}
}
+
+ // Ensure that we destroy the copied object if an exception is thrown later
+ // in the helper function.
+ pushCaptureCleanup(CopiedCapture.Kind, dstField, captureType, flags,
+ /*EHOnly*/ true, *this);
}
FinishFunction();
@@ -1732,50 +1788,51 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
+static BlockFieldFlags
+getBlockFieldFlagsForObjCObjectPointer(const BlockDecl::Capture &CI,
+ QualType T) {
+ BlockFieldFlags Flags = BLOCK_FIELD_IS_OBJECT;
+ if (T->isBlockPointerType())
+ Flags = BLOCK_FIELD_IS_BLOCK;
+ return Flags;
+}
+
static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
const LangOptions &LangOpts) {
- BlockFieldFlags Flags;
if (CI.isByRef()) {
- Flags = BLOCK_FIELD_IS_BYREF;
+ BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
if (T.isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
}
- if (const CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
- if (Record->hasTrivialDestructor())
- return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ switch (T.isDestructedType()) {
+ case QualType::DK_cxx_destructor:
return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags());
- }
-
- // Other types don't need to be destroy explicitly.
- if (!T->isObjCRetainableType())
- return std::make_pair(BlockCaptureEntityKind::None, Flags);
-
- Flags = BLOCK_FIELD_IS_OBJECT;
- if (T->isBlockPointerType())
- Flags = BLOCK_FIELD_IS_BLOCK;
-
- // Special rules for ARC captures.
- Qualifiers QS = T.getQualifiers();
-
- // Use objc_storeStrong for __strong direct captures; the
- // dynamic tools really like it when we do this.
- if (QS.getObjCLifetime() == Qualifiers::OCL_Strong)
- return std::make_pair(BlockCaptureEntityKind::ARCStrong, Flags);
-
- // Support __weak direct captures.
- if (QS.getObjCLifetime() == Qualifiers::OCL_Weak)
- return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags);
-
- // Non-ARC captures are strong, and we need to use
- // _Block_object_dispose.
- if (!QS.hasObjCLifetime() && !LangOpts.ObjCAutoRefCount)
- return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
-
- // Otherwise, we have nothing to do.
- return std::make_pair(BlockCaptureEntityKind::None, Flags);
+ case QualType::DK_objc_strong_lifetime:
+ // Use objc_storeStrong for __strong direct captures; the
+ // dynamic tools really like it when we do this.
+ return std::make_pair(BlockCaptureEntityKind::ARCStrong,
+ getBlockFieldFlagsForObjCObjectPointer(CI, T));
+ case QualType::DK_objc_weak_lifetime:
+ // Support __weak direct captures.
+ return std::make_pair(BlockCaptureEntityKind::ARCWeak,
+ getBlockFieldFlagsForObjCObjectPointer(CI, T));
+ case QualType::DK_nontrivial_c_struct:
+ return std::make_pair(BlockCaptureEntityKind::NonTrivialCStruct,
+ BlockFieldFlags());
+ case QualType::DK_none: {
+ // Non-ARC captures are strong, and we need to use _Block_object_dispose.
+ if (T->isObjCRetainableType() && !T.getQualifiers().hasObjCLifetime() &&
+ !LangOpts.ObjCAutoRefCount)
+ return std::make_pair(BlockCaptureEntityKind::BlockObject,
+ getBlockFieldFlagsForObjCObjectPointer(CI, T));
+ // Otherwise, we have nothing to do.
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ }
+ }
+ llvm_unreachable("after exhaustive DestructionKind switch");
}
/// Generate the destroy-helper function for a block closure object:
@@ -1814,7 +1871,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
nullptr, SC_Static,
false, false);
- CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
StartFunction(FD, C.VoidTy, Fn, FI, args);
ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getLocStart()};
@@ -1839,29 +1896,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
Address srcField =
Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
- // If the captured record has a destructor then call it.
- if (DestroyedCapture.Kind == BlockCaptureEntityKind::CXXRecord) {
- const auto *Dtor =
- CI.getVariable()->getType()->getAsCXXRecordDecl()->getDestructor();
- PushDestructorCleanup(Dtor, srcField);
-
- // If this is a __weak capture, emit the release directly.
- } else if (DestroyedCapture.Kind == BlockCaptureEntityKind::ARCWeak) {
- EmitARCDestroyWeak(srcField);
-
- // Destroy strong objects with a call if requested.
- } else if (DestroyedCapture.Kind == BlockCaptureEntityKind::ARCStrong) {
- EmitARCDestroyStrong(srcField, ARCImpreciseLifetime);
-
- // Otherwise we call _Block_object_dispose. It wouldn't be too
- // hard to just emit this as a cleanup if we wanted to make sure
- // that things were done in reverse.
- } else {
- assert(DestroyedCapture.Kind == BlockCaptureEntityKind::BlockObject);
- llvm::Value *value = Builder.CreateLoad(srcField);
- value = Builder.CreateBitCast(value, VoidPtrTy);
- BuildBlockRelease(value, flags);
- }
+ pushCaptureCleanup(DestroyedCapture.Kind, srcField,
+ CI.getVariable()->getType(), flags, /*EHOnly*/ false, *this);
}
cleanups.ForceCleanup();
@@ -2020,6 +2056,36 @@ public:
id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
}
};
+
+/// Emits the copy/dispose helpers for a __block variable that is a non-trivial
+/// C struct.
+class NonTrivialCStructByrefHelpers final : public BlockByrefHelpers {
+ QualType VarType;
+
+public:
+ NonTrivialCStructByrefHelpers(CharUnits alignment, QualType type)
+ : BlockByrefHelpers(alignment), VarType(type) {}
+
+ void emitCopy(CodeGenFunction &CGF, Address destField,
+ Address srcField) override {
+ CGF.callCStructMoveConstructor(CGF.MakeAddrLValue(destField, VarType),
+ CGF.MakeAddrLValue(srcField, VarType));
+ }
+
+ bool needsDispose() const override {
+ return VarType.isDestructedType();
+ }
+
+ void emitDispose(CodeGenFunction &CGF, Address field) override {
+ EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
+ CGF.pushDestroy(VarType.isDestructedType(), field, VarType);
+ CGF.PopCleanupBlocks(cleanupDepth);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const override {
+ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
+ }
+};
} // end anonymous namespace
static llvm::Constant *
@@ -2059,7 +2125,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
SC_Static,
false, false);
- CGF.CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+ CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
CGF.StartFunction(FD, R, Fn, FI, args);
@@ -2133,7 +2199,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SC_Static,
false, false);
- CGF.CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+ CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
CGF.StartFunction(FD, R, Fn, FI, args);
@@ -2205,6 +2271,13 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
CGM, byrefInfo, CXXByrefHelpers(valueAlignment, type, copyExpr));
}
+ // If type is a non-trivial C struct type that is non-trivial to
+ // destructively move or destroy, build the copy and dispose helpers.
+ if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct ||
+ type.isDestructedType() == QualType::DK_nontrivial_c_struct)
+ return ::buildByrefHelpers(
+ CGM, byrefInfo, NonTrivialCStructByrefHelpers(valueAlignment, type));
+
// Otherwise, if we don't have a retainable type, there's nothing to do;
// note that the runtime does extra copies.
if (!type->isObjCRetainableType()) return nullptr;
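A condensed sketch of the new dispatch condition, using the same QualType queries as the hunk above (the helper name is hypothetical):

    // True when a __block variable of type T needs the non-trivial C struct
    // copy/dispose helpers: moving it is a non-trivial struct operation, or
    // destroying it is non-trivial C struct destruction (e.g. a struct with
    // a __strong field under ARC).
    static bool needsNonTrivialCStructHelpers(QualType T) {
      return T.isNonTrivialToPrimitiveDestructiveMove() ==
                 QualType::PCK_Struct ||
             T.isDestructedType() == QualType::DK_nontrivial_c_struct;
    }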
@@ -2503,30 +2576,10 @@ void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
EmitNounwindRuntimeCall(F, args); // FIXME: throwing destructors?
}
-namespace {
- /// Release a __block variable.
- struct CallBlockRelease final : EHScopeStack::Cleanup {
- llvm::Value *Addr;
- CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
-
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- // Should we be passing FIELD_IS_WEAK here?
- CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
- }
- };
-} // end anonymous namespace
-
-/// Enter a cleanup to destroy a __block variable. Note that this
-/// cleanup should be a no-op if the variable hasn't left the stack
-/// yet; if a cleanup is required for the variable itself, that needs
-/// to be done externally.
-void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
- // We don't enter this cleanup if we're in pure-GC mode.
- if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
- return;
-
- EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup,
- emission.Addr.getPointer());
+void CodeGenFunction::enterByrefCleanup(CleanupKind Kind, Address Addr,
+ BlockFieldFlags Flags,
+ bool LoadBlockVarAddr) {
+ EHStack.pushCleanup<CallBlockRelease>(Kind, Addr, Flags, LoadBlockVarAddr);
}
/// Adjust the declaration of something from the blocks API.
@@ -2559,11 +2612,11 @@ static void configureBlocksRuntimeObject(CodeGenModule &CGM,
}
}
- if (!CGM.getLangOpts().BlocksRuntimeOptional)
- return;
-
- if (GV->isDeclaration() && GV->hasExternalLinkage())
+ if (CGM.getLangOpts().BlocksRuntimeOptional && GV->isDeclaration() &&
+ GV->hasExternalLinkage())
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ CGM.setDSOLocal(GV);
}
llvm::Constant *CodeGenModule::getBlockObjectDispose() {
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 80e255f75417..5a8e960ffcc1 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -54,6 +54,7 @@ enum BlockByrefFlags {
};
enum BlockLiteralFlags {
+ BLOCK_IS_NOESCAPE = (1 << 23),
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
BLOCK_IS_GLOBAL = (1 << 28),
@@ -214,7 +215,8 @@ public:
/// no non-constant captures.
bool CanBeGlobal : 1;
- /// True if the block needs a custom copy or dispose function.
+ /// True if the block has captures that would necessitate custom copy or
+ /// dispose helper functions if the block were escaping.
bool NeedsCopyDispose : 1;
/// HasCXXObject - True if the block's custom copy/dispose functions
@@ -276,6 +278,11 @@ public:
}
CGBlockInfo(const BlockDecl *blockDecl, StringRef Name);
+
+ // Indicates whether the block needs a custom copy or dispose function.
+ bool needsCopyDisposeHelpers() const {
+ return NeedsCopyDispose && !Block->doesNotEscape();
+ }
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index 61fe4aac3afa..d2e5eb256d3b 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -20,7 +20,7 @@ namespace CodeGen {
class CodeGenFunction;
-/// \brief This is an IRBuilder insertion helper that forwards to
+/// This is an IRBuilder insertion helper that forwards to
/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
class CGBuilderInserter : protected llvm::IRBuilderDefaultInserter {
@@ -29,7 +29,7 @@ public:
explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {}
protected:
- /// \brief This forwards to CodeGenFunction::InsertHelper.
+ /// This forwards to CodeGenFunction::InsertHelper.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
@@ -244,6 +244,21 @@ public:
Addr.getAlignment().alignmentAtOffset(Offset));
}
+ using CGBuilderBaseTy::CreateConstInBoundsGEP2_32;
+ Address CreateConstInBoundsGEP2_32(Address Addr, unsigned Idx0,
+ unsigned Idx1, const llvm::DataLayout &DL,
+ const llvm::Twine &Name = "") {
+ auto *GEP = cast<llvm::GetElementPtrInst>(CreateConstInBoundsGEP2_32(
+ Addr.getElementType(), Addr.getPointer(), Idx0, Idx1, Name));
+ llvm::APInt Offset(
+ DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
+ /*IsSigned=*/true);
+ if (!GEP->accumulateConstantOffset(DL, Offset))
+ llvm_unreachable("offset of GEP with constants is always computable");
+ return Address(GEP, Addr.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(Offset.getSExtValue())));
+ }
+
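An illustrative call to the new overload (names hypothetical); the point is that the constant offset lets the builder derive the result's alignment from the base address instead of requiring the caller to compute it:

    // Address of element {0, 2}; alignment is the base alignment at the
    // GEP's accumulated constant byte offset.
    Address Field = Builder.CreateConstInBoundsGEP2_32(
        Base, /*Idx0=*/0, /*Idx1=*/2, CGM.getDataLayout(), "field");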
llvm::Value *CreateConstInBoundsByteGEP(llvm::Value *Ptr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Ptr->getType()->getPointerElementType() == TypeCache.Int8Ty);
@@ -258,23 +273,23 @@ public:
using CGBuilderBaseTy::CreateMemCpy;
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
- auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
- return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
- Align.getQuantity(), IsVolatile);
+ return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getQuantity(),
+ Src.getPointer(), Src.getAlignment().getQuantity(),
+ Size, IsVolatile);
}
llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
bool IsVolatile = false) {
- auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
- return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
- Align.getQuantity(), IsVolatile);
+ return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getQuantity(),
+ Src.getPointer(), Src.getAlignment().getQuantity(),
+ Size, IsVolatile);
}
using CGBuilderBaseTy::CreateMemMove;
llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
bool IsVolatile = false) {
- auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
- return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
- Align.getQuantity(), IsVolatile);
+ return CreateMemMove(Dest.getPointer(), Dest.getAlignment().getQuantity(),
+ Src.getPointer(), Src.getAlignment().getQuantity(),
+ Size, IsVolatile);
}
using CGBuilderBaseTy::CreateMemSet;
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index ba54f8342f1b..0892e84a044c 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -14,6 +14,7 @@
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
@@ -188,7 +189,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
return RValue::get(Result);
}
-/// @brief Utility to insert an atomic cmpxchg instruction.
+/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E Builtin call expression to convert to cmpxchg.
@@ -319,7 +320,7 @@ static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
-/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
+/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
@@ -384,7 +385,7 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
}
// The encompassing type must have a width greater than or equal to the width
- // of the specified types. Aditionally, if the encompassing type is signed,
+ // of the specified types. Additionally, if the encompassing type is signed,
// its width must be strictly greater than the width of any unsigned types
// given.
unsigned Width = 0;
@@ -478,13 +479,261 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
// LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
Value *Min = Builder.getInt1((Type & 2) != 0);
- // For GCC compatability, __builtin_object_size treat NULL as unknown size.
+ // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
Value *NullIsUnknown = Builder.getTrue();
return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown});
}
-// Many of MSVC builtins are on both x64 and ARM; to avoid repeating code, we
-// handle them here.
+namespace {
+/// A struct to generically describe a bit test intrinsic.
+struct BitTest {
+ enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
+ enum InterlockingKind : uint8_t {
+ Unlocked,
+ Sequential,
+ Acquire,
+ Release,
+ NoFence
+ };
+
+ ActionKind Action;
+ InterlockingKind Interlocking;
+ bool Is64Bit;
+
+ static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
+};
+} // namespace
+
+BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
+ switch (BuiltinID) {
+ // Main portable variants.
+ case Builtin::BI_bittest:
+ return {TestOnly, Unlocked, false};
+ case Builtin::BI_bittestandcomplement:
+ return {Complement, Unlocked, false};
+ case Builtin::BI_bittestandreset:
+ return {Reset, Unlocked, false};
+ case Builtin::BI_bittestandset:
+ return {Set, Unlocked, false};
+ case Builtin::BI_interlockedbittestandreset:
+ return {Reset, Sequential, false};
+ case Builtin::BI_interlockedbittestandset:
+ return {Set, Sequential, false};
+
+ // X86-specific 64-bit variants.
+ case Builtin::BI_bittest64:
+ return {TestOnly, Unlocked, true};
+ case Builtin::BI_bittestandcomplement64:
+ return {Complement, Unlocked, true};
+ case Builtin::BI_bittestandreset64:
+ return {Reset, Unlocked, true};
+ case Builtin::BI_bittestandset64:
+ return {Set, Unlocked, true};
+ case Builtin::BI_interlockedbittestandreset64:
+ return {Reset, Sequential, true};
+ case Builtin::BI_interlockedbittestandset64:
+ return {Set, Sequential, true};
+
+ // ARM/AArch64-specific ordering variants.
+ case Builtin::BI_interlockedbittestandset_acq:
+ return {Set, Acquire, false};
+ case Builtin::BI_interlockedbittestandset_rel:
+ return {Set, Release, false};
+ case Builtin::BI_interlockedbittestandset_nf:
+ return {Set, NoFence, false};
+ case Builtin::BI_interlockedbittestandreset_acq:
+ return {Reset, Acquire, false};
+ case Builtin::BI_interlockedbittestandreset_rel:
+ return {Reset, Release, false};
+ case Builtin::BI_interlockedbittestandreset_nf:
+ return {Reset, NoFence, false};
+ }
+ llvm_unreachable("expected only bittest intrinsics");
+}
+
+static char bitActionToX86BTCode(BitTest::ActionKind A) {
+ switch (A) {
+ case BitTest::TestOnly: return '\0';
+ case BitTest::Complement: return 'c';
+ case BitTest::Reset: return 'r';
+ case BitTest::Set: return 's';
+ }
+ llvm_unreachable("invalid action");
+}
+
+static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
+ BitTest BT,
+ const CallExpr *E, Value *BitBase,
+ Value *BitPos) {
+ char Action = bitActionToX86BTCode(BT.Action);
+ char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
+
+ // Build the assembly.
+ SmallString<64> Asm;
+ raw_svector_ostream AsmOS(Asm);
+ if (BT.Interlocking != BitTest::Unlocked)
+ AsmOS << "lock ";
+ AsmOS << "bt";
+ if (Action)
+ AsmOS << Action;
+ AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
+
+ // Build the constraints. FIXME: We should support immediates when possible.
+ std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
+ llvm::IntegerType *IntType = llvm::IntegerType::get(
+ CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getArg(1)->getType()));
+ llvm::Type *IntPtrType = IntType->getPointerTo();
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
+ return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
+}
+
+static llvm::AtomicOrdering
+getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
+ switch (I) {
+ case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
+ case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
+ case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
+ case BitTest::Release: return llvm::AtomicOrdering::Release;
+ case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
+ }
+ llvm_unreachable("invalid interlocking");
+}
+
+/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
+/// bits and a bit position and read and optionally modify the bit at that
+/// position. The position index can be arbitrarily large, i.e. it can be larger
+/// than 31 or 63, so we need an indexed load in the general case.
+static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
+ unsigned BuiltinID,
+ const CallExpr *E) {
+ Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
+ Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
+
+ BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
+
+ // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
+ // indexing operation internally. Use them if possible.
+ llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch();
+ if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
+ return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
+
+ // Otherwise, use generic code to load one byte and test the bit. Use all but
+ // the bottom three bits as the array index, and the bottom three bits to form
+ // a mask.
+ // Bit = (BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7))) != 0;
+ Value *ByteIndex = CGF.Builder.CreateAShr(
+ BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
+ Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
+ Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
+ ByteIndex, "bittest.byteaddr"),
+ CharUnits::One());
+ Value *PosLow =
+ CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
+ llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
+
+ // The updating instructions will need a mask.
+ Value *Mask = nullptr;
+ if (BT.Action != BitTest::TestOnly) {
+ Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
+ "bittest.mask");
+ }
+
+ // Check the action and ordering of the interlocked intrinsics.
+ llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
+
+ Value *OldByte = nullptr;
+ if (Ordering != llvm::AtomicOrdering::NotAtomic) {
+ // Emit a combined atomicrmw load/store operation for the interlocked
+ // intrinsics.
+ llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
+ if (BT.Action == BitTest::Reset) {
+ Mask = CGF.Builder.CreateNot(Mask);
+ RMWOp = llvm::AtomicRMWInst::And;
+ }
+ OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
+ Ordering);
+ } else {
+ // Emit a plain load for the non-interlocked intrinsics.
+ OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
+ Value *NewByte = nullptr;
+ switch (BT.Action) {
+ case BitTest::TestOnly:
+ // Don't store anything.
+ break;
+ case BitTest::Complement:
+ NewByte = CGF.Builder.CreateXor(OldByte, Mask);
+ break;
+ case BitTest::Reset:
+ NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
+ break;
+ case BitTest::Set:
+ NewByte = CGF.Builder.CreateOr(OldByte, Mask);
+ break;
+ }
+ if (NewByte)
+ CGF.Builder.CreateStore(NewByte, ByteAddr);
+ }
+
+ // However we loaded the old byte, either by plain load or atomicrmw, shift
+ // the bit into the low position and mask it to 0 or 1.
+ Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
+ return CGF.Builder.CreateAnd(
+ ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
+}
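A reference implementation of what the generic path computes for the read-only form, mirroring the comment above (illustrative only):

    // _bittest(base, pos): index the byte array, then mask out one bit.
    unsigned char bittest_ref(const long *base, long pos) {
      const unsigned char *bytes = (const unsigned char *)base;
      unsigned char byte = bytes[pos >> 3];  // bittest.byteidx / byteaddr
      return (byte >> (pos & 0x7)) & 1;      // bittest.shr / bittest.res
    }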
+
+namespace {
+enum class MSVCSetJmpKind {
+ _setjmpex,
+ _setjmp3,
+ _setjmp
+};
+}
+
+/// MSVC handles setjmp a bit differently on different platforms. On every
+/// architecture except 32-bit x86, the frame address is passed. On x86, extra
+/// parameters can be passed as variadic arguments, but we always pass none.
+static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
+ const CallExpr *E) {
+ llvm::Value *Arg1 = nullptr;
+ llvm::Type *Arg1Ty = nullptr;
+ StringRef Name;
+ bool IsVarArg = false;
+ if (SJKind == MSVCSetJmpKind::_setjmp3) {
+ Name = "_setjmp3";
+ Arg1Ty = CGF.Int32Ty;
+ Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
+ IsVarArg = true;
+ } else {
+ Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
+ Arg1Ty = CGF.Int8PtrTy;
+ Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
+ llvm::ConstantInt::get(CGF.Int32Ty, 0));
+ }
+
+ // Mark the call site and declaration with ReturnsTwice.
+ llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
+ llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
+ CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::ReturnsTwice);
+ llvm::Constant *SetJmpFn = CGF.CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
+ ReturnsTwiceAttr, /*Local=*/true);
+
+ llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
+ CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
+ llvm::Value *Args[] = {Buf, Arg1};
+ llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
+ CS.setAttributes(ReturnsTwiceAttr);
+ return RValue::get(CS.getInstruction());
+}
+
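Source-level view of the dispatch (hedged: the exact runtime symbols are as in the comment above, and setjmp's mapping onto them comes from the MSVC headers):

    #include <setjmp.h>
    int probe(jmp_buf buf) {
      // With the MSVC runtime: x86 emits _setjmp3(buf, 0); other targets
      // emit _setjmp/_setjmpex(buf, frameaddress). The call is marked
      // returns_twice.
      if (setjmp(buf))
        return 1;  // reached via longjmp
      return 0;
    }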
+// Many of the MSVC builtins are on x64, ARM and AArch64; to avoid repeating
+// code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
_BitScanForward,
_BitScanReverse,
@@ -496,7 +745,6 @@ enum class CodeGenFunction::MSVCIntrin {
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
- _interlockedbittestandset,
__fastfail,
};
@@ -564,22 +812,6 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
case MSVCIntrin::_InterlockedXor:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
- case MSVCIntrin::_interlockedbittestandset: {
- llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
- llvm::Value *Bit = EmitScalarExpr(E->getArg(1));
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Or, Addr,
- Builder.CreateShl(ConstantInt::get(Bit->getType(), 1), Bit),
- llvm::AtomicOrdering::SequentiallyConsistent);
- // Shift the relevant bit to the least significant position, truncate to
- // the result type, and test the low bit.
- llvm::Value *Shifted = Builder.CreateLShr(RMWI, Bit);
- llvm::Value *Truncated =
- Builder.CreateTrunc(Shifted, ConvertType(E->getType()));
- return Builder.CreateAnd(Truncated,
- ConstantInt::get(Truncated->getType(), 1));
- }
-
case MSVCIntrin::_InterlockedDecrement: {
llvm::Type *IntTy = ConvertType(E->getType());
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
@@ -915,7 +1147,11 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
}
- Result = CGF.Builder.CreateTrunc(UnsignedResult, ResTy);
+ // Negate the product if it would be negative in infinite precision.
+ Result = CGF.Builder.CreateSelect(
+ IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
+
+ Result = CGF.Builder.CreateTrunc(Result, ResTy);
}
assert(Overflow && Result && "Missing overflow or result");
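A worked example of why the added select matters (values illustrative): with a = -2 and b = 3, the unsigned magnitude product is 6, but the infinitely precise result is -6, so the product must be negated before truncation:

    bool mul_mixed(int a, unsigned b, int *res) {
      // Mixed-sign operands with a signed result; previously the negative
      // case could store the un-negated magnitude.
      return __builtin_mul_overflow(a, b, res);
    }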
@@ -926,6 +1162,96 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
return RValue::get(Overflow);
}
+static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
+ Value *&RecordPtr, CharUnits Align, Value *Func,
+ int Lvl) {
+ const auto *RT = RType->getAs<RecordType>();
+ ASTContext &Context = CGF.getContext();
+ RecordDecl *RD = RT->getDecl()->getDefinition();
+ ASTContext &Ctx = RD->getASTContext();
+ const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD);
+ std::string Pad = std::string(Lvl * 4, ' ');
+
+ Value *GString =
+ CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
+ Value *Res = CGF.Builder.CreateCall(Func, {GString});
+
+ static llvm::DenseMap<QualType, const char *> Types;
+ if (Types.empty()) {
+ Types[Context.CharTy] = "%c";
+ Types[Context.BoolTy] = "%d";
+ Types[Context.SignedCharTy] = "%hhd";
+ Types[Context.UnsignedCharTy] = "%hhu";
+ Types[Context.IntTy] = "%d";
+ Types[Context.UnsignedIntTy] = "%u";
+ Types[Context.LongTy] = "%ld";
+ Types[Context.UnsignedLongTy] = "%lu";
+ Types[Context.LongLongTy] = "%lld";
+ Types[Context.UnsignedLongLongTy] = "%llu";
+ Types[Context.ShortTy] = "%hd";
+ Types[Context.UnsignedShortTy] = "%hu";
+ Types[Context.VoidPtrTy] = "%p";
+ Types[Context.FloatTy] = "%f";
+ Types[Context.DoubleTy] = "%f";
+ Types[Context.LongDoubleTy] = "%Lf";
+ Types[Context.getPointerType(Context.CharTy)] = "%s";
+ Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
+ }
+
+ for (const auto *FD : RD->fields()) {
+ uint64_t Off = RL.getFieldOffset(FD->getFieldIndex());
+ Off = Ctx.toCharUnitsFromBits(Off).getQuantity();
+
+ Value *FieldPtr = RecordPtr;
+ if (RD->isUnion())
+ FieldPtr = CGF.Builder.CreatePointerCast(
+ FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
+ else
+ FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
+ FD->getFieldIndex());
+
+ GString = CGF.Builder.CreateGlobalStringPtr(
+ llvm::Twine(Pad)
+ .concat(FD->getType().getAsString())
+ .concat(llvm::Twine(' '))
+ .concat(FD->getNameAsString())
+ .concat(" : ")
+ .str());
+ Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
+ Res = CGF.Builder.CreateAdd(Res, TmpRes);
+
+ QualType CanonicalType =
+ FD->getType().getUnqualifiedType().getCanonicalType();
+
+    // If the field is itself a record type, dump it recursively.
+ if (CanonicalType->isRecordType()) {
+ Value *TmpRes =
+ dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
+ Res = CGF.Builder.CreateAdd(TmpRes, Res);
+ continue;
+ }
+
+    // Pick the best printf format for the field's type; fall back to %p.
+ llvm::Twine Format = Types.find(CanonicalType) == Types.end()
+ ? Types[Context.VoidPtrTy]
+ : Types[CanonicalType];
+
+ Address FieldAddress = Address(FieldPtr, Align);
+ FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
+
+    // FIXME: Bit-fields are not handled here yet.
+ GString = CGF.Builder.CreateGlobalStringPtr(
+ Format.concat(llvm::Twine('\n')).str());
+ TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
+ Res = CGF.Builder.CreateAdd(Res, TmpRes);
+ }
+
+ GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
+ Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
+ Res = CGF.Builder.CreateAdd(Res, TmpRes);
+ return Res;
+}
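A minimal usage sketch of the builtin this helper lowers (the struct, values, and output shown are illustrative): __builtin_dump_struct walks the record layout and emits one call to the supplied printf-style callback per field.

    struct P { int x; const char *name; };
    struct P p = {42, "hi"};
    __builtin_dump_struct(&p, &printf);
    // Prints roughly:
    //   struct P {
    //   int x : 42
    //   const char * name : hi
    //   }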
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue) {
@@ -962,6 +1288,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_copysign:
case Builtin::BI__builtin_copysignf:
case Builtin::BI__builtin_copysignl:
+ case Builtin::BI__builtin_copysignf128:
return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
case Builtin::BIcos:
@@ -994,6 +1321,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_fabsf128:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
case Builtin::BIfloor:
@@ -1154,16 +1482,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
+ // X < 0 ? -X : X
+ // The negation has 'nsw' because abs of INT_MIN is undefined.
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
- Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
- Value *CmpResult =
- Builder.CreateICmpSGE(ArgValue,
- llvm::Constant::getNullValue(ArgValue->getType()),
- "abscond");
- Value *Result =
- Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
-
+ Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
+ Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
+ Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
+ Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
return RValue::get(Result);
}
case Builtin::BI__builtin_conj:
@@ -1190,6 +1515,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(ComplexVal.first);
}
+ case Builtin::BI__builtin_dump_struct: {
+ Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
+ CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
+
+ const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
+ QualType Arg0Type = Arg0->getType()->getPointeeType();
+
+ Value *RecordPtr = EmitScalarExpr(Arg0);
+ Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, Func, 0);
+ return RValue::get(Res);
+ }
+
case Builtin::BI__builtin_cimag:
case Builtin::BI__builtin_cimagf:
case Builtin::BI__builtin_cimagl:
@@ -1300,20 +1637,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = Val->getType();
Shift = Builder.CreateIntCast(Shift, ArgType, false);
- unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
- Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth);
- Value *ArgZero = llvm::Constant::getNullValue(ArgType);
-
+ unsigned ArgWidth = ArgType->getIntegerBitWidth();
Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
- Shift = Builder.CreateAnd(Shift, Mask);
- Value *LeftShift = Builder.CreateSub(ArgTypeSize, Shift);
-
- Value *RightShifted = Builder.CreateLShr(Val, Shift);
- Value *LeftShifted = Builder.CreateShl(Val, LeftShift);
- Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted);
- Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero);
- Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated);
+ Value *RightShiftAmt = Builder.CreateAnd(Shift, Mask);
+ Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
+ Value *LeftShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
+ Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
+ Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
return RValue::get(Result);
}
case Builtin::BI_rotl8:
@@ -1326,20 +1657,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = Val->getType();
Shift = Builder.CreateIntCast(Shift, ArgType, false);
- unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
- Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth);
- Value *ArgZero = llvm::Constant::getNullValue(ArgType);
-
+ unsigned ArgWidth = ArgType->getIntegerBitWidth();
Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
- Shift = Builder.CreateAnd(Shift, Mask);
- Value *RightShift = Builder.CreateSub(ArgTypeSize, Shift);
-
- Value *LeftShifted = Builder.CreateShl(Val, Shift);
- Value *RightShifted = Builder.CreateLShr(Val, RightShift);
- Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted);
- Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero);
- Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated);
+ Value *LeftShiftAmt = Builder.CreateAnd(Shift, Mask);
+ Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
+ Value *RightShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
+ Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
+ Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
return RValue::get(Result);
}
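Both rewrites use the standard branch-free rotate, which removes the zero-shift select: the two shift amounts are masked into [0, width-1], so a shift of zero degenerates to OR-ing the value with itself. The same pattern in C (a sketch, shown for rotate-right; rotate-left swaps the two shifts):

    unsigned rotr32(unsigned x, unsigned s) {
      // (s & 31) and (-s & 31) are always in range, so neither shift is
      // undefined, and s == 0 yields (x >> 0) | (x << 0) == x.
      return (x >> (s & 31)) | (x << (-s & 31));
    }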
case Builtin::BI__builtin_unpredictable: {
@@ -1735,6 +2060,63 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
+ case Builtin::BI__builtin_wmemcmp: {
+ // The MSVC runtime library does not provide a definition of wmemcmp, so we
+ // need an inline implementation.
+ if (!getTarget().getTriple().isOSMSVCRT())
+ break;
+
+ llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
+
+ Value *Dst = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *Size = EmitScalarExpr(E->getArg(2));
+
+ BasicBlock *Entry = Builder.GetInsertBlock();
+ BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
+ BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
+ BasicBlock *Next = createBasicBlock("wmemcmp.next");
+ BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
+ Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
+ Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
+
+ EmitBlock(CmpGT);
+ PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
+ DstPhi->addIncoming(Dst, Entry);
+ PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
+ SrcPhi->addIncoming(Src, Entry);
+ PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
+ SizePhi->addIncoming(Size, Entry);
+ CharUnits WCharAlign =
+ getContext().getTypeAlignInChars(getContext().WCharTy);
+ Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
+ Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
+ Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
+ Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
+
+ EmitBlock(CmpLT);
+ Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
+ Builder.CreateCondBr(DstLtSrc, Exit, Next);
+
+ EmitBlock(Next);
+ Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
+ Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
+ Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
+ Value *NextSizeEq0 =
+ Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
+ Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
+ DstPhi->addIncoming(NextDst, Next);
+ SrcPhi->addIncoming(NextSrc, Next);
+ SizePhi->addIncoming(NextSize, Next);
+
+ EmitBlock(Exit);
+ PHINode *Ret = Builder.CreatePHI(IntTy, 4);
+ Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
+ Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
+ Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
+ Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
+ return RValue::get(Ret);
+ }
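The emitted control flow corresponds to the C loop below (a sketch; on MSVC targets wchar_t is an unsigned 16-bit type, hence the unsigned element compares):

    int wmemcmp_sketch(const wchar_t *a, const wchar_t *b, size_t n) {
      for (; n != 0; --n, ++a, ++b) {
        if (*a > *b) return 1;
        if (*a < *b) return -1;
      }
      return 0;
    }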
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
//
@@ -2033,7 +2415,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_synchronize: {
// We assume this is supposed to correspond to a C++0x-style
// sequentially-consistent fence (i.e. this is only usable for
- // synchonization, not device I/O or anything like that). This intrinsic
+ // synchronization, not device I/O or anything like that). This intrinsic
// is really badly designed in the sense that in theory, there isn't
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
@@ -2548,11 +2930,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer());
case Builtin::BI__builtin_operator_new:
- return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
- E->getArg(0), false);
+ return EmitBuiltinNewDeleteCall(
+ E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
case Builtin::BI__builtin_operator_delete:
- return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
- E->getArg(0), true);
+ return EmitBuiltinNewDeleteCall(
+ E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
+
case Builtin::BI__noop:
// __noop always evaluates to an integer literal zero.
return RValue::get(ConstantInt::get(IntTy, 0));
@@ -2639,9 +3022,26 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_InterlockedXor16:
case Builtin::BI_InterlockedXor:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
+
+ case Builtin::BI_bittest64:
+ case Builtin::BI_bittest:
+ case Builtin::BI_bittestandcomplement64:
+ case Builtin::BI_bittestandcomplement:
+ case Builtin::BI_bittestandreset64:
+ case Builtin::BI_bittestandreset:
+ case Builtin::BI_bittestandset64:
+ case Builtin::BI_bittestandset:
+ case Builtin::BI_interlockedbittestandreset:
+ case Builtin::BI_interlockedbittestandreset64:
+ case Builtin::BI_interlockedbittestandset64:
case Builtin::BI_interlockedbittestandset:
- return RValue::get(
- EmitMSVCBuiltinExpr(MSVCIntrin::_interlockedbittestandset, E));
+ case Builtin::BI_interlockedbittestandset_acq:
+ case Builtin::BI_interlockedbittestandset_rel:
+ case Builtin::BI_interlockedbittestandset_nf:
+ case Builtin::BI_interlockedbittestandreset_acq:
+ case Builtin::BI_interlockedbittestandreset_rel:
+ case Builtin::BI_interlockedbittestandreset_nf:
+ return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
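For reference, the plain _bittestandset behaves roughly like the C below; the _interlocked* variants perform the same read-modify-write atomically, with the _acq/_rel/_nf suffixes selecting the memory order. (A sketch that ignores the x86 bt-style addressing of bit indices beyond the first element.)

    unsigned char bittestandset_sketch(long *base, long idx) {
      unsigned char old = (unsigned char)((*base >> idx) & 1);
      *base |= (long)(1UL << idx);
      return old;
    }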
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
@@ -2652,59 +3052,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__abnormal_termination:
case Builtin::BI_abnormal_termination:
return RValue::get(EmitSEHAbnormalTermination());
- case Builtin::BI_setjmpex: {
- if (getTarget().getTriple().isOSMSVCRT()) {
- llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
- llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
- getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReturnsTwice);
- llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
- "_setjmpex", ReturnsTwiceAttr, /*Local=*/true);
- llvm::Value *Buf = Builder.CreateBitOrPointerCast(
- EmitScalarExpr(E->getArg(0)), Int8PtrTy);
- llvm::Value *FrameAddr =
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
- ConstantInt::get(Int32Ty, 0));
- llvm::Value *Args[] = {Buf, FrameAddr};
- llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args);
- CS.setAttributes(ReturnsTwiceAttr);
- return RValue::get(CS.getInstruction());
- }
+ case Builtin::BI_setjmpex:
+ if (getTarget().getTriple().isOSMSVCRT())
+ return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
break;
- }
- case Builtin::BI_setjmp: {
+ case Builtin::BI_setjmp:
if (getTarget().getTriple().isOSMSVCRT()) {
- llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
- getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReturnsTwice);
- llvm::Value *Buf = Builder.CreateBitOrPointerCast(
- EmitScalarExpr(E->getArg(0)), Int8PtrTy);
- llvm::CallSite CS;
- if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
- llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
- llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
- "_setjmp3", ReturnsTwiceAttr, /*Local=*/true);
- llvm::Value *Count = ConstantInt::get(IntTy, 0);
- llvm::Value *Args[] = {Buf, Count};
- CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
- } else {
- llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
- llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
- "_setjmp", ReturnsTwiceAttr, /*Local=*/true);
- llvm::Value *FrameAddr =
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
- ConstantInt::get(Int32Ty, 0));
- llvm::Value *Args[] = {Buf, FrameAddr};
- CS = EmitRuntimeCallOrInvoke(SetJmp, Args);
- }
- CS.setAttributes(ReturnsTwiceAttr);
- return RValue::get(CS.getInstruction());
+ if (getTarget().getTriple().getArch() == llvm::Triple::x86)
+ return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
+ else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
+ return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
+ return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
}
break;
- }
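The factored-out EmitMSVCRTSetJmp centralizes the lowering; as a summary sketch:

    // _setjmp   on x86:      _setjmp3(buf, 0)
    // _setjmp   on AArch64:  _setjmpex(buf, frame address)
    // _setjmp   elsewhere:   _setjmp(buf, frame address)
    // _setjmpex:             _setjmpex(buf, frame address)
    // Every call site carries the 'returns_twice' attribute.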
case Builtin::BI__GetExceptionInfo: {
if (llvm::GlobalVariable *GV =
@@ -2732,6 +3092,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
case Builtin::BI__builtin_coro_frame:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
+ case Builtin::BI__builtin_coro_noop:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
case Builtin::BI__builtin_coro_free:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
case Builtin::BI__builtin_coro_destroy:
@@ -2882,11 +3244,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
case Builtin::BIget_pipe_num_packets:
case Builtin::BIget_pipe_max_packets: {
- const char *Name;
+ const char *BaseName;
+ const PipeType *PipeTy = E->getArg(0)->getType()->getAs<PipeType>();
if (BuiltinID == Builtin::BIget_pipe_num_packets)
- Name = "__get_pipe_num_packets";
+ BaseName = "__get_pipe_num_packets";
else
- Name = "__get_pipe_max_packets";
+ BaseName = "__get_pipe_max_packets";
+ auto Name = std::string(BaseName) +
+ std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
// Building the generic function prototype.
Value *Arg0 = EmitScalarExpr(E->getArg(0));
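In other words, the query now dispatches on the pipe's access qualifier (a sketch; the trailing arguments follow the generic prototype built below):

    // read_only  pipe: get_pipe_num_packets(p) -> __get_pipe_num_packets_ro(...)
    // write_only pipe: get_pipe_num_packets(p) -> __get_pipe_num_packets_wo(...)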
@@ -2992,10 +3357,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return Ptr;
};
- // Could have events and/or vaargs.
+ // Could have events and/or varargs.
if (E->getArg(3)->getType()->isBlockPointerType()) {
// No events passed, but has variadic arguments.
- Name = "__enqueue_kernel_vaargs";
+ Name = "__enqueue_kernel_varargs";
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
@@ -3063,7 +3428,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Pass the number of variadics to the runtime function too.
Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
ArgTys.push_back(Int32Ty);
- Name = "__enqueue_kernel_events_vaargs";
+ Name = "__enqueue_kernel_events_varargs";
auto *PtrToSizeArray = CreateArrayForSizeVar(7);
Args.push_back(PtrToSizeArray);
@@ -3104,7 +3469,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
- "__get_kernel_preferred_work_group_multiple_impl"),
+ "__get_kernel_preferred_work_group_size_multiple_impl"),
{Kernel, Arg}));
}
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
@@ -3175,6 +3540,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__xray_customevent: {
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
+
+ if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::Custom))
+ return RValue::getIgnored();
+
if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
return RValue::getIgnored();
@@ -3198,6 +3568,44 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
}
+ case Builtin::BI__xray_typedevent: {
+ // TODO: There should be a way to always emit events even if the current
+ // function is not instrumented. Losing events in a stream can cripple
+ // a trace.
+ if (!ShouldXRayInstrumentFunction())
+ return RValue::getIgnored();
+
+ if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::Typed))
+ return RValue::getIgnored();
+
+ if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
+ if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
+ return RValue::getIgnored();
+
+ Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
+ auto FTy = F->getFunctionType();
+ auto Arg0 = EmitScalarExpr(E->getArg(0));
+ auto PTy0 = FTy->getParamType(0);
+ if (PTy0 != Arg0->getType())
+ Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
+ auto Arg1 = E->getArg(1);
+ auto Arg1Val = EmitScalarExpr(Arg1);
+ auto Arg1Ty = Arg1->getType();
+ auto PTy1 = FTy->getParamType(1);
+ if (PTy1 != Arg1Val->getType()) {
+ if (Arg1Ty->isArrayType())
+ Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
+ else
+ Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
+ }
+ auto Arg2 = EmitScalarExpr(E->getArg(2));
+ auto PTy2 = FTy->getParamType(2);
+ if (PTy2 != Arg2->getType())
+ Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
+ return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
+ }
+
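Usage sketch for the two XRay event builtins (names are illustrative; the exact integer widths follow the intrinsic signatures, to which the code above truncates or bitcasts its arguments):

    __xray_customevent(buf, len);             // untyped in-trace event record
    __xray_typedevent(event_type, buf, len);  // typed in-trace event record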
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_ms_va_end:
return RValue::get(
@@ -3246,6 +3654,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// can move this up to the beginning of the function.
checkTargetFeatures(E, FD);
+  if (unsigned VectorWidth =
+          getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
+ LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
+
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
@@ -3253,7 +3664,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
if (!Prefix.empty()) {
IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
- // NOTE we dont need to perform a compatibility flag check here since the
+ // NOTE we don't need to perform a compatibility flag check here since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN, and the MS
// builtins are restricted via ALL_MS_LANGUAGES and filtered out earlier.
if (IntrinsicID == Intrinsic::not_intrinsic)
@@ -3378,7 +3789,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
- llvm::Triple::ArchType Arch,
+ bool HasLegalHalfType=true,
bool V1Ty=false) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
@@ -3389,9 +3800,7 @@ static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
case NeonTypeFlags::Poly16:
return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
- // FIXME: Only AArch64 backend can so far properly handle half types.
- // Remove else part once ARM backend support for half is complete.
- if (Arch == llvm::Triple::aarch64)
+ if (HasLegalHalfType)
return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
else
return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
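With this change the Float16 element type is chosen by target capability rather than by architecture; the resulting mapping (sketch):

    // Float16, 64-bit vector:  <4 x half>, or <4 x i16> when half is not legal
    // Float16, 128-bit vector: <8 x half>, or <8 x i16> when half is not legal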
@@ -3454,7 +3863,7 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
return ConstantInt::get(Ty, neg ? -SV : SV);
}
-// \brief Right-shift a vector by a constant.
+// Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
llvm::Type *Ty, bool usgn,
const char *name) {
@@ -3557,13 +3966,24 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
+ NEONMAP0(vceqz_v),
+ NEONMAP0(vceqzq_v),
+ NEONMAP0(vcgez_v),
+ NEONMAP0(vcgezq_v),
+ NEONMAP0(vcgtz_v),
+ NEONMAP0(vcgtzq_v),
+ NEONMAP0(vclez_v),
+ NEONMAP0(vclezq_v),
NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
+ NEONMAP0(vcltz_v),
+ NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
+ NEONMAP0(vcvt_f16_v),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
@@ -3583,6 +4003,7 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
@@ -3627,6 +4048,7 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
+ NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
@@ -3642,6 +4064,8 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vcvtq_u16_v),
NEONMAP0(vcvtq_u32_v),
NEONMAP0(vcvtq_u64_v),
+ NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
+ NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
@@ -3652,18 +4076,30 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP0(vld1_dup_v),
NEONMAP1(vld1_v, arm_neon_vld1, 0),
+ NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
+ NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
+ NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
NEONMAP0(vld1q_dup_v),
NEONMAP1(vld1q_v, arm_neon_vld1, 0),
+ NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
+ NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
+ NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
+ NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2_v, arm_neon_vld2, 0),
+ NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_v, arm_neon_vld2, 0),
+ NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3_v, arm_neon_vld3, 0),
+ NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_v, arm_neon_vld3, 0),
+ NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4_v, arm_neon_vld4, 0),
+ NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
@@ -3722,6 +4158,8 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP0(vrndi_v),
+ NEONMAP0(vrndiq_v),
NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
@@ -3755,7 +4193,13 @@ static const NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP1(vst1_v, arm_neon_vst1, 0),
+ NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
+ NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
+ NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst1q_v, arm_neon_vst1, 0),
+ NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
+ NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
+ NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2_v, arm_neon_vst2, 0),
NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
@@ -3795,8 +4239,18 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
+ NEONMAP0(vceqz_v),
+ NEONMAP0(vceqzq_v),
+ NEONMAP0(vcgez_v),
+ NEONMAP0(vcgezq_v),
+ NEONMAP0(vcgtz_v),
+ NEONMAP0(vcgtzq_v),
+ NEONMAP0(vclez_v),
+ NEONMAP0(vclezq_v),
NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
+ NEONMAP0(vcltz_v),
+ NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
@@ -3826,6 +4280,8 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
+ NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
+ NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
@@ -3834,6 +4290,12 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
+ NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
+ NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
+ NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
+ NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
+ NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
@@ -3874,6 +4336,8 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
+ NEONMAP0(vrndi_v),
+ NEONMAP0(vrndiq_v),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
@@ -3897,6 +4361,12 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
+ NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
+ NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
+ NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
+ NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
+ NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
+ NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
@@ -4095,6 +4565,37 @@ static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
+  // FP16 scalar intrinsics go here.
+ NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
+ NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+ NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
+ NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
+ NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
+ NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
+ NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
#undef NEONMAP0
@@ -4244,8 +4745,9 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
NeonTypeFlags Type(NeonTypeConst.getZExtValue());
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
+ const bool HasLegalHalfType = getTarget().hasLegalHalfType();
- llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
+ llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -4310,6 +4812,26 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
}
+ case NEON::BI__builtin_neon_vceqz_v:
+ case NEON::BI__builtin_neon_vceqzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
+ ICmpInst::ICMP_EQ, "vceqz");
+ case NEON::BI__builtin_neon_vcgez_v:
+ case NEON::BI__builtin_neon_vcgezq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
+ ICmpInst::ICMP_SGE, "vcgez");
+ case NEON::BI__builtin_neon_vclez_v:
+ case NEON::BI__builtin_neon_vclezq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
+ ICmpInst::ICMP_SLE, "vclez");
+ case NEON::BI__builtin_neon_vcgtz_v:
+ case NEON::BI__builtin_neon_vcgtzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
+ ICmpInst::ICMP_SGT, "vcgtz");
+ case NEON::BI__builtin_neon_vcltz_v:
+ case NEON::BI__builtin_neon_vcltzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
+ ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vclz_v:
case NEON::BI__builtin_neon_vclzq_v:
// We generate target-independent intrinsic, which needs a second argument
@@ -4319,13 +4841,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_f32_v:
case NEON::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), Arch);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
+ HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f16_v:
case NEON::BI__builtin_neon_vcvtq_f16_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), Arch);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
+ HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_n_f16_v:
@@ -4374,6 +4898,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
+ case NEON::BI__builtin_neon_vcvta_u16_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_s16_v:
@@ -4448,12 +4973,33 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
}
+ case NEON::BI__builtin_neon_vld1_x2_v:
+ case NEON::BI__builtin_neon_vld1q_x2_v:
+ case NEON::BI__builtin_neon_vld1_x3_v:
+ case NEON::BI__builtin_neon_vld1q_x3_v:
+ case NEON::BI__builtin_neon_vld1_x4_v:
+ case NEON::BI__builtin_neon_vld1q_x4_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+ Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
+ llvm::Type *Tys[2] = { VTy, PTy };
+ Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ }
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
- case NEON::BI__builtin_neon_vld4q_v: {
+ case NEON::BI__builtin_neon_vld4q_v:
+ case NEON::BI__builtin_neon_vld2_dup_v:
+ case NEON::BI__builtin_neon_vld2q_dup_v:
+ case NEON::BI__builtin_neon_vld3_dup_v:
+ case NEON::BI__builtin_neon_vld3q_dup_v:
+ case NEON::BI__builtin_neon_vld4_dup_v:
+ case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Value *Align = getAlignmentValue32(PtrOp1);
@@ -4552,7 +5098,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vrsqrteq_v:
Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
-
+ case NEON::BI__builtin_neon_vrndi_v:
+ case NEON::BI__builtin_neon_vrndiq_v:
+ Int = Intrinsic::nearbyint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
@@ -4603,6 +5152,23 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
+ case NEON::BI__builtin_neon_vst1_x2_v:
+ case NEON::BI__builtin_neon_vst1q_x2_v:
+ case NEON::BI__builtin_neon_vst1_x3_v:
+ case NEON::BI__builtin_neon_vst1q_x3_v:
+ case NEON::BI__builtin_neon_vst1_x4_v:
+ case NEON::BI__builtin_neon_vst1q_x4_v: {
+ llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
+    // in AArch64 it comes last. We may want to standardize on one or the other.
+ if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) {
+ llvm::Type *Tys[2] = { VTy, PTy };
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
+ }
+ llvm::Type *Tys[2] = { PTy, VTy };
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
+ }
case NEON::BI__builtin_neon_vsubhn_v: {
llvm::VectorType *SrcTy =
llvm::VectorType::getExtendedElementVectorType(VTy);
@@ -4685,6 +5251,14 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
return SV;
}
+ case NEON::BI__builtin_neon_vdot_v:
+ case NEON::BI__builtin_neon_vdotq_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
+ }
}
assert(Int && "Expected valid intrinsic number");
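The new vdot mapping implements the ARMv8.2-A dot-product semantics: each 32-bit lane accumulates a four-way product of adjacent 8-bit elements (a sketch):

    // r[i] = acc[i] + a[4*i+0]*b[4*i+0] + a[4*i+1]*b[4*i+1]
    //               + a[4*i+2]*b[4*i+2] + a[4*i+3]*b[4*i+3]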
@@ -4893,6 +5467,34 @@ static bool HasExtraNeonArgument(unsigned BuiltinID) {
return true;
}
+Value *CodeGenFunction::EmitISOVolatileLoad(const CallExpr *E) {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ LoadSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::LoadInst *Load =
+ Builder.CreateAlignedLoad(Ptr, LoadSize);
+ Load->setVolatile(true);
+ return Load;
+}
+
+Value *CodeGenFunction::EmitISOVolatileStore(const CallExpr *E) {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Value = EmitScalarExpr(E->getArg(1));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateAlignedStore(Value, Ptr,
+ StoreSize);
+ Store->setVolatile(true);
+ return Store;
+}
+
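Usage sketch: the __iso_volatile_* intrinsics always perform a plain ISO-C-semantics volatile access of the stated width, with no implied memory barrier (even under /volatile:ms):

    int v = __iso_volatile_load32(p);   // volatile 32-bit load, no barrier
    __iso_volatile_store32(p, v);       // volatile 32-bit store, no barrier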
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
@@ -5135,35 +5737,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__iso_volatile_load8:
case ARM::BI__iso_volatile_load16:
case ARM::BI__iso_volatile_load32:
- case ARM::BI__iso_volatile_load64: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- LoadSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::LoadInst *Load =
- Builder.CreateAlignedLoad(Ptr, LoadSize);
- Load->setVolatile(true);
- return Load;
- }
+ case ARM::BI__iso_volatile_load64:
+ return EmitISOVolatileLoad(E);
case ARM::BI__iso_volatile_store8:
case ARM::BI__iso_volatile_store16:
case ARM::BI__iso_volatile_store32:
- case ARM::BI__iso_volatile_store64: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Value = EmitScalarExpr(E->getArg(1));
- QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- StoreSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::StoreInst *Store =
- Builder.CreateAlignedStore(Value, Ptr,
- StoreSize);
- Store->setVolatile(true);
- return Store;
- }
+ case ARM::BI__iso_volatile_store64:
+ return EmitISOVolatileStore(E);
}
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
@@ -5308,8 +5888,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v:
case NEON::BI__builtin_neon_vld2_dup_v:
+ case NEON::BI__builtin_neon_vld2q_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
+ case NEON::BI__builtin_neon_vld3q_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
+ case NEON::BI__builtin_neon_vld4q_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
@@ -5345,6 +5928,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vgetq_lane_f32:
return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
+ case NEON::BI__builtin_neon_vrndns_f32: {
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *Tys[] = {Arg->getType()};
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
+    return Builder.CreateCall(F, {Arg}, "vrndn");
+  }
+
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
@@ -5434,7 +6023,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
bool usgn = Type.isUnsigned();
bool rightShift = false;
- llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
+ llvm::VectorType *VTy = GetNeonType(this, Type,
+ getTarget().hasLegalHalfType());
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -5479,68 +6069,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
- case NEON::BI__builtin_neon_vld2_dup_v:
- case NEON::BI__builtin_neon_vld3_dup_v:
- case NEON::BI__builtin_neon_vld4_dup_v: {
- // Handle 64-bit elements as a special-case. There is no "dup" needed.
- if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vld2_dup_v:
- Int = Intrinsic::arm_neon_vld2;
- break;
- case NEON::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld3;
- break;
- case NEON::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld4;
- break;
- default: llvm_unreachable("unknown vld_dup intrinsic?");
- }
- llvm::Type *Tys[] = {Ty, Int8PtrTy};
- Function *F = CGM.getIntrinsic(Int, Tys);
- llvm::Value *Align = getAlignmentValue32(PtrOp1);
- Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vld2_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
- break;
- case NEON::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld3lane;
- break;
- case NEON::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld4lane;
- break;
- default: llvm_unreachable("unknown vld_dup intrinsic?");
- }
- llvm::Type *Tys[] = {Ty, Int8PtrTy};
- Function *F = CGM.getIntrinsic(Int, Tys);
- llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
-
- SmallVector<Value*, 6> Args;
- Args.push_back(Ops[1]);
- Args.append(STy->getNumElements(), UndefValue::get(Ty));
-
- llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
- Args.push_back(CI);
- Args.push_back(getAlignmentValue32(PtrOp1));
-
- Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
- // splat lane 0 to all elts in each vector of the result.
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Value *Val = Builder.CreateExtractValue(Ops[1], i);
- Value *Elt = Builder.CreateBitCast(Val, Ty);
- Elt = EmitNeonSplat(Elt, CI);
- Elt = Builder.CreateBitCast(Elt, Val->getType());
- Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
- }
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
@@ -5680,7 +6208,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
- llvm::VectorType *Ty = GetNeonType(&CGF, Type, Arch);
+ llvm::VectorType *Ty = GetNeonType(&CGF, Type);
if (!Ty)
return nullptr;
@@ -5799,18 +6327,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
HintID = 0;
break;
case AArch64::BI__builtin_arm_yield:
+ case AArch64::BI__yield:
HintID = 1;
break;
case AArch64::BI__builtin_arm_wfe:
+ case AArch64::BI__wfe:
HintID = 2;
break;
case AArch64::BI__builtin_arm_wfi:
+ case AArch64::BI__wfi:
HintID = 3;
break;
case AArch64::BI__builtin_arm_sev:
+ case AArch64::BI__sev:
HintID = 4;
break;
case AArch64::BI__builtin_arm_sevl:
+ case AArch64::BI__sevl:
HintID = 5;
break;
}
@@ -6077,6 +6610,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Handle non-overloaded intrinsics first.
switch (BuiltinID) {
default: break;
+ case NEON::BI__builtin_neon_vabsh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
@@ -6119,6 +6655,153 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
+ case NEON::BI__builtin_neon_vcvth_f16_u16:
+ case NEON::BI__builtin_neon_vcvth_f16_u32:
+ case NEON::BI__builtin_neon_vcvth_f16_u64:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_f16_s16:
+ case NEON::BI__builtin_neon_vcvth_f16_s32:
+ case NEON::BI__builtin_neon_vcvth_f16_s64: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ llvm::Type *FTy = HalfTy;
+ llvm::Type *InTy;
+ if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
+ InTy = Int64Ty;
+ else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
+ InTy = Int32Ty;
+ else
+ InTy = Int16Ty;
+ Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
+ if (usgn)
+ return Builder.CreateUIToFP(Ops[0], FTy);
+ return Builder.CreateSIToFP(Ops[0], FTy);
+ }
+ case NEON::BI__builtin_neon_vcvth_u16_f16:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_s16_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Int16Ty);
+ return Builder.CreateFPToSI(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vcvth_u32_f16:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_s32_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Int32Ty);
+ return Builder.CreateFPToSI(Ops[0], Int32Ty);
+ }
+ case NEON::BI__builtin_neon_vcvth_u64_f16:
+ usgn = true;
+ // FALL THROUGH
+ case NEON::BI__builtin_neon_vcvth_s64_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ if (usgn)
+ return Builder.CreateFPToUI(Ops[0], Int64Ty);
+ return Builder.CreateFPToSI(Ops[0], Int64Ty);
+ }
+ case NEON::BI__builtin_neon_vcvtah_u16_f16:
+ case NEON::BI__builtin_neon_vcvtmh_u16_f16:
+ case NEON::BI__builtin_neon_vcvtnh_u16_f16:
+ case NEON::BI__builtin_neon_vcvtph_u16_f16:
+ case NEON::BI__builtin_neon_vcvtah_s16_f16:
+ case NEON::BI__builtin_neon_vcvtmh_s16_f16:
+ case NEON::BI__builtin_neon_vcvtnh_s16_f16:
+ case NEON::BI__builtin_neon_vcvtph_s16_f16: {
+ unsigned Int;
+ llvm::Type* InTy = Int32Ty;
+ llvm::Type* FTy = HalfTy;
+ llvm::Type *Tys[2] = {InTy, FTy};
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vcvtah_u16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtau; break;
+ case NEON::BI__builtin_neon_vcvtmh_u16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtmu; break;
+ case NEON::BI__builtin_neon_vcvtnh_u16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtnu; break;
+ case NEON::BI__builtin_neon_vcvtph_u16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtpu; break;
+ case NEON::BI__builtin_neon_vcvtah_s16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtas; break;
+ case NEON::BI__builtin_neon_vcvtmh_s16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtms; break;
+ case NEON::BI__builtin_neon_vcvtnh_s16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtns; break;
+ case NEON::BI__builtin_neon_vcvtph_s16_f16:
+ Int = Intrinsic::aarch64_neon_fcvtps; break;
+ }
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vcaleh_f16:
+ case NEON::BI__builtin_neon_vcalth_f16:
+ case NEON::BI__builtin_neon_vcageh_f16:
+ case NEON::BI__builtin_neon_vcagth_f16: {
+ unsigned Int;
+ llvm::Type* InTy = Int32Ty;
+ llvm::Type* FTy = HalfTy;
+ llvm::Type *Tys[2] = {InTy, FTy};
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vcageh_f16:
+ Int = Intrinsic::aarch64_neon_facge; break;
+ case NEON::BI__builtin_neon_vcagth_f16:
+ Int = Intrinsic::aarch64_neon_facgt; break;
+ case NEON::BI__builtin_neon_vcaleh_f16:
+ Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
+ case NEON::BI__builtin_neon_vcalth_f16:
+ Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
+ }
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vcvth_n_s16_f16:
+ case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
+ unsigned Int;
+ llvm::Type* InTy = Int32Ty;
+ llvm::Type* FTy = HalfTy;
+ llvm::Type *Tys[2] = {InTy, FTy};
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vcvth_n_s16_f16:
+ Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
+ case NEON::BI__builtin_neon_vcvth_n_u16_f16:
+ Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
+ }
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
+ return Builder.CreateTrunc(Ops[0], Int16Ty);
+ }
+ case NEON::BI__builtin_neon_vcvth_n_f16_s16:
+ case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
+ unsigned Int;
+ llvm::Type* FTy = HalfTy;
+ llvm::Type* InTy = Int32Ty;
+ llvm::Type *Tys[2] = {FTy, InTy};
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vcvth_n_f16_s16:
+ Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
+ Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
+ break;
+ case NEON::BI__builtin_neon_vcvth_n_f16_u16:
+ Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
+ Ops[0] = Builder.CreateZExt(Ops[0], InTy);
+ break;
+ }
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
+ }
case NEON::BI__builtin_neon_vpaddd_s64: {
llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
@@ -6160,6 +6843,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vceqzd_s64:
case NEON::BI__builtin_neon_vceqzd_f64:
case NEON::BI__builtin_neon_vceqzs_f32:
+ case NEON::BI__builtin_neon_vceqzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6167,6 +6851,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgezd_s64:
case NEON::BI__builtin_neon_vcgezd_f64:
case NEON::BI__builtin_neon_vcgezs_f32:
+ case NEON::BI__builtin_neon_vcgezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6174,6 +6859,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vclezd_s64:
case NEON::BI__builtin_neon_vclezd_f64:
case NEON::BI__builtin_neon_vclezs_f32:
+ case NEON::BI__builtin_neon_vclezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6181,6 +6867,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcgtzd_s64:
case NEON::BI__builtin_neon_vcgtzd_f64:
case NEON::BI__builtin_neon_vcgtzs_f32:
+ case NEON::BI__builtin_neon_vcgtzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6188,6 +6875,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcltzd_s64:
case NEON::BI__builtin_neon_vcltzd_f64:
case NEON::BI__builtin_neon_vcltzs_f32:
+ case NEON::BI__builtin_neon_vcltzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
@@ -6240,6 +6928,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
+ case NEON::BI__builtin_neon_vceqh_f16:
+ case NEON::BI__builtin_neon_vcleh_f16:
+ case NEON::BI__builtin_neon_vclth_f16:
+ case NEON::BI__builtin_neon_vcgeh_f16:
+ case NEON::BI__builtin_neon_vcgth_f16: {
+ llvm::CmpInst::Predicate P;
+ switch (BuiltinID) {
+ default: llvm_unreachable("missing builtin ID in switch!");
+ case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
+ case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
+ case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
+ case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
+ case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
+ }
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
+ }
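As with the f32/f64 scalar compares above, the result is the comparison bit sign-extended to the element width, i.e. an all-ones or all-zeros mask (a sketch):

    uint16_t m = vcgth_f16(a, b);   // 0xFFFF if a > b (ordered), else 0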
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64:
case NEON::BI__builtin_neon_vcgtd_s64:
@@ -6377,6 +7085,31 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::VectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
+ case NEON::BI__builtin_neon_vaddh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
+ case NEON::BI__builtin_neon_vsubh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
+ case NEON::BI__builtin_neon_vmulh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
+ case NEON::BI__builtin_neon_vdivh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
+ case NEON::BI__builtin_neon_vfmah_f16: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ // NEON intrinsic puts accumulator first, unlike the LLVM fma.
+ return Builder.CreateCall(F,
+ {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
+ }
+ case NEON::BI__builtin_neon_vfmsh_f16: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
+ Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
+ Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+ // NEON intrinsic puts accumulator first, unlike the LLVM fma.
+ return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+ }
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
@@ -6534,7 +7267,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
}
- llvm::VectorType *VTy = GetNeonType(this, Type, Arch);
+ llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
@@ -6599,7 +7332,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, true), Arch);
+ NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
@@ -6651,12 +7384,22 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
+ case NEON::BI__builtin_neon_vmaxh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fmax;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
+ }
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
+ case NEON::BI__builtin_neon_vminh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fmin;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
+ }
case NEON::BI__builtin_neon_vabd_v:
case NEON::BI__builtin_neon_vabdq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
@@ -6695,20 +7438,31 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vminnmq_v:
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
+ case NEON::BI__builtin_neon_vminnmh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fminnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
+ case NEON::BI__builtin_neon_vmaxnmh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Int = Intrinsic::aarch64_neon_fmaxnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
Ops, "vrecps");
}
- case NEON::BI__builtin_neon_vrecpsd_f64: {
+ case NEON::BI__builtin_neon_vrecpsd_f64:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
Ops, "vrecps");
- }
+ case NEON::BI__builtin_neon_vrecpsh_f16:
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
+ Ops, "vrecps");
case NEON::BI__builtin_neon_vqshrun_n_v:
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
@@ -6724,72 +7478,87 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
+ case NEON::BI__builtin_neon_vrndah_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::round;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
+ }
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
Int = Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
- case NEON::BI__builtin_neon_vrndi_v:
- case NEON::BI__builtin_neon_vrndiq_v: {
+ case NEON::BI__builtin_neon_vrndih_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::nearbyint;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
+ }
+ case NEON::BI__builtin_neon_vrndmh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::floor;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
Int = Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
+ case NEON::BI__builtin_neon_vrndnh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frintn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
+ }
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
Int = Intrinsic::aarch64_neon_frintn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
+ case NEON::BI__builtin_neon_vrndns_f32: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frintn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
+ }
+ case NEON::BI__builtin_neon_vrndph_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::ceil;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
+ }
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
Int = Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
+ case NEON::BI__builtin_neon_vrndxh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::rint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
+ }
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
Int = Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
+ case NEON::BI__builtin_neon_vrndh_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::trunc;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
+ }
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
Int = Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
- case NEON::BI__builtin_neon_vceqz_v:
- case NEON::BI__builtin_neon_vceqzq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
- ICmpInst::ICMP_EQ, "vceqz");
- case NEON::BI__builtin_neon_vcgez_v:
- case NEON::BI__builtin_neon_vcgezq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
- ICmpInst::ICMP_SGE, "vcgez");
- case NEON::BI__builtin_neon_vclez_v:
- case NEON::BI__builtin_neon_vclezq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
- ICmpInst::ICMP_SLE, "vclez");
- case NEON::BI__builtin_neon_vcgtz_v:
- case NEON::BI__builtin_neon_vcgtzq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
- ICmpInst::ICMP_SGT, "vcgtz");
- case NEON::BI__builtin_neon_vcltz_v:
- case NEON::BI__builtin_neon_vcltzq_v:
- return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
- ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad), Arch);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f64_f32: {
assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
"unexpected vcvt_f64_f32 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag, Arch));
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
}
@@ -6797,7 +7566,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
assert(Type.getEltType() == NeonTypeFlags::Float32 &&
"unexpected vcvt_f32_f64 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag, Arch));
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
}
@@ -6805,20 +7574,21 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
- case NEON::BI__builtin_neon_vcvt_s16_v:
- case NEON::BI__builtin_neon_vcvt_u16_v:
+ case NEON::BI__builtin_neon_vcvt_s16_v:
+ case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
- case NEON::BI__builtin_neon_vcvtq_s16_v:
- case NEON::BI__builtin_neon_vcvtq_u16_v: {
+ case NEON::BI__builtin_neon_vcvtq_s16_v:
+ case NEON::BI__builtin_neon_vcvtq_u16_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
if (usgn)
return Builder.CreateFPToUI(Ops[0], Ty);
return Builder.CreateFPToSI(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vcvta_s16_v:
+ case NEON::BI__builtin_neon_vcvta_u16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
@@ -6886,6 +7656,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
}
+ case NEON::BI__builtin_neon_vmulxh_lane_f16:
+ case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
+    // vmulxh_lane should be mapped to the scalar fmulx intrinsic after
+    // extracting the lane element from the vector operand.
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
+ Ops.pop_back();
+ Int = Intrinsic::aarch64_neon_fmulx;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
+ }
case NEON::BI__builtin_neon_vmul_lane_v:
case NEON::BI__builtin_neon_vmul_laneq_v: {
// v1f64 vmul_lane should be mapped to Neon scalar mul lane
@@ -6894,7 +7674,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Quad = true;
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
- NeonTypeFlags(NeonTypeFlags::Float64, false, Quad), Arch);
+ NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
@@ -6902,6 +7682,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vnegd_s64:
return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
+ case NEON::BI__builtin_neon_vnegh_f16:
+ return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
case NEON::BI__builtin_neon_vpmaxnm_v:
case NEON::BI__builtin_neon_vpmaxnmq_v: {
Int = Intrinsic::aarch64_neon_fmaxnmp;
@@ -6912,6 +7694,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_fminnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
}
+ case NEON::BI__builtin_neon_vsqrth_f16: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::sqrt;
+ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
+ }
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
Int = Intrinsic::sqrt;
@@ -7289,64 +8076,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return Builder.CreateAdd(Ops[0], tmp);
}
- // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
- // of an Align parameter here.
- case NEON::BI__builtin_neon_vld1_x2_v:
- case NEON::BI__builtin_neon_vld1q_x2_v:
- case NEON::BI__builtin_neon_vld1_x3_v:
- case NEON::BI__builtin_neon_vld1q_x3_v:
- case NEON::BI__builtin_neon_vld1_x4_v:
- case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
- unsigned Int;
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vld1_x2_v:
- case NEON::BI__builtin_neon_vld1q_x2_v:
- Int = Intrinsic::aarch64_neon_ld1x2;
- break;
- case NEON::BI__builtin_neon_vld1_x3_v:
- case NEON::BI__builtin_neon_vld1q_x3_v:
- Int = Intrinsic::aarch64_neon_ld1x3;
- break;
- case NEON::BI__builtin_neon_vld1_x4_v:
- case NEON::BI__builtin_neon_vld1q_x4_v:
- Int = Intrinsic::aarch64_neon_ld1x4;
- break;
- }
- Function *F = CGM.getIntrinsic(Int, Tys);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
- }
- case NEON::BI__builtin_neon_vst1_x2_v:
- case NEON::BI__builtin_neon_vst1q_x2_v:
- case NEON::BI__builtin_neon_vst1_x3_v:
- case NEON::BI__builtin_neon_vst1q_x3_v:
- case NEON::BI__builtin_neon_vst1_x4_v:
- case NEON::BI__builtin_neon_vst1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
- llvm::Type *Tys[2] = { VTy, PTy };
- unsigned Int;
- switch (BuiltinID) {
- case NEON::BI__builtin_neon_vst1_x2_v:
- case NEON::BI__builtin_neon_vst1q_x2_v:
- Int = Intrinsic::aarch64_neon_st1x2;
- break;
- case NEON::BI__builtin_neon_vst1_x3_v:
- case NEON::BI__builtin_neon_vst1q_x3_v:
- Int = Intrinsic::aarch64_neon_st1x3;
- break;
- case NEON::BI__builtin_neon_vst1_x4_v:
- case NEON::BI__builtin_neon_vst1q_x4_v:
- Int = Intrinsic::aarch64_neon_st1x4;
- break;
- }
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
- }
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
@@ -7653,6 +8382,38 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_suqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
}
+ case AArch64::BI__iso_volatile_load8:
+ case AArch64::BI__iso_volatile_load16:
+ case AArch64::BI__iso_volatile_load32:
+ case AArch64::BI__iso_volatile_load64:
+ return EmitISOVolatileLoad(E);
+ case AArch64::BI__iso_volatile_store8:
+ case AArch64::BI__iso_volatile_store16:
+ case AArch64::BI__iso_volatile_store32:
+ case AArch64::BI__iso_volatile_store64:
+ return EmitISOVolatileStore(E);
+ case AArch64::BI_BitScanForward:
+ case AArch64::BI_BitScanForward64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
+ case AArch64::BI_BitScanReverse:
+ case AArch64::BI_BitScanReverse64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+ case AArch64::BI_InterlockedAnd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+ case AArch64::BI_InterlockedExchange64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+ case AArch64::BI_InterlockedExchangeAdd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+ case AArch64::BI_InterlockedExchangeSub64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+ case AArch64::BI_InterlockedOr64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+ case AArch64::BI_InterlockedXor64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+ case AArch64::BI_InterlockedDecrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+ case AArch64::BI_InterlockedIncrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
}
}
@@ -7704,42 +8465,66 @@ static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
}
static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
- SmallVectorImpl<Value *> &Ops,
+ ArrayRef<Value *> Ops,
unsigned Align) {
// Cast the pointer to right type.
- Ops[0] = CGF.Builder.CreateBitCast(Ops[0],
+ Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- // If the mask is all ones just emit a regular store.
- if (const auto *C = dyn_cast<Constant>(Ops[2]))
- if (C->isAllOnesValue())
- return CGF.Builder.CreateAlignedStore(Ops[1], Ops[0], Align);
-
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedStore(Ops[1], Ops[0], Align, MaskVec);
+ return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
- SmallVectorImpl<Value *> &Ops, unsigned Align) {
+ ArrayRef<Value *> Ops, unsigned Align) {
// Cast the pointer to right type.
- Ops[0] = CGF.Builder.CreateBitCast(Ops[0],
+ Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
- // If the mask is all ones just emit a regular store.
- if (const auto *C = dyn_cast<Constant>(Ops[2]))
- if (C->isAllOnesValue())
- return CGF.Builder.CreateAlignedLoad(Ops[0], Align);
-
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedLoad(Ops[0], Align, MaskVec, Ops[1]);
+ return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
+}
+
+static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops) {
+ llvm::Type *ResultTy = Ops[1]->getType();
+ llvm::Type *PtrTy = ResultTy->getVectorElementType();
+
+ // Cast the pointer to element type.
+ Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(PtrTy));
+
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+ ResultTy->getVectorNumElements());
+
+ llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
+ ResultTy);
+ return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
+}
+
+static Value *EmitX86CompressStore(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops) {
+ llvm::Type *ResultTy = Ops[1]->getType();
+ llvm::Type *PtrTy = ResultTy->getVectorElementType();
+
+ // Cast the pointer to element type.
+ Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(PtrTy));
+
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+ ResultTy->getVectorNumElements());
+
+ llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
+ ResultTy);
+ return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
}
static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
- unsigned NumElts, SmallVectorImpl<Value *> &Ops,
+ unsigned NumElts, ArrayRef<Value *> Ops,
bool InvertLHS = false) {
Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
@@ -7751,26 +8536,6 @@ static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
CGF.Builder.getIntNTy(std::max(NumElts, 8U)));
}
-static Value *EmitX86SubVectorBroadcast(CodeGenFunction &CGF,
- SmallVectorImpl<Value *> &Ops,
- llvm::Type *DstTy,
- unsigned SrcSizeInBits,
- unsigned Align) {
- // Load the subvector.
- Ops[0] = CGF.Builder.CreateAlignedLoad(Ops[0], Align);
-
- // Create broadcast mask.
- unsigned NumDstElts = DstTy->getVectorNumElements();
- unsigned NumSrcElts = SrcSizeInBits / DstTy->getScalarSizeInBits();
-
- SmallVector<uint32_t, 8> Mask;
- for (unsigned i = 0; i != NumDstElts; i += NumSrcElts)
- for (unsigned j = 0; j != NumSrcElts; ++j)
- Mask.push_back(j);
-
- return CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], Mask, "subvecbcst");
-}
-
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
@@ -7784,8 +8549,48 @@ static Value *EmitX86Select(CodeGenFunction &CGF,
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
+static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
+ Value *Mask, Value *Op0, Value *Op1) {
+  // If the mask is all ones, just return the first argument.
+ if (const auto *C = dyn_cast<Constant>(Mask))
+ if (C->isAllOnesValue())
+ return Op0;
+
+ llvm::VectorType *MaskTy =
+ llvm::VectorType::get(CGF.Builder.getInt1Ty(),
+ Mask->getType()->getIntegerBitWidth());
+ Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
+ Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
+ return CGF.Builder.CreateSelect(Mask, Op0, Op1);
+}
+
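Because only one mask bit participates, the scalar select is much simpler than the vector one; a one-line model of its semantics (scalar_select_model is an illustrative name):

#include <cstdint>
#include <cstdio>

// Model of EmitX86ScalarSelect: the mask is bitcast to a vector of i1 and
// only element 0 is extracted, so just the low bit of the mask decides.
static float scalar_select_model(uint8_t Mask, float Op0, float Op1) {
  return (Mask & 1) ? Op0 : Op1;
}

int main() {
  std::printf("%g %g\n", scalar_select_model(0xFE, 1.0f, 2.0f),   // 2
              scalar_select_model(0x01, 1.0f, 2.0f));             // 1
}
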
+static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
+ unsigned NumElts, Value *MaskIn) {
+ if (MaskIn) {
+ const auto *C = dyn_cast<Constant>(MaskIn);
+ if (!C || !C->isAllOnesValue())
+ Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
+ }
+
+ if (NumElts < 8) {
+ uint32_t Indices[8];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i;
+ for (unsigned i = NumElts; i != 8; ++i)
+ Indices[i] = i % NumElts + NumElts;
+ Cmp = CGF.Builder.CreateShuffleVector(
+ Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
+ }
+
+ return CGF.Builder.CreateBitCast(Cmp,
+ IntegerType::get(CGF.getLLVMContext(),
+ std::max(NumElts, 8U)));
+}
+
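For sub-byte element counts, the compare result has to be widened before the bitcast to an integer mask; a standalone sketch of the index pattern the helper above uses:

#include <cstdio>

int main() {
  // Model of the NumElts < 8 padding: lanes 0..NumElts-1 keep the compare
  // result, lanes NumElts..7 read from the all-zero second shuffle operand
  // (indices >= NumElts), so the high bits of the i8 mask are cleared.
  const unsigned NumElts = 4;
  unsigned Indices[8];
  for (unsigned i = 0; i != NumElts; ++i)
    Indices[i] = i;
  for (unsigned i = NumElts; i != 8; ++i)
    Indices[i] = i % NumElts + NumElts;
  for (unsigned i = 0; i != 8; ++i)
    std::printf("lane %u <- %s[%u]\n", i,
                Indices[i] < NumElts ? "Cmp" : "Zero",
                Indices[i] < NumElts ? Indices[i] : Indices[i] - NumElts);
}
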
static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
- bool Signed, SmallVectorImpl<Value *> &Ops) {
+ bool Signed, ArrayRef<Value *> Ops) {
+ assert((Ops.size() == 2 || Ops.size() == 4) &&
+ "Unexpected number of arguments");
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
Value *Cmp;
@@ -7809,22 +8614,16 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
}
- const auto *C = dyn_cast<Constant>(Ops.back());
- if (!C || !C->isAllOnesValue())
- Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, Ops.back(), NumElts));
+ Value *MaskIn = nullptr;
+ if (Ops.size() == 4)
+ MaskIn = Ops[3];
- if (NumElts < 8) {
- uint32_t Indices[8];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i;
- for (unsigned i = NumElts; i != 8; ++i)
- Indices[i] = i % NumElts + NumElts;
- Cmp = CGF.Builder.CreateShuffleVector(
- Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
- }
- return CGF.Builder.CreateBitCast(Cmp,
- IntegerType::get(CGF.getLLVMContext(),
- std::max(NumElts, 8U)));
+ return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
+}
+
+static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
+ Value *Zero = Constant::getNullValue(In->getType());
+ return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}
static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
@@ -7834,9 +8633,7 @@ static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
- if (Ops.size() == 1)
- return Res;
- return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
+ return Res;
}
static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
@@ -7844,11 +8641,211 @@ static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- if (Ops.size() == 2)
- return Res;
+ assert(Ops.size() == 2);
+ return Res;
+}
+
+// Lowers X86 FMA intrinsics to IR.
+static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ unsigned BuiltinID, bool IsAddSub) {
- assert(Ops.size() == 4);
- return EmitX86Select(CGF, Ops[3], Res, Ops[2]);
+ bool Subtract = false;
+ Intrinsic::ID IID = Intrinsic::not_intrinsic;
+ switch (BuiltinID) {
+ default: break;
+ case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
+ Subtract = true;
+ LLVM_FALLTHROUGH;
+ case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
+ IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
+ case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ Subtract = true;
+ LLVM_FALLTHROUGH;
+ case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
+ case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ Subtract = true;
+ LLVM_FALLTHROUGH;
+ case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
+ break;
+ case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ Subtract = true;
+ LLVM_FALLTHROUGH;
+ case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
+ break;
+ }
+
+ Value *A = Ops[0];
+ Value *B = Ops[1];
+ Value *C = Ops[2];
+
+ if (Subtract)
+ C = CGF.Builder.CreateFNeg(C);
+
+ Value *Res;
+
+  // If the rounding mode is anything but _MM_FROUND_CUR_DIRECTION (4), keep
+  // the target-specific intrinsic so its rounding operand is preserved;
+  // otherwise lower to a generic llvm.fma below.
+ if (IID != Intrinsic::not_intrinsic &&
+ cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
+ Function *Intr = CGF.CGM.getIntrinsic(IID);
+    Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back()});
+ } else {
+ llvm::Type *Ty = A->getType();
+ Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+    Res = CGF.Builder.CreateCall(FMA, {A, B, C});
+
+ if (IsAddSub) {
+ // Negate even elts in C using a mask.
+ unsigned NumElts = Ty->getVectorNumElements();
+ SmallVector<uint32_t, 16> Indices(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i + (i % 2) * NumElts;
+
+ Value *NegC = CGF.Builder.CreateFNeg(C);
+      Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC});
+ Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
+ }
+ }
+
+ // Handle any required masking.
+ Value *MaskFalseVal = nullptr;
+ switch (BuiltinID) {
+ case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ MaskFalseVal = Ops[0];
+ break;
+ case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
+ break;
+ case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ MaskFalseVal = Ops[2];
+ break;
+ }
+
+ if (MaskFalseVal)
+ return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
+
+ return Res;
+}
+
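When there is no rounding operand to preserve, the addsub forms above are built from two generic fma calls and a blend; a standalone sketch of that even/odd selection under the index formula from the patch:

#include <cmath>
#include <cstdio>

int main() {
  // Model of the IsAddSub blend: shuffle(FMSub, Res, Indices) picks FMSub
  // (a*b - c) for even lanes and FMA (a*b + c) for odd lanes, matching
  // vfmaddsub semantics.
  const unsigned NumElts = 8;
  float A[NumElts], B[NumElts], C[NumElts];
  for (unsigned i = 0; i != NumElts; ++i) { A[i] = i; B[i] = 2; C[i] = 1; }
  for (unsigned i = 0; i != NumElts; ++i) {
    unsigned Idx = i + (i % 2) * NumElts;      // index formula from the diff
    float R = Idx < NumElts ? std::fma(A[i], B[i], -C[i])  // even: subtract
                            : std::fma(A[i], B[i], C[i]);  // odd: add
    std::printf("lane %u: %g\n", i, R);
  }
}
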
+static Value *
+EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
+ Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
+ bool NegAcc = false) {
+ unsigned Rnd = 4;
+ if (Ops.size() > 4)
+ Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
+
+ if (NegAcc)
+ Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
+
+ Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
+ Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
+ Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
+ Value *Res;
+ if (Rnd != 4) {
+ Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
+ Intrinsic::x86_avx512_vfmadd_f32 :
+ Intrinsic::x86_avx512_vfmadd_f64;
+ Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
+ {Ops[0], Ops[1], Ops[2], Ops[4]});
+ } else {
+ Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
+ Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
+ }
+ // If we have more than 3 arguments, we need to do masking.
+ if (Ops.size() > 3) {
+ Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
+ : Ops[PTIdx];
+
+  // If we negated the accumulator and it's the PassThru value, we need to
+ // bypass the negate. Conveniently Upper should be the same thing in this
+ // case.
+ if (NegAcc && PTIdx == 2)
+ PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
+
+ Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
+ }
+ return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
+}
+
+static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
+ ArrayRef<Value *> Ops) {
+ llvm::Type *Ty = Ops[0]->getType();
+ // Arguments have a vXi32 type so cast to vXi64.
+ Ty = llvm::VectorType::get(CGF.Int64Ty,
+ Ty->getPrimitiveSizeInBits() / 64);
+ Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
+ Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
+
+ if (IsSigned) {
+ // Shift left then arithmetic shift right.
+ Constant *ShiftAmt = ConstantInt::get(Ty, 32);
+ LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
+ LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
+ RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
+ RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
+ } else {
+ // Clear the upper bits.
+ Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
+ LHS = CGF.Builder.CreateAnd(LHS, Mask);
+ RHS = CGF.Builder.CreateAnd(RHS, Mask);
+ }
+
+ return CGF.Builder.CreateMul(LHS, RHS);
+}
+
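A per-lane scalar sketch of the widening-multiply semantics implemented above; the IR sign-extends with a shl/ashr pair where this model uses an int32_t cast, but the effect is the same (muldq_lane is an illustrative name):

#include <cstdint>
#include <cstdio>

// Model of EmitX86Muldq: each 64-bit lane multiplies the low 32 bits of its
// operands, sign-extended for pmuldq and zero-extended for pmuludq.
static uint64_t muldq_lane(uint64_t L, uint64_t R, bool IsSigned) {
  if (IsSigned)
    return (uint64_t)((int64_t)(int32_t)L * (int64_t)(int32_t)R);
  return (L & 0xffffffffu) * (R & 0xffffffffu);
}

int main() {
  std::printf("%llx %llx\n",
              (unsigned long long)muldq_lane(0xffffffff, 3, true),   // -3
              (unsigned long long)muldq_lane(0xffffffff, 3, false)); // 2fffffffd
}
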
+// Emit a masked pternlog intrinsic. This only exists because the header has
+// to use a macro, and we aren't able to pass the input argument to both a
+// pternlog builtin and a select builtin without evaluating it twice.
+static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
+ ArrayRef<Value *> Ops) {
+ llvm::Type *Ty = Ops[0]->getType();
+
+ unsigned VecWidth = Ty->getPrimitiveSizeInBits();
+ unsigned EltWidth = Ty->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pternlog_d_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pternlog_d_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pternlog_d_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pternlog_q_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pternlog_q_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pternlog_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
+ Ops.drop_back());
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
+ return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
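
The 8-bit pternlog immediate is a truth table indexed by the three source bits. The helper above only dispatches to the target intrinsic, so the sketch below models the instruction's per-bit semantics rather than the emitted IR (ternlog_model is an illustrative name):

#include <cstdint>
#include <cstdio>

// For each result bit, the three source bits form a 3-bit index
// ((a << 2) | (b << 1) | c) into the immediate truth table.
static uint32_t ternlog_model(uint32_t a, uint32_t b, uint32_t c, uint8_t imm) {
  uint32_t r = 0;
  for (unsigned bit = 0; bit != 32; ++bit) {
    unsigned idx = (((a >> bit) & 1) << 2) | (((b >> bit) & 1) << 1) |
                   ((c >> bit) & 1);
    r |= ((imm >> idx) & 1u) << bit;
  }
  return r;
}

int main() {
  // 0xCA encodes the bitwise select a ? b : c.
  std::printf("%08x\n", ternlog_model(0xF0F0F0F0, 0xFFFF0000, 0x0000FFFF, 0xCA));
}
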
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
@@ -7914,11 +8911,10 @@ Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
return EmitX86CpuSupports(FeatureStr);
}
-Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
+uint32_t
+CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
// Processor features and mapping to processor feature value.
-
uint32_t FeaturesMask = 0;
-
for (const StringRef &FeatureStr : FeatureStrs) {
unsigned Feature =
StringSwitch<unsigned>(FeatureStr)
@@ -7927,7 +8923,14 @@ Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
;
FeaturesMask |= (1U << Feature);
}
+ return FeaturesMask;
+}
+
+Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
+ return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
+}
+llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint32_t FeaturesMask) {
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
@@ -8063,8 +9066,37 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateBitCast(BuildVector(Ops),
llvm::Type::getX86_MMXTy(getLLVMContext()));
case X86::BI__builtin_ia32_vec_ext_v2si:
- return Builder.CreateExtractElement(Ops[0],
- llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
+ case X86::BI__builtin_ia32_vec_ext_v4di: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
+ Index &= NumElts - 1;
+ // These builtins exist so we can ensure the index is an ICE and in range.
+ // Otherwise we could just do this in the header file.
+ return Builder.CreateExtractElement(Ops[0], Index);
+ }
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
+ case X86::BI__builtin_ia32_vec_set_v4di: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
+ Index &= NumElts - 1;
+ // These builtins exist so we can ensure the index is an ICE and in range.
+ // Otherwise we could just do this in the header file.
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
+ }
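A small sketch of the index handling shared by the two cases above; because NumElts is a power of two, the AND reduces any immediate into range before a plain element access:

#include <cstdio>

int main() {
  // Model of the vec_ext/vec_set lowering: wrap the immediate with
  // Index & (NumElts - 1), then extract or insert a single element.
  const unsigned NumElts = 4;                   // e.g. v4si
  int V[NumElts] = {10, 20, 30, 40};
  unsigned Index = 6 & (NumElts - 1);           // 6 wraps to 2
  std::printf("extract: %d\n", V[Index]);       // 30
  V[Index] = 99;                                // insert
  std::printf("insert:  %d\n", V[Index]);       // 99
}
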
case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
@@ -8141,7 +9173,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storess128_mask:
case X86::BI__builtin_ia32_storesd128_mask: {
- return EmitX86MaskedStore(*this, Ops, 16);
+ return EmitX86MaskedStore(*this, Ops, 1);
}
case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
@@ -8173,6 +9205,66 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cvtmask2q512:
return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
+ case X86::BI__builtin_ia32_cvtb2mask128:
+ case X86::BI__builtin_ia32_cvtb2mask256:
+ case X86::BI__builtin_ia32_cvtb2mask512:
+ case X86::BI__builtin_ia32_cvtw2mask128:
+ case X86::BI__builtin_ia32_cvtw2mask256:
+ case X86::BI__builtin_ia32_cvtw2mask512:
+ case X86::BI__builtin_ia32_cvtd2mask128:
+ case X86::BI__builtin_ia32_cvtd2mask256:
+ case X86::BI__builtin_ia32_cvtd2mask512:
+ case X86::BI__builtin_ia32_cvtq2mask128:
+ case X86::BI__builtin_ia32_cvtq2mask256:
+ case X86::BI__builtin_ia32_cvtq2mask512:
+ return EmitX86ConvertToMask(*this, Ops[0]);
+
+ case X86::BI__builtin_ia32_vfmaddss3:
+ case X86::BI__builtin_ia32_vfmaddsd3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ return EmitScalarFMAExpr(*this, Ops, Ops[0]);
+ case X86::BI__builtin_ia32_vfmaddss:
+ case X86::BI__builtin_ia32_vfmaddsd:
+ return EmitScalarFMAExpr(*this, Ops,
+ Constant::getNullValue(Ops[0]->getType()));
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
+ case X86::BI__builtin_ia32_vfmsubss3_mask3:
+ case X86::BI__builtin_ia32_vfmsubsd3_mask3:
+ return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
+ /*NegAcc*/true);
+ case X86::BI__builtin_ia32_vfmaddps:
+ case X86::BI__builtin_ia32_vfmaddpd:
+ case X86::BI__builtin_ia32_vfmaddps256:
+ case X86::BI__builtin_ia32_vfmaddpd256:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
+ case X86::BI__builtin_ia32_vfmaddsubps:
+ case X86::BI__builtin_ia32_vfmaddsubpd:
+ case X86::BI__builtin_ia32_vfmaddsubps256:
+ case X86::BI__builtin_ia32_vfmaddsubpd256:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
+
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
@@ -8211,7 +9303,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
- return EmitX86MaskedLoad(*this, Ops, 16);
+ return EmitX86MaskedLoad(*this, Ops, 1);
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
@@ -8230,11 +9322,45 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86MaskedLoad(*this, Ops, Align);
}
- case X86::BI__builtin_ia32_vbroadcastf128_pd256:
- case X86::BI__builtin_ia32_vbroadcastf128_ps256: {
- llvm::Type *DstTy = ConvertType(E->getType());
- return EmitX86SubVectorBroadcast(*this, Ops, DstTy, 128, 1);
- }
+ case X86::BI__builtin_ia32_expandloaddf128_mask:
+ case X86::BI__builtin_ia32_expandloaddf256_mask:
+ case X86::BI__builtin_ia32_expandloaddf512_mask:
+ case X86::BI__builtin_ia32_expandloadsf128_mask:
+ case X86::BI__builtin_ia32_expandloadsf256_mask:
+ case X86::BI__builtin_ia32_expandloadsf512_mask:
+ case X86::BI__builtin_ia32_expandloaddi128_mask:
+ case X86::BI__builtin_ia32_expandloaddi256_mask:
+ case X86::BI__builtin_ia32_expandloaddi512_mask:
+ case X86::BI__builtin_ia32_expandloadsi128_mask:
+ case X86::BI__builtin_ia32_expandloadsi256_mask:
+ case X86::BI__builtin_ia32_expandloadsi512_mask:
+ case X86::BI__builtin_ia32_expandloadhi128_mask:
+ case X86::BI__builtin_ia32_expandloadhi256_mask:
+ case X86::BI__builtin_ia32_expandloadhi512_mask:
+ case X86::BI__builtin_ia32_expandloadqi128_mask:
+ case X86::BI__builtin_ia32_expandloadqi256_mask:
+ case X86::BI__builtin_ia32_expandloadqi512_mask:
+ return EmitX86ExpandLoad(*this, Ops);
+
+ case X86::BI__builtin_ia32_compressstoredf128_mask:
+ case X86::BI__builtin_ia32_compressstoredf256_mask:
+ case X86::BI__builtin_ia32_compressstoredf512_mask:
+ case X86::BI__builtin_ia32_compressstoresf128_mask:
+ case X86::BI__builtin_ia32_compressstoresf256_mask:
+ case X86::BI__builtin_ia32_compressstoresf512_mask:
+ case X86::BI__builtin_ia32_compressstoredi128_mask:
+ case X86::BI__builtin_ia32_compressstoredi256_mask:
+ case X86::BI__builtin_ia32_compressstoredi512_mask:
+ case X86::BI__builtin_ia32_compressstoresi128_mask:
+ case X86::BI__builtin_ia32_compressstoresi256_mask:
+ case X86::BI__builtin_ia32_compressstoresi512_mask:
+ case X86::BI__builtin_ia32_compressstorehi128_mask:
+ case X86::BI__builtin_ia32_compressstorehi256_mask:
+ case X86::BI__builtin_ia32_compressstorehi512_mask:
+ case X86::BI__builtin_ia32_compressstoreqi128_mask:
+ case X86::BI__builtin_ia32_compressstoreqi256_mask:
+ case X86::BI__builtin_ia32_compressstoreqi512_mask:
+ return EmitX86CompressStore(*this, Ops);
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
@@ -8246,17 +9372,275 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// extract (0, 1)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
- llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
- Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Index, "extract");
// cast pointer to i64 & store
Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask: {
+ llvm::Type *DstTy = ConvertType(E->getType());
+ unsigned NumElts = DstTy->getVectorNumElements();
+ unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned SubVectors = SrcNumElts / NumElts;
+ unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
+ assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
+ Index &= SubVectors - 1; // Remove any extra bits.
+ Index *= NumElts;
+
+ uint32_t Indices[16];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i + Index;
+
+ Value *Res = Builder.CreateShuffleVector(Ops[0],
+ UndefValue::get(Ops[0]->getType()),
+ makeArrayRef(Indices, NumElts),
+ "extract");
+
+ if (Ops.size() == 4)
+ Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
+
+ return Res;
+ }
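A standalone sketch of the subvector-extract index arithmetic above, for an 8-element source split into two 4-element halves:

#include <cstdio>

int main() {
  // Model of the extract lowering: mask the immediate down to the number of
  // subvectors, scale by the destination width, then take a contiguous run.
  const unsigned SrcNumElts = 8, NumElts = 4;
  const unsigned SubVectors = SrcNumElts / NumElts;  // 2 halves
  unsigned Index = 3;                                // oversized immediate
  Index &= SubVectors - 1;                           // wraps to 1
  Index *= NumElts;                                  // first source lane: 4
  for (unsigned i = 0; i != NumElts; ++i)
    std::printf("dst[%u] <- src[%u]\n", i, i + Index);
}
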
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_inserti32x4:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512: {
+ unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
+ unsigned SubVectors = DstNumElts / SrcNumElts;
+ unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
+ assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
+ Index &= SubVectors - 1; // Remove any extra bits.
+ Index *= SrcNumElts;
+
+ uint32_t Indices[16];
+ for (unsigned i = 0; i != DstNumElts; ++i)
+ Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
+
+ Value *Op1 = Builder.CreateShuffleVector(Ops[1],
+ UndefValue::get(Ops[1]->getType()),
+ makeArrayRef(Indices, DstNumElts),
+ "widen");
+
+ for (unsigned i = 0; i != DstNumElts; ++i) {
+ if (i >= Index && i < (Index + SrcNumElts))
+ Indices[i] = (i - Index) + DstNumElts;
+ else
+ Indices[i] = i;
+ }
+
+ return Builder.CreateShuffleVector(Ops[0], Op1,
+ makeArrayRef(Indices, DstNumElts),
+ "insert");
+ }
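The insert path above needs two shuffles because shufflevector operands must have equal length; a sketch of both index vectors for inserting a 4-element vector into the upper half of an 8-element one:

#include <cstdio>

int main() {
  // Model of the insert lowering: first widen the small vector with dummy
  // (undef) lanes, then blend it over the destination at the scaled index.
  const unsigned DstNumElts = 8, SrcNumElts = 4;
  const unsigned Index = 1 * SrcNumElts;   // immediate 1 -> upper half
  unsigned Widen[DstNumElts], Blend[DstNumElts];
  for (unsigned i = 0; i != DstNumElts; ++i)
    Widen[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
  for (unsigned i = 0; i != DstNumElts; ++i)
    Blend[i] = (i >= Index && i < Index + SrcNumElts) ? (i - Index) + DstNumElts
                                                      : i;
  for (unsigned i = 0; i != DstNumElts; ++i)
    std::printf("widen[%u]=%u  blend[%u]=%u\n", i, Widen[i], i, Blend[i]);
}
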
+ case X86::BI__builtin_ia32_pmovqd512_mask:
+ case X86::BI__builtin_ia32_pmovwb512_mask: {
+ Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
+ return EmitX86Select(*this, Ops[2], Res, Ops[1]);
+ }
+ case X86::BI__builtin_ia32_pmovdb512_mask:
+ case X86::BI__builtin_ia32_pmovdw512_mask:
+ case X86::BI__builtin_ia32_pmovqw512_mask: {
+ if (const auto *C = dyn_cast<Constant>(Ops[2]))
+ if (C->isAllOnesValue())
+ return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
+
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_pmovdb512_mask:
+ IID = Intrinsic::x86_avx512_mask_pmov_db_512;
+ break;
+ case X86::BI__builtin_ia32_pmovdw512_mask:
+ IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
+ break;
+ case X86::BI__builtin_ia32_pmovqw512_mask:
+ IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
+ break;
+ }
+
+ Function *Intr = CGM.getIntrinsic(IID);
+ return Builder.CreateCall(Intr, Ops);
+ }
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_pblendd256: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ uint32_t Indices[16];
+    // If there are more than 8 elements, the immediate is used twice, so
+    // make sure we handle that.
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
+
+ return Builder.CreateShuffleVector(Ops[0], Ops[1],
+ makeArrayRef(Indices, NumElts),
+ "blend");
+ }
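A quick sketch of the blend index computation above, showing how the 8-bit immediate is reused for vectors with more than eight elements:

#include <cstdio>

int main() {
  // Model of the blend lowering: bit (i % 8) of the immediate selects
  // Ops[1] (shuffle index NumElts + i) over Ops[0] (index i), so a 16-lane
  // blend consumes the same byte twice.
  const unsigned NumElts = 16;
  const unsigned Imm = 0xA5;
  for (unsigned i = 0; i != NumElts; ++i) {
    unsigned Idx = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
    std::printf("lane %2u <- %s[%u]\n", i, Idx < NumElts ? "Op0" : "Op1",
                Idx % NumElts);
  }
}
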
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512: {
+ uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+
+    // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
+ Imm = (Imm & 0xff) * 0x01010101;
+
+ uint32_t Indices[32];
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ for (unsigned i = 0; i != 4; ++i) {
+ Indices[l + i] = l + (Imm & 3);
+ Imm >>= 2;
+ }
+ for (unsigned i = 4; i != 8; ++i)
+ Indices[l + i] = l + i;
+ }
+
+ return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
+ makeArrayRef(Indices, NumElts),
+ "pshuflw");
+ }
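The immediate splat above is what lets a single 8-bit selector drive every 128-bit lane; a standalone sketch printing the indices for pshuflw on a 16-element (256-bit) vector:

#include <cstdio>

int main() {
  // Model of the pshuflw index computation: replicating the 8-bit immediate
  // into all four bytes means 'Imm >>= 2' keeps yielding the right two-bit
  // field after the loop crosses into the next 128-bit lane.
  const unsigned NumElts = 16;                 // pshuflw256: two 8-lane halves
  unsigned Imm = (0x1B & 0xff) * 0x01010101;   // splat the immediate 0x1B
  unsigned Indices[NumElts];
  for (unsigned l = 0; l != NumElts; l += 8) {
    for (unsigned i = 0; i != 4; ++i) {
      Indices[l + i] = l + (Imm & 3);
      Imm >>= 2;
    }
    for (unsigned i = 4; i != 8; ++i)
      Indices[l + i] = l + i;                  // upper words pass through
  }
  for (unsigned i = 0; i != NumElts; ++i)
    std::printf("%u ", Indices[i]);
  std::printf("\n");
}
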
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512: {
+ uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+
+    // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
+ Imm = (Imm & 0xff) * 0x01010101;
+
+ uint32_t Indices[32];
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ for (unsigned i = 0; i != 4; ++i)
+ Indices[l + i] = l + i;
+ for (unsigned i = 4; i != 8; ++i) {
+ Indices[l + i] = l + 4 + (Imm & 3);
+ Imm >>= 2;
+ }
+ }
+
+ return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
+ makeArrayRef(Indices, NumElts),
+ "pshufhw");
+ }
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilpd256:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512: {
+ uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+ unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+    // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
+ Imm = (Imm & 0xff) * 0x01010101;
+
+ uint32_t Indices[16];
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ Indices[i + l] = (Imm % NumLaneElts) + l;
+ Imm /= NumLaneElts;
+ }
+ }
+
+ return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
+ makeArrayRef(Indices, NumElts),
+ "permil");
+ }
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_shufpd256:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512: {
+ uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+ unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+    // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
+ Imm = (Imm & 0xff) * 0x01010101;
+
+ uint32_t Indices[16];
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ unsigned Index = Imm % NumLaneElts;
+ Imm /= NumLaneElts;
+ if (i >= (NumLaneElts / 2))
+ Index += NumElts;
+ Indices[l + i] = l + Index;
+ }
+ }
+
+ return Builder.CreateShuffleVector(Ops[0], Ops[1],
+ makeArrayRef(Indices, NumElts),
+ "shufp");
+ }
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_permdf512: {
+ unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+
+ // These intrinsics operate on 256-bit lanes of four 64-bit elements.
+ uint32_t Indices[8];
+ for (unsigned l = 0; l != NumElts; l += 4)
+ for (unsigned i = 0; i != 4; ++i)
+ Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
+
+ return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
+ makeArrayRef(Indices, NumElts),
+ "perm");
+ }
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr512_mask: {
- unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+ case X86::BI__builtin_ia32_palignr512: {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
assert(NumElts % 16 == 0);
@@ -8285,15 +9669,58 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- Value *Align = Builder.CreateShuffleVector(Ops[1], Ops[0],
- makeArrayRef(Indices, NumElts),
- "palignr");
+ return Builder.CreateShuffleVector(Ops[1], Ops[0],
+ makeArrayRef(Indices, NumElts),
+ "palignr");
+ }
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
+ case X86::BI__builtin_ia32_alignq512: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
+
+    // Mask the shift amount to the width of two vectors.
+ ShiftVal &= (2 * NumElts) - 1;
- // If this isn't a masked builtin, just return the align operation.
- if (Ops.size() == 3)
- return Align;
+ uint32_t Indices[16];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i + ShiftVal;
+
+ return Builder.CreateShuffleVector(Ops[1], Ops[0],
+ makeArrayRef(Indices, NumElts),
+ "valign");
+ }
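A sketch of the valign semantics implemented above: the two sources behave as one concatenated vector shifted right by ShiftVal elements, with Ops[1] supplying the low elements to match the shuffle operand order:

#include <cstdio>

int main() {
  // Model of valign: treat Hi:Lo as one 2*NumElts concatenation, shift right
  // by ShiftVal elements, keep the low NumElts. Index i + ShiftVal reads Lo
  // while it is < NumElts and spills into Hi afterwards.
  const unsigned NumElts = 4;
  int Hi[NumElts] = {4, 5, 6, 7};   // Ops[0]
  int Lo[NumElts] = {0, 1, 2, 3};   // Ops[1]
  const unsigned ShiftVal = 2;
  for (unsigned i = 0; i != NumElts; ++i) {
    unsigned Idx = i + ShiftVal;
    std::printf("%d ", Idx < NumElts ? Lo[Idx] : Hi[Idx - NumElts]);
  }
  std::printf("\n");                // prints: 2 3 4 5
}
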
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2: {
+ unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+ unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ uint32_t Indices[16];
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ unsigned Index = (Imm % NumLanes) * NumLaneElts;
+ Imm /= NumLanes; // Discard the bits we just used.
+ if (l >= (NumElts / 2))
+ Index += NumElts; // Switch to other source.
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ Indices[l + i] = Index + i;
+ }
+ }
- return EmitX86Select(*this, Ops[4], Align, Ops[3]);
+ return Builder.CreateShuffleVector(Ops[0], Ops[1],
+ makeArrayRef(Indices, NumElts),
+ "shuf");
}
case X86::BI__builtin_ia32_vperm2f128_pd256:
@@ -8335,6 +9762,66 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
"vperm");
}
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift: {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
+ llvm::Type *ResultType = Ops[0]->getType();
+ // Builtin type is vXi64 so multiply by 8 to get bytes.
+ unsigned NumElts = ResultType->getVectorNumElements() * 8;
+
+ // If pslldq is shifting the vector more than 15 bytes, emit zero.
+ if (ShiftVal >= 16)
+ return llvm::Constant::getNullValue(ResultType);
+
+ uint32_t Indices[64];
+    // 256/512-bit pslldq operates on 128-bit lanes, so handle each lane
+    // separately.
+ for (unsigned l = 0; l != NumElts; l += 16) {
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = NumElts + i - ShiftVal;
+ if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
+ Indices[l + i] = Idx + l;
+ }
+ }
+
+ llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
+ Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Value *Zero = llvm::Constant::getNullValue(VecTy);
+ Value *SV = Builder.CreateShuffleVector(Zero, Cast,
+ makeArrayRef(Indices, NumElts),
+ "pslldq");
+ return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
+ }
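A standalone sketch of the per-lane byte-shift indices computed above, for a 256-bit pslldq by 3 bytes; redirected in-lane indices land in the zero operand, which is what shifts in the zeros:

#include <cstdio>

int main() {
  // Model of the pslldq index computation: operand 0 of the shuffle is the
  // zero vector (indices < NumElts) and operand 1 is the data (indices >=
  // NumElts), so out-of-lane reads are redirected to zero without mixing
  // lanes.
  const unsigned NumElts = 32;     // 256-bit vector viewed as bytes
  const unsigned ShiftVal = 3;
  for (unsigned l = 0; l != NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = NumElts + i - ShiftVal;
      if (Idx < NumElts)
        Idx -= NumElts - 16;       // end of lane: redirect into the zeros
      Idx += l;
      std::printf("byte %2u <- %s[%u]\n", l + i,
                  Idx < NumElts ? "Zero" : "Data", Idx % NumElts);
    }
  }
}
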
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift: {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
+ llvm::Type *ResultType = Ops[0]->getType();
+ // Builtin type is vXi64 so multiply by 8 to get bytes.
+ unsigned NumElts = ResultType->getVectorNumElements() * 8;
+
+ // If psrldq is shifting the vector more than 15 bytes, emit zero.
+ if (ShiftVal >= 16)
+ return llvm::Constant::getNullValue(ResultType);
+
+ uint32_t Indices[64];
+    // 256/512-bit psrldq operates on 128-bit lanes, so handle each lane
+    // separately.
+ for (unsigned l = 0; l != NumElts; l += 16) {
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = i + ShiftVal;
+ if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
+ Indices[l + i] = Idx + l;
+ }
+ }
+
+ llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
+ Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Value *Zero = llvm::Constant::getNullValue(VecTy);
+ Value *SV = Builder.CreateShuffleVector(Cast, Zero,
+ makeArrayRef(Indices, NumElts),
+ "psrldq");
+ return Builder.CreateBitCast(SV, ResultType, "cast");
+ }
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64:
case X86::BI__builtin_ia32_movntsd:
@@ -8380,6 +9867,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_selectpd_256:
case X86::BI__builtin_ia32_selectpd_512:
return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
+ case X86::BI__builtin_ia32_selectss_128:
+ case X86::BI__builtin_ia32_selectsd_128: {
+ Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
+ Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
+ A = EmitX86ScalarSelect(*this, Ops[0], A, B);
+ return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
+ }
case X86::BI__builtin_ia32_cmpb128_mask:
case X86::BI__builtin_ia32_cmpb256_mask:
case X86::BI__builtin_ia32_cmpb512_mask:
@@ -8411,6 +9905,18 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86MaskedCompare(*this, CC, false, Ops);
}
+ case X86::BI__builtin_ia32_kortestchi:
+ case X86::BI__builtin_ia32_kortestzhi: {
+ Value *Or = EmitX86MaskLogic(*this, Instruction::Or, 16, Ops);
+ Value *C;
+ if (BuiltinID == X86::BI__builtin_ia32_kortestchi)
+ C = llvm::Constant::getAllOnesValue(Builder.getInt16Ty());
+ else
+ C = llvm::Constant::getNullValue(Builder.getInt16Ty());
+ Value *Cmp = Builder.CreateICmpEQ(Or, C);
+ return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
+ }
+
case X86::BI__builtin_ia32_kandhi:
return EmitX86MaskLogic(*this, Instruction::And, 16, Ops);
case X86::BI__builtin_ia32_kandnhi:
@@ -8427,85 +9933,176 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.getInt16Ty());
}
- case X86::BI__builtin_ia32_vplzcntd_128_mask:
- case X86::BI__builtin_ia32_vplzcntd_256_mask:
- case X86::BI__builtin_ia32_vplzcntd_512_mask:
- case X86::BI__builtin_ia32_vplzcntq_128_mask:
- case X86::BI__builtin_ia32_vplzcntq_256_mask:
- case X86::BI__builtin_ia32_vplzcntq_512_mask: {
+ case X86::BI__builtin_ia32_kunpckdi:
+ case X86::BI__builtin_ia32_kunpcksi:
+ case X86::BI__builtin_ia32_kunpckhi: {
+ unsigned NumElts = Ops[0]->getType()->getScalarSizeInBits();
+ Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
+ Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
+ uint32_t Indices[64];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i;
+
+ // First extract half of each vector. This gives better codegen than
+ // doing it in a single shuffle.
+ LHS = Builder.CreateShuffleVector(LHS, LHS,
+ makeArrayRef(Indices, NumElts / 2));
+ RHS = Builder.CreateShuffleVector(RHS, RHS,
+ makeArrayRef(Indices, NumElts / 2));
+ // Concat the vectors.
+ // NOTE: Operands are swapped to match the intrinsic definition.
+ Value *Res = Builder.CreateShuffleVector(RHS, LHS,
+ makeArrayRef(Indices, NumElts));
+ return Builder.CreateBitCast(Res, Ops[0]->getType());
+ }
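+
+  // Sketch of the result (illustrative): for kunpckhi the low 8 bits of
+  // each i16 mask are concatenated with the operands swapped, i.e.
+  // ((Ops[0] & 0xff) << 8) | (Ops[1] & 0xff) read back as an i16 mask.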
+
+ case X86::BI__builtin_ia32_vplzcntd_128:
+ case X86::BI__builtin_ia32_vplzcntd_256:
+ case X86::BI__builtin_ia32_vplzcntd_512:
+ case X86::BI__builtin_ia32_vplzcntq_128:
+ case X86::BI__builtin_ia32_vplzcntq_256:
+ case X86::BI__builtin_ia32_vplzcntq_512: {
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
- return EmitX86Select(*this, Ops[2],
- Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}),
- Ops[1]);
+ return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
+ }
+ case X86::BI__builtin_ia32_sqrtss:
+ case X86::BI__builtin_ia32_sqrtsd: {
+ Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, {A});
+ return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
+ }
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask: {
+ unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
+    // Lower to a plain sqrt only if the rounding mode is 4
+    // (AKA CUR_DIRECTION); otherwise keep the intrinsic.
+ if (CC != 4) {
+ Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
+ Intrinsic::x86_avx512_mask_sqrt_sd :
+ Intrinsic::x86_avx512_mask_sqrt_ss;
+ return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
+ }
+ Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+ A = Builder.CreateCall(F, A);
+ Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
+ A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
+ return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
+ }
+ case X86::BI__builtin_ia32_sqrtpd256:
+ case X86::BI__builtin_ia32_sqrtpd:
+ case X86::BI__builtin_ia32_sqrtps256:
+ case X86::BI__builtin_ia32_sqrtps:
+ case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtpd512: {
+ if (Ops.size() == 2) {
+ unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
+      // Lower to a plain sqrt only if the rounding mode is 4
+      // (AKA CUR_DIRECTION); otherwise keep the intrinsic.
+ if (CC != 4) {
+ Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
+ Intrinsic::x86_avx512_sqrt_ps_512 :
+ Intrinsic::x86_avx512_sqrt_pd_512;
+ return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
+ }
+ }
+ Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
+ return Builder.CreateCall(F, Ops[0]);
}
-
case X86::BI__builtin_ia32_pabsb128:
case X86::BI__builtin_ia32_pabsw128:
case X86::BI__builtin_ia32_pabsd128:
case X86::BI__builtin_ia32_pabsb256:
case X86::BI__builtin_ia32_pabsw256:
case X86::BI__builtin_ia32_pabsd256:
- case X86::BI__builtin_ia32_pabsq128_mask:
- case X86::BI__builtin_ia32_pabsq256_mask:
- case X86::BI__builtin_ia32_pabsb512_mask:
- case X86::BI__builtin_ia32_pabsw512_mask:
- case X86::BI__builtin_ia32_pabsd512_mask:
- case X86::BI__builtin_ia32_pabsq512_mask:
+ case X86::BI__builtin_ia32_pabsq128:
+ case X86::BI__builtin_ia32_pabsq256:
+ case X86::BI__builtin_ia32_pabsb512:
+ case X86::BI__builtin_ia32_pabsw512:
+ case X86::BI__builtin_ia32_pabsd512:
+ case X86::BI__builtin_ia32_pabsq512:
return EmitX86Abs(*this, Ops);
case X86::BI__builtin_ia32_pmaxsb128:
case X86::BI__builtin_ia32_pmaxsw128:
case X86::BI__builtin_ia32_pmaxsd128:
- case X86::BI__builtin_ia32_pmaxsq128_mask:
+ case X86::BI__builtin_ia32_pmaxsq128:
case X86::BI__builtin_ia32_pmaxsb256:
case X86::BI__builtin_ia32_pmaxsw256:
case X86::BI__builtin_ia32_pmaxsd256:
- case X86::BI__builtin_ia32_pmaxsq256_mask:
- case X86::BI__builtin_ia32_pmaxsb512_mask:
- case X86::BI__builtin_ia32_pmaxsw512_mask:
- case X86::BI__builtin_ia32_pmaxsd512_mask:
- case X86::BI__builtin_ia32_pmaxsq512_mask:
+ case X86::BI__builtin_ia32_pmaxsq256:
+ case X86::BI__builtin_ia32_pmaxsb512:
+ case X86::BI__builtin_ia32_pmaxsw512:
+ case X86::BI__builtin_ia32_pmaxsd512:
+ case X86::BI__builtin_ia32_pmaxsq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
case X86::BI__builtin_ia32_pmaxub128:
case X86::BI__builtin_ia32_pmaxuw128:
case X86::BI__builtin_ia32_pmaxud128:
- case X86::BI__builtin_ia32_pmaxuq128_mask:
+ case X86::BI__builtin_ia32_pmaxuq128:
case X86::BI__builtin_ia32_pmaxub256:
case X86::BI__builtin_ia32_pmaxuw256:
case X86::BI__builtin_ia32_pmaxud256:
- case X86::BI__builtin_ia32_pmaxuq256_mask:
- case X86::BI__builtin_ia32_pmaxub512_mask:
- case X86::BI__builtin_ia32_pmaxuw512_mask:
- case X86::BI__builtin_ia32_pmaxud512_mask:
- case X86::BI__builtin_ia32_pmaxuq512_mask:
+ case X86::BI__builtin_ia32_pmaxuq256:
+ case X86::BI__builtin_ia32_pmaxub512:
+ case X86::BI__builtin_ia32_pmaxuw512:
+ case X86::BI__builtin_ia32_pmaxud512:
+ case X86::BI__builtin_ia32_pmaxuq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
case X86::BI__builtin_ia32_pminsb128:
case X86::BI__builtin_ia32_pminsw128:
case X86::BI__builtin_ia32_pminsd128:
- case X86::BI__builtin_ia32_pminsq128_mask:
+ case X86::BI__builtin_ia32_pminsq128:
case X86::BI__builtin_ia32_pminsb256:
case X86::BI__builtin_ia32_pminsw256:
case X86::BI__builtin_ia32_pminsd256:
- case X86::BI__builtin_ia32_pminsq256_mask:
- case X86::BI__builtin_ia32_pminsb512_mask:
- case X86::BI__builtin_ia32_pminsw512_mask:
- case X86::BI__builtin_ia32_pminsd512_mask:
- case X86::BI__builtin_ia32_pminsq512_mask:
+ case X86::BI__builtin_ia32_pminsq256:
+ case X86::BI__builtin_ia32_pminsb512:
+ case X86::BI__builtin_ia32_pminsw512:
+ case X86::BI__builtin_ia32_pminsd512:
+ case X86::BI__builtin_ia32_pminsq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
case X86::BI__builtin_ia32_pminub128:
case X86::BI__builtin_ia32_pminuw128:
case X86::BI__builtin_ia32_pminud128:
- case X86::BI__builtin_ia32_pminuq128_mask:
+ case X86::BI__builtin_ia32_pminuq128:
case X86::BI__builtin_ia32_pminub256:
case X86::BI__builtin_ia32_pminuw256:
case X86::BI__builtin_ia32_pminud256:
- case X86::BI__builtin_ia32_pminuq256_mask:
- case X86::BI__builtin_ia32_pminub512_mask:
- case X86::BI__builtin_ia32_pminuw512_mask:
- case X86::BI__builtin_ia32_pminud512_mask:
- case X86::BI__builtin_ia32_pminuq512_mask:
+ case X86::BI__builtin_ia32_pminuq256:
+ case X86::BI__builtin_ia32_pminub512:
+ case X86::BI__builtin_ia32_pminuw512:
+ case X86::BI__builtin_ia32_pminud512:
+ case X86::BI__builtin_ia32_pminuq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
+ case X86::BI__builtin_ia32_pmuludq128:
+ case X86::BI__builtin_ia32_pmuludq256:
+ case X86::BI__builtin_ia32_pmuludq512:
+ return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
+
+ case X86::BI__builtin_ia32_pmuldq128:
+ case X86::BI__builtin_ia32_pmuldq256:
+ case X86::BI__builtin_ia32_pmuldq512:
+ return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
+
+ case X86::BI__builtin_ia32_pternlogd512_mask:
+ case X86::BI__builtin_ia32_pternlogq512_mask:
+ case X86::BI__builtin_ia32_pternlogd128_mask:
+ case X86::BI__builtin_ia32_pternlogd256_mask:
+ case X86::BI__builtin_ia32_pternlogq128_mask:
+ case X86::BI__builtin_ia32_pternlogq256_mask:
+ return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
+
+ case X86::BI__builtin_ia32_pternlogd512_maskz:
+ case X86::BI__builtin_ia32_pternlogq512_maskz:
+ case X86::BI__builtin_ia32_pternlogd128_maskz:
+ case X86::BI__builtin_ia32_pternlogd256_maskz:
+ case X86::BI__builtin_ia32_pternlogq128_maskz:
+ case X86::BI__builtin_ia32_pternlogq256_maskz:
+ return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
+
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
@@ -8549,7 +10146,44 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractValue(Call, 1);
}
- // SSE packed comparison intrinsics
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ case X86::BI__builtin_ia32_fpclasspd512_mask: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ Value *MaskIn = Ops[2];
+ Ops.erase(&Ops[2]);
+
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_fpclassps128_mask:
+ ID = Intrinsic::x86_avx512_fpclass_ps_128;
+ break;
+ case X86::BI__builtin_ia32_fpclassps256_mask:
+ ID = Intrinsic::x86_avx512_fpclass_ps_256;
+ break;
+ case X86::BI__builtin_ia32_fpclassps512_mask:
+ ID = Intrinsic::x86_avx512_fpclass_ps_512;
+ break;
+ case X86::BI__builtin_ia32_fpclasspd128_mask:
+ ID = Intrinsic::x86_avx512_fpclass_pd_128;
+ break;
+ case X86::BI__builtin_ia32_fpclasspd256_mask:
+ ID = Intrinsic::x86_avx512_fpclass_pd_256;
+ break;
+ case X86::BI__builtin_ia32_fpclasspd512_mask:
+ ID = Intrinsic::x86_avx512_fpclass_pd_512;
+ break;
+ }
+
+ Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
+ }
+
+  // Packed comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
return getVectorFCmpIR(CmpInst::FCMP_OEQ);
@@ -8577,64 +10211,79 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpps256:
case X86::BI__builtin_ia32_cmppd:
- case X86::BI__builtin_ia32_cmppd256: {
- unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
- // If this one of the SSE immediates, we can use native IR.
- if (CC < 8) {
- FCmpInst::Predicate Pred;
- switch (CC) {
- case 0: Pred = FCmpInst::FCMP_OEQ; break;
- case 1: Pred = FCmpInst::FCMP_OLT; break;
- case 2: Pred = FCmpInst::FCMP_OLE; break;
- case 3: Pred = FCmpInst::FCMP_UNO; break;
- case 4: Pred = FCmpInst::FCMP_UNE; break;
- case 5: Pred = FCmpInst::FCMP_UGE; break;
- case 6: Pred = FCmpInst::FCMP_UGT; break;
- case 7: Pred = FCmpInst::FCMP_ORD; break;
- }
- return getVectorFCmpIR(Pred);
+ case X86::BI__builtin_ia32_cmppd256:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask: {
+    // Lower vector comparisons to fcmp instructions, ignoring both the
+    // requested signalling behaviour and the requested rounding mode.
+    // This is only possible as long as FENV_ACCESS is not implemented.
+    // See also: https://reviews.llvm.org/D45616
+
+    // The third argument is the comparison condition, an integer in the
+    // range [0, 31].
+ unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
+
+    // Lower to an IR fcmp instruction, ignoring the requested signaling
+    // behaviour; e.g. both _CMP_GT_OS and _CMP_GT_OQ are translated to
+    // FCMP_OGT.
+ FCmpInst::Predicate Pred;
+ switch (CC) {
+ case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
+ case 0x01: Pred = FCmpInst::FCMP_OLT; break;
+ case 0x02: Pred = FCmpInst::FCMP_OLE; break;
+ case 0x03: Pred = FCmpInst::FCMP_UNO; break;
+ case 0x04: Pred = FCmpInst::FCMP_UNE; break;
+ case 0x05: Pred = FCmpInst::FCMP_UGE; break;
+ case 0x06: Pred = FCmpInst::FCMP_UGT; break;
+ case 0x07: Pred = FCmpInst::FCMP_ORD; break;
+ case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
+ case 0x09: Pred = FCmpInst::FCMP_ULT; break;
+ case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
+ case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
+ case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
+ case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
+ case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
+ case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
+ case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
+ case 0x11: Pred = FCmpInst::FCMP_OLT; break;
+ case 0x12: Pred = FCmpInst::FCMP_OLE; break;
+ case 0x13: Pred = FCmpInst::FCMP_UNO; break;
+ case 0x14: Pred = FCmpInst::FCMP_UNE; break;
+ case 0x15: Pred = FCmpInst::FCMP_UGE; break;
+ case 0x16: Pred = FCmpInst::FCMP_UGT; break;
+ case 0x17: Pred = FCmpInst::FCMP_ORD; break;
+ case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
+ case 0x19: Pred = FCmpInst::FCMP_ULT; break;
+ case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
+ case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
+ case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
+ case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
+ case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
+ case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
+ default: llvm_unreachable("Unhandled CC");
}
- // We can't handle 8-31 immediates with native IR, use the intrinsic.
- // Except for predicates that create constants.
- Intrinsic::ID ID;
+    // Builtins without the _mask suffix return a vector of integers of the
+    // same width as the input vectors.
switch (BuiltinID) {
- default: llvm_unreachable("Unsupported intrinsic!");
- case X86::BI__builtin_ia32_cmpps:
- ID = Intrinsic::x86_sse_cmp_ps;
- break;
- case X86::BI__builtin_ia32_cmpps256:
- // _CMP_TRUE_UQ, _CMP_TRUE_US produce -1,-1... vector
- // on any input and _CMP_FALSE_OQ, _CMP_FALSE_OS produce 0, 0...
- if (CC == 0xf || CC == 0xb || CC == 0x1b || CC == 0x1f) {
- Value *Constant = (CC == 0xf || CC == 0x1f) ?
- llvm::Constant::getAllOnesValue(Builder.getInt32Ty()) :
- llvm::Constant::getNullValue(Builder.getInt32Ty());
- Value *Vec = Builder.CreateVectorSplat(
- Ops[0]->getType()->getVectorNumElements(), Constant);
- return Builder.CreateBitCast(Vec, Ops[0]->getType());
- }
- ID = Intrinsic::x86_avx_cmp_ps_256;
- break;
- case X86::BI__builtin_ia32_cmppd:
- ID = Intrinsic::x86_sse2_cmp_pd;
- break;
- case X86::BI__builtin_ia32_cmppd256:
- // _CMP_TRUE_UQ, _CMP_TRUE_US produce -1,-1... vector
- // on any input and _CMP_FALSE_OQ, _CMP_FALSE_OS produce 0, 0...
- if (CC == 0xf || CC == 0xb || CC == 0x1b || CC == 0x1f) {
- Value *Constant = (CC == 0xf || CC == 0x1f) ?
- llvm::Constant::getAllOnesValue(Builder.getInt64Ty()) :
- llvm::Constant::getNullValue(Builder.getInt64Ty());
- Value *Vec = Builder.CreateVectorSplat(
- Ops[0]->getType()->getVectorNumElements(), Constant);
- return Builder.CreateBitCast(Vec, Ops[0]->getType());
- }
- ID = Intrinsic::x86_avx_cmp_pd_256;
- break;
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_cmpps128_mask:
+ case X86::BI__builtin_ia32_cmpps256_mask:
+ case X86::BI__builtin_ia32_cmppd128_mask:
+ case X86::BI__builtin_ia32_cmppd256_mask: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
+ return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
+ }
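+    // For example (illustrative): _mm512_cmp_ps_mask with _CMP_LT_OS
+    // lowers to an fcmp olt on <16 x float>, and the <16 x i1> result is
+    // combined with the incoming mask in EmitX86MaskedCompareResult.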
+ default:
+ return getVectorFCmpIR(Pred);
}
-
- return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
}
// SSE scalar comparison intrinsics
@@ -9195,19 +10844,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
- // Element zero comes from the first input vector and element one comes from
- // the second. The element indices within each vector are numbered in big
- // endian order so the shuffle mask must be adjusted for this on little
- // endian platforms (i.e. index is complemented and source vector reversed).
- unsigned ElemIdx0;
- unsigned ElemIdx1;
- if (getTarget().isLittleEndian()) {
- ElemIdx0 = (~Index & 1) + 2;
- ElemIdx1 = (~Index & 2) >> 1;
- } else { // BigEndian
- ElemIdx0 = (Index & 2) >> 1;
- ElemIdx1 = 2 + (Index & 1);
- }
+ // Account for endianness by treating this as just a shuffle. So we use the
+ // same indices for both LE and BE in order to produce expected results in
+ // both cases.
+ unsigned ElemIdx0 = (Index & 2) >> 1;
+ unsigned ElemIdx1 = 2 + (Index & 1);
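+  // For example (illustrative): Index == 1 gives ElemIdx0 == 0 and
+  // ElemIdx1 == 3, i.e. element 0 of the first vector and element 1 of
+  // the second.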
Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
ConstantInt::get(Int32Ty, ElemIdx1)};
@@ -9398,6 +11039,49 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CI->setConvergent();
return CI;
}
+ case AMDGPU::BI__builtin_amdgcn_ds_faddf:
+ case AMDGPU::BI__builtin_amdgcn_ds_fminf:
+ case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
+ llvm::SmallVector<llvm::Value *, 5> Args;
+ for (unsigned I = 0; I != 5; ++I)
+ Args.push_back(EmitScalarExpr(E->getArg(I)));
+ const llvm::Type *PtrTy = Args[0]->getType();
+ // check pointer parameter
+ if (!PtrTy->isPointerTy() ||
+ E->getArg(0)
+ ->getType()
+ ->getPointeeType()
+ .getQualifiers()
+ .getAddressSpace() != LangAS::opencl_local ||
+ !PtrTy->getPointerElementType()->isFloatTy()) {
+ CGM.Error(E->getArg(0)->getLocStart(),
+ "parameter should have type \"local float*\"");
+ return nullptr;
+ }
+ // check float parameter
+ if (!Args[1]->getType()->isFloatTy()) {
+ CGM.Error(E->getArg(1)->getLocStart(),
+ "parameter should have type \"float\"");
+ return nullptr;
+ }
+
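+    // All five operands are forwarded unchanged to the matching
+    // llvm.amdgcn.ds.{fadd,fmin,fmax} intrinsic selected below.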
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_ds_faddf:
+ ID = Intrinsic::amdgcn_ds_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_fminf:
+ ID = Intrinsic::amdgcn_ds_fmin;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
+ ID = Intrinsic::amdgcn_ds_fmax;
+ break;
+ default:
+ llvm_unreachable("Unknown BuiltinID");
+ }
+ Value *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Args);
+ }
// amdgcn workitem
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
@@ -10028,7 +11712,15 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__hmma_m16n16k16_ld_a:
case NVPTX::BI__hmma_m16n16k16_ld_b:
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
- case NVPTX::BI__hmma_m16n16k16_ld_c_f32: {
+ case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
+ case NVPTX::BI__hmma_m32n8k16_ld_a:
+ case NVPTX::BI__hmma_m32n8k16_ld_b:
+ case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
+ case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
+ case NVPTX::BI__hmma_m8n32k16_ld_a:
+ case NVPTX::BI__hmma_m8n32k16_ld_b:
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f32: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
@@ -10040,31 +11732,70 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
unsigned NumResults;
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_ld_a:
- IID = isColMajor ? Intrinsic::nvvm_wmma_load_a_f16_col_stride
- : Intrinsic::nvvm_wmma_load_a_f16_row_stride;
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride
+ : Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m16n16k16_ld_b:
- IID = isColMajor ? Intrinsic::nvvm_wmma_load_b_f16_col_stride
- : Intrinsic::nvvm_wmma_load_b_f16_row_stride;
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride
+ : Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_load_c_f16_col_stride
- : Intrinsic::nvvm_wmma_load_c_f16_row_stride;
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride
+ : Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_load_c_f32_col_stride
- : Intrinsic::nvvm_wmma_load_c_f32_row_stride;
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride
+ : Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride;
+ NumResults = 8;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_ld_a:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride
+ : Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride;
+ NumResults = 8;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_ld_b:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride
+ : Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride;
+ NumResults = 8;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride
+ : Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride;
+ NumResults = 4;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride
+ : Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride;
+ NumResults = 8;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_ld_a:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride
+ : Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride;
+ NumResults = 8;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_ld_b:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride
+ : Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride;
+ NumResults = 8;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride
+ : Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride;
+ NumResults = 4;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride
+ : Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride;
NumResults = 8;
break;
default:
llvm_unreachable("Unexpected builtin ID.");
}
Value *Result =
- Builder.CreateCall(CGM.getIntrinsic(IID),
- {Builder.CreatePointerCast(Src, VoidPtrTy), Ldm});
+ Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
// Save returned values.
for (unsigned i = 0; i < NumResults; ++i) {
@@ -10078,7 +11809,11 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
}
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
- case NVPTX::BI__hmma_m16n16k16_st_c_f32: {
+ case NVPTX::BI__hmma_m16n16k16_st_c_f32:
+ case NVPTX::BI__hmma_m32n8k16_st_c_f16:
+ case NVPTX::BI__hmma_m32n8k16_st_c_f32:
+ case NVPTX::BI__hmma_m8n32k16_st_c_f16:
+ case NVPTX::BI__hmma_m8n32k16_st_c_f32: {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
@@ -10092,21 +11827,38 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
// for some reason nvcc builtins use _c_.
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
- IID = isColMajor ? Intrinsic::nvvm_wmma_store_d_f16_col_stride
- : Intrinsic::nvvm_wmma_store_d_f16_row_stride;
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride
+ : Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m16n16k16_st_c_f32:
- IID = isColMajor ? Intrinsic::nvvm_wmma_store_d_f32_col_stride
- : Intrinsic::nvvm_wmma_store_d_f32_row_stride;
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride
+ : Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_st_c_f16:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride
+ : Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride;
+ NumResults = 4;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_st_c_f32:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride
+ : Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_st_c_f16:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride
+ : Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride;
+ NumResults = 4;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_st_c_f32:
+ IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride
+ : Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride;
break;
default:
llvm_unreachable("Unexpected builtin ID.");
}
- Function *Intrinsic = CGM.getIntrinsic(IID);
+ Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType());
llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
- SmallVector<Value *, 10> Values;
- Values.push_back(Builder.CreatePointerCast(Dst, VoidPtrTy));
+ SmallVector<Value *, 10> Values = {Dst};
for (unsigned i = 0; i < NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
@@ -10118,12 +11870,20 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
return Result;
}
- // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf)
- // --> Intrinsic::nvvm_wmma_mma_sync<layout A,B><DType><CType><Satf>
+ // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
+ // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
- case NVPTX::BI__hmma_m16n16k16_mma_f16f32: {
+ case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f32: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
@@ -10140,15 +11900,15 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
bool Satf = SatfArg.getSExtValue();
// clang-format off
-#define MMA_VARIANTS(type) {{ \
- Intrinsic::nvvm_wmma_mma_sync_row_row_##type, \
- Intrinsic::nvvm_wmma_mma_sync_row_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_mma_sync_row_col_##type, \
- Intrinsic::nvvm_wmma_mma_sync_row_col_##type##_satfinite, \
- Intrinsic::nvvm_wmma_mma_sync_col_row_##type, \
- Intrinsic::nvvm_wmma_mma_sync_col_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_mma_sync_col_col_##type, \
- Intrinsic::nvvm_wmma_mma_sync_col_col_##type##_satfinite \
+#define MMA_VARIANTS(geom, type) {{ \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
}}
// clang-format on
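+// For example (illustrative): MMA_VARIANTS(m16n16k16, f16_f16) expands to
+// the eight nvvm_wmma_m16n16k16_mma_{row,col}_{row,col}_f16_f16[_satfinite]
+// intrinsic IDs, indexed by the layout and satf arguments.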
@@ -10162,22 +11922,62 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
unsigned NumEltsD;
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(f16_f16));
+ IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f16));
NumEltsC = 4;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
- IID = getMMAIntrinsic(MMA_VARIANTS(f32_f16));
+ IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f16));
NumEltsC = 4;
NumEltsD = 8;
break;
case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(f16_f32));
+ IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f32));
NumEltsC = 8;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
- IID = getMMAIntrinsic(MMA_VARIANTS(f32_f32));
+ IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f32));
+ NumEltsC = 8;
+ NumEltsD = 8;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f16));
+ NumEltsC = 4;
+ NumEltsD = 4;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f16));
+ NumEltsC = 4;
+ NumEltsD = 8;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f32));
+ NumEltsC = 8;
+ NumEltsD = 4;
+ break;
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f32));
+ NumEltsC = 8;
+ NumEltsD = 8;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f16));
+ NumEltsC = 4;
+ NumEltsD = 4;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f16));
+ NumEltsC = 4;
+ NumEltsD = 8;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f32));
+ NumEltsC = 8;
+ NumEltsD = 4;
+ break;
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
+ IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f32));
NumEltsC = 8;
NumEltsD = 8;
break;
@@ -10231,6 +12031,36 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_memory_size: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *I = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
+ return Builder.CreateCall(Callee, I);
+ }
+ case WebAssembly::BI__builtin_wasm_memory_grow: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Args[] = {
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1))
+ };
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
+ return Builder.CreateCall(Callee, Args);
+ }
+ case WebAssembly::BI__builtin_wasm_mem_size: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *I = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_size, ResultType);
+ return Builder.CreateCall(Callee, I);
+ }
+ case WebAssembly::BI__builtin_wasm_mem_grow: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Args[] = {
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1))
+ };
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_grow, ResultType);
+ return Builder.CreateCall(Callee, Args);
+ }
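+  // Illustrative: __builtin_wasm_memory_size(0) returns the size of memory
+  // index 0 in wasm pages; memory_grow/mem_grow grow it by a page delta
+  // and return the previous size.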
case WebAssembly::BI__builtin_wasm_current_memory: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_current_memory, ResultType);
@@ -10262,6 +12092,93 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
SmallVector<llvm::Value *, 4> Ops;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
+ // The base pointer is passed by address, so it needs to be loaded.
+ Address BP = EmitPointerWithAlignment(E->getArg(0));
+ BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
+ BP.getAlignment());
+ llvm::Value *Base = Builder.CreateLoad(BP);
+ // Operands are Base, Increment, Modifier, Start.
+ if (HasImm)
+ Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)) };
+ else
+ Ops = { Base, EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)) };
+
+ llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
+ llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
+ llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
+ NewBase->getType()->getPointerTo());
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ // The intrinsic generates two results. The new value for the base pointer
+ // needs to be stored.
+ Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
+ return Builder.CreateExtractValue(Result, 0);
+ };
+
+ auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
+ // The base pointer is passed by address, so it needs to be loaded.
+ Address BP = EmitPointerWithAlignment(E->getArg(0));
+ BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
+ BP.getAlignment());
+ llvm::Value *Base = Builder.CreateLoad(BP);
+ // Operands are Base, Increment, Modifier, Value, Start.
+ if (HasImm)
+ Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
+ else
+ Ops = { Base, EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };
+
+ llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
+ llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
+ NewBase->getType()->getPointerTo());
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ // The intrinsic generates one result, which is the new value for the base
+ // pointer. It needs to be stored.
+ return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
+ };
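+
+  // Usage sketch (illustrative, variable names hypothetical): a circular
+  // load like __builtin_HEXAGON_L2_loadri_pci(&p, 4, Mod, Start) returns
+  // the loaded i32 and writes the circularly-incremented pointer back
+  // through &p, as emitted by MakeCircLd above.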
+
+  // Handle the conversion of bit-reverse load intrinsics to bitcode.
+  // The intrinsic call emitted below only reads from memory; the write to
+  // memory is handled by the store instruction.
+ auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
+    // The builtin's result is the new value of the base pointer, which
+    // needs to be returned. The loaded value is handed back to the caller
+    // by address (the second argument), so it needs to be stored.
+ llvm::Value *BaseAddress =
+ Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
+
+    // Expressions like &(*pt++) are incremented on each evaluation, and
+    // EmitPointerWithAlignment and EmitScalarExpr each evaluate the
+    // expression once per call, so evaluate the argument only once.
+ Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
+ DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
+ DestAddr.getAlignment());
+ llvm::Value *DestAddress = DestAddr.getPointer();
+
+ // Operands are Base, Dest, Modifier.
+ // The intrinsic format in LLVM IR is defined as
+ // { ValueType, i8* } (i8*, i32).
+ Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};
+
+ llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
+ // The value needs to be stored as the variable is passed by reference.
+ llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
+
+    // The store needs to be truncated to fit the destination type.
+    // While i32 and i64 are natively supported on Hexagon, i8 and i16
+    // need to be stored with their respective destination types.
+ DestVal = Builder.CreateTrunc(DestVal, DestTy);
+
+ llvm::Value *DestForStore =
+ Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
+ Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
+ // The updated value of the base pointer is returned.
+ return Builder.CreateExtractValue(Result, 1);
+ };
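+
+  // Illustrative example (names hypothetical): __builtin_brev_ldh(Base,
+  // &Dst16, Mod) lowers to llvm.hexagon.L2.loadrh.pbr, truncates the
+  // loaded value to i16, stores it through the second argument, and
+  // returns the updated bit-reversed base pointer.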
+
switch (BuiltinID) {
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
@@ -10307,6 +12224,64 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
+ return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
+ return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
+ return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
+ return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
+ case Hexagon::BI__builtin_brev_ldub:
+ return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
+ case Hexagon::BI__builtin_brev_ldb:
+ return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
+ case Hexagon::BI__builtin_brev_lduh:
+ return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
+ case Hexagon::BI__builtin_brev_ldh:
+ return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
+ case Hexagon::BI__builtin_brev_ldw:
+ return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
+ case Hexagon::BI__builtin_brev_ldd:
+ return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
+ default:
+ break;
} // switch
return nullptr;
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index d24ef0a8a974..5fcc9e011bcb 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -15,17 +15,20 @@
#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
+#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/Support/Format.h"
using namespace clang;
using namespace CodeGen;
namespace {
+constexpr unsigned CudaFatMagic = 0x466243b1;
+constexpr unsigned HIPFatMagic = 0x48495046; // "HIPF"
class CGNVCUDARuntime : public CGCUDARuntime {
@@ -41,14 +44,22 @@ private:
/// Keeps track of kernel launch stubs emitted in this module
llvm::SmallVector<llvm::Function *, 16> EmittedKernels;
llvm::SmallVector<std::pair<llvm::GlobalVariable *, unsigned>, 16> DeviceVars;
- /// Keeps track of variables containing handles of GPU binaries. Populated by
+  /// Keeps track of the variable containing the GPU binary handle. Populated by
/// ModuleCtorFunction() and used to create corresponding cleanup calls in
/// ModuleDtorFunction()
- llvm::SmallVector<llvm::GlobalVariable *, 16> GpuBinaryHandles;
+ llvm::GlobalVariable *GpuBinaryHandle = nullptr;
+ /// Whether we generate relocatable device code.
+ bool RelocatableDeviceCode;
llvm::Constant *getSetupArgumentFn() const;
llvm::Constant *getLaunchFn() const;
+ llvm::FunctionType *getRegisterGlobalsFnTy() const;
+ llvm::FunctionType *getCallbackFnTy() const;
+ llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
+ std::string addPrefixToName(StringRef FuncName) const;
+ std::string addUnderscoredPrefixToName(StringRef FuncName) const;
+
/// Creates a function to register all kernel stubs generated in this module.
llvm::Function *makeRegisterGlobalsFn();
@@ -64,14 +75,34 @@ private:
auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
llvm::GlobalVariable *GV =
cast<llvm::GlobalVariable>(ConstStr.getPointer());
- if (!SectionName.empty())
+ if (!SectionName.empty()) {
GV->setSection(SectionName);
+ // Mark the address as used which make sure that this section isn't
+ // merged and we will really have it in the object file.
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::None);
+ }
if (Alignment)
GV->setAlignment(Alignment);
return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
ConstStr.getPointer(), Zeros);
- }
+ }
+
+ /// Helper function that generates an empty dummy function returning void.
+ llvm::Function *makeDummyFunction(llvm::FunctionType *FnTy) {
+ assert(FnTy->getReturnType()->isVoidTy() &&
+ "Can only generate dummy functions returning void!");
+ llvm::Function *DummyFunc = llvm::Function::Create(
+ FnTy, llvm::GlobalValue::InternalLinkage, "dummy", &TheModule);
+
+ llvm::BasicBlock *DummyBlock =
+ llvm::BasicBlock::Create(Context, "", DummyFunc);
+ CGBuilderTy FuncBuilder(CGM, Context);
+ FuncBuilder.SetInsertPoint(DummyBlock);
+ FuncBuilder.CreateRetVoid();
+
+ return DummyFunc;
+ }
void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
@@ -91,9 +122,22 @@ public:
}
+std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
+ if (CGM.getLangOpts().HIP)
+ return ((Twine("hip") + Twine(FuncName)).str());
+ return ((Twine("cuda") + Twine(FuncName)).str());
+}
+std::string
+CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
+ if (CGM.getLangOpts().HIP)
+ return ((Twine("__hip") + Twine(FuncName)).str());
+ return ((Twine("__cuda") + Twine(FuncName)).str());
+}
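+
+// For example: addUnderscoredPrefixToName("RegisterVar") yields
+// "__cudaRegisterVar" for CUDA and "__hipRegisterVar" for HIP.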
+
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
- TheModule(CGM.getModule()) {
+ TheModule(CGM.getModule()),
+ RelocatableDeviceCode(CGM.getLangOpts().CUDARelocatableDeviceCode) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -109,15 +153,37 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
// cudaError_t cudaSetupArgument(void *, size_t, size_t)
llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
- Params, false),
- "cudaSetupArgument");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, Params, false),
+ addPrefixToName("SetupArgument"));
}
llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
- // cudaError_t cudaLaunch(char *)
- return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ if (CGM.getLangOpts().HIP) {
+ // hipError_t hipLaunchByPtr(char *);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
+ } else {
+ // cudaError_t cudaLaunch(char *);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ }
+}
+
+llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
+ return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
+}
+
+llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
+ return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
+}
+
+llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
+ auto CallbackFnTy = getCallbackFnTy();
+ auto RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
+ llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
+ VoidPtrTy, CallbackFnTy->getPointerTo()};
+ return llvm::FunctionType::get(VoidTy, Params, false);
}
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
@@ -181,8 +247,8 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
return nullptr;
llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_register_globals", &TheModule);
+ getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName("_register_globals"), &TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
CGBuilderTy Builder(CGM, Context);
@@ -195,7 +261,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
- "__cudaRegisterFunction");
+ addUnderscoredPrefixToName("RegisterFunction"));
// Extract GpuBinaryHandle passed as the first argument passed to
// __cuda_register_globals() and generate __cudaRegisterFunction() call for
@@ -219,7 +285,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
IntTy, IntTy};
llvm::Constant *RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterVarParams, false),
- "__cudaRegisterVar");
+ addUnderscoredPrefixToName("RegisterVar"));
for (auto &Pair : DeviceVars) {
llvm::GlobalVariable *Var = Pair.first;
unsigned Flags = Pair.second;
@@ -243,133 +309,307 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
}
/// Creates a global constructor function for the module:
+///
+/// For CUDA:
/// \code
/// void __cuda_module_ctor(void*) {
-/// Handle0 = __cudaRegisterFatBinary(GpuBinaryBlob0);
-/// __cuda_register_globals(Handle0);
-/// ...
-/// HandleN = __cudaRegisterFatBinary(GpuBinaryBlobN);
-/// __cuda_register_globals(HandleN);
+/// Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
+/// __cuda_register_globals(Handle);
+/// }
+/// \endcode
+///
+/// For HIP:
+/// \code
+/// void __hip_module_ctor(void*) {
+/// if (__hip_gpubin_handle == 0) {
+/// __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
+/// __hip_register_globals(__hip_gpubin_handle);
+/// }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
- // No need to generate ctors/dtors if there are no GPU binaries.
- if (CGM.getCodeGenOpts().CudaGpuBinaryFileNames.empty())
+ bool IsHIP = CGM.getLangOpts().HIP;
+ // No need to generate ctors/dtors if there is no GPU binary.
+ StringRef CudaGpuBinaryFileName = CGM.getCodeGenOpts().CudaGpuBinaryFileName;
+ if (CudaGpuBinaryFileName.empty() && !IsHIP)
return nullptr;
- // void __cuda_register_globals(void* handle);
+ // void __{cuda|hip}_register_globals(void* handle);
llvm::Function *RegisterGlobalsFunc = makeRegisterGlobalsFn();
- // void ** __cudaRegisterFatBinary(void *);
+ // We always need a function to pass in as callback. Create a dummy
+ // implementation if we don't need to register anything.
+ if (RelocatableDeviceCode && !RegisterGlobalsFunc)
+ RegisterGlobalsFunc = makeDummyFunction(getRegisterGlobalsFnTy());
+
+ // void ** __{cuda|hip}RegisterFatBinary(void *);
llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
- "__cudaRegisterFatBinary");
+ addUnderscoredPrefixToName("RegisterFatBinary"));
// struct { int magic, int version, void * gpu_binary, void * dont_care };
llvm::StructType *FatbinWrapperTy =
llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);
+  // Register the GPU binary with the CUDA runtime, store the returned handle
+  // in a global variable, and save a reference in GpuBinaryHandle to be
+  // cleaned up in the destructor on exit. Then associate all known kernels
+  // with the GPU binary handle so the CUDA runtime can figure out what to
+  // call on the GPU side.
+ std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary;
+ if (!IsHIP) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
+ llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
+ if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
+ CGM.getDiags().Report(diag::err_cannot_open_file)
+ << CudaGpuBinaryFileName << EC.message();
+ return nullptr;
+ }
+ CudaGpuBinary = std::move(CudaGpuBinaryOrErr.get());
+ }
+
llvm::Function *ModuleCtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName("_module_ctor"), &TheModule);
llvm::BasicBlock *CtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
CGBuilderTy CtorBuilder(CGM, Context);
CtorBuilder.SetInsertPoint(CtorEntryBB);
- // For each GPU binary, register it with the CUDA runtime and store returned
- // handle in a global variable and save the handle in GpuBinaryHandles vector
- // to be cleaned up in destructor on exit. Then associate all known kernels
- // with the GPU binary handle so CUDA runtime can figure out what to call on
- // the GPU side.
- for (const std::string &GpuBinaryFileName :
- CGM.getCodeGenOpts().CudaGpuBinaryFileNames) {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> GpuBinaryOrErr =
- llvm::MemoryBuffer::getFileOrSTDIN(GpuBinaryFileName);
- if (std::error_code EC = GpuBinaryOrErr.getError()) {
- CGM.getDiags().Report(diag::err_cannot_open_file) << GpuBinaryFileName
- << EC.message();
- continue;
- }
-
- const char *FatbinConstantName =
- CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
+ const char *FatbinConstantName;
+ const char *FatbinSectionName;
+ const char *ModuleIDSectionName;
+ StringRef ModuleIDPrefix;
+ llvm::Constant *FatBinStr;
+ unsigned FatMagic;
+ if (IsHIP) {
+ FatbinConstantName = ".hip_fatbin";
+ FatbinSectionName = ".hipFatBinSegment";
+
+ ModuleIDSectionName = "__hip_module_id";
+ ModuleIDPrefix = "__hip_";
+
+ // For HIP, create an external symbol __hip_fatbin in section .hip_fatbin.
+ // The external symbol is supposed to contain the fat binary but will be
+    // populated somewhere else, e.g. by lld through a linker script.
+ FatBinStr = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty,
+ /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
+ "__hip_fatbin", nullptr,
+ llvm::GlobalVariable::NotThreadLocal);
+ cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
+
+ FatMagic = HIPFatMagic;
+ } else {
+ if (RelocatableDeviceCode)
+ FatbinConstantName = CGM.getTriple().isMacOSX()
+ ? "__NV_CUDA,__nv_relfatbin"
+ : "__nv_relfatbin";
+ else
+ FatbinConstantName =
+ CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
// NVIDIA's cuobjdump looks for fatbins in this section.
- const char *FatbinSectionName =
+ FatbinSectionName =
CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";
- // Create initialized wrapper structure that points to the loaded GPU binary
- ConstantInitBuilder Builder(CGM);
- auto Values = Builder.beginStruct(FatbinWrapperTy);
- // Fatbin wrapper magic.
- Values.addInt(IntTy, 0x466243b1);
- // Fatbin version.
- Values.addInt(IntTy, 1);
- // Data.
- Values.add(makeConstantString(GpuBinaryOrErr.get()->getBuffer(),
- "", FatbinConstantName, 8));
- // Unused in fatbin v1.
- Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
- llvm::GlobalVariable *FatbinWrapper =
- Values.finishAndCreateGlobal("__cuda_fatbin_wrapper",
- CGM.getPointerAlign(),
- /*constant*/ true);
- FatbinWrapper->setSection(FatbinSectionName);
+ ModuleIDSectionName = CGM.getTriple().isMacOSX()
+ ? "__NV_CUDA,__nv_module_id"
+ : "__nv_module_id";
+ ModuleIDPrefix = "__nv_";
+
+ // For CUDA, create a string literal containing the fat binary loaded from
+ // the given file.
+ FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
+ FatbinConstantName, 8);
+ FatMagic = CudaFatMagic;
+ }
+ // Create initialized wrapper structure that points to the loaded GPU binary
+ ConstantInitBuilder Builder(CGM);
+ auto Values = Builder.beginStruct(FatbinWrapperTy);
+ // Fatbin wrapper magic.
+ Values.addInt(IntTy, FatMagic);
+ // Fatbin version.
+ Values.addInt(IntTy, 1);
+ // Data.
+ Values.add(FatBinStr);
+ // Unused in fatbin v1.
+ Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
+ llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
+ addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
+ /*constant*/ true);
+ FatbinWrapper->setSection(FatbinSectionName);
+
+ // There is only one HIP fat binary per linked module, however there are
+ // multiple constructor functions. Make sure the fat binary is registered
+ // only once. The constructor functions are executed by the dynamic loader
+ // before the program gains control. The dynamic loader cannot execute the
+ // constructor functions concurrently since doing that would not guarantee
+ // thread safety of the loaded program. Therefore we can assume sequential
+ // execution of constructor functions here.
+ if (IsHIP) {
+ llvm::BasicBlock *IfBlock =
+ llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
+ llvm::BasicBlock *ExitBlock =
+ llvm::BasicBlock::Create(Context, "exit", ModuleCtorFunc);
+    // The name, size, and initialization pattern of this variable are part
+    // of the HIP ABI.
+ GpuBinaryHandle = new llvm::GlobalVariable(
+ TheModule, VoidPtrPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::LinkOnceAnyLinkage,
+ /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
+ "__hip_gpubin_handle");
+ GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getQuantity());
+ Address GpuBinaryAddr(
+ GpuBinaryHandle,
+ CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
+ {
+ auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
+ llvm::Constant *Zero =
+ llvm::Constant::getNullValue(HandleValue->getType());
+ llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
+ CtorBuilder.CreateCondBr(EQZero, IfBlock, ExitBlock);
+ }
+ {
+ CtorBuilder.SetInsertPoint(IfBlock);
+ // GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
+ llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
+ RegisterFatbinFunc,
+ CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
+ CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
+ CtorBuilder.CreateBr(ExitBlock);
+ }
+ {
+ CtorBuilder.SetInsertPoint(ExitBlock);
+ // Call __hip_register_globals(GpuBinaryHandle);
+ if (RegisterGlobalsFunc) {
+ auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
+ CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
+ }
+ }
+ } else if (!RelocatableDeviceCode) {
+ // Register binary with CUDA runtime. This is substantially different in
+ // default mode vs. separate compilation!
// GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
RegisterFatbinFunc,
CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
- llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable(
+ GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
+ GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getQuantity());
CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
CGM.getPointerAlign());
// Call __cuda_register_globals(GpuBinaryHandle);
if (RegisterGlobalsFunc)
CtorBuilder.CreateCall(RegisterGlobalsFunc, RegisterFatbinCall);
+ } else {
+ // Generate a unique module ID.
+ SmallString<64> ModuleID;
+ llvm::raw_svector_ostream OS(ModuleID);
+ OS << ModuleIDPrefix << llvm::format("%x", FatbinWrapper->getGUID());
+ llvm::Constant *ModuleIDConstant =
+ makeConstantString(ModuleID.str(), "", ModuleIDSectionName, 32);
+
+ // Create an alias for the FatbinWrapper that nvcc will look for.
+ llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
+ Twine("__fatbinwrap") + ModuleID, FatbinWrapper);
+
+ // void __cudaRegisterLinkedBinary%ModuleID%(void (*)(void *), void *,
+ // void *, void (*)(void **))
+ SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
+ RegisterLinkedBinaryName += ModuleID;
+ llvm::Constant *RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
+ getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);
+
+ assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
+ llvm::Value *Args[] = {RegisterGlobalsFunc,
+ CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
+ ModuleIDConstant,
+ makeDummyFunction(getCallbackFnTy())};
+ CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
+ }
- // Save GpuBinaryHandle so we can unregister it in destructor.
- GpuBinaryHandles.push_back(GpuBinaryHandle);
+  // Create a destructor and register it with atexit() the way NVCC does.
+  // Doing it during the regular destructor phase worked in CUDA before 9.2
+  // but results in a double-free in 9.2.
+ if (llvm::Function *CleanupFn = makeModuleDtorFunction()) {
+ // extern "C" int atexit(void (*f)(void));
+ llvm::FunctionType *AtExitTy =
+ llvm::FunctionType::get(IntTy, CleanupFn->getType(), false);
+ llvm::Constant *AtExitFunc =
+ CGM.CreateRuntimeFunction(AtExitTy, "atexit", llvm::AttributeList(),
+ /*Local=*/true);
+ CtorBuilder.CreateCall(AtExitFunc, CleanupFn);
}
CtorBuilder.CreateRetVoid();
return ModuleCtorFunc;
}
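
As a reading aid, here is a hedged C-level sketch of what the HIP branch of this constructor effectively emits; the runtime entry points and the wrapper symbol are assumptions inferred from the calls above, not quotations from the HIP headers:

    // Sketch only: a C rendering of the generated __hip_module_ctor.
    extern char __hip_fatbin_wrapper[];            // assumed wrapper symbol
    extern void **__hipRegisterFatBinary(void *);  // assumed signature
    extern void __hip_register_globals(void **);   // assumed signature

    void **__hip_gpubin_handle;  // linkonce_any: one shared copy per program

    void __hip_module_ctor(void) {
      if (__hip_gpubin_handle == 0)  // register the fat binary only once
        __hip_gpubin_handle = __hipRegisterFatBinary(__hip_fatbin_wrapper);
      __hip_register_globals(__hip_gpubin_handle);  // per-TU registration
    }
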
-/// Creates a global destructor function that unregisters all GPU code blobs
+/// Creates a global destructor function that unregisters the GPU code blob
/// registered by constructor.
+///
+/// For CUDA:
/// \code
/// void __cuda_module_dtor(void*) {
-/// __cudaUnregisterFatBinary(Handle0);
-/// ...
-/// __cudaUnregisterFatBinary(HandleN);
+/// __cudaUnregisterFatBinary(Handle);
+/// }
+/// \endcode
+///
+/// For HIP:
+/// \code
+/// void __hip_module_dtor(void*) {
+/// if (__hip_gpubin_handle) {
+/// __hipUnregisterFatBinary(__hip_gpubin_handle);
+/// __hip_gpubin_handle = 0;
+/// }
/// }
/// \endcode
llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
- // No need for destructor if we don't have handles to unregister.
- if (GpuBinaryHandles.empty())
+ // No need for destructor if we don't have a handle to unregister.
+ if (!GpuBinaryHandle)
return nullptr;
// void __cudaUnregisterFatBinary(void ** handle);
llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
- "__cudaUnregisterFatBinary");
+ addUnderscoredPrefixToName("UnregisterFatBinary"));
llvm::Function *ModuleDtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName("_module_dtor"), &TheModule);
+
llvm::BasicBlock *DtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
CGBuilderTy DtorBuilder(CGM, Context);
DtorBuilder.SetInsertPoint(DtorEntryBB);
- for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) {
- auto HandleValue =
- DtorBuilder.CreateAlignedLoad(GpuBinaryHandle, CGM.getPointerAlign());
+ Address GpuBinaryAddr(GpuBinaryHandle, CharUnits::fromQuantity(
+ GpuBinaryHandle->getAlignment()));
+ auto HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
+ // There is only one HIP fat binary per linked module; however, there are
+ // multiple destructor functions. Make sure the fat binary is unregistered
+ // only once.
+ if (CGM.getLangOpts().HIP) {
+ llvm::BasicBlock *IfBlock =
+ llvm::BasicBlock::Create(Context, "if", ModuleDtorFunc);
+ llvm::BasicBlock *ExitBlock =
+ llvm::BasicBlock::Create(Context, "exit", ModuleDtorFunc);
+ llvm::Constant *Zero = llvm::Constant::getNullValue(HandleValue->getType());
+ llvm::Value *NEZero = DtorBuilder.CreateICmpNE(HandleValue, Zero);
+ DtorBuilder.CreateCondBr(NEZero, IfBlock, ExitBlock);
+
+ DtorBuilder.SetInsertPoint(IfBlock);
DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
- }
+ DtorBuilder.CreateStore(Zero, GpuBinaryAddr);
+ DtorBuilder.CreateBr(ExitBlock);
+ DtorBuilder.SetInsertPoint(ExitBlock);
+ } else {
+ DtorBuilder.CreateCall(UnregisterFatbinFunc, HandleValue);
+ }
DtorBuilder.CreateRetVoid();
return ModuleDtorFunc;
}
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 5ef4dc45fba1..475f17b77d92 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -109,17 +109,8 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
D->getType()->getAs<FunctionType>()->getCallConv())
return true;
- return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
- GlobalDecl(BaseD, Dtor_Base));
-}
-
-/// Try to emit a definition as a global alias for another definition.
-/// If \p InEveryTU is true, we know that an equivalent alias can be produced
-/// in every translation unit.
-bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
- GlobalDecl TargetDecl) {
- if (!getCodeGenOpts().CXXCtorDtorAliases)
- return true;
+ GlobalDecl AliasDecl(D, Dtor_Base);
+ GlobalDecl TargetDecl(BaseD, Dtor_Base);
// The alias will use the linkage of the referent. If we can't
// support aliases with that linkage, fail.
@@ -193,6 +184,9 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
auto *Alias = llvm::GlobalAlias::create(AliasValueType, 0, Linkage, "",
Aliasee, &getModule());
+ // Destructors are always unnamed_addr.
+ Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+
// Switch any previous uses to the alias.
if (Entry) {
assert(Entry->getType() == AliasType &&
@@ -205,7 +199,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
// Finally, set up the alias with its proper name and attributes.
- setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+ SetCommonAttributes(AliasDecl, Alias);
return false;
}
@@ -227,10 +221,9 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
}
setFunctionLinkage(GD, Fn);
- setFunctionDLLStorageClass(GD, Fn);
CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
- setFunctionDefinitionAttributes(MD, Fn);
+ setNonAliasAttributes(GD, Fn);
SetLLVMFunctionAttributesForDefinition(MD, Fn);
return Fn;
}
@@ -243,6 +236,11 @@ llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
} else {
+ // Always alias equivalent complete destructors to base destructors in the
+ // MS ABI.
+ if (getTarget().getCXXABI().isMicrosoft() &&
+ Type == StructorType::Complete && MD->getParent()->getNumVBases() == 0)
+ Type = StructorType::Base;
GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type));
}
@@ -263,7 +261,6 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
"No kext in Microsoft ABI");
- GD = GD.getCanonicalDecl();
CodeGenModule &CGM = CGF.CGM;
llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
Ty = Ty->getPointerTo()->getPointerTo();
@@ -279,7 +276,7 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
llvm::Value *VFunc =
CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
- CGCallee Callee(GD.getDecl(), VFunc);
+ CGCallee Callee(GD.getDecl()->getCanonicalDecl(), VFunc);
return Callee;
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index a27c3e9d27e3..0611749acf17 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -287,6 +287,20 @@ CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
return nullptr;
}
+void CGCXXABI::setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const {
+ // Assume the base C++ ABI has no special rules for destructor variants.
+ CGM.setDLLImportDLLExport(GV, Dtor);
+}
+
+llvm::GlobalValue::LinkageTypes CGCXXABI::getCXXDestructorLinkage(
+ GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
+ // Delegate back to CGM by default.
+ return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
+ /*isConstantVariable=*/false);
+}
+
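
Both defaults above are intentionally trivial, so only ABIs with destructor-variant-specific rules need to override them. A minimal sketch of such an override for a hypothetical ABI subclass (illustrative only, not the Microsoft implementation):

    // Hypothetical override: give base-object destructors internal linkage.
    llvm::GlobalValue::LinkageTypes
    MyCXXABI::getCXXDestructorLinkage(GVALinkage Linkage,
                                      const CXXDestructorDecl *Dtor,
                                      CXXDtorType DT) const {
      if (DT == Dtor_Base)
        return llvm::GlobalValue::InternalLinkage;
      // Otherwise keep the default CGM-derived linkage.
      return CGCXXABI::getCXXDestructorLinkage(Linkage, Dtor, DT);
    }
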
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
return false;
}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 83426dc3a03c..65b50e14f436 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -40,7 +40,7 @@ class CodeGenFunction;
class CodeGenModule;
struct CatchTypeInfo;
-/// \brief Implements C++ ABI-specific code generation functions.
+/// Implements C++ ABI-specific code generation functions.
class CGCXXABI {
protected:
CodeGenModule &CGM;
@@ -222,7 +222,7 @@ protected:
/// is required.
llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
- /// \brief Computes the non-virtual adjustment needed for a member pointer
+ /// Computes the non-virtual adjustment needed for a member pointer
/// conversion along an inheritance path stored in an APValue. Unlike
/// getMemberPointerAdjustment(), the adjustment can be negative if the path
/// is from a derived type to a base type.
@@ -237,7 +237,7 @@ public:
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) = 0;
virtual llvm::GlobalVariable *getThrowInfo(QualType T) { return nullptr; }
- /// \brief Determine whether it's possible to emit a vtable for \p RD, even
+ /// Determine whether it's possible to emit a vtable for \p RD, even
/// though we do not know that the vtable has been marked as used by semantic
/// analysis.
virtual bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const = 0;
@@ -319,6 +319,14 @@ public:
virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
CXXDtorType DT) const = 0;
+ virtual void setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const;
+
+ virtual llvm::GlobalValue::LinkageTypes
+ getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const;
+
/// Emit destructor variants required by this ABI.
virtual void EmitCXXDestructors(const CXXDestructorDecl *D) = 0;
@@ -414,8 +422,7 @@ public:
/// Build a virtual function pointer in the ABI-specific way.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF,
- GlobalDecl GD,
- Address This,
+ GlobalDecl GD, Address This,
llvm::Type *Ty,
SourceLocation Loc) = 0;
@@ -434,6 +441,7 @@ public:
/// base tables.
virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD) = 0;
+ virtual bool exportThunk() = 0;
virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
GlobalDecl GD, bool ReturnAdjustment) = 0;
@@ -599,6 +607,17 @@ CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
/// Creates a Microsoft-family ABI.
CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+struct CatchRetScope final : EHScopeStack::Cleanup {
+ llvm::CatchPadInst *CPI;
+
+ CatchRetScope(llvm::CatchPadInst *CPI) : CPI(CPI) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ llvm::BasicBlock *BB = CGF.createBasicBlock("catchret.dest");
+ CGF.Builder.CreateCatchRet(CPI, BB);
+ CGF.EmitBlock(BB);
+ }
+};
}
}
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 38d7344572d3..f066ce168588 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -29,15 +29,15 @@
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
-#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
@@ -255,6 +255,16 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}
+/// Set calling convention for CUDA/HIP kernel.
+static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
+ const FunctionDecl *FD) {
+ if (FD->hasAttr<CUDAGlobalAttr>()) {
+ const FunctionType *FT = FTy->getAs<FunctionType>();
+ CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
+ FTy = FT->getCanonicalTypeUnqualified();
+ }
+}
+
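
The helper above defers to a per-target hook. A plausible implementation for a target whose kernels use a dedicated calling convention might look like the following; treat the body as an assumption modeled on how a GPU target could wire this up:

    // Sketch: rewrite the function type of a __global__ kernel so it carries
    // the target's kernel calling convention.
    void MyTargetCodeGenInfo::setCUDAKernelCallingConvention(
        const FunctionType *&FT) const {
      FT = getABIInfo().getContext().adjustFunctionType(
          FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
    }
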
/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
@@ -264,7 +274,9 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
- CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+ CanQualType FT = GetFormalType(MD).getAs<Type>();
+ setCUDAKernelCallingConvention(FT, CGM, MD);
+ auto prototype = FT.getAs<FunctionProtoType>();
if (MD->isInstance()) {
// The abstract case is perfectly fine.
@@ -424,6 +436,7 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
assert(isa<FunctionType>(FTy));
+ setCUDAKernelCallingConvention(FTy, CGM, FD);
// When declaring a function without a prototype, always use a
// non-variadic type.
@@ -513,8 +526,8 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
-CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
- assert(MD->isVirtual() && "only virtual memptrs have thunks");
+CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
+ assert(MD->isVirtual() && "only virtual methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
@@ -803,6 +816,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
+ FI->NoCfCheck = info.getNoCfCheck();
FI->Required = required;
FI->HasRegParm = info.getHasRegParm();
FI->RegParm = info.getRegParm();
@@ -904,8 +918,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
CharUnits UnionSize = CharUnits::Zero();
for (const auto *FD : RD->fields()) {
- // Skip zero length bitfields.
- if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ if (FD->isZeroLengthBitField(Context))
continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
@@ -926,8 +939,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
}
for (const auto *FD : RD->fields()) {
- // Skip zero length bitfields.
- if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ if (FD->isZeroLengthBitField(Context))
continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
@@ -1040,42 +1052,49 @@ void CodeGenFunction::ExpandTypeFromArgs(
}
void CodeGenFunction::ExpandTypeToArgs(
- QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
- [&](Address EltAddr) {
- RValue EltRV =
- convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
- ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
- });
+ Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
+ : Arg.getKnownRValue().getAggregateAddress();
+ forConstantArrayExpansion(
+ *this, CAExp, Addr, [&](Address EltAddr) {
+ CallArg EltArg = CallArg(
+ convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
+ CAExp->EltTy);
+ ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ });
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = RV.getAggregateAddress();
+ Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
+ : Arg.getKnownRValue().getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
Address Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
- RValue BaseRV = RValue::getAggregate(Base);
+ CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
// Recurse onto bases.
- ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
+ ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
IRCallArgPos);
}
LValue LV = MakeAddrLValue(This, Ty);
for (auto FD : RExp->Fields) {
- RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
- ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
+ CallArg FldArg =
+ CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
+ ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
IRCallArgPos);
}
} else if (isa<ComplexExpansion>(Exp.get())) {
- ComplexPairTy CV = RV.getComplexVal();
+ ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
IRCallArgs[IRCallArgPos++] = CV.first;
IRCallArgs[IRCallArgPos++] = CV.second;
} else {
assert(isa<NoExpansion>(Exp.get()));
+ auto RV = Arg.getKnownRValue();
assert(RV.isScalar() &&
"Unexpected non-scalar rvalue during struct expansion.");
@@ -1479,7 +1498,8 @@ void ClangToLLVMArgMapping::construct(const ASTContext &Context,
/***/
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
- return FI.getReturnInfo().isIndirect();
+ const auto &RI = FI.getReturnInfo();
+ return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}
bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
@@ -1672,7 +1692,7 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
return;
if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
- FPT->isNothrow(Ctx))
+ FPT->isNothrow())
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
@@ -1714,12 +1734,19 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
FuncAttrs.addAttribute("less-precise-fpmad",
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
+ if (CodeGenOpts.NullPointerIsValid)
+ FuncAttrs.addAttribute("null-pointer-is-valid", "true");
if (!CodeGenOpts.FPDenormalMode.empty())
FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
FuncAttrs.addAttribute("no-trapping-math",
llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+ // Strict (compliant) code is the default, so only add this attribute to
+ // indicate that we are trying to work around a problem case.
+ if (!CodeGenOpts.StrictFloatCastOverflow)
+ FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
+
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
FuncAttrs.addAttribute("no-infs-fp-math",
@@ -1738,6 +1765,10 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
"correctly-rounded-divide-sqrt-fp-math",
llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
+ if (getLangOpts().OpenCL)
+ FuncAttrs.addAttribute("denorms-are-zero",
+ llvm::toStringRef(CodeGenOpts.FlushDenorm));
+
// TODO: Reciprocal estimate codegen options should apply to instructions?
const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
if (!Recips.empty())
@@ -1769,7 +1800,7 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
// Respect -fcuda-flush-denormals-to-zero.
- if (getLangOpts().CUDADeviceFlushDenormalsToZero)
+ if (CodeGenOpts.FlushDenorm)
FuncAttrs.addAttribute("nvptx-f32ftz", "true");
}
}
@@ -1793,7 +1824,7 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
// If we have information about the function prototype, we can learn
- // attributes form there.
+ // attributes from there.
AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
CalleeInfo.getCalleeFunctionProtoType());
@@ -1838,18 +1869,20 @@ void CodeGenModule::ConstructAttributeList(
}
if (TargetDecl->hasAttr<RestrictAttr>())
RetAttrs.addAttribute(llvm::Attribute::NoAlias);
- if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
+ if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
+ !CodeGenOpts.NullPointerIsValid)
RetAttrs.addAttribute(llvm::Attribute::NonNull);
if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
FuncAttrs.addAttribute("no_caller_saved_registers");
+ if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
Optional<unsigned> NumElemsParam;
- // alloc_size args are base-1, 0 means not present.
- if (unsigned N = AllocSize->getNumElemsParam())
- NumElemsParam = N - 1;
- FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
+ if (AllocSize->getNumElemsParam().isValid())
+ NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
+ FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
NumElemsParam);
}
}
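
The ParamIdx change above separates source-level (1-based) attribute positions from the 0-based indices LLVM expects; getLLVMIndex() is assumed to perform that mapping. A small worked example:

    // For a declaration such as:
    //   void *alloc2d(int n, int sz) __attribute__((alloc_size(2, 1)));
    // the attribute records ElemSizeParam=2 and NumElemsParam=1 (1-based
    // source positions), and getLLVMIndex() is assumed to yield the 0-based
    // IR argument numbers 1 and 0 that addAllocSizeAttr() receives.
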
@@ -1870,53 +1903,40 @@ void CodeGenModule::ConstructAttributeList(
}
}
- if (!AttrOnCallSite) {
- bool DisableTailCalls =
- CodeGenOpts.DisableTailCalls ||
- (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
- TargetDecl->hasAttr<AnyX86InterruptAttr>()));
- FuncAttrs.addAttribute("disable-tail-calls",
- llvm::toStringRef(DisableTailCalls));
-
- // Add target-cpu and target-features attributes to functions. If
- // we have a decl for the function and it has a target attribute then
- // parse that and add it to the feature set.
- StringRef TargetCPU = getTarget().getTargetOpts().CPU;
- std::vector<std::string> Features;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
- if (FD && FD->hasAttr<TargetAttr>()) {
- llvm::StringMap<bool> FeatureMap;
- getFunctionFeatureMap(FeatureMap, FD);
-
- // Produce the canonical string for this set of features.
- for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
- ie = FeatureMap.end();
- it != ie; ++it)
- Features.push_back((it->second ? "+" : "-") + it->first().str());
-
- // Now add the target-cpu and target-features to the function.
- // While we populated the feature map above, we still need to
- // get and parse the target attribute so we can get the cpu for
- // the function.
- const auto *TD = FD->getAttr<TargetAttr>();
- TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
- if (ParsedAttr.Architecture != "" &&
- getTarget().isValidCPUName(ParsedAttr.Architecture))
- TargetCPU = ParsedAttr.Architecture;
+ if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
+ if (getLangOpts().OpenCLVersion <= 120) {
+ // In OpenCL v1.2, work groups are always uniform.
+ FuncAttrs.addAttribute("uniform-work-group-size", "true");
} else {
- // Otherwise just add the existing target cpu and target features to the
- // function.
- Features = getTarget().getTargetOpts().Features;
+ // In OpenCL v2.0, work groups may or may not be uniform. The
+ // '-cl-uniform-work-group-size' compile option hints to the compiler
+ // that the global work-size is a multiple of the work-group size
+ // specified at clEnqueueNDRangeKernel (i.e. work groups are uniform).
+ FuncAttrs.addAttribute("uniform-work-group-size",
+ llvm::toStringRef(CodeGenOpts.UniformWGSize));
}
+ }
- if (TargetCPU != "")
- FuncAttrs.addAttribute("target-cpu", TargetCPU);
- if (!Features.empty()) {
- std::sort(Features.begin(), Features.end());
- FuncAttrs.addAttribute(
- "target-features",
- llvm::join(Features, ","));
+ if (!AttrOnCallSite) {
+ bool DisableTailCalls = false;
+
+ if (CodeGenOpts.DisableTailCalls)
+ DisableTailCalls = true;
+ else if (TargetDecl) {
+ if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
+ TargetDecl->hasAttr<AnyX86InterruptAttr>())
+ DisableTailCalls = true;
+ else if (CodeGenOpts.NoEscapingBlockTailCalls) {
+ if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
+ if (!BD->doesNotEscape())
+ DisableTailCalls = true;
+ }
}
+
+ FuncAttrs.addAttribute("disable-tail-calls",
+ llvm::toStringRef(DisableTailCalls));
+ GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
}
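
For orientation, the uniform-work-group-size logic above boils down to the following behavior; the kernel and flag spellings other than the attribute string itself are assumptions:

    // kernel void k(global int *p) { p[0] = 0; }
    //
    // -cl-std=CL1.2                    ->  "uniform-work-group-size"="true"
    // -cl-std=CL2.0 (flag unset)       ->  "uniform-work-group-size"="false"
    // -cl-std=CL2.0 -cl-uniform-work-group-size
    //                                  ->  "uniform-work-group-size"="true"
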
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
@@ -1925,9 +1945,9 @@ void CodeGenModule::ConstructAttributeList(
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
- if (RetTy->hasSignedIntegerRepresentation())
+ if (RetAI.isSignExt())
RetAttrs.addAttribute(llvm::Attribute::SExt);
- else if (RetTy->hasUnsignedIntegerRepresentation())
+ else
RetAttrs.addAttribute(llvm::Attribute::ZExt);
LLVM_FALLTHROUGH;
case ABIArgInfo::Direct:
@@ -1957,7 +1977,8 @@ void CodeGenModule::ConstructAttributeList(
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
.getQuantity());
- else if (getContext().getTargetAddressSpace(PTy) == 0)
+ else if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
RetAttrs.addAttribute(llvm::Attribute::NonNull);
}
@@ -1967,7 +1988,8 @@ void CodeGenModule::ConstructAttributeList(
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs;
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ if (!RetAI.getSuppressSRet())
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2006,14 +2028,10 @@ void CodeGenModule::ConstructAttributeList(
// sense to do it here because parameters are so messed up.
switch (AI.getKind()) {
case ABIArgInfo::Extend:
- if (ParamType->isSignedIntegerOrEnumerationType())
+ if (AI.isSignExt())
Attrs.addAttribute(llvm::Attribute::SExt);
- else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
- if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
- Attrs.addAttribute(llvm::Attribute::SExt);
- else
- Attrs.addAttribute(llvm::Attribute::ZExt);
- }
+ else
+ Attrs.addAttribute(llvm::Attribute::ZExt);
LLVM_FALLTHROUGH;
case ABIArgInfo::Direct:
if (ArgNo == 0 && FI.isChainCall())
@@ -2070,7 +2088,8 @@ void CodeGenModule::ConstructAttributeList(
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
.getQuantity());
- else if (getContext().getTargetAddressSpace(PTy) == 0)
+ else if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
Attrs.addAttribute(llvm::Attribute::NonNull);
}
@@ -2255,11 +2274,16 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i, ++info_it, ++ArgNo) {
const VarDecl *Arg = *i;
- QualType Ty = info_it->type;
const ABIArgInfo &ArgI = info_it->info;
bool isPromoted =
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+ // We are converting from ABIArgInfo type to VarDecl type directly, unless
+ // the parameter is promoted. In that case we convert to
+ // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
+ QualType Ty = isPromoted ? info_it->type : Arg->getType();
+ assert(hasScalarEvaluationKind(Ty) ==
+ hasScalarEvaluationKind(Arg->getType()));
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
@@ -2325,7 +2349,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
- PVD->getFunctionScopeIndex()))
+ PVD->getFunctionScopeIndex()) &&
+ !CGM.getCodeGenOpts().NullPointerIsValid)
AI->addAttr(llvm::Attribute::NonNull);
QualType OTy = PVD->getOriginalType();
@@ -2344,7 +2369,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
AI->addAttrs(Attrs);
- } else if (getContext().getTargetAddressSpace(ETy) == 0) {
+ } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
+ !CGM.getCodeGenOpts().NullPointerIsValid) {
AI->addAttr(llvm::Attribute::NonNull);
}
}
@@ -2354,7 +2380,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// we can't use the dereferenceable attribute, but in addrspace(0)
// we know that it must be nonnull.
if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
- !getContext().getTargetAddressSpace(ArrTy->getElementType()))
+ !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
+ !CGM.getCodeGenOpts().NullPointerIsValid)
AI->addAttr(llvm::Attribute::NonNull);
}
@@ -3022,7 +3049,8 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
Ty.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap);
}
void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
@@ -3062,6 +3090,19 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
} else {
args.add(convertTempToRValue(local, type, loc), type);
}
+
+ // Deactivate the cleanup for the callee-destructed param that was pushed.
+ if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
+ type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type.isDestructedType()) {
+ EHScopeStack::stable_iterator cleanup =
+ CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
+ assert(cleanup.isValid() &&
+ "cleanup for callee-destructed param not recorded");
+ // This unreachable is a temporary marker which will be removed later.
+ llvm::Instruction *isActive = Builder.CreateUnreachable();
+ args.addArgCleanupDeactivation(cleanup, isActive);
+ }
}
static bool isProvablyNull(llvm::Value *addr) {
@@ -3143,7 +3184,6 @@ static void emitWritebacks(CodeGenFunction &CGF,
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
const CallArgList &CallArgs) {
- assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
ArrayRef<CallArgList::CallArgCleanup> Cleanups =
CallArgs.getCleanupsToDeactivate();
// Iterate in reverse to increase the likelihood of popping the cleanup.
@@ -3430,13 +3470,17 @@ void CodeGenFunction::EmitCallArgs(
assert(InitialArgSize + 1 == Args.size() &&
"The code below depends on only adding one arg per EmitCallArg");
(void)InitialArgSize;
- RValue RVArg = Args.back().RV;
- EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
- ParamsToSkip + Idx);
- // @llvm.objectsize should never have side-effects and shouldn't need
- // destruction/cleanups, so we can safely "emit" it after its arg,
- // regardless of right-to-leftness
- MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
+ // Since pointer arguments are never emitted as LValues, it is safe to
+ // emit the non-null argument check for r-values only.
+ if (!Args.back().hasLValue()) {
+ RValue RVArg = Args.back().getKnownRValue();
+ EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
+ ParamsToSkip + Idx);
+ // @llvm.objectsize should never have side-effects and shouldn't need
+ // destruction/cleanups, so we can safely "emit" it after its arg,
+ // regardless of right-to-leftness
+ MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
+ }
}
if (!LeftToRight) {
@@ -3456,10 +3500,15 @@ struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
QualType Ty;
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
- assert(!Dtor->isTrivial());
- CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
- /*Delegating=*/false, Addr);
+ QualType::DestructionKind DtorKind = Ty.isDestructedType();
+ if (DtorKind == QualType::DK_cxx_destructor) {
+ const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
+ assert(!Dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
+ /*Delegating=*/false, Addr);
+ } else {
+ CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
+ }
}
};
@@ -3478,6 +3527,33 @@ struct DisableDebugLocationUpdates {
} // end anonymous namespace
+RValue CallArg::getRValue(CodeGenFunction &CGF) const {
+ if (!HasLV)
+ return RV;
+ LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
+ CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
+ LV.isVolatile());
+ IsUsed = true;
+ return RValue::getAggregate(Copy.getAddress());
+}
+
+void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
+ LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
+ if (!HasLV && RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
+ else if (!HasLV && RV.isComplex())
+ CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
+ else {
+ auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
+ LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
+ // We assume that call args are never copied into subobjects.
+ CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
+ HasLV ? LV.isVolatileQualified()
+ : RV.isVolatileQualified());
+ }
+ IsUsed = true;
+}
+
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
DisableDebugLocationUpdates Dis(*this, E);
@@ -3501,7 +3577,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
if (HasAggregateEvalKind &&
- CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
+ type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
AggValueSlot Slot;
@@ -3510,10 +3586,12 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
else
Slot = CreateAggTemp(type, "agg.tmp");
- const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
- bool DestroyedInCallee =
- RD && RD->hasNonTrivialDestructor() &&
- CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
+ bool DestroyedInCallee = true, NeedsEHCleanup = true;
+ if (const auto *RD = type->getAsCXXRecordDecl())
+ DestroyedInCallee = RD->hasNonTrivialDestructor();
+ else
+ NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
+
if (DestroyedInCallee)
Slot.setExternallyDestructed();
@@ -3521,7 +3599,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
RValue RV = Slot.asRValue();
args.add(RV, type);
- if (DestroyedInCallee) {
+ if (DestroyedInCallee && NeedsEHCleanup) {
// Create a no-op GEP between the placeholder and the cleanup so we can
// RAUW it successfully. It also serves as a marker of the first
// instruction where the cleanup is active.
@@ -3538,15 +3616,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
- if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
- args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
- } else {
- // We can't represent a misaligned lvalue in the CallArgList, so copy
- // to an aligned temporary now.
- Address tmp = CreateMemTemp(type);
- EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
- args.add(RValue::getAggregate(tmp), type);
- }
+ args.addUncopiedAggregate(L, type);
return;
}
@@ -3608,20 +3678,21 @@ CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
-static void
-getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
- SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
+SmallVector<llvm::OperandBundleDef, 1>
+CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
// There is no need for a funclet operand bundle if we aren't inside a
// funclet.
if (!CurrentFuncletPad)
- return;
+ return BundleList;
// Skip intrinsics which cannot throw.
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
- return;
+ return BundleList;
BundleList.emplace_back("funclet", CurrentFuncletPad);
+ return BundleList;
}
/// Emits a simple call (never an invoke) to the given runtime function.
@@ -3629,10 +3700,8 @@ llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const llvm::Twine &name) {
- SmallVector<llvm::OperandBundleDef, 1> BundleList;
- getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
-
- llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
+ llvm::CallInst *call =
+ Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
call->setCallingConv(getRuntimeCC());
return call;
}
@@ -3640,8 +3709,8 @@ CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args) {
- SmallVector<llvm::OperandBundleDef, 1> BundleList;
- getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
+ SmallVector<llvm::OperandBundleDef, 1> BundleList =
+ getBundlesForFunclet(callee);
if (getInvokeDest()) {
llvm::InvokeInst *invoke =
@@ -3684,8 +3753,8 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
- SmallVector<llvm::OperandBundleDef, 1> BundleList;
- getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
+ SmallVector<llvm::OperandBundleDef, 1> BundleList =
+ getBundlesForFunclet(Callee);
llvm::Instruction *Inst;
if (!InvokeDest)
@@ -3705,16 +3774,6 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
return llvm::CallSite(Inst);
}
-/// \brief Store a non-aggregate value to an address to initialize it. For
-/// initialization, a non-atomic store will be used.
-static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
- LValue Dst) {
- if (Src.isScalar())
- CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
- else
- CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
-}
-
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
llvm::Value *New) {
DeferredReplacements.push_back(std::make_pair(Old, New));
@@ -3728,7 +3787,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
SourceLocation Loc) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
- assert(Callee.isOrdinary());
+ assert(Callee.isOrdinary() || Callee.isVirtual());
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
@@ -3775,17 +3834,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
Address SRetPtr = Address::invalid();
- size_t UnusedReturnSize = 0;
+ Address SRetAlloca = Address::invalid();
+ llvm::Value *UnusedReturnSizePtr = nullptr;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
if (!ReturnValue.isNull()) {
SRetPtr = ReturnValue.getValue();
} else {
- SRetPtr = CreateMemTemp(RetTy);
+ SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
uint64_t size =
CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
- if (EmitLifetimeStart(size, SRetPtr.getPointer()))
- UnusedReturnSize = size;
+ UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
}
}
if (IRFunctionArgs.hasSRetArg()) {
@@ -3807,7 +3866,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
I != E; ++I, ++info_it, ++ArgNo) {
const ABIArgInfo &ArgInfo = info_it->info;
- RValue RV = I->RV;
// Insert a padding argument to ensure proper alignment.
if (IRFunctionArgs.hasPaddingArg(ArgNo))
@@ -3821,13 +3879,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
- if (RV.isAggregate()) {
+ if (I->isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
+ Address Addr = I->hasLValue()
+ ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
- cast<llvm::Instruction>(RV.getAggregatePointer());
+ cast<llvm::Instruction>(Addr.getPointer());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
- Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
deferPlaceholderReplacement(Placeholder, Addr.getPointer());
} else {
@@ -3840,22 +3901,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// from {}* to (%struct.foo*)*.
if (Addr.getType() != MemType)
Addr = Builder.CreateBitCast(Addr, MemType);
- LValue argLV = MakeAddrLValue(Addr, I->Ty);
- EmitInitStoreOfNonAggregate(*this, RV, argLV);
+ I->copyInto(*this, Addr);
}
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- if (RV.isScalar() || RV.isComplex()) {
+ if (!I->isAggregate()) {
// Make a temporary alloca to pass the argument.
- Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
- "indirect-arg-temp", false);
+ Address Addr = CreateMemTempWithoutCast(
+ I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
IRCallArgs[FirstIRArg] = Addr.getPointer();
- LValue argLV = MakeAddrLValue(Addr, I->Ty);
- EmitInitStoreOfNonAggregate(*this, RV, argLV);
+ I->copyInto(*this, Addr);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
@@ -3863,30 +3922,51 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// source. (This case doesn't occur on any common architecture.)
// 2. If the argument is byval, RV is not sufficiently aligned, and
// we cannot force it to be sufficiently aligned.
- // 3. If the argument is byval, but RV is located in an address space
- // different than that of the argument (0).
- Address Addr = RV.getAggregateAddress();
+ // 3. If the argument is byval, but RV is not located in default
+ // or alloca address space.
+ Address Addr = I->hasLValue()
+ ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
+ llvm::Value *V = Addr.getPointer();
CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
- const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
- const unsigned ArgAddrSpace =
- (FirstIRArg < IRFuncTy->getNumParams()
- ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
- : 0);
- if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
- (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
- llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
- Align.getQuantity(), *TD)
- < Align.getQuantity()) ||
- (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
+
+ assert((FirstIRArg >= IRFuncTy->getNumParams() ||
+ IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
+ TD->getAllocaAddrSpace()) &&
+ "indirect argument must be in alloca address space");
+
+ bool NeedCopy = false;
+
+ if (Addr.getAlignment() < Align &&
+ llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
+ Align.getQuantity()) {
+ NeedCopy = true;
+ } else if (I->hasLValue()) {
+ auto LV = I->getKnownLValue();
+ auto AS = LV.getAddressSpace();
+ if ((!ArgInfo.getIndirectByVal() &&
+ (LV.getAlignment() >=
+ getContext().getTypeAlignInChars(I->Ty))) ||
+ (ArgInfo.getIndirectByVal() &&
+ ((AS != LangAS::Default && AS != LangAS::opencl_private &&
+ AS != CGM.getASTAllocaAddressSpace())))) {
+ NeedCopy = true;
+ }
+ }
+ if (NeedCopy) {
// Create an aligned temporary, and copy to it.
- Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
- "byval-temp", false);
+ Address AI = CreateMemTempWithoutCast(
+ I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
IRCallArgs[FirstIRArg] = AI.getPointer();
- EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
+ I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
- IRCallArgs[FirstIRArg] = Addr.getPointer();
+ auto *T = V->getType()->getPointerElementType()->getPointerTo(
+ CGM.getDataLayout().getAllocaAddrSpace());
+ IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
+ *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
+ true);
}
}
break;
@@ -3903,10 +3983,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgInfo.getDirectOffset() == 0) {
assert(NumIRArgs == 1);
llvm::Value *V;
- if (RV.isScalar())
- V = RV.getScalarVal();
+ if (!I->isAggregate())
+ V = I->getKnownRValue().getScalarVal();
else
- V = Builder.CreateLoad(RV.getAggregateAddress());
+ V = Builder.CreateLoad(
+ I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress());
// Implement swifterror by copying into a new swifterror argument.
// We'll write back in the normal path out of the call.
@@ -3944,12 +4026,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// FIXME: Avoid the conversion through memory if possible.
Address Src = Address::invalid();
- if (RV.isScalar() || RV.isComplex()) {
+ if (!I->isAggregate()) {
Src = CreateMemTemp(I->Ty, "coerce");
- LValue SrcLV = MakeAddrLValue(Src, I->Ty);
- EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
+ I->copyInto(*this, Src);
} else {
- Src = RV.getAggregateAddress();
+ Src = I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
}
// If the value is offset in memory, apply the offset now.
@@ -4003,22 +4085,26 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
- if (RV.isAggregate()) {
- addr = RV.getAggregateAddress();
+ Address AllocaAddr = Address::invalid();
+ if (I->isAggregate()) {
+ addr = I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
+
} else {
+ RValue RV = I->getKnownRValue();
assert(RV.isScalar()); // complex should always just be direct
llvm::Type *scalarType = RV.getScalarVal()->getType();
auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
- tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
-
// Materialize to a temporary.
addr = CreateTempAlloca(RV.getScalarVal()->getType(),
- CharUnits::fromQuantity(std::max(layout->getAlignment(),
- scalarAlign)));
- EmitLifetimeStart(scalarSize, addr.getPointer());
+ CharUnits::fromQuantity(std::max(
+ layout->getAlignment(), scalarAlign)),
+ "tmp",
+ /*ArraySize=*/nullptr, &AllocaAddr);
+ tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
Builder.CreateStore(RV.getScalarVal(), addr);
}
@@ -4036,7 +4122,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(IRArgPos == FirstIRArg + NumIRArgs);
if (tempSize) {
- EmitLifetimeEnd(tempSize, addr.getPointer());
+ EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
}
break;
@@ -4044,13 +4130,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Expand:
unsigned IRArgPos = FirstIRArg;
- ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
+ ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
}
- llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
+ llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
// If we're using inalloca, set up that argument.
if (ArgMemory.isValid()) {
@@ -4191,10 +4278,19 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind);
}
+
+ // If we made a temporary, be sure to clean up after ourselves. Note that we
+ // can't depend on being inside an ExprWithCleanups, so we need to manually
+ // pop this cleanup later on. Being eager about this is OK, since this
+ // temporary is 'invisible' outside of the callee.
+ if (UnusedReturnSizePtr)
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
+ UnusedReturnSizePtr);
+
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
- SmallVector<llvm::OperandBundleDef, 1> BundleList;
- getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
+ SmallVector<llvm::OperandBundleDef, 1> BundleList =
+ getBundlesForFunclet(CalleePtr);
// Emit the actual call/invoke instruction.
llvm::CallSite CS;
@@ -4244,9 +4340,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// insertion point; this allows the rest of IRGen to discard
// unreachable code.
if (CS.doesNotReturn()) {
- if (UnusedReturnSize)
- EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
- SRetPtr.getPointer());
+ if (UnusedReturnSizePtr)
+ PopCleanupBlock();
// Strip away the noreturn attribute to better diagnose unreachable UB.
if (SanOpts.has(SanitizerKind::Unreachable)) {
@@ -4315,9 +4410,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- if (UnusedReturnSize)
- EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
- SRetPtr.getPointer());
+ if (UnusedReturnSizePtr)
+ PopCleanupBlock();
return ret;
}
@@ -4395,7 +4489,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
OffsetValue);
} else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
llvm::Value *ParamVal =
- CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
+ CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
+ *this).getScalarVal();
EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
}
}
@@ -4403,6 +4498,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return Ret;
}
+CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
+ if (isVirtual()) {
+ const CallExpr *CE = getVirtualCallExpr();
+ return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
+ CGF, getVirtualMethodDecl(), getThisAddress(),
+ getFunctionType(), CE ? CE->getLocStart() : SourceLocation());
+ }
+
+ return *this;
+}
+
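
prepareConcreteCallee is the late-binding half of the new virtual-callee support. A hedged sketch of the intended flow at a call site, where CE, MD, ThisAddr, and FTy are stand-ins for values a real caller already has:

    // Sketch: defer the vtable load until EmitCall actually needs a pointer.
    CGCallee Callee = CGCallee::forVirtual(CE, GlobalDecl(MD), ThisAddr, FTy);
    // ... argument emission happens here, with no vtable load yet ...
    const CGCallee &Concrete = Callee.prepareConcreteCallee(*this);
    llvm::Value *CalleePtr = Concrete.getFunctionPointer();
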
/* VarArg handling */
Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 7e10407fc31c..8adbe76fa6c3 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -18,6 +18,7 @@
#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/CanonicalType.h"
+#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"
@@ -42,9 +43,9 @@ namespace CodeGen {
/// Abstract information about a function or function prototype.
class CGCalleeInfo {
- /// \brief The function prototype of the callee.
+ /// The function prototype of the callee.
const FunctionProtoType *CalleeProtoTy;
- /// \brief The function declaration of the callee.
+ /// The function declaration of the callee.
const Decl *CalleeDecl;
public:
@@ -68,8 +69,9 @@ public:
Invalid,
Builtin,
PseudoDestructor,
+ Virtual,
- Last = PseudoDestructor
+ Last = Virtual
};
struct BuiltinInfoStorage {
@@ -79,12 +81,19 @@ public:
struct PseudoDestructorInfoStorage {
const CXXPseudoDestructorExpr *Expr;
};
+ struct VirtualInfoStorage {
+ const CallExpr *CE;
+ GlobalDecl MD;
+ Address Addr;
+ llvm::FunctionType *FTy;
+ };
SpecialKind KindOrFunctionPointer;
union {
CGCalleeInfo AbstractInfo;
BuiltinInfoStorage BuiltinInfo;
PseudoDestructorInfoStorage PseudoDestructorInfo;
+ VirtualInfoStorage VirtualInfo;
};
explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}
@@ -127,6 +136,16 @@ public:
return CGCallee(abstractInfo, functionPtr);
}
+ static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr,
+ llvm::FunctionType *FTy) {
+ CGCallee result(SpecialKind::Virtual);
+ result.VirtualInfo.CE = CE;
+ result.VirtualInfo.MD = MD;
+ result.VirtualInfo.Addr = Addr;
+ result.VirtualInfo.FTy = FTy;
+ return result;
+ }
+
bool isBuiltin() const {
return KindOrFunctionPointer == SpecialKind::Builtin;
}
@@ -150,7 +169,9 @@ public:
bool isOrdinary() const {
return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
}
- const CGCalleeInfo &getAbstractInfo() const {
+ CGCalleeInfo getAbstractInfo() const {
+ if (isVirtual())
+ return VirtualInfo.MD.getDecl();
assert(isOrdinary());
return AbstractInfo;
}
@@ -158,29 +179,86 @@ public:
assert(isOrdinary());
return reinterpret_cast<llvm::Value*>(uintptr_t(KindOrFunctionPointer));
}
- llvm::FunctionType *getFunctionType() const {
- return cast<llvm::FunctionType>(
- getFunctionPointer()->getType()->getPointerElementType());
- }
void setFunctionPointer(llvm::Value *functionPtr) {
assert(isOrdinary());
KindOrFunctionPointer = SpecialKind(uintptr_t(functionPtr));
}
+
+ bool isVirtual() const {
+ return KindOrFunctionPointer == SpecialKind::Virtual;
+ }
+ const CallExpr *getVirtualCallExpr() const {
+ assert(isVirtual());
+ return VirtualInfo.CE;
+ }
+ GlobalDecl getVirtualMethodDecl() const {
+ assert(isVirtual());
+ return VirtualInfo.MD;
+ }
+ Address getThisAddress() const {
+ assert(isVirtual());
+ return VirtualInfo.Addr;
+ }
+
+ llvm::FunctionType *getFunctionType() const {
+ if (isVirtual())
+ return VirtualInfo.FTy;
+ return cast<llvm::FunctionType>(
+ getFunctionPointer()->getType()->getPointerElementType());
+ }
+
+ /// If this is a delayed callee computation of some sort, prepare
+ /// a concrete callee.
+ CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const;
};
struct CallArg {
- RValue RV;
+ private:
+ union {
+ RValue RV;
+ LValue LV; /// The argument is semantically a load from this l-value.
+ };
+ bool HasLV;
+
+ /// A data-flow flag to make sure getRValue and/or copyInto are not
+ /// called twice, which would result in duplicated IR emission.
+ mutable bool IsUsed;
+
+ public:
QualType Ty;
- bool NeedsCopy;
- CallArg(RValue rv, QualType ty, bool needscopy)
- : RV(rv), Ty(ty), NeedsCopy(needscopy)
- { }
+ CallArg(RValue rv, QualType ty)
+ : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
+ CallArg(LValue lv, QualType ty)
+ : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
+ bool hasLValue() const { return HasLV; }
+ QualType getType() const { return Ty; }
+
+ /// \returns an independent RValue. If the CallArg contains an LValue,
+ /// a temporary copy is returned.
+ RValue getRValue(CodeGenFunction &CGF) const;
+
+ LValue getKnownLValue() const {
+ assert(HasLV && !IsUsed);
+ return LV;
+ }
+ RValue getKnownRValue() const {
+ assert(!HasLV && !IsUsed);
+ return RV;
+ }
+ void setRValue(RValue _RV) {
+ assert(!HasLV);
+ RV = _RV;
+ }
+
+ bool isAggregate() const { return HasLV || RV.isAggregate(); }
+
+ void copyInto(CodeGenFunction &CGF, Address A) const;
};
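
A short usage sketch for the two CallArg flavors, based on the accessors declared above; the emission context is a stand-in:

    // Sketch: consume a CallArg without forcing an unnecessary copy.
    void emitIndirectArg(CodeGenFunction &CGF, const CallArg &Arg,
                         Address ParamSlot) {
      if (Arg.hasLValue()) {
        // Aggregate tracked as an l-value: copy straight into the slot and
        // skip the temporary that getRValue() would have materialized.
        Arg.copyInto(CGF, ParamSlot);
      } else {
        // Scalar, complex, or aggregate r-value: materialize it directly.
        RValue RV = Arg.getKnownRValue();
        // ... hand RV to the ABI-specific lowering ...
      }
    }
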
/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
class CallArgList :
- public SmallVector<CallArg, 16> {
+ public SmallVector<CallArg, 8> {
public:
CallArgList() : StackBase(nullptr) {}
@@ -204,8 +282,10 @@ public:
llvm::Instruction *IsActiveIP;
};
- void add(RValue rvalue, QualType type, bool needscopy = false) {
- push_back(CallArg(rvalue, type, needscopy));
+ void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }
+
+ void addUncopiedAggregate(LValue LV, QualType type) {
+ push_back(CallArg(LV, type));
}
/// Add all the arguments from another CallArgList to this one. After doing
@@ -254,7 +334,7 @@ public:
llvm::Instruction *getStackBase() const { return StackBase; }
void freeArgumentMemory(CodeGenFunction &CGF) const;
- /// \brief Returns if we're using an inalloca struct to pass arguments in
+ /// Returns if we're using an inalloca struct to pass arguments in
/// memory.
bool isUsingInAlloca() const { return StackBase; }
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index a6915071ec17..0b9311f7771c 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -406,8 +406,8 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
// Apply the offset.
llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
- Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
- "sub.ptr");
+ Value = Builder.CreateInBoundsGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
@@ -555,10 +555,12 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
BaseClassDecl,
isBaseVirtual);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::forAddr(
+ V, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ CGF.overlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual));
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
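The new overlap parameter exists because, under the Itanium C++ ABI, a derived class may allocate members into a non-POD base's tail padding, so stores emitted while constructing the base subobject must not be widened over those bytes. An illustrative layout (assumed, not from this patch):

    struct A { int i; char c; A(); };  // sizeof(A) == 8, data size == 5
    struct B : A { char d; };          // d typically lands at offset 5,
                                       // inside A's tail padding
    // Constructing B's A subobject must leave bytes 5..7 untouched, which
    // is what CGF.overlapForBaseInit(...) conservatively reports here.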
@@ -615,7 +617,14 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
llvm::Value *ThisPtr = CGF.LoadCXXThis();
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
- LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
+ LValue LHS;
+
+ // If a base constructor is being emitted, create an LValue that has the
+ // non-virtual alignment.
+ if (CGF.CurGD.getCtorType() == Ctor_Base)
+ LHS = CGF.MakeNaturalAlignPointeeAddrLValue(ThisPtr, RecordTy);
+ else
+ LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS);
@@ -640,7 +649,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
// Copy the aggregate.
- CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ CGF.EmitAggregateCopy(LHS, Src, FieldType, CGF.overlapForFieldInit(Field),
LHS.isVolatileQualified());
// Ensure that we destroy the objects if an exception is thrown later in
// the constructor.
@@ -671,10 +680,12 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
break;
case TEK_Aggregate: {
AggValueSlot Slot =
- AggValueSlot::forLValue(LHS,
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::forLValue(
+ LHS,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ overlapForFieldInit(Field));
EmitAggExpr(Init, Slot);
break;
}
@@ -905,15 +916,15 @@ namespace {
}
CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
+ ASTContext &Ctx = CGF.getContext();
unsigned LastFieldSize =
- LastField->isBitField() ?
- LastField->getBitWidthValue(CGF.getContext()) :
- CGF.getContext().getTypeSize(LastField->getType());
- uint64_t MemcpySizeBits =
- LastFieldOffset + LastFieldSize - FirstByteOffset +
- CGF.getContext().getCharWidth() - 1;
- CharUnits MemcpySize =
- CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
+ LastField->isBitField()
+ ? LastField->getBitWidthValue(Ctx)
+ : Ctx.toBits(
+ Ctx.getTypeInfoDataSizeInChars(LastField->getType()).first);
+ uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize -
+ FirstByteOffset + Ctx.getCharWidth() - 1;
+ CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits);
return MemcpySize;
}
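Switching the last field's size from getTypeSize() to the data size keeps a coalesced memcpy from reading past the end of the source object when the type has tail padding. A worked example of the arithmetic (assuming 32-bit int and 8-bit chars):

    struct Pair { int a; int b; };  // a at bit offset 0, b at bit offset 32
    // Coalescing one memcpy over both fields, with FirstByteOffset = 0:
    //   LastFieldOffset = 32, LastFieldSize = 32
    //   MemcpySizeBits  = 32 + 32 - 0 + 8 - 1 = 71
    //   toCharUnitsFromBits(71) = 71 / 8 = 8 bytes (integer division)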
@@ -1265,7 +1276,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
- CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
}
@@ -1282,7 +1293,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
- CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
}
@@ -1466,11 +1477,11 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// Initialize the vtable pointers before entering the body.
if (!CanSkipVTablePointerInitialization(*this, Dtor)) {
- // Insert the llvm.invariant.group.barrier intrinsic before initializing
+ // Insert the llvm.launder.invariant.group intrinsic before initializing
// the vptrs to cancel any previous assumptions we might have made.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0)
- CXXThisValue = Builder.CreateInvariantGroupBarrier(LoadCXXThis());
+ CXXThisValue = Builder.CreateLaunderInvariantGroup(LoadCXXThis());
InitializeVTablePointers(Dtor->getParent());
}
@@ -1728,7 +1739,7 @@ namespace {
};
} // end anonymous namespace
-/// \brief Emit all code that comes at the end of class's
+/// Emit all code that comes at the end of a class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
///
@@ -1954,7 +1965,8 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, curAddr, E);
+ /*Delegating=*/false, curAddr, E,
+ AggValueSlot::DoesNotOverlap);
}
// Go to the next element.
@@ -1989,7 +2001,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CXXCtorType Type,
bool ForVirtualBase,
bool Delegating, Address This,
- const CXXConstructExpr *E) {
+ const CXXConstructExpr *E,
+ AggValueSlot::Overlap_t Overlap) {
CallArgList Args;
// Push the this ptr.
@@ -2002,10 +2015,10 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
const Expr *Arg = E->getArg(0);
- QualType SrcTy = Arg->getType();
- Address Src = EmitLValue(Arg).getAddress();
+ LValue Src = EmitLValue(Arg);
QualType DestTy = getContext().getTypeDeclType(D->getParent());
- EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
+ LValue Dest = MakeAddrLValue(This, DestTy);
+ EmitAggregateCopyCtor(Dest, Src, Overlap);
return;
}
@@ -2017,7 +2030,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor(),
/*ParamsToSkip*/ 0, Order);
- EmitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args);
+ EmitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args,
+ Overlap, E->getExprLoc());
}
static bool canEmitDelegateCallArgs(CodeGenFunction &CGF,
@@ -2049,14 +2063,15 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
bool ForVirtualBase,
bool Delegating,
Address This,
- CallArgList &Args) {
+ CallArgList &Args,
+ AggValueSlot::Overlap_t Overlap,
+ SourceLocation Loc) {
const CXXRecordDecl *ClassDecl = D->getParent();
// C++11 [class.mfct.non-static]p2:
// If a non-static member function of a class X is called for an object that
// is not of type X, or of a type derived from X, the behavior is undefined.
- // FIXME: Provide a source location here.
- EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(),
+ EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc,
This.getPointer(), getContext().getRecordType(ClassDecl));
if (D->isTrivial() && D->isDefaultConstructor()) {
@@ -2071,9 +2086,12 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
- Address Src(Args[1].RV.getScalarVal(), getNaturalTypeAlignment(SrcTy));
+ Address Src(Args[1].getRValue(*this).getScalarVal(),
+ getNaturalTypeAlignment(SrcTy));
+ LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
- EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
+ LValue DestLVal = MakeAddrLValue(This, DestTy);
+ EmitAggregateCopyCtor(DestLVal, SrcLVal, Overlap);
return;
}
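This branch handles trivial copy/move construction, which is lowered as a direct aggregate copy instead of a constructor call; the change routes it through l-values so the overlap information is preserved. Illustrative source that takes this path:

    struct P { int x, y; };  // trivial copy constructor
    P a{1, 2};
    P b(a);                  // no call emitted; becomes an aggregate copy
                             // (typically a memcpy) via EmitAggregateCopyCtor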
@@ -2123,8 +2141,7 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall(
const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
CallArgList Args;
- CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()),
- /*NeedsCopy=*/false);
+ CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()));
// Forward the parameters.
if (InheritedFromVBase &&
@@ -2163,7 +2180,8 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall(
}
EmitCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/false,
- This, Args);
+ This, Args, AggValueSlot::MayOverlap,
+ E->getLocation());
}
void CodeGenFunction::EmitInlinedInheritingCXXConstructorCall(
@@ -2188,7 +2206,7 @@ void CodeGenFunction::EmitInlinedInheritingCXXConstructorCall(
assert(Args.size() >= Params.size() && "too few arguments for call");
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
if (I < Params.size() && isa<ImplicitParamDecl>(Params[I])) {
- const RValue &RV = Args[I].RV;
+ const RValue &RV = Args[I].getRValue(*this);
assert(!RV.isComplex() && "complex indirect params not supported");
ParamValue Val = RV.isScalar()
? ParamValue::forDirect(RV.getScalarVal())
@@ -2259,7 +2277,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
/*ParamsToSkip*/ 1);
- EmitCXXConstructorCall(D, Ctor_Complete, false, false, This, Args);
+ EmitCXXConstructorCall(D, Ctor_Complete, false, false, This, Args,
+ AggValueSlot::MayOverlap, E->getExprLoc());
}
void
@@ -2294,7 +2313,8 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
}
EmitCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false,
- /*Delegating=*/true, This, DelegateArgs);
+ /*Delegating=*/true, This, DelegateArgs,
+ AggValueSlot::MayOverlap, Loc);
}
namespace {
@@ -2325,7 +2345,8 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
AggValueSlot::forAddr(ThisPtr, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::MayOverlap);
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
@@ -2667,7 +2688,9 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
SSK = llvm::SanStat_CFI_UnrelatedCast;
break;
case CFITCK_ICall:
- llvm_unreachable("not expecting CFITCK_ICall");
+ case CFITCK_NVMFCall:
+ case CFITCK_VMFCall:
+ llvm_unreachable("unexpected sanitizer kind");
}
std::string TypeName = RD->getQualifiedNameAsString();
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 22055b2cb902..cfd230997ed0 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -281,10 +281,10 @@ void EHScopeStack::popNullFixups() {
BranchFixups.pop_back();
}
-void CodeGenFunction::initFullExprCleanup() {
+Address CodeGenFunction::createCleanupActiveFlag() {
// Create a variable to decide whether the cleanup needs to be run.
- Address active = CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
- "cleanup.cond");
+ Address active = CreateTempAllocaWithoutCast(
+ Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond");
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
@@ -293,10 +293,14 @@ void CodeGenFunction::initFullExprCleanup() {
// Initialize it to true at the current location.
Builder.CreateStore(Builder.getTrue(), active);
+ return active;
+}
+
+void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
// Set that as the active flag in the cleanup.
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
- cleanup.setActiveFlag(active);
+ cleanup.setActiveFlag(ActiveFlag);
if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
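Splitting initFullExprCleanup() lets a caller create the active flag first, push the cleanup, and only then attach the flag, which is what the lifetime-extended path below needs. A usage sketch with the new entry points (surrounding code assumed):

    // Emits the i1 alloca, a 'false' store at full-expression entry, and a
    // 'true' store at the current insertion point.
    Address ActiveFlag = CGF.createCleanupActiveFlag();
    // ... push the normal/EH cleanup onto EHStack ...
    CGF.initFullExprCleanupWithFlag(ActiveFlag);  // wire flag to top cleanup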
@@ -494,6 +498,13 @@ void CodeGenFunction::PopCleanupBlocks(
&LifetimeExtendedCleanupStack[I],
Header.getSize());
I += Header.getSize();
+
+ if (Header.isConditional()) {
+ Address ActiveFlag =
+ reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]);
+ initFullExprCleanupWithFlag(ActiveFlag);
+ I += sizeof(ActiveFlag);
+ }
}
LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}
@@ -624,7 +635,7 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
si->eraseFromParent();
// Destroy the load.
- assert(condition->getOperand(0) == CGF.NormalCleanupDest);
+ assert(condition->getOperand(0) == CGF.NormalCleanupDest.getPointer());
assert(condition->use_empty());
condition->eraseFromParent();
}
@@ -833,7 +844,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (NormalCleanupDestSlot->hasOneUse()) {
NormalCleanupDestSlot->user_back()->eraseFromParent();
NormalCleanupDestSlot->eraseFromParent();
- NormalCleanupDest = nullptr;
+ NormalCleanupDest = Address::invalid();
}
llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
@@ -971,16 +982,21 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
CurrentFuncletPad);
llvm::CleanupPadInst *CPI = nullptr;
- if (!EHPersonality::get(*this).usesFuncletPads()) {
- EHStack.pushTerminate();
- PushedTerminate = true;
- } else {
+
+ const EHPersonality &Personality = EHPersonality::get(*this);
+ if (Personality.usesFuncletPads()) {
llvm::Value *ParentPad = CurrentFuncletPad;
if (!ParentPad)
ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad);
}
+ // Non-MSVC personalities need to terminate when an EH cleanup throws.
+ if (!Personality.isMSVCPersonality()) {
+ EHStack.pushTerminate();
+ PushedTerminate = true;
+ }
+
// We only actually emit the cleanup code if the cleanup is either
// active or was used before it was deactivated.
if (EHActiveFlag.isValid() || IsActive) {
@@ -1233,8 +1249,10 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
assert(Scope.isActive() && "double deactivation");
- // If it's the top of the stack, just pop it.
- if (C == EHStack.stable_begin()) {
+ // If it's the top of the stack, just pop it, but do so only if it belongs
+ // to the current RunCleanupsScope.
+ if (C == EHStack.stable_begin() &&
+ CurrentCleanupScopeDepth.strictlyEncloses(C)) {
// If it's a normal cleanup, we need to pretend that the
// fallthrough is unreachable.
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
@@ -1250,10 +1268,10 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
}
Address CodeGenFunction::getNormalCleanupDestSlot() {
- if (!NormalCleanupDest)
+ if (!NormalCleanupDest.isValid())
NormalCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
- return Address(NormalCleanupDest, CharUnits::fromQuantity(4));
+ CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
}
/// Emits all the code to cause the given temporary to be cleaned up.
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 105c5629d50c..93be3e6c1502 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -230,7 +230,7 @@ public:
};
/// A cleanup scope which generates the cleanup blocks lazily.
-class LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) EHCleanupScope : public EHScope {
+class alignas(8) EHCleanupScope : public EHScope {
/// The nearest normal cleanup scope enclosing this one.
EHScopeStack::stable_iterator EnclosingNormal;
@@ -627,16 +627,21 @@ struct EHPersonality {
static const EHPersonality MSVC_except_handler;
static const EHPersonality MSVC_C_specific_handler;
static const EHPersonality MSVC_CxxFrameHandler3;
+ static const EHPersonality GNU_Wasm_CPlusPlus;
/// Does this personality use landingpads or the family of pad instructions
/// designed to form funclets?
- bool usesFuncletPads() const { return isMSVCPersonality(); }
+ bool usesFuncletPads() const {
+ return isMSVCPersonality() || isWasmPersonality();
+ }
bool isMSVCPersonality() const {
return this == &MSVC_except_handler || this == &MSVC_C_specific_handler ||
this == &MSVC_CxxFrameHandler3;
}
+ bool isWasmPersonality() const { return this == &GNU_Wasm_CPlusPlus; }
+
bool isMSVCXXPersonality() const { return this == &MSVC_CxxFrameHandler3; }
};
}
diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp
index 5842e7b3ff93..4f525c8aac85 100644
--- a/lib/CodeGen/CGCoroutine.cpp
+++ b/lib/CodeGen/CGCoroutine.cpp
@@ -44,6 +44,15 @@ struct clang::CodeGen::CGCoroData {
// A branch to this block is emitted when the coroutine needs to suspend.
llvm::BasicBlock *SuspendBB = nullptr;
+ // The promise type's 'unhandled_exception' handler, if it defines one.
+ Stmt *ExceptionHandler = nullptr;
+
+ // A temporary i1 alloca that stores whether 'await_resume' threw an
+ // exception. If it did, 'true' is stored in this variable, and the coroutine
+ // body must be skipped. If the promise type does not define an exception
+ // handler, this is null.
+ llvm::Value *ResumeEHVar = nullptr;
+
// Stores the jump destination just before the coroutine memory is freed.
// This is the destination that every suspend point jumps to for the cleanup
// branch.
@@ -121,6 +130,16 @@ static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) {
return Prefix;
}
+static bool memberCallExpressionCanThrow(const Expr *E) {
+ if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
+ if (const auto *Proto =
+ CE->getMethodDecl()->getType()->getAs<FunctionProtoType>())
+ if (isNoexceptExceptionSpec(Proto->getExceptionSpecType()) &&
+ Proto->canThrow() == CT_Cannot)
+ return false;
+ return true;
+}
+
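The extra exception-handling IR is therefore pay-for-use: if the initial awaiter's await_resume() is noexcept and provably cannot throw, no resume.eh flag or try/catch wrapping is emitted. An illustrative awaiter (assuming a coroutines-TS toolchain with <experimental/coroutine>):

    #include <experimental/coroutine>

    struct InitAwaiter {
      bool await_ready() const noexcept { return false; }
      void await_suspend(std::experimental::coroutine_handle<>) noexcept {}
      void await_resume() noexcept {}  // noexcept: skips the resume.eh IR
    };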
// Emit suspend expression which roughly looks like:
//
// auto && x = CommonExpr();
@@ -208,11 +227,36 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
// Emit await_resume expression.
CGF.EmitBlock(ReadyBlock);
+
+ // Exception handling requires additional IR. If the 'await_resume' function
+ // is marked as 'noexcept', we avoid generating this additional IR.
+ CXXTryStmt *TryStmt = nullptr;
+ if (Coro.ExceptionHandler && Kind == AwaitKind::Init &&
+ memberCallExpressionCanThrow(S.getResumeExpr())) {
+ Coro.ResumeEHVar =
+ CGF.CreateTempAlloca(Builder.getInt1Ty(), Prefix + Twine("resume.eh"));
+ Builder.CreateFlagStore(true, Coro.ResumeEHVar);
+
+ auto Loc = S.getResumeExpr()->getExprLoc();
+ auto *Catch = new (CGF.getContext())
+ CXXCatchStmt(Loc, /*exDecl=*/nullptr, Coro.ExceptionHandler);
+ auto *TryBody =
+ CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(), Loc, Loc);
+ TryStmt = CXXTryStmt::Create(CGF.getContext(), Loc, TryBody, Catch);
+ CGF.EnterCXXTryStmt(*TryStmt);
+ }
+
LValueOrRValue Res;
if (forLValue)
Res.LV = CGF.EmitLValue(S.getResumeExpr());
else
Res.RV = CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
+
+ if (TryStmt) {
+ Builder.CreateFlagStore(false, Coro.ResumeEHVar);
+ CGF.ExitCXXTryStmt(*TryStmt);
+ }
+
return Res;
}
@@ -315,7 +359,7 @@ namespace {
GetParamRef Visitor;
Visitor.Visit(const_cast<Expr*>(InitExpr));
assert(Visitor.Expr);
- auto *DREOrig = cast<DeclRefExpr>(Visitor.Expr);
+ DeclRefExpr *DREOrig = Visitor.Expr;
auto *PD = DREOrig->getDecl();
auto it = LocalDeclMap.find(PD);
@@ -588,19 +632,40 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
EHStack.pushCleanup<CallCoroEnd>(EHCleanup);
CurCoro.Data->CurrentAwaitKind = AwaitKind::Init;
+ CurCoro.Data->ExceptionHandler = S.getExceptionHandler();
EmitStmt(S.getInitSuspendStmt());
CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB);
CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal;
- if (auto *OnException = S.getExceptionHandler()) {
+ if (CurCoro.Data->ExceptionHandler) {
+ // If we generated IR to record whether an exception was thrown from
+ // 'await_resume', then use that IR to determine whether the coroutine
+ // body should be skipped.
+ // If we didn't generate the IR (perhaps because 'await_resume' was marked
+ // as 'noexcept'), then we skip this check.
+ BasicBlock *ContBB = nullptr;
+ if (CurCoro.Data->ResumeEHVar) {
+ BasicBlock *BodyBB = createBasicBlock("coro.resumed.body");
+ ContBB = createBasicBlock("coro.resumed.cont");
+ Value *SkipBody = Builder.CreateFlagLoad(CurCoro.Data->ResumeEHVar,
+ "coro.resumed.eh");
+ Builder.CreateCondBr(SkipBody, ContBB, BodyBB);
+ EmitBlock(BodyBB);
+ }
+
auto Loc = S.getLocStart();
- CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr, OnException);
- auto *TryStmt = CXXTryStmt::Create(getContext(), Loc, S.getBody(), &Catch);
+ CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr,
+ CurCoro.Data->ExceptionHandler);
+ auto *TryStmt =
+ CXXTryStmt::Create(getContext(), Loc, S.getBody(), &Catch);
EnterCXXTryStmt(*TryStmt);
emitBodyAndFallthrough(*this, S, TryStmt->getTryBlock());
ExitCXXTryStmt(*TryStmt);
+
+ if (ContBB)
+ EmitBlock(ContBB);
}
else {
emitBodyAndFallthrough(*this, S, S.getBody());
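In source terms, the emitted control flow is roughly the following sketch (an analogy for the lowering, not actual codegen output):

    bool resume_threw = true;            // CurCoro.Data->ResumeEHVar
    try {
      init_awaiter.await_resume();       // initial-suspend resume
      resume_threw = false;
    } catch (...) {
      promise.unhandled_exception();     // Coro.ExceptionHandler
    }
    if (!resume_threw) {                 // the coro.resumed.eh check above
      try { /* coroutine body */ }
      catch (...) { promise.unhandled_exception(); }
    }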
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index aeed4d658a4e..097a1e043047 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -289,8 +289,7 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
<< OC->getIdentifier()->getNameStart() << ')';
}
} else if (const auto *OCD = dyn_cast<ObjCCategoryImplDecl>(DC)) {
- OS << OCD->getClassInterface()->getName() << '('
- << OCD->getName() << ')';
+ OS << OCD->getClassInterface()->getName() << '(' << OCD->getName() << ')';
} else if (isa<ObjCProtocolDecl>(DC)) {
// We can extract the type of the class from the self pointer.
if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) {
@@ -361,18 +360,19 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
return StringRef();
}
-llvm::DIFile::ChecksumKind
+Optional<llvm::DIFile::ChecksumKind>
CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
Checksum.clear();
- if (!CGM.getCodeGenOpts().EmitCodeView)
- return llvm::DIFile::CSK_None;
+ if (!CGM.getCodeGenOpts().EmitCodeView &&
+ CGM.getCodeGenOpts().DwarfVersion < 5)
+ return None;
SourceManager &SM = CGM.getContext().getSourceManager();
bool Invalid;
llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
if (Invalid)
- return llvm::DIFile::CSK_None;
+ return None;
llvm::MD5 Hash;
llvm::MD5::MD5Result Result;
@@ -384,51 +384,62 @@ CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
return llvm::DIFile::CSK_MD5;
}
+Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
+ FileID FID) {
+ if (!CGM.getCodeGenOpts().EmbedSource)
+ return None;
+
+ bool SourceInvalid = false;
+ StringRef Source = SM.getBufferData(FID, &SourceInvalid);
+
+ if (SourceInvalid)
+ return None;
+
+ return Source;
+}
+
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
- return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()),
- TheCU->getFile()->getChecksumKind(),
- TheCU->getFile()->getChecksum());
+ return getOrCreateMainFile();
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
// If the location is not valid then use main input file.
- return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()),
- TheCU->getFile()->getChecksumKind(),
- TheCU->getFile()->getChecksum());
+ return getOrCreateMainFile();
// Cache the results.
const char *fname = PLoc.getFilename();
- auto it = DIFileCache.find(fname);
+ auto It = DIFileCache.find(fname);
- if (it != DIFileCache.end()) {
+ if (It != DIFileCache.end()) {
// Verify that the information still exists.
- if (llvm::Metadata *V = it->second)
+ if (llvm::Metadata *V = It->second)
return cast<llvm::DIFile>(V);
}
SmallString<32> Checksum;
- llvm::DIFile::ChecksumKind CSKind =
+ Optional<llvm::DIFile::ChecksumKind> CSKind =
computeChecksum(SM.getFileID(Loc), Checksum);
+ Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
+ if (CSKind)
+ CSInfo.emplace(*CSKind, Checksum);
- llvm::DIFile *F = DBuilder.createFile(remapDIPath(PLoc.getFilename()),
- remapDIPath(getCurrentDirname()),
- CSKind, Checksum);
+ llvm::DIFile *F = DBuilder.createFile(
+ remapDIPath(PLoc.getFilename()), remapDIPath(getCurrentDirname()), CSInfo,
+ getSource(SM, SM.getFileID(Loc)));
DIFileCache[fname].reset(F);
return F;
}
llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
- return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()),
- TheCU->getFile()->getChecksumKind(),
- TheCU->getFile()->getChecksum());
+ return DBuilder.createFile(
+ remapDIPath(TheCU->getFilename()), remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksum(),
+ CGM.getCodeGenOpts().EmbedSource ? TheCU->getSource() : None);
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
@@ -472,7 +483,8 @@ StringRef CGDebugInfo::getCurrentDirname() {
void CGDebugInfo::CreateCompileUnit() {
SmallString<32> Checksum;
- llvm::DIFile::ChecksumKind CSKind = llvm::DIFile::CSK_None;
+ Optional<llvm::DIFile::ChecksumKind> CSKind;
+ Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
// Should we be asking the SourceManager for the main file name, instead of
// accepting it as an argument? This just causes the main file name to
@@ -551,14 +563,19 @@ void CGDebugInfo::CreateCompileUnit() {
break;
}
+ if (CSKind)
+ CSInfo.emplace(*CSKind, Checksum);
+
// Create new compile unit.
// FIXME - Eliminate TheCU.
auto &CGOpts = CGM.getCodeGenOpts();
TheCU = DBuilder.createCompileUnit(
LangTag,
DBuilder.createFile(remapDIPath(MainFileName),
- remapDIPath(getCurrentDirname()), CSKind, Checksum),
- Producer, LO.Optimize || CGOpts.PrepareForLTO || CGOpts.EmitSummaryIndex,
+ remapDIPath(getCurrentDirname()), CSInfo,
+ getSource(SM, SM.getMainFileID())),
+ CGOpts.EmitVersionIdentMetadata ? Producer : "",
+ LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
CGOpts.DwarfDebugFlags, RuntimeVers,
CGOpts.EnableSplitDwarf ? "" : CGOpts.SplitDwarfFile, EmissionKind,
0 /* DWOid */, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
@@ -620,14 +637,13 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return SelTy;
}
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id: \
- return getOrCreateStructPtrType("opencl_" #ImgType "_" #Suffix "_t", \
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id: \
+ return getOrCreateStructPtrType("opencl_" #ImgType "_" #Suffix "_t", \
SingletonId);
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
- return getOrCreateStructPtrType("opencl_sampler_t",
- OCLSamplerDITy);
+ return getOrCreateStructPtrType("opencl_sampler_t", OCLSamplerDITy);
case BuiltinType::OCLEvent:
return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
case BuiltinType::OCLClkEvent:
@@ -645,6 +661,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::SChar:
Encoding = llvm::dwarf::DW_ATE_signed_char;
break;
+ case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
Encoding = llvm::dwarf::DW_ATE_UTF;
@@ -681,6 +698,34 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
// floating point types of the same size.
Encoding = llvm::dwarf::DW_ATE_float;
break;
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ Encoding = llvm::dwarf::DW_ATE_signed_fixed;
+ break;
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
+ Encoding = llvm::dwarf::DW_ATE_unsigned_fixed;
+ break;
}
switch (BT->getKind()) {
@@ -780,27 +825,49 @@ static bool hasCXXMangling(const TagDecl *TD, llvm::DICompileUnit *TheCU) {
}
}
-/// In C++ mode, types have linkage, so we can rely on the ODR and
-/// on their mangled names, if they're external.
-static SmallString<256> getUniqueTagTypeName(const TagType *Ty,
- CodeGenModule &CGM,
- llvm::DICompileUnit *TheCU) {
- SmallString<256> FullName;
+// Determines if the tag declaration will require a type identifier.
+static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM,
+ llvm::DICompileUnit *TheCU) {
+ // We only add a type identifier for types with C++ name mangling.
+ if (!hasCXXMangling(TD, TheCU))
+ return false;
+
+ // CodeView types with C++ mangling need a type identifier.
+ if (CGM.getCodeGenOpts().EmitCodeView)
+ return true;
+
+ // Externally visible types with C++ mangling need a type identifier.
+ if (TD->isExternallyVisible())
+ return true;
+
+ return false;
+}
+
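Concretely, the distinction is linkage (illustrative):

    namespace { struct Hidden {}; }  // internal linkage: mangled identifier
                                     // emitted only for CodeView
    struct Visible {};               // external linkage: identifier emitted
                                     // for DWARF and CodeView alike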
+// When emitting CodeView debug information we need to produce a type
+// identifier for all types which have a C++ mangling. Until a GUID is added
+// to the identifier (not currently implemented) the result will not be unique
+// across compilation units.
+// When emitting DWARF debug information, we need to produce a type identifier
+// for all externally visible types with C++ name mangling. This identifier
+// should be unique across ODR-compliant compilation units.
+static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
+ llvm::DICompileUnit *TheCU) {
+ SmallString<256> Identifier;
const TagDecl *TD = Ty->getDecl();
- if (!hasCXXMangling(TD, TheCU) || !TD->isExternallyVisible())
- return FullName;
+ if (!needsTypeIdentifier(TD, CGM, TheCU))
+ return Identifier;
// TODO: This is using the RTTI name. Is there a better way to get
// a unique string for a type?
- llvm::raw_svector_ostream Out(FullName);
+ llvm::raw_svector_ostream Out(Identifier);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out);
- return FullName;
+ return Identifier;
}
-/// \return the approproate DWARF tag for a composite type.
+/// \return the appropriate DWARF tag for a composite type.
static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) {
- llvm::dwarf::Tag Tag;
+ llvm::dwarf::Tag Tag;
if (RD->isStruct() || RD->isInterface())
Tag = llvm::dwarf::DW_TAG_structure_type;
else if (RD->isUnion())
@@ -828,10 +895,10 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
uint32_t Align = 0;
// Create the type.
- SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType(
getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align,
- llvm::DINode::FlagFwdDecl, FullName);
+ llvm::DINode::FlagFwdDecl, Identifier);
if (CGM.getCodeGenOpts().DebugFwdTemplateParams)
if (auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
DBuilder.replaceArrays(RetTy, llvm::DINodeArray(),
@@ -926,9 +993,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
// DW_AT_APPLE_BLOCK attribute and are an implementation detail only
// the debugger needs to know about. To allow type uniquing, emit
// them without a name or a location.
- EltTy =
- DBuilder.createStructType(Unit, "", nullptr, LineNo,
- FieldOffset, 0, Flags, nullptr, Elements);
+ EltTy = DBuilder.createStructType(Unit, "", nullptr, LineNo, FieldOffset, 0,
+ Flags, nullptr, Elements);
return DBuilder.createPointerType(EltTy, Size);
}
@@ -943,8 +1009,9 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
Ty->getTemplateName().print(OS, getPrintingPolicy(), /*qualified*/ false);
printTemplateArgumentList(OS, Ty->template_arguments(), getPrintingPolicy());
- auto *AliasDecl = cast<TypeAliasTemplateDecl>(
- Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl();
+ auto *AliasDecl =
+ cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
+ ->getTemplatedDecl();
SourceLocation Loc = AliasDecl->getLocation();
return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc),
@@ -981,20 +1048,28 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_vectorcall;
case CC_X86Pascal:
return llvm::dwarf::DW_CC_BORLAND_pascal;
-
- // FIXME: Create new DW_CC_ codes for these calling conventions.
case CC_Win64:
+ return llvm::dwarf::DW_CC_LLVM_Win64;
case CC_X86_64SysV:
+ return llvm::dwarf::DW_CC_LLVM_X86_64SysV;
case CC_AAPCS:
+ return llvm::dwarf::DW_CC_LLVM_AAPCS;
case CC_AAPCS_VFP:
+ return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP;
case CC_IntelOclBicc:
+ return llvm::dwarf::DW_CC_LLVM_IntelOclBicc;
case CC_SpirFunction:
+ return llvm::dwarf::DW_CC_LLVM_SpirFunction;
case CC_OpenCLKernel:
+ return llvm::dwarf::DW_CC_LLVM_OpenCLKernel;
case CC_Swift:
+ return llvm::dwarf::DW_CC_LLVM_Swift;
case CC_PreserveMost:
+ return llvm::dwarf::DW_CC_LLVM_PreserveMost;
case CC_PreserveAll:
+ return llvm::dwarf::DW_CC_LLVM_PreserveAll;
case CC_X86RegCall:
- return 0;
+ return llvm::dwarf::DW_CC_LLVM_X86RegCall;
}
return 0;
}
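With the dedicated DW_CC_LLVM_* codes, conventions that previously all collapsed to 0 become distinguishable in the debug info. For instance (illustrative; both attributes are existing Clang spellings):

    void f() __attribute__((preserve_most));  // DW_CC_LLVM_PreserveMost
    void g() __attribute__((regcall));        // DW_CC_LLVM_X86RegCall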
@@ -1102,8 +1177,8 @@ CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
}
llvm::DINode::DIFlags flags = getAccessFlag(AS, RD);
- return DBuilder.createMemberType(scope, name, file, line, SizeInBits,
- Align, offsetInBits, flags, debugType);
+ return DBuilder.createMemberType(scope, name, file, line, SizeInBits, Align,
+ offsetInBits, flags, debugType);
}
void CGDebugInfo::CollectRecordLambdaFields(
@@ -1223,10 +1298,6 @@ void CGDebugInfo::CollectRecordFields(
else {
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
- // Debug info for nested types is included in the member list only for
- // CodeView.
- bool IncludeNestedTypes = CGM.getCodeGenOpts().EmitCodeView;
-
// Field number for non-static fields.
unsigned fieldNo = 0;
@@ -1236,6 +1307,13 @@ void CGDebugInfo::CollectRecordFields(
if (const auto *V = dyn_cast<VarDecl>(I)) {
if (V->hasAttr<NoDebugAttr>())
continue;
+
+ // Skip variable template specializations when emitting CodeView. MSVC
+ // doesn't emit them.
+ if (CGM.getCodeGenOpts().EmitCodeView &&
+ isa<VarTemplateSpecializationDecl>(V))
+ continue;
+
// Reuse the existing static member declaration if one exists
auto MI = StaticDataMemberCache.find(V->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
@@ -1252,7 +1330,9 @@ void CGDebugInfo::CollectRecordFields(
// Bump field number for next field.
++fieldNo;
- } else if (IncludeNestedTypes) {
+ } else if (CGM.getCodeGenOpts().EmitCodeView) {
+ // Debug info for nested types is included in the member list only for
+ // CodeView.
if (const auto *nestedType = dyn_cast<TypeDecl>(I))
if (!nestedType->isImplicit() &&
nestedType->getDeclContext() == record)
@@ -1386,7 +1466,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
// deleting dtor.
const auto *DD = dyn_cast<CXXDestructorDecl>(Method);
GlobalDecl GD = DD ? GlobalDecl(DD, Dtor_Deleting) : GlobalDecl(Method);
- MicrosoftVTableContext::MethodVFTableLocation ML =
+ MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
VIndex = ML.Index;
@@ -1507,6 +1587,7 @@ void CGDebugInfo::CollectCXXBasesAux(
auto *BaseTy = getOrCreateType(BI.getType(), Unit);
llvm::DINode::DIFlags BFlags = StartingFlags;
uint64_t BaseOffset;
+ uint32_t VBPtrOffset = 0;
if (BI.isVirtual()) {
if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
@@ -1520,6 +1601,10 @@ void CGDebugInfo::CollectCXXBasesAux(
// vbase offset offset in Itanium.
BaseOffset =
4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
+ VBPtrOffset = CGM.getContext()
+ .getASTRecordLayout(RD)
+ .getVBPtrOffset()
+ .getQuantity();
}
BFlags |= llvm::DINode::FlagVirtual;
} else
@@ -1528,8 +1613,8 @@ void CGDebugInfo::CollectCXXBasesAux(
// BI->isVirtual() and bits when not.
BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
- llvm::DIType *DTy =
- DBuilder.createInheritance(RecordTy, BaseTy, BaseOffset, BFlags);
+ llvm::DIType *DTy = DBuilder.createInheritance(RecordTy, BaseTy, BaseOffset,
+ VBPtrOffset, BFlags);
EltTys.push_back(DTy);
}
}
@@ -1603,8 +1688,8 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
V = CGM.getCXXABI().EmitNullMemberPointer(MPT);
if (!V)
V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
- TemplateParams.push_back(DBuilder.createTemplateValueParameter(
- TheCU, Name, TTy, V));
+ TemplateParams.push_back(
+ DBuilder.createTemplateValueParameter(TheCU, Name, TTy, V));
} break;
case TemplateArgument::Template:
TemplateParams.push_back(DBuilder.createTemplateTemplateParameter(
@@ -1676,9 +1761,8 @@ llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
Optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
- llvm::DIType *vtbl_ptr_type =
- DBuilder.createPointerType(SubTy, Size, 0, DWARFAddressSpace,
- "__vtbl_ptr_type");
+ llvm::DIType *vtbl_ptr_type = DBuilder.createPointerType(
+ SubTy, Size, 0, DWARFAddressSpace, "__vtbl_ptr_type");
VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
return VTablePtrType;
}
@@ -1722,9 +1806,8 @@ void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
// Create a very wide void* type and insert it directly in the element list.
- llvm::DIType *VTableType =
- DBuilder.createPointerType(nullptr, VTableWidth, 0, DWARFAddressSpace,
- "__vtbl_ptr_type");
+ llvm::DIType *VTableType = DBuilder.createPointerType(
+ nullptr, VTableWidth, 0, DWARFAddressSpace, "__vtbl_ptr_type");
EltTys.push_back(VTableType);
// The vptr is a pointer to this special vtable type.
@@ -1739,9 +1822,9 @@ void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
VPtrTy = getOrCreateVTablePtrType(Unit);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType *VPtrMember = DBuilder.createMemberType(
- Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
- llvm::DINode::FlagArtificial, VPtrTy);
+ llvm::DIType *VPtrMember =
+ DBuilder.createMemberType(Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
+ llvm::DINode::FlagArtificial, VPtrTy);
EltTys.push_back(VPtrMember);
}
@@ -2079,7 +2162,7 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
llvm::raw_svector_ostream OS(ConfigMacros);
const auto &PPOpts = CGM.getPreprocessorOpts();
unsigned I = 0;
- // Translate the macro definitions back into a commmand line.
+ // Translate the macro definitions back into a command line.
for (auto &M : PPOpts.Macros) {
if (++I > 1)
OS << " ";
@@ -2088,9 +2171,14 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
OS << "\"-" << (Undef ? 'U' : 'D');
for (char c : Macro)
switch (c) {
- case '\\' : OS << "\\\\"; break;
- case '"' : OS << "\\\""; break;
- default: OS << c;
+ case '\\':
+ OS << "\\\\";
+ break;
+ case '"':
+ OS << "\\\"";
+ break;
+ default:
+ OS << c;
}
OS << '\"';
}
@@ -2107,6 +2195,7 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
: ~1ULL;
llvm::DIBuilder DIB(CGM.getModule());
DIB.createCompileUnit(TheCU->getSourceLanguage(),
+ // TODO: Support "Source" from external AST providers?
DIB.createFile(Mod.getModuleName(), Mod.getPath()),
TheCU->getProducer(), true, StringRef(), 0,
Mod.getASTFile(), llvm::DICompileUnit::FullDebug,
@@ -2162,7 +2251,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
if (!SClassTy)
return nullptr;
- llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0,
+ llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0,
llvm::DINode::FlagZero);
EltTys.push_back(InhTag);
}
@@ -2184,7 +2273,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
EltTys.push_back(PropertyNode);
};
{
- llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ llvm::SmallPtrSet<const IdentifierInfo *, 16> PropertySet;
for (const ObjCCategoryDecl *ClassExt : ID->known_extensions())
for (auto *PD : ClassExt->properties()) {
PropertySet.insert(PD->getIdentifier());
@@ -2265,10 +2354,12 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
PropertyNode = DBuilder.createObjCProperty(
PD->getName(), PUnit, PLine,
- hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(
- PD->getGetterName()),
- hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(
- PD->getSetterName()),
+ hasDefaultGetterName(PD, Getter)
+ ? ""
+ : getSelectorName(PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter)
+ ? ""
+ : getSelectorName(PD->getSetterName()),
PD->getPropertyAttributes(),
getOrCreateType(PD->getType(), PUnit));
}
@@ -2291,12 +2382,14 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
llvm::DIFile *Unit) {
llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
int64_t Count = Ty->getNumElements();
- if (Count == 0)
- // If number of elements are not known then this is an unbounded array.
- // Use Count == -1 to express such arrays.
- Count = -1;
- llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(0, Count);
+ llvm::Metadata *Subscript;
+ QualType QTy(Ty, 0);
+ auto SizeExpr = SizeExprCache.find(QTy);
+ if (SizeExpr != SizeExprCache.end())
+ Subscript = DBuilder.getOrCreateSubrange(0, SizeExpr->getSecond());
+ else
+ Subscript = DBuilder.getOrCreateSubrange(0, Count ? Count : -1);
llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -2353,8 +2446,12 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
}
}
- // FIXME: Verify this is right for VLAs.
- Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
+ auto SizeNode = SizeExprCache.find(EltTy);
+ if (SizeNode != SizeExprCache.end())
+ Subscripts.push_back(
+ DBuilder.getOrCreateSubrange(0, SizeNode->getSecond()));
+ else
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
EltTy = Ty->getElementType();
}
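With SizeExprCache, a dimension whose bound is not a compile-time constant can carry a DWARF size expression (presumably registered where the VLA bound is evaluated) instead of the old fixed count. Illustrative source:

    void f(int n) {
      int vla[n];  // non-constant bound: the subrange now references a size
                   // expression rather than a placeholder element count
    }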
@@ -2422,8 +2519,7 @@ llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) {
return DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_atomic_type, FromTy);
}
-llvm::DIType* CGDebugInfo::CreateType(const PipeType *Ty,
- llvm::DIFile *U) {
+llvm::DIType *CGDebugInfo::CreateType(const PipeType *Ty, llvm::DIFile *U) {
return getOrCreateType(Ty->getElementType(), U);
}
@@ -2437,7 +2533,7 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
- SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
bool isImportedFromModule =
DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
@@ -2460,7 +2556,7 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
StringRef EDName = ED->getName();
llvm::DIType *RetTy = DBuilder.createReplaceableCompositeType(
llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line,
- 0, Size, Align, llvm::DINode::FlagFwdDecl, FullName);
+ 0, Size, Align, llvm::DINode::FlagFwdDecl, Identifier);
ReplaceMap.emplace_back(
std::piecewise_construct, std::make_tuple(Ty),
@@ -2480,14 +2576,17 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
- SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
// Create elements for each enumerator.
SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
+ bool IsSigned = ED->getIntegerType()->isSignedIntegerType();
for (const auto *Enum : ED->enumerators()) {
- Enumerators.push_back(DBuilder.createEnumerator(
- Enum->getName(), Enum->getInitVal().getSExtValue()));
+ const auto &InitVal = Enum->getInitVal();
+ auto Value = IsSigned ? InitVal.getSExtValue() : InitVal.getZExtValue();
+ Enumerators.push_back(
+ DBuilder.createEnumerator(Enum->getName(), Value, !IsSigned));
}
// Return a CompositeType for the enum itself.
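The signedness plumbing matters for enumerators that do not fit in a signed 64-bit value (illustrative):

    enum Big : unsigned long long { Max = 0xFFFFFFFFFFFFFFFFULL };
    // Before: getSExtValue() encoded Max as -1.
    // Now: the unsigned underlying type selects getZExtValue() and marks the
    // DIEnumerator unsigned, so Max survives as 18446744073709551615.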
@@ -2496,11 +2595,10 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
llvm::DIScope *EnumContext = getDeclContextDescriptor(ED);
- llvm::DIType *ClassTy =
- ED->isFixed() ? getOrCreateType(ED->getIntegerType(), DefUnit) : nullptr;
+ llvm::DIType *ClassTy = getOrCreateType(ED->getIntegerType(), DefUnit);
return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit,
Line, Size, Align, EltArray, ClassTy,
- FullName);
+ Identifier, ED->isFixed());
}
llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent,
@@ -2585,10 +2683,10 @@ llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
- auto it = TypeCache.find(Ty.getAsOpaquePtr());
- if (it != TypeCache.end()) {
+ auto It = TypeCache.find(Ty.getAsOpaquePtr());
+ if (It != TypeCache.end()) {
// Verify that the debug info still exists.
- if (llvm::Metadata *V = it->second)
+ if (llvm::Metadata *V = It->second)
return cast<llvm::DIType>(V);
}
@@ -2623,7 +2721,7 @@ llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
return T;
llvm::DIType *Res = CreateTypeNode(Ty, Unit);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
// And update the type cache.
TypeCache[TyPtr].reset(Res);
@@ -2801,11 +2899,24 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
uint64_t Size = CGM.getContext().getTypeSize(Ty);
auto Align = getDeclAlignIfRequired(D, CGM.getContext());
- SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+
+ // Explicitly record the calling convention for C++ records.
+ auto Flags = llvm::DINode::FlagZero;
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (CGM.getCXXABI().getRecordArgABI(CXXRD) == CGCXXABI::RAA_Indirect)
+ Flags |= llvm::DINode::FlagTypePassByReference;
+ else
+ Flags |= llvm::DINode::FlagTypePassByValue;
+
+ // Record if a C++ record is a trivial type.
+ if (CXXRD->isTrivial())
+ Flags |= llvm::DINode::FlagTrivial;
+ }
llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align,
- llvm::DINode::FlagZero, FullName);
+ Flags, Identifier);
// Elements of composite types usually have back-references to the type,
// creating uniquing cycles. Distinct nodes are more efficient.
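What the new flags mean in source terms, under the Itanium C++ ABI (illustrative):

    struct ByValue { int x; };   // trivial: FlagTypePassByValue + FlagTrivial
    struct ByRef {               // non-trivial copy constructor means
      ByRef(const ByRef &);      // RAA_Indirect, recorded as
      int x;                     // FlagTypePassByReference
    };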
@@ -2819,14 +2930,14 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// so they don't tend to be involved in uniquing cycles and there is some
// chance of merging them when linking together two modules. Only make
// them distinct if they are ODR-uniqued.
- if (FullName.empty())
+ if (Identifier.empty())
break;
LLVM_FALLTHROUGH;
case llvm::dwarf::DW_TAG_structure_type:
case llvm::dwarf::DW_TAG_union_type:
case llvm::dwarf::DW_TAG_class_type:
- // Immediatley resolve to a distinct node.
+ // Immediately resolve to a distinct node.
RealDecl =
llvm::MDNode::replaceWithDistinct(llvm::TempDICompositeType(RealDecl));
break;
@@ -2901,10 +3012,10 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
if (DebugKind >= codegenoptions::LimitedDebugInfo) {
if (const NamespaceDecl *NSDecl =
- dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNamespace(NSDecl);
else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext())) {
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext())) {
llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
}
@@ -2931,8 +3042,8 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
llvm::APInt ConstVal(32, 1);
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
- T = CGM.getContext().getConstantArrayType(ET, ConstVal,
- ArrayType::Normal, 0);
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal, ArrayType::Normal,
+ 0);
}
Name = VD->getName();
@@ -2959,8 +3070,8 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
if (DC->isRecord())
DC = CGM.getContext().getTranslationUnitDecl();
- llvm::DIScope *Mod = getParentModuleOrNull(VD);
- VDContext = getContextDescriptor(cast<Decl>(DC), Mod ? Mod : TheCU);
+ llvm::DIScope *Mod = getParentModuleOrNull(VD);
+ VDContext = getContextDescriptor(cast<Decl>(DC), Mod ? Mod : TheCU);
}
llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
@@ -2972,8 +3083,8 @@ llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
unsigned Line = getLineNumber(Loc);
- collectFunctionDeclProps(GD, Unit, Name, LinkageName, DContext,
- TParamsArray, Flags);
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, DContext, TParamsArray,
+ Flags);
auto *FD = dyn_cast<FunctionDecl>(GD.getDecl());
// Build function type.
@@ -2999,20 +3110,18 @@ llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
!FD->isExternallyVisible(),
/* isDefinition = */ false, 0, Flags, CGM.getLangOpts().Optimize,
TParamsArray.get(), getFunctionDeclaration(FD));
- const auto *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl());
+ const FunctionDecl *CanonDecl = FD->getCanonicalDecl();
FwdDeclReplaceMap.emplace_back(std::piecewise_construct,
std::make_tuple(CanonDecl),
std::make_tuple(SP));
return SP;
}
-llvm::DISubprogram *
-CGDebugInfo::getFunctionForwardDeclaration(GlobalDecl GD) {
+llvm::DISubprogram *CGDebugInfo::getFunctionForwardDeclaration(GlobalDecl GD) {
return getFunctionFwdDeclOrStub(GD, /* Stub = */ false);
}
-llvm::DISubprogram *
-CGDebugInfo::getFunctionStub(GlobalDecl GD) {
+llvm::DISubprogram *CGDebugInfo::getFunctionStub(GlobalDecl GD) {
return getFunctionFwdDeclOrStub(GD, /* Stub = */ true);
}
@@ -3136,7 +3245,8 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
if (FPT->getNumParams() > 1)
SelfDeclTy = FPT->getParamType(0);
if (!SelfDeclTy.isNull())
- Elts.push_back(CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F)));
+ Elts.push_back(
+ CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F)));
// "_cmd" pointer is always second argument.
Elts.push_back(DBuilder.createArtificialType(
getOrCreateType(CGM.getContext().getObjCSelType(), F)));
@@ -3172,7 +3282,8 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
SourceLocation ScopeLoc, QualType FnType,
- llvm::Function *Fn, CGBuilderTy &Builder) {
+ llvm::Function *Fn, bool CurFuncIsThunk,
+ CGBuilderTy &Builder) {
StringRef Name;
StringRef LinkageName;
@@ -3213,11 +3324,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (Name.startswith("\01"))
Name = Name.substr(1);
- if (!HasDecl || D->isImplicit()) {
+ if (!HasDecl || D->isImplicit() || D->hasAttr<ArtificialAttr>()) {
Flags |= llvm::DINode::FlagArtificial;
// Artificial functions should not silently reuse CurLoc.
CurLoc = SourceLocation();
}
+
+ if (CurFuncIsThunk)
+ Flags |= llvm::DINode::FlagThunk;
+
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = getLineNumber(ScopeLoc);
@@ -3238,6 +3353,27 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (HasDecl && isa<FunctionDecl>(D))
DeclCache[D->getCanonicalDecl()].reset(SP);
+ if (CGM.getCodeGenOpts().DwarfVersion >= 5) {
+ // Starting with DWARF V5 method declarations are emitted as children of
+ // the interface type.
+ if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
+ const ObjCInterfaceDecl *ID = OMD->getClassInterface();
+ QualType QTy(ID->getTypeForDecl(), 0);
+ auto It = TypeCache.find(QTy.getAsOpaquePtr());
+ if (It != TypeCache.end()) {
+ llvm::DICompositeType *InterfaceDecl =
+ cast<llvm::DICompositeType>(It->second);
+ llvm::DISubprogram *FD = DBuilder.createFunction(
+ InterfaceDecl, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit), Fn->hasLocalLinkage(),
+ false /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ TParamsArray.get());
+ DBuilder.finalizeSubprogram(FD);
+ ObjCMethodCache[ID].push_back(FD);
+ }
+ }
+ }
+
// Push the function onto the lexical block stack.
LexicalBlockStack.emplace_back(SP);
@@ -3330,8 +3466,7 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
}
void CGDebugInfo::AppendAddressSpaceXDeref(
- unsigned AddressSpace,
- SmallVectorImpl<int64_t> &Expr) const {
+ unsigned AddressSpace, SmallVectorImpl<int64_t> &Expr) const {
Optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(AddressSpace);
if (!DWARFAddressSpace)
@@ -3463,13 +3598,14 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
nullptr, Elements);
}
-void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
- llvm::Optional<unsigned> ArgNo,
- CGBuilderTy &Builder) {
+llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
+ llvm::Value *Storage,
+ llvm::Optional<unsigned> ArgNo,
+ CGBuilderTy &Builder) {
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (VD->hasAttr<NoDebugAttr>())
- return;
+ return nullptr;
bool Unwritten =
VD->isImplicit() || (isa<Decl>(VD->getDeclContext()) &&
@@ -3487,7 +3623,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
// If there is no debug info for this type then do not emit debug info
// for this variable.
if (!Ty)
- return;
+ return nullptr;
// Get location information.
unsigned Line = 0;
@@ -3538,15 +3674,15 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
} else if (const auto *RT = dyn_cast<RecordType>(VD->getType())) {
// If VD is an anonymous union then Storage represents value for
// all union fields.
- const auto *RD = cast<RecordDecl>(RT->getDecl());
+ const RecordDecl *RD = RT->getDecl();
if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
// GDB has trouble finding local variables in anonymous unions, so we emit
- // artifical local variables for each of the members.
+ // artificial local variables for each of the members.
//
// FIXME: Remove this code as soon as GDB supports this.
// The debug info verifier in LLVM operates based on the assumption that a
- // variable has the same size as its storage and we had to disable the check
- // for artificial variables.
+ // variable has the same size as its storage and we had to disable the
+ // check for artificial variables.
for (const auto *Field : RD->fields()) {
llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
StringRef FieldName = Field->getName();
@@ -3571,25 +3707,26 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
}
// Create the descriptor for the variable.
- auto *D = ArgNo
- ? DBuilder.createParameterVariable(
- Scope, Name, *ArgNo, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags)
- : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags,
- Align);
+ auto *D = ArgNo ? DBuilder.createParameterVariable(
+ Scope, Name, *ArgNo, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags)
+ : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize,
+ Flags, Align);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
Builder.GetInsertBlock());
+
+ return D;
}
-void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
- llvm::Value *Storage,
- CGBuilderTy &Builder) {
+llvm::DILocalVariable *
+CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, llvm::Value *Storage,
+ CGBuilderTy &Builder) {
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
- EmitDeclare(VD, Storage, llvm::None, Builder);
+ return EmitDeclare(VD, Storage, llvm::None, Builder);
}
llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy,
@@ -3686,7 +3823,7 @@ struct BlockLayoutChunk {
bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
return l.OffsetInBits < r.OffsetInBits;
}
-}
+} // namespace
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
StringRef Name,
@@ -3725,9 +3862,10 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
blockLayout->getElementOffsetInBits(3),
tunit, tunit));
fields.push_back(createFieldType(
- "__descriptor", C.getPointerType(block.NeedsCopyDispose
- ? C.getBlockDescriptorExtendedType()
- : C.getBlockDescriptorType()),
+ "__descriptor",
+ C.getPointerType(block.NeedsCopyDispose
+ ? C.getBlockDescriptorExtendedType()
+ : C.getBlockDescriptorType()),
loc, AS_public, blockLayout->getElementOffsetInBits(4), tunit, tunit));
// We want to sort the captures by offset, not because DWARF
@@ -3806,8 +3944,8 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
}
SmallString<36> typeName;
- llvm::raw_svector_ostream(typeName) << "__block_literal_"
- << CGM.getUniqueBlockCount();
+ llvm::raw_svector_ostream(typeName)
+ << "__block_literal_" << CGM.getUniqueBlockCount();
llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields);
@@ -3823,8 +3961,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Create the descriptor for the parameter.
auto *debugVar = DBuilder.createParameterVariable(
- scope, Name, ArgNo, tunit, line, type,
- CGM.getLangOpts().Optimize, flags);
+ scope, Name, ArgNo, tunit, line, type, CGM.getLangOpts().Optimize, flags);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Alloca, debugVar, DBuilder.createExpression(),
@@ -3863,7 +4000,7 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
if (FieldName.empty()) {
if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
- Var, DContext);
+ Var, DContext);
continue;
}
// Use VarDecl's Tag, Scope and Line number.
@@ -4090,7 +4227,6 @@ void CGDebugInfo::setDwoId(uint64_t Signature) {
TheCU->setDWOId(Signature);
}
-
void CGDebugInfo::finalize() {
// Creating types might create further types - invalidating the current
// element and the size(), so don't cache/reference them.
@@ -4102,32 +4238,55 @@ void CGDebugInfo::finalize() {
DBuilder.replaceTemporary(llvm::TempDIType(E.Decl), Ty);
}
- for (auto p : ReplaceMap) {
- assert(p.second);
- auto *Ty = cast<llvm::DIType>(p.second);
+ if (CGM.getCodeGenOpts().DwarfVersion >= 5) {
+ // Add methods to interface.
+ for (const auto &P : ObjCMethodCache) {
+ if (P.second.empty())
+ continue;
+
+ QualType QTy(P.first->getTypeForDecl(), 0);
+ auto It = TypeCache.find(QTy.getAsOpaquePtr());
+ assert(It != TypeCache.end());
+
+ llvm::DICompositeType *InterfaceDecl =
+ cast<llvm::DICompositeType>(It->second);
+
+ SmallVector<llvm::Metadata *, 16> EltTys;
+ auto CurrentElts = InterfaceDecl->getElements();
+ EltTys.append(CurrentElts.begin(), CurrentElts.end());
+ for (auto &MD : P.second)
+ EltTys.push_back(MD);
+ llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
+ DBuilder.replaceArrays(InterfaceDecl, Elements);
+ }
+ }
+
+ for (const auto &P : ReplaceMap) {
+ assert(P.second);
+ auto *Ty = cast<llvm::DIType>(P.second);
assert(Ty->isForwardDecl());
- auto it = TypeCache.find(p.first);
- assert(it != TypeCache.end());
- assert(it->second);
+ auto It = TypeCache.find(P.first);
+ assert(It != TypeCache.end());
+ assert(It->second);
DBuilder.replaceTemporary(llvm::TempDIType(Ty),
- cast<llvm::DIType>(it->second));
+ cast<llvm::DIType>(It->second));
}
- for (const auto &p : FwdDeclReplaceMap) {
- assert(p.second);
- llvm::TempMDNode FwdDecl(cast<llvm::MDNode>(p.second));
+ for (const auto &P : FwdDeclReplaceMap) {
+ assert(P.second);
+ llvm::TempMDNode FwdDecl(cast<llvm::MDNode>(P.second));
llvm::Metadata *Repl;
- auto it = DeclCache.find(p.first);
+ auto It = DeclCache.find(P.first);
// If there has been no definition for the declaration, call RAUW
// with ourselves, that will destroy the temporary MDNode and
// replace it with a standard one, avoiding leaking memory.
- if (it == DeclCache.end())
- Repl = p.second;
+ if (It == DeclCache.end())
+ Repl = P.second;
else
- Repl = it->second;
+ Repl = It->second;
if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(Repl))
Repl = GVE->getVariable();
@@ -4157,6 +4316,5 @@ llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
return llvm::DebugLoc();
llvm::MDNode *Scope = LexicalBlockStack.back();
- return llvm::DebugLoc::get(
- getLineNumber(Loc), getColumnNumber(Loc), Scope);
+ return llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc), Scope);
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 4f7b7f2a0d9c..e632806138f0 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -19,6 +19,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
@@ -66,7 +67,7 @@ class CGDebugInfo {
llvm::DIType *ClassTy = nullptr;
llvm::DICompositeType *ObjTy = nullptr;
llvm::DIType *SelTy = nullptr;
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
llvm::DIType *SingletonId = nullptr;
#include "clang/Basic/OpenCLImageTypes.def"
llvm::DIType *OCLSamplerDITy = nullptr;
@@ -81,6 +82,10 @@ class CGDebugInfo {
llvm::SmallDenseMap<llvm::StringRef, llvm::StringRef> DebugPrefixMap;
+ /// Cache that maps VLA types to size expressions for that type,
+ /// represented by instantiated Metadata nodes.
+ llvm::SmallDenseMap<QualType, llvm::Metadata *> SizeExprCache;
+
struct ObjCInterfaceCacheEntry {
const ObjCInterfaceType *Type;
llvm::DIType *Decl;
@@ -93,6 +98,10 @@ class CGDebugInfo {
/// Cache of previously constructed interfaces which may change.
llvm::SmallVector<ObjCInterfaceCacheEntry, 32> ObjCInterfaceCache;
+ /// Cache of forward declarations for methods belonging to the interface.
+ llvm::DenseMap<const ObjCInterfaceDecl *, std::vector<llvm::DISubprogram *>>
+ ObjCMethodCache;
+
/// Cache of references to clang modules and precompiled headers.
llvm::DenseMap<const Module *, llvm::TrackingMDRef> ModuleCache;
@@ -223,12 +232,12 @@ class CGDebugInfo {
/// Helper function for CollectCXXBases.
/// Adds debug info entries for types in Bases that are not in SeenTypes.
- void CollectCXXBasesAux(const CXXRecordDecl *RD, llvm::DIFile *Unit,
- SmallVectorImpl<llvm::Metadata *> &EltTys,
- llvm::DIType *RecordTy,
- const CXXRecordDecl::base_class_const_range &Bases,
- llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
- llvm::DINode::DIFlags StartingFlags);
+ void CollectCXXBasesAux(
+ const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy,
+ const CXXRecordDecl::base_class_const_range &Bases,
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
+ llvm::DINode::DIFlags StartingFlags);
/// A helper function to collect template parameters.
llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList,
@@ -247,8 +256,7 @@ class CGDebugInfo {
llvm::DIType *createFieldType(StringRef name, QualType type,
SourceLocation loc, AccessSpecifier AS,
- uint64_t offsetInBits,
- uint32_t AlignInBits,
+ uint64_t offsetInBits, uint32_t AlignInBits,
llvm::DIFile *tunit, llvm::DIScope *scope,
const RecordDecl *RD = nullptr);
@@ -309,6 +317,11 @@ public:
void finalize();
+ /// Register VLA size expression debug node with the qualified type.
+ void registerVLASizeExpression(QualType Ty, llvm::Metadata *SizeExpr) {
+ SizeExprCache[Ty] = SizeExpr;
+ }
+
/// Module debugging: Support for building PCMs.
/// @{
/// Set the main CU's DwoId field to \p Signature.
@@ -356,7 +369,8 @@ public:
/// \param ScopeLoc The location of the function body.
void EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
SourceLocation ScopeLoc, QualType FnType,
- llvm::Function *Fn, CGBuilderTy &Builder);
+ llvm::Function *Fn, bool CurFnIsThunk,
+ CGBuilderTy &Builder);
/// Start a new scope for an inlined function.
void EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD);
@@ -379,16 +393,17 @@ public:
/// Emit call to \c llvm.dbg.declare for an automatic variable
/// declaration.
- void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
- CGBuilderTy &Builder);
+ /// Returns a pointer to the DILocalVariable associated with the
+ /// llvm.dbg.declare, or nullptr otherwise.
+ llvm::DILocalVariable *EmitDeclareOfAutoVariable(const VarDecl *Decl,
+ llvm::Value *AI,
+ CGBuilderTy &Builder);
/// Emit call to \c llvm.dbg.declare for an imported variable
/// declaration in a block.
- void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
- llvm::Value *storage,
- CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo,
- llvm::Instruction *InsertPoint = nullptr);
+ void EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *variable, llvm::Value *storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint = nullptr);
/// Emit call to \c llvm.dbg.declare for an argument variable
/// declaration.
@@ -451,10 +466,14 @@ public:
llvm::DIMacroFile *CreateTempMacroFile(llvm::DIMacroFile *Parent,
SourceLocation LineLoc,
SourceLocation FileLoc);
+
private:
/// Emit call to llvm.dbg.declare for a variable declaration.
- void EmitDeclare(const VarDecl *decl, llvm::Value *AI,
- llvm::Optional<unsigned> ArgNo, CGBuilderTy &Builder);
+ /// Returns a pointer to the DILocalVariable associated with the
+ /// llvm.dbg.declare, or nullptr otherwise.
+ llvm::DILocalVariable *EmitDeclare(const VarDecl *decl, llvm::Value *AI,
+ llvm::Optional<unsigned> ArgNo,
+ CGBuilderTy &Builder);
/// Build up structure info for the byref. See \a BuildByRefType.
llvm::DIType *EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
@@ -482,8 +501,11 @@ private:
std::string remapDIPath(StringRef) const;
/// Compute the file checksum debug info for input file ID.
- llvm::DIFile::ChecksumKind computeChecksum(FileID FID,
- SmallString<32> &Checksum) const;
+ Optional<llvm::DIFile::ChecksumKind>
+ computeChecksum(FileID FID, SmallString<32> &Checksum) const;
+
+ /// Get the source of the given file ID.
+ Optional<StringRef> getSource(const SourceManager &SM, FileID FID);
/// Get the file debug info descriptor for the input location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
@@ -637,7 +659,7 @@ public:
~ApplyDebugLocation();
- /// \brief Apply TemporaryLocation if it is valid. Otherwise switch
+ /// Apply TemporaryLocation if it is valid. Otherwise switch
/// to an artificial debug location that has a valid scope, but no
/// line information.
///
@@ -651,7 +673,7 @@ public:
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF) {
return ApplyDebugLocation(CGF, false, SourceLocation());
}
- /// \brief Apply TemporaryLocation if it is valid. Otherwise switch
+ /// Apply TemporaryLocation if it is valid. Otherwise switch
/// to an artificial debug location that has a valid scope, but no
/// line information.
static ApplyDebugLocation
@@ -668,7 +690,6 @@ public:
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF) {
return ApplyDebugLocation(CGF, true, SourceLocation());
}
-
};
/// A scoped helper to set the current debug location to an inlined location.
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 04585a8afbb6..57b2fbadbeec 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -229,18 +229,19 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
LangAS AS = GetGlobalVarAddressSpace(&D);
unsigned TargetAS = getContext().getTargetAddressSpace(AS);
- // Local address space cannot have an initializer.
+ // OpenCL variables in local address space and CUDA shared
+ // variables cannot have an initializer.
llvm::Constant *Init = nullptr;
- if (Ty.getAddressSpace() != LangAS::opencl_local)
- Init = EmitNullConstant(Ty);
- else
+ if (Ty.getAddressSpace() == LangAS::opencl_local ||
+ D.hasAttr<CUDASharedAttr>())
Init = llvm::UndefValue::get(LTy);
+ else
+ Init = EmitNullConstant(Ty);
llvm::GlobalVariable *GV = new llvm::GlobalVariable(
getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- setGlobalVisibility(GV, &D, ForDefinition);
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
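Two source fragments the rewritten branch covers (separate dialects, hypothetical names); both address spaces forbid load-time initialization, so the global gets an undef initializer rather than a zero one:

    // CUDA: __shared__ storage cannot be statically initialized.
    __global__ void kern() { __shared__ int tile[256]; }

    // OpenCL: likewise for function-scope __local storage.
    __kernel void k(void) { __local int scratch[64]; }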
@@ -248,12 +249,7 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
if (D.getTLSKind())
setTLSMode(GV, D);
- if (D.isExternallyVisible()) {
- if (D.hasAttr<DLLImportAttr>())
- GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
- else if (D.hasAttr<DLLExportAttr>())
- GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
- }
+ setGVProperties(GV, &D);
// Make sure the result is of the correct type.
LangAS ExpectedAS = Ty.getAddressSpace();
@@ -291,8 +287,11 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
// never defer them.
assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
}
- if (GD.getDecl())
+ if (GD.getDecl()) {
+ // Disable emission of the parent function for the OpenMP device codegen.
+ CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
(void)GetAddrOfGlobal(GD);
+ }
return Addr;
}
@@ -344,6 +343,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->getThreadLocalMode(),
CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
+ GV->setDSOLocal(OldGV->isDSOLocal());
GV->setComdat(OldGV->getComdat());
// Steal the name of the old global
@@ -469,13 +469,11 @@ namespace {
}
};
- struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
- DestroyNRVOVariable(Address addr,
- const CXXDestructorDecl *Dtor,
- llvm::Value *NRVOFlag)
- : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
+ template <class Derived>
+ struct DestroyNRVOVariable : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(Address addr, llvm::Value *NRVOFlag)
+ : NRVOFlag(NRVOFlag), Loc(addr) {}
- const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
Address Loc;
@@ -494,12 +492,39 @@ namespace {
CGF.EmitBlock(RunDtorBB);
}
+ static_cast<Derived *>(this)->emitDestructorCall(CGF);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+
+ virtual ~DestroyNRVOVariable() = default;
+ };
+
+ struct DestroyNRVOVariableCXX final
+ : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
+ DestroyNRVOVariableCXX(Address addr, const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag)
+ : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, NRVOFlag),
+ Dtor(Dtor) {}
+
+ const CXXDestructorDecl *Dtor;
+
+ void emitDestructorCall(CodeGenFunction &CGF) {
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
- /*Delegating=*/false,
- Loc);
+ /*Delegating=*/false, Loc);
+ }
+ };
- if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ struct DestroyNRVOVariableC final
+ : DestroyNRVOVariable<DestroyNRVOVariableC> {
+ DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
+ : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, NRVOFlag), Ty(Ty) {}
+
+ QualType Ty;
+
+ void emitDestructorCall(CodeGenFunction &CGF) {
+ CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
}
};
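The cleanup is now a CRTP base so the C++-destructor and nontrivial-C-struct variants share the NRVO-flag test; a minimal sketch of the shape (hypothetical names):

    template <class Derived> struct NRVOCleanupBase {
      void run() {
        // shared part: branch on the NRVO flag, maybe skip destruction
        static_cast<Derived *>(this)->destroy();  // variant-specific part
      }
    };
    struct CxxCleanup : NRVOCleanupBase<CxxCleanup> {
      void destroy() { /* emit the C++ destructor call */ }
    };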
@@ -821,11 +846,10 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
-/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
-/// non-zero parts of the specified initializer with equal or fewer than
-/// NumStores scalar stores.
-static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
- unsigned &NumStores) {
+/// Decide whether we can emit the non-zero parts of the specified initializer
+/// with equal or fewer than NumStores scalar stores.
+static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
+ unsigned &NumStores) {
// Zero and Undef never requires any extra stores.
if (isa<llvm::ConstantAggregateZero>(Init) ||
isa<llvm::ConstantPointerNull>(Init) ||
@@ -840,7 +864,7 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
- if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
return false;
}
return true;
@@ -850,7 +874,7 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
dyn_cast<llvm::ConstantDataSequential>(Init)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
llvm::Constant *Elt = CDS->getElementAsConstant(i);
- if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
return false;
}
return true;
@@ -860,18 +884,18 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
return false;
}
-/// emitStoresForInitAfterMemset - For inits that
-/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
-/// stores that would be required.
-static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
- bool isVolatile, CGBuilderTy &Builder) {
+/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
+/// the scalar stores that would be required.
+static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
+ llvm::Constant *Init, Address Loc,
+ bool isVolatile, CGBuilderTy &Builder) {
assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
- "called emitStoresForInitAfterMemset for zero or undef value.");
+ "called emitStoresForInitAfterBZero for zero or undef value.");
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
- Builder.CreateDefaultAlignedStore(Init, Loc, isVolatile);
+ Builder.CreateStore(Init, Loc, isVolatile);
return;
}
@@ -882,8 +906,9 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
// If necessary, get a pointer to the element and emit it.
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
- emitStoresForInitAfterMemset(
- Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
+ emitStoresForInitAfterBZero(
+ CGM, Elt,
+ Builder.CreateConstInBoundsGEP2_32(Loc, 0, i, CGM.getDataLayout()),
isVolatile, Builder);
}
return;
@@ -897,19 +922,19 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
// If necessary, get a pointer to the element and emit it.
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
- emitStoresForInitAfterMemset(
- Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
+ emitStoresForInitAfterBZero(
+ CGM, Elt,
+ Builder.CreateConstInBoundsGEP2_32(Loc, 0, i, CGM.getDataLayout()),
isVolatile, Builder);
}
}
-/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
-/// plus some stores to initialize a local variable instead of using a memcpy
-/// from a constant global. It is beneficial to use memset if the global is all
-/// zeros, or mostly zeros and large.
-static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
- uint64_t GlobalSize) {
- // If a global is all zeros, always use a memset.
+/// Decide whether we should use bzero plus some stores to initialize a local
+/// variable instead of using a memcpy from a constant global. It is beneficial
+/// to use bzero if the global is all zeros, or mostly zeros and large.
+static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ // If a global is all zeros, always use a bzero.
if (isa<llvm::ConstantAggregateZero>(Init)) return true;
// If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
@@ -920,7 +945,114 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
uint64_t SizeLimit = 32;
return GlobalSize > SizeLimit &&
- canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
+ canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
+}
+
+/// A byte pattern.
+///
+/// Can be "any" pattern if the value was padding or known to be undef.
+/// Can be "none" pattern if a sequence doesn't exist.
+class BytePattern {
+ uint8_t Val;
+ enum class ValueType : uint8_t { Specific, Any, None } Type;
+ BytePattern(ValueType Type) : Type(Type) {}
+
+public:
+ BytePattern(uint8_t Value) : Val(Value), Type(ValueType::Specific) {}
+ static BytePattern Any() { return BytePattern(ValueType::Any); }
+ static BytePattern None() { return BytePattern(ValueType::None); }
+ bool isAny() const { return Type == ValueType::Any; }
+ bool isNone() const { return Type == ValueType::None; }
+ bool isValued() const { return Type == ValueType::Specific; }
+ uint8_t getValue() const {
+ assert(isValued());
+ return Val;
+ }
+ BytePattern merge(const BytePattern Other) const {
+ if (isNone() || Other.isNone())
+ return None();
+ if (isAny())
+ return Other;
+ if (Other.isAny())
+ return *this;
+ if (getValue() == Other.getValue())
+ return *this;
+ return None();
+ }
+};
+
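The merge forms a small lattice: Any is the identity, None absorbs, and two specific bytes must agree. Illustrative uses, given the class as defined above:

    BytePattern(0xAB).merge(BytePattern::Any());   // -> 0xAB
    BytePattern(0xAB).merge(BytePattern(0xAB));    // -> 0xAB
    BytePattern(0xAB).merge(BytePattern(0xCD));    // -> None
    BytePattern::None().merge(BytePattern(0x00));  // -> None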
+/// Figures out whether the constant can be initialized with memset.
+static BytePattern constantIsRepeatedBytePattern(llvm::Constant *C) {
+ if (isa<llvm::ConstantAggregateZero>(C) || isa<llvm::ConstantPointerNull>(C))
+ return BytePattern(0x00);
+ if (isa<llvm::UndefValue>(C))
+ return BytePattern::Any();
+
+ if (isa<llvm::ConstantInt>(C)) {
+ auto *Int = cast<llvm::ConstantInt>(C);
+ if (Int->getBitWidth() % 8 != 0)
+ return BytePattern::None();
+ const llvm::APInt &Value = Int->getValue();
+ if (Value.isSplat(8))
+ return BytePattern(Value.getLoBits(8).getLimitedValue());
+ return BytePattern::None();
+ }
+
+ if (isa<llvm::ConstantFP>(C)) {
+ auto *FP = cast<llvm::ConstantFP>(C);
+ llvm::APInt Bits = FP->getValueAPF().bitcastToAPInt();
+ if (Bits.getBitWidth() % 8 != 0)
+ return BytePattern::None();
+ if (!Bits.isSplat(8))
+ return BytePattern::None();
+ return BytePattern(Bits.getLimitedValue() & 0xFF);
+ }
+
+ if (isa<llvm::ConstantVector>(C)) {
+ llvm::Constant *Splat = cast<llvm::ConstantVector>(C)->getSplatValue();
+ if (Splat)
+ return constantIsRepeatedBytePattern(Splat);
+ return BytePattern::None();
+ }
+
+ if (isa<llvm::ConstantArray>(C) || isa<llvm::ConstantStruct>(C)) {
+ BytePattern Pattern(BytePattern::Any());
+ for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) {
+ llvm::Constant *Elt = cast<llvm::Constant>(C->getOperand(I));
+ Pattern = Pattern.merge(constantIsRepeatedBytePattern(Elt));
+ if (Pattern.isNone())
+ return Pattern;
+ }
+ return Pattern;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(C)) {
+ BytePattern Pattern(BytePattern::Any());
+ for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(I);
+ Pattern = Pattern.merge(constantIsRepeatedBytePattern(Elt));
+ if (Pattern.isNone())
+ return Pattern;
+ }
+ return Pattern;
+ }
+
+ // BlockAddress, ConstantExpr, and everything else is scary.
+ return BytePattern::None();
+}
+
+/// Decide whether we should use memset to initialize a local variable instead
+/// of using a memcpy from a constant global. Assumes we've already decided to
+/// not use bzero.
+/// FIXME: We could be more clever, as we are for bzero above, and generate
+/// memset followed by stores. It's unclear that's worth the effort.
+static BytePattern shouldUseMemSetToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ uint64_t SizeLimit = 32;
+ if (GlobalSize <= SizeLimit)
+ return BytePattern::None();
+ return constantIsRepeatedBytePattern(Init);
}
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
@@ -940,6 +1072,9 @@ llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
if (!ShouldEmitLifetimeMarkers)
return nullptr;
+ assert(Addr->getType()->getPointerAddressSpace() ==
+ CGM.getDataLayout().getAllocaAddrSpace() &&
+ "Pointer should be in alloca address space");
llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
@@ -949,12 +1084,68 @@ llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
}
void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
+ assert(Addr->getType()->getPointerAddressSpace() ==
+ CGM.getDataLayout().getAllocaAddrSpace() &&
+ "Pointer should be in alloca address space");
Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
C->setDoesNotThrow();
}
+void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
+ CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
+ // For each dimension stores its QualType and corresponding
+ // size-expression Value.
+ SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
+
+ // Break down the array into individual dimensions.
+ QualType Type1D = D.getType();
+ while (getContext().getAsVariableArrayType(Type1D)) {
+ auto VlaSize = getVLAElements1D(Type1D);
+ if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
+ Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
+ else {
+ auto SizeExprAddr = CreateDefaultAlignTempAlloca(
+ VlaSize.NumElts->getType(), "__vla_expr");
+ Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
+ Dimensions.emplace_back(SizeExprAddr.getPointer(),
+ Type1D.getUnqualifiedType());
+ }
+ Type1D = VlaSize.Type;
+ }
+
+ if (!EmitDebugInfo)
+ return;
+
+ // Register each dimension's size-expression with a DILocalVariable,
+ // so that it can be used by CGDebugInfo when instantiating a DISubrange
+ // to describe this array.
+ for (auto &VlaSize : Dimensions) {
+ llvm::Metadata *MD;
+ if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
+ MD = llvm::ConstantAsMetadata::get(C);
+ else {
+ // Create an artificial VarDecl to generate debug info for.
+ IdentifierInfo &NameIdent = getContext().Idents.getOwn(
+ cast<llvm::AllocaInst>(VlaSize.NumElts)->getName());
+ auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
+ auto QT = getContext().getIntTypeForBitwidth(
+ VlaExprTy->getScalarSizeInBits(), false);
+ auto *ArtificialDecl = VarDecl::Create(
+ getContext(), const_cast<DeclContext *>(D.getDeclContext()),
+ D.getLocation(), D.getLocation(), &NameIdent, QT,
+ getContext().CreateTypeSourceInfo(QT), SC_Auto);
+ ArtificialDecl->setImplicit();
+
+ MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
+ Builder);
+ }
+ assert(MD && "No Size expression debug node created");
+ DI->registerVLASizeExpression(VlaSize.Type, MD);
+ }
+}
+
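In source terms, each non-constant VLA dimension above gets its own size alloca plus an artificial variable; a hedged illustration:

    void f(int m, int n) {
      double a[m][n + 1];
      // Two size allocas (named from "__vla_expr" and uniqued) hold m and
      // n + 1; each gets an artificial VarDecl and dbg.declare, and the
      // metadata is registered per dimension so the DISubrange counts
      // describing `a` can reference it.
    }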
/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable. Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
@@ -975,7 +1166,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
+ auto *DI = getDebugInfo();
+ bool EmitDebugInfo = DI && CGM.getCodeGenOpts().getDebugInfo() >=
+ codegenoptions::LimitedDebugInfo;
+
Address address = Address::invalid();
+ Address AllocaAddr = Address::invalid();
if (Ty->isConstantSizeType()) {
bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
@@ -1016,16 +1212,27 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
}
// A normal fixed sized variable becomes an alloca in the entry block,
- // unless it's an NRVO variable.
-
- if (NRVO) {
+ // unless:
+ // - it's an NRVO variable.
+ // - we are compiling OpenMP and it's an OpenMP local variable.
+
+ Address OpenMPLocalAddr =
+ getLangOpts().OpenMP
+ ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
+ : Address::invalid();
+ if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
+ address = OpenMPLocalAddr;
+ } else if (NRVO) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
address = ReturnValue;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ const auto *RD = RecordTy->getDecl();
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
+ RD->isNonTrivialToPrimitiveDestroy()) {
// Create a flag that is used to indicate when the NRVO was applied
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
@@ -1055,7 +1262,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Create the alloca. Note that we set the name separately from
// building the instruction so that it's there even in no-asserts
// builds.
- address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName());
+ address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
+ /*ArraySize=*/nullptr, &AllocaAddr);
// Don't emit lifetime markers for MSVC catch parameters. The lifetime of
// the catch parameter starts in the catchpad instruction, and we can't
@@ -1083,7 +1291,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(size, address.getPointer());
+ EmitLifetimeStart(size, AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1108,28 +1316,28 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
pushStackRestore(NormalCleanup, Stack);
}
- llvm::Value *elementCount;
- QualType elementType;
- std::tie(elementCount, elementType) = getVLASize(Ty);
-
- llvm::Type *llvmTy = ConvertTypeForMem(elementType);
+ auto VlaSize = getVLASize(Ty);
+ llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
// Allocate memory for the array.
- address = CreateTempAlloca(llvmTy, alignment, "vla", elementCount);
+ address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
+ &AllocaAddr);
+
+ // If we have debug info enabled, properly describe the VLA dimensions for
+ // this type by registering the vla size expression for each of the
+ // dimensions.
+ EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
}
setAddrOfLocalVar(&D, address);
emission.Addr = address;
+ emission.AllocaAddr = AllocaAddr;
// Emit debug info for local var declaration.
- if (HaveInsertPoint())
- if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().getDebugInfo() >=
- codegenoptions::LimitedDebugInfo) {
- DI->setLocation(D.getLocation());
- DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
- }
- }
+ if (EmitDebugInfo && HaveInsertPoint()) {
+ DI->setLocation(D.getLocation());
+ (void)DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
+ }
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, address.getPointer());
@@ -1137,23 +1345,36 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Make sure we call @llvm.lifetime.end.
if (emission.useLifetimeMarkers())
EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
- emission.getAllocatedAddress(),
+ emission.getOriginalAllocatedAddress(),
emission.getSizeForLifetimeMarkers());
return emission;
}
+static bool isCapturedBy(const VarDecl &, const Expr *);
+
+/// Determines whether the given __block variable is potentially
+/// captured by the given statement.
+static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
+ if (const Expr *E = dyn_cast<Expr>(S))
+ return isCapturedBy(Var, E);
+ for (const Stmt *SubStmt : S->children())
+ if (isCapturedBy(Var, SubStmt))
+ return true;
+ return false;
+}
+
/// Determines whether the given __block variable is potentially
/// captured by the given expression.
-static bool isCapturedBy(const VarDecl &var, const Expr *e) {
+static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
// Skip the most common kinds of expressions that make
// hierarchy-walking expensive.
- e = e->IgnoreParenCasts();
+ E = E->IgnoreParenCasts();
- if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
- const BlockDecl *block = be->getBlockDecl();
- for (const auto &I : block->captures()) {
- if (I.getVariable() == &var)
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
+ const BlockDecl *Block = BE->getBlockDecl();
+ for (const auto &I : Block->captures()) {
+ if (I.getVariable() == &Var)
return true;
}
@@ -1161,19 +1382,19 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
return false;
}
- if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
const CompoundStmt *CS = SE->getSubStmt();
for (const auto *BI : CS->body())
- if (const auto *E = dyn_cast<Expr>(BI)) {
- if (isCapturedBy(var, E))
- return true;
+ if (const auto *BIE = dyn_cast<Expr>(BI)) {
+ if (isCapturedBy(Var, BIE))
+ return true;
}
else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
// special case declarations
for (const auto *I : DS->decls()) {
if (const auto *VD = dyn_cast<VarDecl>((I))) {
const Expr *Init = VD->getInit();
- if (Init && isCapturedBy(var, Init))
+ if (Init && isCapturedBy(Var, Init))
return true;
}
}
@@ -1185,14 +1406,14 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
return false;
}
- for (const Stmt *SubStmt : e->children())
- if (isCapturedBy(var, cast<Expr>(SubStmt)))
+ for (const Stmt *SubStmt : E->children())
+ if (isCapturedBy(Var, SubStmt))
return true;
return false;
}
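In source terms, the predicate asks whether a __block variable is captured by a given statement tree, e.g. by its own initializer, whose blocks may move the variable before the initializing store lands. A minimal sketch using the blocks extension:

    __block int v = ^int { return v; }();  // BlockExpr capturing v inside
                                           // the init: isCapturedBy -> true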
-/// \brief Determine whether the given initializer is trivial in the sense
+/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
if (!Init)
@@ -1232,6 +1453,19 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (emission.IsByRef)
emitByrefStructureInit(emission);
+ // Initialize the variable here if it doesn't have an initializer and it is a
+ // C struct that is non-trivial to initialize or an array containing such a
+ // struct.
+ if (!Init &&
+ type.isNonTrivialToPrimitiveDefaultInitialize() ==
+ QualType::PDIK_Struct) {
+ LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
+ if (emission.IsByRef)
+ drillIntoBlockVariable(*this, Dst, &D);
+ defaultInitNonTrivialCStructVar(Dst);
+ return;
+ }
+
if (isTrivialInitializer(Init))
return;
@@ -1270,58 +1504,66 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(type).getQuantity());
- llvm::Type *BP = AllocaInt8PtrTy;
+ llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
if (Loc.getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
- // If the initializer is all or mostly zeros, codegen with memset then do
- // a few stores afterward.
- if (shouldUseMemSetPlusStoresToInitialize(constant,
- CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
+ // If the initializer is all or mostly the same, codegen with bzero / memset
+ // then do a few stores afterward.
+ uint64_t ConstantSize =
+ CGM.getDataLayout().getTypeAllocSize(constant->getType());
+ if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
isVolatile);
// Zero and undef don't require any stores.
if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
- Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
- emitStoresForInitAfterMemset(constant, Loc.getPointer(),
- isVolatile, Builder);
- }
- } else {
- // Otherwise, create a temporary global with the initializer then
- // memcpy from the global to the alloca.
- std::string Name = getStaticDeclName(CGM, D);
- unsigned AS = 0;
- if (getLangOpts().OpenCL) {
- AS = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
- BP = llvm::PointerType::getInt8PtrTy(getLLVMContext(), AS);
+ Loc = Builder.CreateBitCast(Loc,
+ constant->getType()->getPointerTo(Loc.getAddressSpace()));
+ emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
}
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
- llvm::GlobalValue::PrivateLinkage,
- constant, Name, nullptr,
- llvm::GlobalValue::NotThreadLocal, AS);
- GV->setAlignment(Loc.getAlignment().getQuantity());
- GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- Address SrcPtr = Address(GV, Loc.getAlignment());
- if (SrcPtr.getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+ return;
+ }
- Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
+ BytePattern Pattern = shouldUseMemSetToInitialize(constant, ConstantSize);
+ if (!Pattern.isNone()) {
+ uint8_t Value = Pattern.isAny() ? 0x00 : Pattern.getValue();
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, Value), SizeVal,
+ isVolatile);
+ return;
}
+
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = getStaticDeclName(CGM, D);
+ unsigned AS = CGM.getContext().getTargetAddressSpace(
+ CGM.getStringLiteralAddressSpace());
+ BP = llvm::PointerType::getInt8PtrTy(getLLVMContext(), AS);
+
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+ CGM.getModule(), constant->getType(), true,
+ llvm::GlobalValue::PrivateLinkage, constant, Name, nullptr,
+ llvm::GlobalValue::NotThreadLocal, AS);
+ GV->setAlignment(Loc.getAlignment().getQuantity());
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+
+ Address SrcPtr = Address(GV, Loc.getAlignment());
+ if (SrcPtr.getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
}
-/// Emit an expression as an initializer for a variable at the given
-/// location. The expression is not necessarily the normal
-/// initializer for the variable, and the address is not necessarily
+/// Emit an expression as an initializer for an object (variable, field, etc.)
+/// at the given location. The expression is not necessarily the normal
+/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
-/// \param var the variable to act as if we're initializing
+/// \param D the object to act as if we're initializing
/// \param loc the address to initialize; its type is a pointer
-/// to the LLVM mapping of the variable's type
+/// to the LLVM mapping of the object's type
/// \param alignment the alignment of the address
-/// \param capturedByInit true if the variable is a __block variable
+/// \param capturedByInit true if \p D is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
LValue lvalue, bool capturedByInit) {
@@ -1349,11 +1591,17 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
if (type->isAtomicType()) {
EmitAtomicInit(const_cast<Expr*>(init), lvalue);
} else {
+ AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
+ if (isa<VarDecl>(D))
+ Overlap = AggValueSlot::DoesNotOverlap;
+ else if (auto *FD = dyn_cast<FieldDecl>(D))
+ Overlap = overlapForFieldInit(FD);
// TODO: how can we delay here if D is captured by its initializer?
EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ Overlap));
}
return;
}
@@ -1386,8 +1634,8 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
if (emission.NRVOFlag) {
assert(!type->isArrayType());
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
- EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
- dtor, emission.NRVOFlag);
+ EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, dtor,
+ emission.NRVOFlag);
return;
}
break;
@@ -1406,6 +1654,16 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
case QualType::DK_objc_weak_lifetime:
break;
+
+ case QualType::DK_nontrivial_c_struct:
+ destroyer = CodeGenFunction::destroyNonTrivialCStruct;
+ if (emission.NRVOFlag) {
+ assert(!type->isArrayType());
+ EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
+ emission.NRVOFlag, type);
+ return;
+ }
+ break;
}
// If we haven't chosen a more specific destroyer, use the default.
@@ -1452,9 +1710,15 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
}
// If this is a block variable, call _Block_object_destroy
- // (on the unforwarded address).
- if (emission.IsByRef)
- enterByrefCleanup(emission);
+ // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
+ // mode.
+ if (emission.IsByRef && CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+ BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
+ if (emission.Variable->getType().isObjCGCWeak())
+ Flags |= BLOCK_FIELD_IS_WEAK;
+ enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
+ /*LoadBlockVarAddr*/ false);
+ }
}
CodeGenFunction::Destroyer *
@@ -1467,6 +1731,8 @@ CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
return destroyARCStrongPrecise;
case QualType::DK_objc_weak_lifetime:
return destroyARCWeak;
+ case QualType::DK_nontrivial_c_struct:
+ return destroyNonTrivialCStruct;
}
llvm_unreachable("Unknown DestructionKind");
}
@@ -1506,9 +1772,6 @@ void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
void CodeGenFunction::pushLifetimeExtendedDestroy(
CleanupKind cleanupKind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray) {
- assert(!isInConditionalBranch() &&
- "performing lifetime extension from within conditional");
-
// Push an EH-only cleanup for the object now.
// FIXME: When popping normal cleanups, we need to keep this EH cleanup
// around in case a temporary's destructor throws an exception.
@@ -1791,9 +2054,12 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Use better IR generation for certain implicit parameters.
if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
// The only implicit argument a block has is its literal.
- // We assume this is always passed directly.
+ // This may be passed as an inalloca'ed value on Windows x86.
if (BlockInfo) {
- setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
+ llvm::Value *V = Arg.isIndirect()
+ ? Builder.CreateLoad(Arg.getIndirectAddress())
+ : Arg.getDirectValue();
+ setBlockContextParameter(IPD, ArgNo, V);
return;
}
}
@@ -1809,20 +2075,50 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
if (DeclPtr.getType() != IRTy)
DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+ // Indirect argument is in alloca address space, which may be different
+ // from the default address space.
+ auto AllocaAS = CGM.getASTAllocaAddressSpace();
+ auto *V = DeclPtr.getPointer();
+ auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
+ auto DestLangAS =
+ getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
+ if (SrcLangAS != DestLangAS) {
+ assert(getContext().getTargetAddressSpace(SrcLangAS) ==
+ CGM.getDataLayout().getAllocaAddrSpace());
+ auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
+ auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
+ DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
+ *this, V, SrcLangAS, DestLangAS, T, true),
+ DeclPtr.getAlignment());
+ }
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
- if (!IsScalar && !CurFuncIsThunk &&
- getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
- const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
- if (RD && RD->hasNonTrivialDestructor())
- pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
+ if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
+ Ty->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) {
+ assert((DtorKind == QualType::DK_cxx_destructor ||
+ DtorKind == QualType::DK_nontrivial_c_struct) &&
+ "unexpected destructor type");
+ pushDestroy(DtorKind, DeclPtr, Ty);
+ CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
+ EHStack.stable_begin();
+ }
}
} else {
- // Otherwise, create a temporary to hold the value.
- DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
- D.getName() + ".addr");
+ // Check if the parameter address is controlled by OpenMP runtime.
+ Address OpenMPLocalAddr =
+ getLangOpts().OpenMP
+ ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
+ : Address::invalid();
+ if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
+ DeclPtr = OpenMPLocalAddr;
+ } else {
+ // Otherwise, create a temporary to hold the value.
+ DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
+ D.getName() + ".addr");
+ }
DoStore = true;
}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 042997831702..5e237d7e0b69 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -53,7 +53,8 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
case TEK_Aggregate:
CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
return;
}
llvm_unreachable("bad evaluation kind");
@@ -79,6 +80,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
case QualType::DK_objc_strong_lifetime:
case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
// We don't care about releasing objects during process teardown.
assert(!D.getTLSKind() && "should have rejected this");
return;
@@ -173,10 +175,12 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
if (!T->isReferenceType()) {
- if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
+ if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
+ D.hasAttr<OMPThreadPrivateDeclAttr>()) {
(void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
&D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
PerformInit, this);
+ }
if (PerformInit)
EmitDeclInit(*this, D, DeclAddr);
if (CGM.isTypeConstant(D.getType(), true))
@@ -232,7 +236,10 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::Constant *addr) {
// Create a function which calls the destructor.
llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
+ registerGlobalDtorWithAtExit(dtorStub);
+}
+void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
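Conceptually, the new overload takes an already-built stub and registers it; for a global with a destructor the emitted code behaves like the following sketch (hypothetical names):

    static Widget g;
    static void __dtor_g(void) { g.~Widget(); }  // createAtExitStub
    // ...appended to the guarded initializer:
    //   atexit(&__dtor_g);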
@@ -309,7 +316,7 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
Fn->setSection(Section);
}
- SetInternalFunctionAttributes(nullptr, Fn, FI);
+ SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -328,6 +335,10 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
+ if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
+ !isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
+ Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
+
if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
!isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
@@ -340,6 +351,10 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SafeStack);
+ if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
+ !isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
+ Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
+
return Fn;
}
@@ -376,6 +391,10 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
D->hasAttr<CUDASharedAttr>()))
return;
+ if (getLangOpts().OpenMP &&
+ getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
+ return;
+
// Check if we've already initialized this decl.
auto I = DelayedCXXInitPosition.find(D);
if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 1ec084ff3f5b..c9820c242554 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -65,7 +65,7 @@ llvm::Constant *CodeGenModule::getTerminateFn() {
if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015))
name = "__std_terminate";
else
- name = "\01?terminate@@YAXXZ";
+ name = "?terminate@@YAXXZ";
} else if (getLangOpts().ObjC1 &&
getLangOpts().ObjCRuntime.hasTerminate())
name = "objc_terminate";
@@ -111,21 +111,32 @@ const EHPersonality
EHPersonality::MSVC_C_specific_handler = { "__C_specific_handler", nullptr };
const EHPersonality
EHPersonality::MSVC_CxxFrameHandler3 = { "__CxxFrameHandler3", nullptr };
+const EHPersonality
+EHPersonality::GNU_Wasm_CPlusPlus = { "__gxx_wasm_personality_v0", nullptr };
-static const EHPersonality &getCPersonality(const llvm::Triple &T,
+static const EHPersonality &getCPersonality(const TargetInfo &Target,
const LangOptions &L) {
+ const llvm::Triple &T = Target.getTriple();
+ if (T.isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
if (L.SjLjExceptions)
return EHPersonality::GNU_C_SJLJ;
+ if (L.DWARFExceptions)
+ return EHPersonality::GNU_C;
if (L.SEHExceptions)
return EHPersonality::GNU_C_SEH;
return EHPersonality::GNU_C;
}
-static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
+static const EHPersonality &getObjCPersonality(const TargetInfo &Target,
const LangOptions &L) {
+ const llvm::Triple &T = Target.getTriple();
+ if (T.isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
+
switch (L.ObjCRuntime.getKind()) {
case ObjCRuntime::FragileMacOSX:
- return getCPersonality(T, L);
+ return getCPersonality(Target, L);
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
case ObjCRuntime::WatchOS:
@@ -145,24 +156,37 @@ static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
llvm_unreachable("bad runtime kind");
}
-static const EHPersonality &getCXXPersonality(const llvm::Triple &T,
+static const EHPersonality &getCXXPersonality(const TargetInfo &Target,
const LangOptions &L) {
+ const llvm::Triple &T = Target.getTriple();
+ if (T.isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
if (L.SjLjExceptions)
return EHPersonality::GNU_CPlusPlus_SJLJ;
+ if (L.DWARFExceptions)
+ return EHPersonality::GNU_CPlusPlus;
if (L.SEHExceptions)
return EHPersonality::GNU_CPlusPlus_SEH;
+ // Wasm EH is a non-MVP feature for now.
+ if (Target.hasFeature("exception-handling") &&
+ (T.getArch() == llvm::Triple::wasm32 ||
+ T.getArch() == llvm::Triple::wasm64))
+ return EHPersonality::GNU_Wasm_CPlusPlus;
return EHPersonality::GNU_CPlusPlus;
}
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
-static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
+static const EHPersonality &getObjCXXPersonality(const TargetInfo &Target,
const LangOptions &L) {
+ if (Target.getTriple().isWindowsMSVCEnvironment())
+ return EHPersonality::MSVC_CxxFrameHandler3;
+
switch (L.ObjCRuntime.getKind()) {
// In the fragile ABI, just use C++ exception handling and hope
// they're not doing crazy exception mixing.
case ObjCRuntime::FragileMacOSX:
- return getCXXPersonality(T, L);
+ return getCXXPersonality(Target, L);
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
@@ -170,7 +194,7 @@ static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
case ObjCRuntime::WatchOS:
- return getObjCPersonality(T, L);
+ return getObjCPersonality(Target, L);
case ObjCRuntime::GNUstep:
return EHPersonality::GNU_ObjCXX;
@@ -179,7 +203,7 @@ static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
// mixed EH. Use the ObjC personality just to avoid returning null.
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
- return getObjCPersonality(T, L);
+ return getObjCPersonality(Target, L);
}
llvm_unreachable("bad runtime kind");
}
@@ -194,30 +218,17 @@ const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
const FunctionDecl *FD) {
const llvm::Triple &T = CGM.getTarget().getTriple();
const LangOptions &L = CGM.getLangOpts();
+ const TargetInfo &Target = CGM.getTarget();
// Functions using SEH get an SEH personality.
if (FD && FD->usesSEHTry())
return getSEHPersonalityMSVC(T);
- // Try to pick a personality function that is compatible with MSVC if we're
- // not compiling Obj-C. Obj-C users better have an Obj-C runtime that supports
- // the GCC-style personality function.
- if (T.isWindowsMSVCEnvironment() && !L.ObjC1) {
- if (L.SjLjExceptions)
- return EHPersonality::GNU_CPlusPlus_SJLJ;
- if (L.DWARFExceptions)
- return EHPersonality::GNU_CPlusPlus;
- return EHPersonality::MSVC_CxxFrameHandler3;
- }
-
- if (L.CPlusPlus && L.ObjC1)
- return getObjCXXPersonality(T, L);
- else if (L.CPlusPlus)
- return getCXXPersonality(T, L);
- else if (L.ObjC1)
- return getObjCPersonality(T, L);
- else
- return getCPersonality(T, L);
+ if (L.ObjC1)
+ return L.CPlusPlus ? getObjCXXPersonality(Target, L)
+ : getObjCPersonality(Target, L);
+ return L.CPlusPlus ? getCXXPersonality(Target, L)
+ : getCPersonality(Target, L);
}
const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
@@ -313,8 +324,7 @@ void CodeGenModule::SimplifyPersonality() {
return;
const EHPersonality &ObjCXX = EHPersonality::get(*this, /*FD=*/nullptr);
- const EHPersonality &CXX =
- getCXXPersonality(getTarget().getTriple(), LangOpts);
+ const EHPersonality &CXX = getCXXPersonality(getTarget(), LangOpts);
if (&ObjCXX == &CXX)
return;
@@ -448,11 +458,9 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
- if (isNoexceptExceptionSpec(EST)) {
- if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
- // noexcept functions are simple terminate scopes.
- EHStack.pushTerminate();
- }
+ if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot) {
+ // noexcept functions are simple terminate scopes.
+ EHStack.pushTerminate();
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
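The simplified condition keys directly off FunctionProtoType::canThrow(); in source terms, a provably non-throwing noexcept function is wrapped in a terminate scope:

    void f() noexcept {   // canThrow() == CT_Cannot
      may_throw();        // an exception escaping the body hits the
    }                     // terminate scope instead of unwinding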
@@ -527,10 +535,8 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
- if (isNoexceptExceptionSpec(EST)) {
- if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
- EHStack.popTerminate();
- }
+ if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot) {
+ EHStack.popTerminate();
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
@@ -584,7 +590,7 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
llvm::BasicBlock *
CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
if (EHPersonality::get(*this).usesFuncletPads())
- return getMSVCDispatchBlock(si);
+ return getFuncletEHDispatchBlock(si);
// The dispatch block for the end of the scope chain is a block that
// just resumes unwinding.
@@ -632,7 +638,7 @@ CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
}
llvm::BasicBlock *
-CodeGenFunction::getMSVCDispatchBlock(EHScopeStack::stable_iterator SI) {
+CodeGenFunction::getFuncletEHDispatchBlock(EHScopeStack::stable_iterator SI) {
// Returning nullptr indicates that the previous dispatch block should unwind
// to caller.
if (SI == EHStack.stable_end())
@@ -646,7 +652,7 @@ CodeGenFunction::getMSVCDispatchBlock(EHScopeStack::stable_iterator SI) {
return DispatchBlock;
if (EHS.getKind() == EHScope::Terminate)
- DispatchBlock = getTerminateHandler();
+ DispatchBlock = getTerminateFunclet();
else
DispatchBlock = createBasicBlock();
CGBuilderTy Builder(*this, DispatchBlock);
@@ -926,10 +932,121 @@ static void emitCatchPadBlock(CodeGenFunction &CGF, EHCatchScope &CatchScope) {
CGF.Builder.restoreIP(SavedIP);
}
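The hunk below adds the Wasm flavor: every clause of a try shares one catchpad, and the right handler is chosen with Itanium-style selector compares. For orientation, the source shape being lowered:

    try {
      may_throw();
    } catch (int e) {   // selector compared against llvm.eh.typeid.for
      /* ... */         // of typeid(int)
    } catch (...) {     // catch-all: branched to directly when it is the
      /* ... */         // only handler, otherwise used as the final test
    }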
+// Wasm uses Windows-style EH instructions, but it merges all catch clauses into
+// one big catchpad, within which we use Itanium's landingpad-style selector
+// comparison instructions.
+static void emitWasmCatchPadBlock(CodeGenFunction &CGF,
+ EHCatchScope &CatchScope) {
+ llvm::BasicBlock *DispatchBlock = CatchScope.getCachedEHDispatchBlock();
+ assert(DispatchBlock);
+
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(DispatchBlock);
+
+ llvm::Value *ParentPad = CGF.CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGF.getLLVMContext());
+ llvm::BasicBlock *UnwindBB =
+ CGF.getEHDispatchBlock(CatchScope.getEnclosingEHScope());
+
+ unsigned NumHandlers = CatchScope.getNumHandlers();
+ llvm::CatchSwitchInst *CatchSwitch =
+ CGF.Builder.CreateCatchSwitch(ParentPad, UnwindBB, NumHandlers);
+
+ // We don't use a landingpad instruction, so generate intrinsic calls to
+ // provide exception and selector values.
+ llvm::BasicBlock *WasmCatchStartBlock = CGF.createBasicBlock("catch.start");
+ CatchSwitch->addHandler(WasmCatchStartBlock);
+ CGF.EmitBlockAfterUses(WasmCatchStartBlock);
+
+ // Create a catchpad instruction.
+ SmallVector<llvm::Value *, 4> CatchTypes;
+ for (unsigned I = 0, E = NumHandlers; I < E; ++I) {
+ const EHCatchScope::Handler &Handler = CatchScope.getHandler(I);
+ CatchTypeInfo TypeInfo = Handler.Type;
+ if (!TypeInfo.RTTI)
+ TypeInfo.RTTI = llvm::Constant::getNullValue(CGF.VoidPtrTy);
+ CatchTypes.push_back(TypeInfo.RTTI);
+ }
+ auto *CPI = CGF.Builder.CreateCatchPad(CatchSwitch, CatchTypes);
+
+ // Create calls to the wasm.get.exception and wasm.get.ehselector
+ // intrinsics. They will be lowered appropriately later; until then, they
+ // provide the exception and selector values.
+ llvm::Value *GetExnFn =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::wasm_get_exception);
+ llvm::Value *GetSelectorFn =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::wasm_get_ehselector);
+ llvm::CallInst *Exn = CGF.Builder.CreateCall(GetExnFn, CPI);
+ CGF.Builder.CreateStore(Exn, CGF.getExceptionSlot());
+ llvm::CallInst *Selector = CGF.Builder.CreateCall(GetSelectorFn, CPI);
+
+ llvm::Value *TypeIDFn = CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // If there's only a single catch-all, branch directly to its handler.
+ if (CatchScope.getNumHandlers() == 1 &&
+ CatchScope.getHandler(0).isCatchAll()) {
+ CGF.Builder.CreateBr(CatchScope.getHandler(0).Block);
+ CGF.Builder.restoreIP(SavedIP);
+ return;
+ }
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned I = 0, E = NumHandlers;; ++I) {
+ assert(I < E && "ran off end of handlers!");
+ const EHCatchScope::Handler &Handler = CatchScope.getHandler(I);
+ CatchTypeInfo TypeInfo = Handler.Type;
+ if (!TypeInfo.RTTI)
+ TypeInfo.RTTI = llvm::Constant::getNullValue(CGF.VoidPtrTy);
+
+ // Figure out the next block.
+ llvm::BasicBlock *NextBlock;
+
+ bool EmitNextBlock = false, NextIsEnd = false;
+
+ // If this is the last handler, we're at the end, and the next block is a
+ // block that contains a call to the rethrow function, so we can unwind to
+ // the enclosing EH scope. The call itself will be generated later.
+ if (I + 1 == E) {
+ NextBlock = CGF.createBasicBlock("rethrow");
+ EmitNextBlock = true;
+ NextIsEnd = true;
+
+ // If the next handler is a catch-all, we're at the end, and the
+ // next block is that handler.
+ } else if (CatchScope.getHandler(I + 1).isCatchAll()) {
+ NextBlock = CatchScope.getHandler(I + 1).Block;
+ NextIsEnd = true;
+
+ // Otherwise, we're not at the end and we need a new block.
+ } else {
+ NextBlock = CGF.createBasicBlock("catch.fallthrough");
+ EmitNextBlock = true;
+ }
+
+ // Figure out the catch type's index in the LSDA's type table.
+ llvm::CallInst *TypeIndex = CGF.Builder.CreateCall(TypeIDFn, TypeInfo.RTTI);
+ TypeIndex->setDoesNotThrow();
+
+ llvm::Value *MatchesTypeIndex =
+ CGF.Builder.CreateICmpEQ(Selector, TypeIndex, "matches");
+ CGF.Builder.CreateCondBr(MatchesTypeIndex, Handler.Block, NextBlock);
+
+ if (EmitNextBlock)
+ CGF.EmitBlock(NextBlock);
+ if (NextIsEnd)
+ break;
+ }
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
/// Emit the structure of the dispatch block for the given catch scope.
/// It is an invariant that the dispatch block already exists.
static void emitCatchDispatchBlock(CodeGenFunction &CGF,
EHCatchScope &catchScope) {
+ if (EHPersonality::get(CGF).isWasmPersonality())
+ return emitWasmCatchPadBlock(CGF, catchScope);
if (EHPersonality::get(CGF).usesFuncletPads())
return emitCatchPadBlock(CGF, catchScope);
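As a sketch of the control flow emitWasmCatchPadBlock lays down, consider a hypothetical try statement with two typed handlers and no catch-all (user code, not from the patch); the comments name the blocks created above:

struct A {};
struct B {};
void mayThrow();

void test() {
  try {
    mayThrow();
  } catch (A &) {
    // "catch.start": the selector from wasm.get.ehselector is compared
    // against llvm.eh.typeid.for of A's RTTI; on a match, branch here.
  } catch (B &) {
    // No match for A: continue in "catch.fallthrough" and test B.
  }
  // No handler matched at all: branch to the "rethrow" block, which
  // ExitCXXTryStmt later fills with a rethrow call that unwinds to the
  // enclosing EH scope.
}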
@@ -1017,6 +1134,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
unsigned NumHandlers = S.getNumHandlers();
EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
assert(CatchScope.getNumHandlers() == NumHandlers);
+ llvm::BasicBlock *DispatchBlock = CatchScope.getCachedEHDispatchBlock();
// If the catch was not required, bail out now.
if (!CatchScope.hasEHBranches()) {
@@ -1049,6 +1167,22 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
doImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
isa<CXXConstructorDecl>(CurCodeDecl);
+ // Wasm uses Windows-style EH instructions, but merges all catch clauses into
+ // one big catchpad. So we save the old funclet pad here before we traverse
+ // each catch handler.
+ SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
+ CurrentFuncletPad);
+ llvm::BasicBlock *WasmCatchStartBlock = nullptr;
+ if (EHPersonality::get(*this).isWasmPersonality()) {
+ auto *CatchSwitch =
+ cast<llvm::CatchSwitchInst>(DispatchBlock->getFirstNonPHI());
+ WasmCatchStartBlock = CatchSwitch->hasUnwindDest()
+ ? CatchSwitch->getSuccessor(1)
+ : CatchSwitch->getSuccessor(0);
+ auto *CPI = cast<llvm::CatchPadInst>(WasmCatchStartBlock->getFirstNonPHI());
+ CurrentFuncletPad = CPI;
+ }
+
// Perversely, we emit the handlers backwards precisely because we
// want them to appear in source order. In all of these cases, the
// catch block will have exactly one predecessor, which will be a
@@ -1056,7 +1190,9 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// a catch-all, one of the dispatch blocks will branch to two
// different handlers, and EmitBlockAfterUses will cause the second
// handler to be moved before the first.
+ bool HasCatchAll = false;
for (unsigned I = NumHandlers; I != 0; --I) {
+ HasCatchAll |= Handlers[I - 1].isCatchAll();
llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
EmitBlockAfterUses(CatchBlock);
@@ -1101,6 +1237,27 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
Builder.CreateBr(ContBB);
}
+ // Because in wasm we merge all catch clauses into one big catchpad, if none
+ // of the types in the catch handlers matches after we test against each of
+ // them, we should unwind to the next enclosing EH scope. We generate a call
+ // to the rethrow function here to do that.
+ if (EHPersonality::get(*this).isWasmPersonality() && !HasCatchAll) {
+ assert(WasmCatchStartBlock);
+ // Navigate for the "rethrow" block we created in emitWasmCatchPadBlock().
+ // Wasm uses landingpad-style conditional branches to compare selectors, so
+ // we follow the false destination for each of the cond branches to reach
+ // the rethrow block.
+ llvm::BasicBlock *RethrowBlock = WasmCatchStartBlock;
+ while (llvm::TerminatorInst *TI = RethrowBlock->getTerminator()) {
+ auto *BI = cast<llvm::BranchInst>(TI);
+ assert(BI->isConditional());
+ RethrowBlock = BI->getSuccessor(1);
+ }
+ assert(RethrowBlock != WasmCatchStartBlock && RethrowBlock->empty());
+ Builder.SetInsertPoint(RethrowBlock);
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
+ }
+
EmitBlock(ContBB);
incrementProfileCounter(&S);
}
@@ -1334,23 +1491,59 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
if (TerminateHandler)
return TerminateHandler;
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
-
// Set up the terminate handler. This block is inserted at the very
// end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
Builder.SetInsertPoint(TerminateHandler);
+
llvm::Value *Exn = nullptr;
+ if (getLangOpts().CPlusPlus)
+ Exn = getExceptionFromSlot();
+ llvm::CallInst *terminateCall =
+ CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
+ terminateCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateHandler;
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateFunclet() {
+ assert(EHPersonality::get(*this).usesFuncletPads() &&
+ "use getTerminateLandingPad for non-funclet EH");
+
+ llvm::BasicBlock *&TerminateFunclet = TerminateFunclets[CurrentFuncletPad];
+ if (TerminateFunclet)
+ return TerminateFunclet;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
+ TerminateFunclet = createBasicBlock("terminate.handler");
+ Builder.SetInsertPoint(TerminateFunclet);
+
+ // Create the cleanuppad using the current parent pad as its token. Use 'none'
+ // if this is a top-level terminate scope, which is the common case.
SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
CurrentFuncletPad);
- if (EHPersonality::get(*this).usesFuncletPads()) {
- llvm::Value *ParentPad = CurrentFuncletPad;
- if (!ParentPad)
- ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
- CurrentFuncletPad = Builder.CreateCleanupPad(ParentPad);
- } else {
- if (getLangOpts().CPlusPlus)
- Exn = getExceptionFromSlot();
+ llvm::Value *ParentPad = CurrentFuncletPad;
+ if (!ParentPad)
+ ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
+ CurrentFuncletPad = Builder.CreateCleanupPad(ParentPad);
+
+ // Emit the __std_terminate call.
+ llvm::Value *Exn = nullptr;
+ // In the case of the wasm personality, we need to pass the exception value
+ // to the __clang_call_terminate function.
+ if (getLangOpts().CPlusPlus &&
+ EHPersonality::get(*this).isWasmPersonality()) {
+ llvm::Value *GetExnFn =
+ CGM.getIntrinsic(llvm::Intrinsic::wasm_get_exception);
+ Exn = Builder.CreateCall(GetExnFn, CurrentFuncletPad);
}
llvm::CallInst *terminateCall =
CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
@@ -1360,7 +1553,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
// Restore the saved insertion state.
Builder.restoreIP(SavedIP);
- return TerminateHandler;
+ return TerminateFunclet;
}
llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index c7dc8337e19e..3097caacb31c 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -61,18 +61,30 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
+Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
+ CharUnits Align,
+ const Twine &Name,
+ llvm::Value *ArraySize) {
+ auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
+ Alloca->setAlignment(Align.getQuantity());
+ return Address(Alloca, Align);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block. The alloca is cast to the default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
const Twine &Name,
llvm::Value *ArraySize,
- bool CastToDefaultAddrSpace) {
- auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
- Alloca->setAlignment(Align.getQuantity());
- llvm::Value *V = Alloca;
+ Address *AllocaAddr) {
+ auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
+ if (AllocaAddr)
+ *AllocaAddr = Alloca;
+ llvm::Value *V = Alloca.getPointer();
// Alloca always returns a pointer in alloca address space, which may
// be different from the type defined by the language. For example,
// in C++ the auto variables are in the default address space. Therefore
// cast alloca to the default address space when necessary.
- if (CastToDefaultAddrSpace && getASTAllocaAddressSpace() != LangAS::Default) {
+ if (getASTAllocaAddressSpace() != LangAS::Default) {
auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
// When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
@@ -125,17 +137,26 @@ Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
}
Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
- bool CastToDefaultAddrSpace) {
+ Address *Alloca) {
// FIXME: Should we prefer the preferred type alignment here?
- return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name,
- CastToDefaultAddrSpace);
+ return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}
Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
- const Twine &Name,
- bool CastToDefaultAddrSpace) {
- return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name, nullptr,
- CastToDefaultAddrSpace);
+ const Twine &Name, Address *Alloca) {
+ return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
+ /*ArraySize=*/nullptr, Alloca);
+}
+
+Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
+ const Twine &Name) {
+ return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
+}
+
+Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
+ const Twine &Name) {
+ return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
+ Name);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
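Why the cast (and the new uncast variants) matter, sketched at the source level. This assumes a target whose alloca address space differs from the language's default one, e.g. amdgcn, where allocas live in a private address space; the example is hypothetical:

int consume(int *P);

int demo() {
  // 'Tmp' is an alloca in the target's alloca address space. Taking its
  // address as a plain 'int *' needs the addrspacecast that
  // CreateTempAlloca inserts, while the lifetime intrinsics emitted for
  // the temporary must still receive the original alloca pointer --
  // hence the CreateTempAllocaWithoutCast / AllocaAddr out-parameter
  // split introduced here.
  int Tmp = 42;
  return consume(&Tmp);
}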
@@ -187,7 +208,7 @@ RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
llvm_unreachable("bad evaluation kind");
}
-/// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
AggValueSlot AggSlot = AggValueSlot::ignored();
@@ -214,7 +235,8 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsAliased_t(!IsInit)));
+ AggValueSlot::IsAliased_t(!IsInit),
+ AggValueSlot::MayOverlap));
return;
}
@@ -347,7 +369,8 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
static Address createReferenceTemporary(CodeGenFunction &CGF,
const MaterializeTemporaryExpr *M,
- const Expr *Inner) {
+ const Expr *Inner,
+ Address *Alloca = nullptr) {
auto &TCG = CGF.getTargetHooks();
switch (M->getStorageDuration()) {
case SD_FullExpression:
@@ -380,7 +403,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
return Address(C, alignment);
}
}
- return CGF.CreateMemTemp(Ty, "ref.tmp");
+ return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
case SD_Thread:
case SD_Static:
@@ -432,7 +455,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
E->getType().getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
break;
}
}
@@ -456,7 +480,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
}
// Create and initialize the reference temporary.
- Address Object = createReferenceTemporary(*this, M, E);
+ Address Alloca = Address::invalid();
+ Address Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
Object = Address(llvm::ConstantExpr::getBitCast(
@@ -475,13 +500,13 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
case SD_Automatic:
case SD_FullExpression:
if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Object.getElementType()),
- Object.getPointer())) {
+ CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
+ Alloca.getPointer())) {
if (M->getStorageDuration() == SD_Automatic)
pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
- Object, Size);
+ Alloca, Size);
else
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Object,
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
Size);
}
break;
@@ -873,7 +898,7 @@ static llvm::Value *getArrayIndexingBound(
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
return CGF.Builder.getInt(CAT->getSize());
else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
- return CGF.getVLASize(VAT).first;
+ return CGF.getVLASize(VAT).NumElts;
// Ignore pass_object_size here. It's not applicable on decayed pointers.
}
}
@@ -1034,8 +1059,12 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Derived-to-base conversions.
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo,
- TBAAInfo);
+ // TODO: Support accesses to members of base classes in TBAA. For now, we
+ // conservatively pretend that the complete object is of the base class
+ // type.
+ if (TBAAInfo)
+ *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
+ Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
return GetAddressOfBaseClass(Addr, Derived,
CE->path_begin(), CE->path_end(),
@@ -1785,7 +1814,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
return RValue::get(Vec);
}
-/// @brief Generates lvalue for partial ext_vector access.
+/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
Address VectorAddress = LV.getExtVectorAddress();
const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
@@ -1807,7 +1836,7 @@ Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
return VectorBasePtrPlusIx;
}
-/// @brief Load of global named registers are always calls to intrinsics.
+/// Load of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
"Bad type for register variable");
@@ -2067,7 +2096,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Dst.isVolatileQualified());
}
-/// @brief Store of global named registers are always calls to intrinsics.
+/// Store of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
"Bad type for register variable");
@@ -2206,6 +2235,22 @@ static LValue EmitThreadPrivateVarDeclLValue(
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
+static Address emitDeclTargetLinkVarDeclLValue(CodeGenFunction &CGF,
+ const VarDecl *VD, QualType T) {
+ for (const auto *D : VD->redecls()) {
+ if (!VD->hasAttrs())
+ continue;
+ if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
+ if (Attr->getMapType() == OMPDeclareTargetDeclAttr::MT_Link) {
+ QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
+ Address Addr =
+ CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
+ }
+ }
+ return Address::invalid();
+}
+
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo,
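The source construct this new helper serves, as a hypothetical OpenMP 4.5 example: for a variable marked declare target link, device code must load through the pointer returned by getAddrOfDeclareTargetLink rather than referencing the host definition directly.

int big_table[1 << 20];
#pragma omp declare target link(big_table)

int get(int I) {
  int V;
#pragma omp target map(from : V)
  {
    V = big_table[I]; // device codegen: access via the link pointer
  }
  return V;
}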
@@ -2255,6 +2300,13 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
CGF.CGM.getCXXABI().usesThreadWrapperFunction())
return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
+ // Check if the variable is marked as declare target with link clause in
+ // device codegen.
+ if (CGF.getLangOpts().OpenMPIsDevice) {
+ Address Addr = emitDeclTargetLinkVarDeclLValue(CGF, VD, T);
+ if (Addr.isValid())
+ return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
+ }
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
@@ -2263,9 +2315,11 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
Address Addr(V, Alignment);
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
- if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
+ if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
+ VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
E->getExprLoc());
+ }
LValue LV = VD->getType()->isReferenceType() ?
CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
AlignmentSource::Decl) :
@@ -2446,7 +2500,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Check for OpenMP threadprivate variables.
- if (getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+ if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
+ VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
return EmitThreadPrivateVarDeclLValue(
*this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
E->getExprLoc());
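What the added OpenMPSimd checks distinguish, as a small hypothetical example: under -fopenmp this access is routed through EmitThreadPrivateVarDeclLValue and the threadprivate runtime, while under -fopenmp-simd (which sets OpenMPSimd) the attribute is ignored and 'Counter' is accessed as an ordinary global.

static int Counter;
#pragma omp threadprivate(Counter)

int bump() {
  return ++Counter; // plain load/store when only simd support is enabled
}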
@@ -2579,7 +2634,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
StringRef NameItems[] = {
PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
- if (auto *BD = dyn_cast<BlockDecl>(CurCodeDecl)) {
+ if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
std::string Name = SL->getString();
if (!Name.empty()) {
unsigned Discriminator =
@@ -2678,7 +2733,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
return Builder.CreatePtrToInt(V, TargetTy);
}
-/// \brief Emit a representation of a SourceLocation for passing to a handler
+/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
/// struct SourceLocation {
@@ -2737,7 +2792,7 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
}
namespace {
-/// \brief Specify under what conditions this check can be recovered
+/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
/// Always terminate program execution if this check fails.
Unrecoverable,
@@ -2945,6 +3000,7 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
llvm::CallInst *CheckCall;
+ llvm::Constant *SlowPathFn;
if (WithDiag) {
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
auto *InfoPtr =
@@ -2953,20 +3009,20 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
- llvm::Constant *SlowPathDiagFn = CGM.getModule().getOrInsertFunction(
+ SlowPathFn = CGM.getModule().getOrInsertFunction(
"__cfi_slowpath_diag",
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
false));
CheckCall = Builder.CreateCall(
- SlowPathDiagFn,
- {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
+ SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
} else {
- llvm::Constant *SlowPathFn = CGM.getModule().getOrInsertFunction(
+ SlowPathFn = CGM.getModule().getOrInsertFunction(
"__cfi_slowpath",
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
}
+ CGM.setDSOLocal(cast<llvm::GlobalValue>(SlowPathFn->stripPointerCasts()));
CheckCall->setDoesNotThrow();
EmitBlock(Cont);
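For reference, the two runtime entry points this hunk now funnels through the shared SlowPathFn, written as prototypes derived from the FunctionType::get calls above (the parameter names are ours):

#include <cstdint>

extern "C" {
// Used when diagnostics are disabled (trapping mode).
void __cfi_slowpath(uint64_t TypeId, void *Ptr);
// Used when diagnostics are enabled; DiagData points at the static
// check data emitted next to the call site.
void __cfi_slowpath_diag(uint64_t TypeId, void *Ptr, void *DiagData);
}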
@@ -2980,6 +3036,7 @@ void CodeGenFunction::EmitCfiCheckStub() {
llvm::Function *F = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
+ CGM.setDSOLocal(F);
llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
// FIXME: consider emitting an intrinsic call like
// call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
@@ -3018,6 +3075,11 @@ void CodeGenFunction::EmitCfiCheckFail() {
StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
SourceLocation());
+ // This function should not be affected by the blacklist. It does not have
+ // a source location, but "src:*" would still apply. Revert any changes to
+ // SanOpts made in StartFunction.
+ SanOpts = CGM.getLangOpts().Sanitize;
+
llvm::Value *Data =
EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, ArgData.getLocation());
@@ -3306,7 +3368,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// The element count here is the total number of non-VLA elements.
- llvm::Value *numElements = getVLASize(vla).first;
+ llvm::Value *numElements = getVLASize(vla).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
@@ -3540,7 +3602,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
BaseTy, VLA->getElementType(), IsLowerBound);
// The element count here is the total number of non-VLA elements.
- llvm::Value *NumElements = getVLASize(VLA).first;
+ llvm::Value *NumElements = getVLASize(VLA).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
@@ -3808,6 +3870,18 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
}
Address addr = base.getAddress();
+ if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ ClassDef->isDynamicClass()) {
+ // Getting to any field of a dynamic object requires stripping the dynamic
+ // information provided by invariant.group. This is because accessing
+ // fields may leak the real address of the dynamic object, which could
+ // result in miscompilation when the leaked pointer is later compared.
+ auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
+ addr = Address(stripped, addr.getAlignment());
+ }
+ }
+
unsigned RecordCVR = base.getVRQualifiers();
if (rec->isUnion()) {
// For unions, there is no pointer adjustment.
@@ -3816,7 +3890,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
hasAnyVptr(FieldType, getContext()))
// Because unions can easily skip invariant.barriers, we need to add
// a barrier every time a CXXRecord field with a vptr is referenced.
- addr = Address(Builder.CreateInvariantGroupBarrier(addr.getPointer()),
+ addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
addr.getAlignment());
} else {
// For structs, we GEP to the field that the record layout suggests.
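A hypothetical source-level illustration of the new strip barrier (meaningful under -fstrict-vtable-pointers); the adjacent union hunk applies the renamed launder barrier for union members with vptrs:

struct Dyn {
  virtual ~Dyn();
  int Field;
};

int load(Dyn *P) {
  // Codegen first passes P through llvm.strip.invariant.group: a field
  // access may leak the object's real address, and a leaked pointer
  // still carrying invariant.group information could miscompile later
  // pointer comparisons.
  return P->Field;
}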
@@ -4160,7 +4234,35 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
assert(OpaqueValueMappingData::shouldBindAsLValue(e));
- return getOpaqueLValueMapping(e);
+ return getOrCreateOpaqueLValueMapping(e);
+}
+
+LValue
+CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
+ it = OpaqueLValues.find(e);
+
+ if (it != OpaqueLValues.end())
+ return it->second;
+
+ assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
+ return EmitLValue(e->getSourceExpr());
+}
+
+RValue
+CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
+ assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
+ it = OpaqueRValues.find(e);
+
+ if (it != OpaqueRValues.end())
+ return it->second;
+
+ assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
+ return EmitAnyExpr(e->getSourceExpr());
}
RValue CodeGenFunction::EmitRValueForField(LValue LV,
@@ -4476,8 +4578,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
CalleeType = getContext().getCanonicalType(CalleeType);
- const auto *FnType =
- cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+ auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
CGCallee Callee = OrigCallee;
@@ -4486,8 +4587,12 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
if (llvm::Constant *PrefixSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
SanitizerScope SanScope(this);
+ // Remove any (C++17) exception specifications, to allow calling e.g. a
+ // noexcept function through a non-noexcept pointer.
+ auto ProtoTy =
+ getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(QualType(FnType, 0), /*ForEH=*/true);
+ CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty};
llvm::StructType *PrefixStructTy = llvm::StructType::get(
CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
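The C++17 case the added getFunctionTypeWithExceptionSpec call accounts for, as a hypothetical example: this call is well formed, so the -fsanitize=function signature check must compare the RTTI with the exception specification stripped, or it would flag a valid call.

void callee() noexcept;

void caller() {
  void (*Fp)() = callee; // OK: noexcept fn converts to non-noexcept ptr
  Fp();                  // prefix-signature check compares sans noexcept
}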
@@ -4527,6 +4632,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
}
}
+ const auto *FnType = cast<FunctionType>(PointeeType);
+
// If we are checking indirect calls and this call is indirect, check that the
// function pointer is a member of the bit set for the function type.
if (SanOpts.has(SanitizerKind::CFIICall) &&
@@ -4707,6 +4814,12 @@ static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
// If this semantic expression is an opaque value, bind it
// to the result of its source expression.
if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+ // Skip unique OVEs.
+ if (ov->isUnique()) {
+ assert(ov != resultExpr &&
+ "A unique OVE cannot be used as the result expression");
+ continue;
+ }
// If this is the result expression, we may need to evaluate
// directly into the slot.
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 0f05cab66d7e..291740478329 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -12,8 +12,10 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
+#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
@@ -22,6 +24,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
using namespace clang;
using namespace CodeGen;
@@ -36,23 +39,6 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot Dest;
bool IsResultUnused;
- /// We want to use 'dest' as the return slot except under two
- /// conditions:
- /// - The destination slot requires garbage collection, so we
- /// need to use the GC API.
- /// - The destination slot is potentially aliased.
- bool shouldUseDestForReturnSlot() const {
- return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
- }
-
- ReturnValueSlot getReturnValueSlot() const {
- if (!shouldUseDestForReturnSlot())
- return ReturnValueSlot();
-
- return ReturnValueSlot(Dest.getAddress(), Dest.isVolatile(),
- IsResultUnused);
- }
-
AggValueSlot EnsureSlot(QualType T) {
if (!Dest.isIgnored()) return Dest;
return CGF.CreateAggTemp(T, "agg.tmp.ensured");
@@ -62,6 +48,15 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
+ // Calls `Fn` with a valid return value slot, potentially creating a temporary
+ // to do so. If a temporary is created, an appropriate copy into `Dest` will
+ // be emitted, as will lifetime markers.
+ //
+ // The given function should take a ReturnValueSlot, and return an RValue that
+ // points to said slot.
+ void withReturnValueSlot(const Expr *E,
+ llvm::function_ref<RValue(ReturnValueSlot)> Fn);
+
public:
AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
: CGF(cgf), Builder(CGF.Builder), Dest(Dest),
@@ -76,8 +71,15 @@ public:
/// then loads the result into DestPtr.
void EmitAggLoadOfLValue(const Expr *E);
+ enum ExprValueKind {
+ EVK_RValue,
+ EVK_NonRValue
+ };
+
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
- void EmitFinalDestCopy(QualType type, const LValue &src);
+ /// SrcValueKind is EVK_RValue if the source comes from an RValue.
+ void EmitFinalDestCopy(QualType type, const LValue &src,
+ ExprValueKind SrcValueKind = EVK_NonRValue);
void EmitFinalDestCopy(QualType type, RValue src);
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
@@ -85,7 +87,7 @@ public:
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
- QualType elementType, InitListExpr *E);
+ QualType ArrayQTy, InitListExpr *E);
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
@@ -144,6 +146,7 @@ public:
void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
void VisitBinAssign(const BinaryOperator *E);
void VisitBinComma(const BinaryOperator *E);
+ void VisitBinCmp(const BinaryOperator *E);
void VisitObjCMessageExpr(ObjCMessageExpr *E);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
@@ -217,7 +220,7 @@ void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
EmitFinalDestCopy(E->getType(), LV);
}
-/// \brief True if the given aggregate type requires special GC API calls.
+/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
// Only record types have members that might require garbage collection.
const RecordType *RecordTy = T->getAs<RecordType>();
@@ -234,38 +237,78 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
return Record->hasObjectMember();
}
-/// \brief Perform the final move to DestPtr if for some reason
-/// getReturnValueSlot() didn't use it directly.
-///
-/// The idea is that you do something like this:
-/// RValue Result = EmitSomething(..., getReturnValueSlot());
-/// EmitMoveFromReturnSlot(E, Result);
-///
-/// If nothing interferes, this will cause the result to be emitted
-/// directly into the return value slot. Otherwise, a final move
-/// will be performed.
-void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
- if (shouldUseDestForReturnSlot()) {
- // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
- // The possibility of undef rvalues complicates that a lot,
- // though, so we can't really assert.
- return;
+void AggExprEmitter::withReturnValueSlot(
+ const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
+ QualType RetTy = E->getType();
+ bool RequiresDestruction =
+ Dest.isIgnored() &&
+ RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
+
+ // If it makes no observable difference, save a memcpy + temporary.
+ //
+ // We need to always provide our own temporary if destruction is required.
+ // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
+ // its lifetime before we have the chance to emit a proper destructor call.
+ bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
+ (RequiresDestruction && !Dest.getAddress().isValid());
+
+ Address RetAddr = Address::invalid();
+ Address RetAllocaAddr = Address::invalid();
+
+ EHScopeStack::stable_iterator LifetimeEndBlock;
+ llvm::Value *LifetimeSizePtr = nullptr;
+ llvm::IntrinsicInst *LifetimeStartInst = nullptr;
+ if (!UseTemp) {
+ RetAddr = Dest.getAddress();
+ } else {
+ RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
+ uint64_t Size =
+ CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
+ LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
+ if (LifetimeSizePtr) {
+ LifetimeStartInst =
+ cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
+ assert(LifetimeStartInst->getIntrinsicID() ==
+ llvm::Intrinsic::lifetime_start &&
+ "Last insertion wasn't a lifetime.start?");
+
+ CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
+ NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
+ LifetimeEndBlock = CGF.EHStack.stable_begin();
+ }
}
- // Otherwise, copy from there to the destination.
- assert(Dest.getPointer() != src.getAggregatePointer());
- EmitFinalDestCopy(E->getType(), src);
+ RValue Src =
+ EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));
+
+ if (RequiresDestruction)
+ CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);
+
+ if (!UseTemp)
+ return;
+
+ assert(Dest.getPointer() != Src.getAggregatePointer());
+ EmitFinalDestCopy(E->getType(), Src);
+
+ if (!RequiresDestruction && LifetimeStartInst) {
+ // If there's no dtor to run, the copy was the last use of our temporary.
+ // Since we're not guaranteed to be in an ExprWithCleanups, clean up
+ // eagerly.
+ CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
+ CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
+ }
}
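A hypothetical example of the aliasing case that forces UseTemp in the function above: the destination is a global the callee could reach, so the call returns into a lifetime-marked temporary that is then copied into the destination by EmitFinalDestCopy.

struct S {
  int A[8];
};

S make();
S G;

void f() {
  // Dest (G) is potentially aliased -- make() might read or write G --
  // so the result is materialized in a temporary first.
  G = make();
}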
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
assert(src.isAggregate() && "value must be aggregate value!");
LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
- EmitFinalDestCopy(type, srcLV);
+ EmitFinalDestCopy(type, srcLV, EVK_RValue);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
+void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
+ ExprValueKind SrcValueKind) {
// If Dest is ignored, then we're evaluating an aggregate expression
// in a context that doesn't care about the result. Note that loads
// from volatile l-values force the existence of a non-ignored
@@ -273,9 +316,32 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
if (Dest.isIgnored())
return;
+ // Copy non-trivial C structs here.
+ LValue DstLV = CGF.MakeAddrLValue(
+ Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
+
+ if (SrcValueKind == EVK_RValue) {
+ if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
+ if (Dest.isPotentiallyAliased())
+ CGF.callCStructMoveAssignmentOperator(DstLV, src);
+ else
+ CGF.callCStructMoveConstructor(DstLV, src);
+ return;
+ }
+ } else {
+ if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
+ if (Dest.isPotentiallyAliased())
+ CGF.callCStructCopyAssignmentOperator(DstLV, src);
+ else
+ CGF.callCStructCopyConstructor(DstLV, src);
+ return;
+ }
+ }
+
AggValueSlot srcAgg =
AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
- needsGC(type), AggValueSlot::IsAliased);
+ needsGC(type), AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
EmitCopy(type, Dest, srcAgg);
}
@@ -286,7 +352,7 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src) {
if (dest.requiresGCollection()) {
- CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
+ CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
dest.getAddress(),
@@ -298,11 +364,13 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
// If the result of the assignment is used, copy the LHS there also.
// It's volatile if either side is. Use the minimum alignment of
// the two sides.
- CGF.EmitAggregateCopy(dest.getAddress(), src.getAddress(), type,
+ LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
+ LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
+ CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
dest.isVolatile() || src.isVolatile());
}
-/// \brief Emit the initializer for a std::initializer_list initialized with a
+/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
@@ -367,7 +435,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
}
}
-/// \brief Determine if E is a trivial array filler, that is, one that is
+/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
if (!E)
@@ -390,14 +458,17 @@ static bool isTrivialFiller(Expr *E) {
return false;
}
-/// \brief Emit initialization of an array from an initializer list.
+/// Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
- QualType elementType, InitListExpr *E) {
+ QualType ArrayQTy, InitListExpr *E) {
uint64_t NumInitElements = E->getNumInits();
uint64_t NumArrayElements = AType->getNumElements();
assert(NumInitElements <= NumArrayElements);
+ QualType elementType =
+ CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
+
// DestPtr is an array*. Construct an elementType* by drilling
// down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
@@ -409,6 +480,29 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
+ // Consider initializing the array by copying from a global. For this to be
+ // more efficient than per-element initialization, the total size of the
+ // explicitly initialized elements should be large enough.
+ if (NumInitElements * elementSize.getQuantity() > 16 &&
+ elementType.isTriviallyCopyableType(CGF.getContext())) {
+ CodeGen::CodeGenModule &CGM = CGF.CGM;
+ ConstantEmitter Emitter(CGM);
+ LangAS AS = ArrayQTy.getAddressSpace();
+ if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
+ auto GV = new llvm::GlobalVariable(
+ CGM.getModule(), C->getType(),
+ CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
+ llvm::GlobalValue::PrivateLinkage, C, "constinit",
+ /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(AS));
+ Emitter.finalize(GV);
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
+ GV->setAlignment(Align.getQuantity());
+ EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
+ return;
+ }
+ }
+
// Exception safety requires us to destroy all the
// already-constructed members if an initializer throws.
// For that, we'll need an EH cleanup.
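What the new fast path does at the source level, as a hypothetical example: more than 16 bytes of explicit, trivially copyable initializers, so instead of ten scalar stores the front end emits a private constant global named "constinit" and copies from it.

void use(int *);

void fill() {
  int A[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; // 40 bytes: one copy
  use(A);
}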
@@ -540,7 +634,11 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
}
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
- EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
+ // If this is a unique OVE, just visit its source expression.
+ if (e->isUnique())
+ Visit(e->getSourceExpr());
+ else
+ EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}
void
@@ -586,12 +684,12 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
-
+
if (!Dest.isIgnored())
CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
break;
}
-
+
case CK_ToUnion: {
// Evaluate even if the destination is ignored.
if (Dest.isIgnored()) {
@@ -651,7 +749,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
if (isToAtomic) {
AggValueSlot valueDest = Dest;
if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
- // Zero-initialize. (Strictly speaking, we only need to intialize
+ // Zero-initialize. (Strictly speaking, we only need to initialize
// the padding at the end, but this is simpler.)
if (!Dest.isZeroed())
CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
@@ -665,6 +763,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
valueDest.isExternallyDestructed(),
valueDest.requiresGCollection(),
valueDest.isPotentiallyAliased(),
+ AggValueSlot::DoesNotOverlap,
AggValueSlot::IsZeroed);
}
@@ -762,13 +861,15 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
return;
}
- RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
- EmitMoveFromReturnSlot(E, RV);
+ withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
+ return CGF.EmitCallExpr(E, Slot);
+ });
}
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
- RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
- EmitMoveFromReturnSlot(E, RV);
+ withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
+ return CGF.EmitObjCMessageExpr(E, Slot);
+ });
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
@@ -781,6 +882,150 @@ void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
+enum CompareKind {
+ CK_Less,
+ CK_Greater,
+ CK_Equal,
+};
+
+static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
+ const BinaryOperator *E, llvm::Value *LHS,
+ llvm::Value *RHS, CompareKind Kind,
+ const char *NameSuffix = "") {
+ QualType ArgTy = E->getLHS()->getType();
+ if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
+ ArgTy = CT->getElementType();
+
+ if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
+ assert(Kind == CK_Equal &&
+ "member pointers may only be compared for equality");
+ return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
+ CGF, LHS, RHS, MPT, /*IsInequality*/ false);
+ }
+
+ // Compute the comparison instructions for the specified comparison kind.
+ struct CmpInstInfo {
+ const char *Name;
+ llvm::CmpInst::Predicate FCmp;
+ llvm::CmpInst::Predicate SCmp;
+ llvm::CmpInst::Predicate UCmp;
+ };
+ CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
+ using FI = llvm::FCmpInst;
+ using II = llvm::ICmpInst;
+ switch (Kind) {
+ case CK_Less:
+ return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
+ case CK_Greater:
+ return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
+ case CK_Equal:
+ return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
+ }
+ llvm_unreachable("Unrecognised CompareKind enum");
+ }();
+
+ if (ArgTy->hasFloatingRepresentation())
+ return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
+ llvm::Twine(InstInfo.Name) + NameSuffix);
+ if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
+ auto Inst =
+ ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
+ return Builder.CreateICmp(Inst, LHS, RHS,
+ llvm::Twine(InstInfo.Name) + NameSuffix);
+ }
+
+ llvm_unreachable("unsupported aggregate binary expression should have "
+ "already been handled");
+}
+
+void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
+ using llvm::BasicBlock;
+ using llvm::PHINode;
+ using llvm::Value;
+ assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
+ E->getRHS()->getType()));
+ const ComparisonCategoryInfo &CmpInfo =
+ CGF.getContext().CompCategories.getInfoForType(E->getType());
+ assert(CmpInfo.Record->isTriviallyCopyable() &&
+ "cannot copy non-trivially copyable aggregate");
+
+ QualType ArgTy = E->getLHS()->getType();
+
+ // TODO: Handle comparing these types.
+ if (ArgTy->isVectorType())
+ return CGF.ErrorUnsupported(
+ E, "aggregate three-way comparison with vector arguments");
+ if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
+ !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
+ !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
+ return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
+ }
+ bool IsComplex = ArgTy->isAnyComplexType();
+
+ // Evaluate the operands to the expression and extract their values.
+ auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
+ RValue RV = CGF.EmitAnyExpr(E);
+ if (RV.isScalar())
+ return {RV.getScalarVal(), nullptr};
+ if (RV.isAggregate())
+ return {RV.getAggregatePointer(), nullptr};
+ assert(RV.isComplex());
+ return RV.getComplexVal();
+ };
+ auto LHSValues = EmitOperand(E->getLHS()),
+ RHSValues = EmitOperand(E->getRHS());
+
+ auto EmitCmp = [&](CompareKind K) {
+ Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
+ K, IsComplex ? ".r" : "");
+ if (!IsComplex)
+ return Cmp;
+ assert(K == CompareKind::CK_Equal);
+ Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
+ RHSValues.second, K, ".i");
+ return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
+ };
+ auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
+ return Builder.getInt(VInfo->getIntValue());
+ };
+
+ Value *Select;
+ if (ArgTy->isNullPtrType()) {
+ Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
+ } else if (CmpInfo.isEquality()) {
+ Select = Builder.CreateSelect(
+ EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
+ EmitCmpRes(CmpInfo.getNonequalOrNonequiv()), "sel.eq");
+ } else if (!CmpInfo.isPartial()) {
+ Value *SelectOne =
+ Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
+ EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
+ Select = Builder.CreateSelect(EmitCmp(CK_Equal),
+ EmitCmpRes(CmpInfo.getEqualOrEquiv()),
+ SelectOne, "sel.eq");
+ } else {
+ Value *SelectEq = Builder.CreateSelect(
+ EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
+ EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
+ Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
+ EmitCmpRes(CmpInfo.getGreater()),
+ SelectEq, "sel.gt");
+ Select = Builder.CreateSelect(
+ EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
+ }
+ // Create the return value in the destination slot.
+ EnsureDest(E->getType());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
+
+ // Emit the address of the first (and only) field in the comparison category
+ // type, and initialize it from the constant integer value selected above.
+ LValue FieldLV = CGF.EmitLValueForFieldInitialization(
+ DestLV, *CmpInfo.Record->field_begin());
+ CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
+
+ // All done! The result is in the Dest slot.
+}
+
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
VisitPointerToDataMemberBinaryOperator(E);
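The construct VisitBinCmp lowers, as a hypothetical C++2a example (assumes <compare> and the three-way operator are available). For a total, non-equality-only category, two scalar comparisons feed the nested selects named sel.lt and sel.eq, and the chosen constant initializes the single field of the result object:

#include <compare>

std::strong_ordering compare(long A, long B) {
  // sel.lt = (A < B)  ? less  : greater
  // sel.eq = (A == B) ? equal : sel.lt
  return A <=> B;
}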
@@ -890,7 +1135,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
EmitCopy(E->getLHS()->getType(),
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
- AggValueSlot::IsAliased),
+ AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap),
Dest);
return;
}
@@ -911,7 +1157,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
AggValueSlot LHSSlot =
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
- AggValueSlot::IsAliased);
+ AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
// A non-volatile aggregate destination might have volatile member.
if (!LHSSlot.isVolatile() &&
CGF.hasVolatileMember(E->getLHS()->getType()))
@@ -1089,6 +1336,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
+ AggValueSlot::MayOverlap,
Dest.isZeroed()));
return;
case TEK_Scalar:
@@ -1156,11 +1404,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- QualType elementType =
- CGF.getContext().getAsArrayType(E->getType())->getElementType();
-
auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
- EmitArrayInit(Dest.getAddress(), AType, elementType, E);
+ EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
return;
}
@@ -1190,11 +1435,12 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
Dest.getAddress(), CXXRD, BaseRD,
/*isBaseVirtual*/ false);
- AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot AggSlot = AggValueSlot::forAddr(
+ V, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ CGF.overlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
if (QualType::DestructionKind dtorKind =
@@ -1375,7 +1621,9 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.
auto elementSlot = AggValueSlot::forLValue(
elementLV, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap);
AggExprEmitter(CGF, elementSlot, false)
.VisitArrayInitLoopExpr(InnerLoop, outerBegin);
} else
@@ -1425,6 +1673,8 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// If this is an initlist expr, sum up the size of sizes of the (present)
// elements. If this is something weird, assume the whole thing is non-zero.
const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+ while (ILE && ILE->isTransparent())
+ ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
return CGF.getContext().getTypeSizeInChars(E->getType());
@@ -1491,7 +1741,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
}
// If the type is 16-bytes or smaller, prefer individual stores over memset.
- CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
+ CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
if (Size <= CharUnits::fromQuantity(16))
return;
@@ -1537,16 +1787,42 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
LValue LV = MakeAddrLValue(Temp, E->getType());
EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
return LV;
}
-void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
- Address SrcPtr, QualType Ty,
- bool isVolatile,
- bool isAssignment) {
+AggValueSlot::Overlap_t CodeGenFunction::overlapForBaseInit(
+ const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
+ // Virtual bases are initialized first, in address order, so there's never
+ // any overlap during their initialization.
+ //
+ // FIXME: Under P0840, this is no longer true: the tail padding of a vbase
+ // of a field could be reused by a vbase of a containing class.
+ if (IsVirtual)
+ return AggValueSlot::DoesNotOverlap;
+
+ // If the base class is laid out entirely within the nvsize of the derived
+ // class, its tail padding cannot yet be initialized, so we can issue
+ // stores at the full width of the base class.
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ if (Layout.getBaseClassOffset(BaseRD) +
+ getContext().getASTRecordLayout(BaseRD).getSize() <=
+ Layout.getNonVirtualSize())
+ return AggValueSlot::DoesNotOverlap;
+
+ // The tail padding may contain values we need to preserve.
+ return AggValueSlot::MayOverlap;
+}
+
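A layout example for the two cases overlapForBaseInit distinguishes, assuming the Itanium C++ ABI's tail-padding reuse for non-POD bases:

struct B {
  B();    // non-POD for layout purposes: dsize(B) == 5, sizeof(B) == 8
  int I;  // bytes 0-3
  char C; // byte 4
};

struct D : B {
  char X; // placed at offset 5, inside B's tail padding
};

// Initializing D's B subobject: bases are constructed before members,
// and offset(B) + sizeof(B) == 8 <= nvsize(D) == 8, so D::X holds no
// value yet and stores may use B's full width (DoesNotOverlap). Only a
// base whose extent crosses nvsize -- into storage where
// already-constructed virtual bases may live -- must have its tail
// padding preserved (MayOverlap).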
+void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
+ AggValueSlot::Overlap_t MayOverlap,
+ bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+ Address DestPtr = Dest.getAddress();
+ Address SrcPtr = Src.getAddress();
+
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
@@ -1562,7 +1838,7 @@ void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
return;
}
}
-
+
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
// read from another object that overlaps in anyway the storage of the first
@@ -1574,12 +1850,11 @@ void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- // Get data size info for this aggregate. If this is an assignment,
- // don't copy the tail padding, because we might be assigning into a
- // base subobject where the tail padding is claimed. Otherwise,
- // copying it is fine.
+ // Get data size info for this aggregate. Don't copy the tail padding if this
+ // might be a potentially-overlapping subobject, since the tail padding might
+ // be occupied by a different object. Otherwise, copying it is fine.
std::pair<CharUnits, CharUnits> TypeInfo;
- if (isAssignment)
+ if (MayOverlap)
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
else
TypeInfo = getContext().getTypeInfoInChars(Ty);
@@ -1591,22 +1866,11 @@ void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
getContext().getAsArrayType(Ty))) {
QualType BaseEltTy;
SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
- TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
- std::pair<CharUnits, CharUnits> LastElementTypeInfo;
- if (!isAssignment)
- LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
+ TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
assert(!TypeInfo.first.isZero());
SizeVal = Builder.CreateNUWMul(
SizeVal,
llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
- if (!isAssignment) {
- SizeVal = Builder.CreateNUWSub(
- SizeVal,
- llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
- SizeVal = Builder.CreateNUWAdd(
- SizeVal, llvm::ConstantInt::get(
- SizeTy, LastElementTypeInfo.first.getQuantity()));
- }
}
}
if (!SizeVal) {
@@ -1657,4 +1921,10 @@ void CodeGenFunction::EmitAggregateCopy(Address DestPtr,
// the optimizer wishes to expand it in to scalar memory operations.
if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (CGM.getCodeGenOpts().NewStructPathTBAA) {
+ TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
+ Dest.getTBAAInfo(), Src.getTBAAInfo());
+ CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
+ }
}
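The data-size distinction matters in cases like the following sketch (Itanium-ABI layout assumed; names are illustrative):

    // Assigning through the B subobject must copy only B's 5 data bytes: a
    // full sizeof(B) == 8 copy would clobber D::d, which the ABI places in
    // B's tail padding.
    struct B { int i; char c; B &operator=(const B &) = default; };
    struct D : B { char d; };

    void assign(D &lhs, const B &rhs) {
      static_cast<B &>(lhs) = rhs;  // memcpy of dsize(B), not sizeof(B)
    }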
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index c32f1e5415da..8955d8a4a83c 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -242,11 +242,15 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
}
- Address This = Address::invalid();
- if (IsArrow)
- This = EmitPointerWithAlignment(Base);
- else
- This = EmitLValue(Base).getAddress();
+ LValue This;
+ if (IsArrow) {
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+ Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
+ This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
+ } else {
+ This = EmitLValue(Base);
+ }
if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
@@ -261,10 +265,10 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// when it isn't necessary; just produce the proper effect here.
LValue RHS = isa<CXXOperatorCallExpr>(CE)
? MakeNaturalAlignAddrLValue(
- (*RtlArgs)[0].RV.getScalarVal(),
+ (*RtlArgs)[0].getRValue(*this).getScalarVal(),
(*(CE->arg_begin() + 1))->getType())
: EmitLValue(*CE->arg_begin());
- EmitAggregateAssign(This, RHS.getAddress(), CE->getType());
+ EmitAggregateAssign(This, RHS, CE->getType());
return RValue::get(This.getPointer());
}
@@ -272,8 +276,13 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
// Trivial move and copy ctor are the same.
assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
- Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
+ const Expr *Arg = *CE->arg_begin();
+ LValue RHS = EmitLValue(Arg);
+ LValue Dest = MakeAddrLValue(This.getAddress(), Arg->getType());
+ // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
+ // constructing a new complete object of type Ctor.
+ EmitAggregateCopy(Dest, RHS, Arg->getType(),
+ AggValueSlot::DoesNotOverlap);
return RValue::get(This.getPointer());
}
llvm_unreachable("unknown trivial member function");
@@ -335,7 +344,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
CGM.getCXXABI().EmitVirtualDestructorCall(
- *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
+ *this, Dtor, Dtor_Complete, This.getAddress(),
+ cast<CXXMemberCallExpr>(CE));
} else {
CGCallee Callee;
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
@@ -364,15 +374,15 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
Ctor);
} else if (UseVirtualCall) {
- Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
- CE->getLocStart());
+ Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
MD->getParent()->isDynamicClass()) {
llvm::Value *VTable;
const CXXRecordDecl *RD;
std::tie(VTable, RD) =
- CGM.getCXXABI().LoadVTablePtr(*this, This, MD->getParent());
+ CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(),
+ MD->getParent());
EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getLocStart());
}
@@ -388,8 +398,10 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
if (MD->isVirtual()) {
- This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
- *this, CalleeDecl, This, UseVirtualCall);
+ Address NewThisAddr =
+ CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
+ *this, CalleeDecl, This.getAddress(), UseVirtualCall);
+ This.setAddress(NewThisAddr);
}
return EmitCXXMemberOrOperatorCall(
@@ -622,7 +634,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Call the constructor.
EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
- Dest.getAddress(), E);
+ Dest.getAddress(), E, Dest.mayOverlap());
}
}
@@ -924,7 +936,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
- QualType AllocType, Address NewPtr) {
+ QualType AllocType, Address NewPtr,
+ AggValueSlot::Overlap_t MayOverlap) {
// FIXME: Refactor with EmitExprAsInit.
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
@@ -940,7 +953,8 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
= AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ MayOverlap);
CGF.EmitAggExpr(Init, Slot);
return;
}
@@ -1009,7 +1023,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap);
EmitAggExpr(ILE->getInit(0), Slot);
// Move past these elements.
@@ -1074,7 +1089,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
// an array, and we have an array filler, we can fold together the two
// initialization loops.
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
- ILE->getInit(i)->getType(), CurPtr);
+ ILE->getInit(i)->getType(), CurPtr,
+ AggValueSlot::DoesNotOverlap);
CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
Builder.getSize(1),
"array.exp.next"),
@@ -1227,7 +1243,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Emit the initializer into this element.
- StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);
+ StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
+ AggValueSlot::DoesNotOverlap);
// Leave the Cleanup if we entered one.
if (CleanupDominator) {
@@ -1258,7 +1275,8 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
AllocSizeWithoutCookie);
else if (const Expr *Init = E->getInitializer())
- StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
+ StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
+ AggValueSlot::DoesNotOverlap);
}
/// Emit a call to an operator new or operator delete function, as implicitly
@@ -1298,19 +1316,19 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
}
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
- const Expr *Arg,
+ const CallExpr *TheCall,
bool IsDelete) {
CallArgList Args;
- const Stmt *ArgS = Arg;
- EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS));
+ EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
// Find the allocation or deallocation function that we're calling.
ASTContext &Ctx = getContext();
DeclarationName Name = Ctx.DeclarationNames
.getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
+
for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
if (auto *FD = dyn_cast<FunctionDecl>(Decl))
if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
- return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
+ return EmitNewDeleteCall(*this, FD, Type, Args);
llvm_unreachable("predeclared global operator new/delete is missing");
}
@@ -1481,7 +1499,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
AllocAlign);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
auto &Arg = NewArgs[I + NumNonPlacementArgs];
- Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
+ Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
}
return;
@@ -1512,8 +1530,8 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
AllocAlign);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
auto &Arg = NewArgs[I + NumNonPlacementArgs];
- Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
- Arg.Ty);
+ Cleanup->setPlacementArg(
+ I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
}
CGF.initFullExprCleanup();
@@ -1678,13 +1696,13 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Type *elementTy = ConvertTypeForMem(allocType);
Address result = Builder.CreateElementBitCast(allocation, elementTy);
- // Passing pointer through invariant.group.barrier to avoid propagation of
+ // Passing pointer through launder.invariant.group to avoid propagation of
// vptr information which may be included in the previous type.
// To avoid breaking LTO with different optimization levels, we do it
// regardless of optimization level.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
allocator->isReservedGlobalPlacementOperator())
- result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
+ result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
result.getAlignment());
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 9094d3f8a91c..fb176093a741 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -155,8 +155,9 @@ public:
}
ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
- return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
- return CGF.getOpaqueRValueMapping(E).getComplexVal();
+ return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
+ E->getExprLoc());
+ return CGF.getOrCreateOpaqueRValueMapping(E).getComplexVal();
}
ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
@@ -594,7 +595,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
return ComplexPairTy(ResR, ResI);
}
-/// \brief Emit a libcall for a binary operation on complex types.
+/// Emit a libcall for a binary operation on complex types.
ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
const BinOpInfo &Op) {
CallArgList Args;
@@ -628,11 +629,11 @@ ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
llvm::Instruction *Call;
RValue Res = CGF.EmitCall(FuncInfo, Callee, ReturnValueSlot(), Args, &Call);
- cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
+ cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getRuntimeCC());
return Res.getComplexVal();
}
-/// \brief Lookup the libcall name for a given floating point type complex
+/// Lookup the libcall name for a given floating point type complex
/// multiply.
static StringRef getComplexMultiplyLibCallName(llvm::Type *Ty) {
switch (Ty->getTypeID()) {
@@ -1055,7 +1056,7 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
}
- // Empty init list intializes to null
+ // Empty init list initializes to null
assert(E->getNumInits() == 0 && "Unexpected number of inits");
QualType Ty = E->getType()->castAs<ComplexType>()->getElementType();
llvm::Type* LTy = CGF.ConvertType(Ty);
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index d1b9e13a6f93..cfd0b859233a 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -635,6 +635,72 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
return ConstantAddress(GV, Align);
}
+static llvm::Constant *
+EmitArrayConstant(CodeGenModule &CGM, const ConstantArrayType *DestType,
+ llvm::Type *CommonElementType, unsigned ArrayBound,
+ SmallVectorImpl<llvm::Constant *> &Elements,
+ llvm::Constant *Filler) {
+ // Figure out how long the initial prefix of non-zero elements is.
+ unsigned NonzeroLength = ArrayBound;
+ if (Elements.size() < NonzeroLength && Filler->isNullValue())
+ NonzeroLength = Elements.size();
+ if (NonzeroLength == Elements.size()) {
+ while (NonzeroLength > 0 && Elements[NonzeroLength - 1]->isNullValue())
+ --NonzeroLength;
+ }
+
+ if (NonzeroLength == 0) {
+ return llvm::ConstantAggregateZero::get(
+ CGM.getTypes().ConvertType(QualType(DestType, 0)));
+ }
+
+ // Add a zeroinitializer array filler if we have lots of trailing zeroes.
+ unsigned TrailingZeroes = ArrayBound - NonzeroLength;
+ if (TrailingZeroes >= 8) {
+ assert(Elements.size() >= NonzeroLength &&
+ "missing initializer for non-zero element");
+
+ // If all the elements had the same type up to the trailing zeroes, emit a
+ // struct of two arrays (the nonzero data and the zeroinitializer).
+ if (CommonElementType && NonzeroLength >= 8) {
+ llvm::Constant *Initial = llvm::ConstantArray::get(
+ llvm::ArrayType::get(CommonElementType, NonzeroLength),
+ makeArrayRef(Elements).take_front(NonzeroLength));
+ Elements.resize(2);
+ Elements[0] = Initial;
+ } else {
+ Elements.resize(NonzeroLength + 1);
+ }
+
+ auto *FillerType =
+ CommonElementType
+ ? CommonElementType
+ : CGM.getTypes().ConvertType(DestType->getElementType());
+ FillerType = llvm::ArrayType::get(FillerType, TrailingZeroes);
+ Elements.back() = llvm::ConstantAggregateZero::get(FillerType);
+ CommonElementType = nullptr;
+ } else if (Elements.size() != ArrayBound) {
+ // Otherwise pad to the right size with the filler if necessary.
+ Elements.resize(ArrayBound, Filler);
+ if (Filler->getType() != CommonElementType)
+ CommonElementType = nullptr;
+ }
+
+ // If all elements have the same type, just emit an array constant.
+ if (CommonElementType)
+ return llvm::ConstantArray::get(
+ llvm::ArrayType::get(CommonElementType, ArrayBound), Elements);
+
+ // We have mixed types. Use a packed struct.
+ llvm::SmallVector<llvm::Type *, 16> Types;
+ Types.reserve(Elements.size());
+ for (llvm::Constant *Elt : Elements)
+ Types.push_back(Elt->getType());
+ llvm::StructType *SType =
+ llvm::StructType::get(CGM.getLLVMContext(), Types, true);
+ return llvm::ConstantStruct::get(SType, Elements);
+}
+
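A sketch of the effect on a mostly-zero initializer (the exact IR shown is illustrative):

    int arr[1000] = {1, 2, 3, 4, 5, 6, 7, 8};

    // With at least 8 same-typed nonzero elements and at least 8 trailing
    // zeroes, the constant is expected to come out roughly as a packed
    // struct of the nonzero prefix plus one zeroinitializer array:
    //
    //   <{ [8 x i32], [992 x i32] }>
    //   <{ [8 x i32] [i32 1, ..., i32 8], [992 x i32] zeroinitializer }>
    //
    // A shorter nonzero prefix instead emits its elements individually,
    // still followed by the single zeroinitializer tail.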
/// This class only needs to handle two cases:
/// 1) Literals (this is used by APValue emission to emit literals).
/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
@@ -832,60 +898,47 @@ public:
}
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE, QualType T) {
- llvm::ArrayType *AType =
- cast<llvm::ArrayType>(ConvertType(ILE->getType()));
- llvm::Type *ElemTy = AType->getElementType();
+ auto *CAT = CGM.getContext().getAsConstantArrayType(ILE->getType());
+ assert(CAT && "can't emit array init for non-constant-bound array");
unsigned NumInitElements = ILE->getNumInits();
- unsigned NumElements = AType->getNumElements();
+ unsigned NumElements = CAT->getSize().getZExtValue();
// Initialising an array requires us to automatically
// initialise any elements that have not been initialised explicitly
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
- QualType EltType = CGM.getContext().getAsArrayType(T)->getElementType();
+ QualType EltType = CAT->getElementType();
// Initialize remaining array elements.
- llvm::Constant *fillC;
- if (Expr *filler = ILE->getArrayFiller())
+ llvm::Constant *fillC = nullptr;
+ if (Expr *filler = ILE->getArrayFiller()) {
fillC = Emitter.tryEmitAbstractForMemory(filler, EltType);
- else
- fillC = Emitter.emitNullForMemory(EltType);
- if (!fillC)
- return nullptr;
-
- // Try to use a ConstantAggregateZero if we can.
- if (fillC->isNullValue() && !NumInitableElts)
- return llvm::ConstantAggregateZero::get(AType);
+ if (!fillC)
+ return nullptr;
+ }
// Copy initializer elements.
SmallVector<llvm::Constant*, 16> Elts;
- Elts.reserve(NumInitableElts + NumElements);
+ if (fillC && fillC->isNullValue())
+ Elts.reserve(NumInitableElts + 1);
+ else
+ Elts.reserve(NumElements);
- bool RewriteType = false;
+ llvm::Type *CommonElementType = nullptr;
for (unsigned i = 0; i < NumInitableElts; ++i) {
Expr *Init = ILE->getInit(i);
llvm::Constant *C = Emitter.tryEmitPrivateForMemory(Init, EltType);
if (!C)
return nullptr;
- RewriteType |= (C->getType() != ElemTy);
+ if (i == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = nullptr;
Elts.push_back(C);
}
- RewriteType |= (fillC->getType() != ElemTy);
- Elts.resize(NumElements, fillC);
-
- if (RewriteType) {
- // FIXME: Try to avoid packing the array
- std::vector<llvm::Type*> Types;
- Types.reserve(NumInitableElts + NumElements);
- for (unsigned i = 0, e = Elts.size(); i < e; ++i)
- Types.push_back(Elts[i]->getType());
- llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
- Types, true);
- return llvm::ConstantStruct::get(SType, Elts);
- }
-
- return llvm::ConstantArray::get(AType, Elts);
+ return EmitArrayConstant(CGM, CAT, CommonElementType, NumElements, Elts,
+ fillC);
}
llvm::Constant *EmitRecordInitialization(InitListExpr *ILE, QualType T) {
@@ -1881,40 +1934,31 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
case APValue::Union:
return ConstStructBuilder::BuildStruct(*this, Value, DestType);
case APValue::Array: {
- const ArrayType *CAT = CGM.getContext().getAsArrayType(DestType);
+ const ConstantArrayType *CAT =
+ CGM.getContext().getAsConstantArrayType(DestType);
unsigned NumElements = Value.getArraySize();
unsigned NumInitElts = Value.getArrayInitializedElts();
// Emit array filler, if there is one.
llvm::Constant *Filler = nullptr;
- if (Value.hasArrayFiller())
+ if (Value.hasArrayFiller()) {
Filler = tryEmitAbstractForMemory(Value.getArrayFiller(),
CAT->getElementType());
-
- // Emit initializer elements.
- llvm::Type *CommonElementType =
- CGM.getTypes().ConvertType(CAT->getElementType());
-
- // Try to use a ConstantAggregateZero if we can.
- if (Filler && Filler->isNullValue() && !NumInitElts) {
- llvm::ArrayType *AType =
- llvm::ArrayType::get(CommonElementType, NumElements);
- return llvm::ConstantAggregateZero::get(AType);
+ if (!Filler)
+ return nullptr;
}
+ // Emit initializer elements.
SmallVector<llvm::Constant*, 16> Elts;
- Elts.reserve(NumElements);
- for (unsigned I = 0; I < NumElements; ++I) {
- llvm::Constant *C = Filler;
- if (I < NumInitElts) {
- C = tryEmitPrivateForMemory(Value.getArrayInitializedElt(I),
- CAT->getElementType());
- } else if (!Filler) {
- assert(Value.hasArrayFiller() &&
- "Missing filler for implicit elements of initializer");
- C = tryEmitPrivateForMemory(Value.getArrayFiller(),
- CAT->getElementType());
- }
+ if (Filler && Filler->isNullValue())
+ Elts.reserve(NumInitElts + 1);
+ else
+ Elts.reserve(NumElements);
+
+ llvm::Type *CommonElementType = nullptr;
+ for (unsigned I = 0; I < NumInitElts; ++I) {
+ llvm::Constant *C = tryEmitPrivateForMemory(
+ Value.getArrayInitializedElt(I), CAT->getElementType());
if (!C) return nullptr;
if (I == 0)
@@ -1924,20 +1968,8 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
Elts.push_back(C);
}
- if (!CommonElementType) {
- // FIXME: Try to avoid packing the array
- std::vector<llvm::Type*> Types;
- Types.reserve(NumElements);
- for (unsigned i = 0, e = Elts.size(); i < e; ++i)
- Types.push_back(Elts[i]->getType());
- llvm::StructType *SType =
- llvm::StructType::get(CGM.getLLVMContext(), Types, true);
- return llvm::ConstantStruct::get(SType, Elts);
- }
-
- llvm::ArrayType *AType =
- llvm::ArrayType::get(CommonElementType, NumElements);
- return llvm::ConstantArray::get(AType, Elts);
+ return EmitArrayConstant(CGM, CAT, CommonElementType, NumElements, Elts,
+ Filler);
}
case APValue::MemberPointer:
return CGM.getCXXABI().EmitMemberPointer(Value, DestType);
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index c46215067a68..783f74c5026d 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -165,7 +165,7 @@ static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
// If a unary op has a widened operand, the op cannot overflow.
if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
- return IsWidenedIntegerOp(Ctx, UO->getSubExpr());
+ return !UO->canOverflow();
// We usually don't need overflow checks for binops with widened operands.
// Multiplication with promoted unsigned operands is a special case.
@@ -387,6 +387,9 @@ public:
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
return Builder.getInt(E->getValue());
}
+ Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
+ return Builder.getInt(E->getValue());
+ }
Value *VisitFloatingLiteral(const FloatingLiteral *E) {
return llvm::ConstantFP::get(VMContext, E->getValue());
}
@@ -422,10 +425,11 @@ public:
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
- return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
+ return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
+ E->getExprLoc());
// Otherwise, assume the mapping is the scalar directly.
- return CGF.getOpaqueRValueMapping(E).getScalarVal();
+ return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
}
Value *emitConstant(const CodeGenFunction::ConstantEmission &Constant,
@@ -1144,7 +1148,7 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}
-/// \brief Emit a sanitization check for the given "binary" operation (which
+/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
@@ -1617,6 +1621,24 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CE->getLocStart());
}
+ if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
+ const QualType SrcType = E->getType();
+
+ if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
+ // Casting to pointer that could carry dynamic information (provided by
+ // invariant.group) requires launder.
+ Src = Builder.CreateLaunderInvariantGroup(Src);
+ } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
+ // Casting to a pointer that does not carry dynamic information
+ // (provided by invariant.group) requires stripping it. Note that we
+ // don't strip when the source could not be a dynamic type and the
+ // destination could be: the dynamic information is laundered anyway,
+ // and launder(strip(src)) == launder(src), so there is no need to add
+ // an extra strip before the launder.
+ Src = Builder.CreateStripInvariantGroup(Src);
+ }
+ }
+
return Builder.CreateBitCast(Src, DstTy);
}
case CK_AddressSpaceConversion: {
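A sketch of the source pattern the new bitcast handling targets, assuming -fstrict-vtable-pointers semantics as described in the comments above:

    // 'void *' cannot carry dynamic (vptr) information while 'Dyn *' can,
    // so this cast is expected to be routed through
    // llvm.launder.invariant.group.
    struct Dyn { virtual void f(); };

    Dyn *fromStorage(void *storage) {
      return static_cast<Dyn *>(storage);  // launder inserted here
    }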
@@ -1753,12 +1775,31 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
- return Builder.CreateIntToPtr(IntResult, DestLLVMTy);
+ auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
+
+ if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
+ // Going from integer to pointer that could be dynamic requires reloading
+ // dynamic information from invariant.group.
+ if (DestTy.mayBeDynamicClass())
+ IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
+ }
+ return IntToPtr;
}
- case CK_PointerToIntegral:
+ case CK_PointerToIntegral: {
assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
- return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
+ auto *PtrExpr = Visit(E);
+
+ if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
+ const QualType SrcType = E->getType();
+
+ // Casting to an integer requires stripping dynamic information, as an
+ // integer does not carry it.
+ if (SrcType.mayBeDynamicClass())
+ PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
+ }
+ return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
+ }
case CK_ToVoid: {
CGF.EmitIgnoredExpr(E);
return nullptr;
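The integer casts follow the same rule, sketched here: ptrtoint strips and inttoptr launders, so no dynamic information survives a round-trip through an integer.

    #include <cstdint>

    struct Dyn { virtual void f(); };

    std::uintptr_t save(Dyn *p) {
      return reinterpret_cast<std::uintptr_t>(p);  // strip.invariant.group
    }

    Dyn *restore(std::uintptr_t v) {
      return reinterpret_cast<Dyn *>(v);           // launder.invariant.group
    }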
@@ -1873,7 +1914,7 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
return Builder.CreateNSWAdd(InVal, Amount, Name);
// Fall through.
case LangOptions::SOB_Trapping:
- if (IsWidenedIntegerOp(CGF.getContext(), E->getSubExpr()))
+ if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
}
@@ -1955,11 +1996,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else if (type->isIntegerType()) {
// Note that signed integer inc/dec with width less than int can't
// overflow because of promotion rules; we're just eliding a few steps here.
- bool CanOverflow = value->getType()->getIntegerBitWidth() >=
- CGF.IntTy->getIntegerBitWidth();
- if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
+ if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
- } else if (CanOverflow && type->isUnsignedIntegerType() &&
+ } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
value =
EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
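A small example of what UnaryOperator::canOverflow() encodes for the increment paths above:

    void bump(short s, int i) {
      ++s;  // operand is promoted to int, so the add cannot overflow:
            // canOverflow() is false and no nsw/trap/check handling applies
      ++i;  // full-width int: canOverflow() is true, so the configured
            // signed-overflow behavior (wrap, nsw, or trap) is honored
    }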
@@ -1975,7 +2014,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// VLA types don't have constant size.
if (const VariableArrayType *vla
= CGF.getContext().getAsVariableArrayType(type)) {
- llvm::Value *numElts = CGF.getVLASize(vla).first;
+ llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
@@ -2273,16 +2312,13 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
CGF.EmitIgnoredExpr(E->getArgumentExpr());
}
- QualType eltType;
- llvm::Value *numElts;
- std::tie(numElts, eltType) = CGF.getVLASize(VAT);
-
- llvm::Value *size = numElts;
+ auto VlaSize = CGF.getVLASize(VAT);
+ llvm::Value *size = VlaSize.NumElts;
// Scale the number of non-VLA elements by the non-VLA element size.
- CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
if (!eltSize.isOne())
- size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
+ size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
return size;
}
@@ -2769,7 +2805,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (const VariableArrayType *vla
= CGF.getContext().getAsVariableArrayType(elementType)) {
// The element count here is the total number of non-VLA elements.
- llvm::Value *numElements = CGF.getVLASize(vla).first;
+ llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
@@ -2964,10 +3000,9 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// For a variable-length array, this is going to be non-constant.
if (const VariableArrayType *vla
= CGF.getContext().getAsVariableArrayType(elementType)) {
- llvm::Value *numElements;
- std::tie(numElements, elementType) = CGF.getVLASize(vla);
-
- divisor = numElements;
+ auto VlaSize = CGF.getVLASize(vla);
+ elementType = VlaSize.Type;
+ divisor = VlaSize.NumElts;
// Scale the number of non-VLA elements by the non-VLA element size.
CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
@@ -3243,6 +3278,23 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
} else {
// Unsigned integers and pointers.
+
+ if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
+ !isa<llvm::ConstantPointerNull>(LHS) &&
+ !isa<llvm::ConstantPointerNull>(RHS)) {
+
+ // Dynamic information must be stripped for comparisons, because a
+ // comparison could otherwise leak it: based on comparisons of pointers
+ // to dynamic objects, the optimizer can replace one pointer with
+ // another, which might be incorrect in the presence of invariant
+ // groups. Comparison with null is safe because null does not carry any
+ // dynamic information.
+ if (LHSTy.mayBeDynamicClass())
+ LHS = Builder.CreateStripInvariantGroup(LHS);
+ if (RHSTy.mayBeDynamicClass())
+ RHS = Builder.CreateStripInvariantGroup(RHS);
+ }
+
Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
}
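The classic case this stripping protects against, as a sketch:

    #include <new>

    struct A { virtual void f(); };
    struct B : A { void f() override; };

    void reuse(A *p) {
      p->~A();
      A *q = new (p) B;  // same address, different dynamic type
      // Without stripping, 'p == q' could let the optimizer substitute p
      // (with its stale invariant.group vptr information) for q.
      if (p == q)
        q->f();          // must dispatch to B::f
    }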
@@ -3433,6 +3485,12 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Insert an entry into the phi node for the edge with the value of RHSCond.
PN->addIncoming(RHSCond, RHSBlock);
+ // Artificial location to preserve the scope information
+ {
+ auto NL = ApplyDebugLocation::CreateArtificial(CGF);
+ PN->setDebugLoc(Builder.getCurrentDebugLocation());
+ }
+
// ZExt result to int.
return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
diff --git a/lib/CodeGen/CGGPUBuiltin.cpp b/lib/CodeGen/CGGPUBuiltin.cpp
index 48156b1b26b7..b5375ffb8db7 100644
--- a/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/lib/CodeGen/CGGPUBuiltin.cpp
@@ -83,8 +83,9 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
/* ParamsToSkip = */ 0);
// We don't know how to emit non-scalar varargs.
- if (std::any_of(Args.begin() + 1, Args.end(),
- [](const CallArg &A) { return !A.RV.isScalar(); })) {
+ if (std::any_of(Args.begin() + 1, Args.end(), [&](const CallArg &A) {
+ return !A.getRValue(*this).isScalar();
+ })) {
CGM.ErrorUnsupported(E, "non-scalar arg to printf");
return RValue::get(llvm::ConstantInt::get(IntTy, 0));
}
@@ -97,7 +98,7 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
} else {
llvm::SmallVector<llvm::Type *, 8> ArgTypes;
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
- ArgTypes.push_back(Args[I].RV.getScalarVal()->getType());
+ ArgTypes.push_back(Args[I].getRValue(*this).getScalarVal()->getType());
// Using llvm::StructType is correct only because printf doesn't accept
// aggregates. If we had to handle aggregates here, we'd have to manually
@@ -109,7 +110,7 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
- llvm::Value *Arg = Args[I].RV.getScalarVal();
+ llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
}
BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
@@ -117,6 +118,6 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
// Invoke vprintf and return.
llvm::Function* VprintfFunc = GetVprintfDeclaration(CGM.getModule());
- return RValue::get(
- Builder.CreateCall(VprintfFunc, {Args[0].RV.getScalarVal(), BufferPtr}));
+ return RValue::get(Builder.CreateCall(
+ VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
}
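Expressed as plain C++, the lowering above behaves roughly like this sketch; PackedArgs and emulated_printf are hypothetical names, and the vprintf declaration mirrors the CUDA device-side entry point:

    // For a device-side call like printf("%d %f\n", i, d):
    struct PackedArgs { int i; double d; };  // one field per scalar vararg

    extern "C" int vprintf(const char *fmt, void *args);

    int emulated_printf(const char *fmt, int i, double d) {
      PackedArgs buf{i, d};  // the CreateAlignedStore loop fills these slots
      return vprintf(fmt, &buf);
    }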
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index 15608c105dc7..9d5f23ff9a2a 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -32,62 +32,62 @@ class Attr;
class ASTContext;
namespace CodeGen {
-/// \brief Attributes that may be specified on loops.
+/// Attributes that may be specified on loops.
struct LoopAttributes {
explicit LoopAttributes(bool IsParallel = false);
void clear();
- /// \brief Generate llvm.loop.parallel metadata for loads and stores.
+ /// Generate llvm.loop.parallel metadata for loads and stores.
bool IsParallel;
- /// \brief State of loop vectorization or unrolling.
+ /// State of loop vectorization or unrolling.
enum LVEnableState { Unspecified, Enable, Disable, Full };
- /// \brief Value for llvm.loop.vectorize.enable metadata.
+ /// Value for llvm.loop.vectorize.enable metadata.
LVEnableState VectorizeEnable;
- /// \brief Value for llvm.loop.unroll.* metadata (enable, disable, or full).
+ /// Value for llvm.loop.unroll.* metadata (enable, disable, or full).
LVEnableState UnrollEnable;
- /// \brief Value for llvm.loop.vectorize.width metadata.
+ /// Value for llvm.loop.vectorize.width metadata.
unsigned VectorizeWidth;
- /// \brief Value for llvm.loop.interleave.count metadata.
+ /// Value for llvm.loop.interleave.count metadata.
unsigned InterleaveCount;
- /// \brief llvm.unroll.
+ /// llvm.unroll.
unsigned UnrollCount;
- /// \brief Value for llvm.loop.distribute.enable metadata.
+ /// Value for llvm.loop.distribute.enable metadata.
LVEnableState DistributeEnable;
};
-/// \brief Information used when generating a structured loop.
+/// Information used when generating a structured loop.
class LoopInfo {
public:
- /// \brief Construct a new LoopInfo for the loop with entry Header.
+ /// Construct a new LoopInfo for the loop with entry Header.
LoopInfo(llvm::BasicBlock *Header, const LoopAttributes &Attrs,
const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc);
- /// \brief Get the loop id metadata for this loop.
+ /// Get the loop id metadata for this loop.
llvm::MDNode *getLoopID() const { return LoopID; }
- /// \brief Get the header block of this loop.
+ /// Get the header block of this loop.
llvm::BasicBlock *getHeader() const { return Header; }
- /// \brief Get the set of attributes active for this loop.
+ /// Get the set of attributes active for this loop.
const LoopAttributes &getAttributes() const { return Attrs; }
private:
- /// \brief Loop ID metadata.
+ /// Loop ID metadata.
llvm::MDNode *LoopID;
- /// \brief Header block of this loop.
+ /// Header block of this loop.
llvm::BasicBlock *Header;
- /// \brief The attributes for this loop.
+ /// The attributes for this loop.
LoopAttributes Attrs;
};
-/// \brief A stack of loop information corresponding to loop nesting levels.
+/// A stack of loop information corresponding to loop nesting levels.
/// This stack can be used to prepare attributes which are applied when a loop
/// is emitted.
class LoopInfoStack {
@@ -97,70 +97,70 @@ class LoopInfoStack {
public:
LoopInfoStack() {}
- /// \brief Begin a new structured loop. The set of staged attributes will be
+ /// Begin a new structured loop. The set of staged attributes will be
/// applied to the loop and then cleared.
void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc);
- /// \brief Begin a new structured loop. Stage attributes from the Attrs list.
+ /// Begin a new structured loop. Stage attributes from the Attrs list.
/// The staged attributes are applied to the loop and then cleared.
void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
llvm::ArrayRef<const Attr *> Attrs, const llvm::DebugLoc &StartLoc,
const llvm::DebugLoc &EndLoc);
- /// \brief End the current loop.
+ /// End the current loop.
void pop();
- /// \brief Return the top loop id metadata.
+ /// Return the top loop id metadata.
llvm::MDNode *getCurLoopID() const { return getInfo().getLoopID(); }
- /// \brief Return true if the top loop is parallel.
+ /// Return true if the top loop is parallel.
bool getCurLoopParallel() const {
return hasInfo() ? getInfo().getAttributes().IsParallel : false;
}
- /// \brief Function called by the CodeGenFunction when an instruction is
+ /// Function called by the CodeGenFunction when an instruction is
/// created.
void InsertHelper(llvm::Instruction *I) const;
- /// \brief Set the next pushed loop as parallel.
+ /// Set the next pushed loop as parallel.
void setParallel(bool Enable = true) { StagedAttrs.IsParallel = Enable; }
- /// \brief Set the next pushed loop 'vectorize.enable'
+ /// Set the next pushed loop 'vectorize.enable'
void setVectorizeEnable(bool Enable = true) {
StagedAttrs.VectorizeEnable =
Enable ? LoopAttributes::Enable : LoopAttributes::Disable;
}
- /// \brief Set the next pushed loop as a distribution candidate.
+ /// Set the next pushed loop as a distribution candidate.
void setDistributeState(bool Enable = true) {
StagedAttrs.DistributeEnable =
Enable ? LoopAttributes::Enable : LoopAttributes::Disable;
}
- /// \brief Set the next pushed loop unroll state.
+ /// Set the next pushed loop unroll state.
void setUnrollState(const LoopAttributes::LVEnableState &State) {
StagedAttrs.UnrollEnable = State;
}
- /// \brief Set the vectorize width for the next loop pushed.
+ /// Set the vectorize width for the next loop pushed.
void setVectorizeWidth(unsigned W) { StagedAttrs.VectorizeWidth = W; }
- /// \brief Set the interleave count for the next loop pushed.
+ /// Set the interleave count for the next loop pushed.
void setInterleaveCount(unsigned C) { StagedAttrs.InterleaveCount = C; }
- /// \brief Set the unroll count for the next loop pushed.
+ /// Set the unroll count for the next loop pushed.
void setUnrollCount(unsigned C) { StagedAttrs.UnrollCount = C; }
private:
- /// \brief Returns true if there is LoopInfo on the stack.
+ /// Returns true if there is LoopInfo on the stack.
bool hasInfo() const { return !Active.empty(); }
- /// \brief Return the LoopInfo for the current loop. HasInfo should be called
+ /// Return the LoopInfo for the current loop. HasInfo should be called
/// first to ensure LoopInfo is present.
const LoopInfo &getInfo() const { return Active.back(); }
- /// \brief The set of attributes that will be applied to the next pushed loop.
+ /// The set of attributes that will be applied to the next pushed loop.
LoopAttributes StagedAttrs;
- /// \brief Stack of active loops.
+ /// Stack of active loops.
llvm::SmallVector<LoopInfo, 4> Active;
};
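Usage-wise, the staged attributes correspond to loop pragmas; a sketch:

    void scale(float *a, int n) {
      // push() stages the attributes below; when the loop header is emitted
      // they become llvm.loop.* metadata and the staged set is cleared.
    #pragma clang loop vectorize(enable) interleave_count(2)
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }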
diff --git a/lib/CodeGen/CGNonTrivialStruct.cpp b/lib/CodeGen/CGNonTrivialStruct.cpp
new file mode 100644
index 000000000000..922e0934b866
--- /dev/null
+++ b/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -0,0 +1,885 @@
+//===--- CGNonTrivialStruct.cpp - Emit Special Functions for C Structs ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions to generate various special functions for C
+// structs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/NonTrivialTypeVisitor.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include <array>
+
+using namespace clang;
+using namespace CodeGen;
+
+// Return the size of a field in number of bits.
+static uint64_t getFieldSize(const FieldDecl *FD, QualType FT,
+ ASTContext &Ctx) {
+ if (FD && FD->isBitField())
+ return FD->getBitWidthValue(Ctx);
+ return Ctx.getTypeSize(FT);
+}
+
+namespace {
+enum { DstIdx = 0, SrcIdx = 1 };
+const char *ValNameStr[2] = {"dst", "src"};
+
+template <class Derived> struct StructVisitor {
+ StructVisitor(ASTContext &Ctx) : Ctx(Ctx) {}
+
+ template <class... Ts>
+ void visitStructFields(QualType QT, CharUnits CurStructOffset, Ts... Args) {
+ const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+
+ // Iterate over the fields of the struct.
+ for (const FieldDecl *FD : RD->fields()) {
+ QualType FT = FD->getType();
+ FT = QT.isVolatileQualified() ? FT.withVolatile() : FT;
+ asDerived().visit(FT, FD, CurStructOffset, Args...);
+ }
+
+ asDerived().flushTrivialFields(Args...);
+ }
+
+ template <class... Ts> void visitTrivial(Ts... Args) {}
+
+ template <class... Ts> void visitCXXDestructor(Ts... Args) {
+ llvm_unreachable("field of a C++ struct type is not expected");
+ }
+
+ template <class... Ts> void flushTrivialFields(Ts... Args) {}
+
+ uint64_t getFieldOffsetInBits(const FieldDecl *FD) {
+ return FD ? Ctx.getASTRecordLayout(FD->getParent())
+ .getFieldOffset(FD->getFieldIndex())
+ : 0;
+ }
+
+ CharUnits getFieldOffset(const FieldDecl *FD) {
+ return Ctx.toCharUnitsFromBits(getFieldOffsetInBits(FD));
+ }
+
+ Derived &asDerived() { return static_cast<Derived &>(*this); }
+
+ ASTContext &getContext() { return Ctx; }
+ ASTContext &Ctx;
+};
+
+template <class Derived, bool IsMove>
+struct CopyStructVisitor : StructVisitor<Derived>,
+ CopiedTypeVisitor<Derived, IsMove> {
+ using StructVisitor<Derived>::asDerived;
+ using Super = CopiedTypeVisitor<Derived, IsMove>;
+
+ CopyStructVisitor(ASTContext &Ctx) : StructVisitor<Derived>(Ctx) {}
+
+ template <class... Ts>
+ void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
+ const FieldDecl *FD, CharUnits CurStructOffset,
+ Ts &&... Args) {
+ if (PCK)
+ asDerived().flushTrivialFields(std::forward<Ts>(Args)...);
+ }
+
+ template <class... Ts>
+ void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
+ const FieldDecl *FD, CharUnits CurStructOffset,
+ Ts &&... Args) {
+ if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
+ asDerived().visitArray(PCK, AT, FT.isVolatileQualified(), FD,
+ CurStructOffset, std::forward<Ts>(Args)...);
+ return;
+ }
+
+ Super::visitWithKind(PCK, FT, FD, CurStructOffset,
+ std::forward<Ts>(Args)...);
+ }
+
+ template <class... Ts>
+ void visitTrivial(QualType FT, const FieldDecl *FD, CharUnits CurStructOffset,
+ Ts... Args) {
+ assert(!FT.isVolatileQualified() && "volatile field not expected");
+ ASTContext &Ctx = asDerived().getContext();
+ uint64_t FieldSize = getFieldSize(FD, FT, Ctx);
+
+ // Ignore zero-sized fields.
+ if (FieldSize == 0)
+ return;
+
+ uint64_t FStartInBits = asDerived().getFieldOffsetInBits(FD);
+ uint64_t FEndInBits = FStartInBits + FieldSize;
+ uint64_t RoundedFEnd = llvm::alignTo(FEndInBits, Ctx.getCharWidth());
+
+ // Set Start if this is the first field of a sequence of trivial fields.
+ if (Start == End)
+ Start = CurStructOffset + Ctx.toCharUnitsFromBits(FStartInBits);
+ End = CurStructOffset + Ctx.toCharUnitsFromBits(RoundedFEnd);
+ }
+
+ CharUnits Start = CharUnits::Zero(), End = CharUnits::Zero();
+};
+
+// This function creates the mangled name of a special function of a non-trivial
+// C struct. Since there is no ODR in C, the function is mangled based on the
+// struct contents and not the name. The mangled name has the following
+// structure:
+//
+// <function-name> ::= <prefix> <alignment-info> "_" <struct-field-info>
+// <prefix> ::= "__destructor_" | "__default_constructor_" |
+// "__copy_constructor_" | "__move_constructor_" |
+// "__copy_assignment_" | "__move_assignment_"
+// <alignment-info> ::= <dst-alignment> ["_" <src-alignment>]
+// <struct-field-info> ::= <field-info>+
+// <field-info> ::= <struct-or-scalar-field-info> | <array-field-info>
+// <struct-or-scalar-field-info> ::= <struct-field-info> | <strong-field-info> |
+// <weak-field-info> | <trivial-field-info>
+// <array-field-info> ::= "_AB" <array-offset> "s" <element-size> "n"
+// <num-elements> <innermost-element-info> "_AE"
+// <innermost-element-info> ::= <struct-or-scalar-field-info>
+// <strong-field-info> ::= "_s" ["b"] ["v"] <field-offset>
+// <weak-field-info> ::= "_w" ["v"] <field-offset>
+// <trivial-field-info> ::= "_t" ["v"] <field-offset> "w" <field-size>
+
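A worked example derived from the grammar above (offsets and alignments assume a 64-bit target; the exact string is illustrative):

    // Under ARC, a non-trivial C struct:
    //   struct S {
    //     __strong id obj;  // offset 0           -> "_s0"
    //     int i;            // offset 8, 4 bytes  -> "_t8w4"
    //   };
    //
    // Its copy constructor would be named
    //   __copy_constructor_8_8_s0_t8w4
    // where "8_8" encodes the destination and source alignments.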
+template <class Derived> struct GenFuncNameBase {
+ std::string getVolatileOffsetStr(bool IsVolatile, CharUnits Offset) {
+ std::string S;
+ if (IsVolatile)
+ S = "v";
+ S += llvm::to_string(Offset.getQuantity());
+ return S;
+ }
+
+ void visitARCStrong(QualType FT, const FieldDecl *FD,
+ CharUnits CurStructOffset) {
+ appendStr("_s");
+ if (FT->isBlockPointerType())
+ appendStr("b");
+ CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
+ appendStr(getVolatileOffsetStr(FT.isVolatileQualified(), FieldOffset));
+ }
+
+ void visitARCWeak(QualType FT, const FieldDecl *FD,
+ CharUnits CurStructOffset) {
+ appendStr("_w");
+ CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
+ appendStr(getVolatileOffsetStr(FT.isVolatileQualified(), FieldOffset));
+ }
+
+ void visitStruct(QualType QT, const FieldDecl *FD,
+ CharUnits CurStructOffset) {
+ CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
+ asDerived().visitStructFields(QT, FieldOffset);
+ }
+
+ template <class FieldKind>
+ void visitArray(FieldKind FK, const ArrayType *AT, bool IsVolatile,
+ const FieldDecl *FD, CharUnits CurStructOffset) {
+ // The string for non-volatile trivial fields is emitted when
+ // flushTrivialFields is called.
+ if (!FK)
+ return asDerived().visitTrivial(QualType(AT, 0), FD, CurStructOffset);
+
+ CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
+ ASTContext &Ctx = asDerived().getContext();
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(AT);
+ unsigned NumElts = Ctx.getConstantArrayElementCount(CAT);
+ QualType EltTy = Ctx.getBaseElementType(CAT);
+ CharUnits EltSize = Ctx.getTypeSizeInChars(EltTy);
+ appendStr("_AB" + llvm::to_string(FieldOffset.getQuantity()) + "s" +
+ llvm::to_string(EltSize.getQuantity()) + "n" +
+ llvm::to_string(NumElts));
+ EltTy = IsVolatile ? EltTy.withVolatile() : EltTy;
+ asDerived().visitWithKind(FK, EltTy, nullptr, FieldOffset);
+ appendStr("_AE");
+ }
+
+ void appendStr(StringRef Str) { Name += Str; }
+
+ std::string getName(QualType QT, bool IsVolatile) {
+ QT = IsVolatile ? QT.withVolatile() : QT;
+ asDerived().visitStructFields(QT, CharUnits::Zero());
+ return Name;
+ }
+
+ Derived &asDerived() { return static_cast<Derived &>(*this); }
+
+ std::string Name;
+};
+
+template <class Derived>
+struct GenUnaryFuncName : StructVisitor<Derived>, GenFuncNameBase<Derived> {
+ GenUnaryFuncName(StringRef Prefix, CharUnits DstAlignment, ASTContext &Ctx)
+ : StructVisitor<Derived>(Ctx) {
+ this->appendStr(Prefix);
+ this->appendStr(llvm::to_string(DstAlignment.getQuantity()));
+ }
+};
+
+// Helper function to create a null constant.
+static llvm::Constant *getNullForVariable(Address Addr) {
+ llvm::Type *Ty = Addr.getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(Ty));
+}
+
+template <bool IsMove>
+struct GenBinaryFuncName : CopyStructVisitor<GenBinaryFuncName<IsMove>, IsMove>,
+ GenFuncNameBase<GenBinaryFuncName<IsMove>> {
+
+ GenBinaryFuncName(StringRef Prefix, CharUnits DstAlignment,
+ CharUnits SrcAlignment, ASTContext &Ctx)
+ : CopyStructVisitor<GenBinaryFuncName<IsMove>, IsMove>(Ctx) {
+ this->appendStr(Prefix);
+ this->appendStr(llvm::to_string(DstAlignment.getQuantity()));
+ this->appendStr("_" + llvm::to_string(SrcAlignment.getQuantity()));
+ }
+
+ void flushTrivialFields() {
+ if (this->Start == this->End)
+ return;
+
+ this->appendStr("_t" + llvm::to_string(this->Start.getQuantity()) + "w" +
+ llvm::to_string((this->End - this->Start).getQuantity()));
+
+ this->Start = this->End = CharUnits::Zero();
+ }
+
+ void visitVolatileTrivial(QualType FT, const FieldDecl *FD,
+ CharUnits CurStackOffset) {
+ // Because volatile fields can be bit-fields and are individually copied,
+ // their offset and width are in bits.
+ uint64_t OffsetInBits =
+ this->Ctx.toBits(CurStackOffset) + this->getFieldOffsetInBits(FD);
+ this->appendStr("_tv" + llvm::to_string(OffsetInBits) + "w" +
+ llvm::to_string(getFieldSize(FD, FT, this->Ctx)));
+ }
+};
+
+struct GenDefaultInitializeFuncName
+ : GenUnaryFuncName<GenDefaultInitializeFuncName>,
+ DefaultInitializedTypeVisitor<GenDefaultInitializeFuncName> {
+ using Super = DefaultInitializedTypeVisitor<GenDefaultInitializeFuncName>;
+ GenDefaultInitializeFuncName(CharUnits DstAlignment, ASTContext &Ctx)
+ : GenUnaryFuncName<GenDefaultInitializeFuncName>("__default_constructor_",
+ DstAlignment, Ctx) {}
+ void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
+ const FieldDecl *FD, CharUnits CurStructOffset) {
+ if (const auto *AT = getContext().getAsArrayType(FT)) {
+ visitArray(PDIK, AT, FT.isVolatileQualified(), FD, CurStructOffset);
+ return;
+ }
+
+ Super::visitWithKind(PDIK, FT, FD, CurStructOffset);
+ }
+};
+
+struct GenDestructorFuncName : GenUnaryFuncName<GenDestructorFuncName>,
+ DestructedTypeVisitor<GenDestructorFuncName> {
+ using Super = DestructedTypeVisitor<GenDestructorFuncName>;
+ GenDestructorFuncName(CharUnits DstAlignment, ASTContext &Ctx)
+ : GenUnaryFuncName<GenDestructorFuncName>("__destructor_", DstAlignment,
+ Ctx) {}
+ void visitWithKind(QualType::DestructionKind DK, QualType FT,
+ const FieldDecl *FD, CharUnits CurStructOffset) {
+ if (const auto *AT = getContext().getAsArrayType(FT)) {
+ visitArray(DK, AT, FT.isVolatileQualified(), FD, CurStructOffset);
+ return;
+ }
+
+ Super::visitWithKind(DK, FT, FD, CurStructOffset);
+ }
+};
+
+// Helper function that creates CGFunctionInfo for an N-ary special function.
+template <size_t N>
+static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
+ FunctionArgList &Args) {
+ ASTContext &Ctx = CGM.getContext();
+ llvm::SmallVector<ImplicitParamDecl *, N> Params;
+ QualType ParamTy = Ctx.getPointerType(Ctx.VoidPtrTy);
+
+ for (unsigned I = 0; I < N; ++I)
+ Params.push_back(ImplicitParamDecl::Create(
+ Ctx, nullptr, SourceLocation(), &Ctx.Idents.get(ValNameStr[I]), ParamTy,
+ ImplicitParamDecl::Other));
+
+ for (auto &P : Params)
+ Args.push_back(P);
+
+ return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
+}
+
+// Template classes that are used as bases for classes that emit special
+// functions.
+template <class Derived> struct GenFuncBase {
+ template <size_t N>
+ void visitStruct(QualType FT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, N> Addrs) {
+ this->asDerived().callSpecialFunction(
+ FT, CurStackOffset + asDerived().getFieldOffset(FD), Addrs);
+ }
+
+ template <class FieldKind, size_t N>
+ void visitArray(FieldKind FK, const ArrayType *AT, bool IsVolatile,
+ const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, N> Addrs) {
+ // Non-volatile trivial fields are copied when flushTrivialFields is called.
+ if (!FK)
+ return asDerived().visitTrivial(QualType(AT, 0), FD, CurStackOffset,
+ Addrs);
+
+ CodeGenFunction &CGF = *this->CGF;
+ ASTContext &Ctx = CGF.getContext();
+
+ // Compute the end address.
+ QualType BaseEltQT;
+ std::array<Address, N> StartAddrs = Addrs;
+ for (unsigned I = 0; I < N; ++I)
+ StartAddrs[I] = getAddrWithOffset(Addrs[I], CurStackOffset, FD);
+ Address DstAddr = StartAddrs[DstIdx];
+ llvm::Value *NumElts = CGF.emitArrayLength(AT, BaseEltQT, DstAddr);
+ unsigned BaseEltSize = Ctx.getTypeSizeInChars(BaseEltQT).getQuantity();
+ llvm::Value *BaseEltSizeVal =
+ llvm::ConstantInt::get(NumElts->getType(), BaseEltSize);
+ llvm::Value *SizeInBytes =
+ CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
+ Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy);
+ llvm::Value *DstArrayEnd =
+ CGF.Builder.CreateInBoundsGEP(BC.getPointer(), SizeInBytes);
+ DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy,
+ "dstarray.end");
+ llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
+
+ // Create the header block and insert the phi instructions.
+ llvm::BasicBlock *HeaderBB = CGF.createBasicBlock("loop.header");
+ CGF.EmitBlock(HeaderBB);
+ llvm::PHINode *PHIs[N];
+
+ for (unsigned I = 0; I < N; ++I) {
+ PHIs[I] = CGF.Builder.CreatePHI(CGF.CGM.Int8PtrPtrTy, 2, "addr.cur");
+ PHIs[I]->addIncoming(StartAddrs[I].getPointer(), PreheaderBB);
+ }
+
+ // Create the exit and loop body blocks.
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("loop.exit");
+ llvm::BasicBlock *LoopBB = CGF.createBasicBlock("loop.body");
+
+ // Emit the comparison and conditional branch instruction that jumps to
+ // either the exit or the loop body.
+ llvm::Value *Done =
+ CGF.Builder.CreateICmpEQ(PHIs[DstIdx], DstArrayEnd, "done");
+ CGF.Builder.CreateCondBr(Done, ExitBB, LoopBB);
+
+ // Visit the element of the array in the loop body.
+ CGF.EmitBlock(LoopBB);
+ QualType EltQT = AT->getElementType();
+ CharUnits EltSize = Ctx.getTypeSizeInChars(EltQT);
+ std::array<Address, N> NewAddrs = Addrs;
+
+ for (unsigned I = 0; I < N; ++I)
+ NewAddrs[I] = Address(
+ PHIs[I], StartAddrs[I].getAlignment().alignmentAtOffset(EltSize));
+
+ EltQT = IsVolatile ? EltQT.withVolatile() : EltQT;
+ this->asDerived().visitWithKind(FK, EltQT, nullptr, CharUnits::Zero(),
+ NewAddrs);
+
+ LoopBB = CGF.Builder.GetInsertBlock();
+
+ for (unsigned I = 0; I < N; ++I) {
+ // Advance the destination and source addresses by one element and feed
+ // the new addresses back into the phi nodes.
+ NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize);
+ PHIs[I]->addIncoming(NewAddrs[I].getPointer(), LoopBB);
+ }
+
+ // Insert an unconditional branch to the header block.
+ CGF.Builder.CreateBr(HeaderBB);
+ CGF.EmitBlock(ExitBB);
+ }
+
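The emitted control flow is structurally equivalent to this C++ sketch (helper names are hypothetical; N is 1 for destructors and default-initializers, 2 for copy/move functions):

    #include <cstddef>

    using ElementVisitor = void (*)(char *dst, char *src);

    void forEachElement(char *dst, char *src, std::size_t numElts,
                        std::size_t eltSize, ElementVisitor visit) {
      char *end = dst + numElts * eltSize;  // DstArrayEnd
      while (dst != end) {                  // loop.header: compare the dst phi
        visit(dst, src);                    // loop.body: one element
        dst += eltSize;                     // phi increments for the
        src += eltSize;                     // next iteration
      }                                     // loop.exit
    }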
+ /// Return an address with the specified offset from the passed address.
+ Address getAddrWithOffset(Address Addr, CharUnits Offset) {
+ assert(Addr.isValid() && "invalid address");
+ if (Offset.getQuantity() == 0)
+ return Addr;
+ Addr = CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrTy);
+ Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity(),
+ CharUnits::One());
+ return CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrPtrTy);
+ }
+
+ Address getAddrWithOffset(Address Addr, CharUnits StructFieldOffset,
+ const FieldDecl *FD) {
+ return getAddrWithOffset(Addr, StructFieldOffset +
+ asDerived().getFieldOffset(FD));
+ }
+
+ template <size_t N>
+ llvm::Function *
+ getFunction(StringRef FuncName, QualType QT, std::array<Address, N> Addrs,
+ std::array<CharUnits, N> Alignments, CodeGenModule &CGM) {
+ // If the special function already exists in the module, return it.
+ if (llvm::Function *F = CGM.getModule().getFunction(FuncName)) {
+ bool WrongType = false;
+ if (!F->getReturnType()->isVoidTy())
+ WrongType = true;
+ else {
+ for (const llvm::Argument &Arg : F->args())
+ if (Arg.getType() != CGM.Int8PtrPtrTy)
+ WrongType = true;
+ }
+
+ if (WrongType) {
+ std::string FuncName = F->getName();
+ SourceLocation Loc = QT->castAs<RecordType>()->getDecl()->getLocation();
+ CGM.Error(Loc, "special function " + FuncName +
+ " for non-trivial C struct has incorrect type");
+ return nullptr;
+ }
+ return F;
+ }
+
+ ASTContext &Ctx = CGM.getContext();
+ FunctionArgList Args;
+ const CGFunctionInfo &FI = getFunctionInfo<N>(CGM, Args);
+ llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
+ llvm::Function *F =
+ llvm::Function::Create(FuncTy, llvm::GlobalValue::LinkOnceODRLinkage,
+ FuncName, &CGM.getModule());
+ F->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.SetLLVMFunctionAttributes(nullptr, FI, F);
+ CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
+ IdentifierInfo *II = &Ctx.Idents.get(FuncName);
+ FunctionDecl *FD = FunctionDecl::Create(
+ Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
+ II, Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false);
+ CodeGenFunction NewCGF(CGM);
+ setCGF(&NewCGF);
+ CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args);
+
+ for (unsigned I = 0; I < N; ++I) {
+ llvm::Value *V = CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[I]));
+ Addrs[I] = Address(V, Alignments[I]);
+ }
+
+ asDerived().visitStructFields(QT, CharUnits::Zero(), Addrs);
+ CGF->FinishFunction();
+ return F;
+ }
+
+ template <size_t N>
+ void callFunc(StringRef FuncName, QualType QT, std::array<Address, N> Addrs,
+ CodeGenFunction &CallerCGF) {
+ std::array<CharUnits, N> Alignments;
+ llvm::Value *Ptrs[N];
+
+ for (unsigned I = 0; I < N; ++I) {
+ Alignments[I] = Addrs[I].getAlignment();
+ Ptrs[I] =
+ CallerCGF.Builder.CreateBitCast(Addrs[I], CallerCGF.CGM.Int8PtrPtrTy)
+ .getPointer();
+ }
+
+ if (llvm::Function *F =
+ getFunction(FuncName, QT, Addrs, Alignments, CallerCGF.CGM))
+ CallerCGF.EmitNounwindRuntimeCall(F, Ptrs);
+ }
+
+ Derived &asDerived() { return static_cast<Derived &>(*this); }
+
+ void setCGF(CodeGenFunction *F) { CGF = F; }
+
+ CodeGenFunction *CGF = nullptr;
+};
+
+template <class Derived, bool IsMove>
+struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
+ GenFuncBase<Derived> {
+ GenBinaryFunc(ASTContext &Ctx) : CopyStructVisitor<Derived, IsMove>(Ctx) {}
+
+ void flushTrivialFields(std::array<Address, 2> Addrs) {
+ CharUnits Size = this->End - this->Start;
+
+ if (Size.getQuantity() == 0)
+ return;
+
+ Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], this->Start);
+ Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], this->Start);
+
+ // Emit memcpy.
+ if (Size.getQuantity() >= 16 || !llvm::isPowerOf2_32(Size.getQuantity())) {
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(this->CGF->SizeTy, Size.getQuantity());
+ DstAddr =
+ this->CGF->Builder.CreateElementBitCast(DstAddr, this->CGF->Int8Ty);
+ SrcAddr =
+ this->CGF->Builder.CreateElementBitCast(SrcAddr, this->CGF->Int8Ty);
+ this->CGF->Builder.CreateMemCpy(DstAddr, SrcAddr, SizeVal, false);
+ } else {
+ llvm::Type *Ty = llvm::Type::getIntNTy(
+ this->CGF->getLLVMContext(),
+ Size.getQuantity() * this->CGF->getContext().getCharWidth());
+ DstAddr = this->CGF->Builder.CreateElementBitCast(DstAddr, Ty);
+ SrcAddr = this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty);
+ llvm::Value *SrcVal = this->CGF->Builder.CreateLoad(SrcAddr, false);
+ this->CGF->Builder.CreateStore(SrcVal, DstAddr, false);
+ }
+
+ this->Start = this->End = CharUnits::Zero();
+ }
+
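+ // Volatile trivial fields must be accessed individually, so they are copied
+ // with ordinary l-value loads and stores rather than being folded into the
+ // bulk copy above.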
+ template <class... Ts>
+ void visitVolatileTrivial(QualType FT, const FieldDecl *FD, CharUnits Offset,
+ std::array<Address, 2> Addrs) {
+ LValue DstLV, SrcLV;
+ if (FD) {
+ QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
+ llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo();
+ Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
+ LValue DstBase = this->CGF->MakeAddrLValue(
+ this->CGF->Builder.CreateBitCast(DstAddr, PtrTy), FT);
+ DstLV = this->CGF->EmitLValueForField(DstBase, FD);
+ Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], Offset);
+ LValue SrcBase = this->CGF->MakeAddrLValue(
+ this->CGF->Builder.CreateBitCast(SrcAddr, PtrTy), FT);
+ SrcLV = this->CGF->EmitLValueForField(SrcBase, FD);
+ } else {
+ llvm::PointerType *Ty = this->CGF->ConvertType(FT)->getPointerTo();
+ Address DstAddr = this->CGF->Builder.CreateBitCast(Addrs[DstIdx], Ty);
+ Address SrcAddr = this->CGF->Builder.CreateBitCast(Addrs[SrcIdx], Ty);
+ DstLV = this->CGF->MakeAddrLValue(DstAddr, FT);
+ SrcLV = this->CGF->MakeAddrLValue(SrcAddr, FT);
+ }
+ RValue SrcVal = this->CGF->EmitLoadOfLValue(SrcLV, SourceLocation());
+ this->CGF->EmitStoreThroughLValue(SrcVal, DstLV);
+ }
+};
+
+// These classes emit the special functions for a non-trivial C struct.
+struct GenDestructor : StructVisitor<GenDestructor>,
+ GenFuncBase<GenDestructor>,
+ DestructedTypeVisitor<GenDestructor> {
+ using Super = DestructedTypeVisitor<GenDestructor>;
+ GenDestructor(ASTContext &Ctx) : StructVisitor<GenDestructor>(Ctx) {}
+
+ void visitWithKind(QualType::DestructionKind DK, QualType FT,
+ const FieldDecl *FD, CharUnits CurStructOffset,
+ std::array<Address, 1> Addrs) {
+ if (const auto *AT = getContext().getAsArrayType(FT)) {
+ visitArray(DK, AT, FT.isVolatileQualified(), FD, CurStructOffset, Addrs);
+ return;
+ }
+
+ Super::visitWithKind(DK, FT, FD, CurStructOffset, Addrs);
+ }
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ CharUnits CurStackOffset, std::array<Address, 1> Addrs) {
+ CGF->destroyARCStrongImprecise(
+ *CGF, getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 1> Addrs) {
+ CGF->destroyARCWeak(
+ *CGF, getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ }
+
+ void callSpecialFunction(QualType FT, CharUnits Offset,
+ std::array<Address, 1> Addrs) {
+ CGF->callCStructDestructor(
+ CGF->MakeAddrLValue(getAddrWithOffset(Addrs[DstIdx], Offset), FT));
+ }
+};
+
+struct GenDefaultInitialize
+ : StructVisitor<GenDefaultInitialize>,
+ GenFuncBase<GenDefaultInitialize>,
+ DefaultInitializedTypeVisitor<GenDefaultInitialize> {
+ using Super = DefaultInitializedTypeVisitor<GenDefaultInitialize>;
+ typedef GenFuncBase<GenDefaultInitialize> GenFuncBaseTy;
+
+ GenDefaultInitialize(ASTContext &Ctx)
+ : StructVisitor<GenDefaultInitialize>(Ctx) {}
+
+ void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
+ const FieldDecl *FD, CharUnits CurStructOffset,
+ std::array<Address, 1> Addrs) {
+ if (const auto *AT = getContext().getAsArrayType(FT)) {
+ visitArray(PDIK, AT, FT.isVolatileQualified(), FD, CurStructOffset,
+ Addrs);
+ return;
+ }
+
+ Super::visitWithKind(PDIK, FT, FD, CurStructOffset, Addrs);
+ }
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ CharUnits CurStackOffset, std::array<Address, 1> Addrs) {
+ CGF->EmitNullInitialization(
+ getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 1> Addrs) {
+ CGF->EmitNullInitialization(
+ getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD), QT);
+ }
+
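+ // Default-initializes an array field. Trivial arrays are left to
+ // visitTrivial; arrays of at least 16 bytes whose elements are not records
+ // are zeroed with a single memset; everything else falls back to the
+ // generic element-by-element visit.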
+ template <class FieldKind, size_t... Is>
+ void visitArray(FieldKind FK, const ArrayType *AT, bool IsVolatile,
+ const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 1> Addrs) {
+ if (!FK)
+ return visitTrivial(QualType(AT, 0), FD, CurStackOffset, Addrs);
+
+ ASTContext &Ctx = getContext();
+ CharUnits Size = Ctx.getTypeSizeInChars(QualType(AT, 0));
+ QualType EltTy = Ctx.getBaseElementType(QualType(AT, 0));
+
+ if (Size < CharUnits::fromQuantity(16) || EltTy->getAs<RecordType>()) {
+ GenFuncBaseTy::visitArray(FK, AT, IsVolatile, FD, CurStackOffset, Addrs);
+ return;
+ }
+
+ llvm::Constant *SizeVal = CGF->Builder.getInt64(Size.getQuantity());
+ Address DstAddr = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Address Loc = CGF->Builder.CreateElementBitCast(DstAddr, CGF->Int8Ty);
+ CGF->Builder.CreateMemSet(Loc, CGF->Builder.getInt8(0), SizeVal,
+ IsVolatile);
+ }
+
+ void callSpecialFunction(QualType FT, CharUnits Offset,
+ std::array<Address, 1> Addrs) {
+ CGF->callCStructDefaultConstructor(
+ CGF->MakeAddrLValue(getAddrWithOffset(Addrs[DstIdx], Offset), FT));
+ }
+};
+
+struct GenCopyConstructor : GenBinaryFunc<GenCopyConstructor, false> {
+ GenCopyConstructor(ASTContext &Ctx)
+ : GenBinaryFunc<GenCopyConstructor, false>(Ctx) {}
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ llvm::Value *SrcVal = CGF->EmitLoadOfScalar(
+ Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation());
+ llvm::Value *Val = CGF->EmitARCRetain(QT, SrcVal);
+ CGF->EmitStoreOfScalar(Val, CGF->MakeAddrLValue(Addrs[DstIdx], QT), true);
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CGF->EmitARCCopyWeak(Addrs[DstIdx], Addrs[SrcIdx]);
+ }
+
+ void callSpecialFunction(QualType FT, CharUnits Offset,
+ std::array<Address, 2> Addrs) {
+ CGF->callCStructCopyConstructor(CGF->MakeAddrLValue(Addrs[DstIdx], FT),
+ CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
+ }
+};
+
+struct GenMoveConstructor : GenBinaryFunc<GenMoveConstructor, true> {
+ GenMoveConstructor(ASTContext &Ctx)
+ : GenBinaryFunc<GenMoveConstructor, true>(Ctx) {}
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
+ llvm::Value *SrcVal =
+ CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
+ CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT),
+ /* isInitialization */ true);
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CGF->EmitARCMoveWeak(Addrs[DstIdx], Addrs[SrcIdx]);
+ }
+
+ void callSpecialFunction(QualType FT, CharUnits Offset,
+ std::array<Address, 2> Addrs) {
+ CGF->callCStructMoveConstructor(CGF->MakeAddrLValue(Addrs[DstIdx], FT),
+ CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
+ }
+};
+
+struct GenCopyAssignment : GenBinaryFunc<GenCopyAssignment, false> {
+ GenCopyAssignment(ASTContext &Ctx)
+ : GenBinaryFunc<GenCopyAssignment, false>(Ctx) {}
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ llvm::Value *SrcVal = CGF->EmitLoadOfScalar(
+ Addrs[SrcIdx], QT.isVolatileQualified(), QT, SourceLocation());
+ CGF->EmitARCStoreStrong(CGF->MakeAddrLValue(Addrs[DstIdx], QT), SrcVal,
+ false);
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CGF->emitARCCopyAssignWeak(QT, Addrs[DstIdx], Addrs[SrcIdx]);
+ }
+
+ void callSpecialFunction(QualType FT, CharUnits Offset,
+ std::array<Address, 2> Addrs) {
+ CGF->callCStructCopyAssignmentOperator(
+ CGF->MakeAddrLValue(Addrs[DstIdx], FT),
+ CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
+ }
+};
+
+struct GenMoveAssignment : GenBinaryFunc<GenMoveAssignment, true> {
+ GenMoveAssignment(ASTContext &Ctx)
+ : GenBinaryFunc<GenMoveAssignment, true>(Ctx) {}
+
+ void visitARCStrong(QualType QT, const FieldDecl *FD,
+ CharUnits CurStackOffset, std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
+ llvm::Value *SrcVal =
+ CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
+ LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT);
+ llvm::Value *DstVal =
+ CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
+ CGF->EmitStoreOfScalar(SrcVal, DstLV);
+ CGF->EmitARCRelease(DstVal, ARCImpreciseLifetime);
+ }
+
+ void visitARCWeak(QualType QT, const FieldDecl *FD, CharUnits CurStackOffset,
+ std::array<Address, 2> Addrs) {
+ Addrs[DstIdx] = getAddrWithOffset(Addrs[DstIdx], CurStackOffset, FD);
+ Addrs[SrcIdx] = getAddrWithOffset(Addrs[SrcIdx], CurStackOffset, FD);
+ CGF->emitARCMoveAssignWeak(QT, Addrs[DstIdx], Addrs[SrcIdx]);
+ }
+
+ void callSpecialFunction(QualType FT, CharUnits Offset,
+ std::array<Address, 2> Addrs) {
+ CGF->callCStructMoveAssignmentOperator(
+ CGF->MakeAddrLValue(Addrs[DstIdx], FT),
+ CGF->MakeAddrLValue(Addrs[SrcIdx], FT));
+ }
+};
+
+} // namespace
+
+void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
+ Address Addr, QualType Type) {
+ CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Type));
+}
+
+// Default-initialize a variable that is a non-trivial struct or an array of
+// such structs.
+void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
+ GenDefaultInitialize Gen(getContext());
+ Address DstPtr = Builder.CreateBitCast(Dst.getAddress(), CGM.Int8PtrPtrTy);
+ Gen.setCGF(this);
+ QualType QT = Dst.getType();
+ QT = Dst.isVolatile() ? QT.withVolatile() : QT;
+ Gen.visit(QT, nullptr, CharUnits::Zero(), std::array<Address, 1>({{DstPtr}}));
+}
+
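+// Casts each address to i8**, propagates volatility into the type, and
+// forwards to the generator's callFunc.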
+template <class G, size_t N>
+static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
+ bool IsVolatile, CodeGenFunction &CGF,
+ std::array<Address, N> Addrs) {
+ for (unsigned I = 0; I < N; ++I)
+ Addrs[I] = CGF.Builder.CreateBitCast(Addrs[I], CGF.CGM.Int8PtrPtrTy);
+ QT = IsVolatile ? QT.withVolatile() : QT;
+ Gen.callFunc(FuncName, QT, Addrs, CGF);
+}
+
+// Functions to emit calls to the special functions of a non-trivial C struct.
+void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
+ bool IsVolatile = Dst.isVolatile();
+ Address DstPtr = Dst.getAddress();
+ QualType QT = Dst.getType();
+ GenDefaultInitializeFuncName GenName(DstPtr.getAlignment(), getContext());
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ callSpecialFunction(GenDefaultInitialize(getContext()), FuncName, QT,
+ IsVolatile, *this, std::array<Address, 1>({{DstPtr}}));
+}
+
+void CodeGenFunction::callCStructDestructor(LValue Dst) {
+ bool IsVolatile = Dst.isVolatile();
+ Address DstPtr = Dst.getAddress();
+ QualType QT = Dst.getType();
+ GenDestructorFuncName GenName(DstPtr.getAlignment(), getContext());
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ callSpecialFunction(GenDestructor(getContext()), FuncName, QT, IsVolatile,
+ *this, std::array<Address, 1>({{DstPtr}}));
+}
+
+void CodeGenFunction::callCStructCopyConstructor(LValue Dst, LValue Src) {
+ bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
+ QualType QT = Dst.getType();
+ GenBinaryFuncName<false> GenName("__copy_constructor_", DstPtr.getAlignment(),
+ SrcPtr.getAlignment(), getContext());
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ callSpecialFunction(GenCopyConstructor(getContext()), FuncName, QT,
+ IsVolatile, *this,
+ std::array<Address, 2>({{DstPtr, SrcPtr}}));
+}
+
+void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst,
+ LValue Src) {
+ bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
+ QualType QT = Dst.getType();
+ GenBinaryFuncName<false> GenName("__copy_assignment_", DstPtr.getAlignment(),
+ SrcPtr.getAlignment(), getContext());
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ callSpecialFunction(GenCopyAssignment(getContext()), FuncName, QT, IsVolatile,
+ *this, std::array<Address, 2>({{DstPtr, SrcPtr}}));
+}
+
+void CodeGenFunction::callCStructMoveConstructor(LValue Dst, LValue Src) {
+ bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
+ QualType QT = Dst.getType();
+ GenBinaryFuncName<true> GenName("__move_constructor_", DstPtr.getAlignment(),
+ SrcPtr.getAlignment(), getContext());
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ callSpecialFunction(GenMoveConstructor(getContext()), FuncName, QT,
+ IsVolatile, *this,
+ std::array<Address, 2>({{DstPtr, SrcPtr}}));
+}
+
+void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst,
+ LValue Src) {
+ bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
+ QualType QT = Dst.getType();
+ GenBinaryFuncName<true> GenName("__move_assignment_", DstPtr.getAlignment(),
+ SrcPtr.getAlignment(), getContext());
+ std::string FuncName = GenName.getName(QT, IsVolatile);
+ callSpecialFunction(GenMoveAssignment(getContext()), FuncName, QT, IsVolatile,
+ *this, std::array<Address, 2>({{DstPtr, SrcPtr}}));
+}
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index f26263d9472d..81c1201c0e06 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -259,7 +259,7 @@ llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}
-/// \brief Adjust the type of an Objective-C object that doesn't match up due
+/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
@@ -803,7 +803,7 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
Kind = Native;
}
-/// \brief Generate an Objective-C property getter function.
+/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
@@ -1008,12 +1008,14 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
/*init*/ true);
return;
}
- case TEK_Aggregate:
+ case TEK_Aggregate: {
// The return value slot is guaranteed to not be aliased, but
// that's not necessarily the same as "on the stack", so
// we still potentially need objc_memmove_collectable.
- EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
+ EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
+ /* Src= */ LV, ivarType, overlapForReturnValue());
return;
+ }
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
@@ -1334,7 +1336,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
EmitStmt(&assign);
}
-/// \brief Generate an Objective-C property setter function.
+/// Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
@@ -1438,7 +1440,8 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
EmitAggExpr(IvarInit->getInit(),
AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
}
// constructor returns 'self'.
CodeGenTypes &Types = CGM.getTypes();
@@ -1814,22 +1817,6 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
}
-static bool IsForwarding(StringRef Name) {
- return llvm::StringSwitch<bool>(Name)
- .Cases("objc_autoreleaseReturnValue", // ARCInstKind::AutoreleaseRV
- "objc_autorelease", // ARCInstKind::Autorelease
- "objc_retainAutoreleaseReturnValue", // ARCInstKind::FusedRetainAutoreleaseRV
- "objc_retainAutoreleasedReturnValue", // ARCInstKind::RetainRV
- "objc_retainAutorelease", // ARCInstKind::FusedRetainAutorelease
- "objc_retainedObject", // ARCInstKind::NoopCast
- "objc_retain", // ARCInstKind::Retain
- "objc_unretainedObject", // ARCInstKind::NoopCast
- "objc_unretainedPointer", // ARCInstKind::NoopCast
- "objc_unsafeClaimAutoreleasedReturnValue", // ARCInstKind::ClaimRV
- true)
- .Default(false);
-}
-
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
llvm::FunctionType *FTy,
StringRef Name) {
@@ -1847,9 +1834,6 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
// performance.
F->addFnAttr(llvm::Attribute::NonLazyBind);
}
-
- if (IsForwarding(Name))
- F->arg_begin()->addAttr(llvm::Attribute::Returned);
}
return RTF;
@@ -2052,7 +2036,7 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
// Call the marker asm if we made one, which we do only at -O0.
if (marker)
- CGF.Builder.CreateCall(marker);
+ CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}
/// Retain the given object which is the result of a function call.
@@ -2070,7 +2054,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
/// Claim a possibly-autoreleased return value at +0. This is only
/// valid to do in contexts which do not rely on the retain to keep
-/// the object valid for for all of its uses; for example, when
+/// the object valid for all of its uses; for example, when
/// the value is ignored, or when it is being assigned to an
/// __unsafe_unretained variable.
///
@@ -2325,6 +2309,21 @@ void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
"objc_copyWeak");
}
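+/// Copy-assigns a __weak reference: loads the source weak reference and
+/// stores it into the destination, leaving the source intact.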
+void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
+ Address SrcAddr) {
+ llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
+ Object = EmitObjCConsumeObject(Ty, Object);
+ EmitARCStoreWeak(DstAddr, Object, false);
+}
+
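+/// Move-assigns a __weak reference: stores the loaded source value into the
+/// destination, then destroys the source weak reference.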
+void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
+ Address SrcAddr) {
+ llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
+ Object = EmitObjCConsumeObject(Ty, Object);
+ EmitARCStoreWeak(DstAddr, Object, false);
+ EmitARCDestroyWeak(SrcAddr);
+}
+
/// Produce the code to do a objc_autoreleasepool_push.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
@@ -3261,19 +3260,19 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
"__assign_helper_atomic_property_",
&CGM.getModule());
- CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
StartFunction(FD, C.VoidTy, Fn, FI, args);
DeclRefExpr DstExpr(&DstDecl, false, DestTy,
VK_RValue, SourceLocation());
UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation());
+ VK_LValue, OK_Ordinary, SourceLocation(), false);
DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
VK_RValue, SourceLocation());
UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation());
+ VK_LValue, OK_Ordinary, SourceLocation(), false);
Expr *Args[2] = { &DST, &SRC };
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
@@ -3342,8 +3341,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__copy_helper_atomic_property_", &CGM.getModule());
-
- CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);
+
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
StartFunction(FD, C.VoidTy, Fn, FI, args);
@@ -3351,7 +3350,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
VK_RValue, SourceLocation());
UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
- VK_LValue, OK_Ordinary, SourceLocation());
+ VK_LValue, OK_Ordinary, SourceLocation(), false);
CXXConstructExpr *CXXConstExpr =
cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
@@ -3384,7 +3383,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
FinishFunction();
HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index c8b8be7f4552..6a0554b46b1c 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -34,11 +34,24 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ConvertUTF.h"
+#include <cctype>
using namespace clang;
using namespace CodeGen;
namespace {
+
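+// Flattens a method into a symbol name by joining the class, category, and
+// selector (with each ':' replaced by '_'); e.g. the instance method
+// -[Foo bar:] in category Cat becomes "_i_Foo_Cat_bar_".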
+std::string SymbolNameForMethod(StringRef ClassName,
+ StringRef CategoryName, const Selector MethodName,
+ bool isClassMethod) {
+ std::string MethodNameColonStripped = MethodName.getAsString();
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped).str();
+}
+
/// Class that lazily initialises the runtime function. Avoids inserting the
/// types and the function declaration into a module if they're not used, and
/// avoids constructing the type more than once if it's used more than once.
@@ -80,8 +93,7 @@ public:
if (!Function) {
if (!FunctionName)
return nullptr;
- Function =
- cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName));
+ Function = CGM->CreateRuntimeFunction(FTy, FunctionName);
}
return Function;
}
@@ -114,6 +126,10 @@ protected:
/// Pointer to i8 - LLVM type of char*, for all of the places where the
/// runtime needs to deal with C strings.
llvm::PointerType *PtrToInt8Ty;
+ /// struct objc_protocol type
+ llvm::StructType *ProtocolTy;
+ /// Protocol * type.
+ llvm::PointerType *ProtocolPtrTy;
/// Instance Method Pointer type. This is a pointer to a function that takes,
/// at a minimum, an object and a selector, and is the generic type for
/// Objective-C methods. Due to differences between variadic / non-variadic
@@ -156,11 +172,29 @@ protected:
llvm::IntegerType *Int32Ty;
/// 64-bit integer type, to save us needing to look it up every time it's used.
llvm::IntegerType *Int64Ty;
+ /// The type of struct objc_property.
+ llvm::StructType *PropertyMetadataTy;
/// Metadata kind used to tie method lookups to message sends. The GNUstep
/// runtime provides some LLVM passes that can use this to do things like
/// automatic IMP caching and speculative inlining.
unsigned msgSendMDKind;
+ /// Helper to check if we are targeting a specific runtime version or later.
+ bool isRuntime(ObjCRuntime::Kind kind, unsigned major, unsigned minor=0) {
+ const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;
+ return (R.getKind() == kind) &&
+ (R.getVersion() >= VersionTuple(major, minor));
+ }
+
+ std::string SymbolForProtocol(StringRef Name) {
+ return (StringRef("._OBJC_PROTOCOL_") + Name).str();
+ }
+
+ std::string SymbolForProtocolRef(StringRef Name) {
+ return (StringRef("._OBJC_REF_PROTOCOL_") + Name).str();
+ }
+
/// Helper function that generates a constant string and returns a pointer to
/// the start of the string. The result of this function can be used anywhere
/// where the C code specifies const char*.
@@ -174,39 +208,28 @@ protected:
/// string value. This allows the linker to combine the strings between
/// different modules. Used for EH typeinfo names, selector strings, and a
/// few other things.
- llvm::Constant *ExportUniqueString(const std::string &Str, StringRef Prefix) {
- std::string Name = Prefix.str() + Str;
- auto *ConstStr = TheModule.getGlobalVariable(Name);
+ llvm::Constant *ExportUniqueString(const std::string &Str,
+ const std::string &prefix,
+ bool Private=false) {
+ std::string name = prefix + Str;
+ auto *ConstStr = TheModule.getGlobalVariable(name);
if (!ConstStr) {
llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
- ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
- llvm::GlobalValue::LinkOnceODRLinkage,
- value, Name);
+ auto *GV = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, name);
+ if (Private)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ ConstStr = GV;
}
return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
ConstStr, Zeros);
}
- /// Generates a global structure, initialized by the elements in the vector.
- /// The element types must match the types of the structure elements in the
- /// first argument.
- llvm::GlobalVariable *MakeGlobal(llvm::Constant *C,
- CharUnits Align,
- StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage
- =llvm::GlobalValue::InternalLinkage) {
- auto GV = new llvm::GlobalVariable(TheModule, C->getType(), false,
- linkage, C, Name);
- GV->setAlignment(Align.getQuantity());
- return GV;
- }
-
/// Returns a property name and encoding string.
llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
const Decl *Container) {
- const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;
- if ((R.getKind() == ObjCRuntime::GNUstep) &&
- (R.getVersion() >= VersionTuple(1, 6))) {
+ assert(!isRuntime(ObjCRuntime::GNUstep, 2));
+ if (isRuntime(ObjCRuntime::GNUstep, 1, 6)) {
std::string NameAndAttributes;
std::string TypeStr =
CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container);
@@ -222,7 +245,7 @@ protected:
/// Push the property attributes into two structure fields.
void PushPropertyAttributes(ConstantStructBuilder &Fields,
- ObjCPropertyDecl *property, bool isSynthesized=true, bool
+ const ObjCPropertyDecl *property, bool isSynthesized=true, bool
isDynamic=true) {
int attrs = property->getPropertyAttributes();
// For read-only properties, clear the copy and retain flags
@@ -249,6 +272,46 @@ protected:
Fields.addInt(Int8Ty, 0);
}
+ virtual ConstantArrayBuilder PushPropertyListHeader(ConstantStructBuilder &Fields,
+ int count) {
+ // int count;
+ Fields.addInt(IntTy, count);
+ // int size; (only in the GNUstep v2 ABI)
+ if (isRuntime(ObjCRuntime::GNUstep, 2)) {
+ llvm::DataLayout td(&TheModule);
+ Fields.addInt(IntTy, td.getTypeSizeInBits(PropertyMetadataTy) /
+ CGM.getContext().getCharWidth());
+ }
+ // struct objc_property_list *next;
+ Fields.add(NULLPtr);
+ // struct objc_property properties[]
+ return Fields.beginArray(PropertyMetadataTy);
+ }
+ virtual void PushProperty(ConstantArrayBuilder &PropertiesArray,
+ const ObjCPropertyDecl *property,
+ const Decl *OCD,
+ bool isSynthesized=true,
+ bool isDynamic=true) {
+ auto Fields = PropertiesArray.beginStruct(PropertyMetadataTy);
+ ASTContext &Context = CGM.getContext();
+ Fields.add(MakePropertyEncodingString(property, OCD));
+ PushPropertyAttributes(Fields, property, isSynthesized, isDynamic);
+ auto addPropertyMethod = [&](const ObjCMethodDecl *accessor) {
+ if (accessor) {
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(accessor);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ Fields.add(MakeConstantString(accessor->getSelector().getAsString()));
+ Fields.add(TypeEncoding);
+ } else {
+ Fields.add(NULLPtr);
+ Fields.add(NULLPtr);
+ }
+ };
+ addPropertyMethod(property->getGetterMethodDecl());
+ addPropertyMethod(property->getSetterMethodDecl());
+ Fields.finishAndAddTo(PropertiesArray);
+ }
+
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
@@ -268,7 +331,8 @@ protected:
/// LLVM context.
llvm::LLVMContext &VMContext;
-private:
+protected:
+
/// Placeholder for the class. Lots of things refer to the class before we've
/// actually emitted it. We use this alias as a placeholder, and then replace
/// it with a pointer to the class structure before finally emitting the
@@ -352,6 +416,7 @@ private:
/// Function used for non-object declared property setters.
LazyRuntimeFunction SetStructPropertyFn;
+protected:
/// The version of the runtime that this class targets. Must match the
/// version in the runtime.
int RuntimeVersion;
@@ -362,14 +427,18 @@ private:
/// Objective-C 1 property structures when targeting the GCC runtime or it
/// will abort.
const int ProtocolVersion;
-
+ /// The version of the class ABI. This value is used in the class structure
+ /// and indicates how various fields should be interpreted.
+ const int ClassABIVersion;
/// Generates an instance variable list structure. This is a structure
/// containing a size and an array of structures containing instance variable
/// metadata. This is used purely for introspection in the fragile ABI. In
/// the non-fragile ABI, it's used for instance variable fixup.
- llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
- ArrayRef<llvm::Constant *> IvarTypes,
- ArrayRef<llvm::Constant *> IvarOffsets);
+ virtual llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets,
+ ArrayRef<llvm::Constant *> IvarAlign,
+ ArrayRef<Qualifiers::ObjCLifetime> IvarOwnership);
/// Generates a method list structure. This is a structure containing a size
/// and an array of structures containing method metadata.
@@ -378,20 +447,20 @@ private:
/// pointer allowing them to be chained together in a linked list.
llvm::Constant *GenerateMethodList(StringRef ClassName,
StringRef CategoryName,
- ArrayRef<Selector> MethodSels,
- ArrayRef<llvm::Constant *> MethodTypes,
+ ArrayRef<const ObjCMethodDecl*> Methods,
bool isClassMethodList);
/// Emits an empty protocol. This is used for \@protocol() where no protocol
/// is found. The runtime will (hopefully) fix up the pointer to refer to the
/// real protocol.
- llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ virtual llvm::Constant *GenerateEmptyProtocol(StringRef ProtocolName);
/// Generates a list of property metadata structures. This follows the same
/// pattern as method and instance variable metadata lists.
- llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
- SmallVectorImpl<Selector> &InstanceMethodSels,
- SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ llvm::Constant *GeneratePropertyList(const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ bool isClassProperty=false,
+ bool protocolOptionalProperties=false);
/// Generates a list of referenced protocols. Classes, categories, and
/// protocols all use this structure.
@@ -422,22 +491,42 @@ private:
/// Generates a method list. This is used by protocols to define the required
/// and optional methods.
- llvm::Constant *GenerateProtocolMethodList(
- ArrayRef<llvm::Constant *> MethodNames,
- ArrayRef<llvm::Constant *> MethodTypes);
+ virtual llvm::Constant *GenerateProtocolMethodList(
+ ArrayRef<const ObjCMethodDecl*> Methods);
+ /// Emits optional and required method lists.
+ template<class T>
+ void EmitProtocolMethodList(T &&Methods, llvm::Constant *&Required,
+ llvm::Constant *&Optional) {
+ SmallVector<const ObjCMethodDecl*, 16> RequiredMethods;
+ SmallVector<const ObjCMethodDecl*, 16> OptionalMethods;
+ for (const auto *I : Methods)
+ if (I->isOptional())
+ OptionalMethods.push_back(I);
+ else
+ RequiredMethods.push_back(I);
+ Required = GenerateProtocolMethodList(RequiredMethods);
+ Optional = GenerateProtocolMethodList(OptionalMethods);
+ }
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
+ virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding);
+ /// Returns the name of ivar offset variables. In the GNUstep v1 ABI, this
+ /// contains the class and ivar names, in the v2 ABI this contains the type
+ /// encoding as well.
+ virtual std::string GetIVarOffsetVariableName(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ return Name;
+ }
/// Returns the variable used to store the offset of an instance variable.
llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar);
/// Emits a reference to a class. This allows the linker to object if there
/// is no class of the matching name.
-
-protected:
void EmitClassRef(const std::string &className);
/// Emits a pointer to the named class
@@ -476,7 +565,7 @@ protected:
public:
CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
- unsigned protocolClassVersion);
+ unsigned protocolClassVersion, unsigned classABI=1);
ConstantAddress GenerateConstantString(const StringLiteral *) override;
@@ -499,6 +588,14 @@ public:
Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) override;
llvm::Value *GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) override;
+ virtual llvm::Constant *GetConstantSelector(Selector Sel,
+ const std::string &TypeEncoding) {
+ llvm_unreachable("Runtime unable to generate constant selector");
+ }
+ llvm::Constant *GetConstantSelector(const ObjCMethodDecl *M) {
+ return GetConstantSelector(M->getSelector(),
+ CGM.getContext().getObjCEncodingForMethodDecl(M));
+ }
llvm::Constant *GetEHType(QualType T) override;
llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
@@ -698,7 +795,10 @@ class CGObjCGNUstep : public CGObjCGNU {
}
public:
- CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
+ CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNUstep(Mod, 9, 3, 1) {}
+ CGObjCGNUstep(CodeGenModule &Mod, unsigned ABI, unsigned ProtocolABI,
+ unsigned ClassABI) :
+ CGObjCGNU(Mod, ABI, ProtocolABI, ClassABI) {
const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;
llvm::StructType *SlotStructTy =
@@ -707,7 +807,7 @@ class CGObjCGNUstep : public CGObjCGNU {
// Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
SelectorTy, IdTy);
- // Slot_t objc_msg_lookup_super(struct objc_super*, SEL);
+ // Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
// If we're in ObjC++ mode, then we want to make
@@ -784,6 +884,951 @@ class CGObjCGNUstep : public CGObjCGNU {
}
};
+/// GNUstep Objective-C ABI version 2 implementation.
+/// This is the ABI that provides a clean break with the legacy GCC ABI and
+/// cleans up a number of things that were added to work around 1980s linkers.
+class CGObjCGNUstep2 : public CGObjCGNUstep {
+ /// The section for selectors.
+ static constexpr const char *const SelSection = "__objc_selectors";
+ /// The section for classes.
+ static constexpr const char *const ClsSection = "__objc_classes";
+ /// The section for references to classes.
+ static constexpr const char *const ClsRefSection = "__objc_class_refs";
+ /// The section for categories.
+ static constexpr const char *const CatSection = "__objc_cats";
+ /// The section for protocols.
+ static constexpr const char *const ProtocolSection = "__objc_protocols";
+ /// The section for protocol references.
+ static constexpr const char *const ProtocolRefSection = "__objc_protocol_refs";
+ /// The section for class aliases
+ static constexpr const char *const ClassAliasSection = "__objc_class_aliases";
+ /// The section for constexpr constant strings
+ static constexpr const char *const ConstantStringSection = "__objc_constant_string";
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+ /// A flag indicating if we've emitted at least one protocol.
+ /// If we haven't, then we need to emit an empty protocol, to ensure that the
+ /// __start__objc_protocols and __stop__objc_protocols sections exist.
+ bool EmittedProtocol = false;
+ /// A flag indicating if we've emitted at least one protocol reference.
+ /// If we haven't, then we need to emit an empty protocol, to ensure that the
+ /// __start__objc_protocol_refs and __stop__objc_protocol_refs sections
+ /// exist.
+ bool EmittedProtocolRef = false;
+ /// A flag indicating if we've emitted at least one class.
+ /// If we haven't, then we need to emit a placeholder class, to ensure that
+ /// the __start__objc_classes and __stop__objc_classes sections exist.
+ bool EmittedClass = false;
+ /// Generate the name of a symbol for a reference to a class. Accesses to
+ /// classes should be indirected via this.
+ std::string SymbolForClassRef(StringRef Name, bool isWeak) {
+ if (isWeak)
+ return (StringRef("._OBJC_WEAK_REF_CLASS_") + Name).str();
+ else
+ return (StringRef("._OBJC_REF_CLASS_") + Name).str();
+ }
+ /// Generate the name of a class symbol.
+ std::string SymbolForClass(StringRef Name) {
+ return (StringRef("._OBJC_CLASS_") + Name).str();
+ }
+ void CallRuntimeFunction(CGBuilderTy &B, StringRef FunctionName,
+ ArrayRef<llvm::Value*> Args) {
+ SmallVector<llvm::Type *,8> Types;
+ for (auto *Arg : Args)
+ Types.push_back(Arg->getType());
+ llvm::FunctionType *FT = llvm::FunctionType::get(B.getVoidTy(), Types,
+ false);
+ llvm::Value *Fn = CGM.CreateRuntimeFunction(FT, FunctionName);
+ B.CreateCall(Fn, Args);
+ }
+
+ ConstantAddress GenerateConstantString(const StringLiteral *SL) override {
+
+ auto Str = SL->getString();
+ CharUnits Align = CGM.getPointerAlign();
+
+ // Look for an existing one
+ llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+ if (old != ObjCStrings.end())
+ return ConstantAddress(old->getValue(), Align);
+
+ bool isNonASCII = SL->containsNonAscii();
+
+ auto LiteralLength = SL->getLength();
+
+ if ((CGM.getTarget().getPointerWidth(0) == 64) &&
+ (LiteralLength < 9) && !isNonASCII) {
+ // Tiny strings are only used on 64-bit platforms. They store 8 7-bit
+ // ASCII characters in the high 56 bits, followed by a 4-bit length and a
+ // 3-bit tag (which is always 4).
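+ // For example, the one-character literal "a" is encoded as
+ // ((uint64_t)'a' << 57) | (1 << 3) | 4.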
+ uint64_t str = 0;
+ // Fill in the characters
+ for (unsigned i=0 ; i<LiteralLength ; i++)
+ str |= ((uint64_t)SL->getCodeUnit(i)) << ((64 - 4 - 3) - (i*7));
+ // Fill in the length
+ str |= LiteralLength << 3;
+ // Set the tag
+ str |= 4;
+ auto *ObjCStr = llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int64Ty, str), IdTy);
+ ObjCStrings[Str] = ObjCStr;
+ return ConstantAddress(ObjCStr, Align);
+ }
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NSConstantString";
+
+ std::string Sym = SymbolForClass(StringClass);
+
+ llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
+
+ if (!isa)
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Sym);
+ else if (isa->getType() != PtrToIdTy)
+ isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+
+ // struct
+ // {
+ // Class isa;
+ // uint32_t flags;
+ // uint32_t length; // Number of codepoints
+ // uint32_t size; // Number of bytes
+ // uint32_t hash;
+ // const char *data;
+ // };
+
+ ConstantInitBuilder Builder(CGM);
+ auto Fields = Builder.beginStruct();
+ Fields.add(isa);
+ // For now, all non-ASCII strings are represented as UTF-16. As such, the
+ // number of bytes is simply double the number of UTF-16 codepoints. In
+ // ASCII strings, the number of bytes is equal to the number of
+ // codepoints.
+ if (isNonASCII) {
+ unsigned NumU8CodeUnits = Str.size();
+ // A UTF-16 representation of a unicode string contains at most the same
+ // number of code units as a UTF-8 representation. Allocate that much
+ // space, plus one for the final null character.
+ SmallVector<llvm::UTF16, 128> ToBuf(NumU8CodeUnits + 1);
+ const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)Str.data();
+ llvm::UTF16 *ToPtr = &ToBuf[0];
+ (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumU8CodeUnits,
+ &ToPtr, ToPtr + NumU8CodeUnits, llvm::strictConversion);
+ uint32_t StringLength = ToPtr - &ToBuf[0];
+ // Add null terminator
+ *ToPtr = 0;
+ // Flags: 2 indicates UTF-16 encoding
+ Fields.addInt(Int32Ty, 2);
+ // Number of UTF-16 codepoints
+ Fields.addInt(Int32Ty, StringLength);
+ // Number of bytes
+ Fields.addInt(Int32Ty, StringLength * 2);
+ // Hash. Not currently initialised by the compiler.
+ Fields.addInt(Int32Ty, 0);
+ // pointer to the data string.
+ auto Arr = llvm::makeArrayRef(&ToBuf[0], ToPtr+1);
+ auto *C = llvm::ConstantDataArray::get(VMContext, Arr);
+ auto *Buffer = new llvm::GlobalVariable(TheModule, C->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, C, ".str");
+ Buffer->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ Fields.add(Buffer);
+ } else {
+ // Flags: 0 indicates ASCII encoding
+ Fields.addInt(Int32Ty, 0);
+ // Number of UTF-16 codepoints, each ASCII byte is a UTF-16 codepoint
+ Fields.addInt(Int32Ty, Str.size());
+ // Number of bytes
+ Fields.addInt(Int32Ty, Str.size());
+ // Hash. Not currently initialised by the compiler.
+ Fields.addInt(Int32Ty, 0);
+ // Data pointer
+ Fields.add(MakeConstantString(Str));
+ }
+ std::string StringName;
+ bool isNamed = !isNonASCII;
+ if (isNamed) {
+ StringName = ".objc_str_";
+ for (int i=0,e=Str.size() ; i<e ; ++i) {
+ unsigned char c = Str[i];
+ if (isalnum(c))
+ StringName += c;
+ else if (c == ' ')
+ StringName += '_';
+ else {
+ isNamed = false;
+ break;
+ }
+ }
+ }
+ auto *ObjCStrGV =
+ Fields.finishAndCreateGlobal(
+ isNamed ? StringRef(StringName) : ".objc_string",
+ Align, false, isNamed ? llvm::GlobalValue::LinkOnceODRLinkage
+ : llvm::GlobalValue::PrivateLinkage);
+ ObjCStrGV->setSection(ConstantStringSection);
+ if (isNamed) {
+ ObjCStrGV->setComdat(TheModule.getOrInsertComdat(StringName));
+ ObjCStrGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ }
+ llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
+ ObjCStrings[Str] = ObjCStr;
+ ConstantStrings.push_back(ObjCStr);
+ return ConstantAddress(ObjCStr, Align);
+ }
+
+ void PushProperty(ConstantArrayBuilder &PropertiesArray,
+ const ObjCPropertyDecl *property,
+ const Decl *OCD,
+ bool isSynthesized=true,
+ bool isDynamic=true) override {
+ // struct objc_property
+ // {
+ // const char *name;
+ // const char *attributes;
+ // const char *type;
+ // SEL getter;
+ // SEL setter;
+ // };
+ auto Fields = PropertiesArray.beginStruct(PropertyMetadataTy);
+ ASTContext &Context = CGM.getContext();
+ Fields.add(MakeConstantString(property->getNameAsString()));
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForPropertyDecl(property, OCD);
+ Fields.add(MakeConstantString(TypeStr));
+ std::string typeStr;
+ Context.getObjCEncodingForType(property->getType(), typeStr);
+ Fields.add(MakeConstantString(typeStr));
+ auto addPropertyMethod = [&](const ObjCMethodDecl *accessor) {
+ if (accessor) {
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(accessor);
+ Fields.add(GetConstantSelector(accessor->getSelector(), TypeStr));
+ } else {
+ Fields.add(NULLPtr);
+ }
+ };
+ addPropertyMethod(property->getGetterMethodDecl());
+ addPropertyMethod(property->getSetterMethodDecl());
+ Fields.finishAndAddTo(PropertiesArray);
+ }
+
+ llvm::Constant *
+ GenerateProtocolMethodList(ArrayRef<const ObjCMethodDecl*> Methods) override {
+ // struct objc_protocol_method_description
+ // {
+ // SEL selector;
+ // const char *types;
+ // };
+ llvm::StructType *ObjCMethodDescTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, PtrToInt8Ty });
+ ASTContext &Context = CGM.getContext();
+ ConstantInitBuilder Builder(CGM);
+ // struct objc_protocol_method_description_list
+ // {
+ // int count;
+ // int size;
+ // struct objc_protocol_method_description methods[];
+ // };
+ auto MethodList = Builder.beginStruct();
+ // int count;
+ MethodList.addInt(IntTy, Methods.size());
+ // int size; // sizeof(struct objc_method_description)
+ llvm::DataLayout td(&TheModule);
+ MethodList.addInt(IntTy, td.getTypeSizeInBits(ObjCMethodDescTy) /
+ CGM.getContext().getCharWidth());
+ // struct objc_method_description[]
+ auto MethodArray = MethodList.beginArray(ObjCMethodDescTy);
+ for (auto *M : Methods) {
+ auto Method = MethodArray.beginStruct(ObjCMethodDescTy);
+ Method.add(CGObjCGNU::GetConstantSelector(M));
+ Method.add(GetTypeString(Context.getObjCEncodingForMethodDecl(M, true)));
+ Method.finishAndAddTo(MethodArray);
+ }
+ MethodArray.finishAndAddTo(MethodList);
+ return MethodList.finishAndCreateGlobal(".objc_protocol_method_list",
+ CGM.getPointerAlign());
+ }
+
+ llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
+ llvm::Value *cmd, MessageSendInfo &MSI) override {
+ // Don't access the slot unless we're trying to cache the result.
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy).getPointer(), cmd};
+ return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
+ }
+
+ llvm::GlobalVariable *GetClassVar(StringRef Name, bool isWeak=false) {
+ std::string SymbolName = SymbolForClassRef(Name, isWeak);
+ auto *ClassSymbol = TheModule.getNamedGlobal(SymbolName);
+ if (ClassSymbol)
+ return ClassSymbol;
+ ClassSymbol = new llvm::GlobalVariable(TheModule,
+ IdTy, false, llvm::GlobalValue::ExternalLinkage,
+ nullptr, SymbolName);
+ // If this is a weak symbol, then we are creating a valid definition for
+ // the symbol, pointing to a weak definition of the real class pointer. If
+ // this is not a weak reference, then we are expecting another compilation
+ // unit to provide the real indirection symbol.
+ if (isWeak)
+ ClassSymbol->setInitializer(new llvm::GlobalVariable(TheModule,
+ Int8Ty, false, llvm::GlobalValue::ExternalWeakLinkage,
+ nullptr, SymbolForClass(Name)));
+ assert(ClassSymbol->getName() == SymbolName);
+ return ClassSymbol;
+ }
+ llvm::Value *GetClassNamed(CodeGenFunction &CGF,
+ const std::string &Name,
+ bool isWeak) override {
+ return CGF.Builder.CreateLoad(Address(GetClassVar(Name, isWeak),
+ CGM.getPointerAlign()));
+ }
+ int32_t FlagsForOwnership(Qualifiers::ObjCLifetime Ownership) {
+ // typedef enum {
+ // ownership_invalid = 0,
+ // ownership_strong = 1,
+ // ownership_weak = 2,
+ // ownership_unsafe = 3
+ // } ivar_ownership;
+ int Flag;
+ switch (Ownership) {
+ case Qualifiers::OCL_Strong:
+ Flag = 1;
+ break;
+ case Qualifiers::OCL_Weak:
+ Flag = 2;
+ break;
+ case Qualifiers::OCL_ExplicitNone:
+ Flag = 3;
+ break;
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_Autoreleasing:
+ assert(Ownership != Qualifiers::OCL_Autoreleasing);
+ Flag = 0;
+ }
+ return Flag;
+ }
+ llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets,
+ ArrayRef<llvm::Constant *> IvarAlign,
+ ArrayRef<Qualifiers::ObjCLifetime> IvarOwnership) override {
+ llvm_unreachable("Method should not be called!");
+ }
+
+ llvm::Constant *GenerateEmptyProtocol(StringRef ProtocolName) override {
+ std::string Name = SymbolForProtocol(ProtocolName);
+ auto *GV = TheModule.getGlobalVariable(Name);
+ if (!GV) {
+ // Emit a placeholder symbol.
+ GV = new llvm::GlobalVariable(TheModule, ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name);
+ GV->setAlignment(CGM.getPointerAlign().getQuantity());
+ }
+ return llvm::ConstantExpr::getBitCast(GV, ProtocolPtrTy);
+ }
+
+ /// Existing protocol references.
+ llvm::StringMap<llvm::Constant*> ExistingProtocolRefs;
+
+ llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
+ const ObjCProtocolDecl *PD) override {
+ auto Name = PD->getNameAsString();
+ auto *&Ref = ExistingProtocolRefs[Name];
+ if (!Ref) {
+ auto *&Protocol = ExistingProtocols[Name];
+ if (!Protocol)
+ Protocol = GenerateProtocolRef(PD);
+ std::string RefName = SymbolForProtocolRef(Name);
+ assert(!TheModule.getGlobalVariable(RefName));
+ // Emit a reference symbol.
+ auto GV = new llvm::GlobalVariable(TheModule, ProtocolPtrTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ llvm::ConstantExpr::getBitCast(Protocol, ProtocolPtrTy), RefName);
+ GV->setSection(ProtocolRefSection);
+ GV->setAlignment(CGM.getPointerAlign().getQuantity());
+ Ref = GV;
+ }
+ EmittedProtocolRef = true;
+ return CGF.Builder.CreateAlignedLoad(Ref, CGM.getPointerAlign());
+ }
+
+ llvm::Constant *GenerateProtocolList(ArrayRef<llvm::Constant*> Protocols) {
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(ProtocolPtrTy,
+ Protocols.size());
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Protocols);
+ ConstantInitBuilder builder(CGM);
+ auto ProtocolBuilder = builder.beginStruct();
+ ProtocolBuilder.addNullPointer(PtrTy);
+ ProtocolBuilder.addInt(SizeTy, Protocols.size());
+ ProtocolBuilder.add(ProtocolArray);
+ return ProtocolBuilder.finishAndCreateGlobal(".objc_protocol_list",
+ CGM.getPointerAlign(), false, llvm::GlobalValue::InternalLinkage);
+ }
+
+ void GenerateProtocol(const ObjCProtocolDecl *PD) override {
+ // Do nothing - we only emit referenced protocols.
+ }
+ llvm::Constant *GenerateProtocolRef(const ObjCProtocolDecl *PD) {
+ std::string ProtocolName = PD->getNameAsString();
+ auto *&Protocol = ExistingProtocols[ProtocolName];
+ if (Protocol)
+ return Protocol;
+
+ EmittedProtocol = true;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ SmallVector<llvm::Constant*, 16> Protocols;
+ for (const auto *PI : PD->protocols())
+ Protocols.push_back(
+ llvm::ConstantExpr::getBitCast(GenerateProtocolRef(PI),
+ ProtocolPtrTy));
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+
+ // Collect information about methods
+ llvm::Constant *InstanceMethodList, *OptionalInstanceMethodList;
+ llvm::Constant *ClassMethodList, *OptionalClassMethodList;
+ EmitProtocolMethodList(PD->instance_methods(), InstanceMethodList,
+ OptionalInstanceMethodList);
+ EmitProtocolMethodList(PD->class_methods(), ClassMethodList,
+ OptionalClassMethodList);
+
+ auto SymName = SymbolForProtocol(ProtocolName);
+ auto *OldGV = TheModule.getGlobalVariable(SymName);
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ ConstantInitBuilder builder(CGM);
+ auto ProtocolBuilder = builder.beginStruct();
+ ProtocolBuilder.add(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ ProtocolBuilder.add(MakeConstantString(ProtocolName));
+ ProtocolBuilder.add(ProtocolList);
+ ProtocolBuilder.add(InstanceMethodList);
+ ProtocolBuilder.add(ClassMethodList);
+ ProtocolBuilder.add(OptionalInstanceMethodList);
+ ProtocolBuilder.add(OptionalClassMethodList);
+ // Required instance properties
+ ProtocolBuilder.add(GeneratePropertyList(nullptr, PD, false, false));
+ // Optional instance properties
+ ProtocolBuilder.add(GeneratePropertyList(nullptr, PD, false, true));
+ // Required class properties
+ ProtocolBuilder.add(GeneratePropertyList(nullptr, PD, true, false));
+ // Optional class properties
+ ProtocolBuilder.add(GeneratePropertyList(nullptr, PD, true, true));
+
+ auto *GV = ProtocolBuilder.finishAndCreateGlobal(SymName,
+ CGM.getPointerAlign(), false, llvm::GlobalValue::ExternalLinkage);
+ GV->setSection(ProtocolSection);
+ GV->setComdat(TheModule.getOrInsertComdat(SymName));
+ if (OldGV) {
+ OldGV->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GV,
+ OldGV->getType()));
+ OldGV->removeFromParent();
+ GV->setName(SymName);
+ }
+ Protocol = GV;
+ return GV;
+ }
+ llvm::Constant *EnforceType(llvm::Constant *Val, llvm::Type *Ty) {
+ if (Val->getType() == Ty)
+ return Val;
+ return llvm::ConstantExpr::getBitCast(Val, Ty);
+ }
+ llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
+ const std::string &TypeEncoding) override {
+ return GetConstantSelector(Sel, TypeEncoding);
+ }
+ llvm::Constant *GetTypeString(llvm::StringRef TypeEncoding) {
+ if (TypeEncoding.empty())
+ return NULLPtr;
+ std::string MangledTypes = TypeEncoding;
+ std::replace(MangledTypes.begin(), MangledTypes.end(),
+ '@', '\1');
+ std::string TypesVarName = ".objc_sel_types_" + MangledTypes;
+ auto *TypesGlobal = TheModule.getGlobalVariable(TypesVarName);
+ if (!TypesGlobal) {
+ llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
+ TypeEncoding);
+ auto *GV = new llvm::GlobalVariable(TheModule, Init->getType(),
+ true, llvm::GlobalValue::LinkOnceODRLinkage, Init, TypesVarName);
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ TypesGlobal = GV;
+ }
+ return llvm::ConstantExpr::getGetElementPtr(TypesGlobal->getValueType(),
+ TypesGlobal, Zeros);
+ }
+ llvm::Constant *GetConstantSelector(Selector Sel,
+ const std::string &TypeEncoding) override {
+ // @ is used as a special character in symbol names (used for symbol
+ // versioning), so mangle the name to not include it. Replace it with a
+ // character that is not a valid type encoding character (and, being
+ // non-printable, never will be!)
+ std::string MangledTypes = TypeEncoding;
+ std::replace(MangledTypes.begin(), MangledTypes.end(),
+ '@', '\1');
+ auto SelVarName = (StringRef(".objc_selector_") + Sel.getAsString() + "_" +
+ MangledTypes).str();
+ if (auto *GV = TheModule.getNamedGlobal(SelVarName))
+ return EnforceType(GV, SelectorTy);
+ ConstantInitBuilder builder(CGM);
+ auto SelBuilder = builder.beginStruct();
+ SelBuilder.add(ExportUniqueString(Sel.getAsString(), ".objc_sel_name_",
+ true));
+ SelBuilder.add(GetTypeString(TypeEncoding));
+ auto *GV = SelBuilder.finishAndCreateGlobal(SelVarName,
+ CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
+ GV->setComdat(TheModule.getOrInsertComdat(SelVarName));
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setSection(SelSection);
+ auto *SelVal = EnforceType(GV, SelectorTy);
+ return SelVal;
+ }
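+ /// Returns the __start_<section> and __stop_<section> symbols, which ELF
+ /// linkers define automatically to bracket the contents of the section.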
+ std::pair<llvm::Constant*,llvm::Constant*>
+ GetSectionBounds(StringRef Section) {
+ auto *Start = new llvm::GlobalVariable(TheModule, PtrTy,
+ /*isConstant*/false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, StringRef("__start_") +
+ Section);
+ Start->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ auto *Stop = new llvm::GlobalVariable(TheModule, PtrTy,
+ /*isConstant*/false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, StringRef("__stop_") +
+ Section);
+ Stop->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return { Start, Stop };
+ }
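+ /// Emits a hidden linkonce function that hands the bounds of every
+ /// Objective-C metadata section to the runtime's __objc_load function.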
+ llvm::Function *ModuleInitFunction() override {
+ llvm::Function *LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+ llvm::GlobalValue::LinkOnceODRLinkage, ".objcv2_load_function",
+ &TheModule);
+ LoadFunction->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ LoadFunction->setComdat(TheModule.getOrInsertComdat(".objcv2_load_function"));
+
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy B(CGM, VMContext);
+ B.SetInsertPoint(EntryBB);
+ ConstantInitBuilder builder(CGM);
+ auto InitStructBuilder = builder.beginStruct();
+ InitStructBuilder.addInt(Int64Ty, 0);
+ auto addSection = [&](const char *section) {
+ auto bounds = GetSectionBounds(section);
+ InitStructBuilder.add(bounds.first);
+ InitStructBuilder.add(bounds.second);
+ };
+ addSection(SelSection);
+ addSection(ClsSection);
+ addSection(ClsRefSection);
+ addSection(CatSection);
+ addSection(ProtocolSection);
+ addSection(ProtocolRefSection);
+ addSection(ClassAliasSection);
+ addSection(ConstantStringSection);
+ auto *InitStruct = InitStructBuilder.finishAndCreateGlobal(".objc_init",
+ CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
+ InitStruct->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ InitStruct->setComdat(TheModule.getOrInsertComdat(".objc_init"));
+
+    CallRuntimeFunction(B, "__objc_load", {InitStruct});
+ B.CreateRetVoid();
+ // Make sure that the optimisers don't delete this function.
+ CGM.addCompilerUsedGlobal(LoadFunction);
+ // FIXME: Currently ELF only!
+    // We have to do this by hand, rather than with @llvm.global_ctors, so
+    // that the linker can remove the duplicate invocations.
+ auto *InitVar = new llvm::GlobalVariable(TheModule, LoadFunction->getType(),
+ /*isConstant*/true, llvm::GlobalValue::LinkOnceAnyLinkage,
+ LoadFunction, ".objc_ctor");
+ // Check that this hasn't been renamed. This shouldn't happen, because
+ // this function should be called precisely once.
+ assert(InitVar->getName() == ".objc_ctor");
+ InitVar->setSection(".ctors");
+ InitVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ InitVar->setComdat(TheModule.getOrInsertComdat(".objc_ctor"));
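+    // Both the load function and this .ctors entry live in COMDAT groups, so
+    // the linker keeps exactly one copy of each in the final image even
+    // though every translation unit emits them.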
+ CGM.addCompilerUsedGlobal(InitVar);
+ for (auto *C : Categories) {
+ auto *Cat = cast<llvm::GlobalVariable>(C->stripPointerCasts());
+ Cat->setSection(CatSection);
+ CGM.addUsedGlobal(Cat);
+ }
+    // Add a null value for each special section so that we can always
+ // guarantee that the _start and _stop symbols will exist and be
+ // meaningful.
+ auto createNullGlobal = [&](StringRef Name, ArrayRef<llvm::Constant*> Init,
+ StringRef Section) {
+ auto nullBuilder = builder.beginStruct();
+ for (auto *F : Init)
+ nullBuilder.add(F);
+ auto GV = nullBuilder.finishAndCreateGlobal(Name, CGM.getPointerAlign(),
+ false, llvm::GlobalValue::LinkOnceODRLinkage);
+ GV->setSection(Section);
+ GV->setComdat(TheModule.getOrInsertComdat(Name));
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.addUsedGlobal(GV);
+ return GV;
+ };
+ createNullGlobal(".objc_null_selector", {NULLPtr, NULLPtr}, SelSection);
+ if (Categories.empty())
+ createNullGlobal(".objc_null_category", {NULLPtr, NULLPtr,
+ NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr}, CatSection);
+ if (!EmittedClass) {
+ createNullGlobal(".objc_null_cls_init_ref", NULLPtr, ClsSection);
+ createNullGlobal(".objc_null_class_ref", { NULLPtr, NULLPtr },
+ ClsRefSection);
+ }
+ if (!EmittedProtocol)
+ createNullGlobal(".objc_null_protocol", {NULLPtr, NULLPtr, NULLPtr,
+ NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr,
+ NULLPtr}, ProtocolSection);
+ if (!EmittedProtocolRef)
+ createNullGlobal(".objc_null_protocol_ref", {NULLPtr}, ProtocolRefSection);
+ if (!ClassAliases.empty())
+ for (auto clsAlias : ClassAliases)
+ createNullGlobal(std::string(".objc_class_alias") +
+ clsAlias.second, { MakeConstantString(clsAlias.second),
+ GetClassVar(clsAlias.first) }, ClassAliasSection);
+ else
+ createNullGlobal(".objc_null_class_alias", { NULLPtr, NULLPtr },
+ ClassAliasSection);
+ if (ConstantStrings.empty()) {
+ auto i32Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ createNullGlobal(".objc_null_constant_string", { NULLPtr, i32Zero,
+ i32Zero, i32Zero, i32Zero, NULLPtr }, ConstantStringSection);
+ }
+ ConstantStrings.clear();
+ Categories.clear();
+ Classes.clear();
+    return nullptr; // CGObjCGNU::ModuleInitFunction();
+ }
+ /// In the v2 ABI, ivar offset variables use the type encoding in their name
+ /// to trigger linker failures if the types don't match.
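+  /// For example (illustrative): an ivar 'id _delegate' in class Foo yields
+  /// "__objc_ivar_offset_Foo._delegate.<0x01>", the trailing byte being the
+  /// '@' of its type encoding after the mangling below.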
+ std::string GetIVarOffsetVariableName(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) override {
+ std::string TypeEncoding;
+ CGM.getContext().getObjCEncodingForType(Ivar->getType(), TypeEncoding);
+ // Prevent the @ from being interpreted as a symbol version.
+ std::replace(TypeEncoding.begin(), TypeEncoding.end(),
+ '@', '\1');
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString() + '.' + TypeEncoding;
+ return Name;
+ }
+ llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) override {
+ const std::string Name = GetIVarOffsetVariableName(Ivar->getContainingInterface(), Ivar);
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer)
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule, IntTy, false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name);
+ CharUnits Align = CGM.getIntAlign();
+ llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(IvarOffsetPointer, Align);
+ if (Offset->getType() != PtrDiffTy)
+ Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
+ return Offset;
+ }
+ void GenerateClass(const ObjCImplementationDecl *OID) override {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the class name
+ ObjCInterfaceDecl *classDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string className = classDecl->getNameAsString();
+ auto *classNameConstant = MakeConstantString(className);
+
+ ConstantInitBuilder builder(CGM);
+ auto metaclassFields = builder.beginStruct();
+ // struct objc_class *isa;
+ metaclassFields.addNullPointer(PtrTy);
+ // struct objc_class *super_class;
+ metaclassFields.addNullPointer(PtrTy);
+ // const char *name;
+ metaclassFields.add(classNameConstant);
+ // long version;
+ metaclassFields.addInt(LongTy, 0);
+ // unsigned long info;
+ // objc_class_flag_meta
+ metaclassFields.addInt(LongTy, 1);
+ // long instance_size;
+ // Setting this to zero is consistent with the older ABI, but it might be
+ // more sensible to set this to sizeof(struct objc_class)
+ metaclassFields.addInt(LongTy, 0);
+ // struct objc_ivar_list *ivars;
+ metaclassFields.addNullPointer(PtrTy);
+ // struct objc_method_list *methods
+ // FIXME: Almost identical code is copied and pasted below for the
+ // class, but refactoring it cleanly requires C++14 generic lambdas.
+ if (OID->classmeth_begin() == OID->classmeth_end())
+ metaclassFields.addNullPointer(PtrTy);
+ else {
+ SmallVector<ObjCMethodDecl*, 16> ClassMethods;
+ ClassMethods.insert(ClassMethods.begin(), OID->classmeth_begin(),
+ OID->classmeth_end());
+ metaclassFields.addBitCast(
+ GenerateMethodList(className, "", ClassMethods, true),
+ PtrTy);
+ }
+ // void *dtable;
+ metaclassFields.addNullPointer(PtrTy);
+ // IMP cxx_construct;
+ metaclassFields.addNullPointer(PtrTy);
+ // IMP cxx_destruct;
+ metaclassFields.addNullPointer(PtrTy);
+ // struct objc_class *subclass_list
+ metaclassFields.addNullPointer(PtrTy);
+ // struct objc_class *sibling_class
+ metaclassFields.addNullPointer(PtrTy);
+ // struct objc_protocol_list *protocols;
+ metaclassFields.addNullPointer(PtrTy);
+ // struct reference_list *extra_data;
+ metaclassFields.addNullPointer(PtrTy);
+ // long abi_version;
+ metaclassFields.addInt(LongTy, 0);
+ // struct objc_property_list *properties
+ metaclassFields.add(GeneratePropertyList(OID, classDecl, /*isClassProperty*/true));
+
+ auto *metaclass = metaclassFields.finishAndCreateGlobal("._OBJC_METACLASS_"
+ + className, CGM.getPointerAlign());
+
+ auto classFields = builder.beginStruct();
+ // struct objc_class *isa;
+ classFields.add(metaclass);
+ // struct objc_class *super_class;
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ if (SuperClassDecl) {
+ auto SuperClassName = SymbolForClass(SuperClassDecl->getNameAsString());
+ llvm::Constant *SuperClass = TheModule.getNamedGlobal(SuperClassName);
+ if (!SuperClass)
+ {
+ SuperClass = new llvm::GlobalVariable(TheModule, PtrTy, false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, SuperClassName);
+ }
+ classFields.add(llvm::ConstantExpr::getBitCast(SuperClass, PtrTy));
+ } else
+ classFields.addNullPointer(PtrTy);
+ // const char *name;
+ classFields.add(classNameConstant);
+ // long version;
+ classFields.addInt(LongTy, 0);
+ // unsigned long info;
+ // !objc_class_flag_meta
+ classFields.addInt(LongTy, 0);
+ // long instance_size;
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
+ // Instance size is negative for classes that have not yet had their ivar
+ // layout calculated.
+ classFields.addInt(LongTy,
+ 0 - (Context.getASTObjCImplementationLayout(OID).getSize().getQuantity() -
+ superInstanceSize));
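+    // For example (illustrative): a class adding a single 'id' ivar (8 bytes
+    // on LP64) to its superclass is emitted with instance_size == -8 here;
+    // the runtime is expected to compute the real size once ivar offsets are
+    // resolved at load time.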
+
+ if (classDecl->all_declared_ivar_begin() == nullptr)
+ classFields.addNullPointer(PtrTy);
+ else {
+ int ivar_count = 0;
+ for (const ObjCIvarDecl *IVD = classDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) ivar_count++;
+ llvm::DataLayout td(&TheModule);
+ // struct objc_ivar_list *ivars;
+ ConstantInitBuilder b(CGM);
+ auto ivarListBuilder = b.beginStruct();
+ // int count;
+ ivarListBuilder.addInt(IntTy, ivar_count);
+ // size_t size;
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ Int32Ty,
+ Int32Ty);
+ ivarListBuilder.addInt(SizeTy, td.getTypeSizeInBits(ObjCIvarTy) /
+ CGM.getContext().getCharWidth());
+ // struct objc_ivar ivars[]
+ auto ivarArrayBuilder = ivarListBuilder.beginArray();
+ CodeGenTypes &Types = CGM.getTypes();
+ for (const ObjCIvarDecl *IVD = classDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ auto ivarTy = IVD->getType();
+ auto ivarBuilder = ivarArrayBuilder.beginStruct();
+ // const char *name;
+ ivarBuilder.add(MakeConstantString(IVD->getNameAsString()));
+ // const char *type;
+ std::string TypeStr;
+        // Context.getObjCEncodingForType(ivarTy, TypeStr, IVD, true);
+ Context.getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, ivarTy, TypeStr, true);
+ ivarBuilder.add(MakeConstantString(TypeStr));
+ // int *offset;
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
+ uint64_t Offset = BaseOffset - superInstanceSize;
+ llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
+ std::string OffsetName = GetIVarOffsetVariableName(classDecl, IVD);
+ llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
+ if (OffsetVar)
+ OffsetVar->setInitializer(OffsetValue);
+ else
+ OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ OffsetValue, OffsetName);
+ auto ivarVisibility =
+ (IVD->getAccessControl() == ObjCIvarDecl::Private ||
+ IVD->getAccessControl() == ObjCIvarDecl::Package ||
+ classDecl->getVisibility() == HiddenVisibility) ?
+ llvm::GlobalValue::HiddenVisibility :
+ llvm::GlobalValue::DefaultVisibility;
+ OffsetVar->setVisibility(ivarVisibility);
+ ivarBuilder.add(OffsetVar);
+ // Ivar size
+ ivarBuilder.addInt(Int32Ty,
+ td.getTypeSizeInBits(Types.ConvertType(ivarTy)) /
+ CGM.getContext().getCharWidth());
+ // Alignment will be stored as a base-2 log of the alignment.
+ int align = llvm::Log2_32(Context.getTypeAlignInChars(ivarTy).getQuantity());
+ // Objects that require more than 2^64-byte alignment should be impossible!
+ assert(align < 64);
+ // uint32_t flags;
+ // Bits 0-1 are ownership.
+ // Bit 2 indicates an extended type encoding
+        // Bits 3-8 contain log2(alignment)
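+        // Worked example (illustrative, assuming FlagsForOwnership() maps
+        // __strong to 1): an 8-byte-aligned strong ivar gets
+        // (3 << 3) | (1 << 2) | 1 == 0x1d.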
+ ivarBuilder.addInt(Int32Ty,
+ (align << 3) | (1<<2) |
+ FlagsForOwnership(ivarTy.getQualifiers().getObjCLifetime()));
+ ivarBuilder.finishAndAddTo(ivarArrayBuilder);
+ }
+ ivarArrayBuilder.finishAndAddTo(ivarListBuilder);
+ auto ivarList = ivarListBuilder.finishAndCreateGlobal(".objc_ivar_list",
+ CGM.getPointerAlign(), /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ classFields.add(ivarList);
+ }
+ // struct objc_method_list *methods
+ SmallVector<const ObjCMethodDecl*, 16> InstanceMethods;
+ InstanceMethods.insert(InstanceMethods.begin(), OID->instmeth_begin(),
+ OID->instmeth_end());
+ for (auto *propImpl : OID->property_impls())
+ if (propImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ auto addIfExists = [&](const ObjCMethodDecl* OMD) {
+ if (OMD)
+ InstanceMethods.push_back(OMD);
+ };
+ addIfExists(prop->getGetterMethodDecl());
+ addIfExists(prop->getSetterMethodDecl());
+ }
+
+ if (InstanceMethods.size() == 0)
+ classFields.addNullPointer(PtrTy);
+ else
+ classFields.addBitCast(
+ GenerateMethodList(className, "", InstanceMethods, false),
+ PtrTy);
+ // void *dtable;
+ classFields.addNullPointer(PtrTy);
+ // IMP cxx_construct;
+ classFields.addNullPointer(PtrTy);
+ // IMP cxx_destruct;
+ classFields.addNullPointer(PtrTy);
+ // struct objc_class *subclass_list
+ classFields.addNullPointer(PtrTy);
+ // struct objc_class *sibling_class
+ classFields.addNullPointer(PtrTy);
+ // struct objc_protocol_list *protocols;
+ SmallVector<llvm::Constant*, 16> Protocols;
+ for (const auto *I : classDecl->protocols())
+ Protocols.push_back(
+ llvm::ConstantExpr::getBitCast(GenerateProtocolRef(I),
+ ProtocolPtrTy));
+ if (Protocols.empty())
+ classFields.addNullPointer(PtrTy);
+ else
+ classFields.add(GenerateProtocolList(Protocols));
+ // struct reference_list *extra_data;
+ classFields.addNullPointer(PtrTy);
+ // long abi_version;
+ classFields.addInt(LongTy, 0);
+ // struct objc_property_list *properties
+ classFields.add(GeneratePropertyList(OID, classDecl));
+
+ auto *classStruct =
+ classFields.finishAndCreateGlobal(SymbolForClass(className),
+ CGM.getPointerAlign(), false, llvm::GlobalValue::ExternalLinkage);
+
+ if (CGM.getTriple().isOSBinFormatCOFF()) {
+ auto Storage = llvm::GlobalValue::DefaultStorageClass;
+ if (OID->getClassInterface()->hasAttr<DLLImportAttr>())
+ Storage = llvm::GlobalValue::DLLImportStorageClass;
+ else if (OID->getClassInterface()->hasAttr<DLLExportAttr>())
+ Storage = llvm::GlobalValue::DLLExportStorageClass;
+ cast<llvm::GlobalValue>(classStruct)->setDLLStorageClass(Storage);
+ }
+
+ auto *classRefSymbol = GetClassVar(className);
+ classRefSymbol->setSection(ClsRefSection);
+ classRefSymbol->setInitializer(llvm::ConstantExpr::getBitCast(classStruct, IdTy));
+
+ // Resolve the class aliases, if they exist.
+ // FIXME: Class pointer aliases shouldn't exist!
+ if (ClassPtrAlias) {
+ ClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(classStruct, IdTy));
+ ClassPtrAlias->eraseFromParent();
+ ClassPtrAlias = nullptr;
+ }
+ if (auto Placeholder =
+ TheModule.getNamedGlobal(SymbolForClass(className)))
+ if (Placeholder != classStruct) {
+ Placeholder->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(classStruct, Placeholder->getType()));
+ Placeholder->eraseFromParent();
+ classStruct->setName(SymbolForClass(className));
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(metaclass, IdTy));
+ MetaClassPtrAlias->eraseFromParent();
+ MetaClassPtrAlias = nullptr;
+ }
+ assert(classStruct->getName() == SymbolForClass(className));
+
+ auto classInitRef = new llvm::GlobalVariable(TheModule,
+ classStruct->getType(), false, llvm::GlobalValue::ExternalLinkage,
+ classStruct, "._OBJC_INIT_CLASS_" + className);
+ classInitRef->setSection(ClsSection);
+ CGM.addUsedGlobal(classInitRef);
+
+ EmittedClass = true;
+ }
+ public:
+ CGObjCGNUstep2(CodeGenModule &Mod) : CGObjCGNUstep(Mod, 10, 4, 2) {
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy);
+ // struct objc_property
+ // {
+ // const char *name;
+ // const char *attributes;
+ // const char *type;
+ // SEL getter;
+ // SEL setter;
+ // }
+ PropertyMetadataTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
+ }
+
+};
+
/// Support for the ObjFW runtime.
class CGObjCObjFW: public CGObjCGNU {
protected:
@@ -878,22 +1923,12 @@ void CGObjCGNU::EmitClassRef(const std::string &className) {
llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
-static std::string SymbolNameForMethod( StringRef ClassName,
- StringRef CategoryName, const Selector MethodName,
- bool isClassMethod) {
- std::string MethodNameColonStripped = MethodName.getAsString();
- std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
- ':', '_');
- return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
- CategoryName + "_" + MethodNameColonStripped).str();
-}
-
CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
- unsigned protocolClassVersion)
+ unsigned protocolClassVersion, unsigned classABI)
: CGObjCRuntime(cgm), TheModule(CGM.getModule()),
VMContext(cgm.getLLVMContext()), ClassPtrAlias(nullptr),
MetaClassPtrAlias(nullptr), RuntimeVersion(runtimeABIVersion),
- ProtocolVersion(protocolClassVersion) {
+ ProtocolVersion(protocolClassVersion), ClassABIVersion(classABI) {
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
@@ -911,6 +1946,8 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
Int8Ty = llvm::Type::getInt8Ty(VMContext);
// C string type. Used in lots of places.
PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+ ProtocolPtrTy = llvm::PointerType::getUnqual(
+ Types.ConvertType(CGM.getContext().getObjCProtoType()));
Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
Zeros[1] = Zeros[0];
@@ -942,6 +1979,31 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
IdTy = PtrToInt8Ty;
}
PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+ ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty, // name
+ PtrToInt8Ty, // protocols
+ PtrToInt8Ty, // instance methods
+ PtrToInt8Ty, // class methods
+ PtrToInt8Ty, // optional instance methods
+ PtrToInt8Ty, // optional class methods
+ PtrToInt8Ty, // properties
+ PtrToInt8Ty);// optional properties
+
+ // struct objc_property_gsv1
+ // {
+ // const char *name;
+ // char attributes;
+ // char attributes2;
+ // char unused1;
+ // char unused2;
+ // const char *getter_name;
+ // const char *getter_types;
+ // const char *setter_name;
+ // const char *setter_types;
+ // }
+ PropertyMetadataTy = llvm::StructType::get(CGM.getLLVMContext(), {
+ PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty });
ObjCSuperTy = llvm::StructType::get(IdTy, IdTy);
PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
@@ -1035,16 +2097,8 @@ llvm::Value *CGObjCGNU::GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *OID) {
auto *Value =
GetClassNamed(CGF, OID->getNameAsString(), OID->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF()) {
- if (auto *ClassSymbol = dyn_cast<llvm::GlobalVariable>(Value)) {
- auto DLLStorage = llvm::GlobalValue::DefaultStorageClass;
- if (OID->hasAttr<DLLExportAttr>())
- DLLStorage = llvm::GlobalValue::DLLExportStorageClass;
- else if (OID->hasAttr<DLLImportAttr>())
- DLLStorage = llvm::GlobalValue::DLLImportStorageClass;
- ClassSymbol->setDLLStorageClass(DLLStorage);
- }
- }
+ if (auto *ClassSymbol = dyn_cast<llvm::GlobalVariable>(Value))
+ CGM.setGVProperties(ClassSymbol, OID);
return Value;
}
@@ -1061,13 +2115,7 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
if ((VD = dyn_cast<VarDecl>(Result)))
break;
- auto DLLStorage = llvm::GlobalValue::DefaultStorageClass;
- if (!VD || VD->hasAttr<DLLImportAttr>())
- DLLStorage = llvm::GlobalValue::DLLImportStorageClass;
- else if (VD->hasAttr<DLLExportAttr>())
- DLLStorage = llvm::GlobalValue::DLLExportStorageClass;
-
- ClassSymbol->setDLLStorageClass(DLLStorage);
+ CGM.setGVProperties(ClassSymbol, VD);
}
}
return Value;
@@ -1217,7 +2265,7 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
- if (StringClass.empty()) StringClass = "NXConstantString";
+ if (StringClass.empty()) StringClass = "NSConstantString";
std::string Sym = "_OBJC_CLASS_";
Sym += StringClass;
@@ -1278,54 +2326,67 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
llvm::Value *ReceiverClass = nullptr;
- if (isCategoryImpl) {
- llvm::Constant *classLookupFunction = nullptr;
+ bool isV2ABI = isRuntime(ObjCRuntime::GNUstep, 2);
+ if (isV2ABI) {
+ ReceiverClass = GetClassNamed(CGF,
+ Class->getSuperClass()->getNameAsString(), /*isWeak*/false);
if (IsClassMessage) {
- classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, PtrTy, true), "objc_get_meta_class");
- } else {
- classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, PtrTy, true), "objc_get_class");
+      // Load the isa pointer of the superclass if this is a class method.
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(IdTy));
+ ReceiverClass =
+ Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
}
- ReceiverClass = Builder.CreateCall(classLookupFunction,
- MakeConstantString(Class->getNameAsString()));
+ ReceiverClass = EnforceType(Builder, ReceiverClass, IdTy);
} else {
- // Set up global aliases for the metaclass or class pointer if they do not
- // already exist. These will are forward-references which will be set to
- // pointers to the class and metaclass structure created for the runtime
- // load function. To send a message to super, we look up the value of the
- // super_class pointer from either the class or metaclass structure.
- if (IsClassMessage) {
- if (!MetaClassPtrAlias) {
- MetaClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
- ".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = nullptr;
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_class");
}
- ReceiverClass = MetaClassPtrAlias;
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
} else {
- if (!ClassPtrAlias) {
- ClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
- ".objc_class_ref" + Class->getNameAsString(), &TheModule);
+ // Set up global aliases for the metaclass or class pointer if they do not
+      // already exist. These are forward references which will be set to
+ // pointers to the class and metaclass structure created for the runtime
+ // load function. To send a message to super, we look up the value of the
+ // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = llvm::GlobalAlias::create(
+ IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ ".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = llvm::GlobalAlias::create(
+ IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ ".objc_class_ref" + Class->getNameAsString(), &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
}
- ReceiverClass = ClassPtrAlias;
}
+ // Cast the pointer to a simplified version of the class structure
+ llvm::Type *CastTy = llvm::StructType::get(IdTy, IdTy);
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(CastTy));
+ // Get the superclass pointer
+ ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass =
+ Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
}
- // Cast the pointer to a simplified version of the class structure
- llvm::Type *CastTy = llvm::StructType::get(IdTy, IdTy);
- ReceiverClass = Builder.CreateBitCast(ReceiverClass,
- llvm::PointerType::getUnqual(CastTy));
- // Get the superclass pointer
- ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
- // Load the superclass pointer
- ReceiverClass =
- Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
// Construct the structure used to look up the IMP
llvm::StructType *ObjCSuperTy =
llvm::StructType::get(Receiver->getType(), IdTy);
- // FIXME: Is this really supposed to be a dynamic alloca?
- Address ObjCSuper = Address(Builder.CreateAlloca(ObjCSuperTy),
+ Address ObjCSuper = CGF.CreateTempAlloca(ObjCSuperTy,
CGF.getPointerAlign());
Builder.CreateStore(Receiver,
@@ -1456,7 +2517,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
}
// Reset the receiver in case the lookup modified it
- ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy);
imp = EnforceType(Builder, imp, MSI.MessengerType);
@@ -1506,17 +2567,16 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
llvm::Constant *CGObjCGNU::
GenerateMethodList(StringRef ClassName,
StringRef CategoryName,
- ArrayRef<Selector> MethodSels,
- ArrayRef<llvm::Constant *> MethodTypes,
+ ArrayRef<const ObjCMethodDecl*> Methods,
bool isClassMethodList) {
- if (MethodSels.empty())
+ if (Methods.empty())
return NULLPtr;
ConstantInitBuilder Builder(CGM);
auto MethodList = Builder.beginStruct();
MethodList.addNullPointer(CGM.Int8PtrTy);
- MethodList.addInt(Int32Ty, MethodTypes.size());
+ MethodList.addInt(Int32Ty, Methods.size());
// Get the method structure type.
llvm::StructType *ObjCMethodTy =
@@ -1525,20 +2585,48 @@ GenerateMethodList(StringRef ClassName,
PtrToInt8Ty, // Method types
IMPTy // Method pointer
});
- auto Methods = MethodList.beginArray();
- for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ bool isV2ABI = isRuntime(ObjCRuntime::GNUstep, 2);
+ if (isV2ABI) {
+ // size_t size;
+ llvm::DataLayout td(&TheModule);
+ MethodList.addInt(SizeTy, td.getTypeSizeInBits(ObjCMethodTy) /
+ CGM.getContext().getCharWidth());
+ ObjCMethodTy =
+ llvm::StructType::get(CGM.getLLVMContext(), {
+ IMPTy, // Method pointer
+ PtrToInt8Ty, // Selector
+ PtrToInt8Ty // Extended type encoding
+ });
+ } else {
+ ObjCMethodTy =
+ llvm::StructType::get(CGM.getLLVMContext(), {
+        PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ IMPTy // Method pointer
+ });
+ }
+ auto MethodArray = MethodList.beginArray();
+ ASTContext &Context = CGM.getContext();
+ for (const auto *OMD : Methods) {
llvm::Constant *FnPtr =
TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
- MethodSels[i],
+ OMD->getSelector(),
isClassMethodList));
assert(FnPtr && "Can't generate metadata for method that doesn't exist");
- auto Method = Methods.beginStruct(ObjCMethodTy);
- Method.add(MakeConstantString(MethodSels[i].getAsString()));
- Method.add(MethodTypes[i]);
- Method.addBitCast(FnPtr, IMPTy);
- Method.finishAndAddTo(Methods);
+ auto Method = MethodArray.beginStruct(ObjCMethodTy);
+ if (isV2ABI) {
+ Method.addBitCast(FnPtr, IMPTy);
+ Method.add(GetConstantSelector(OMD->getSelector(),
+ Context.getObjCEncodingForMethodDecl(OMD)));
+ Method.add(MakeConstantString(Context.getObjCEncodingForMethodDecl(OMD, true)));
+ } else {
+ Method.add(MakeConstantString(OMD->getSelector().getAsString()));
+ Method.add(MakeConstantString(Context.getObjCEncodingForMethodDecl(OMD)));
+ Method.addBitCast(FnPtr, IMPTy);
+ }
+ Method.finishAndAddTo(MethodArray);
}
- Methods.finishAndAddTo(MethodList);
+ MethodArray.finishAndAddTo(MethodList);
// Create an instance of the structure
return MethodList.finishAndCreateGlobal(".objc_method_list",
@@ -1549,7 +2637,9 @@ GenerateMethodList(StringRef ClassName,
llvm::Constant *CGObjCGNU::
GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
ArrayRef<llvm::Constant *> IvarTypes,
- ArrayRef<llvm::Constant *> IvarOffsets) {
+ ArrayRef<llvm::Constant *> IvarOffsets,
+ ArrayRef<llvm::Constant *> IvarAlign,
+ ArrayRef<Qualifiers::ObjCLifetime> IvarOwnership) {
if (IvarNames.empty())
return NULLPtr;
@@ -1664,7 +2754,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// gc_object_type
Elements.add(NULLPtr);
// abi_version
- Elements.addInt(LongTy, 1);
+ Elements.addInt(LongTy, ClassABIVersion);
// ivar_offsets
Elements.add(IvarOffsets);
// properties
@@ -1693,22 +2783,22 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
}
llvm::Constant *CGObjCGNU::
-GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
- ArrayRef<llvm::Constant *> MethodTypes) {
+GenerateProtocolMethodList(ArrayRef<const ObjCMethodDecl*> Methods) {
// Get the method structure type.
llvm::StructType *ObjCMethodDescTy =
llvm::StructType::get(CGM.getLLVMContext(), { PtrToInt8Ty, PtrToInt8Ty });
+ ASTContext &Context = CGM.getContext();
ConstantInitBuilder Builder(CGM);
auto MethodList = Builder.beginStruct();
- MethodList.addInt(IntTy, MethodNames.size());
- auto Methods = MethodList.beginArray(ObjCMethodDescTy);
- for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
- auto Method = Methods.beginStruct(ObjCMethodDescTy);
- Method.add(MethodNames[i]);
- Method.add(MethodTypes[i]);
- Method.finishAndAddTo(Methods);
- }
- Methods.finishAndAddTo(MethodList);
+ MethodList.addInt(IntTy, Methods.size());
+ auto MethodArray = MethodList.beginArray(ObjCMethodDescTy);
+ for (auto *M : Methods) {
+ auto Method = MethodArray.beginStruct(ObjCMethodDescTy);
+ Method.add(MakeConstantString(M->getSelector().getAsString()));
+ Method.add(MakeConstantString(Context.getObjCEncodingForMethodDecl(M)));
+ Method.finishAndAddTo(MethodArray);
+ }
+ MethodArray.finishAndAddTo(MethodList);
return MethodList.finishAndCreateGlobal(".objc_method_list",
CGM.getPointerAlign());
}
@@ -1742,16 +2832,19 @@ CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
const ObjCProtocolDecl *PD) {
- llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ llvm::Constant *&protocol = ExistingProtocols[PD->getNameAsString()];
+ if (!protocol)
+ GenerateProtocol(PD);
llvm::Type *T =
CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
llvm::Constant *
-CGObjCGNU::GenerateEmptyProtocol(const std::string &ProtocolName) {
+CGObjCGNU::GenerateEmptyProtocol(StringRef ProtocolName) {
llvm::Constant *ProtocolList = GenerateProtocolList({});
- llvm::Constant *MethodList = GenerateProtocolMethodList({}, {});
+ llvm::Constant *MethodList = GenerateProtocolMethodList({});
+ MethodList = llvm::ConstantExpr::getBitCast(MethodList, PtrToInt8Ty);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
ConstantInitBuilder Builder(CGM);
@@ -1763,17 +2856,18 @@ CGObjCGNU::GenerateEmptyProtocol(const std::string &ProtocolName) {
llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
Elements.add(MakeConstantString(ProtocolName, ".objc_protocol_name"));
- Elements.add(ProtocolList);
- Elements.add(MethodList);
- Elements.add(MethodList);
- Elements.add(MethodList);
- Elements.add(MethodList);
- return Elements.finishAndCreateGlobal(".objc_protocol",
+ Elements.add(ProtocolList); /* .protocol_list */
+ Elements.add(MethodList); /* .instance_methods */
+ Elements.add(MethodList); /* .class_methods */
+ Elements.add(MethodList); /* .optional_instance_methods */
+ Elements.add(MethodList); /* .optional_class_methods */
+ Elements.add(NULLPtr); /* .properties */
+ Elements.add(NULLPtr); /* .optional_properties */
+ return Elements.finishAndCreateGlobal(SymbolForProtocol(ProtocolName),
CGM.getPointerAlign());
}
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
- ASTContext &Context = CGM.getContext();
std::string ProtocolName = PD->getNameAsString();
// Use the protocol definition, if there is one.
@@ -1783,51 +2877,31 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
SmallVector<std::string, 16> Protocols;
for (const auto *PI : PD->protocols())
Protocols.push_back(PI->getNameAsString());
- SmallVector<llvm::Constant*, 16> InstanceMethodNames;
- SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
- SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
- SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
- for (const auto *I : PD->instance_methods()) {
- std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
- if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptionalInstanceMethodNames.push_back(
- MakeConstantString(I->getSelector().getAsString()));
- OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
- } else {
- InstanceMethodNames.push_back(
- MakeConstantString(I->getSelector().getAsString()));
- InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
- }
- }
+ SmallVector<const ObjCMethodDecl*, 16> InstanceMethods;
+ SmallVector<const ObjCMethodDecl*, 16> OptionalInstanceMethods;
+ for (const auto *I : PD->instance_methods())
+ if (I->isOptional())
+ OptionalInstanceMethods.push_back(I);
+ else
+ InstanceMethods.push_back(I);
// Collect information about class methods:
- SmallVector<llvm::Constant*, 16> ClassMethodNames;
- SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
- SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
- for (const auto *I : PD->class_methods()) {
- std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
- if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptionalClassMethodNames.push_back(
- MakeConstantString(I->getSelector().getAsString()));
- OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
- } else {
- ClassMethodNames.push_back(
- MakeConstantString(I->getSelector().getAsString()));
- ClassMethodTypes.push_back(MakeConstantString(TypeStr));
- }
- }
+ SmallVector<const ObjCMethodDecl*, 16> ClassMethods;
+ SmallVector<const ObjCMethodDecl*, 16> OptionalClassMethods;
+ for (const auto *I : PD->class_methods())
+ if (I->isOptional())
+ OptionalClassMethods.push_back(I);
+ else
+ ClassMethods.push_back(I);
llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
llvm::Constant *InstanceMethodList =
- GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ GenerateProtocolMethodList(InstanceMethods);
llvm::Constant *ClassMethodList =
- GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ GenerateProtocolMethodList(ClassMethods);
llvm::Constant *OptionalInstanceMethodList =
- GenerateProtocolMethodList(OptionalInstanceMethodNames,
- OptionalInstanceMethodTypes);
+ GenerateProtocolMethodList(OptionalInstanceMethods);
llvm::Constant *OptionalClassMethodList =
- GenerateProtocolMethodList(OptionalClassMethodNames,
- OptionalClassMethodTypes);
+ GenerateProtocolMethodList(OptionalClassMethods);
// Property metadata: name, attributes, isSynthesized, setter name, setter
// types, getter name, getter types.
@@ -1835,78 +2909,10 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// simplify the runtime library by allowing it to use the same data
// structures for protocol metadata everywhere.
- llvm::Constant *PropertyList;
- llvm::Constant *OptionalPropertyList;
- {
- llvm::StructType *propertyMetadataTy =
- llvm::StructType::get(CGM.getLLVMContext(),
- { PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
- PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
-
- unsigned numReqProperties = 0, numOptProperties = 0;
- for (auto property : PD->instance_properties()) {
- if (property->isOptional())
- numOptProperties++;
- else
- numReqProperties++;
- }
-
- ConstantInitBuilder reqPropertyListBuilder(CGM);
- auto reqPropertiesList = reqPropertyListBuilder.beginStruct();
- reqPropertiesList.addInt(IntTy, numReqProperties);
- reqPropertiesList.add(NULLPtr);
- auto reqPropertiesArray = reqPropertiesList.beginArray(propertyMetadataTy);
-
- ConstantInitBuilder optPropertyListBuilder(CGM);
- auto optPropertiesList = optPropertyListBuilder.beginStruct();
- optPropertiesList.addInt(IntTy, numOptProperties);
- optPropertiesList.add(NULLPtr);
- auto optPropertiesArray = optPropertiesList.beginArray(propertyMetadataTy);
-
- // Add all of the property methods need adding to the method list and to the
- // property metadata list.
- for (auto *property : PD->instance_properties()) {
- auto &propertiesArray =
- (property->isOptional() ? optPropertiesArray : reqPropertiesArray);
- auto fields = propertiesArray.beginStruct(propertyMetadataTy);
-
- fields.add(MakePropertyEncodingString(property, nullptr));
- PushPropertyAttributes(fields, property);
-
- if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- std::string typeStr = Context.getObjCEncodingForMethodDecl(getter);
- llvm::Constant *typeEncoding = MakeConstantString(typeStr);
- InstanceMethodTypes.push_back(typeEncoding);
- fields.add(MakeConstantString(getter->getSelector().getAsString()));
- fields.add(typeEncoding);
- } else {
- fields.add(NULLPtr);
- fields.add(NULLPtr);
- }
- if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- std::string typeStr = Context.getObjCEncodingForMethodDecl(setter);
- llvm::Constant *typeEncoding = MakeConstantString(typeStr);
- InstanceMethodTypes.push_back(typeEncoding);
- fields.add(MakeConstantString(setter->getSelector().getAsString()));
- fields.add(typeEncoding);
- } else {
- fields.add(NULLPtr);
- fields.add(NULLPtr);
- }
-
- fields.finishAndAddTo(propertiesArray);
- }
-
- reqPropertiesArray.finishAndAddTo(reqPropertiesList);
- PropertyList =
- reqPropertiesList.finishAndCreateGlobal(".objc_property_list",
- CGM.getPointerAlign());
-
- optPropertiesArray.finishAndAddTo(optPropertiesList);
- OptionalPropertyList =
- optPropertiesList.finishAndCreateGlobal(".objc_property_list",
- CGM.getPointerAlign());
- }
+ llvm::Constant *PropertyList =
+ GeneratePropertyList(nullptr, PD, false, false);
+ llvm::Constant *OptionalPropertyList =
+ GeneratePropertyList(nullptr, PD, false, true);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
@@ -1917,8 +2923,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
Elements.add(
llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
- Elements.add(
- MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.add(MakeConstantString(ProtocolName));
Elements.add(ProtocolList);
Elements.add(InstanceMethodList);
Elements.add(ClassMethodList);
@@ -1933,8 +2938,6 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
}
void CGObjCGNU::GenerateProtocolHolderCategory() {
// Collect information about instance methods
- SmallVector<Selector, 1> MethodSels;
- SmallVector<llvm::Constant*, 1> MethodTypes;
ConstantInitBuilder Builder(CGM);
auto Elements = Builder.beginStruct();
@@ -1945,10 +2948,10 @@ void CGObjCGNU::GenerateProtocolHolderCategory() {
Elements.add(MakeConstantString(ClassName));
// Instance method list
Elements.addBitCast(GenerateMethodList(
- ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy);
+ ClassName, CategoryName, {}, false), PtrTy);
// Class method list
Elements.addBitCast(GenerateMethodList(
- ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy);
+ ClassName, CategoryName, {}, true), PtrTy);
// Protocol list
ConstantInitBuilder ProtocolListBuilder(CGM);
@@ -2016,25 +3019,9 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
}
void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
- std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ const ObjCInterfaceDecl *Class = OCD->getClassInterface();
+ std::string ClassName = Class->getNameAsString();
std::string CategoryName = OCD->getNameAsString();
- // Collect information about instance methods
- SmallVector<Selector, 16> InstanceMethodSels;
- SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
- for (const auto *I : OCD->instance_methods()) {
- InstanceMethodSels.push_back(I->getSelector());
- std::string TypeStr = CGM.getContext().getObjCEncodingForMethodDecl(I);
- InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
- }
-
- // Collect information about class methods
- SmallVector<Selector, 16> ClassMethodSels;
- SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- for (const auto *I : OCD->class_methods()) {
- ClassMethodSels.push_back(I->getSelector());
- std::string TypeStr = CGM.getContext().getObjCEncodingForMethodDecl(I);
- ClassMethodTypes.push_back(MakeConstantString(TypeStr));
- }
// Collect the names of referenced protocols
SmallVector<std::string, 16> Protocols;
@@ -2049,84 +3036,125 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Elements.add(MakeConstantString(CategoryName));
Elements.add(MakeConstantString(ClassName));
// Instance method list
+ SmallVector<ObjCMethodDecl*, 16> InstanceMethods;
+ InstanceMethods.insert(InstanceMethods.begin(), OCD->instmeth_begin(),
+ OCD->instmeth_end());
Elements.addBitCast(
- GenerateMethodList(ClassName, CategoryName, InstanceMethodSels,
- InstanceMethodTypes, false),
+ GenerateMethodList(ClassName, CategoryName, InstanceMethods, false),
PtrTy);
// Class method list
+
+ SmallVector<ObjCMethodDecl*, 16> ClassMethods;
+ ClassMethods.insert(ClassMethods.begin(), OCD->classmeth_begin(),
+ OCD->classmeth_end());
Elements.addBitCast(
- GenerateMethodList(ClassName, CategoryName, ClassMethodSels,
- ClassMethodTypes, true),
+ GenerateMethodList(ClassName, CategoryName, ClassMethods, true),
PtrTy);
// Protocol list
Elements.addBitCast(GenerateProtocolList(Protocols), PtrTy);
+ if (isRuntime(ObjCRuntime::GNUstep, 2)) {
+ const ObjCCategoryDecl *Category =
+ Class->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ // Instance properties
+ Elements.addBitCast(GeneratePropertyList(OCD, Category, false), PtrTy);
+ // Class properties
+ Elements.addBitCast(GeneratePropertyList(OCD, Category, true), PtrTy);
+ } else {
+ Elements.addNullPointer(PtrTy);
+ Elements.addNullPointer(PtrTy);
+ }
+ }
+
Categories.push_back(llvm::ConstantExpr::getBitCast(
- Elements.finishAndCreateGlobal("", CGM.getPointerAlign()),
+ Elements.finishAndCreateGlobal(
+ std::string(".objc_category_")+ClassName+CategoryName,
+ CGM.getPointerAlign()),
PtrTy));
}
-llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
- SmallVectorImpl<Selector> &InstanceMethodSels,
- SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ bool isClassProperty,
+ bool protocolOptionalProperties) {
+
+ SmallVector<const ObjCPropertyDecl *, 16> Properties;
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ bool isProtocol = isa<ObjCProtocolDecl>(OCD);
ASTContext &Context = CGM.getContext();
- // Property metadata: name, attributes, attributes2, padding1, padding2,
- // setter name, setter types, getter name, getter types.
- llvm::StructType *propertyMetadataTy =
- llvm::StructType::get(CGM.getLLVMContext(),
- { PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
- PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
- unsigned numProperties = 0;
- for (auto *propertyImpl : OID->property_impls()) {
- (void) propertyImpl;
- numProperties++;
+ std::function<void(const ObjCProtocolDecl *Proto)> collectProtocolProperties
+ = [&](const ObjCProtocolDecl *Proto) {
+ for (const auto *P : Proto->protocols())
+ collectProtocolProperties(P);
+ for (const auto *PD : Proto->properties()) {
+ if (isClassProperty != PD->isClassProperty())
+ continue;
+ // Skip any properties that are declared in protocols that this class
+ // conforms to but are not actually implemented by this class.
+ if (!isProtocol && !Context.getObjCPropertyImplDeclForPropertyDecl(PD, Container))
+ continue;
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+ Properties.push_back(PD);
+ }
+ };
+
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ for (const ObjCCategoryDecl *ClassExt : OID->known_extensions())
+ for (auto *PD : ClassExt->properties()) {
+ if (isClassProperty != PD->isClassProperty())
+ continue;
+ PropertySet.insert(PD->getIdentifier());
+ Properties.push_back(PD);
+ }
+
+ for (const auto *PD : OCD->properties()) {
+ if (isClassProperty != PD->isClassProperty())
+ continue;
+ // If we're generating a list for a protocol, skip optional / required ones
+ // when generating the other list.
+ if (isProtocol && (protocolOptionalProperties != PD->isOptional()))
+ continue;
+ // Don't emit duplicate metadata for properties that were already in a
+ // class extension.
+ if (!PropertySet.insert(PD->getIdentifier()).second)
+ continue;
+
+ Properties.push_back(PD);
}
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ for (const auto *P : OID->all_referenced_protocols())
+ collectProtocolProperties(P);
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD))
+ for (const auto *P : CD->protocols())
+ collectProtocolProperties(P);
+
+ auto numProperties = Properties.size();
+
+ if (numProperties == 0)
+ return NULLPtr;
+
ConstantInitBuilder builder(CGM);
auto propertyList = builder.beginStruct();
- propertyList.addInt(IntTy, numProperties);
- propertyList.add(NULLPtr);
- auto properties = propertyList.beginArray(propertyMetadataTy);
+ auto properties = PushPropertyListHeader(propertyList, numProperties);
  // Add all of the property methods that need adding to the method list and
  // to the property metadata list.
- for (auto *propertyImpl : OID->property_impls()) {
- auto fields = properties.beginStruct(propertyMetadataTy);
- ObjCPropertyDecl *property = propertyImpl->getPropertyDecl();
- bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
- ObjCPropertyImplDecl::Synthesize);
- bool isDynamic = (propertyImpl->getPropertyImplementation() ==
- ObjCPropertyImplDecl::Dynamic);
-
- fields.add(MakePropertyEncodingString(property, OID));
- PushPropertyAttributes(fields, property, isSynthesized, isDynamic);
- if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- std::string TypeStr = Context.getObjCEncodingForMethodDecl(getter);
- llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- if (isSynthesized) {
- InstanceMethodTypes.push_back(TypeEncoding);
- InstanceMethodSels.push_back(getter->getSelector());
+ for (auto *property : Properties) {
+ bool isSynthesized = false;
+ bool isDynamic = false;
+ if (!isProtocol) {
+ auto *propertyImpl = Context.getObjCPropertyImplDeclForPropertyDecl(property, Container);
+ if (propertyImpl) {
+ isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
+ isDynamic = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Dynamic);
}
- fields.add(MakeConstantString(getter->getSelector().getAsString()));
- fields.add(TypeEncoding);
- } else {
- fields.add(NULLPtr);
- fields.add(NULLPtr);
}
- if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- std::string TypeStr = Context.getObjCEncodingForMethodDecl(setter);
- llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- if (isSynthesized) {
- InstanceMethodTypes.push_back(TypeEncoding);
- InstanceMethodSels.push_back(setter->getSelector());
- }
- fields.add(MakeConstantString(setter->getSelector().getAsString()));
- fields.add(TypeEncoding);
- } else {
- fields.add(NULLPtr);
- fields.add(NULLPtr);
- }
- fields.finishAndAddTo(properties);
+ PushProperty(properties, property, Container, isSynthesized, isDynamic);
}
properties.finishAndAddTo(propertyList);
@@ -2179,6 +3207,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SmallVector<llvm::Constant*, 16> IvarNames;
SmallVector<llvm::Constant*, 16> IvarTypes;
SmallVector<llvm::Constant*, 16> IvarOffsets;
+ SmallVector<llvm::Constant*, 16> IvarAligns;
+ SmallVector<Qualifiers::ObjCLifetime, 16> IvarOwnership;
ConstantInitBuilder IvarOffsetBuilder(CGM);
auto IvarOffsetValues = IvarOffsetBuilder.beginArray(PtrToIntTy);
@@ -2201,6 +3231,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
std::string TypeStr;
Context.getObjCEncodingForType(IVD->getType(), TypeStr, IVD);
IvarTypes.push_back(MakeConstantString(TypeStr));
+ IvarAligns.push_back(llvm::ConstantInt::get(IntTy,
+ Context.getTypeSize(IVD->getType())));
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
@@ -2211,6 +3243,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Create the direct offset value
std::string OffsetName = "__objc_ivar_offset_value_" + ClassName +"." +
IVD->getNameAsString();
+
llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
if (OffsetVar) {
OffsetVar->setInitializer(OffsetValue);
@@ -2219,14 +3252,13 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// copy.
OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage);
} else
- OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
+ OffsetVar = new llvm::GlobalVariable(TheModule, Int32Ty,
false, llvm::GlobalValue::ExternalLinkage,
- OffsetValue,
- "__objc_ivar_offset_value_" + ClassName +"." +
- IVD->getNameAsString());
+ OffsetValue, OffsetName);
IvarOffsets.push_back(OffsetValue);
IvarOffsetValues.add(OffsetVar);
Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
+ IvarOwnership.push_back(lt);
switch (lt) {
case Qualifiers::OCL_Strong:
StrongIvars.push_back(true);
@@ -2248,25 +3280,30 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
CGM.getPointerAlign());
// Collect information about instance methods
- SmallVector<Selector, 16> InstanceMethodSels;
- SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
- for (const auto *I : OID->instance_methods()) {
- InstanceMethodSels.push_back(I->getSelector());
- std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
- InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
- }
+ SmallVector<const ObjCMethodDecl*, 16> InstanceMethods;
+ InstanceMethods.insert(InstanceMethods.begin(), OID->instmeth_begin(),
+ OID->instmeth_end());
+
+ SmallVector<const ObjCMethodDecl*, 16> ClassMethods;
+ ClassMethods.insert(ClassMethods.begin(), OID->classmeth_begin(),
+ OID->classmeth_end());
+
+ // Collect the same information about synthesized properties, which don't
+ // show up in the instance method lists.
+ for (auto *propertyImpl : OID->property_impls())
+ if (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *property = propertyImpl->getPropertyDecl();
+ auto addPropertyMethod = [&](const ObjCMethodDecl *accessor) {
+ if (accessor)
+ InstanceMethods.push_back(accessor);
+ };
+ addPropertyMethod(property->getGetterMethodDecl());
+ addPropertyMethod(property->getSetterMethodDecl());
+ }
- llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
- InstanceMethodTypes);
+ llvm::Constant *Properties = GeneratePropertyList(OID, ClassDecl);
- // Collect information about class methods
- SmallVector<Selector, 16> ClassMethodSels;
- SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- for (const auto *I : OID->class_methods()) {
- ClassMethodSels.push_back(I->getSelector());
- std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
- ClassMethodTypes.push_back(MakeConstantString(TypeStr));
- }
// Collect the names of referenced protocols
SmallVector<std::string, 16> Protocols;
for (const auto *I : ClassDecl->protocols())
@@ -2283,11 +3320,11 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SmallVector<llvm::Constant*, 1> empty;
// Generate the method and instance variable lists
llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
- InstanceMethodSels, InstanceMethodTypes, false);
+ InstanceMethods, false);
llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
- ClassMethodSels, ClassMethodTypes, true);
+ ClassMethods, true);
llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
- IvarOffsets);
+ IvarOffsets, IvarAligns, IvarOwnership);
// Irrespective of whether we are compiling for a fragile or non-fragile ABI,
// we emit a symbol containing the offset for each ivar in the class. This
// allows code compiled for the non-Fragile ABI to inherit from code compiled
@@ -2300,14 +3337,13 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// the offset (third field in ivar structure)
llvm::Type *IndexTy = Int32Ty;
llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
- llvm::ConstantInt::get(IndexTy, 1), nullptr,
- llvm::ConstantInt::get(IndexTy, 2) };
+ llvm::ConstantInt::get(IndexTy, ClassABIVersion > 1 ? 2 : 1), nullptr,
+ llvm::ConstantInt::get(IndexTy, ClassABIVersion > 1 ? 3 : 2) };
unsigned ivarIndex = 0;
for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
IVD = IVD->getNextIvar()) {
- const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
- + IVD->getNameAsString();
+ const std::string Name = GetIVarOffsetVariableName(ClassDecl, IVD);
offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex);
// Get the correct ivar field
llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
@@ -2321,12 +3357,10 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// different modules will use this one, rather than their private
// copy.
offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
- } else {
+ } else
// Add a new alias if there isn't one already.
- offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ new llvm::GlobalVariable(TheModule, offsetValue->getType(),
false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
- (void) offset; // Silence dead store warning.
- }
++ivarIndex;
}
llvm::Constant *ZeroPtr = llvm::ConstantInt::get(IntPtrTy, 0);
@@ -2334,16 +3368,10 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
//Generate metaclass for class methods
llvm::Constant *MetaClassStruct = GenerateClassStructure(
NULLPtr, NULLPtr, 0x12L, ClassName.c_str(), nullptr, Zeros[0],
- GenerateIvarList(empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr,
- NULLPtr, ZeroPtr, ZeroPtr, true);
- if (CGM.getTriple().isOSBinFormatCOFF()) {
- auto Storage = llvm::GlobalValue::DefaultStorageClass;
- if (OID->getClassInterface()->hasAttr<DLLImportAttr>())
- Storage = llvm::GlobalValue::DLLImportStorageClass;
- else if (OID->getClassInterface()->hasAttr<DLLExportAttr>())
- Storage = llvm::GlobalValue::DLLExportStorageClass;
- cast<llvm::GlobalValue>(MetaClassStruct)->setDLLStorageClass(Storage);
- }
+ NULLPtr, ClassMethodList, NULLPtr, NULLPtr,
+ GeneratePropertyList(OID, ClassDecl, true), ZeroPtr, ZeroPtr, true);
+ CGM.setGVProperties(cast<llvm::GlobalValue>(MetaClassStruct),
+ OID->getClassInterface());
// Generate the class structure
llvm::Constant *ClassStruct = GenerateClassStructure(
@@ -2351,14 +3379,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::ConstantInt::get(LongTy, instanceSize), IvarList, MethodList,
GenerateProtocolList(Protocols), IvarOffsetArray, Properties,
StrongIvarBitmap, WeakIvarBitmap);
- if (CGM.getTriple().isOSBinFormatCOFF()) {
- auto Storage = llvm::GlobalValue::DefaultStorageClass;
- if (OID->getClassInterface()->hasAttr<DLLImportAttr>())
- Storage = llvm::GlobalValue::DLLImportStorageClass;
- else if (OID->getClassInterface()->hasAttr<DLLExportAttr>())
- Storage = llvm::GlobalValue::DLLExportStorageClass;
- cast<llvm::GlobalValue>(ClassStruct)->setDLLStorageClass(Storage);
- }
+ CGM.setGVProperties(cast<llvm::GlobalValue>(ClassStruct),
+ OID->getClassInterface());
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
@@ -2785,8 +3807,7 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar) {
- const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
- + '.' + Ivar->getNameAsString();
+ const std::string Name = GetIVarOffsetVariableName(ID, Ivar);
// Emit the variable and initialize it with what we think the correct value
// is. This allows code compiled with non-fragile ivars to work correctly
// when linked against code which isn't (most of the time).
@@ -2895,8 +3916,11 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
CGObjCRuntime *
clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
- switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
+ auto Runtime = CGM.getLangOpts().ObjCRuntime;
+ switch (Runtime.getKind()) {
case ObjCRuntime::GNUstep:
+ if (Runtime.getVersion() >= VersionTuple(2, 0))
+ return new CGObjCGNUstep2(CGM);
return new CGObjCGNUstep(CGM);
case ObjCRuntime::GCC:
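For orientation: the GNUstep 2.0 code generator added by this patch is selected purely by the runtime version carried in the language options. A minimal sketch of the same gate, with the clang types stubbed out (VersionTuple here is an assumption standing in for llvm::VersionTuple, and the driver spelling would be something like -fobjc-runtime=gnustep-2.0):

#include <tuple>

// Stand-ins for the real clang/LLVM types referenced in the hunk above.
struct VersionTuple {
  unsigned Major, Minor;
  friend bool operator>=(const VersionTuple &A, const VersionTuple &B) {
    return std::tie(A.Major, A.Minor) >= std::tie(B.Major, B.Minor);
  }
};
struct CGObjCRuntime { virtual ~CGObjCRuntime() = default; };
struct CGObjCGNUstep : CGObjCRuntime {};
struct CGObjCGNUstep2 : CGObjCRuntime {};

// Version-gated factory mirroring the GNUstep case of CreateGNUObjCRuntime.
CGObjCRuntime *createGNUstepRuntime(const VersionTuple &V) {
  if (V >= VersionTuple{2, 0})
    return new CGObjCGNUstep2(); // new 2.0 ABI
  return new CGObjCGNUstep();    // classic GNUstep ABI
}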
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index ef4e6cd4f01b..0c766575dc21 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -888,7 +888,7 @@ protected:
/// int * but is actually an Obj-C class pointer.
llvm::WeakTrackingVH ConstantStringClassRef;
- /// \brief The LLVM type corresponding to NSConstantString.
+ /// The LLVM type corresponding to NSConstantString.
llvm::StructType *NSConstantStringType = nullptr;
llvm::StringMap<llvm::GlobalVariable *> NSConstantStringMap;
@@ -1708,7 +1708,7 @@ struct NullReturnState {
e = Method->param_end(); i != e; ++i, ++I) {
const ParmVarDecl *ParamDecl = (*i);
if (ParamDecl->hasAttr<NSConsumedAttr>()) {
- RValue RV = I->RV;
+ RValue RV = I->getRValue(CGF);
assert(RV.isScalar() &&
"NullReturnState::complete - arg not on object");
CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
@@ -3401,7 +3401,9 @@ static bool hasMRCWeakIvars(CodeGenModule &CGM,
See EmitClassExtension();
*/
void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
- DefinedSymbols.insert(ID->getIdentifier());
+ IdentifierInfo *RuntimeName =
+ &CGM.getContext().Idents.get(ID->getObjCRuntimeNameAsString());
+ DefinedSymbols.insert(RuntimeName);
std::string ClassName = ID->getNameAsString();
// FIXME: Gross
@@ -4179,10 +4181,6 @@ void FragileHazards::emitHazardsInNewBlocks() {
}
}
-static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
- if (V) S.insert(V);
-}
-
static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, Address V) {
if (V.isValid()) S.insert(V.getPointer());
}
@@ -4984,7 +4982,9 @@ llvm::Value *CGObjCMac::EmitClassRef(CodeGenFunction &CGF,
if (ID->hasAttr<ObjCRuntimeVisibleAttr>())
return EmitClassRefViaRuntime(CGF, ID, ObjCTypes);
- return EmitClassRefFromId(CGF, ID->getIdentifier());
+ IdentifierInfo *RuntimeName =
+ &CGM.getContext().Idents.get(ID->getObjCRuntimeNameAsString());
+ return EmitClassRefFromId(CGF, RuntimeName);
}
llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
@@ -6309,9 +6309,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::GlobalVariable *MetaTClass =
BuildClassObject(CI, /*metaclass*/ true,
IsAGV, SuperClassGV, CLASS_RO_GV, classIsHidden);
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (CI->hasAttr<DLLExportAttr>())
- MetaTClass->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ CGM.setGVProperties(MetaTClass, CI);
DefinedMetaClasses.push_back(MetaTClass);
// Metadata for the class
@@ -6351,9 +6349,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::GlobalVariable *ClassMD =
BuildClassObject(CI, /*metaclass*/ false,
MetaTClass, SuperClassGV, CLASS_RO_GV, classIsHidden);
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (CI->hasAttr<DLLExportAttr>())
- ClassMD->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ CGM.setGVProperties(ClassMD, CI);
DefinedClasses.push_back(ClassMD);
ImplementedClasses.push_back(CI);
@@ -6403,7 +6399,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
PTGV->setAlignment(Align.getQuantity());
if (!CGM.getTriple().isOSBinFormatMachO())
PTGV->setComdat(CGM.getModule().getOrInsertComdat(ProtocolName));
- CGM.addCompilerUsedGlobal(PTGV);
+ CGM.addUsedGlobal(PTGV);
return CGF.Builder.CreateAlignedLoad(PTGV, Align);
}
@@ -6847,7 +6843,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Protocols[PD->getIdentifier()] = Entry;
}
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.addCompilerUsedGlobal(Entry);
+ CGM.addUsedGlobal(Entry);
// Use this protocol meta-data to build protocol list table in section
// __DATA, __objc_protolist
@@ -6866,7 +6862,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
PTGV->setSection(GetSectionName("__objc_protolist",
"coalesced,no_dead_strip"));
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.addCompilerUsedGlobal(PTGV);
+ CGM.addUsedGlobal(PTGV);
return Entry;
}
@@ -6952,7 +6948,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
// This could be 32bit int or 64bit integer depending on the architecture.
// Cast it to 64bit integer value, if it is a 32bit integer ivar offset value
- // as this is what caller always expectes.
+ // as this is what the caller always expects.
if (ObjCTypes.IvarOffsetVarTy == ObjCTypes.IntTy)
IvarOffsetValue = CGF.Builder.CreateIntCast(
IvarOffsetValue, ObjCTypes.LongTy, true, "ivar.conv");
@@ -7079,7 +7075,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
CGF.getPointerAlign());
// Update the message ref argument.
- args[1].RV = RValue::get(mref.getPointer());
+ args[1].setRValue(RValue::get(mref.getPointer()));
// Load the function to call from the message ref table.
Address calleeAddr =
@@ -7528,12 +7524,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
false, llvm::GlobalValue::ExternalLinkage,
nullptr, EHTypeName);
- if (CGM.getTriple().isOSBinFormatCOFF()) {
- if (ID->hasAttr<DLLExportAttr>())
- Entry->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- else if (ID->hasAttr<DLLImportAttr>())
- Entry->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- }
+ CGM.setGVProperties(Entry, ID);
return Entry;
}
}
@@ -7572,10 +7563,8 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
CGM.getPointerAlign(),
/*constant*/ false,
L);
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (hasObjCExceptionAttribute(CGM.getContext(), ID))
- if (ID->hasAttr<DLLExportAttr>())
- Entry->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+ CGM.setGVProperties(Entry, ID);
}
assert(Entry->getLinkage() == L);
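All of the COFF-specific removals in this file collapse the same hand-rolled storage-class selection into a single CGM.setGVProperties(GV, Decl) call. For reference, this is the fragment each deleted site repeated (excerpted from the removed lines above, not new code):

if (CGM.getTriple().isOSBinFormatCOFF()) {
  auto Storage = llvm::GlobalValue::DefaultStorageClass;
  if (ID->hasAttr<DLLImportAttr>())
    Storage = llvm::GlobalValue::DLLImportStorageClass;
  else if (ID->hasAttr<DLLExportAttr>())
    Storage = llvm::GlobalValue::DLLExportStorageClass;
  Entry->setDLLStorageClass(Storage);
}

Centralizing it in one CodeGenModule hook also keeps DLL storage and visibility decisions in a single place, presumably why even the dllexport-only sites now route through it.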
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index d140e7f09e9a..1da19a90c387 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -66,13 +66,19 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
}
llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T) {
- if (!PipeTy){
- uint32_t PipeAddrSpc = CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T));
- PipeTy = llvm::PointerType::get(llvm::StructType::create(
- CGM.getLLVMContext(), "opencl.pipe_t"), PipeAddrSpc);
- }
+ if (T->isReadOnly())
+ return getPipeType(T, "opencl.pipe_ro_t", PipeROTy);
+ else
+ return getPipeType(T, "opencl.pipe_wo_t", PipeWOTy);
+}
+llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T, StringRef Name,
+ llvm::Type *&PipeTy) {
+ if (!PipeTy)
+ PipeTy = llvm::PointerType::get(llvm::StructType::create(
+ CGM.getLLVMContext(), Name),
+ CGM.getContext().getTargetAddressSpace(
+ CGM.getContext().getOpenCLTypeAddrSpace(T)));
return PipeTy;
}
@@ -112,37 +118,64 @@ llvm::PointerType *CGOpenCLRuntime::getGenericVoidPointerType() {
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
}
-CGOpenCLRuntime::EnqueuedBlockInfo
-CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
- // The block literal may be assigned to a const variable. Chasing down
- // to get the block literal.
+// Get the block literal from an expression derived from the block expression.
+// OpenCL v2.0 s6.12.5:
+// Block variable declarations are implicitly qualified with const. Therefore
+// all block variables must be initialized at declaration time and may not be
+// reassigned.
+static const BlockExpr *getBlockExpr(const Expr *E) {
+ if (auto Cast = dyn_cast<CastExpr>(E)) {
+ E = Cast->getSubExpr();
+ }
if (auto DR = dyn_cast<DeclRefExpr>(E)) {
E = cast<VarDecl>(DR->getDecl())->getInit();
}
+ E = E->IgnoreImplicit();
if (auto Cast = dyn_cast<CastExpr>(E)) {
E = Cast->getSubExpr();
}
- auto *Block = cast<BlockExpr>(E);
+ return cast<BlockExpr>(E);
+}
+
+/// Record the emitted LLVM invoke function and block literal for the
+/// corresponding block expression.
+void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
+ llvm::Function *InvokeF,
+ llvm::Value *Block) {
+ assert(EnqueuedBlockMap.find(E) == EnqueuedBlockMap.end() &&
+ "Block expression emitted twice");
+ assert(isa<llvm::Function>(InvokeF) && "Invalid invoke function");
+ assert(Block->getType()->isPointerTy() && "Invalid block literal type");
+ EnqueuedBlockMap[E].InvokeFunc = InvokeF;
+ EnqueuedBlockMap[E].BlockArg = Block;
+ EnqueuedBlockMap[E].Kernel = nullptr;
+}
+
+llvm::Function *CGOpenCLRuntime::getInvokeFunction(const Expr *E) {
+ return EnqueuedBlockMap[getBlockExpr(E)].InvokeFunc;
+}
+
+CGOpenCLRuntime::EnqueuedBlockInfo
+CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
+ CGF.EmitScalarExpr(E);
+
+ const BlockExpr *Block = getBlockExpr(E);
+ assert(EnqueuedBlockMap.find(Block) != EnqueuedBlockMap.end() &&
+ "Block expression not emitted");
- // The same block literal may be enqueued multiple times. Cache it if
- // possible.
- auto Loc = EnqueuedBlockMap.find(Block);
- if (Loc != EnqueuedBlockMap.end()) {
- return Loc->second;
+ // Do not emit the block wrapper again if it has already been emitted.
+ if (EnqueuedBlockMap[Block].Kernel) {
+ return EnqueuedBlockMap[Block];
}
- // Emit block literal as a common block expression and get the block invoke
- // function.
- llvm::Function *Invoke;
- auto *V = CGF.EmitBlockLiteral(cast<BlockExpr>(Block), &Invoke);
auto *F = CGF.getTargetHooks().createEnqueuedBlockKernel(
- CGF, Invoke, V->stripPointerCasts());
+ CGF, EnqueuedBlockMap[Block].InvokeFunc,
+ EnqueuedBlockMap[Block].BlockArg->stripPointerCasts());
// The common part of the post-processing of the kernel goes here.
F->addFnAttr(llvm::Attribute::NoUnwind);
F->setCallingConv(
CGF.getTypes().ClangCallConvToLLVMCallConv(CallingConv::CC_OpenCLKernel));
- EnqueuedBlockInfo Info{F, V};
- EnqueuedBlockMap[Block] = Info;
- return Info;
+ EnqueuedBlockMap[Block].Kernel = F;
+ return EnqueuedBlockMap[Block];
}
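The rework above splits enqueued-block emission into a record phase (recordBlockInfo, fired when the block literal itself is emitted) and a wrap phase (the kernel is created lazily and cached). A minimal sketch of that scheme with the clang types stubbed out — the names follow the patch, everything else is an assumption:

#include <cassert>
#include <map>

struct BlockExpr; // stands in for clang::BlockExpr
struct Function {};
struct Value {};

struct EnqueuedBlockInfo {
  Function *InvokeFunc = nullptr; // recorded by normal block codegen
  Function *Kernel = nullptr;     // wrapper kernel, created lazily
  Value *BlockArg = nullptr;      // recorded block literal
};

std::map<const BlockExpr *, EnqueuedBlockInfo> EnqueuedBlockMap;

// Phase 1: normal codegen records the invoke function and block literal.
void recordBlockInfo(const BlockExpr *E, Function *Invoke, Value *Block) {
  assert(EnqueuedBlockMap.find(E) == EnqueuedBlockMap.end() &&
         "Block expression emitted twice");
  EnqueuedBlockMap[E].InvokeFunc = Invoke;
  EnqueuedBlockMap[E].BlockArg = Block;
}

// Phase 2: the enqueue path looks the record up and wraps it into a kernel
// exactly once; later enqueues of the same block reuse the cached kernel.
EnqueuedBlockInfo emitEnqueuedBlock(const BlockExpr *E,
                                    Function *(*makeKernel)(Function *,
                                                            Value *)) {
  EnqueuedBlockInfo &Info = EnqueuedBlockMap[E];
  assert(Info.InvokeFunc && "Block expression not emitted");
  if (!Info.Kernel)
    Info.Kernel = makeKernel(Info.InvokeFunc, Info.BlockArg);
  return Info;
}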
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index ead303d1d0d5..a513340827a8 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -23,6 +23,7 @@
namespace clang {
+class BlockExpr;
class Expr;
class VarDecl;
@@ -34,20 +35,25 @@ class CodeGenModule;
class CGOpenCLRuntime {
protected:
CodeGenModule &CGM;
- llvm::Type *PipeTy;
+ llvm::Type *PipeROTy;
+ llvm::Type *PipeWOTy;
llvm::PointerType *SamplerTy;
/// Structure for enqueued block information.
struct EnqueuedBlockInfo {
- llvm::Function *Kernel; /// Enqueued block kernel.
- llvm::Value *BlockArg; /// The first argument to enqueued block kernel.
+ llvm::Function *InvokeFunc; /// Block invoke function.
+ llvm::Function *Kernel; /// Enqueued block kernel.
+ llvm::Value *BlockArg; /// The first argument to enqueued block kernel.
};
/// Maps block expression to block information.
llvm::DenseMap<const Expr *, EnqueuedBlockInfo> EnqueuedBlockMap;
+ virtual llvm::Type *getPipeType(const PipeType *T, StringRef Name,
+ llvm::Type *&PipeTy);
+
public:
- CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM), PipeTy(nullptr),
- SamplerTy(nullptr) {}
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM),
+ PipeROTy(nullptr), PipeWOTy(nullptr), SamplerTy(nullptr) {}
virtual ~CGOpenCLRuntime();
/// Emit the IR required for a work-group-local variable declaration, and add
@@ -62,11 +68,11 @@ public:
llvm::PointerType *getSamplerType(const Type *T);
- // \brief Returnes a value which indicates the size in bytes of the pipe
+ // Returns a value which indicates the size in bytes of the pipe
// element.
virtual llvm::Value *getPipeElemSize(const Expr *PipeArg);
- // \brief Returnes a value which indicates the alignment in bytes of the pipe
+ // Returns a value which indicates the alignment in bytes of the pipe
// element.
virtual llvm::Value *getPipeElemAlign(const Expr *PipeArg);
@@ -76,6 +82,19 @@ public:
/// \return enqueued block information for enqueued block.
EnqueuedBlockInfo emitOpenCLEnqueuedBlock(CodeGenFunction &CGF,
const Expr *E);
+
+ /// Record the invoke function and block literal emitted during normal
+ /// codegen for a block expression. The information is used by
+ /// emitOpenCLEnqueuedBlock to emit the wrapper kernel.
+ ///
+ /// \param InvokeF invoke function emitted for the block expression.
+ /// \param Block block literal emitted for the block expression.
+ void recordBlockInfo(const BlockExpr *E, llvm::Function *InvokeF,
+ llvm::Value *Block);
+
+ /// \return LLVM block invoke function emitted for an expression derived from
+ /// the block expression.
+ llvm::Function *getInvokeFunction(const Expr *E);
};
}
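Taken together, the intended call order of the three hooks declared above is: block codegen fires recordBlockInfo when it emits the literal; emitOpenCLEnqueuedBlock emits the expression (triggering that record), then wraps InvokeFunc into the kernel once and caches it; getInvokeFunction fetches the recorded invoke function for callers that need it directly. A hypothetical caller, shown only as a sketch against the declared API:

void emitEnqueueKernelArgs(CodeGenFunction &CGF, CGOpenCLRuntime &RT,
                           const Expr *BlockE) {
  // Emits BlockE if needed (which records the block info) and returns the
  // cached wrapper kernel plus the block literal argument.
  auto Info = RT.emitOpenCLEnqueuedBlock(CGF, BlockE);
  // Info.Kernel and Info.BlockArg feed the enqueue_kernel runtime call.
  (void)Info;
}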
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index fa38ee80bf41..3730b9af12fa 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -14,12 +14,13 @@
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/BitmaskEnum.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
@@ -33,20 +34,20 @@ using namespace clang;
using namespace CodeGen;
namespace {
-/// \brief Base class for handling code generation inside OpenMP regions.
+/// Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
- /// \brief Kinds of OpenMP regions used in codegen.
+ /// Kinds of OpenMP regions used in codegen.
enum CGOpenMPRegionKind {
- /// \brief Region with outlined function for standalone 'parallel'
+ /// Region with outlined function for standalone 'parallel'
/// directive.
ParallelOutlinedRegion,
- /// \brief Region with outlined function for standalone 'task' directive.
+ /// Region with outlined function for standalone 'task' directive.
TaskOutlinedRegion,
- /// \brief Region for constructs that do not require function outlining,
+ /// Region for constructs that do not require function outlining,
/// like 'for', 'sections', 'atomic' etc. directives.
InlinedRegion,
- /// \brief Region with outlined function for standalone 'target' directive.
+ /// Region with outlined function for standalone 'target' directive.
TargetRegion,
};
@@ -63,14 +64,14 @@ public:
: CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
Kind(Kind), HasCancel(HasCancel) {}
- /// \brief Get a variable or parameter for storing global thread id
+ /// Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
virtual const VarDecl *getThreadIDVariable() const = 0;
- /// \brief Emit the captured statement body.
+ /// Emit the captured statement body.
void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
- /// \brief Get an LValue for the current ThreadID variable.
+ /// Get an LValue for the current ThreadID variable.
/// \return LValue for thread id variable. This LValue always has type int32*.
virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
@@ -95,7 +96,7 @@ protected:
bool HasCancel;
};
-/// \brief API for captured statement code generation in OpenMP constructs.
+/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
@@ -108,11 +109,11 @@ public:
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
- /// \brief Get a variable or parameter for storing global thread id
+ /// Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
StringRef getHelperName() const override { return HelperName; }
static bool classof(const CGCapturedStmtInfo *Info) {
@@ -122,13 +123,13 @@ public:
}
private:
- /// \brief A variable or parameter storing global thread id for OpenMP
+ /// A variable or parameter storing global thread id for OpenMP
/// constructs.
const VarDecl *ThreadIDVar;
StringRef HelperName;
};
-/// \brief API for captured statement code generation in OpenMP constructs.
+/// API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
class UntiedTaskActionTy final : public PrePostActionTy {
@@ -144,11 +145,12 @@ public:
void Enter(CodeGenFunction &CGF) override {
if (Untied) {
// Emit task switching point.
- auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
+ LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(PartIDVar),
PartIDVar->getType()->castAs<PointerType>());
- auto *Res = CGF.EmitLoadOfScalar(PartIdLVal, SourceLocation());
- auto *DoneBB = CGF.createBasicBlock(".untied.done.");
+ llvm::Value *Res =
+ CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
CGF.EmitBlock(DoneBB);
CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
@@ -160,7 +162,7 @@ public:
}
void emitUntiedSwitch(CodeGenFunction &CGF) const {
if (Untied) {
- auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
+ LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(PartIDVar),
PartIDVar->getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
@@ -188,14 +190,14 @@ public:
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
- /// \brief Get a variable or parameter for storing global thread id
+ /// Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
- /// \brief Get an LValue for the current ThreadID variable.
+ /// Get an LValue for the current ThreadID variable.
LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
StringRef getHelperName() const override { return ".omp_outlined."; }
void emitUntiedSwitch(CodeGenFunction &CGF) override {
@@ -209,14 +211,14 @@ public:
}
private:
- /// \brief A variable or parameter storing global thread id for OpenMP
+ /// A variable or parameter storing global thread id for OpenMP
/// constructs.
const VarDecl *ThreadIDVar;
/// Action for emitting code for untied tasks.
const UntiedTaskActionTy &Action;
};
-/// \brief API for inlined captured statement code generation in OpenMP
+/// API for inlined captured statement code generation in OpenMP
/// constructs.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
@@ -227,7 +229,7 @@ public:
OldCSI(OldCSI),
OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
- // \brief Retrieve the value of the context parameter.
+ // Retrieve the value of the context parameter.
llvm::Value *getContextValue() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getContextValue();
@@ -242,7 +244,7 @@ public:
llvm_unreachable("No context value for inlined OpenMP region");
}
- /// \brief Lookup the captured field decl for a variable.
+ /// Lookup the captured field decl for a variable.
const FieldDecl *lookup(const VarDecl *VD) const override {
if (OuterRegionInfo)
return OuterRegionInfo->lookup(VD);
@@ -257,7 +259,7 @@ public:
return nullptr;
}
- /// \brief Get a variable or parameter for storing global thread id
+ /// Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override {
if (OuterRegionInfo)
@@ -265,14 +267,14 @@ public:
return nullptr;
}
- /// \brief Get an LValue for the current ThreadID variable.
+ /// Get an LValue for the current ThreadID variable.
LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
if (OuterRegionInfo)
return OuterRegionInfo->getThreadIDVariableLValue(CGF);
llvm_unreachable("No LValue for inlined OpenMP construct");
}
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
StringRef getHelperName() const override {
if (auto *OuterRegionInfo = getOldCSI())
return OuterRegionInfo->getHelperName();
@@ -294,12 +296,12 @@ public:
~CGOpenMPInlinedRegionInfo() override = default;
private:
- /// \brief CodeGen info about outer OpenMP region.
+ /// CodeGen info about outer OpenMP region.
CodeGenFunction::CGCapturedStmtInfo *OldCSI;
CGOpenMPRegionInfo *OuterRegionInfo;
};
-/// \brief API for captured statement code generation in OpenMP target
+/// API for captured statement code generation in OpenMP target
/// constructs. For these captures, implicit parameters are used instead of the
/// captured fields. The name of the target region has to be unique in a given
/// application so it is provided by the client, because only the client has
@@ -312,11 +314,11 @@ public:
/*HasCancel=*/false),
HelperName(HelperName) {}
- /// \brief This is unused for target regions because each starts executing
+ /// This is unused for target regions because each starts executing
/// with a single thread.
const VarDecl *getThreadIDVariable() const override { return nullptr; }
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
StringRef getHelperName() const override { return HelperName; }
static bool classof(const CGCapturedStmtInfo *Info) {
@@ -331,7 +333,7 @@ private:
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
llvm_unreachable("No codegen for expressions");
}
-/// \brief API for generation of expressions captured in a innermost OpenMP
+/// API for generation of expressions captured in a innermost OpenMP
/// region.
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
@@ -343,7 +345,7 @@ public:
// Make sure the globals captured in the provided statement are local by
// using the privatization logic. We assume the same variable is not
// captured more than once.
- for (auto &C : CS.captures()) {
+ for (const auto &C : CS.captures()) {
if (!C.capturesVariable() && !C.capturesVariableByCopy())
continue;
@@ -354,33 +356,32 @@ public:
DeclRefExpr DRE(const_cast<VarDecl *>(VD),
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
- SourceLocation());
- PrivScope.addPrivate(VD, [&CGF, &DRE]() -> Address {
- return CGF.EmitLValue(&DRE).getAddress();
- });
+ C.getLocation());
+ PrivScope.addPrivate(
+ VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
}
(void)PrivScope.Privatize();
}
- /// \brief Lookup the captured field decl for a variable.
+ /// Lookup the captured field decl for a variable.
const FieldDecl *lookup(const VarDecl *VD) const override {
- if (auto *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
+ if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
return FD;
return nullptr;
}
- /// \brief Emit the captured statement body.
+ /// Emit the captured statement body.
void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
llvm_unreachable("No body for expressions");
}
- /// \brief Get a variable or parameter for storing global thread id
+ /// Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override {
llvm_unreachable("No thread id for expressions");
}
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
StringRef getHelperName() const override {
llvm_unreachable("No helper name for expressions");
}
@@ -392,14 +393,15 @@ private:
CodeGenFunction::OMPPrivateScope PrivScope;
};
-/// \brief RAII for emitting code of OpenMP constructs.
+/// RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
CodeGenFunction &CGF;
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField = nullptr;
+ const CodeGen::CGBlockInfo *BlockInfo = nullptr;
public:
- /// \brief Constructs region for combined constructs.
+ /// Constructs region for combined constructs.
/// \param CodeGen Code generation sequence for combined directives. Includes
/// a list of functions used for code generation of implicitly inlined
/// regions.
@@ -412,6 +414,8 @@ public:
std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
LambdaThisCaptureField = CGF.LambdaThisCaptureField;
CGF.LambdaThisCaptureField = nullptr;
+ BlockInfo = CGF.BlockInfo;
+ CGF.BlockInfo = nullptr;
}
~InlinedOpenMPRegionRAII() {
@@ -422,28 +426,29 @@ public:
CGF.CapturedStmtInfo = OldCSI;
std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
CGF.LambdaThisCaptureField = LambdaThisCaptureField;
+ CGF.BlockInfo = BlockInfo;
}
};
-/// \brief Values for bit flags used in the ident_t to describe the fields.
+/// Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the code
/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
- /// \brief Use trampoline for internal microtask.
+ /// Use trampoline for internal microtask.
OMP_IDENT_IMD = 0x01,
- /// \brief Use c-style ident structure.
+ /// Use c-style ident structure.
OMP_IDENT_KMPC = 0x02,
- /// \brief Atomic reduction option for kmpc_reduce.
+ /// Atomic reduction option for kmpc_reduce.
OMP_ATOMIC_REDUCE = 0x10,
- /// \brief Explicit 'barrier' directive.
+ /// Explicit 'barrier' directive.
OMP_IDENT_BARRIER_EXPL = 0x20,
- /// \brief Implicit barrier in code.
+ /// Implicit barrier in code.
OMP_IDENT_BARRIER_IMPL = 0x40,
- /// \brief Implicit barrier in 'for' directive.
+ /// Implicit barrier in 'for' directive.
OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
- /// \brief Implicit barrier in 'sections' directive.
+ /// Implicit barrier in 'sections' directive.
OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
- /// \brief Implicit barrier in 'single' directive.
+ /// Implicit barrier in 'single' directive.
OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
/// Call of __kmp_for_static_init for static loop.
OMP_IDENT_WORK_LOOP = 0x200,
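These values form a bitmask (see the LLVM_MARK_AS_BITMASK_ENUM line in the next hunk), so a concrete ident_t flag word is the OR of one ident form and any modifiers. An illustrative combination:

// c-style ident carrying the implicit barrier of a 'single' directive.
unsigned Flags = OMP_IDENT_KMPC | OMP_IDENT_BARRIER_IMPL_SINGLE; // 0x02 | 0x140 == 0x142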
@@ -454,7 +459,7 @@ enum OpenMPLocationFlags : unsigned {
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
-/// \brief Describes ident structure that describes a source location.
+/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
/// Original structure:
@@ -481,24 +486,24 @@ enum OpenMPLocationFlags : unsigned {
/// */
/// } ident_t;
enum IdentFieldIndex {
- /// \brief might be used in Fortran
+ /// might be used in Fortran
IdentField_Reserved_1,
- /// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
+ /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
IdentField_Flags,
- /// \brief Not really used in Fortran any more
+ /// Not really used in Fortran any more
IdentField_Reserved_2,
- /// \brief Source[4] in Fortran, do not use for C++
+ /// Source[4] in Fortran, do not use for C++
IdentField_Reserved_3,
- /// \brief String describing the source location. The string is composed of
+ /// String describing the source location. The string is composed of
/// semi-colon separated fields which describe the source file, the function
/// and a pair of line numbers that delimit the construct.
IdentField_PSource
};
-/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
+/// Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
- /// \brief Lower bound for default (unordered) versions.
+ /// Lower bound for default (unordered) versions.
OMP_sch_lower = 32,
OMP_sch_static_chunked = 33,
OMP_sch_static = 34,
@@ -508,7 +513,7 @@ enum OpenMPSchedType {
OMP_sch_auto = 38,
/// static with chunk adjustment (e.g., simd)
OMP_sch_static_balanced_chunked = 45,
- /// \brief Lower bound for 'ordered' versions.
+ /// Lower bound for 'ordered' versions.
OMP_ord_lower = 64,
OMP_ord_static_chunked = 65,
OMP_ord_static = 66,
@@ -517,7 +522,7 @@ enum OpenMPSchedType {
OMP_ord_runtime = 69,
OMP_ord_auto = 70,
OMP_sch_default = OMP_sch_static,
- /// \brief dist_schedule types
+ /// dist_schedule types
OMP_dist_sch_static_chunked = 91,
OMP_dist_sch_static = 92,
/// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
@@ -528,13 +533,13 @@ enum OpenMPSchedType {
};
enum OpenMPRTLFunction {
- /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
+ /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
/// kmpc_micro microtask, ...);
OMPRTL__kmpc_fork_call,
- /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
+ /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
/// kmp_int32 global_tid, void *data, size_t size, void ***cache);
OMPRTL__kmpc_threadprivate_cached,
- /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
+ /// Call to void __kmpc_threadprivate_register( ident_t *,
/// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
OMPRTL__kmpc_threadprivate_register,
// Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
@@ -742,11 +747,11 @@ void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
/// UDR decl used for reduction.
static const OMPDeclareReductionDecl *
getReductionInit(const Expr *ReductionOp) {
- if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
- if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
- if (auto *DRE =
+ if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
+ if (const auto *DRE =
dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
- if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
+ if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
return DRD;
return nullptr;
}
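The shape getReductionInit unwraps — a CallExpr whose callee is an OpaqueValueExpr over a DeclRefExpr to the OMPDeclareReductionDecl — is produced by user-defined reductions. A small self-contained example; the identifiers omp_out/omp_in/omp_priv are fixed by the OpenMP spec and are the same names looked up in emitUserDefinedReduction below:

#include <climits>

#pragma omp declare reduction(maxint : int : omp_out = omp_in > omp_out ? omp_in : omp_out) \
    initializer(omp_priv = INT_MIN)

int reduceMax(const int *A, int N) {
  int M = INT_MIN;
#pragma omp parallel for reduction(maxint : M)
  for (int I = 0; I < N; ++I)
    M = A[I] > M ? A[I] : M;
  return M;
}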
@@ -759,48 +764,51 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
if (DRD->getInitializer()) {
std::pair<llvm::Function *, llvm::Function *> Reduction =
CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
- auto *CE = cast<CallExpr>(InitOp);
- auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
+ const auto *CE = cast<CallExpr>(InitOp);
+ const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
- auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
- auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
+ const auto *LHSDRE =
+ cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
+ const auto *RHSDRE =
+ cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
- [=]() -> Address { return Private; });
+ [=]() { return Private; });
PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
- [=]() -> Address { return Original; });
+ [=]() { return Original; });
(void)PrivateScope.Privatize();
RValue Func = RValue::get(Reduction.second);
CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
CGF.EmitIgnoredExpr(InitOp);
} else {
llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
+ std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
auto *GV = new llvm::GlobalVariable(
CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage, Init, ".init");
+ llvm::GlobalValue::PrivateLinkage, Init, Name);
LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
RValue InitRVal;
switch (CGF.getEvaluationKind(Ty)) {
case TEK_Scalar:
- InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation());
+ InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
break;
case TEK_Complex:
InitRVal =
- RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation()));
+ RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
break;
case TEK_Aggregate:
InitRVal = RValue::getAggregate(LV.getAddress());
break;
}
- OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue);
+ OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
/*IsInitializer=*/false);
}
}
-/// \brief Emit initialization of arrays of complex types.
+/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param Init Initial expression of array.
@@ -814,8 +822,8 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
QualType ElementTy;
// Drill down to the base element type on both arrays.
- auto ArrayTy = Type->getAsArrayTypeUnsafe();
- auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
+ llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
DestAddr =
CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
if (DRD)
@@ -825,18 +833,18 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
llvm::Value *SrcBegin = nullptr;
if (DRD)
SrcBegin = SrcAddr.getPointer();
- auto DestBegin = DestAddr.getPointer();
+ llvm::Value *DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
+ llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
// The basic structure here is a while-do loop.
- auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
- auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
- auto IsEmpty =
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
+ llvm::Value *IsEmpty =
CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
// Enter the loop body, making that address the current address.
- auto EntryBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(BodyBB);
CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
@@ -871,16 +879,16 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
if (DRD) {
// Shift the address forward by one element.
- auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
+ llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
}
// Shift the address forward by one element.
- auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
+ llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
// Check whether we've reached the end.
- auto Done =
+ llvm::Value *Done =
CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
@@ -889,6 +897,25 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
+static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+isDeclareTargetDeclaration(const ValueDecl *VD) {
+ for (const Decl *D : VD->redecls()) {
+ if (!D->hasAttrs())
+ continue;
+ if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getMapType();
+ }
+ if (const auto *V = dyn_cast<VarDecl>(VD)) {
+ if (const VarDecl *TD = V->getTemplateInstantiationPattern())
+ return isDeclareTargetDeclaration(TD);
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
+ if (const auto *TD = FD->getTemplateInstantiationPattern())
+ return isDeclareTargetDeclaration(TD);
+ }
+
+ return llvm::None;
+}
+
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
return CGF.EmitOMPSharedLValue(E);
}
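The new isDeclareTargetDeclaration helper walks every redeclaration looking for OMPDeclareTargetDeclAttr and, failing that, retries on the template pattern of a variable or function instantiation. The kind of source it classifies, as a sketch:

#pragma omp declare target
int DeviceCounter;                          // attribute sits on the decl itself
template <typename T> T square(T X) { return X * X; }
#pragma omp end declare target

int useOnDevice() {
  // square<int> carries no attribute of its own; the helper falls back to
  // the template pattern and finds the attribute there.
  return square(DeviceCounter);
}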
@@ -906,7 +933,7 @@ void ReductionCodeGen::emitAggregateInitialization(
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
// captured region.
- auto *PrivateVD =
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
bool EmitDeclareReductionInit =
DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
@@ -926,7 +953,7 @@ ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
BaseDecls.reserve(Shareds.size());
auto IPriv = Privates.begin();
auto IRed = ReductionOps.begin();
- for (const auto *Ref : Shareds) {
+ for (const Expr *Ref : Shareds) {
ClausesData.emplace_back(Ref, *IPriv, *IRed);
std::advance(IPriv, 1);
std::advance(IRed, 1);
@@ -942,7 +969,7 @@ void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
- auto *PrivateVD =
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
QualType PrivateType = PrivateVD->getType();
bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
@@ -955,7 +982,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- llvm::Type *ElemType =
+ auto *ElemType =
cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
->getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
@@ -981,7 +1008,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
llvm::Value *Size) {
- auto *PrivateVD =
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
QualType PrivateType = PrivateVD->getType();
if (!PrivateType->isVariablyModifiedType()) {
@@ -1002,9 +1029,10 @@ void ReductionCodeGen::emitInitialization(
CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
assert(SharedAddresses.size() > N && "No variable was generated");
- auto *PrivateVD =
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
+ const OMPDeclareReductionDecl *DRD =
+ getReductionInit(ClausesData[N].ReductionOp);
QualType PrivateType = PrivateVD->getType();
PrivateAddr = CGF.Builder.CreateElementBitCast(
PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
@@ -1029,7 +1057,7 @@ void ReductionCodeGen::emitInitialization(
}
bool ReductionCodeGen::needCleanups(unsigned N) {
- auto *PrivateVD =
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
QualType PrivateType = PrivateVD->getType();
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
@@ -1038,7 +1066,7 @@ bool ReductionCodeGen::needCleanups(unsigned N) {
void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr) {
- auto *PrivateVD =
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
QualType PrivateType = PrivateVD->getType();
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
@@ -1054,9 +1082,9 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
BaseTy = BaseTy.getNonReferenceType();
while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
!CGF.getContext().hasSameType(BaseTy, ElTy)) {
- if (auto *PtrTy = BaseTy->getAs<PointerType>())
+ if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
- else {
+ } else {
LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
}
@@ -1097,28 +1125,32 @@ static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
return Address(Addr, BaseLVAlignment);
}
-Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
- Address PrivateAddr) {
- const DeclRefExpr *DE;
+static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
const VarDecl *OrigVD = nullptr;
- if (auto *OASE = dyn_cast<OMPArraySectionExpr>(ClausesData[N].Ref)) {
- auto *Base = OASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
+ const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
Base = TempOASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
DE = cast<DeclRefExpr>(Base);
OrigVD = cast<VarDecl>(DE->getDecl());
- } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(ClausesData[N].Ref)) {
- auto *Base = ASE->getBase()->IgnoreParenImpCasts();
- while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
+ const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
DE = cast<DeclRefExpr>(Base);
OrigVD = cast<VarDecl>(DE->getDecl());
}
- if (OrigVD) {
+ return OrigVD;
+}
+
+Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
+ Address PrivateAddr) {
+ const DeclRefExpr *DE;
+ if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
BaseDecls.emplace_back(OrigVD);
- auto OriginalBaseLValue = CGF.EmitLValue(DE);
+ LValue OriginalBaseLValue = CGF.EmitLValue(DE);
LValue BaseLValue =
loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
OriginalBaseLValue);
@@ -1140,7 +1172,8 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
}
bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
- auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
+ const OMPDeclareReductionDecl *DRD =
+ getReductionInit(ClausesData[N].ReductionOp);
return DRD && DRD->getInitializer();
}
@@ -1170,12 +1203,38 @@ LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
AlignmentSource::Decl);
}
-CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
- : CGM(CGM), OffloadEntriesInfoManager(CGM) {
- IdentTy = llvm::StructType::create(
- "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
- CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
- CGM.Int8PtrTy /* psource */);
+static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
+ QualType FieldTy) {
+ auto *Field = FieldDecl::Create(
+ C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
+ C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ DC->addDecl(Field);
+ return Field;
+}
+
+CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
+ StringRef Separator)
+ : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
+ OffloadEntriesInfoManager(CGM) {
+ ASTContext &C = CGM.getContext();
+ RecordDecl *RD = C.buildImplicitRecord("ident_t");
+ QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ RD->startDefinition();
+ // reserved_1
+ addFieldToRecordDecl(C, RD, KmpInt32Ty);
+ // flags
+ addFieldToRecordDecl(C, RD, KmpInt32Ty);
+ // reserved_2
+ addFieldToRecordDecl(C, RD, KmpInt32Ty);
+ // reserved_3
+ addFieldToRecordDecl(C, RD, KmpInt32Ty);
+ // psource
+ addFieldToRecordDecl(C, RD, C.VoidPtrTy);
+ RD->completeDefinition();
+ IdentQTy = C.getRecordType(RD);
+ IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
loadOffloadInfoMetadata();
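The implicit record built field-by-field above mirrors the runtime's ident_t from kmp.h. Spelled as a plain struct, with the field meanings taken from the IdentFieldIndex comments earlier in this file (a reference sketch, not the generated type):

#include <cstdint>

struct ident_t {
  std::int32_t reserved_1; // might be used in Fortran
  std::int32_t flags;      // OMP_IDENT_* bits; OMP_IDENT_KMPC marks this form
  std::int32_t reserved_2; // not really used in Fortran any more
  std::int32_t reserved_3; // source[4] in Fortran, do not use for C++
  const char *psource;     // ";file;function;line;column;;"
};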
@@ -1185,12 +1244,23 @@ void CGOpenMPRuntime::clear() {
InternalVars.clear();
}
+std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
+ SmallString<128> Buffer;
+ llvm::raw_svector_ostream OS(Buffer);
+ StringRef Sep = FirstSeparator;
+ for (StringRef Part : Parts) {
+ OS << Sep << Part;
+ Sep = Separator;
+ }
+ return OS.str();
+}
+
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
const Expr *CombinerInitializer, const VarDecl *In,
const VarDecl *Out, bool IsCombiner) {
// void .omp_combiner.(Ty *in, Ty *out);
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
QualType PtrTy = C.getPointerType(Ty).withRestrict();
FunctionArgList Args;
ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
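The getName helper from the previous hunk rebuilds the old hard-coded spellings from parts. Assuming both separators default to "." — which is what makes getName({"omp_combiner", ""}) reproduce the ".omp_combiner." literal this hunk deletes — it behaves like this standalone sketch:

#include <string>
#include <vector>

// Sketch of CGOpenMPRuntime::getName; the separator defaults are assumptions.
std::string getName(const std::vector<std::string> &Parts,
                    const std::string &First = ".",
                    const std::string &Sep = ".") {
  std::string Out;
  const std::string *Cur = &First;
  for (const std::string &P : Parts) {
    Out += *Cur + P; // first part gets FirstSeparator, the rest get Separator
    Cur = &Sep;
  }
  return Out;
}
// getName({"omp_combiner", ""}) == ".omp_combiner."
// getName({"init"})             == ".init"  (matches the reduction-init hunk)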
@@ -1199,28 +1269,30 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
/*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
Args.push_back(&OmpOutParm);
Args.push_back(&OmpInParm);
- auto &FnInfo =
+ const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
- auto *Fn = llvm::Function::Create(
- FnTy, llvm::GlobalValue::InternalLinkage,
- IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
+ llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ std::string Name = CGM.getOpenMPRuntime().getName(
+ {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
+ auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
+ Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
Fn->removeFnAttr(llvm::Attribute::NoInline);
Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
Fn->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
// Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
// Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
+ Out->getLocation());
CodeGenFunction::OMPPrivateScope Scope(CGF);
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
- Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() -> Address {
+ Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
.getAddress();
});
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
- Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() -> Address {
+ Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
.getAddress();
});
@@ -1242,7 +1314,7 @@ void CGOpenMPRuntime::emitUserDefinedReduction(
CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
if (UDRMap.count(D) > 0)
return;
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
if (!In || !Out) {
In = &C.Idents.get("omp_in");
Out = &C.Idents.get("omp_out");
@@ -1252,7 +1324,7 @@ void CGOpenMPRuntime::emitUserDefinedReduction(
cast<VarDecl>(D->lookup(Out).front()),
/*IsCombiner=*/true);
llvm::Function *Initializer = nullptr;
- if (auto *Init = D->getInitializer()) {
+ if (const Expr *Init = D->getInitializer()) {
if (!Priv || !Orig) {
Priv = &C.Idents.get("omp_priv");
Orig = &C.Idents.get("omp_orig");
@@ -1265,7 +1337,7 @@ void CGOpenMPRuntime::emitUserDefinedReduction(
cast<VarDecl>(D->lookup(Priv).front()),
/*IsCombiner=*/false);
}
- UDRMap.insert(std::make_pair(D, std::make_pair(Combiner, Initializer)));
+ UDRMap.try_emplace(D, Combiner, Initializer);
if (CGF) {
auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
Decls.second.push_back(D);
@@ -1281,25 +1353,6 @@ CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
return UDRMap.lookup(D);
}
-// Layout information for ident_t.
-static CharUnits getIdentAlign(CodeGenModule &CGM) {
- return CGM.getPointerAlign();
-}
-static CharUnits getIdentSize(CodeGenModule &CGM) {
- assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
- return CharUnits::fromQuantity(16) + CGM.getPointerSize();
-}
-static CharUnits getOffsetOfIdentField(IdentFieldIndex Field) {
- // All the fields except the last are i32, so this works beautifully.
- return unsigned(Field) * CharUnits::fromQuantity(4);
-}
-static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
- IdentFieldIndex Field,
- const llvm::Twine &Name = "") {
- auto Offset = getOffsetOfIdentField(Field);
- return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
-}
-
static llvm::Value *emitParallelOrTeamsOutlinedFunction(
CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
@@ -1308,19 +1361,20 @@ static llvm::Value *emitParallelOrTeamsOutlinedFunction(
"thread id variable must be of type kmp_int32 *");
CodeGenFunction CGF(CGM, true);
bool HasCancel = false;
- if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
+ if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
HasCancel = OPD->hasCancel();
- else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
+ else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
HasCancel = OPSD->hasCancel();
- else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
+ else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
- else if (auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
+ else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
- else if (auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
+ else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
- else if (auto *OPFD = dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
+ else if (const auto *OPFD =
+ dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
- else if (auto *OPFD =
+ else if (const auto *OPFD =
dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
HasCancel = OPFD->hasCancel();
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
@@ -1352,8 +1406,8 @@ llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
bool Tied, unsigned &NumberOfParts) {
auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
PrePostActionTy &) {
- auto *ThreadID = getThreadID(CGF, D.getLocStart());
- auto *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
+ llvm::Value *ThreadID = getThreadID(CGF, D.getLocStart());
+ llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
llvm::Value *TaskArgs[] = {
UpLoc, ThreadID,
CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
@@ -1366,21 +1420,69 @@ llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
CodeGen.setAction(Action);
assert(!ThreadIDVar->getType()->isPointerType() &&
"thread id variable must be of type kmp_int32 for tasks");
- auto *CS = cast<CapturedStmt>(D.getAssociatedStmt());
- auto *TD = dyn_cast<OMPTaskDirective>(&D);
+ const OpenMPDirectiveKind Region =
+ isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
+ : OMPD_task;
+ const CapturedStmt *CS = D.getCapturedStmt(Region);
+ const auto *TD = dyn_cast<OMPTaskDirective>(&D);
CodeGenFunction CGF(CGM, true);
CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
InnermostKind,
TD ? TD->hasCancel() : false, Action);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- auto *Res = CGF.GenerateCapturedStmtFunction(*CS);
+ llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
if (!Tied)
NumberOfParts = Action.getNumberOfParts();
return Res;
}
+static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
+ const RecordDecl *RD, const CGRecordLayout &RL,
+ ArrayRef<llvm::Constant *> Data) {
+ llvm::StructType *StructTy = RL.getLLVMType();
+ unsigned PrevIdx = 0;
+ auto DI = Data.begin();
+ for (const FieldDecl *FD : RD->fields()) {
+ unsigned Idx = RL.getLLVMFieldNo(FD);
+ // Fill any padding fields before this one with null values.
+ for (unsigned I = PrevIdx; I < Idx; ++I)
+ Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
+ PrevIdx = Idx + 1;
+ Fields.add(*DI);
+ ++DI;
+ }
+}
+
+template <class... As>
+static llvm::GlobalVariable *
+createConstantGlobalStruct(CodeGenModule &CGM, QualType Ty,
+ ArrayRef<llvm::Constant *> Data, const Twine &Name,
+ As &&... Args) {
+ const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
+ const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
+ ConstantInitBuilder CIBuilder(CGM);
+ ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
+ buildStructValue(Fields, CGM, RD, RL, Data);
+ return Fields.finishAndCreateGlobal(
+ Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty),
+ /*isConstant=*/true, std::forward<As>(Args)...);
+}
+
+template <typename T>
+static void
+createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
+ ArrayRef<llvm::Constant *> Data,
+ T &Parent) {
+ const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
+ const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
+ ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
+ buildStructValue(Fields, CGM, RD, RL, Data);
+ Fields.finishAndAddTo(Parent);
+}
+
Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
- CharUnits Align = getIdentAlign(CGM);
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
if (!Entry) {
if (!DefaultOpenMPPSource) {
@@ -1394,17 +1496,15 @@ Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- ConstantInitBuilder builder(CGM);
- auto fields = builder.beginStruct(IdentTy);
- fields.addInt(CGM.Int32Ty, 0);
- fields.addInt(CGM.Int32Ty, Flags);
- fields.addInt(CGM.Int32Ty, 0);
- fields.addInt(CGM.Int32Ty, 0);
- fields.add(DefaultOpenMPPSource);
- auto DefaultOpenMPLocation =
- fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
- llvm::GlobalValue::PrivateLinkage);
- DefaultOpenMPLocation->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
+ llvm::ConstantInt::get(CGM.Int32Ty, Flags),
+ llvm::ConstantInt::getNullValue(CGM.Int32Ty),
+ llvm::ConstantInt::getNullValue(CGM.Int32Ty),
+ DefaultOpenMPPSource};
+ llvm::GlobalValue *DefaultOpenMPLocation = createConstantGlobalStruct(
+ CGM, IdentQTy, Data, "", llvm::GlobalValue::PrivateLinkage);
+ DefaultOpenMPLocation->setUnnamedAddr(
+ llvm::GlobalValue::UnnamedAddr::Global);
OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
}
@@ -1422,17 +1522,17 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
assert(CGF.CurFn && "No function in current CodeGenFunction.");
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
Address LocValue = Address::invalid();
auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
if (I != OpenMPLocThreadIDMap.end())
- LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));
+ LocValue = Address(I->second.DebugLoc, Align);
// OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
// GetOpenMPThreadID was called before this routine.
if (!LocValue.isValid()) {
// Generate "ident_t .kmpc_loc.addr;"
- Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
- ".kmpc_loc.addr");
+ Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.DebugLoc = AI.getPointer();
LocValue = AI;
@@ -1440,29 +1540,30 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
- CGM.getSize(getIdentSize(CGF.CGM)));
+ CGF.getTypeSize(IdentQTy));
}
// char **psource = &.kmpc_loc_<flags>.addr.psource;
- Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);
+ LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
+ auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
+ LValue PSource =
+ CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
- auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
+ llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
if (OMPDebugLoc == nullptr) {
SmallString<128> Buffer2;
llvm::raw_svector_ostream OS2(Buffer2);
// Build debug location
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
OS2 << ";" << PLoc.getFilename() << ";";
- if (const FunctionDecl *FD =
- dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
OS2 << FD->getQualifiedNameAsString();
- }
OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
}
// *psource = ";<File>;<Function>;<Line>;<Column>;;";
- CGF.Builder.CreateStore(OMPDebugLoc, PSource);
+ CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
// Our callers always pass this to a runtime function, so for
// convenience, go ahead and return a naked pointer.
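As an illustration (hypothetical source position), a directive at line 20, column 9 of foo.c inside main() produces:

  // Illustrative psource payload written through the LValue above:
  //   ";foo.c;main;20;9;;"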
@@ -1490,8 +1591,8 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (OMPRegionInfo->getThreadIDVariable()) {
      // Check if this is an outlined function with thread id passed as argument.
- auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
- ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
+ LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
+ ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
      // If the value was loaded in the entry block, cache it and use it
      // everywhere in the function.
if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
@@ -1509,7 +1610,7 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
// function.
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
- auto *Call = CGF.Builder.CreateCall(
+ llvm::CallInst *Call = CGF.Builder.CreateCall(
createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
Call->setCallingConv(CGF.getRuntimeCC());
@@ -1523,17 +1624,14 @@ void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
if (OpenMPLocThreadIDMap.count(CGF.CurFn))
OpenMPLocThreadIDMap.erase(CGF.CurFn);
if (FunctionUDRMap.count(CGF.CurFn) > 0) {
- for(auto *D : FunctionUDRMap[CGF.CurFn]) {
+ for(auto *D : FunctionUDRMap[CGF.CurFn])
UDRMap.erase(D);
- }
FunctionUDRMap.erase(CGF.CurFn);
}
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
- if (!IdentTy) {
- }
- return llvm::PointerType::getUnqual(IdentTy);
+ return IdentTy->getPointerTo();
}
llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
@@ -1555,7 +1653,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// microtask, ...);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
getKmpc_MicroPointerTy()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
break;
@@ -1563,7 +1661,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_global_thread_num: {
// Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
break;
@@ -1574,7 +1672,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy, CGM.SizeTy,
CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
break;
@@ -1585,7 +1683,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
break;
@@ -1596,7 +1694,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy),
CGM.IntPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
break;
@@ -1605,21 +1703,22 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build void __kmpc_threadprivate_register(ident_t *, void *data,
// kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
// typedef void *(*kmpc_ctor)(void *);
- auto KmpcCtorTy =
+ auto *KmpcCtorTy =
llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
/*isVarArg*/ false)->getPointerTo();
// typedef void *(*kmpc_cctor)(void *, void *);
llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto KmpcCopyCtorTy =
+ auto *KmpcCopyCtorTy =
llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
- /*isVarArg*/ false)->getPointerTo();
+ /*isVarArg*/ false)
+ ->getPointerTo();
// typedef void (*kmpc_dtor)(void *);
- auto KmpcDtorTy =
+ auto *KmpcDtorTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
->getPointerTo();
llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
KmpcCopyCtorTy, KmpcDtorTy};
- auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
+ auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
/*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
break;
@@ -1630,7 +1729,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
break;
@@ -1639,7 +1738,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
break;
@@ -1647,7 +1746,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_barrier: {
// Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
break;
@@ -1655,7 +1754,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_for_static_fini: {
// Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
break;
@@ -1665,7 +1764,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// kmp_int32 num_threads)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
break;
@@ -1674,7 +1773,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
break;
@@ -1683,7 +1782,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
break;
@@ -1691,7 +1790,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_flush: {
// Build void __kmpc_flush(ident_t *loc);
llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
break;
@@ -1699,7 +1798,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_master: {
// Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
break;
@@ -1707,7 +1806,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_end_master: {
// Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
break;
@@ -1716,7 +1815,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
// int end_part);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
break;
@@ -1724,7 +1823,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_single: {
// Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
break;
@@ -1732,7 +1831,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_end_single: {
// Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
break;
@@ -1746,7 +1845,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
    // Return void * and then cast to the particular kmp_task_t type.
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
break;
@@ -1756,7 +1855,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// *new_task);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
break;
@@ -1771,7 +1870,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
break;
@@ -1787,7 +1886,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
break;
@@ -1804,7 +1903,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
break;
@@ -1815,7 +1914,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
break;
@@ -1826,7 +1925,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
@@ -1837,7 +1936,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// *new_task);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
@@ -1848,7 +1947,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// *new_task);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy,
/*Name=*/"__kmpc_omp_task_complete_if0");
@@ -1857,7 +1956,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_ordered: {
// Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
break;
@@ -1865,7 +1964,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_end_ordered: {
// Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
break;
@@ -1873,7 +1972,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_omp_taskwait: {
// Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
break;
@@ -1881,7 +1980,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_taskgroup: {
// Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
break;
@@ -1889,7 +1988,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_end_taskgroup: {
// Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
break;
@@ -1898,7 +1997,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
// int proc_bind)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
break;
@@ -1910,7 +2009,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
@@ -1923,7 +2022,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.Int32Ty, CGM.VoidPtrTy,
CGM.Int32Ty, CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
break;
@@ -1932,7 +2031,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
// global_tid, kmp_int32 cncl_kind)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
break;
@@ -1941,7 +2040,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
break;
@@ -1951,7 +2050,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// kmp_int32 num_teams, kmp_int32 num_threads)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
break;
@@ -1961,7 +2060,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// microtask, ...);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
getKmpc_MicroPointerTy()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
break;
@@ -1981,7 +2080,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.IntTy,
CGM.Int64Ty,
CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
break;
@@ -1993,7 +2092,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.Int32Ty,
CGM.Int32Ty,
CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
break;
@@ -2001,7 +2100,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
case OMPRTL__kmpc_doacross_fini: {
// Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
break;
@@ -2011,7 +2110,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// *vec);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
break;
@@ -2021,7 +2120,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// *vec);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
break;
@@ -2030,7 +2129,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
// *data);
llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
@@ -2040,7 +2139,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
// Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
// *d);
llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
@@ -2057,7 +2156,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.VoidPtrPtrTy,
CGM.SizeTy->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
break;
@@ -2073,7 +2172,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.VoidPtrPtrTy,
CGM.SizeTy->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
break;
@@ -2091,7 +2190,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.Int64Ty->getPointerTo(),
CGM.Int32Ty,
CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
break;
@@ -2109,7 +2208,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.Int64Ty->getPointerTo(),
CGM.Int32Ty,
CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
break;
@@ -2119,7 +2218,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
QualType ParamTy =
CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
break;
@@ -2129,7 +2228,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
QualType ParamTy =
CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
break;
@@ -2143,7 +2242,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.VoidPtrPtrTy,
CGM.SizeTy->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
break;
@@ -2172,7 +2271,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.VoidPtrPtrTy,
CGM.SizeTy->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
break;
@@ -2201,7 +2300,7 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
CGM.VoidPtrPtrTy,
CGM.SizeTy->getPointerTo(),
CGM.Int64Ty->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
break;
@@ -2230,12 +2329,12 @@ llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
- auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
- : "__kmpc_for_static_init_4u")
- : (IVSigned ? "__kmpc_for_static_init_8"
- : "__kmpc_for_static_init_8u");
- auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
+ : "__kmpc_for_static_init_4u")
+ : (IVSigned ? "__kmpc_for_static_init_8"
+ : "__kmpc_for_static_init_8u");
+ llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
+ auto *PtrTy = llvm::PointerType::getUnqual(ITy);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
@@ -2247,7 +2346,7 @@ llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
ITy, // incr
ITy // chunk
};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
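As a worked example of the selection above: IVSize == 64 with IVSigned == false picks __kmpc_for_static_init_8u, whose C-level signature (a sketch following the libomp conventions) is:

  // Sketch of the declared runtime entry for a 64-bit unsigned IV:
  void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                 kmp_int32 schedtype, kmp_int32 *plastiter,
                                 kmp_uint64 *plower, kmp_uint64 *pupper,
                                 kmp_int64 *pstride, kmp_int64 incr,
                                 kmp_int64 chunk);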
@@ -2256,11 +2355,11 @@ llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
- auto Name =
+ StringRef Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
: (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
- auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
+ llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
CGM.Int32Ty, // schedtype
@@ -2269,7 +2368,7 @@ llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
ITy, // stride
ITy // chunk
};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
@@ -2278,7 +2377,7 @@ llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
- auto Name =
+ StringRef Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
: (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
@@ -2286,7 +2385,7 @@ llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
@@ -2295,12 +2394,12 @@ llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
- auto Name =
+ StringRef Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
: (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
- auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
+ auto *PtrTy = llvm::PointerType::getUnqual(ITy);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
@@ -2309,18 +2408,48 @@ llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
PtrTy, // p_upper
PtrTy // p_stride
};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
+Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
+ if (CGM.getLangOpts().OpenMPSimd)
+ return Address::invalid();
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(VD);
+ if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ SmallString<64> PtrName;
+ {
+ llvm::raw_svector_ostream OS(PtrName);
+ OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
+ }
+ llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
+ if (!Ptr) {
+ QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
+ Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
+ PtrName);
+ if (!CGM.getLangOpts().OpenMPIsDevice) {
+ auto *GV = cast<llvm::GlobalVariable>(Ptr);
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ GV->setInitializer(CGM.GetAddrOfGlobal(VD));
+ }
+ CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
+ registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
+ }
+ return Address(Ptr, CGM.getContext().getDeclAlign(VD));
+ }
+ return Address::invalid();
+}
+
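Conceptually, the host side of the declare-target link support is one extra level of indirection; a sketch with a hypothetical variable Gbl:

  // Host-side effect for `#pragma omp declare target link(Gbl)` (sketch):
  int Gbl;                           // the original host variable
  int *Gbl_decl_tgt_link_ptr = &Gbl; // emitted link pointer, kept in llvm.used
  // Device code reaches Gbl through the mapped *_decl_tgt_link_ptr copy.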
llvm::Constant *
CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
assert(!CGM.getLangOpts().OpenMPUseTLS ||
!CGM.getContext().getTargetInfo().isTLSSupported());
// Lookup the entry, lazily creating it if necessary.
- return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
- Twine(CGM.getMangledName(VD)) + ".cache.");
+ std::string Suffix = getName({"cache", ""});
+ return getOrCreateInternalVariable(
+ CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
}
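With the host separator defaults, getName({"cache", ""}) expands to ".cache.", so the internal variable keeps its historical name; illustrated with a hypothetical mangled name:

  // Resulting cache-variable name (host separator defaults, made-up mangling):
  //   "_ZL3var.cache."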
Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
@@ -2331,7 +2460,7 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
CGM.getContext().getTargetInfo().isTLSSupported())
return VDAddr;
- auto VarTy = VDAddr.getElementType();
+ llvm::Type *VarTy = VDAddr.getElementType();
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
CGM.Int8PtrTy),
@@ -2347,15 +2476,14 @@ void CGOpenMPRuntime::emitThreadPrivateVarInit(
llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
// Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
// library.
- auto OMPLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
OMPLoc);
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
- llvm::Value *Args[] = {OMPLoc,
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
- CGM.VoidPtrTy),
- Ctor, CopyCtor, Dtor};
+ llvm::Value *Args[] = {
+ OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
+ Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
}
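The emitted initialization is just two runtime calls; a C-like sketch of the sequence above:

  // Sequence emitted by emitThreadPrivateVarInit (sketch):
  (void)__kmpc_global_thread_num(&loc);  // forces OpenMP runtime library init
  __kmpc_threadprivate_register(&loc, &var, ctor, /*cctor=*/NULL, dtor);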
@@ -2373,29 +2501,31 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
QualType ASTTy = VD->getType();
llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
- auto Init = VD->getAnyInitializer();
+ const Expr *Init = VD->getAnyInitializer();
if (CGM.getLangOpts().CPlusPlus && PerformInit) {
    // Generate a function that re-emits the declaration's initializer into the
    // threadprivate copy of the variable VD.
CodeGenFunction CtorCGF(CGM);
FunctionArgList Args;
- ImplicitParamDecl Dst(CGM.getContext(), CGM.getContext().VoidPtrTy,
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&Dst);
- auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
+ const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
CGM.getContext().VoidPtrTy, Args);
- auto FTy = CGM.getTypes().GetFunctionType(FI);
- auto Fn = CGM.CreateGlobalInitOrDestructFunction(
- FTy, ".__kmpc_global_ctor_.", FI, Loc);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ std::string Name = getName({"__kmpc_global_ctor_", ""});
+ llvm::Function *Fn =
+ CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
- Args, SourceLocation());
- auto ArgVal = CtorCGF.EmitLoadOfScalar(
+ Args, Loc, Loc);
+ llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
Address Arg = Address(ArgVal, VDAddr.getAlignment());
- Arg = CtorCGF.Builder.CreateElementBitCast(Arg,
- CtorCGF.ConvertTypeForMem(ASTTy));
+ Arg = CtorCGF.Builder.CreateElementBitCast(
+ Arg, CtorCGF.ConvertTypeForMem(ASTTy));
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
@@ -2410,21 +2540,23 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
// of the variable VD
CodeGenFunction DtorCGF(CGM);
FunctionArgList Args;
- ImplicitParamDecl Dst(CGM.getContext(), CGM.getContext().VoidPtrTy,
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
ImplicitParamDecl::Other);
Args.push_back(&Dst);
- auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
+ const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
CGM.getContext().VoidTy, Args);
- auto FTy = CGM.getTypes().GetFunctionType(FI);
- auto Fn = CGM.CreateGlobalInitOrDestructFunction(
- FTy, ".__kmpc_global_dtor_.", FI, Loc);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ std::string Name = getName({"__kmpc_global_dtor_", ""});
+ llvm::Function *Fn =
+ CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
- SourceLocation());
+ Loc, Loc);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
- auto ArgVal = DtorCGF.EmitLoadOfScalar(
+ llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
/*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
@@ -2438,34 +2570,36 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
return nullptr;
llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
- auto CopyCtorTy =
- llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
- /*isVarArg=*/false)->getPointerTo();
+ auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
+ /*isVarArg=*/false)
+ ->getPointerTo();
// Copying constructor for the threadprivate variable.
  // Must be NULL - reserved by the runtime, which currently requires that this
  // parameter always be NULL; otherwise it fires an assertion.
CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
if (Ctor == nullptr) {
- auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
- /*isVarArg=*/false)->getPointerTo();
+ auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)
+ ->getPointerTo();
Ctor = llvm::Constant::getNullValue(CtorTy);
}
if (Dtor == nullptr) {
- auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
- /*isVarArg=*/false)->getPointerTo();
+ auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)
+ ->getPointerTo();
Dtor = llvm::Constant::getNullValue(DtorTy);
}
if (!CGF) {
- auto InitFunctionTy =
+ auto *InitFunctionTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
- auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
- InitFunctionTy, ".__omp_threadprivate_init_.",
- CGM.getTypes().arrangeNullaryFunction());
+ std::string Name = getName({"__omp_threadprivate_init_", ""});
+ llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
+ InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
CodeGenFunction InitCGF(CGM);
FunctionArgList ArgList;
InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
CGM.getTypes().arrangeNullaryFunction(), ArgList,
- Loc);
+ Loc, Loc);
emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
InitCGF.FinishFunction();
return InitFunction;
@@ -2475,19 +2609,156 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
return nullptr;
}
+/// Obtain information that uniquely identifies a target entry. This
+/// consists of the file and device IDs as well as line number associated with
+/// the relevant entry source location.
+static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
+ unsigned &DeviceID, unsigned &FileID,
+ unsigned &LineNum) {
+ SourceManager &SM = C.getSourceManager();
+
+  // The loc should always be valid and have a file ID (the user cannot use
+  // #pragma directives in macros).
+
+  assert(Loc.isValid() && "Source location is expected to always be valid.");
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+  assert(PLoc.isValid() && "Source location is expected to always be valid.");
+
+ llvm::sys::fs::UniqueID ID;
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
+ SM.getDiagnostics().Report(diag::err_cannot_open_file)
+ << PLoc.getFilename() << EC.message();
+
+ DeviceID = ID.getDevice();
+ FileID = ID.getFile();
+ LineNum = PLoc.getLine();
+}
+
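As an example with hypothetical filesystem IDs: a declaration on line 42 of a file whose unique ID is device 0x803, inode 0x15f2a0 yields

  // Hypothetical result of getTargetEntryUniqueInfo:
  //   DeviceID = 0x803, FileID = 0x15f2a0, LineNum = 42

which then feeds the "__omp_offloading_<dev>_<file>_..._l<line>" prefix built below.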
+bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(VD);
+ if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
+ return false;
+ VD = VD->getDefinition(CGM.getContext());
+ if (VD && !DeclareTargetWithDefinition.insert(VD).second)
+ return CGM.getLangOpts().OpenMPIsDevice;
+
+ QualType ASTTy = VD->getType();
+
+ SourceLocation Loc = VD->getCanonicalDecl()->getLocStart();
+ // Produce the unique prefix to identify the new target regions. We use
+  // the source location of the variable declaration, which we know cannot
+  // conflict with any target region.
+ unsigned DeviceID;
+ unsigned FileID;
+ unsigned Line;
+ getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
+ SmallString<128> Buffer, Out;
+ {
+ llvm::raw_svector_ostream OS(Buffer);
+ OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
+ << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
+ }
+
+ const Expr *Init = VD->getAnyInitializer();
+ if (CGM.getLangOpts().CPlusPlus && PerformInit) {
+ llvm::Constant *Ctor;
+ llvm::Constant *ID;
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+      // Generate a function that re-emits the declaration's initializer into
+      // the threadprivate copy of the variable VD.
+ CodeGenFunction CtorCGF(CGM);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, Twine(Buffer, "_ctor"), FI, Loc);
+ auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
+ CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
+ FunctionArgList(), Loc, Loc);
+ auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
+ CtorCGF.EmitAnyExprToMem(Init,
+ Address(Addr, CGM.getContext().getDeclAlign(VD)),
+ Init->getType().getQualifiers(),
+ /*IsInitializer=*/true);
+ CtorCGF.FinishFunction();
+ Ctor = Fn;
+ ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+ CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
+ } else {
+ Ctor = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
+ ID = Ctor;
+ }
+
+ // Register the information for the entry associated with the constructor.
+ Out.clear();
+ OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
+ DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
+ ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
+ }
+ if (VD->getType().isDestructedType() != QualType::DK_none) {
+ llvm::Constant *Dtor;
+ llvm::Constant *ID;
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+      // Generate a function that emits the destructor call for the
+      // threadprivate copy of the variable VD.
+ CodeGenFunction DtorCGF(CGM);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, Twine(Buffer, "_dtor"), FI, Loc);
+ auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
+ DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
+ FunctionArgList(), Loc, Loc);
+ // Create a scope with an artificial location for the body of this
+ // function.
+ auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
+ DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
+ ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
+ DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
+ DtorCGF.FinishFunction();
+ Dtor = Fn;
+ ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+ CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
+ } else {
+ Dtor = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
+ ID = Dtor;
+ }
+ // Register the information for the entry associated with the destructor.
+ Out.clear();
+ OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
+ DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
+ ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
+ }
+ return CGM.getLangOpts().OpenMPIsDevice;
+}
+
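Continuing the hypothetical IDs from above, the two offload entries registered for a variable gbl declared on line 42 would be named:

  // Illustrative entry names (IDs and variable name are made up):
  //   __omp_offloading_803_15f2a0_gbl_l42_ctor  // OMPTargetRegionEntryCtor
  //   __omp_offloading_803_15f2a0_gbl_l42_dtor  // OMPTargetRegionEntryDtor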
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) {
- llvm::Twine VarName(Name, ".artificial.");
+ std::string Suffix = getName({"artificial", ""});
+ std::string CacheSuffix = getName({"cache", ""});
llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
- llvm::Value *GAddr = getOrCreateInternalVariable(VarLVType, VarName);
+ llvm::Value *GAddr =
+ getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
llvm::Value *Args[] = {
emitUpdateLocation(CGF, SourceLocation()),
getThreadID(CGF, SourceLocation()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
/*IsSigned=*/false),
- getOrCreateInternalVariable(CGM.VoidPtrPtrTy, VarName + ".cache.")};
+ getOrCreateInternalVariable(
+ CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
return Address(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitRuntimeCall(
@@ -2496,13 +2767,6 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
CGM.getPointerAlign());
}
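Net effect, sketched in C: each thread lazily obtains its own copy through the cached-threadprivate entry point declared earlier; G and G_cache stand for the two internal variables created above.

  // Sketch only, following the __kmpc_threadprivate_cached declaration:
  void *p = __kmpc_threadprivate_cached(&loc, gtid, (void *)&G,
                                        sizeof(T), &G_cache);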
-/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
-/// function. Here is the logic:
-/// if (Cond) {
-/// ThenGen();
-/// } else {
-/// ElseGen();
-/// }
void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen) {
@@ -2521,9 +2785,9 @@ void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
// Otherwise, the condition did not fold, or we couldn't elide it. Just
// emit the conditional branch.
- auto ThenBlock = CGF.createBasicBlock("omp_if.then");
- auto ElseBlock = CGF.createBasicBlock("omp_if.else");
- auto ContBlock = CGF.createBasicBlock("omp_if.end");
+ llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
+ llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
// Emit the 'then' code.
@@ -2548,11 +2812,11 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond) {
if (!CGF.HaveInsertPoint())
return;
- auto *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
PrePostActionTy &) {
// Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
- auto &RT = CGF.CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
RTLoc,
CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
@@ -2561,13 +2825,13 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
- auto RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
+ llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
};
auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
PrePostActionTy &) {
- auto &RT = CGF.CGM.getOpenMPRuntime();
- auto ThreadID = RT.getThreadID(CGF, Loc);
+ CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
+ llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
// Build calls:
// __kmpc_serialized_parallel(&Loc, GTid);
llvm::Value *Args[] = {RTLoc, ThreadID};
@@ -2575,13 +2839,12 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
// OutlinedFn(&GTid, &zero, CapturedStruct);
- auto ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
- Address ZeroAddr =
- CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
- /*Name*/ ".zero.addr");
+ Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
+ /*Name*/ ".zero.addr");
CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ // ThreadId for serialized parallels is 0.
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
OutlinedFnArgs.push_back(ZeroAddr.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
@@ -2592,9 +2855,9 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
EndArgs);
};
- if (IfCond)
+ if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
- else {
+ } else {
RegionCodeGenTy ThenRCG(ThenGen);
ThenRCG(CGF);
}
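Both paths above reduce to a short runtime call sequence; a sketch of the lowering (names abbreviated):

  // If-clause true (or absent): fork a team and run the outlined body.
  //   __kmpc_fork_call(&loc, nargs, microtask, var1, ..., varN);
  // If-clause false: run the region serialized on the current thread.
  //   __kmpc_serialized_parallel(&loc, gtid);
  //   outlined(&zero, &zero, var1, ..., varN); // thread id is 0 when serialized
  //   __kmpc_end_serialized_parallel(&loc, gtid);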
@@ -2613,10 +2876,10 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
if (OMPRegionInfo->getThreadIDVariable())
return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
- auto ThreadID = getThreadID(CGF, Loc);
- auto Int32Ty =
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ QualType Int32Ty =
CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
- auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
+ Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
CGF.EmitStoreOfScalar(ThreadID,
CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
@@ -2629,8 +2892,8 @@ CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << Name;
- auto RuntimeName = Out.str();
- auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
+ StringRef RuntimeName = Out.str();
+ auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
if (Elem.second) {
assert(Elem.second->getType()->getPointerElementType() == Ty &&
"OMP internal variable has different type than requested");
@@ -2644,8 +2907,9 @@ CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
}
llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
- llvm::Twine Name(".gomp_critical_user_", CriticalName);
- return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
+ std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
+ std::string Name = getName({Prefix, "var"});
+ return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
}
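On the host separator defaults this reproduces the historical lock name; for example, with a hypothetical critical name:

  // getCriticalRegionLock("foo") -> internal variable named
  //   ".gomp_critical_user_foo.var"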
namespace {
@@ -2779,21 +3043,28 @@ static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
static llvm::Value *emitCopyprivateCopyFunction(
CodeGenModule &CGM, llvm::Type *ArgsType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
- ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) {
- auto &C = CGM.getContext();
+ ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
// void copy_func(void *LHSArg, void *RHSArg);
FunctionArgList Args;
- ImplicitParamDecl LHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
- ImplicitParamDecl RHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
+ ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
- auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- ".omp.copyprivate.copy_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
+ const auto &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ std::string Name =
+ CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
+ auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
+ llvm::GlobalValue::InternalLinkage, Name,
+ &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
// Dest = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -2807,13 +3078,15 @@ static llvm::Value *emitCopyprivateCopyFunction(
// ...
// *(Typen*)Dst[n] = *(Typen*)Src[n];
for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
- auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
+ const auto *DestVar =
+ cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
- auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
+ const auto *SrcVar =
+ cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
- auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
+ const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
QualType Type = VD->getType();
CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
}
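The generated helper has the following shape (a sketch; .omp.copyprivate.copy_func is the host-mangled name produced by getName above):

  // void .omp.copyprivate.copy_func(void *dst[], void *src[]) {
  //   *(T0 *)dst[0] = *(T0 *)src[0];  // via the user-visible assignment op
  //   ...one statement per copyprivate variable...
  // }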
@@ -2833,7 +3106,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
assert(CopyprivateVars.size() == SrcExprs.size() &&
CopyprivateVars.size() == DstExprs.size() &&
CopyprivateVars.size() == AssignmentOps.size());
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
// int32 did_it = 0;
// if(__kmpc_single(ident_t *, gtid)) {
// SingleOpGen();
@@ -2846,7 +3119,8 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
Address DidIt = Address::invalid();
if (!CopyprivateVars.empty()) {
// int32 did_it = 0;
- auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ QualType KmpInt32Ty =
+ C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
}
@@ -2866,7 +3140,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
// <copy_func>, did_it);
if (DidIt.isValid()) {
llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
- auto CopyprivateArrayTy =
+ QualType CopyprivateArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
// Create a list of all private variables for copyprivate.
@@ -2882,14 +3156,14 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
}
// Build function that copies private values from single region to all other
// threads in the corresponding parallel region.
- auto *CpyFn = emitCopyprivateCopyFunction(
+ llvm::Value *CpyFn = emitCopyprivateCopyFunction(
CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
- CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
- auto *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
+ CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
+ llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
Address CL =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
CGF.VoidPtrTy);
- auto *DidItVal = CGF.Builder.CreateLoad(DidIt);
+ llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc), // i32 <gtid>
@@ -2948,19 +3222,19 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
- auto *Result = CGF.EmitRuntimeCall(
+ llvm::Value *Result = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
if (EmitChecks) {
// if (__kmpc_cancel_barrier()) {
// exit from construct;
// }
- auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
- auto *ContBB = CGF.createBasicBlock(".cancel.continue");
- auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
+ llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// exit from construct;
- auto CancelDestination =
+ CodeGenFunction::JumpDest CancelDestination =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDestination);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
@@ -2971,7 +3245,7 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
}
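Sketch of the control flow emitted when the enclosing region can be cancelled:

  // result = __kmpc_cancel_barrier(&loc, gtid);
  // if (result != 0)
  //   goto cancel.exit;   // branch through cleanups, out of the construct
  // cancel.continue:      // otherwise fall through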
-/// \brief Map the OpenMP loop schedule to the runtime enumeration.
+/// Map the OpenMP loop schedule to the runtime enumeration.
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked, bool Ordered) {
switch (ScheduleKind) {
@@ -2993,7 +3267,7 @@ static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
llvm_unreachable("Unexpected runtime schedule");
}
-/// \brief Map the OpenMP distribute schedule to the runtime enumeration.
+/// Map the OpenMP distribute schedule to the runtime enumeration.
static OpenMPSchedType
getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
  // Only static is allowed for dist_schedule.
@@ -3002,19 +3276,20 @@ getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const {
- auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
return Schedule == OMP_sch_static;
}
bool CGOpenMPRuntime::isStaticNonchunked(
OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
- auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
+ OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
return Schedule == OMP_dist_sch_static;
}
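// Usage sketch for the predicates above (OMPC_SCHEDULE_static comes from
// OpenMPKinds.h; the results follow from the mapping in getRuntimeSchedule):
//   isStaticNonchunked(OMPC_SCHEDULE_static, /*Chunked=*/false); // true
//   isStaticNonchunked(OMPC_SCHEDULE_static, /*Chunked=*/true);  // false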
bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
- auto Schedule =
+ OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
return Schedule != OMP_sch_static;
@@ -3147,12 +3422,12 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
assert(isOpenMPWorksharingDirective(DKind) &&
"Expected loop-based or sections-based directive.");
- auto *UpdatedLocation = emitUpdateLocation(CGF, Loc,
+ llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
isOpenMPLoopDirective(DKind)
? OMP_IDENT_WORK_LOOP
: OMP_IDENT_WORK_SECTIONS);
- auto *ThreadId = getThreadID(CGF, Loc);
- auto *StaticInitFunction =
+ llvm::Value *ThreadId = getThreadID(CGF, Loc);
+ llvm::Constant *StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
@@ -3164,10 +3439,10 @@ void CGOpenMPRuntime::emitDistributeStaticInit(
const CGOpenMPRuntime::StaticRTInput &Values) {
OpenMPSchedType ScheduleNum =
getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
- auto *UpdatedLocation =
+ llvm::Value *UpdatedLocation =
emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
- auto *ThreadId = getThreadID(CGF, Loc);
- auto *StaticInitFunction =
+ llvm::Value *ThreadId = getThreadID(CGF, Loc);
+ llvm::Constant *StaticInitFunction =
createForStaticInitFunction(Values.IVSize, Values.IVSigned);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
@@ -3223,7 +3498,7 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
llvm::Value *Call =
CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
return CGF.EmitScalarConversion(
- Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
+ Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
CGF.getContext().BoolTy, Loc);
}
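// The scalar conversion above reduces to an integer-to-bool compare, e.g.
// (IR sketch for a signed 32-bit IV):
//   %call   = call i32 @__kmpc_dispatch_next_4(...)
//   %tobool = icmp ne i32 %call, 0
// so the runtime's "more chunks available" flag drives the dispatch loop.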
@@ -3285,13 +3560,13 @@ void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
}
namespace {
-/// \brief Indexes of fields for type kmp_task_t.
+/// Indexes of fields for type kmp_task_t.
enum KmpTaskTFields {
- /// \brief List of shared variables.
+ /// List of shared variables.
KmpTaskTShareds,
- /// \brief Task routine.
+ /// Task routine.
KmpTaskTRoutine,
- /// \brief Partition id for the untied tasks.
+ /// Partition id for the untied tasks.
KmpTaskTPartId,
  /// Function with calls to destructors for private variables.
Data1,
@@ -3311,11 +3586,11 @@ enum KmpTaskTFields {
} // anonymous namespace
bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
- // FIXME: Add other entries type when they become supported.
- return OffloadEntriesTargetRegion.empty();
+ return OffloadEntriesTargetRegion.empty() &&
+ OffloadEntriesDeviceGlobalVar.empty();
}
-/// \brief Initialize target region entry.
+/// Initialize target region entry.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
@@ -3325,7 +3600,7 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
"code generation.");
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
- /*Flags=*/0);
+ OMPTargetRegionEntryTargetRegion);
++OffloadingEntriesNum;
}
@@ -3333,22 +3608,27 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
- int32_t Flags) {
+ OMPTargetRegionEntryKind Flags) {
  // If we are emitting code for a target, the entry is already initialized;
  // it only has to be registered.
if (CGM.getLangOpts().OpenMPIsDevice) {
- assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
- "Entry must exist.");
+ if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Unable to find target region on line '%0' in the device code.");
+ CGM.getDiags().Report(DiagID) << LineNum;
+ return;
+ }
auto &Entry =
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
assert(Entry.isValid() && "Entry not initialized!");
Entry.setAddress(Addr);
Entry.setID(ID);
Entry.setFlags(Flags);
- return;
} else {
- OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID, Flags);
+ OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
+ ++OffloadingEntriesNum;
}
}
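// Usage sketch (hypothetical IDs; real callers derive DeviceID, FileID and
// LineNum from the directive's SourceLocation, and pass the outlined target
// function and its ID):
//   OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
//       /*DeviceID=*/42, /*FileID=*/7, /*ParentName=*/"_Z3foov",
//       /*LineNum=*/100, OutlinedFn, OutlinedFnID,
//       OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);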
@@ -3376,48 +3656,69 @@ bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action) {
// Scan all target region entries and perform the provided action.
- for (auto &D : OffloadEntriesTargetRegion)
- for (auto &F : D.second)
- for (auto &P : F.second)
- for (auto &L : P.second)
+ for (const auto &D : OffloadEntriesTargetRegion)
+ for (const auto &F : D.second)
+ for (const auto &P : F.second)
+ for (const auto &L : P.second)
Action(D.first, F.first, P.first(), L.first, L.second);
}
-/// \brief Create a Ctor/Dtor-like function whose body is emitted through
-/// \a Codegen. This is used to emit the two functions that register and
-/// unregister the descriptor of the current compilation unit.
-static llvm::Function *
-createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
- const RegionCodeGenTy &Codegen) {
- auto &C = CGM.getContext();
- FunctionArgList Args;
- ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
- Args.push_back(&DummyPtr);
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ initializeDeviceGlobalVarEntryInfo(StringRef Name,
+ OMPTargetGlobalVarEntryKind Flags,
+ unsigned Order) {
+ assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
+ "only required for the device "
+ "code generation.");
+ OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
+ ++OffloadingEntriesNum;
+}
- CodeGenFunction CGF(CGM);
- auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto FTy = CGM.getTypes().GetFunctionType(FI);
- auto *Fn =
- CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, SourceLocation());
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args, SourceLocation());
- Codegen(CGF);
- CGF.FinishFunction();
- return Fn;
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
+ CharUnits VarSize,
+ OMPTargetGlobalVarEntryKind Flags,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
+ assert(Entry.isValid() && Entry.getFlags() == Flags &&
+ "Entry not initialized!");
+ assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
+ "Resetting with the new address.");
+ if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
+ return;
+ Entry.setAddress(Addr);
+ Entry.setVarSize(VarSize);
+ Entry.setLinkage(Linkage);
+ } else {
+ if (hasDeviceGlobalVarEntryInfo(VarName))
+ return;
+ OffloadEntriesDeviceGlobalVar.try_emplace(
+ VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
+ ++OffloadingEntriesNum;
+ }
+}
+
+void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
+ actOnDeviceGlobalVarEntriesInfo(
+ const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
+  // Scan all device global variable entries and perform the provided action.
+ for (const auto &E : OffloadEntriesDeviceGlobalVar)
+ Action(E.getKey(), E.getValue());
}
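// Usage sketch: the Action callback receives each entry keyed by mangled
// name (a hypothetical dump, not a call site in this change):
//   OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
//       [](StringRef Name, const OffloadEntriesInfoManagerTy::
//                              OffloadEntryInfoDeviceGlobalVar &E) {
//         llvm::dbgs() << Name << " -> order " << E.getOrder() << "\n";
//       });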
llvm::Function *
CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
-
// If we don't have entries or if we are emitting code for the device, we
// don't need to do anything.
if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
return nullptr;
- auto &M = CGM.getModule();
- auto &C = CGM.getContext();
+ llvm::Module &M = CGM.getModule();
+ ASTContext &C = CGM.getContext();
// Get list of devices we care about
- auto &Devices = CGM.getLangOpts().OMPTargetTriples;
+ const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;
// We should be creating an offloading descriptor only if there are devices
// specified.
@@ -3425,46 +3726,49 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
// Create the external variables that will point to the begin and end of the
// host entries section. These will be defined by the linker.
- auto *OffloadEntryTy =
+ llvm::Type *OffloadEntryTy =
CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
- llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
- M, OffloadEntryTy, /*isConstant=*/true,
- llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
- ".omp_offloading.entries_begin");
- llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
+ std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
+ auto *HostEntriesBegin = new llvm::GlobalVariable(
M, OffloadEntryTy, /*isConstant=*/true,
llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
- ".omp_offloading.entries_end");
+ EntriesBeginName);
+ std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
+ auto *HostEntriesEnd =
+ new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/nullptr, EntriesEndName);
// Create all device images
auto *DeviceImageTy = cast<llvm::StructType>(
CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
ConstantInitBuilder DeviceImagesBuilder(CGM);
- auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);
+ ConstantArrayBuilder DeviceImagesEntries =
+ DeviceImagesBuilder.beginArray(DeviceImageTy);
- for (unsigned i = 0; i < Devices.size(); ++i) {
- StringRef T = Devices[i].getTriple();
+ for (const llvm::Triple &Device : Devices) {
+ StringRef T = Device.getTriple();
+ std::string BeginName = getName({"omp_offloading", "img_start", ""});
auto *ImgBegin = new llvm::GlobalVariable(
M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
- /*Initializer=*/nullptr,
- Twine(".omp_offloading.img_start.") + Twine(T));
+ /*Initializer=*/nullptr, Twine(BeginName).concat(T));
+ std::string EndName = getName({"omp_offloading", "img_end", ""});
auto *ImgEnd = new llvm::GlobalVariable(
M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
- /*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));
+ /*Initializer=*/nullptr, Twine(EndName).concat(T));
- auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
- Dev.add(ImgBegin);
- Dev.add(ImgEnd);
- Dev.add(HostEntriesBegin);
- Dev.add(HostEntriesEnd);
- Dev.finishAndAddTo(DeviceImagesEntries);
+ llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
+ HostEntriesEnd};
+ createConstantGlobalStructAndAddToParent(CGM, getTgtDeviceImageQTy(), Data,
+ DeviceImagesEntries);
}
// Create device images global array.
+ std::string ImagesName = getName({"omp_offloading", "device_images"});
llvm::GlobalVariable *DeviceImages =
- DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
- CGM.getPointerAlign(),
- /*isConstant=*/true);
+ DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
+ CGM.getPointerAlign(),
+ /*isConstant=*/true);
DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// This is a Zero array to be used in the creation of the constant expressions
@@ -3472,49 +3776,64 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
llvm::Constant::getNullValue(CGM.Int32Ty)};
// Create the target region descriptor.
- auto *BinaryDescriptorTy = cast<llvm::StructType>(
- CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
- ConstantInitBuilder DescBuilder(CGM);
- auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
- DescInit.addInt(CGM.Int32Ty, Devices.size());
- DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
- DeviceImages,
- Index));
- DescInit.add(HostEntriesBegin);
- DescInit.add(HostEntriesEnd);
-
- auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
- CGM.getPointerAlign(),
- /*isConstant=*/true);
+ llvm::Constant *Data[] = {
+ llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
+ llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
+ DeviceImages, Index),
+ HostEntriesBegin, HostEntriesEnd};
+ std::string Descriptor = getName({"omp_offloading", "descriptor"});
+ llvm::GlobalVariable *Desc = createConstantGlobalStruct(
+ CGM, getTgtBinaryDescriptorQTy(), Data, Descriptor);
// Emit code to register or unregister the descriptor at execution
// startup or closing, respectively.
- // Create a variable to drive the registration and unregistration of the
- // descriptor, so we can reuse the logic that emits Ctors and Dtors.
- auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
- ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
- IdentInfo, C.CharTy, ImplicitParamDecl::Other);
-
- auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
- CGM, ".omp_offloading.descriptor_unreg",
- [&](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
- Desc);
- });
- auto *RegFn = createOffloadingBinaryDescriptorFunction(
- CGM, ".omp_offloading.descriptor_reg",
- [&](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib),
- Desc);
- CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
- });
+ llvm::Function *UnRegFn;
+ {
+ FunctionArgList Args;
+ ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ Args.push_back(&DummyPtr);
+
+ CodeGenFunction CGF(CGM);
+    // Disable debug info for global (de-)initializers because they are not
+    // part of any particular construct.
+ CGF.disableDebugInfo();
+ const auto &FI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
+ UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
+ Desc);
+ CGF.FinishFunction();
+ }
+ llvm::Function *RegFn;
+ {
+ CodeGenFunction CGF(CGM);
+    // Disable debug info for global (de-)initializers because they are not
+    // part of any particular construct.
+ CGF.disableDebugInfo();
+ const auto &FI = CGM.getTypes().arrangeNullaryFunction();
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ std::string Descriptor = getName({"omp_offloading", "descriptor_reg"});
+ RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
+ CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
+ // Create a variable to drive the registration and unregistration of the
+ // descriptor, so we can reuse the logic that emits Ctors and Dtors.
+ ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
+ SourceLocation(), nullptr, C.CharTy,
+ ImplicitParamDecl::Other);
+ CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
+ CGF.FinishFunction();
+ }
if (CGM.supportsCOMDAT()) {
// It is sufficient to call registration function only once, so create a
// COMDAT group for registration/unregistration functions and associated
// data. That would reduce startup time and code size. Registration
// function serves as a COMDAT group key.
- auto ComdatKey = M.getOrInsertComdat(RegFn->getName());
+ llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
RegFn->setComdat(ComdatKey);
@@ -3525,48 +3844,35 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
return RegFn;
}
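// The two helper functions emitted above are morally equivalent to the
// following (a sketch; the destructor registration lowers to __cxa_atexit
// or the ABI's equivalent via registerGlobalDtor):
//   void .omp_offloading.descriptor_unreg(void *) {
//     __tgt_unregister_lib(&.omp_offloading.descriptor);
//   }
//   void .omp_offloading.descriptor_reg() {
//     __tgt_register_lib(&.omp_offloading.descriptor);
//     // schedule .omp_offloading.descriptor_unreg to run at program exit
//   }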
-void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
- llvm::Constant *Addr, uint64_t Size,
- int32_t Flags) {
+void CGOpenMPRuntime::createOffloadEntry(
+ llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
+ llvm::GlobalValue::LinkageTypes Linkage) {
StringRef Name = Addr->getName();
- auto *TgtOffloadEntryType = cast<llvm::StructType>(
- CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
- llvm::LLVMContext &C = CGM.getModule().getContext();
llvm::Module &M = CGM.getModule();
-
- // Make sure the address has the right type.
- llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy);
+ llvm::LLVMContext &C = M.getContext();
// Create constant string with the name.
llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
- llvm::GlobalVariable *Str =
- new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, StrPtrInit,
- ".omp_offloading.entry_name");
+ std::string StringName = getName({"omp_offloading", "entry_name"});
+ auto *Str = new llvm::GlobalVariable(
+ M, StrPtrInit->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
-
- // We can't have any padding between symbols, so we need to have 1-byte
- // alignment.
- auto Align = CharUnits::fromQuantity(1);
-
- // Create the entry struct.
- ConstantInitBuilder EntryBuilder(CGM);
- auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
- EntryInit.add(AddrPtr);
- EntryInit.add(StrPtr);
- EntryInit.addInt(CGM.SizeTy, Size);
- EntryInit.addInt(CGM.Int32Ty, Flags);
- EntryInit.addInt(CGM.Int32Ty, 0);
- llvm::GlobalVariable *Entry =
- EntryInit.finishAndCreateGlobal(".omp_offloading.entry",
- Align,
- /*constant*/ true,
- llvm::GlobalValue::ExternalLinkage);
+
+ llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
+ llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
+ llvm::ConstantInt::get(CGM.SizeTy, Size),
+ llvm::ConstantInt::get(CGM.Int32Ty, Flags),
+ llvm::ConstantInt::get(CGM.Int32Ty, 0)};
+ std::string EntryName = getName({"omp_offloading", "entry", ""});
+ llvm::GlobalVariable *Entry = createConstantGlobalStruct(
+ CGM, getTgtOffloadEntryQTy(), Data, Twine(EntryName).concat(Name),
+ llvm::GlobalValue::WeakAnyLinkage);
// The entry has to be created in the section the linker expects it to be.
- Entry->setSection(".omp_offloading.entries");
+ std::string Section = getName({"omp_offloading", "entries"});
+ Entry->setSection(Section);
}
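// Net effect in IR (a sketch; <Name> is Addr->getName() and the struct type
// matches getTgtOffloadEntryQTy()):
//   @.omp_offloading.entry.<Name> = weak constant %struct.__tgt_offload_entry
//       { i8* <ID>, i8* @.omp_offloading.entry_name, i64 <Size>,
//         i32 <Flags>, i32 0 }, section ".omp_offloading.entries"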
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
@@ -3579,71 +3885,142 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
  // Right now we only generate metadata for functions that contain target
// regions.
- // If we do not have entries, we dont need to do anything.
+ // If we do not have entries, we don't need to do anything.
if (OffloadEntriesInfoManager.empty())
return;
llvm::Module &M = CGM.getModule();
llvm::LLVMContext &C = M.getContext();
- SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
+ SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
OrderedEntries(OffloadEntriesInfoManager.size());
- // Create the offloading info metadata node.
- llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
-
// Auxiliary methods to create metadata values and strings.
- auto getMDInt = [&](unsigned v) {
+ auto &&GetMDInt = [this](unsigned V) {
return llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
+ llvm::ConstantInt::get(CGM.Int32Ty, V));
};
- auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };
+ auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
+
+ // Create the offloading info metadata node.
+ llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
  // Create a function that emits metadata for each target region entry.
- auto &&TargetRegionMetadataEmitter = [&](
- unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
- OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
- llvm::SmallVector<llvm::Metadata *, 32> Ops;
- // Generate metadata for target regions. Each entry of this metadata
- // contains:
- // - Entry 0 -> Kind of this type of metadata (0).
- // - Entry 1 -> Device ID of the file where the entry was identified.
- // - Entry 2 -> File ID of the file where the entry was identified.
- // - Entry 3 -> Mangled name of the function where the entry was identified.
- // - Entry 4 -> Line in the file where the entry was identified.
- // - Entry 5 -> Order the entry was created.
- // The first element of the metadata node is the kind.
- Ops.push_back(getMDInt(E.getKind()));
- Ops.push_back(getMDInt(DeviceID));
- Ops.push_back(getMDInt(FileID));
- Ops.push_back(getMDString(ParentName));
- Ops.push_back(getMDInt(Line));
- Ops.push_back(getMDInt(E.getOrder()));
-
- // Save this entry in the right position of the ordered entries array.
- OrderedEntries[E.getOrder()] = &E;
-
- // Add metadata to the named metadata node.
- MD->addOperand(llvm::MDNode::get(C, Ops));
- };
+ auto &&TargetRegionMetadataEmitter =
+ [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
+ unsigned DeviceID, unsigned FileID, StringRef ParentName,
+ unsigned Line,
+ const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
+ // Generate metadata for target regions. Each entry of this metadata
+ // contains:
+ // - Entry 0 -> Kind of this type of metadata (0).
+ // - Entry 1 -> Device ID of the file where the entry was identified.
+ // - Entry 2 -> File ID of the file where the entry was identified.
+ // - Entry 3 -> Mangled name of the function where the entry was
+ // identified.
+ // - Entry 4 -> Line in the file where the entry was identified.
+ // - Entry 5 -> Order the entry was created.
+ // The first element of the metadata node is the kind.
+ llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
+ GetMDInt(FileID), GetMDString(ParentName),
+ GetMDInt(Line), GetMDInt(E.getOrder())};
+
+ // Save this entry in the right position of the ordered entries array.
+ OrderedEntries[E.getOrder()] = &E;
+
+ // Add metadata to the named metadata node.
+ MD->addOperand(llvm::MDNode::get(C, Ops));
+ };
OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
TargetRegionMetadataEmitter);
- for (auto *E : OrderedEntries) {
+  // Create a function that emits metadata for each device global variable entry.
+ auto &&DeviceGlobalVarMetadataEmitter =
+ [&C, &OrderedEntries, &GetMDInt, &GetMDString,
+ MD](StringRef MangledName,
+ const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
+ &E) {
+ // Generate metadata for global variables. Each entry of this metadata
+ // contains:
+ // - Entry 0 -> Kind of this type of metadata (1).
+ // - Entry 1 -> Mangled name of the variable.
+ // - Entry 2 -> Declare target kind.
+ // - Entry 3 -> Order the entry was created.
+ // The first element of the metadata node is the kind.
+ llvm::Metadata *Ops[] = {
+ GetMDInt(E.getKind()), GetMDString(MangledName),
+ GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
+
+ // Save this entry in the right position of the ordered entries array.
+ OrderedEntries[E.getOrder()] = &E;
+
+ // Add metadata to the named metadata node.
+ MD->addOperand(llvm::MDNode::get(C, Ops));
+ };
+
+ OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
+ DeviceGlobalVarMetadataEmitter);
+
+ for (const auto *E : OrderedEntries) {
assert(E && "All ordered entries must exist!");
- if (auto *CE =
+ if (const auto *CE =
dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
E)) {
- assert(CE->getID() && CE->getAddress() &&
- "Entry ID and Addr are invalid!");
- createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0);
- } else
+ if (!CE->getID() || !CE->getAddress()) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Offloading entry for target region is incorrect: either the "
+ "address or the ID is invalid.");
+ CGM.getDiags().Report(DiagID);
+ continue;
+ }
+ createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
+ CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
+ } else if (const auto *CE =
+ dyn_cast<OffloadEntriesInfoManagerTy::
+ OffloadEntryInfoDeviceGlobalVar>(E)) {
+ OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
+ static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
+ CE->getFlags());
+ switch (Flags) {
+ case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
+ if (!CE->getAddress()) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Offloading entry for declare target variable is incorrect: the "
+ "address is invalid.");
+ CGM.getDiags().Report(DiagID);
+ continue;
+ }
+ break;
+ }
+ case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
+ assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
+ (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
+ "Declaret target link address is set.");
+ if (CGM.getLangOpts().OpenMPIsDevice)
+ continue;
+ if (!CE->getAddress()) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Offloading entry for declare target variable is incorrect: the "
+ "address is invalid.");
+ CGM.getDiags().Report(DiagID);
+ continue;
+ }
+ break;
+ }
+ createOffloadEntry(CE->getAddress(), CE->getAddress(),
+ CE->getVarSize().getQuantity(), Flags,
+ CE->getLinkage());
+ } else {
llvm_unreachable("Unsupported entry kind.");
+ }
}
}
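// The named metadata produced above looks roughly like this (illustrative
// operands; kind 0 = target region, kind 1 = declare target variable):
//   !omp_offload.info = !{!0, !1}
//   !0 = !{i32 0, i32 <DeviceID>, i32 <FileID>, !"<ParentName>",
//          i32 <Line>, i32 <Order>}
//   !1 = !{i32 1, !"<MangledName>", i32 <Flags>, i32 <Order>}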
-/// \brief Loads all the offload entries information from the host IR
+/// Loads all the offload entries information from the host IR
/// metadata.
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
// If we are in target mode, load the metadata from the host IR. This code has
@@ -3656,44 +4033,57 @@ void CGOpenMPRuntime::loadOffloadInfoMetadata() {
return;
auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
- if (Buf.getError())
+ if (auto EC = Buf.getError()) {
+ CGM.getDiags().Report(diag::err_cannot_open_file)
+ << CGM.getLangOpts().OMPHostIRFile << EC.message();
return;
+ }
llvm::LLVMContext C;
auto ME = expectedToErrorOrAndEmitErrors(
C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
- if (ME.getError())
+ if (auto EC = ME.getError()) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
+ CGM.getDiags().Report(DiagID)
+ << CGM.getLangOpts().OMPHostIRFile << EC.message();
return;
+ }
llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
if (!MD)
return;
- for (auto I : MD->operands()) {
- llvm::MDNode *MN = cast<llvm::MDNode>(I);
-
- auto getMDInt = [&](unsigned Idx) {
- llvm::ConstantAsMetadata *V =
- cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
+ for (llvm::MDNode *MN : MD->operands()) {
+ auto &&GetMDInt = [MN](unsigned Idx) {
+ auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
};
- auto getMDString = [&](unsigned Idx) {
- llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
+ auto &&GetMDString = [MN](unsigned Idx) {
+ auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
return V->getString();
};
- switch (getMDInt(0)) {
+ switch (GetMDInt(0)) {
default:
llvm_unreachable("Unexpected metadata!");
break;
case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
- OFFLOAD_ENTRY_INFO_TARGET_REGION:
+ OffloadingEntryInfoTargetRegion:
OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
- /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
- /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
- /*Order=*/getMDInt(5));
+ /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
+ /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
+ /*Order=*/GetMDInt(5));
+ break;
+ case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
+ OffloadingEntryInfoDeviceGlobalVar:
+ OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
+ /*MangledName=*/GetMDString(1),
+ static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
+ /*Flags=*/GetMDInt(2)),
+ /*Order=*/GetMDInt(3));
break;
}
}
@@ -3702,7 +4092,7 @@ void CGOpenMPRuntime::loadOffloadInfoMetadata() {
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
if (!KmpRoutineEntryPtrTy) {
// Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
KmpRoutineEntryPtrQTy = C.getPointerType(
@@ -3711,19 +4101,7 @@ void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
}
}
-static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
- QualType FieldTy) {
- auto *Field = FieldDecl::Create(
- C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
- C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- DC->addDecl(Field);
- return Field;
-}
-
QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
-
// Make sure the type of the entry is already created. This is the type we
// have to create:
// struct __tgt_offload_entry{
@@ -3736,7 +4114,7 @@ QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
// };
if (TgtOffloadEntryQTy.isNull()) {
ASTContext &C = CGM.getContext();
- auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
+ RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
@@ -3746,6 +4124,7 @@ QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
addFieldToRecordDecl(
C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
RD->completeDefinition();
+ RD->addAttr(PackedAttr::CreateImplicit(C));
TgtOffloadEntryQTy = C.getRecordType(RD);
}
return TgtOffloadEntryQTy;
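// Field-for-field, the record built here mirrors libomptarget's entry type
// (field names are illustrative; the implicit record's fields are unnamed,
// and the PackedAttr added above forbids padding between fields):
//   struct __attribute__((packed)) __tgt_offload_entry {
//     void    *addr;  // Address of the function or global.
//     char    *name;  // Mangled name of the symbol.
//     size_t   size;  // Size in bytes, or 0 for a function.
//     int32_t  flags;
//     int32_t  reserved;
//   };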
@@ -3765,7 +4144,7 @@ QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
// };
if (TgtDeviceImageQTy.isNull()) {
ASTContext &C = CGM.getContext();
- auto *RD = C.buildImplicitRecord("__tgt_device_image");
+ RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
@@ -3789,7 +4168,7 @@ QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
// };
if (TgtBinaryDescriptorQTy.isNull()) {
ASTContext &C = CGM.getContext();
- auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
+ RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
RD->startDefinition();
addFieldToRecordDecl(
C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
@@ -3818,17 +4197,16 @@ typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
static RecordDecl *
createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
if (!Privates.empty()) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
// Build struct .kmp_privates_t. {
// /* private vars */
// };
- auto *RD = C.buildImplicitRecord(".kmp_privates.t");
+ RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
RD->startDefinition();
- for (auto &&Pair : Privates) {
- auto *VD = Pair.second.Original;
- auto Type = VD->getType();
- Type = Type.getNonReferenceType();
- auto *FD = addFieldToRecordDecl(C, RD, Type);
+ for (const auto &Pair : Privates) {
+ const VarDecl *VD = Pair.second.Original;
+ QualType Type = VD->getType().getNonReferenceType();
+ FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
if (VD->hasAttrs()) {
for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
E(VD->getAttrs().end());
@@ -3846,7 +4224,7 @@ static RecordDecl *
createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
QualType KmpInt32Ty,
QualType KmpRoutineEntryPointerQTy) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
// Build struct kmp_task_t {
// void * shareds;
// kmp_routine_entry_t routine;
@@ -3860,13 +4238,13 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
// kmp_int32 liter;
// void * reductions;
// };
- auto *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
+ RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
UD->startDefinition();
addFieldToRecordDecl(C, UD, KmpInt32Ty);
addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
UD->completeDefinition();
QualType KmpCmplrdataTy = C.getRecordType(UD);
- auto *RD = C.buildImplicitRecord("kmp_task_t");
+ RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
@@ -3891,22 +4269,21 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
static RecordDecl *
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
ArrayRef<PrivateDataTy> Privates) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
// Build struct kmp_task_t_with_privates {
// kmp_task_t task_data;
// .kmp_privates_t. privates;
// };
- auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
+ RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
RD->startDefinition();
addFieldToRecordDecl(C, RD, KmpTaskTQTy);
- if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
+ if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
- }
RD->completeDefinition();
return RD;
}
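// For a task with two firstprivate ints, the records built above amount to
// (a sketch; ".kmp_privates.t" is an implementation-reserved name and the
// privates field is only present when Privates is non-empty):
//   struct .kmp_privates.t { int a; int b; };
//   struct kmp_task_t_with_privates {
//     kmp_task_t      task_data;
//     .kmp_privates.t privates;
//   };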
-/// \brief Emit a proxy function which accepts kmp_task_t as the second
+/// Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
@@ -3924,7 +4301,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
QualType SharedsPtrTy, llvm::Value *TaskFunction,
llvm::Value *TaskPrivatesMap) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
ImplicitParamDecl::Other);
@@ -3933,49 +4310,53 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
ImplicitParamDecl::Other);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
- auto &TaskEntryFnInfo =
+ const auto &TaskEntryFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
- auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
- auto *TaskEntry =
- llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
- ".omp_task_entry.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
+ llvm::FunctionType *TaskEntryTy =
+ CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
+ std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
+ auto *TaskEntry = llvm::Function::Create(
+ TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
+ TaskEntry->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args);
+ CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
+ Loc, Loc);
// TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
// tt,
// For taskloops:
// tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
// tt->task_data.shareds);
- auto *GtidParam = CGF.EmitLoadOfScalar(
+ llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
LValue TDBase = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskTypeArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
- auto *KmpTaskTWithPrivatesQTyRD =
+ const auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
LValue Base =
CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
- auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
+ const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
- auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
- auto *PartidParam = PartIdLVal.getPointer();
+ LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
+ llvm::Value *PartidParam = PartIdLVal.getPointer();
auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
- auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
- auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfLValue(SharedsLVal, Loc).getScalarVal(),
+ LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
+ llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLoadOfScalar(SharedsLVal, Loc),
CGF.ConvertTypeForMem(SharedsPtrTy));
auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
llvm::Value *PrivatesParam;
if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
- auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
+ LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivatesLVal.getPointer(), CGF.VoidPtrTy);
- } else
+ } else {
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
TaskPrivatesMap,
@@ -3987,20 +4368,20 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
std::end(CommonArgs));
if (isOpenMPTaskLoopDirective(Kind)) {
auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
- auto LBLVal = CGF.EmitLValueForField(Base, *LBFI);
- auto *LBParam = CGF.EmitLoadOfLValue(LBLVal, Loc).getScalarVal();
+ LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
+ llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
- auto UBLVal = CGF.EmitLValueForField(Base, *UBFI);
- auto *UBParam = CGF.EmitLoadOfLValue(UBLVal, Loc).getScalarVal();
+ LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
+ llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
- auto StLVal = CGF.EmitLValueForField(Base, *StFI);
- auto *StParam = CGF.EmitLoadOfLValue(StLVal, Loc).getScalarVal();
+ LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
+ llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
- auto LILVal = CGF.EmitLValueForField(Base, *LIFI);
- auto *LIParam = CGF.EmitLoadOfLValue(LILVal, Loc).getScalarVal();
+ LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
+ llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
- auto RLVal = CGF.EmitLValueForField(Base, *RFI);
- auto *RParam = CGF.EmitLoadOfLValue(RLVal, Loc).getScalarVal();
+ LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
+ llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
CallArgs.push_back(LBParam);
CallArgs.push_back(UBParam);
CallArgs.push_back(StParam);
@@ -4011,9 +4392,8 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
CallArgs);
- CGF.EmitStoreThroughLValue(
- RValue::get(CGF.Builder.getInt32(/*C=*/0)),
- CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
+ CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
CGF.FinishFunction();
return TaskEntry;
}
@@ -4023,7 +4403,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
QualType KmpInt32Ty,
QualType KmpTaskTWithPrivatesPtrQTy,
QualType KmpTaskTWithPrivatesQTy) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
ImplicitParamDecl::Other);
@@ -4032,30 +4412,34 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
ImplicitParamDecl::Other);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
- auto &DestructorFnInfo =
+ const auto &DestructorFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
- auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo);
+ llvm::FunctionType *DestructorFnTy =
+ CGM.getTypes().GetFunctionType(DestructorFnInfo);
+ std::string Name =
+ CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
auto *DestructorFn =
llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
- ".omp_task_destructor.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn,
+ Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
DestructorFnInfo);
+ DestructorFn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
- Args);
+ Args, Loc, Loc);
LValue Base = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskTypeArg),
KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
- auto *KmpTaskTWithPrivatesQTyRD =
+ const auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
Base = CGF.EmitLValueForField(Base, *FI);
- for (auto *Field :
+ for (const auto *Field :
cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
- if (auto DtorKind = Field->getType().isDestructedType()) {
- auto FieldLValue = CGF.EmitLValueForField(Base, Field);
+ if (QualType::DestructionKind DtorKind =
+ Field->getType().isDestructedType()) {
+ LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
}
}
@@ -4063,7 +4447,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
return DestructorFn;
}
-/// \brief Emit a privates mapping function for correct handling of private and
+/// Emit a privates mapping function for correct handling of private and
/// firstprivate variables.
/// \code
/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
@@ -4080,7 +4464,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
ArrayRef<const Expr *> LastprivateVars,
QualType PrivatesQTy,
ArrayRef<PrivateDataTy> Privates) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl TaskPrivatesArg(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
@@ -4089,67 +4473,69 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
Args.push_back(&TaskPrivatesArg);
llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
unsigned Counter = 1;
- for (auto *E: PrivateVars) {
+ for (const Expr *E : PrivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
ImplicitParamDecl::Other));
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
- for (auto *E : FirstprivateVars) {
+ for (const Expr *E : FirstprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
ImplicitParamDecl::Other));
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
- for (auto *E: LastprivateVars) {
+ for (const Expr *E : LastprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
ImplicitParamDecl::Other));
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
- auto &TaskPrivatesMapFnInfo =
+ const auto &TaskPrivatesMapFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *TaskPrivatesMapTy =
+ llvm::FunctionType *TaskPrivatesMapTy =
CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
+ std::string Name =
+ CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
auto *TaskPrivatesMap = llvm::Function::Create(
- TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
- ".omp_task_privates_map.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
+ TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
+ &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
TaskPrivatesMapFnInfo);
TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
- CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
- TaskPrivatesMapFnInfo, Args);
+ TaskPrivatesMapFnInfo, Args, Loc, Loc);
// *privi = &.privates.privi;
LValue Base = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
TaskPrivatesArg.getType()->castAs<PointerType>());
- auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
+ const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
Counter = 0;
- for (auto *Field : PrivatesQTyRD->fields()) {
- auto FieldLVal = CGF.EmitLValueForField(Base, Field);
- auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
- auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
- auto RefLoadLVal = CGF.EmitLoadOfPointerLValue(
+ for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
+ LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
+ const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
+ LValue RefLVal =
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
+ LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
++Counter;
@@ -4171,9 +4557,14 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
QualType SharedsTy, QualType SharedsPtrTy,
const OMPTaskDataTy &Data,
ArrayRef<PrivateDataTy> Privates, bool ForDup) {
- auto &C = CGF.getContext();
+ ASTContext &C = CGF.getContext();
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
+ OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
+ ? OMPD_taskloop
+ : OMPD_task;
+ const CapturedStmt &CS = *D.getCapturedStmt(Kind);
+ CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
LValue SrcBase;
bool IsTargetTask =
isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
@@ -4182,40 +4573,38 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// PointersArray and SizesArray. The original variables for these arrays are
// not captured and we get their addresses explicitly.
if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
- (IsTargetTask && Data.FirstprivateVars.size() > 3)) {
+ (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
SharedsTy);
}
- OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
- ? OMPD_taskloop
- : OMPD_task;
- CodeGenFunction::CGCapturedStmtInfo CapturesInfo(*D.getCapturedStmt(Kind));
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
- for (auto &&Pair : Privates) {
- auto *VD = Pair.second.PrivateCopy;
- auto *Init = VD->getAnyInitializer();
+ for (const PrivateDataTy &Pair : Privates) {
+ const VarDecl *VD = Pair.second.PrivateCopy;
+ const Expr *Init = VD->getAnyInitializer();
if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
!CGF.isTrivialInitializer(Init)))) {
LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
- if (auto *Elem = Pair.second.PrivateElemInit) {
- auto *OriginalVD = Pair.second.Original;
+ if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
+ const VarDecl *OriginalVD = Pair.second.Original;
// Check if the variable is the target-based BasePointersArray,
// PointersArray or SizesArray.
LValue SharedRefLValue;
QualType Type = OriginalVD->getType();
- if (IsTargetTask && isa<ImplicitParamDecl>(OriginalVD) &&
- isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
- cast<CapturedDecl>(OriginalVD->getDeclContext())->getNumParams() ==
- 0 &&
- isa<TranslationUnitDecl>(
- cast<CapturedDecl>(OriginalVD->getDeclContext())
- ->getDeclContext())) {
+ const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
+ if (IsTargetTask && !SharedField) {
+ assert(isa<ImplicitParamDecl>(OriginalVD) &&
+ isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
+ cast<CapturedDecl>(OriginalVD->getDeclContext())
+ ->getNumParams() == 0 &&
+ isa<TranslationUnitDecl>(
+ cast<CapturedDecl>(OriginalVD->getDeclContext())
+ ->getDeclContext()) &&
+ "Expected artificial target data variable.");
SharedRefLValue =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
} else {
- auto *SharedField = CapturesInfo.lookup(OriginalVD);
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
@@ -4226,8 +4615,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// Initialize firstprivate array.
if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
// Perform simple memcpy.
- CGF.EmitAggregateAssign(PrivateLValue.getAddress(),
- SharedRefLValue.getAddress(), Type);
+ CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
} else {
// Initialize firstprivate array using element-by-element
// initialization.
@@ -4258,8 +4646,9 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
/*capturedByInit=*/false);
}
- } else
+ } else {
CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
+ }
}
++FI;
}
@@ -4269,11 +4658,13 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
static bool checkInitIsRequired(CodeGenFunction &CGF,
ArrayRef<PrivateDataTy> Privates) {
bool InitRequired = false;
- for (auto &&Pair : Privates) {
- auto *VD = Pair.second.PrivateCopy;
- auto *Init = VD->getAnyInitializer();
+ for (const PrivateDataTy &Pair : Privates) {
+ const VarDecl *VD = Pair.second.PrivateCopy;
+ const Expr *Init = VD->getAnyInitializer();
InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
!CGF.isTrivialInitializer(Init));
+ if (InitRequired)
+ break;
}
return InitRequired;
}
@@ -4297,7 +4688,7 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
QualType SharedsPtrTy, const OMPTaskDataTy &Data,
ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
KmpTaskTWithPrivatesPtrQTy,
@@ -4310,16 +4701,17 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
Args.push_back(&DstArg);
Args.push_back(&SrcArg);
Args.push_back(&LastprivArg);
- auto &TaskDupFnInfo =
+ const auto &TaskDupFnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
- auto *TaskDup =
- llvm::Function::Create(TaskDupTy, llvm::GlobalValue::InternalLinkage,
- ".omp_task_dup.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskDup, TaskDupFnInfo);
+ llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
+ std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
+ auto *TaskDup = llvm::Function::Create(
+ TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
+ TaskDup->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
+ Loc);
LValue TDBase = CGF.EmitLoadOfPointerLValue(
CGF.GetAddrOfLocalVar(&DstArg),
@@ -4362,9 +4754,9 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
static bool
checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
bool NeedsCleanup = false;
- auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
- auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
- for (auto *FD : PrivateRD->fields()) {
+ auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
+ const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
+ for (const FieldDecl *FD : PrivateRD->fields()) {
NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
if (NeedsCleanup)
break;
@@ -4377,41 +4769,41 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Value *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 4> Privates;
// Aggregate privates and sort them by the alignment.
auto I = Data.PrivateCopies.begin();
- for (auto *E : Data.PrivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Privates.push_back(std::make_pair(
+ for (const Expr *E : Data.PrivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
- /*PrivateElemInit=*/nullptr)));
+ /*PrivateElemInit=*/nullptr));
++I;
}
I = Data.FirstprivateCopies.begin();
auto IElemInitRef = Data.FirstprivateInits.begin();
- for (auto *E : Data.FirstprivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Privates.push_back(std::make_pair(
+ for (const Expr *E : Data.FirstprivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(
VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
+ cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
++I;
++IElemInitRef;
}
I = Data.LastprivateCopies.begin();
- for (auto *E : Data.LastprivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- Privates.push_back(std::make_pair(
+ for (const Expr *E : Data.LastprivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ Privates.emplace_back(
C.getDeclAlign(VD),
PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
- /*PrivateElemInit=*/nullptr)));
+ /*PrivateElemInit=*/nullptr));
++I;
}
std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
- auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Build type kmp_routine_entry_t (if not built yet).
emitKmpRoutineEntryT(KmpInt32Ty);
// Build type kmp_task_t (if not built yet).
@@ -4432,21 +4824,23 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
}
KmpTaskTQTy = SavedKmpTaskTQTy;
}
- auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
+ const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
// Build particular struct kmp_task_t for the given task.
- auto *KmpTaskTWithPrivatesQTyRD =
+ const RecordDecl *KmpTaskTWithPrivatesQTyRD =
createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
- auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
+ QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
QualType KmpTaskTWithPrivatesPtrQTy =
C.getPointerType(KmpTaskTWithPrivatesQTy);
- auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
- auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
- auto *KmpTaskTWithPrivatesTySize = CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
+ llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
+ llvm::Type *KmpTaskTWithPrivatesPtrTy =
+ KmpTaskTWithPrivatesTy->getPointerTo();
+ llvm::Value *KmpTaskTWithPrivatesTySize =
+ CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
QualType SharedsPtrTy = C.getPointerType(SharedsTy);
// Emit initial values for private copies (if any).
llvm::Value *TaskPrivatesMap = nullptr;
- auto *TaskPrivatesMapTy =
+ llvm::Type *TaskPrivatesMapTy =
std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
if (!Privates.empty()) {
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
@@ -4461,7 +4855,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
}
// Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
// kmp_task_t *tt);
- auto *TaskEntry = emitProxyTaskFunction(
+ llvm::Value *TaskEntry = emitProxyTaskFunction(
CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
TaskPrivatesMap);
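  // For orientation, the proxy built above behaves roughly like this sketch
  // (not the literal emitted IR):
  //   kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  //     auto *TT = (kmp_task_t_with_privates *)tt;
  //     task_func(gtid, TT->part_id, &TT->privates, task_privates_map, tt,
  //               TT->shareds);
  //     return 0;
  //   }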
@@ -4487,23 +4881,24 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
}
if (Data.Priority.getInt())
Flags = Flags | PriorityFlag;
- auto *TaskFlags =
+ llvm::Value *TaskFlags =
Data.Final.getPointer()
? CGF.Builder.CreateSelect(Data.Final.getPointer(),
CGF.Builder.getInt32(FinalFlag),
CGF.Builder.getInt32(/*C=*/0))
: CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
- auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
+ llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
getThreadID(CGF, Loc), TaskFlags,
KmpTaskTWithPrivatesTySize, SharedsSize,
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TaskEntry, KmpRoutineEntryPtrTy)};
- auto *NewTask = CGF.EmitRuntimeCall(
+ llvm::Value *NewTask = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
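  // For reference, the runtime entry point invoked here is declared in libomp
  // as (a sketch of the interface):
  //   kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc, kmp_int32 gtid,
  //       kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  //       kmp_routine_entry_t task_entry);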
- auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NewTask, KmpTaskTWithPrivatesPtrTy);
+ llvm::Value *NewTaskNewTaskTTy =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ NewTask, KmpTaskTWithPrivatesPtrTy);
LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
KmpTaskTWithPrivatesQTy);
LValue TDBase =
@@ -4519,7 +4914,9 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
KmpTaskTShareds)),
Loc),
CGF.getNaturalTypeAlignment(SharedsTy));
- CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
+ LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
+ LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
+ CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
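+    // DoesNotOverlap: the freshly allocated task storage cannot alias the
+    // original shareds, so the copy may safely cover the complete object
+    // (an assumption about the allocation, noted here for clarity).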
}
// Emit initial values for private copies (if any).
TaskResultTy Result;
@@ -4539,7 +4936,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
enum { Priority = 0, Destructors = 1 };
  // Provide a pointer to the function with destructors for privates.
auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
- auto *KmpCmplrdataUD = (*FI)->getType()->getAsUnionType()->getDecl();
+ const RecordDecl *KmpCmplrdataUD =
+ (*FI)->getType()->getAsUnionType()->getDecl();
if (NeedsCleanup) {
llvm::Value *DestructorFn = emitDestructorsFunction(
CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
@@ -4582,8 +4980,8 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *TaskEntry = Result.TaskEntry;
llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
LValue TDBase = Result.TDBase;
- RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
- auto &C = CGM.getContext();
+ const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
+ ASTContext &C = CGM.getContext();
// Process list of dependences.
Address DependenciesArray = Address::invalid();
unsigned NumDependencies = Data.Dependences.size();
@@ -4603,8 +5001,9 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
KmpDependInfoRD->completeDefinition();
KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
- } else
+ } else {
KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
+ }
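+    // The record completed above mirrors libomp's dependence descriptor,
+    // roughly (a sketch of the runtime-side layout):
+    //   typedef struct kmp_depend_info {
+    //     kmp_intptr_t base_addr;
+    //     size_t len;
+    //     struct { bool in : 1; bool out : 1; } flags;
+    //   } kmp_depend_info_t;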
CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
// Define type kmp_depend_info[<Dependences.size()>];
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
@@ -4613,12 +5012,13 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
// kmp_depend_info[<Dependences.size()>] deps;
DependenciesArray =
CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
- for (unsigned i = 0; i < NumDependencies; ++i) {
- const Expr *E = Data.Dependences[i].second;
- auto Addr = CGF.EmitLValue(E);
+ for (unsigned I = 0; I < NumDependencies; ++I) {
+ const Expr *E = Data.Dependences[I].second;
+ LValue Addr = CGF.EmitLValue(E);
llvm::Value *Size;
QualType Ty = E->getType();
- if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
+ if (const auto *ASE =
+ dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal =
CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
llvm::Value *UpAddr =
@@ -4627,24 +5027,25 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
- } else
+ } else {
Size = CGF.getTypeSize(Ty);
- auto Base = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
+ }
+ LValue Base = CGF.MakeAddrLValue(
+ CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
KmpDependInfoTy);
// deps[i].base_addr = &<Dependences[i].second>;
- auto BaseAddrLVal = CGF.EmitLValueForField(
+ LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(
CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
BaseAddrLVal);
// deps[i].len = sizeof(<Dependences[i].second>);
- auto LenLVal = CGF.EmitLValueForField(
+ LValue LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Len));
CGF.EmitStoreOfScalar(Size, LenLVal);
// deps[i].flags = <Dependences[i].first>;
RTLDependenceKindTy DepKind;
- switch (Data.Dependences[i].first) {
+ switch (Data.Dependences[I].first) {
case OMPC_DEPEND_in:
DepKind = DepIn;
break;
@@ -4658,7 +5059,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
case OMPC_DEPEND_unknown:
llvm_unreachable("Unknown task dependence type");
}
- auto FlagsLVal = CGF.EmitLValueForField(
+ LValue FlagsLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
FlagsLVal);
@@ -4668,14 +5069,14 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
CGF.VoidPtrTy);
}
- // NOTE: routine and part_id fields are intialized by __kmpc_omp_task_alloc()
+ // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
// libcall.
// Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
// kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
// list is not empty
- auto *ThreadID = getThreadID(CGF, Loc);
- auto *UpLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
llvm::Value *DepTaskArgs[7];
if (NumDependencies) {
@@ -4692,7 +5093,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
&DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
if (!Data.Tied) {
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
- auto PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
+ LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
}
if (NumDependencies) {
@@ -4720,7 +5121,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
NumDependencies, &DepWaitTaskArgs,
Loc](CodeGenFunction &CGF, PrePostActionTy &) {
- auto &RT = CGF.CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
@@ -4750,9 +5151,9 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
RCG(CGF);
};
- if (IfCond)
+ if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
- else {
+ } else {
RegionCodeGenTy ThenRCG(ThenCodeGen);
ThenRCG(CGF);
}
@@ -4768,7 +5169,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
return;
TaskResultTy Result =
emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
- // NOTE: routine and part_id fields are intialized by __kmpc_omp_task_alloc()
+ // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
// libcall.
// Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
// if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
@@ -4779,27 +5180,28 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
if (IfCond) {
IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
/*isSigned=*/true);
- } else
+ } else {
IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
+ }
LValue LBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
- auto *LBVar =
+ const auto *LBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
/*IsInitializer=*/true);
LValue UBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
- auto *UBVar =
+ const auto *UBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
/*IsInitializer=*/true);
LValue StLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
- auto *StVar =
+ const auto *StVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
/*IsInitializer=*/true);
@@ -4807,9 +5209,9 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
LValue RedLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
- if (Data.Reductions)
+ if (Data.Reductions) {
CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
- else {
+ } else {
CGF.EmitNullInitialization(RedLVal.getAddress(),
CGF.getContext().VoidPtrTy);
}
@@ -4821,7 +5223,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
IfVal,
LBLVal.getPointer(),
UBLVal.getPointer(),
- CGF.EmitLoadOfScalar(StLVal, SourceLocation()),
+ CGF.EmitLoadOfScalar(StLVal, Loc),
llvm::ConstantInt::getNullValue(
CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
llvm::ConstantInt::getSigned(
@@ -4838,7 +5240,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
}
-/// \brief Emit reduction operation for each element of array (required for
+/// Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
@@ -4860,22 +5262,22 @@ static void EmitOMPAggregateReduction(
Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
  // Drill down to the base element type of both arrays.
- auto ArrayTy = Type->getAsArrayTypeUnsafe();
- auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
+ const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
+ llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
- auto RHSBegin = RHSAddr.getPointer();
- auto LHSBegin = LHSAddr.getPointer();
+ llvm::Value *RHSBegin = RHSAddr.getPointer();
+ llvm::Value *LHSBegin = LHSAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- auto LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
+ llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
// The basic structure here is a while-do loop.
- auto BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
- auto DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
- auto IsEmpty =
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
+ llvm::Value *IsEmpty =
CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
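  // Equivalent C sketch of the control flow being constructed here:
  //   if (lhs != lhs_end) {
  //     do { *lhs = op(*lhs, *rhs); ++lhs; ++rhs; } while (lhs != lhs_end);
  //   }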
// Enter the loop body, making that address the current address.
- auto EntryBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(BodyBB);
CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
@@ -4896,19 +5298,19 @@ static void EmitOMPAggregateReduction(
// Emit copy.
CodeGenFunction::OMPPrivateScope Scope(CGF);
- Scope.addPrivate(LHSVar, [=]() -> Address { return LHSElementCurrent; });
- Scope.addPrivate(RHSVar, [=]() -> Address { return RHSElementCurrent; });
+ Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
+ Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
Scope.Privatize();
RedOpGen(CGF, XExpr, EExpr, UpExpr);
Scope.ForceCleanup();
// Shift the address forward by one element.
- auto LHSElementNext = CGF.Builder.CreateConstGEP1_32(
+ llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
- auto RHSElementNext = CGF.Builder.CreateConstGEP1_32(
+ llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
// Check whether we've reached the end.
- auto Done =
+ llvm::Value *Done =
CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
@@ -4923,11 +5325,12 @@ static void EmitOMPAggregateReduction(
/// UDR combiner function.
static void emitReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp) {
- if (auto *CE = dyn_cast<CallExpr>(ReductionOp))
- if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
- if (auto *DRE =
+ if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
+ if (const auto *DRE =
dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
- if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
+ if (const auto *DRD =
+ dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
std::pair<llvm::Function *, llvm::Function *> Reduction =
CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
RValue Func = RValue::get(Reduction.first);
@@ -4939,24 +5342,29 @@ static void emitReductionCombiner(CodeGenFunction &CGF,
}
llvm::Value *CGOpenMPRuntime::emitReductionFunction(
- CodeGenModule &CGM, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps) {
- auto &C = CGM.getContext();
+ CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
+ ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
+ ASTContext &C = CGM.getContext();
// void reduction_func(void *LHSArg, void *RHSArg);
FunctionArgList Args;
- ImplicitParamDecl LHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
- ImplicitParamDecl RHSArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
+ ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
- auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- ".omp.reduction.reduction_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
+ const auto &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ std::string Name = getName({"omp", "reduction", "reduction_func"});
+ auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
+ llvm::GlobalValue::InternalLinkage, Name,
+ &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
// Dst = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
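  // Taken together, the code emitted below gives the combiner this rough
  // shape (a sketch; reduce_op_i stands for each ReductionOps entry):
  //   void reduction_func(void *lhs, void *rhs) {
  //     void **Dst = (void **)lhs, **Src = (void **)rhs;
  //     *(T_i *)Dst[i] = reduce_op_i(*(T_i *)Dst[i], *(T_i *)Src[i]); // per i
  //   }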
@@ -4974,12 +5382,14 @@ llvm::Value *CGOpenMPRuntime::emitReductionFunction(
auto IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
- auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
- Scope.addPrivate(RHSVar, [&]() -> Address {
+ const auto *RHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
+ Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
});
- auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
- Scope.addPrivate(LHSVar, [&]() -> Address {
+ const auto *LHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
+ Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
});
QualType PrivTy = (*IPriv)->getType();
@@ -4989,8 +5399,9 @@ llvm::Value *CGOpenMPRuntime::emitReductionFunction(
Address Elem =
CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
- auto *VLA = CGF.getContext().getAsVariableArrayType(PrivTy);
- auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
+ const VariableArrayType *VLA =
+ CGF.getContext().getAsVariableArrayType(PrivTy);
+ const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
CodeGenFunction::OpaqueValueMapping OpaqueMap(
CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
CGF.EmitVariablyModifiedType(PrivTy);
@@ -5000,19 +5411,20 @@ llvm::Value *CGOpenMPRuntime::emitReductionFunction(
IPriv = Privates.begin();
auto ILHS = LHSExprs.begin();
auto IRHS = RHSExprs.begin();
- for (auto *E : ReductionOps) {
+ for (const Expr *E : ReductionOps) {
if ((*IPriv)->getType()->isArrayType()) {
// Emit reduction for array section.
- auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
EmitOMPAggregateReduction(
CGF, (*IPriv)->getType(), LHSVar, RHSVar,
[=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
emitReductionCombiner(CGF, E);
});
- } else
+ } else {
// Emit reduction for array subscript or single variable.
emitReductionCombiner(CGF, E);
+ }
++IPriv;
++ILHS;
++IRHS;
@@ -5029,16 +5441,17 @@ void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
const DeclRefExpr *RHS) {
if (PrivateRef->getType()->isArrayType()) {
// Emit reduction for array section.
- auto *LHSVar = cast<VarDecl>(LHS->getDecl());
- auto *RHSVar = cast<VarDecl>(RHS->getDecl());
+ const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
+ const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
EmitOMPAggregateReduction(
CGF, PrivateRef->getType(), LHSVar, RHSVar,
[=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
emitReductionCombiner(CGF, ReductionOp);
});
- } else
+ } else {
// Emit reduction for array subscript or single variable.
emitReductionCombiner(CGF, ReductionOp);
+ }
}
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
@@ -5088,14 +5501,14 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
// ...
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
if (SimpleReduction) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
auto IPriv = Privates.begin();
auto ILHS = LHSExprs.begin();
auto IRHS = RHSExprs.begin();
- for (auto *E : ReductionOps) {
+ for (const Expr *E : ReductionOps) {
emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
++IPriv;
@@ -5108,7 +5521,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
// 1. Build a list of reduction variables.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
auto Size = RHSExprs.size();
- for (auto *E : Privates) {
+ for (const Expr *E : Privates) {
if (E->getType()->isVariablyModifiedType())
      // Reserve space for the array size.
++Size;
@@ -5136,7 +5549,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *Size = CGF.Builder.CreateIntCast(
CGF.getVLASize(
CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .first,
+ .NumElts,
CGF.SizeTy, /*isSigned=*/false);
CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
Elem);
@@ -5144,19 +5557,20 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
}
// 2. Emit reduce_func().
- auto *ReductionFn = emitReductionFunction(
- CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ReductionFn = emitReductionFunction(
+ CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
- auto *Lock = getCriticalRegionLock(".reduction");
+ std::string Name = getName({"reduction"});
+ llvm::Value *Lock = getCriticalRegionLock(Name);
// 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
// RedList, reduce_func, &<lock>);
- auto *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
- auto *ThreadId = getThreadID(CGF, Loc);
- auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
+ llvm::Value *ThreadId = getThreadID(CGF, Loc);
+ llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc, // ident_t *<loc>
@@ -5167,14 +5581,15 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ReductionFn, // void (*) (void *, void *) <reduce_func>
Lock // kmp_critical_name *&<lock>
};
- auto Res = CGF.EmitRuntimeCall(
+ llvm::Value *Res = CGF.EmitRuntimeCall(
createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
: OMPRTL__kmpc_reduce),
Args);
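  // The steps below materialize the canonical runtime protocol (a sketch):
  //   switch (__kmpc_reduce{_nowait}(loc, gtid, n, sizeof(RedList), RedList,
  //                                  reduce_func, &lock)) {
  //   case 1: <lhs[i] = op(lhs[i], rhs[i]) for all i>;
  //           __kmpc_end_reduce{_nowait}(loc, gtid, &lock); break;
  //   case 2: <atomic per-item combine>; break;
  //   default: break;
  //   }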
// 5. Build switch(res)
- auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
- auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
+ llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
+ llvm::SwitchInst *SwInst =
+ CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
// 6. Build case 1:
// ...
@@ -5182,7 +5597,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
// ...
// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
// break;
- auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
+ llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
CGF.EmitBlock(Case1BB);
@@ -5192,13 +5607,13 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ThreadId, // i32 <gtid>
Lock // kmp_critical_name *&<lock>
};
- auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- auto &RT = CGF.CGM.getOpenMPRuntime();
+ auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
+ CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
auto IPriv = Privates.begin();
auto ILHS = LHSExprs.begin();
auto IRHS = RHSExprs.begin();
- for (auto *E : ReductionOps) {
+ for (const Expr *E : ReductionOps) {
RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
++IPriv;
@@ -5222,44 +5637,44 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
// ...
// break;
- auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
+ llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
CGF.EmitBlock(Case2BB);
- auto &&AtomicCodeGen = [Loc, &Privates, &LHSExprs, &RHSExprs, &ReductionOps](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
auto ILHS = LHSExprs.begin();
auto IRHS = RHSExprs.begin();
auto IPriv = Privates.begin();
- for (auto *E : ReductionOps) {
+ for (const Expr *E : ReductionOps) {
const Expr *XExpr = nullptr;
const Expr *EExpr = nullptr;
const Expr *UpExpr = nullptr;
BinaryOperatorKind BO = BO_Comma;
- if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_Assign) {
XExpr = BO->getLHS();
UpExpr = BO->getRHS();
}
}
// Try to emit update expression as a simple atomic.
- auto *RHSExpr = UpExpr;
+ const Expr *RHSExpr = UpExpr;
if (RHSExpr) {
// Analyze RHS part of the whole expression.
- if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
+ if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
RHSExpr->IgnoreParenImpCasts())) {
// If this is a conditional operator, analyze its condition for
// min/max reduction operator.
RHSExpr = ACO->getCond();
}
- if (auto *BORHS =
+ if (const auto *BORHS =
dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
EExpr = BORHS->getRHS();
BO = BORHS->getOpcode();
}
}
if (XExpr) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
auto &&AtomicRedGen = [BO, VD,
Loc](CodeGenFunction &CGF, const Expr *XExpr,
const Expr *EExpr, const Expr *UpExpr) {
@@ -5273,7 +5688,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
[&CGF, UpExpr, VD, Loc](RValue XRValue) {
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
PrivateScope.addPrivate(
- VD, [&CGF, VD, XRValue, Loc]() -> Address {
+ VD, [&CGF, VD, XRValue, Loc]() {
Address LHSTemp = CGF.CreateMemTemp(VD->getType());
CGF.emitOMPSimpleStore(
CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
@@ -5286,19 +5701,22 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
};
if ((*IPriv)->getType()->isArrayType()) {
// Emit atomic reduction for array section.
- auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ const auto *RHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
AtomicRedGen, XExpr, EExpr, UpExpr);
- } else
+ } else {
// Emit atomic reduction for array subscript or single variable.
AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
+ }
} else {
// Emit as a critical region.
auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
- const Expr *, const Expr *) {
- auto &RT = CGF.CGM.getOpenMPRuntime();
+ const Expr *, const Expr *) {
+ CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
+ std::string Name = RT.getName({"atomic_reduction"});
RT.emitCriticalRegion(
- CGF, ".atomic_reduction",
+ CGF, Name,
[=](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
emitReductionCombiner(CGF, E);
@@ -5306,12 +5724,15 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
Loc);
};
if ((*IPriv)->getType()->isArrayType()) {
- auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ const auto *LHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *RHSVar =
+ cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
CritRedGen);
- } else
+ } else {
CritRedGen(CGF, nullptr, nullptr, nullptr);
+ }
}
++ILHS;
++IRHS;
@@ -5331,20 +5752,29 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
EndArgs);
AtomicRCG.setAction(Action);
AtomicRCG(CGF);
- } else
+ } else {
AtomicRCG(CGF);
+ }
CGF.EmitBranch(DefaultBB);
CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}
/// Generates unique name for artificial threadprivate variables.
-/// Format is: <Prefix> "." <Loc_raw_encoding> "_" <N>
-static std::string generateUniqueName(StringRef Prefix, SourceLocation Loc,
- unsigned N) {
+/// Format is: <Prefix> "." <Decl_mangled_name> "_" "<Decl_start_loc_raw_enc>"
+static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
+ const Expr *Ref) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- Out << Prefix << "." << Loc.getRawEncoding() << "_" << N;
+ const clang::DeclRefExpr *DE;
+ const VarDecl *D = ::getBaseDecl(Ref, DE);
+ if (!D)
+ D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
+ D = D->getCanonicalDecl();
+ std::string Name = CGM.getOpenMPRuntime().getName(
+ {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
+ Out << Prefix << Name << "_"
+ << D->getCanonicalDecl()->getLocStart().getRawEncoding();
return Out.str();
}
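// For a global variable 'g' and prefix "reduction_size" this yields a name
// of roughly the form "reduction_size.g_<raw loc>"; the exact separators
// come from getName() and differ between host and device runtimes.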
@@ -5359,19 +5789,21 @@ static std::string generateUniqueName(StringRef Prefix, SourceLocation Loc,
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) {
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
FunctionArgList Args;
- ImplicitParamDecl Param(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.emplace_back(&Param);
- auto &FnInfo =
+ const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
- ".red_init.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
+ Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
Address PrivateAddr = CGF.EmitLoadOfPointer(
CGF.GetAddrOfLocalVar(&Param),
C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
@@ -5381,10 +5813,9 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
if (RCG.getSizes(N).second) {
Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
- generateUniqueName("reduction_size", Loc, N));
- Size =
- CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
- CGM.getContext().getSizeType(), SourceLocation());
+ generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
+ Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
+ CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
LValue SharedLVal;
@@ -5395,7 +5826,10 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
Address SharedAddr =
CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().VoidPtrTy,
- generateUniqueName("reduction", Loc, N));
+ generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
+ SharedAddr = CGF.EmitLoadOfPointer(
+ SharedAddr,
+ CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
} else {
SharedLVal = CGF.MakeNaturalAlignAddrLValue(
@@ -5427,40 +5861,42 @@ static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
const Expr *ReductionOp,
const Expr *LHS, const Expr *RHS,
const Expr *PrivateRef) {
- auto &C = CGM.getContext();
- auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
- auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
+ ASTContext &C = CGM.getContext();
+ const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
+ const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
FunctionArgList Args;
- ImplicitParamDecl ParamInOut(C, C.VoidPtrTy, ImplicitParamDecl::Other);
- ImplicitParamDecl ParamIn(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.emplace_back(&ParamInOut);
Args.emplace_back(&ParamIn);
- auto &FnInfo =
+ const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
- ".red_comb.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
+ Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from the
  // global threadprivate variable.
if (RCG.getSizes(N).second) {
Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
- generateUniqueName("reduction_size", Loc, N));
- Size =
- CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
- CGM.getContext().getSizeType(), SourceLocation());
+ generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
+ Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
+ CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
// Remap lhs and rhs variables to the addresses of the function arguments.
// %lhs = bitcast void* %arg0 to <type>*
// %rhs = bitcast void* %arg1 to <type>*
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() -> Address {
+ PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
// Pull out the pointer to the variable.
Address PtrAddr = CGF.EmitLoadOfPointer(
CGF.GetAddrOfLocalVar(&ParamInOut),
@@ -5468,7 +5904,7 @@ static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
return CGF.Builder.CreateElementBitCast(
PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
});
- PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() -> Address {
+ PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
// Pull out the pointer to the variable.
Address PtrAddr = CGF.EmitLoadOfPointer(
CGF.GetAddrOfLocalVar(&ParamIn),
@@ -5500,19 +5936,21 @@ static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
ReductionCodeGen &RCG, unsigned N) {
if (!RCG.needCleanups(N))
return nullptr;
- auto &C = CGM.getContext();
+ ASTContext &C = CGM.getContext();
FunctionArgList Args;
- ImplicitParamDecl Param(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
+ ImplicitParamDecl::Other);
Args.emplace_back(&Param);
- auto &FnInfo =
+ const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
+ std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
- ".red_fini.", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
+ Name, &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
Address PrivateAddr = CGF.EmitLoadOfPointer(
CGF.GetAddrOfLocalVar(&Param),
C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
@@ -5522,10 +5960,9 @@ static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
if (RCG.getSizes(N).second) {
Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
- generateUniqueName("reduction_size", Loc, N));
- Size =
- CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
- CGM.getContext().getSizeType(), SourceLocation());
+ generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
+ Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
+ CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
// Emit the finalizer body:
@@ -5551,7 +5988,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
// kmp_task_red_flags_t flags; // flags for additional info from compiler
// } kmp_task_red_input_t;
ASTContext &C = CGM.getContext();
- auto *RD = C.buildImplicitRecord("kmp_task_red_input_t");
+ RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
RD->startDefinition();
const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
@@ -5652,14 +6089,14 @@ void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
/*isSigned=*/false);
Address SizeAddr = getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().getSizeType(),
- generateUniqueName("reduction_size", Loc, N));
+ generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
}
  // Store the address of the original reduction item if a custom initializer
  // is used.
if (RCG.usesReductionInitializer(N)) {
Address SharedAddr = getAddrOfArtificialThreadPrivate(
CGF, CGM.getContext().VoidPtrTy,
- generateUniqueName("reduction", Loc, N));
+ generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
CGF.Builder.CreateStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
@@ -5749,18 +6186,18 @@ void CGOpenMPRuntime::emitCancellationPointCall(
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
  // Ignore the return result until untied tasks are supported.
- auto *Result = CGF.EmitRuntimeCall(
+ llvm::Value *Result = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
// if (__kmpc_cancellationpoint()) {
// exit from construct;
// }
- auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
- auto *ContBB = CGF.createBasicBlock(".cancel.continue");
- auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
+ llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// exit from construct;
- auto CancelDest =
+ CodeGenFunction::JumpDest CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
@@ -5779,70 +6216,42 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
PrePostActionTy &) {
- auto &RT = CGF.CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
llvm::Value *Args[] = {
RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore the return result until untied tasks are supported.
- auto *Result = CGF.EmitRuntimeCall(
+ llvm::Value *Result = CGF.EmitRuntimeCall(
RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
// if (__kmpc_cancel()) {
// exit from construct;
// }
- auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
- auto *ContBB = CGF.createBasicBlock(".cancel.continue");
- auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
+ llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// exit from construct;
- auto CancelDest =
+ CodeGenFunction::JumpDest CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
};
- if (IfCond)
+ if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenGen,
[](CodeGenFunction &, PrePostActionTy &) {});
- else {
+ } else {
RegionCodeGenTy ThenRCG(ThenGen);
ThenRCG(CGF);
}
}
}
-/// \brief Obtain information that uniquely identifies a target entry. This
-/// consists of the file and device IDs as well as line number associated with
-/// the relevant entry source location.
-static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
- unsigned &DeviceID, unsigned &FileID,
- unsigned &LineNum) {
-
- auto &SM = C.getSourceManager();
-
- // The loc should be always valid and have a file ID (the user cannot use
- // #pragma directives in macros)
-
- assert(Loc.isValid() && "Source location is expected to be always valid.");
- assert(Loc.isFileID() && "Source location is expected to refer to a file.");
-
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
-
- llvm::sys::fs::UniqueID ID;
- if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- llvm_unreachable("Source file with target region no longer exists!");
-
- DeviceID = ID.getDevice();
- FileID = ID.getFile();
- LineNum = PLoc.getLine();
-}
-
void CGOpenMPRuntime::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
assert(!ParentName.empty() && "Invalid target region parent name!");
-
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
}
@@ -5872,7 +6281,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
<< llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
}
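  // e.g. a target region in function 'foo' on line 42 ends up named roughly
  // __omp_offloading_<device>_<file>_foo_l42, with device and file IDs
  // rendered in hex (illustrative; the IDs come from the source file).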
- const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
CodeGenFunction CGF(CGM, true);
CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
@@ -5898,22 +6307,25 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
if (CGM.getLangOpts().OpenMPIsDevice) {
OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
- OutlinedFn->setLinkage(llvm::GlobalValue::ExternalLinkage);
- } else
+ OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ OutlinedFn->setDSOLocal(false);
+ } else {
+ std::string Name = getName({EntryFnName, "region_id"});
OutlinedFnID = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage,
- llvm::Constant::getNullValue(CGM.Int8Ty), ".omp_offload.region_id");
+ llvm::GlobalValue::WeakAnyLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), Name);
+ }
// Register the information for the entry associated with this target region.
OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
- /*Flags=*/0);
+ OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
}
/// Discard all CompoundStmts intervening between two constructs.
static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
- while (auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
+ while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
Body = CS->body_front();
return Body;
@@ -5931,12 +6343,11 @@ static llvm::Value *
emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
-
assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
"teams directive expected to be "
"emitted only for the host!");
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
// If the target directive is combined with a teams directive:
// Return the value in the num_teams clause, if any.
@@ -5944,8 +6355,8 @@ emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
- auto NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
- /*IgnoreResultAssign*/ true);
+ llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
+ /*IgnoreResultAssign*/ true);
return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
/*IsSigned=*/true);
}
@@ -5965,12 +6376,12 @@ emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
// the expression is captured in the enclosing target environment when the
// teams directive is not combined with target.
- const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
- if (auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
+ if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
ignoreCompoundStmts(CS.getCapturedStmt()))) {
if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
- if (auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
+ if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
CGOpenMPInnerExprInfo CGInfo(CGF, CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
@@ -6000,12 +6411,11 @@ static llvm::Value *
emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
-
assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
"teams directive expected to be "
"emitted only for the host!");
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
//
// If the target directive is combined with a teams directive:
@@ -6030,8 +6440,9 @@ emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
if (const auto *ThreadLimitClause =
D.getSingleClause<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
- auto ThreadLimit = CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
- /*IgnoreResultAssign*/ true);
+ llvm::Value *ThreadLimit =
+ CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
+ /*IgnoreResultAssign*/ true);
ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
/*IsSigned=*/true);
}
@@ -6068,12 +6479,12 @@ emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
// the expression is captured in the enclosing target environment when the
// teams directive is not combined with target.
- const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
- if (auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
+ if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
ignoreCompoundStmts(CS.getCapturedStmt()))) {
if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
- if (auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
+ if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
CGOpenMPInnerExprInfo CGInfo(CGF, CS);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
@@ -6092,42 +6503,50 @@ emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
}
namespace {
-// \brief Utility to handle information from clauses associated with a given
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
+// Utility to handle information from clauses associated with a given
// construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
// It provides a convenient interface to obtain the information and generate
// code for that information.
class MappableExprsHandler {
public:
- /// \brief Values for bit flags used to specify the mapping type for
+ /// Values for bit flags used to specify the mapping type for
/// offloading.
- enum OpenMPOffloadMappingFlags {
- /// \brief Allocate memory on the device and move data from host to device.
+ enum OpenMPOffloadMappingFlags : uint64_t {
+ /// No flags
+ OMP_MAP_NONE = 0x0,
+ /// Allocate memory on the device and move data from host to device.
OMP_MAP_TO = 0x01,
- /// \brief Allocate memory on the device and move data from device to host.
+ /// Allocate memory on the device and move data from device to host.
OMP_MAP_FROM = 0x02,
- /// \brief Always perform the requested mapping action on the element, even
+ /// Always perform the requested mapping action on the element, even
/// if it was already mapped before.
OMP_MAP_ALWAYS = 0x04,
- /// \brief Delete the element from the device environment, ignoring the
+ /// Delete the element from the device environment, ignoring the
/// current reference count associated with the element.
OMP_MAP_DELETE = 0x08,
- /// \brief The element being mapped is a pointer-pointee pair; both the
+ /// The element being mapped is a pointer-pointee pair; both the
/// pointer and the pointee should be mapped.
OMP_MAP_PTR_AND_OBJ = 0x10,
- /// \brief This flags signals that the base address of an entry should be
+    /// This flag signals that the base address of an entry should be
/// passed to the target kernel as an argument.
OMP_MAP_TARGET_PARAM = 0x20,
- /// \brief Signal that the runtime library has to return the device pointer
+ /// Signal that the runtime library has to return the device pointer
/// in the current position for the data being mapped. Used when we have the
/// use_device_ptr clause.
OMP_MAP_RETURN_PARAM = 0x40,
- /// \brief This flag signals that the reference being passed is a pointer to
+ /// This flag signals that the reference being passed is a pointer to
/// private data.
OMP_MAP_PRIVATE = 0x80,
- /// \brief Pass the element to the device by value.
+ /// Pass the element to the device by value.
OMP_MAP_LITERAL = 0x100,
/// Implicit map
OMP_MAP_IMPLICIT = 0x200,
+    /// The 16 MSBs of the flags indicate whether the entry is a member of some
+ /// struct/class.
+ OMP_MAP_MEMBER_OF = 0xffff000000000000,
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
};
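  // These flags compose bitwise (see LLVM_MARK_AS_BITMASK_ENUM above); e.g.
  // an explicit tofrom mapping of a struct member could carry, illustratively:
  //   OMP_MAP_TO | OMP_MAP_FROM | <parent index encoded in OMP_MAP_MEMBER_OF>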
/// Class that associates information with a base pointer to be passed to the
@@ -6147,21 +6566,60 @@ public:
void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
};
- typedef SmallVector<BasePointerInfo, 16> MapBaseValuesArrayTy;
- typedef SmallVector<llvm::Value *, 16> MapValuesArrayTy;
- typedef SmallVector<uint64_t, 16> MapFlagsArrayTy;
+ using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
+ using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
+ using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
+
+  /// Map between a struct and its lowest & highest elements which have been
+ /// mapped.
+ /// [ValueDecl *] --> {LE(FieldIndex, Pointer),
+ /// HE(FieldIndex, Pointer)}
+ struct StructRangeInfoTy {
+ std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
+ 0, Address::invalid()};
+ std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
+ 0, Address::invalid()};
+ Address Base = Address::invalid();
+ };
private:
- /// \brief Directive from where the map clauses were extracted.
+  /// Information gathered from a map clause about one mappable expression:
+  /// its components, map type and modifier, and whether a device pointer
+  /// must be returned for it.
+ struct MapInfo {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
+ OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
+ OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
+ bool ReturnDevicePointer = false;
+ bool IsImplicit = false;
+
+ MapInfo() = default;
+ MapInfo(
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
+ OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
+ bool ReturnDevicePointer, bool IsImplicit)
+ : Components(Components), MapType(MapType),
+ MapTypeModifier(MapTypeModifier),
+ ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
+ };
+
+ /// If use_device_ptr is used on a pointer which is a struct member and there
+ /// is no map information about it, then emission of that entry is deferred
+ /// until the whole struct has been processed.
+ struct DeferredDevicePtrEntryTy {
+ const Expr *IE = nullptr;
+ const ValueDecl *VD = nullptr;
+
+ DeferredDevicePtrEntryTy(const Expr *IE, const ValueDecl *VD)
+ : IE(IE), VD(VD) {}
+ };
+
+ /// Directive from where the map clauses were extracted.
const OMPExecutableDirective &CurDir;
- /// \brief Function the directive is being generated for.
+ /// Function the directive is being generated for.
CodeGenFunction &CGF;
- /// \brief Set of all first private variables in the current directive.
+ /// Set of all first private variables in the current directive.
llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
- /// Set of all reduction variables in the current directive.
- llvm::SmallPtrSet<const VarDecl *, 8> ReductionDecls;
/// Map between device pointer declarations and their expression components.
/// The key value for declarations in 'this' is null.
@@ -6171,10 +6629,10 @@ private:
DevPointersMap;
llvm::Value *getExprTypeSize(const Expr *E) const {
- auto ExprTy = E->getType().getCanonicalType();
+ QualType ExprTy = E->getType().getCanonicalType();
// Reference types are ignored for mapping purposes.
- if (auto *RefTy = ExprTy->getAs<ReferenceType>())
+ if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
ExprTy = RefTy->getPointeeType().getCanonicalType();
// Given that an array section is considered a built-in type, we need to
@@ -6191,10 +6649,10 @@ private:
return CGF.getTypeSize(BaseTy);
llvm::Value *ElemSize;
- if (auto *PTy = BaseTy->getAs<PointerType>())
+ if (const auto *PTy = BaseTy->getAs<PointerType>()) {
ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
- else {
- auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
+ } else {
+ const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
assert(ATy && "Expecting array type if not a pointer type.");
ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
}
@@ -6204,7 +6662,7 @@ private:
if (!OAE->getLength())
return ElemSize;
- auto *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
+ llvm::Value *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
LengthVal =
CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
@@ -6212,14 +6670,16 @@ private:
return CGF.getTypeSize(ExprTy);
}
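// For an array section with an explicit length, the computation above reduces
// to length * element-size (emitted as an NUW multiply). A constant-folded
// model of that rule, with hypothetical names:

#include <cassert>
#include <cstddef>

// Bytes occupied by the section 'base[lower : length]'.
constexpr size_t sectionSize(size_t Length, size_t ElemSize) {
  return Length * ElemSize;
}

int main() {
  double payload[22];
  // map(s.p[:22]) from the examples below transfers 22 * sizeof(double) bytes.
  assert(sectionSize(22, sizeof(double)) == sizeof(payload));
}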
- /// \brief Return the corresponding bits for a given map clause modifier. Add
+ /// Return the corresponding bits for a given map clause modifier. Add
/// a flag marking the map as a pointer if requested. Add a flag marking the
/// map as the first one of a series of maps that relate to the same map
/// expression.
- uint64_t getMapTypeBits(OpenMPMapClauseKind MapType,
- OpenMPMapClauseKind MapTypeModifier, bool AddPtrFlag,
- bool AddIsTargetParamFlag) const {
- uint64_t Bits = 0u;
+ OpenMPOffloadMappingFlags getMapTypeBits(OpenMPMapClauseKind MapType,
+ OpenMPMapClauseKind MapTypeModifier,
+ bool IsImplicit, bool AddPtrFlag,
+ bool AddIsTargetParamFlag) const {
+ OpenMPOffloadMappingFlags Bits =
+ IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
switch (MapType) {
case OMPC_MAP_alloc:
case OMPC_MAP_release:
@@ -6229,20 +6689,20 @@ private:
// type modifiers.
break;
case OMPC_MAP_to:
- Bits = OMP_MAP_TO;
+ Bits |= OMP_MAP_TO;
break;
case OMPC_MAP_from:
- Bits = OMP_MAP_FROM;
+ Bits |= OMP_MAP_FROM;
break;
case OMPC_MAP_tofrom:
- Bits = OMP_MAP_TO | OMP_MAP_FROM;
+ Bits |= OMP_MAP_TO | OMP_MAP_FROM;
break;
case OMPC_MAP_delete:
- Bits = OMP_MAP_DELETE;
+ Bits |= OMP_MAP_DELETE;
break;
- default:
+ case OMPC_MAP_always:
+ case OMPC_MAP_unknown:
llvm_unreachable("Unexpected map type!");
- break;
}
if (AddPtrFlag)
Bits |= OMP_MAP_PTR_AND_OBJ;
@@ -6253,10 +6713,10 @@ private:
return Bits;
}
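// A hedged model of the flag combination performed by getMapTypeBits. The
// numeric values below are stand-ins (only OMP_MAP_MEMBER_OF's value appears
// in this hunk); the logic mirrors the switch above:

#include <cassert>
#include <cstdint>

enum : uint64_t {
  MAP_TO = 0x1,
  MAP_FROM = 0x2,
  MAP_PTR_AND_OBJ = 0x10,
  MAP_TARGET_PARAM = 0x20,
  MAP_IMPLICIT = 0x200,
};

uint64_t mapTypeBits(bool To, bool From, bool Implicit, bool AddPtrFlag,
                     bool AddIsTargetParamFlag) {
  uint64_t Bits = Implicit ? MAP_IMPLICIT : 0; // implicit maps start marked
  if (To)
    Bits |= MAP_TO;
  if (From)
    Bits |= MAP_FROM;
  if (AddPtrFlag)
    Bits |= MAP_PTR_AND_OBJ; // later map of the same expression
  if (AddIsTargetParamFlag)
    Bits |= MAP_TARGET_PARAM; // first map for the current capture
  return Bits;
}

int main() {
  // An explicit map(tofrom: x) that opens a capture's entry list:
  assert(mapTypeBits(true, true, false, false, true) ==
         (MAP_TO | MAP_FROM | MAP_TARGET_PARAM));
}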
- /// \brief Return true if the provided expression is a final array section. A
+ /// Return true if the provided expression is a final array section. A
/// final array section is one whose length can't be proved to be one.
bool isFinalArraySectionExpression(const Expr *E) const {
- auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
// It is not an array section and therefore not a unity-size one.
if (!OASE)
@@ -6266,16 +6726,16 @@ private:
if (OASE->getColonLoc().isInvalid())
return false;
- auto *Length = OASE->getLength();
+ const Expr *Length = OASE->getLength();
// If we don't have a length we have to check if the array has size 1
// for this dimension. Also, we should always expect a length if the
// base type is a pointer.
if (!Length) {
- auto BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
- OASE->getBase()->IgnoreParenImpCasts())
- .getCanonicalType();
- if (auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
+ QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
+ OASE->getBase()->IgnoreParenImpCasts())
+ .getCanonicalType();
+ if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
return ATy->getSize().getSExtValue() != 1;
// If we don't have a constant dimension length, we have to consider
// the current section as having any size, so it is not necessarily
@@ -6291,7 +6751,7 @@ private:
return ConstLength.getSExtValue() != 1;
}
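// A hedged model of the unity-length test above; the encoding (-1 meaning
// "not a compile-time constant") and the name are hypothetical:

#include <cassert>

// A section is final unless its length is provably one.
bool isFinalSection(long ConstLength /* -1 if unknown */) {
  return ConstLength != 1;
}

int main() {
  assert(isFinalSection(-1)); // a[0:n]: runtime length, treated as final
  assert(!isFinalSection(1)); // a[2:1]: unity length, not final
  assert(isFinalSection(8));  // a[0:8]: length 8, final
}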
- /// \brief Generate the base pointers, section pointers, sizes and map type
+ /// Generate the base pointers, section pointers, sizes and map type
/// bits for the provided map type, map modifier, and expression components.
/// \a IsFirstComponent should be set to true if the provided set of
/// components is the first associated with a capture.
@@ -6300,10 +6760,10 @@ private:
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
- bool IsFirstComponentList, bool IsImplicit) const {
-
+ StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
+ bool IsImplicit) const {
// The following summarizes what has to be generated for each map and the
- // types bellow. The generated information is expressed in this order:
+ // types below. The generated information is expressed in this order:
// base pointer, section pointer, size, flags
// (to add to the ones that come from the map type and modifier).
//
@@ -6326,96 +6786,141 @@ private:
// S2 *ps;
//
// map(d)
- // &d, &d, sizeof(double), noflags
+ // &d, &d, sizeof(double), TARGET_PARAM | TO | FROM
//
// map(i)
- // &i, &i, 100*sizeof(int), noflags
+ // &i, &i, 100*sizeof(int), TARGET_PARAM | TO | FROM
//
// map(i[1:23])
- // &i(=&i[0]), &i[1], 23*sizeof(int), noflags
+ // &i(=&i[0]), &i[1], 23*sizeof(int), TARGET_PARAM | TO | FROM
//
// map(p)
- // &p, &p, sizeof(float*), noflags
+ // &p, &p, sizeof(float*), TARGET_PARAM | TO | FROM
//
// map(p[1:24])
- // p, &p[1], 24*sizeof(float), noflags
+ // p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
//
// map(s)
- // &s, &s, sizeof(S2), noflags
+ // &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
//
// map(s.i)
- // &s, &(s.i), sizeof(int), noflags
+ // &s, &(s.i), sizeof(int), TARGET_PARAM | TO | FROM
//
// map(s.s.f)
- // &s, &(s.i.f), 50*sizeof(int), noflags
+ // &s, &(s.s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
//
// map(s.p)
- // &s, &(s.p), sizeof(double*), noflags
+ // &s, &(s.p), sizeof(double*), TARGET_PARAM | TO | FROM
//
- // map(s.p[:22], s.a s.b)
- // &s, &(s.p), sizeof(double*), noflags
- // &(s.p), &(s.p[0]), 22*sizeof(double), ptr_flag
+ // map(to: s.p[:22])
+ // &s, &(s.p), sizeof(double*), TARGET_PARAM (*)
+ // &s, &(s.p), sizeof(double*), MEMBER_OF(1) (**)
+ // &(s.p), &(s.p[0]), 22*sizeof(double),
+ // MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
+  // (*) alloc space for struct members; only this entry is a target parameter
+ // (**) map the pointer (nothing to be mapped in this example) (the compiler
+ // optimizes this entry out, same in the examples below)
+ // (***) map the pointee (map: to)
//
// map(s.ps)
- // &s, &(s.ps), sizeof(S2*), noflags
+ // &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
//
- // map(s.ps->s.i)
- // &s, &(s.ps), sizeof(S2*), noflags
- // &(s.ps), &(s.ps->s.i), sizeof(int), ptr_flag
+ // map(from: s.ps->s.i)
+ // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
+ // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
+ // &(s.ps), &(s.ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
//
- // map(s.ps->ps)
- // &s, &(s.ps), sizeof(S2*), noflags
- // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
+ // map(to: s.ps->ps)
+ // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
+ // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
+ // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | TO
//
// map(s.ps->ps->ps)
- // &s, &(s.ps), sizeof(S2*), noflags
- // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
- // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), ptr_flag
+ // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
+ // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
+ // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
+ // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
//
- // map(s.ps->ps->s.f[:22])
- // &s, &(s.ps), sizeof(S2*), noflags
- // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
- // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), ptr_flag
+ // map(to: s.ps->ps->s.f[:22])
+ // &s, &(s.ps), sizeof(S2*), TARGET_PARAM
+ // &s, &(s.ps), sizeof(S2*), MEMBER_OF(1)
+ // &(s.ps), &(s.ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
+ // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
//
// map(ps)
- // &ps, &ps, sizeof(S2*), noflags
+ // &ps, &ps, sizeof(S2*), TARGET_PARAM | TO | FROM
//
// map(ps->i)
- // ps, &(ps->i), sizeof(int), noflags
+ // ps, &(ps->i), sizeof(int), TARGET_PARAM | TO | FROM
//
// map(ps->s.f)
- // ps, &(ps->s.f[0]), 50*sizeof(float), noflags
+ // ps, &(ps->s.f[0]), 50*sizeof(float), TARGET_PARAM | TO | FROM
//
- // map(ps->p)
- // ps, &(ps->p), sizeof(double*), noflags
+ // map(from: ps->p)
+ // ps, &(ps->p), sizeof(double*), TARGET_PARAM | FROM
//
- // map(ps->p[:22])
- // ps, &(ps->p), sizeof(double*), noflags
- // &(ps->p), &(ps->p[0]), 22*sizeof(double), ptr_flag
+ // map(to: ps->p[:22])
+ // ps, &(ps->p), sizeof(double*), TARGET_PARAM
+ // ps, &(ps->p), sizeof(double*), MEMBER_OF(1)
+ // &(ps->p), &(ps->p[0]), 22*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | TO
//
// map(ps->ps)
- // ps, &(ps->ps), sizeof(S2*), noflags
+ // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM | TO | FROM
//
- // map(ps->ps->s.i)
- // ps, &(ps->ps), sizeof(S2*), noflags
- // &(ps->ps), &(ps->ps->s.i), sizeof(int), ptr_flag
+ // map(from: ps->ps->s.i)
+ // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
+ // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
+ // &(ps->ps), &(ps->ps->s.i), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | FROM
//
- // map(ps->ps->ps)
- // ps, &(ps->ps), sizeof(S2*), noflags
- // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
+ // map(from: ps->ps->ps)
+ // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
+ // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
+ // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ | FROM
//
// map(ps->ps->ps->ps)
- // ps, &(ps->ps), sizeof(S2*), noflags
- // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
- // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), ptr_flag
+ // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
+ // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
+ // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
+ // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), PTR_AND_OBJ | TO | FROM
//
- // map(ps->ps->ps->s.f[:22])
- // ps, &(ps->ps), sizeof(S2*), noflags
- // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
- // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), ptr_flag
+ // map(to: ps->ps->ps->s.f[:22])
+ // ps, &(ps->ps), sizeof(S2*), TARGET_PARAM
+ // ps, &(ps->ps), sizeof(S2*), MEMBER_OF(1)
+ // &(ps->ps), &(ps->ps->ps), sizeof(S2*), MEMBER_OF(1) | PTR_AND_OBJ
+ // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), PTR_AND_OBJ | TO
+ //
+ // map(to: s.f[:22]) map(from: s.p[:33])
+ // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1) +
+  //     sizeof(double*) (*), TARGET_PARAM
+ // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | TO
+ // &s, &(s.p), sizeof(double*), MEMBER_OF(1)
+ // &(s.p), &(s.p[0]), 33*sizeof(double), MEMBER_OF(1) | PTR_AND_OBJ | FROM
+  // (*) allocate contiguous space needed to fit all mapped members even though
+ // we allocate space for members not mapped (in this example,
+ // s.f[22..49] and s.s are not mapped, yet we must allocate space for
+ // them as well because they fall between &s.f[0] and &s.p)
+ //
+ // map(from: s.f[:22]) map(to: ps->p[:33])
+ // &s, &(s.f[0]), 22*sizeof(float), TARGET_PARAM | FROM
+ // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
+ // ps, &(ps->p), sizeof(double*), MEMBER_OF(2) (*)
+ // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(2) | PTR_AND_OBJ | TO
+ // (*) the struct this entry pertains to is the 2nd element in the list of
+ // arguments, hence MEMBER_OF(2)
+ //
+ // map(from: s.f[:22], s.s) map(to: ps->p[:33])
+ // &s, &(s.f[0]), 50*sizeof(float) + sizeof(struct S1), TARGET_PARAM
+ // &s, &(s.f[0]), 22*sizeof(float), MEMBER_OF(1) | FROM
+ // &s, &(s.s), sizeof(struct S1), MEMBER_OF(1) | FROM
+ // ps, &(ps->p), sizeof(S2*), TARGET_PARAM
+ // ps, &(ps->p), sizeof(double*), MEMBER_OF(4) (*)
+ // &(ps->p), &(ps->p[0]), 33*sizeof(double), MEMBER_OF(4) | PTR_AND_OBJ | TO
+ // (*) the struct this entry pertains to is the 4th element in the list
+ // of arguments, hence MEMBER_OF(4)
// Track if the map information being generated is the first for a capture.
bool IsCaptureFirstInfo = IsFirstComponentList;
+ bool IsLink = false; // Is this variable a "declare target link"?
// Scan the components from the base to the complete expression.
auto CI = Components.rbegin();
@@ -6425,16 +6930,25 @@ private:
// Track if the map information being generated is the first for a list of
// components.
bool IsExpressionFirstInfo = true;
- llvm::Value *BP = nullptr;
+ Address BP = Address::invalid();
- if (auto *ME = dyn_cast<MemberExpr>(I->getAssociatedExpression())) {
+ if (isa<MemberExpr>(I->getAssociatedExpression())) {
// The base is the 'this' pointer. The content of the pointer is going
// to be the base of the field being mapped.
- BP = CGF.EmitScalarExpr(ME->getBase());
+ BP = CGF.LoadCXXThisAddress();
} else {
// The base is the reference to the variable.
// BP = &Var.
- BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer();
+ BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
+ if (const auto *VD =
+ dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
+ if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(VD))
+ if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ IsLink = true;
+ BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ }
+ }
// If the variable is a pointer and is being dereferenced (i.e. is not
// the last component), the base has to be the pointer itself, not its
@@ -6442,10 +6956,7 @@ private:
QualType Ty =
I->getAssociatedDeclaration()->getType().getNonReferenceType();
if (Ty->isAnyPointerType() && std::next(I) != CE) {
- auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(BP, Ty);
- BP = CGF.EmitLoadOfPointerLValue(PtrAddr.getAddress(),
- Ty->castAs<PointerType>())
- .getPointer();
+ BP = CGF.EmitLoadOfPointer(BP, Ty->castAs<PointerType>());
// We do not need to generate individual map information for the
// pointer; it can be associated with the combined storage.
@@ -6453,8 +6964,41 @@ private:
}
}
- uint64_t DefaultFlags = IsImplicit ? OMP_MAP_IMPLICIT : 0;
+ // Track whether a component of the list should be marked as MEMBER_OF some
+ // combined entry (for partial structs). Only the first PTR_AND_OBJ entry
+  // in a component list should be marked as MEMBER_OF; all subsequent entries
+ // do not belong to the base struct. E.g.
+ // struct S2 s;
+ // s.ps->ps->ps->f[:]
+ // (1) (2) (3) (4)
+ // ps(1) is a member pointer, ps(2) is a pointee of ps(1), so it is a
+ // PTR_AND_OBJ entry; the PTR is ps(1), so MEMBER_OF the base struct. ps(3)
+  // is the pointee of ps(2) which is not a member of struct s, so it should not
+ // be marked as such (it is still PTR_AND_OBJ).
+ // The variable is initialized to false so that PTR_AND_OBJ entries which
+ // are not struct members are not considered (e.g. array of pointers to
+ // data).
+ bool ShouldBeMemberOf = false;
+
+ // Variable keeping track of whether or not we have encountered a component
+ // in the component list which is a member expression. Useful when we have a
+ // pointer or a final array section, in which case it is the previous
+ // component in the list which tells us whether we have a member expression.
+ // E.g. X.f[:]
+ // While processing the final array section "[:]" it is "f" which tells us
+ // whether we are dealing with a member of a declared struct.
+ const MemberExpr *EncounteredME = nullptr;
+
for (; I != CE; ++I) {
+    // If the current component is a member of a struct (parent struct), mark it.
+ if (!EncounteredME) {
+ EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
+ // If we encounter a PTR_AND_OBJ entry from now on it should be marked
+ // as MEMBER_OF the parent struct.
+ if (EncounteredME)
+ ShouldBeMemberOf = true;
+ }
+
auto Next = std::next(I);
// We need to generate the addresses and sizes if this is the last
@@ -6472,14 +7016,12 @@ private:
const auto *OASE =
dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
bool IsPointer =
- (OASE &&
- OMPArraySectionExpr::getBaseOriginalType(OASE)
- .getCanonicalType()
- ->isAnyPointerType()) ||
+ (OASE && OMPArraySectionExpr::getBaseOriginalType(OASE)
+ .getCanonicalType()
+ ->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
if (Next == CE || IsPointer || IsFinalArraySection) {
-
// If this is not the last component, we expect the pointer to be
// associated with an array expression or member expression.
assert((Next == CE ||
@@ -6488,44 +7030,68 @@ private:
isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
"Unexpected expression");
- llvm::Value *LB =
- CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer();
- auto *Size = getExprTypeSize(I->getAssociatedExpression());
-
- // If we have a member expression and the current component is a
- // reference, we have to map the reference too. Whenever we have a
- // reference, the section that reference refers to is going to be a
- // load instruction from the storage assigned to the reference.
- if (isa<MemberExpr>(I->getAssociatedExpression()) &&
- I->getAssociatedDeclaration()->getType()->isReferenceType()) {
- auto *LI = cast<llvm::LoadInst>(LB);
- auto *RefAddr = LI->getPointerOperand();
-
- BasePointers.push_back(BP);
- Pointers.push_back(RefAddr);
- Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
- Types.push_back(DefaultFlags |
- getMapTypeBits(
- /*MapType*/ OMPC_MAP_alloc,
- /*MapTypeModifier=*/OMPC_MAP_unknown,
- !IsExpressionFirstInfo, IsCaptureFirstInfo));
- IsExpressionFirstInfo = false;
- IsCaptureFirstInfo = false;
- // The reference will be the next base address.
- BP = RefAddr;
- }
+ Address LB =
+ CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
+ llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
+
+ // If this component is a pointer inside the base struct then we don't
+ // need to create any entry for it - it will be combined with the object
+ // it is pointing to into a single PTR_AND_OBJ entry.
+ bool IsMemberPointer =
+ IsPointer && EncounteredME &&
+ (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
+ EncounteredME);
+ if (!IsMemberPointer) {
+ BasePointers.push_back(BP.getPointer());
+ Pointers.push_back(LB.getPointer());
+ Sizes.push_back(Size);
+
+ // We need to add a pointer flag for each map that comes from the
+ // same expression except for the first one. We also need to signal
+ // this map is the first one that relates with the current capture
+ // (there is a set of entries for each capture).
+ OpenMPOffloadMappingFlags Flags = getMapTypeBits(
+ MapType, MapTypeModifier, IsImplicit,
+ !IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);
+
+ if (!IsExpressionFirstInfo) {
+ // If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
+ // then we reset the TO/FROM/ALWAYS/DELETE flags.
+ if (IsPointer)
+ Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
+ OMP_MAP_DELETE);
+
+ if (ShouldBeMemberOf) {
+ // Set placeholder value MEMBER_OF=FFFF to indicate that the flag
+ // should be later updated with the correct value of MEMBER_OF.
+ Flags |= OMP_MAP_MEMBER_OF;
+ // From now on, all subsequent PTR_AND_OBJ entries should not be
+ // marked as MEMBER_OF.
+ ShouldBeMemberOf = false;
+ }
+ }
- BasePointers.push_back(BP);
- Pointers.push_back(LB);
- Sizes.push_back(Size);
+ Types.push_back(Flags);
+ }
- // We need to add a pointer flag for each map that comes from the
- // same expression except for the first one. We also need to signal
- // this map is the first one that relates with the current capture
- // (there is a set of entries for each capture).
- Types.push_back(DefaultFlags | getMapTypeBits(MapType, MapTypeModifier,
- !IsExpressionFirstInfo,
- IsCaptureFirstInfo));
+ // If we have encountered a member expression so far, keep track of the
+ // mapped member. If the parent is "*this", then the value declaration
+ // is nullptr.
+ if (EncounteredME) {
+ const auto *FD = dyn_cast<FieldDecl>(EncounteredME->getMemberDecl());
+ unsigned FieldIndex = FD->getFieldIndex();
+
+ // Update info about the lowest and highest elements for this struct
+ if (!PartialStruct.Base.isValid()) {
+ PartialStruct.LowestElem = {FieldIndex, LB};
+ PartialStruct.HighestElem = {FieldIndex, LB};
+ PartialStruct.Base = BP;
+ } else if (FieldIndex < PartialStruct.LowestElem.first) {
+ PartialStruct.LowestElem = {FieldIndex, LB};
+ } else if (FieldIndex > PartialStruct.HighestElem.first) {
+ PartialStruct.HighestElem = {FieldIndex, LB};
+ }
+ }
// If we have a final array section, we are done with this expression.
if (IsFinalArraySection)
@@ -6541,11 +7107,11 @@ private:
}
}
- /// \brief Return the adjusted map modifiers if the declaration a capture
- /// refers to appears in a first-private clause. This is expected to be used
- /// only with directives that start with 'target'.
- unsigned adjustMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap,
- unsigned CurrentModifiers) {
+ /// Return the adjusted map modifiers if the declaration a capture refers to
+ /// appears in a first-private clause. This is expected to be used only with
+ /// directives that start with 'target'.
+ MappableExprsHandler::OpenMPOffloadMappingFlags
+ getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
assert(Cap.capturesVariable() && "Expected capture by reference only!");
// A first private variable captured by reference will use only the
@@ -6554,15 +7120,29 @@ private:
if (FirstPrivateDecls.count(Cap.getCapturedVar()))
return MappableExprsHandler::OMP_MAP_PRIVATE |
MappableExprsHandler::OMP_MAP_TO;
- // Reduction variable will use only the 'private ptr' and 'map to_from'
- // flag.
- if (ReductionDecls.count(Cap.getCapturedVar())) {
- return MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM;
- }
+ return MappableExprsHandler::OMP_MAP_TO |
+ MappableExprsHandler::OMP_MAP_FROM;
+ }
+
+ static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
+    // MEMBER_OF is encoded in the 16 MSBs of the flag, so shift left by 48 bits.
+ return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
+ << 48);
+ }
+
+ static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
+ OpenMPOffloadMappingFlags MemberOfFlag) {
+ // If the entry is PTR_AND_OBJ but has not been marked with the special
+ // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
+ // marked as MEMBER_OF.
+ if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
+ ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
+ return;
- // We didn't modify anything.
- return CurrentModifiers;
+ // Reset the placeholder value to prepare the flag for the assignment of the
+ // proper MEMBER_OF value.
+ Flags &= ~OMP_MAP_MEMBER_OF;
+ Flags |= MemberOfFlag;
}
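// A standalone mirror of setCorrectMemberOfFlag, using stand-in values for
// everything except OMP_MAP_MEMBER_OF (the names are hypothetical):

#include <cassert>
#include <cstdint>

constexpr uint64_t MemberOf = 0xffff000000000000ULL; // placeholder: all ones
constexpr uint64_t PtrAndObj = 0x10;                 // stand-in value

void fixMemberOf(uint64_t &Flags, uint64_t MemberOfFlag) {
  // PTR_AND_OBJ entries without the 0xFFFF placeholder keep their flags.
  if ((Flags & PtrAndObj) && ((Flags & MemberOf) != MemberOf))
    return;
  Flags &= ~MemberOf;    // drop the placeholder ...
  Flags |= MemberOfFlag; // ... and install the real MEMBER_OF value
}

int main() {
  uint64_t Marked = PtrAndObj | MemberOf; // placeholder set: gets updated
  fixMemberOf(Marked, uint64_t(2) << 48); // MEMBER_OF(2)
  assert(Marked >> 48 == 2);

  uint64_t Unmarked = PtrAndObj; // no placeholder: left untouched
  fixMemberOf(Unmarked, uint64_t(2) << 48);
  assert(Unmarked == PtrAndObj);
}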
public:
@@ -6573,58 +7153,54 @@ public:
for (const auto *D : C->varlists())
FirstPrivateDecls.insert(
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
- for (const auto *C : Dir.getClausesOfKind<OMPReductionClause>()) {
- for (const auto *D : C->varlists()) {
- ReductionDecls.insert(
- cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
- }
- }
// Extract device pointer clause information.
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
DevPointersMap[L.first].push_back(L.second);
}
- /// \brief Generate all the base pointers, section pointers, sizes and map
+ /// Generate code for the combined entry if we have a partially mapped struct
+ /// and take care of the mapping flags of the arguments corresponding to
+ /// individual struct members.
+ void emitCombinedEntry(MapBaseValuesArrayTy &BasePointers,
+ MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
+ MapFlagsArrayTy &Types, MapFlagsArrayTy &CurTypes,
+ const StructRangeInfoTy &PartialStruct) const {
+    // Base is the address of the struct itself.
+ BasePointers.push_back(PartialStruct.Base.getPointer());
+ // Pointer is the address of the lowest element
+ llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
+ Pointers.push_back(LB);
+ // Size is (addr of {highest+1} element) - (addr of lowest element)
+ llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
+ llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
+ llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
+ llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
+ llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.SizeTy,
+                                                  /*isSigned=*/false);
+ Sizes.push_back(Size);
+ // Map type is always TARGET_PARAM
+ Types.push_back(OMP_MAP_TARGET_PARAM);
+ // Remove TARGET_PARAM flag from the first element
+ (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
+
+ // All other current entries will be MEMBER_OF the combined entry
+ // (except for PTR_AND_OBJ entries which do not have a placeholder value
+ // 0xFFFF in the MEMBER_OF field).
+ OpenMPOffloadMappingFlags MemberOfFlag =
+ getMemberOfFlag(BasePointers.size() - 1);
+ for (auto &M : CurTypes)
+ setCorrectMemberOfFlag(M, MemberOfFlag);
+ }
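// A hedged illustration of the combined-entry size computed above: the bytes
// from the lowest mapped member to one past the highest one. The field layout
// here is illustrative, not the exact S1/S2 from the comment earlier:

#include <cassert>
#include <cstddef>

struct S1 { int i; float f[50]; };
struct S2 { double d; S1 s; double *p; };

int main() {
  S2 s;
  // For map(to: s.s.f[:22]) map(from: s.p[:33]), LB is the lowest mapped
  // member and HB one past the highest; the gap in between is allocated too.
  char *LB = reinterpret_cast<char *>(&s.s.f[0]);
  char *HB = reinterpret_cast<char *>(&s.p + 1);
  size_t CombinedSize = static_cast<size_t>(HB - LB);
  assert(CombinedSize >= 22 * sizeof(float) + sizeof(double *));
}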
+
+ /// Generate all the base pointers, section pointers, sizes and map
/// types for the extracted mappable expressions. Also, for each item that
/// relates with a device pointer, a pair of the relevant declaration and
/// index where it occurs is appended to the device pointers info array.
void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
MapFlagsArrayTy &Types) const {
- BasePointers.clear();
- Pointers.clear();
- Sizes.clear();
- Types.clear();
-
- struct MapInfo {
- /// Kind that defines how a device pointer has to be returned.
- enum ReturnPointerKind {
- // Don't have to return any pointer.
- RPK_None,
- // Pointer is the base of the declaration.
- RPK_Base,
- // Pointer is a member of the base declaration - 'this'
- RPK_Member,
- // Pointer is a reference and a member of the base declaration - 'this'
- RPK_MemberReference,
- };
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
- OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
- OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
- ReturnPointerKind ReturnDevicePointer = RPK_None;
- bool IsImplicit = false;
-
- MapInfo() = default;
- MapInfo(
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
- ReturnPointerKind ReturnDevicePointer, bool IsImplicit)
- : Components(Components), MapType(MapType),
- MapTypeModifier(MapTypeModifier),
- ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
- };
-
// We have to process the component lists that relate with the same
// declaration in a single chunk so that we can generate the map flags
// correctly. Therefore, we organize all lists in a map.
@@ -6636,7 +7212,7 @@ public:
const ValueDecl *D,
OMPClauseMappableExprCommon::MappableExprComponentListRef L,
OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
- MapInfo::ReturnPointerKind ReturnDevicePointer, bool IsImplicit) {
+ bool ReturnDevicePointer, bool IsImplicit) {
const ValueDecl *VD =
D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
@@ -6644,33 +7220,39 @@ public:
};
// FIXME: MSVC 2013 seems to require this-> to find member CurDir.
- for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
- for (auto L : C->component_lists()) {
+ for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
+ for (const auto &L : C->component_lists()) {
InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
- MapInfo::RPK_None, C->isImplicit());
+ /*ReturnDevicePointer=*/false, C->isImplicit());
}
- for (auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
- for (auto L : C->component_lists()) {
+ for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
+ for (const auto &L : C->component_lists()) {
InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
- MapInfo::RPK_None, C->isImplicit());
+ /*ReturnDevicePointer=*/false, C->isImplicit());
}
- for (auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
- for (auto L : C->component_lists()) {
+ for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
+ for (const auto &L : C->component_lists()) {
InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
- MapInfo::RPK_None, C->isImplicit());
+ /*ReturnDevicePointer=*/false, C->isImplicit());
}
// Look at the use_device_ptr clause information and mark the existing map
// entries as such. If there is no map information for an entry in the
// use_device_ptr list, we create one with map type 'alloc' and zero size
- // section. It is the user fault if that was not mapped before.
+  // section. It is the user's fault if that was not mapped before. If there is
+ // no map information and the pointer is a struct member, then we defer the
+ // emission of that entry until the whole struct has been processed.
+ llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
+ DeferredInfo;
+
// FIXME: MSVC 2013 seems to require this-> to find member CurDir.
- for (auto *C : this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>())
- for (auto L : C->component_lists()) {
+ for (const auto *C :
+ this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>()) {
+ for (const auto &L : C->component_lists()) {
assert(!L.second.empty() && "Not expecting empty list of components!");
const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- auto *IE = L.second.back().getAssociatedExpression();
+ const Expr *IE = L.second.back().getAssociatedExpression();
// If the first component is a member expression, we have to look into
// 'this', which maps to null in the map of map information. Otherwise
// look directly for the information.
@@ -6686,113 +7268,135 @@ public:
// If we found a map entry, signal that the pointer has to be returned
// and move on to the next declaration.
if (CI != It->second.end()) {
- CI->ReturnDevicePointer = isa<MemberExpr>(IE)
- ? (VD->getType()->isReferenceType()
- ? MapInfo::RPK_MemberReference
- : MapInfo::RPK_Member)
- : MapInfo::RPK_Base;
+ CI->ReturnDevicePointer = true;
continue;
}
}
// We didn't find any match in our map information - generate a zero
- // size array section.
+ // size array section - if the pointer is a struct member we defer this
+ // action until the whole struct has been processed.
// FIXME: MSVC 2013 seems to require this-> to find member CGF.
- llvm::Value *Ptr =
- this->CGF
- .EmitLoadOfLValue(this->CGF.EmitLValue(IE), SourceLocation())
- .getScalarVal();
- BasePointers.push_back({Ptr, VD});
- Pointers.push_back(Ptr);
- Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
- Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
+ if (isa<MemberExpr>(IE)) {
+ // Insert the pointer into Info to be processed by
+ // generateInfoForComponentList. Because it is a member pointer
+          // without a pointee, no entry will be generated for it; therefore
+ // we need to generate one after the whole struct has been processed.
+ // Nonetheless, generateInfoForComponentList must be called to take
+ // the pointer into account for the calculation of the range of the
+ // partial struct.
+ InfoGen(nullptr, L.second, OMPC_MAP_unknown, OMPC_MAP_unknown,
+ /*ReturnDevicePointer=*/false, C->isImplicit());
+ DeferredInfo[nullptr].emplace_back(IE, VD);
+ } else {
+ llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
+ this->CGF.EmitLValue(IE), IE->getExprLoc());
+ BasePointers.emplace_back(Ptr, VD);
+ Pointers.push_back(Ptr);
+ Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
+ Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
+ }
}
+ }
- for (auto &M : Info) {
+ for (const auto &M : Info) {
// We need to know when we generate information for the first component
// associated with a capture, because the mapping flags depend on it.
bool IsFirstComponentList = true;
- for (MapInfo &L : M.second) {
+
+ // Temporary versions of arrays
+ MapBaseValuesArrayTy CurBasePointers;
+ MapValuesArrayTy CurPointers;
+ MapValuesArrayTy CurSizes;
+ MapFlagsArrayTy CurTypes;
+ StructRangeInfoTy PartialStruct;
+
+ for (const MapInfo &L : M.second) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
// Remember the current base pointer index.
- unsigned CurrentBasePointersIdx = BasePointers.size();
+ unsigned CurrentBasePointersIdx = CurBasePointers.size();
// FIXME: MSVC 2013 seems to require this-> to find the member method.
this->generateInfoForComponentList(
- L.MapType, L.MapTypeModifier, L.Components, BasePointers, Pointers,
- Sizes, Types, IsFirstComponentList, L.IsImplicit);
+ L.MapType, L.MapTypeModifier, L.Components, CurBasePointers,
+ CurPointers, CurSizes, CurTypes, PartialStruct,
+ IsFirstComponentList, L.IsImplicit);
// If this entry relates with a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
- if (IsFirstComponentList &&
- L.ReturnDevicePointer != MapInfo::RPK_None) {
- // If the pointer is not the base of the map, we need to skip the
- // base. If it is a reference in a member field, we also need to skip
- // the map of the reference.
- if (L.ReturnDevicePointer != MapInfo::RPK_Base) {
- ++CurrentBasePointersIdx;
- if (L.ReturnDevicePointer == MapInfo::RPK_MemberReference)
- ++CurrentBasePointersIdx;
- }
- assert(BasePointers.size() > CurrentBasePointersIdx &&
+ if (L.ReturnDevicePointer) {
+ assert(CurBasePointers.size() > CurrentBasePointersIdx &&
"Unexpected number of mapped base pointers.");
- auto *RelevantVD = L.Components.back().getAssociatedDeclaration();
+ const ValueDecl *RelevantVD =
+ L.Components.back().getAssociatedDeclaration();
assert(RelevantVD &&
"No relevant declaration related with device pointer??");
- BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
- Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
+ CurBasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
+ CurTypes[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
}
IsFirstComponentList = false;
}
+
+ // Append any pending zero-length pointers which are struct members and
+ // used with use_device_ptr.
+ auto CI = DeferredInfo.find(M.first);
+ if (CI != DeferredInfo.end()) {
+ for (const DeferredDevicePtrEntryTy &L : CI->second) {
+ llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer();
+ llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
+ this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
+ CurBasePointers.emplace_back(BasePtr, L.VD);
+ CurPointers.push_back(Ptr);
+ CurSizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
+ // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
+ // value MEMBER_OF=FFFF so that the entry is later updated with the
+ // correct value of MEMBER_OF.
+ CurTypes.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
+ OMP_MAP_MEMBER_OF);
+ }
+ }
+
+ // If there is an entry in PartialStruct it means we have a struct with
+ // individual members mapped. Emit an extra combined entry.
+ if (PartialStruct.Base.isValid())
+ emitCombinedEntry(BasePointers, Pointers, Sizes, Types, CurTypes,
+ PartialStruct);
+
+ // We need to append the results of this capture to what we already have.
+ BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
+ Pointers.append(CurPointers.begin(), CurPointers.end());
+ Sizes.append(CurSizes.begin(), CurSizes.end());
+ Types.append(CurTypes.begin(), CurTypes.end());
}
}
- /// \brief Generate the base pointers, section pointers, sizes and map types
+ /// Generate the base pointers, section pointers, sizes and map types
/// associated to a given capture.
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
llvm::Value *Arg,
MapBaseValuesArrayTy &BasePointers,
MapValuesArrayTy &Pointers,
- MapValuesArrayTy &Sizes,
- MapFlagsArrayTy &Types) const {
+ MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
+ StructRangeInfoTy &PartialStruct) const {
assert(!Cap->capturesVariableArrayType() &&
"Not expecting to generate map info for a variable array type!");
- BasePointers.clear();
- Pointers.clear();
- Sizes.clear();
- Types.clear();
-
// We need to know when we are generating information for the first component
// associated with a capture, because the mapping flags depend on it.
bool IsFirstComponentList = true;
- const ValueDecl *VD =
- Cap->capturesThis()
- ? nullptr
- : cast<ValueDecl>(Cap->getCapturedVar()->getCanonicalDecl());
+ const ValueDecl *VD = Cap->capturesThis()
+ ? nullptr
+ : Cap->getCapturedVar()->getCanonicalDecl();
// If this declaration appears in an is_device_ptr clause we just have to
// pass the pointer by value. If it is a reference to a declaration, we just
- // pass its value, otherwise, if it is a member expression, we need to map
- // 'to' the field.
- if (!VD) {
- auto It = DevPointersMap.find(VD);
- if (It != DevPointersMap.end()) {
- for (auto L : It->second) {
- generateInfoForComponentList(
- /*MapType=*/OMPC_MAP_to, /*MapTypeModifier=*/OMPC_MAP_unknown, L,
- BasePointers, Pointers, Sizes, Types, IsFirstComponentList,
- /*IsImplicit=*/false);
- IsFirstComponentList = false;
- }
- return;
- }
- } else if (DevPointersMap.count(VD)) {
- BasePointers.push_back({Arg, VD});
+ // pass its value.
+ if (DevPointersMap.count(VD)) {
+ BasePointers.emplace_back(Arg, VD);
Pointers.push_back(Arg);
Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
@@ -6800,35 +7404,63 @@ public:
}
// FIXME: MSVC 2013 seems to require this-> to find member CurDir.
- for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
- for (auto L : C->decl_component_lists(VD)) {
+ for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
+ for (const auto &L : C->decl_component_lists(VD)) {
assert(L.first == VD &&
"We got information for the wrong declaration??");
assert(!L.second.empty() &&
"Not expecting declaration with no component lists.");
- generateInfoForComponentList(
- C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
- Pointers, Sizes, Types, IsFirstComponentList, C->isImplicit());
+ generateInfoForComponentList(C->getMapType(), C->getMapTypeModifier(),
+ L.second, BasePointers, Pointers, Sizes,
+ Types, PartialStruct, IsFirstComponentList,
+ C->isImplicit());
IsFirstComponentList = false;
}
+ }
- return;
+ /// Generate the base pointers, section pointers, sizes and map types
+ /// associated with the declare target link variables.
+ void generateInfoForDeclareTargetLink(MapBaseValuesArrayTy &BasePointers,
+ MapValuesArrayTy &Pointers,
+ MapValuesArrayTy &Sizes,
+ MapFlagsArrayTy &Types) const {
+ // Map other list items in the map clause which are not captured variables
+      // but "declare target link" global variables.
+ for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
+ for (const auto &L : C->component_lists()) {
+ if (!L.first)
+ continue;
+ const auto *VD = dyn_cast<VarDecl>(L.first);
+ if (!VD)
+ continue;
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(VD);
+ if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
+ continue;
+ StructRangeInfoTy PartialStruct;
+ generateInfoForComponentList(
+ C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
+ Pointers, Sizes, Types, PartialStruct,
+ /*IsFirstComponentList=*/true, C->isImplicit());
+ assert(!PartialStruct.Base.isValid() &&
+ "No partial structs for declare target link expected.");
+ }
+ }
}
- /// \brief Generate the default map information for a given capture \a CI,
+ /// Generate the default map information for a given capture \a CI,
/// record field declaration \a RI and captured value \a CV.
void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
const FieldDecl &RI, llvm::Value *CV,
MapBaseValuesArrayTy &CurBasePointers,
MapValuesArrayTy &CurPointers,
MapValuesArrayTy &CurSizes,
- MapFlagsArrayTy &CurMapTypes) {
-
+ MapFlagsArrayTy &CurMapTypes) const {
// Do the default mapping.
if (CI.capturesThis()) {
CurBasePointers.push_back(CV);
CurPointers.push_back(CV);
- const PointerType *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
+ const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
// Default map type.
CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
@@ -6843,7 +7475,7 @@ public:
} else {
// Pointers are implicitly mapped with a zero size and no flags
// (other than first map that is added for all implicit maps).
- CurMapTypes.push_back(0u);
+ CurMapTypes.push_back(OMP_MAP_NONE);
CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
}
} else {
@@ -6851,30 +7483,30 @@ public:
CurBasePointers.push_back(CV);
CurPointers.push_back(CV);
- const ReferenceType *PtrTy =
- cast<ReferenceType>(RI.getType().getTypePtr());
+ const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
QualType ElementType = PtrTy->getPointeeType();
CurSizes.push_back(CGF.getTypeSize(ElementType));
// The default map type for a scalar/complex type is 'to' because by
// default the value doesn't have to be retrieved. For an aggregate
// type, the default is 'tofrom'.
- CurMapTypes.emplace_back(adjustMapModifiersForPrivateClauses(
- CI, ElementType->isAggregateType() ? (OMP_MAP_TO | OMP_MAP_FROM)
- : OMP_MAP_TO));
+ CurMapTypes.push_back(getMapModifiersForPrivateClauses(CI));
}
// Every default map produces a single argument which is a target parameter.
CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
+
+ // Add flag stating this is an implicit map.
+ CurMapTypes.back() |= OMP_MAP_IMPLICIT;
}
};
enum OpenMPOffloadingReservedDeviceIDs {
- /// \brief Device ID if the device was not defined, runtime should get it
+  /// Device ID if the device was not defined; the runtime should get it
/// from environment variables in the spec.
OMP_DEVICEID_UNDEF = -1,
};
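// Sketch: when no device() clause is present, the emitted device id is the
// reserved -1 and the runtime picks the default device. A hypothetical model
// of that convention (names are not from this patch):

#include <cassert>
#include <cstdint>

constexpr int64_t DeviceIdUndef = -1; // mirrors OMP_DEVICEID_UNDEF

int64_t deviceIdFor(bool HasDeviceClause, int64_t ClauseValue) {
  return HasDeviceClause ? ClauseValue : DeviceIdUndef;
}

int main() {
  assert(deviceIdFor(false, 0) == DeviceIdUndef); // no device() clause
  assert(deviceIdFor(true, 3) == 3);              // device(3)
}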
} // anonymous namespace
-/// \brief Emit the arrays used to pass the captures and map information to the
+/// Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
static void
@@ -6884,8 +7516,8 @@ emitOffloadingArrays(CodeGenFunction &CGF,
MappableExprsHandler::MapValuesArrayTy &Sizes,
MappableExprsHandler::MapFlagsArrayTy &MapTypes,
CGOpenMPRuntime::TargetDataInfo &Info) {
- auto &CGM = CGF.CGM;
- auto &Ctx = CGF.getContext();
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &Ctx = CGF.getContext();
// Reset the array information.
Info.clearArrayInfo();
@@ -6895,7 +7527,7 @@ emitOffloadingArrays(CodeGenFunction &CGF,
// Detect if we have any capture size requiring runtime evaluation of the
// size so that a constant array could eventually be used.
bool hasRuntimeEvaluationCaptureSize = false;
- for (auto *S : Sizes)
+ for (llvm::Value *S : Sizes)
if (!isa<llvm::Constant>(S)) {
hasRuntimeEvaluationCaptureSize = true;
break;
@@ -6924,48 +7556,53 @@ emitOffloadingArrays(CodeGenFunction &CGF,
// We expect all the sizes to be constant, so we collect them to create
// a constant array.
SmallVector<llvm::Constant *, 16> ConstSizes;
- for (auto S : Sizes)
+ for (llvm::Value *S : Sizes)
ConstSizes.push_back(cast<llvm::Constant>(S));
auto *SizesArrayInit = llvm::ConstantArray::get(
llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
+ std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
auto *SizesArrayGbl = new llvm::GlobalVariable(
CGM.getModule(), SizesArrayInit->getType(),
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- SizesArrayInit, ".offload_sizes");
+ SizesArrayInit, Name);
SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
Info.SizesArray = SizesArrayGbl;
}
// The map types are always constant so we don't need to generate code to
// fill arrays. Instead, we create an array constant.
+ SmallVector<uint64_t, 4> Mapping(MapTypes.size(), 0);
+ llvm::copy(MapTypes, Mapping.begin());
llvm::Constant *MapTypesArrayInit =
- llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
+ llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
+ std::string MaptypesName =
+ CGM.getOpenMPRuntime().getName({"offload_maptypes"});
auto *MapTypesArrayGbl = new llvm::GlobalVariable(
CGM.getModule(), MapTypesArrayInit->getType(),
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- MapTypesArrayInit, ".offload_maptypes");
+ MapTypesArrayInit, MaptypesName);
MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
Info.MapTypesArray = MapTypesArrayGbl;
- for (unsigned i = 0; i < Info.NumberOfPtrs; ++i) {
- llvm::Value *BPVal = *BasePointers[i];
+ for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
+ llvm::Value *BPVal = *BasePointers[I];
llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.BasePointersArray, 0, i);
+ Info.BasePointersArray, 0, I);
BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(BPVal, BPAddr);
if (Info.requiresDevicePointerInfo())
- if (auto *DevVD = BasePointers[i].getDevicePtrDecl())
- Info.CaptureDeviceAddrMap.insert(std::make_pair(DevVD, BPAddr));
+ if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
+ Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
- llvm::Value *PVal = Pointers[i];
+ llvm::Value *PVal = Pointers[I];
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.PointersArray, 0, i);
+ Info.PointersArray, 0, I);
P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
@@ -6976,22 +7613,22 @@ emitOffloadingArrays(CodeGenFunction &CGF,
llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
Info.SizesArray,
/*Idx0=*/0,
- /*Idx1=*/i);
+ /*Idx1=*/I);
Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
CGF.Builder.CreateStore(
- CGF.Builder.CreateIntCast(Sizes[i], CGM.SizeTy, /*isSigned=*/true),
+ CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
SAddr);
}
}
}
}
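// A host-side model of the four parallel arrays materialized above; the
// struct and field names are hypothetical, and the real arrays are LLVM
// globals and allocas rather than vectors:

#include <cstdint>
#include <vector>

struct OffloadArrays {
  std::vector<void *> BasePointers; // base of each mapped entity
  std::vector<void *> Pointers;     // begin of the section to transfer
  std::vector<uint64_t> Sizes;      // bytes per entry
  std::vector<uint64_t> MapTypes;   // constant flag bits per entry
};

int main() {
  double d = 0.0;
  OffloadArrays A;
  // map(tofrom: d) contributes one entry to every array.
  A.BasePointers.push_back(&d);
  A.Pointers.push_back(&d);
  A.Sizes.push_back(sizeof(double));
  A.MapTypes.push_back(0x23); // stand-in for TO | FROM | TARGET_PARAM
  return A.Pointers.size() == A.Sizes.size() ? 0 : 1;
}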
-/// \brief Emit the arguments to be passed to the runtime library based on the
+/// Emit the arguments to be passed to the runtime library based on the
/// arrays of pointers, sizes and map types.
static void emitOffloadingArraysArgument(
CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
- auto &CGM = CGF.CGM;
+ CodeGenModule &CGM = CGF.CGM;
if (Info.NumberOfPtrs) {
BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
@@ -7023,86 +7660,27 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
llvm::Value *OutlinedFn,
llvm::Value *OutlinedFnID,
- const Expr *IfCond, const Expr *Device,
- ArrayRef<llvm::Value *> CapturedVars) {
+ const Expr *IfCond, const Expr *Device) {
if (!CGF.HaveInsertPoint())
return;
assert(OutlinedFn && "Invalid outlined function!");
- // Fill up the arrays with all the captured variables.
- MappableExprsHandler::MapValuesArrayTy KernelArgs;
- MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
- MappableExprsHandler::MapValuesArrayTy Pointers;
- MappableExprsHandler::MapValuesArrayTy Sizes;
- MappableExprsHandler::MapFlagsArrayTy MapTypes;
-
- MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
- MappableExprsHandler::MapValuesArrayTy CurPointers;
- MappableExprsHandler::MapValuesArrayTy CurSizes;
- MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
-
- // Get mappable expression information.
- MappableExprsHandler MEHandler(D, CGF);
-
- const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
- auto RI = CS.getCapturedRecordDecl()->field_begin();
- auto CV = CapturedVars.begin();
- for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
- CE = CS.capture_end();
- CI != CE; ++CI, ++RI, ++CV) {
- CurBasePointers.clear();
- CurPointers.clear();
- CurSizes.clear();
- CurMapTypes.clear();
-
- // VLA sizes are passed to the outlined region by copy and do not have map
- // information associated.
- if (CI->capturesVariableArrayType()) {
- CurBasePointers.push_back(*CV);
- CurPointers.push_back(*CV);
- CurSizes.push_back(CGF.getTypeSize(RI->getType()));
- // Copy to the device as an argument. No need to retrieve it.
- CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
- MappableExprsHandler::OMP_MAP_TARGET_PARAM);
- } else {
- // If we have any information in the map clause, we use it, otherwise we
- // just do a default mapping.
- MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
- CurSizes, CurMapTypes);
- if (CurBasePointers.empty())
- MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
- CurPointers, CurSizes, CurMapTypes);
- }
- // We expect to have at least an element of information for this capture.
- assert(!CurBasePointers.empty() && "Non-existing map pointer for capture!");
- assert(CurBasePointers.size() == CurPointers.size() &&
- CurBasePointers.size() == CurSizes.size() &&
- CurBasePointers.size() == CurMapTypes.size() &&
- "Inconsistent map information sizes!");
-
- // The kernel args are always the first elements of the base pointers
- // associated with a capture.
- KernelArgs.push_back(*CurBasePointers.front());
- // We need to append the results of this capture to what we already have.
- BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
- Pointers.append(CurPointers.begin(), CurPointers.end());
- Sizes.append(CurSizes.begin(), CurSizes.end());
- MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
- }
+ const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
+ auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ };
+ emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
+ CodeGenFunction::OMPTargetDataInfo InputInfo;
+ llvm::Value *MapTypesArray = nullptr;
// Fill up the pointer arrays and transfer execution to the device.
- auto &&ThenGen = [this, &BasePointers, &Pointers, &Sizes, &MapTypes, Device,
- OutlinedFn, OutlinedFnID, &D,
- &KernelArgs](CodeGenFunction &CGF, PrePostActionTy &) {
- auto &RT = CGF.CGM.getOpenMPRuntime();
- // Emit the offloading arrays.
- TargetDataInfo Info;
- emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
- emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
- Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray, Info);
-
+ auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
+ &MapTypesArray, &CS, RequiresOuterTask,
+ &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) {
// On top of the arrays that were filled up, the target offloading call
// takes as arguments the device id as well as the host pointer. The host
// pointer is used by the runtime library to identify the current target
@@ -7125,13 +7703,14 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
}
// Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum = CGF.Builder.getInt32(BasePointers.size());
+ llvm::Value *PointerNum =
+ CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
// Return value of the runtime offloading call.
llvm::Value *Return;
- auto *NumTeams = emitNumTeamsForTargetDirective(RT, CGF, D);
- auto *NumThreads = emitNumThreadsForTargetDirective(RT, CGF, D);
+ llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
+ llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);
bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
// The target region is an outlined function launched by the runtime
@@ -7169,25 +7748,30 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// passed to the runtime library - a 32-bit integer with the value zero.
assert(NumThreads && "Thread limit expression should be available along "
"with number of teams.");
- llvm::Value *OffloadingArgs[] = {
- DeviceID, OutlinedFnID,
- PointerNum, Info.BasePointersArray,
- Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray, NumTeams,
- NumThreads};
+ llvm::Value *OffloadingArgs[] = {DeviceID,
+ OutlinedFnID,
+ PointerNum,
+ InputInfo.BasePointersArray.getPointer(),
+ InputInfo.PointersArray.getPointer(),
+ InputInfo.SizesArray.getPointer(),
+ MapTypesArray,
+ NumTeams,
+ NumThreads};
Return = CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
- : OMPRTL__tgt_target_teams),
+ createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
+ : OMPRTL__tgt_target_teams),
OffloadingArgs);
} else {
- llvm::Value *OffloadingArgs[] = {
- DeviceID, OutlinedFnID,
- PointerNum, Info.BasePointersArray,
- Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray};
+ llvm::Value *OffloadingArgs[] = {DeviceID,
+ OutlinedFnID,
+ PointerNum,
+ InputInfo.BasePointersArray.getPointer(),
+ InputInfo.PointersArray.getPointer(),
+ InputInfo.SizesArray.getPointer(),
+ MapTypesArray};
Return = CGF.EmitRuntimeCall(
- RT.createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
- : OMPRTL__tgt_target),
+ createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
+ : OMPRTL__tgt_target),
OffloadingArgs);
}
@@ -7200,17 +7784,120 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
CGF.EmitBlock(OffloadFailedBlock);
- emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, KernelArgs);
+ if (RequiresOuterTask) {
+ CapturedVars.clear();
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ }
+ emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
CGF.EmitBranch(OffloadContBlock);
CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
};
// Notify that the host version must be executed.
- auto &&ElseGen = [this, &D, OutlinedFn, &KernelArgs](CodeGenFunction &CGF,
- PrePostActionTy &) {
- emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn,
- KernelArgs);
+ auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
+ RequiresOuterTask](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ if (RequiresOuterTask) {
+ CapturedVars.clear();
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ }
+ emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
+ };
+
+ auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
+ &CapturedVars, RequiresOuterTask,
+ &CS](CodeGenFunction &CGF, PrePostActionTy &) {
+ // Fill up the arrays with all the captured variables.
+ MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
+ MappableExprsHandler::MapValuesArrayTy Pointers;
+ MappableExprsHandler::MapValuesArrayTy Sizes;
+ MappableExprsHandler::MapFlagsArrayTy MapTypes;
+
+ // Get mappable expression information.
+ MappableExprsHandler MEHandler(D, CGF);
+
+ auto RI = CS.getCapturedRecordDecl()->field_begin();
+ auto CV = CapturedVars.begin();
+ for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
+ CE = CS.capture_end();
+ CI != CE; ++CI, ++RI, ++CV) {
+ MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
+ MappableExprsHandler::MapValuesArrayTy CurPointers;
+ MappableExprsHandler::MapValuesArrayTy CurSizes;
+ MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
+ MappableExprsHandler::StructRangeInfoTy PartialStruct;
+
+ // VLA sizes are passed to the outlined region by copy and do not have map
+ // information associated.
+ if (CI->capturesVariableArrayType()) {
+ CurBasePointers.push_back(*CV);
+ CurPointers.push_back(*CV);
+ CurSizes.push_back(CGF.getTypeSize(RI->getType()));
+ // Copy to the device as an argument. No need to retrieve it.
+ CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
+ MappableExprsHandler::OMP_MAP_TARGET_PARAM);
+ } else {
+        // If we have any information in the map clause, we use it; otherwise we
+ // just do a default mapping.
+ MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
+ CurSizes, CurMapTypes, PartialStruct);
+ if (CurBasePointers.empty())
+ MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
+ CurPointers, CurSizes, CurMapTypes);
+ }
+ // We expect to have at least an element of information for this capture.
+ assert(!CurBasePointers.empty() &&
+ "Non-existing map pointer for capture!");
+ assert(CurBasePointers.size() == CurPointers.size() &&
+ CurBasePointers.size() == CurSizes.size() &&
+ CurBasePointers.size() == CurMapTypes.size() &&
+ "Inconsistent map information sizes!");
+
+ // If there is an entry in PartialStruct it means we have a struct with
+ // individual members mapped. Emit an extra combined entry.
+ if (PartialStruct.Base.isValid())
+ MEHandler.emitCombinedEntry(BasePointers, Pointers, Sizes, MapTypes,
+ CurMapTypes, PartialStruct);
+
+ // We need to append the results of this capture to what we already have.
+ BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
+ Pointers.append(CurPointers.begin(), CurPointers.end());
+ Sizes.append(CurSizes.begin(), CurSizes.end());
+ MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
+ }
+ // Map other list items in the map clause which are not captured variables
+ // but "declare target link" global variables.
+ MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
+ MapTypes);
+
+ TargetDataInfo Info;
+ // Fill up the arrays and create the arguments.
+ emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
+ emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
+ Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray, Info);
+ InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
+ InputInfo.BasePointersArray =
+ Address(Info.BasePointersArray, CGM.getPointerAlign());
+ InputInfo.PointersArray =
+ Address(Info.PointersArray, CGM.getPointerAlign());
+ InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
+ MapTypesArray = Info.MapTypesArray;
+ if (RequiresOuterTask)
+ CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
+ else
+ emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
+ };
+
+ auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
+ CodeGenFunction &CGF, PrePostActionTy &) {
+ if (RequiresOuterTask) {
+ CodeGenFunction::OMPTargetDataInfo InputInfo;
+ CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
+ } else {
+ emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
+ }
};
// If we have a target function ID it means that we need to support
@@ -7218,14 +7905,14 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// regardless of the conditional in the if clause if, e.g., the user does not
// specify target triples.
if (OutlinedFnID) {
- if (IfCond)
- emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
- else {
- RegionCodeGenTy ThenRCG(ThenGen);
+ if (IfCond) {
+ emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
+ } else {
+ RegionCodeGenTy ThenRCG(TargetThenGen);
ThenRCG(CGF);
}
} else {
- RegionCodeGenTy ElseRCG(ElseGen);
+ RegionCodeGenTy ElseRCG(TargetElseGen);
ElseRCG(CGF);
}
}
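
Put together, the then/else generators above lower a conditional offload such as

    #pragma omp target if(use_dev) map(tofrom: a[0:n])
    { work(a, n); }

into host code shaped roughly like the following (names illustrative, not the generated IR):

    if (use_dev) {
      // TargetThenGen: fill the offloading arrays, then ThenGen:
      int32_t err = __tgt_target(device_id, region_id, ...);
      if (err)
        host_outlined_region(...);   // OffloadFailedBlock
    } else {
      host_outlined_region(...);     // TargetElseGen -> ElseGen
    }

with the extra task-based wrapping (EmitOMPTargetTaskBasedDirective) applied when RequiresOuterTask holds, for instance when a depend clause is present (the exact trigger is computed earlier in this function, outside this excerpt).
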
@@ -7236,13 +7923,13 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
return;
// Codegen OMP target directives that offload compute to the device.
- bool requiresDeviceCodegen =
+ bool RequiresDeviceCodegen =
isa<OMPExecutableDirective>(S) &&
isOpenMPTargetExecutionDirective(
cast<OMPExecutableDirective>(S)->getDirectiveKind());
- if (requiresDeviceCodegen) {
- auto &E = *cast<OMPExecutableDirective>(S);
+ if (RequiresDeviceCodegen) {
+ const auto &E = *cast<OMPExecutableDirective>(S);
unsigned DeviceID;
unsigned FileID;
unsigned Line;
@@ -7255,66 +7942,118 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
ParentName, Line))
return;
- switch (S->getStmtClass()) {
- case Stmt::OMPTargetDirectiveClass:
- CodeGenFunction::EmitOMPTargetDeviceFunction(
- CGM, ParentName, cast<OMPTargetDirective>(*S));
+ switch (E.getDirectiveKind()) {
+ case OMPD_target:
+ CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
+ cast<OMPTargetDirective>(E));
break;
- case Stmt::OMPTargetParallelDirectiveClass:
+ case OMPD_target_parallel:
CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
- CGM, ParentName, cast<OMPTargetParallelDirective>(*S));
+ CGM, ParentName, cast<OMPTargetParallelDirective>(E));
break;
- case Stmt::OMPTargetTeamsDirectiveClass:
+ case OMPD_target_teams:
CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
- CGM, ParentName, cast<OMPTargetTeamsDirective>(*S));
+ CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
break;
- case Stmt::OMPTargetTeamsDistributeDirectiveClass:
+ case OMPD_target_teams_distribute:
CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
- CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(*S));
+ CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
break;
- case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
+ case OMPD_target_teams_distribute_simd:
CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
- CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(*S));
+ CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
break;
- case Stmt::OMPTargetParallelForDirectiveClass:
+ case OMPD_target_parallel_for:
CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
- CGM, ParentName, cast<OMPTargetParallelForDirective>(*S));
+ CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
break;
- case Stmt::OMPTargetParallelForSimdDirectiveClass:
+ case OMPD_target_parallel_for_simd:
CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
- CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(*S));
+ CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
break;
- case Stmt::OMPTargetSimdDirectiveClass:
+ case OMPD_target_simd:
CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
- CGM, ParentName, cast<OMPTargetSimdDirective>(*S));
+ CGM, ParentName, cast<OMPTargetSimdDirective>(E));
break;
- default:
+ case OMPD_target_teams_distribute_parallel_for:
+ CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
+ CGM, ParentName,
+ cast<OMPTargetTeamsDistributeParallelForDirective>(E));
+ break;
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ CodeGenFunction::
+ EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
+ CGM, ParentName,
+ cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
+ break;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_unknown:
llvm_unreachable("Unknown target directive for OpenMP device codegen.");
}
return;
}
- if (const OMPExecutableDirective *E = dyn_cast<OMPExecutableDirective>(S)) {
- if (!E->hasAssociatedStmt())
+ if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
+ if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
return;
scanForTargetRegionsFunctions(
- cast<CapturedStmt>(E->getAssociatedStmt())->getCapturedStmt(),
- ParentName);
+ E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
return;
}
// If this is a lambda function, look into its body.
- if (auto *L = dyn_cast<LambdaExpr>(S))
+ if (const auto *L = dyn_cast<LambdaExpr>(S))
S = L->getBody();
// Keep looking for target regions recursively.
- for (auto *II : S->children())
+ for (const Stmt *II : S->children())
scanForTargetRegionsFunctions(II, ParentName);
}
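
A hypothetical input that exercises the three recursion paths above (the directive-kind switch, the innermost captured statement of an enclosing directive, and lambda bodies):

    void work(int);
    void host(int n) {
      auto f = [&]() {
    #pragma omp target            // reached through L->getBody()
        work(n);
      };
    #pragma omp parallel          // OMPExecutableDirective: recurse into
      {                           // the innermost captured statement
    #pragma omp target
        work(n);
      }
    #pragma omp target teams      // handled by the directive-kind switch
      work(n);
      f();
    }
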
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
- auto &FD = *cast<FunctionDecl>(GD.getDecl());
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
// If emitting code for the host, we do not process FD here. Instead we do
// the normal code generation.
@@ -7322,12 +8061,11 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
return false;
// Try to detect target regions in the function.
- scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD));
+ scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
- // We should not emit any function other that the ones created during the
- // scanning. Therefore, we signal that this function is completely dealt
- // with.
- return true;
+ // Do not emit the function if it is not marked as declare target.
+ return !isDeclareTargetDeclaration(FD) &&
+ AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
}
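
By the convention CodeGenModule uses for this hook, a true return means "fully handled here, skip normal emission". So on the device, plain host-only functions are skipped, while a function such as

    #pragma omp declare target
    int helper(int x) { return x + 1; }  // false is returned: emitted normally
    #pragma omp end declare target

now goes through regular codegen instead of being unconditionally suppressed as before this change (example hypothetical).
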
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
@@ -7338,33 +8076,101 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
// regions in it. We use the complete variant to produce the kernel name
// mangling.
QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
- if (auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
- for (auto *Ctor : RD->ctors()) {
+ if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
+ for (const CXXConstructorDecl *Ctor : RD->ctors()) {
StringRef ParentName =
CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
}
- auto *Dtor = RD->getDestructor();
- if (Dtor) {
+ if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
StringRef ParentName =
CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
}
}
- // If we are in target mode, we do not emit any global (declare target is not
- // implemented yet). Therefore we signal that GD was processed in this case.
- return true;
+ // Do not emit the variable if it is not marked as declare target, or if it
+ // is marked as a declare target link entry.
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(cast<VarDecl>(GD.getDecl()));
+ return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
+}
+
+void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
+ llvm::Constant *Addr) {
+ if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(VD)) {
+ OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
+ StringRef VarName;
+ CharUnits VarSize;
+ llvm::GlobalValue::LinkageTypes Linkage;
+ switch (*Res) {
+ case OMPDeclareTargetDeclAttr::MT_To:
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
+ VarName = CGM.getMangledName(VD);
+ VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
+ Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
+ break;
+ case OMPDeclareTargetDeclAttr::MT_Link:
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ VarName = Addr->getName();
+ Addr = nullptr;
+ } else {
+ VarName = getAddrOfDeclareTargetLink(VD).getName();
+ Addr =
+ cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
+ }
+ VarSize = CGM.getPointerSize();
+ Linkage = llvm::GlobalValue::WeakAnyLinkage;
+ break;
+ }
+ OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
+ VarName, Addr, VarSize, Flags, Linkage);
+ }
}
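
The two map types handled in the switch correspond to the two forms of the directive; a hypothetical translation unit:

    int a;
    #pragma omp declare target to(a)      // MT_To: registers &a, sizeof(a),
                                          // and the variable's own linkage

    extern int big[1000000];
    #pragma omp declare target link(big)  // MT_Link: registers a pointer-sized
                                          // proxy with weak linkage instead
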
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
- auto *VD = GD.getDecl();
- if (isa<FunctionDecl>(VD))
+ if (isa<FunctionDecl>(GD.getDecl()))
return emitTargetFunctions(GD);
return emitTargetGlobalVariable(GD);
}
+CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
+ CodeGenModule &CGM)
+ : CGM(CGM) {
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
+ CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
+ }
+}
+
+CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
+ if (CGM.getLangOpts().OpenMPIsDevice)
+ CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
+}
+
+bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
+ if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
+ return true;
+
+ const auto *D = cast<FunctionDecl>(GD.getDecl());
+ const FunctionDecl *FD = D->getCanonicalDecl();
+ // Do not emit the function if it is marked as declare target, as it was
+ // already emitted.
+ if (isDeclareTargetDeclaration(D)) {
+ if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
+ if (auto *F = dyn_cast_or_null<llvm::Function>(
+ CGM.GetGlobalValue(CGM.getMangledName(GD))))
+ return !F->isDeclaration();
+ return false;
+ }
+ return true;
+ }
+
+ return !AlreadyEmittedTargetFunctions.insert(FD).second;
+}
+
llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
// If we have offloading in the current module, we need to emit the entries
// now and register the offloading descriptor.
@@ -7384,7 +8190,7 @@ void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
if (!CGF.HaveInsertPoint())
return;
- auto *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
CodeGenFunction::RunCleanupsScope Scope(CGF);
// Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
@@ -7396,7 +8202,7 @@ void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
RealArgs.append(std::begin(Args), std::end(Args));
RealArgs.append(CapturedVars.begin(), CapturedVars.end());
- auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
+ llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
CGF.EmitRuntimeCall(RTLFn, RealArgs);
}
@@ -7407,16 +8213,16 @@ void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
if (!CGF.HaveInsertPoint())
return;
- auto *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *NumTeamsVal =
- (NumTeams)
+ NumTeams
? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
CGF.CGM.Int32Ty, /* isSigned = */ true)
: CGF.Builder.getInt32(0);
llvm::Value *ThreadLimitVal =
- (ThreadLimit)
+ ThreadLimit
? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
CGF.CGM.Int32Ty, /* isSigned = */ true)
: CGF.Builder.getInt32(0);
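
For reference, the two values computed here feed __kmpc_push_num_teams; a directive like

    #pragma omp target teams num_teams(4) thread_limit(64)

ends up, on the host path, as a call along the lines of

    __kmpc_push_num_teams(loc, thread_id, /*num_teams=*/4, /*thread_limit=*/64);

with 0 passed for whichever clause is absent, exactly as the two conditionals above encode.
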
@@ -7473,7 +8279,7 @@ void CGOpenMPRuntime::emitTargetDataCalls(
}
// Emit the number of elements in the offloading arrays.
- auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
+ llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
@@ -7509,7 +8315,7 @@ void CGOpenMPRuntime::emitTargetDataCalls(
}
// Emit the number of elements in the offloading arrays.
- auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
+ llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
@@ -7596,9 +8402,6 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
OpenMPRTLFunction RTLFn;
switch (D.getDirectiveKind()) {
- default:
- llvm_unreachable("Unexpected standalone target data directive.");
- break;
case OMPD_target_enter_data:
RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
: OMPRTL__tgt_target_data_begin;
@@ -7611,6 +8414,58 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
: OMPRTL__tgt_target_data_update;
break;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_target:
+ case OMPD_target_simd:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams:
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_unknown:
+ llvm_unreachable("Unexpected standalone target data directive.");
+ break;
}
CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
};
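
The switch above pairs each standalone data directive with its runtime entry point; schematically:

    #pragma omp target enter data map(to: v)   ->  __tgt_target_data_begin{,_nowait}
    #pragma omp target exit data map(from: v)  ->  __tgt_target_data_end{,_nowait}
    #pragma omp target update from(v)          ->  __tgt_target_data_update{,_nowait}

with the _nowait variant selected when the directive carries a nowait clause (HasNowait).
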
@@ -7644,13 +8499,13 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
if (D.hasClausesOfKind<OMPDependClause>())
CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
else
- emitInlinedDirective(CGF, OMPD_target_update, ThenGen);
+ emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
};
- if (IfCond)
+ if (IfCond) {
emitOMPIfClause(CGF, IfCond, TargetThenGen,
[](CodeGenFunction &CGF, PrePostActionTy &) {});
- else {
+ } else {
RegionCodeGenTy ThenRCG(TargetThenGen);
ThenRCG(CGF);
}
@@ -7693,11 +8548,11 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
return 0;
ASTContext &C = FD->getASTContext();
QualType CDT;
- if (!RetType.isNull() && !RetType->isVoidType())
+ if (!RetType.isNull() && !RetType->isVoidType()) {
CDT = RetType;
- else {
+ } else {
unsigned Offset = 0;
- if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (ParamAttrs[Offset].Kind == Vector)
CDT = C.getPointerType(C.getRecordType(MD->getParent()));
++Offset;
@@ -7755,17 +8610,18 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
Masked.push_back('M');
break;
}
- for (auto Mask : Masked) {
- for (auto &Data : ISAData) {
+ for (char Mask : Masked) {
+ for (const ISADataTy &Data : ISAData) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << "_ZGV" << Data.ISA << Mask;
if (!VLENVal) {
Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
evaluateCDTSize(FD, ParamAttrs));
- } else
+ } else {
Out << VLENVal;
- for (auto &ParamAttr : ParamAttrs) {
+ }
+ for (const ParamAttrTy &ParamAttr : ParamAttrs) {
switch (ParamAttr.Kind){
case LinearWithVarStride:
Out << 's' << ParamAttr.StrideOrArg;
@@ -7794,90 +8650,95 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn) {
ASTContext &C = CGM.getContext();
- FD = FD->getCanonicalDecl();
+ FD = FD->getMostRecentDecl();
// Map params to their positions in function decl.
llvm::DenseMap<const Decl *, unsigned> ParamPositions;
if (isa<CXXMethodDecl>(FD))
- ParamPositions.insert({FD, 0});
+ ParamPositions.try_emplace(FD, 0);
unsigned ParamPos = ParamPositions.size();
- for (auto *P : FD->parameters()) {
- ParamPositions.insert({P->getCanonicalDecl(), ParamPos});
+ for (const ParmVarDecl *P : FD->parameters()) {
+ ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
++ParamPos;
}
- for (auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
- llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
- // Mark uniform parameters.
- for (auto *E : Attr->uniforms()) {
- E = E->IgnoreParenImpCasts();
- unsigned Pos;
- if (isa<CXXThisExpr>(E))
- Pos = ParamPositions[FD];
- else {
- auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
- ->getCanonicalDecl();
- Pos = ParamPositions[PVD];
- }
- ParamAttrs[Pos].Kind = Uniform;
- }
- // Get alignment info.
- auto NI = Attr->alignments_begin();
- for (auto *E : Attr->aligneds()) {
- E = E->IgnoreParenImpCasts();
- unsigned Pos;
- QualType ParmTy;
- if (isa<CXXThisExpr>(E)) {
- Pos = ParamPositions[FD];
- ParmTy = E->getType();
- } else {
- auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
- ->getCanonicalDecl();
- Pos = ParamPositions[PVD];
- ParmTy = PVD->getType();
+ while (FD) {
+ for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
+ llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
+ // Mark uniform parameters.
+ for (const Expr *E : Attr->uniforms()) {
+ E = E->IgnoreParenImpCasts();
+ unsigned Pos;
+ if (isa<CXXThisExpr>(E)) {
+ Pos = ParamPositions[FD];
+ } else {
+ const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
+ ->getCanonicalDecl();
+ Pos = ParamPositions[PVD];
+ }
+ ParamAttrs[Pos].Kind = Uniform;
}
- ParamAttrs[Pos].Alignment =
- (*NI) ? (*NI)->EvaluateKnownConstInt(C)
+ // Get alignment info.
+ auto NI = Attr->alignments_begin();
+ for (const Expr *E : Attr->aligneds()) {
+ E = E->IgnoreParenImpCasts();
+ unsigned Pos;
+ QualType ParmTy;
+ if (isa<CXXThisExpr>(E)) {
+ Pos = ParamPositions[FD];
+ ParmTy = E->getType();
+ } else {
+ const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
+ ->getCanonicalDecl();
+ Pos = ParamPositions[PVD];
+ ParmTy = PVD->getType();
+ }
+ ParamAttrs[Pos].Alignment =
+ (*NI)
+ ? (*NI)->EvaluateKnownConstInt(C)
: llvm::APSInt::getUnsigned(
C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
.getQuantity());
- ++NI;
- }
- // Mark linear parameters.
- auto SI = Attr->steps_begin();
- auto MI = Attr->modifiers_begin();
- for (auto *E : Attr->linears()) {
- E = E->IgnoreParenImpCasts();
- unsigned Pos;
- if (isa<CXXThisExpr>(E))
- Pos = ParamPositions[FD];
- else {
- auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
- ->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ ++NI;
}
- auto &ParamAttr = ParamAttrs[Pos];
- ParamAttr.Kind = Linear;
- if (*SI) {
- if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
- Expr::SE_AllowSideEffects)) {
- if (auto *DRE = cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
- if (auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
- ParamAttr.Kind = LinearWithVarStride;
- ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
- ParamPositions[StridePVD->getCanonicalDecl()]);
+ // Mark linear parameters.
+ auto SI = Attr->steps_begin();
+ auto MI = Attr->modifiers_begin();
+ for (const Expr *E : Attr->linears()) {
+ E = E->IgnoreParenImpCasts();
+ unsigned Pos;
+ if (isa<CXXThisExpr>(E)) {
+ Pos = ParamPositions[FD];
+ } else {
+ const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
+ ->getCanonicalDecl();
+ Pos = ParamPositions[PVD];
+ }
+ ParamAttrTy &ParamAttr = ParamAttrs[Pos];
+ ParamAttr.Kind = Linear;
+ if (*SI) {
+ if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
+ Expr::SE_AllowSideEffects)) {
+ if (const auto *DRE =
+ cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
+ if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
+ ParamAttr.Kind = LinearWithVarStride;
+ ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
+ ParamPositions[StridePVD->getCanonicalDecl()]);
+ }
}
}
}
+ ++SI;
+ ++MI;
}
- ++SI;
- ++MI;
+ llvm::APSInt VLENVal;
+ if (const Expr *VLEN = Attr->getSimdlen())
+ VLENVal = VLEN->EvaluateKnownConstInt(C);
+ OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
+ if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
+ CGM.getTriple().getArch() == llvm::Triple::x86_64)
+ emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
}
- llvm::APSInt VLENVal;
- if (const Expr *VLEN = Attr->getSimdlen())
- VLENVal = VLEN->EvaluateKnownConstInt(C);
- OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
- if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
- CGM.getTriple().getArch() == llvm::Triple::x86_64)
- emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
+ FD = FD->getPreviousDecl();
}
}
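
A hypothetical declaration showing what this loop consumes:

    #pragma omp declare simd simdlen(8) uniform(n) linear(i) aligned(p:32)
    float add(float *p, int i, int n);

For each ISA/mask pair in ISAData, emitX86DeclareSimdFunction attaches a vector-variant name of the Intel vector-function-ABI form _ZGV<isa><mask><vlen><param-kinds>_<name>; for the declaration above something like _ZGVdN8va32l1u_add would result (treat the exact letters as illustrative, not authoritative). The new while (FD) ... getPreviousDecl() walk ensures attributes attached to any redeclaration are honored, not only those on the most recent one.
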
@@ -7926,8 +8787,9 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
addFieldToRecordDecl(C, RD, Int64Ty);
RD->completeDefinition();
KmpDimTy = C.getRecordType(RD);
- } else
+ } else {
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
+ }
Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
@@ -7979,18 +8841,19 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
getThreadID(CGF, C->getLocStart()),
CntAddr.getPointer()};
llvm::Value *RTLFn;
- if (C->getDependencyKind() == OMPC_DEPEND_source)
+ if (C->getDependencyKind() == OMPC_DEPEND_source) {
RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
- else {
+ } else {
assert(C->getDependencyKind() == OMPC_DEPEND_sink);
RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
}
CGF.EmitRuntimeCall(RTLFn, Args);
}
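
The two dependency kinds map onto paired runtime calls; a hypothetical doacross loop:

    #pragma omp for ordered(1)
    for (int i = 1; i < n; ++i) {
    #pragma omp ordered depend(sink: i - 1)   // waits: __kmpc_doacross_wait
      use(a[i - 1]);
    #pragma omp ordered depend(source)        // posts: __kmpc_doacross_post
      a[i] = produce(i);
    }

Both calls receive the iteration vector written to CntAddr just above.
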
-void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, llvm::Value *Callee,
- ArrayRef<llvm::Value *> Args,
- SourceLocation Loc) const {
+void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args) const {
+ assert(Loc.isValid() && "Outlined function call location must be valid.");
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
@@ -8005,8 +8868,7 @@ void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, llvm::Value *Callee,
void CGOpenMPRuntime::emitOutlinedFunctionCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> Args) const {
- assert(Loc.isValid() && "Outlined function call location must be valid.");
- emitCall(CGF, OutlinedFn, Args, Loc);
+ emitCall(CGF, Loc, OutlinedFn, Args);
}
Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
@@ -8014,3 +8876,303 @@ Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
const VarDecl *TargetParam) const {
return CGF.GetAddrOfLocalVar(NativeParam);
}
+
+Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ return Address::invalid();
+}
+
+llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ const VarDecl *PartIDVar, const VarDecl *TaskTVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
+ bool Tied, unsigned &NumberOfParts) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitCriticalRegion(
+ CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
+ const Expr *Hint) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MasterOpGen,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
+ CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitSingleRegion(
+ CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
+ SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
+ ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
+ ArrayRef<const Expr *> AssignmentOps) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &OrderedOpGen,
+ SourceLocation Loc,
+ bool IsThreads) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPDirectiveKind Kind,
+ bool EmitChecks,
+ bool ForceSimpleCall) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitForDispatchInit(
+ CodeGenFunction &CGF, SourceLocation Loc,
+ const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
+ bool Ordered, const DispatchRTInput &DispatchValues) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitForStaticInit(
+ CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
+ const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
+ CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ unsigned IVSize,
+ bool IVSigned) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPDirectiveKind DKind) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ unsigned IVSize, bool IVSigned,
+ Address IL, Address LB,
+ Address UB, Address ST) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
+ OpenMPProcBindClauseKind ProcBind,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ Address VDAddr,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
+ const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
+ CodeGenFunction *CGF) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
+ CodeGenFunction &CGF, QualType VarType, StringRef Name) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
+ ArrayRef<const Expr *> Vars,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPExecutableDirective &D,
+ llvm::Value *TaskFunction,
+ QualType SharedsTy, Address Shareds,
+ const Expr *IfCond,
+ const OMPTaskDataTy &Data) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTaskLoopCall(
+ CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
+ llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
+ const Expr *IfCond, const OMPTaskDataTy &Data) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitReduction(
+ CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
+ assert(Options.SimpleReduction && "Only simple reduction is expected.");
+ CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
+ ReductionOps, Options);
+}
+
+llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
+ CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ ReductionCodeGen &RCG,
+ unsigned N) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Value *ReductionsPtr,
+ LValue SharedLVal) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitCancellationPointCall(
+ CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind CancelRegion) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
+ SourceLocation Loc, const Expr *IfCond,
+ OpenMPDirectiveKind CancelRegion) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
+ const OMPExecutableDirective &D, StringRef ParentName,
+ llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ llvm::Value *OutlinedFn,
+ llvm::Value *OutlinedFnID,
+ const Expr *IfCond, const Expr *Device) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
+ return false;
+}
+
+llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
+ return nullptr;
+}
+
+void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
+ const Expr *NumTeams,
+ const Expr *ThreadLimit,
+ SourceLocation Loc) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTargetDataCalls(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
+ const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
+ const Expr *Device) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
+ const OMPLoopDirective &D) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDependClause *C) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+const VarDecl *
+CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
+ const VarDecl *NativeParam) const {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
+Address
+CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
+ const VarDecl *NativeParam,
+ const VarDecl *TargetParam) const {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
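
These stubs define the new simd-only flavor of the runtime: when only simd semantics are requested (the -fopenmp-simd mode of this era; treat the exact driver spelling as an assumption), every entry point that would need the full libomp/libomptarget support library is unreachable by construction, so the stubs trap with llvm_unreachable. Only the trivially safe paths return normally: emitTargetGlobal (nothing to offload), emitRegistrationFunction (no descriptor), and emitReduction restricted to the simple case.
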
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 94a143841373..01ff0c20fd66 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -133,7 +133,7 @@ private:
/// Base declarations for the reduction items.
SmallVector<const VarDecl *, 4> BaseDecls;
- /// Emits lvalue for shared expresion.
+ /// Emits lvalue for shared expression.
LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
/// Emits upper bound for shared expression (if array section).
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
@@ -191,21 +191,41 @@ public:
}
/// Returns the base declaration of the reduction item.
const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
+ /// Returns the reference expression of the reduction item.
+ const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
/// Returns true if the initialization of the reduction item uses the
/// initializer from the declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
+public:
+ /// Allows disabling the automatic handling of functions used in target
+ /// regions, which are otherwise treated as if marked `omp declare target`.
+ class DisableAutoDeclareTargetRAII {
+ CodeGenModule &CGM;
+ bool SavedShouldMarkAsGlobal;
+
+ public:
+ DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
+ ~DisableAutoDeclareTargetRAII();
+ };
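
A minimal usage sketch for the RAII helper (the call site shown is hypothetical):

    void emitSomethingOnDevice(CodeGenModule &CGM) {
      CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoAutoDT(CGM);
      // While NoAutoDT is live, ShouldMarkAsGlobal is false on the device,
      // so referenced functions are not auto-treated as declare target.
    } // previous value restored by the destructor
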
+
protected:
CodeGenModule &CGM;
+ StringRef FirstSeparator, Separator;
- /// \brief Creates offloading entry for the provided entry ID \a ID,
+ /// Constructor that allows redefining the name separator for the variables.
+ explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
+ StringRef Separator);
+
+ /// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
- uint64_t Size, int32_t Flags = 0);
+ uint64_t Size, int32_t Flags,
+ llvm::GlobalValue::LinkageTypes Linkage);
- /// \brief Helper to emit outlined function for 'target' directive.
+ /// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
@@ -221,7 +241,7 @@ protected:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
- /// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
+ /// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
@@ -232,52 +252,56 @@ protected:
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
- /// \brief Emits object of ident_t type with info for source location.
+ /// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
- /// \brief Returns pointer to ident_t type.
+ /// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
- /// \brief Gets thread id value for the current thread.
+ /// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
- /// \brief Get the function name of an outlined region.
+ /// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
- void emitCall(CodeGenFunction &CGF, llvm::Value *Callee,
- ArrayRef<llvm::Value *> Args = llvm::None,
- SourceLocation Loc = SourceLocation()) const;
+ void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args = llvm::None) const;
+
+ /// Emits address of the word in a memory where current thread id is
+ /// stored.
+ virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
private:
- /// \brief Default const ident_t object used for initialization of all other
+ /// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
- /// \brief Map of flags and corresponding default locations.
+ /// Map of flags and corresponding default locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
+ QualType IdentQTy;
llvm::StructType *IdentTy = nullptr;
- /// \brief Map for SourceLocation and OpenMP runtime library debug locations.
+ /// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
- /// \brief The type for a microtask which gets passed to __kmpc_fork_call().
+ /// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
- /// \brief Stores debug location and ThreadID for the function.
+ /// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
};
- /// \brief Map of local debug location, ThreadId and functions.
+ /// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
@@ -295,20 +319,20 @@ private:
IdentifierInfo *Out = nullptr;
IdentifierInfo *Priv = nullptr;
IdentifierInfo *Orig = nullptr;
- /// \brief Type kmp_critical_name, originally defined as typedef kmp_int32
+ /// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
- /// \brief An ordered map of auto-generated variables to their unique names.
+ /// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
- /// \brief Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
+ /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
- /// \brief Type typedef struct kmp_task {
+ /// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
@@ -322,7 +346,7 @@ private:
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
- /// \brief Type typedef struct kmp_depend_info {
+ /// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
@@ -337,7 +361,7 @@ private:
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
- /// \brief Type struct __tgt_offload_entry{
+ /// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
@@ -365,112 +389,195 @@ private:
/// // entries (non inclusive).
/// };
QualType TgtBinaryDescriptorQTy;
- /// \brief Entity that registers the offloading constants that were emitted so
+ /// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
- /// \brief Number of entries registered so far.
- unsigned OffloadingEntriesNum;
+ /// Number of entries registered so far.
+ unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
- /// Kind of a given entry. Currently, only target regions are
- /// supported.
+ /// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
- // Entry is a target region.
- OFFLOAD_ENTRY_INFO_TARGET_REGION = 0,
- // Invalid entry info.
- OFFLOAD_ENTRY_INFO_INVALID = ~0u
+ /// Entry is a target region.
+ OffloadingEntryInfoTargetRegion = 0,
+ /// Entry is a declare target variable.
+ OffloadingEntryInfoDeviceGlobalVar = 1,
+ /// Invalid entry info.
+ OffloadingEntryInfoInvalid = ~0u
};
- OffloadEntryInfo()
- : Flags(0), Order(~0u), Kind(OFFLOAD_ENTRY_INFO_INVALID) {}
+ protected:
+ OffloadEntryInfo() = delete;
+ explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
- int32_t Flags)
+ uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
+ ~OffloadEntryInfo() = default;
+ public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
- int32_t getFlags() const { return Flags; }
- void setFlags(int32_t NewFlags) { Flags = NewFlags; }
+ uint32_t getFlags() const { return Flags; }
+ void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
+ llvm::Constant *getAddress() const {
+ return cast_or_null<llvm::Constant>(Addr);
+ }
+ void setAddress(llvm::Constant *V) {
+ assert(!Addr.pointsToAliveValue() && "Address has been set before!");
+ Addr = V;
+ }
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
+ /// Address of the entity that has to be mapped for offloading.
+ llvm::WeakTrackingVH Addr;
+
/// Flags associated with the device global.
- int32_t Flags;
+ uint32_t Flags = 0u;
/// Order this entry was emitted.
- unsigned Order;
+ unsigned Order = ~0u;
- OffloadingEntryInfoKinds Kind;
+ OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
- /// \brief Return true if a there are no entries defined.
+ /// Return true if there are no entries defined.
bool empty() const;
- /// \brief Return number of entries defined so far.
+ /// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
- OffloadEntriesInfoManagerTy(CodeGenModule &CGM)
- : CGM(CGM), OffloadingEntriesNum(0) {}
-
- ///
- /// Target region entries related.
- ///
- /// \brief Target region entries info.
- class OffloadEntryInfoTargetRegion : public OffloadEntryInfo {
- // \brief Address of the entity that has to be mapped for offloading.
- llvm::Constant *Addr;
- // \brief Address that can be used as the ID of the entry.
- llvm::Constant *ID;
+ OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
+
+ //
+ // Target region entries related.
+ //
+
+ /// Kind of the target registry entry.
+ enum OMPTargetRegionEntryKind : uint32_t {
+ /// Mark the entry as target region.
+ OMPTargetRegionEntryTargetRegion = 0x0,
+ /// Mark the entry as a global constructor.
+ OMPTargetRegionEntryCtor = 0x02,
+ /// Mark the entry as a global destructor.
+ OMPTargetRegionEntryDtor = 0x04,
+ };
+
+ /// Target region entries info.
+ class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
+ /// Address that can be used as the ID of the entry.
+ llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
- : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, ~0u,
- /*Flags=*/0),
- Addr(nullptr), ID(nullptr) {}
+ : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
- llvm::Constant *ID, int32_t Flags)
- : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, Order, Flags),
- Addr(Addr), ID(ID) {}
+ llvm::Constant *ID,
+ OMPTargetRegionEntryKind Flags)
+ : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
+ ID(ID) {
+ setAddress(Addr);
+ }
- llvm::Constant *getAddress() const { return Addr; }
llvm::Constant *getID() const { return ID; }
- void setAddress(llvm::Constant *V) {
- assert(!Addr && "Address as been set before!");
- Addr = V;
- }
void setID(llvm::Constant *V) {
- assert(!ID && "ID as been set before!");
+ assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
- return Info->getKind() == OFFLOAD_ENTRY_INFO_TARGET_REGION;
+ return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
- /// \brief Initialize target region entry.
+
+ /// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
- /// \brief Register target region entry.
+ /// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
- int32_t Flags);
- /// \brief Return true if a target region entry with the provided
- /// information exists.
+ OMPTargetRegionEntryKind Flags);
+ /// Return true if a target region entry with the provided information
+ /// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
- OffloadEntryInfoTargetRegion &)>
+ const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
+ //
+ // Device global variable entries related.
+ //
+
+ /// Kind of the global variable entry.
+ enum OMPTargetGlobalVarEntryKind : uint32_t {
+ /// Mark the entry as a declare target 'to' entry.
+ OMPTargetGlobalVarEntryTo = 0x0,
+ /// Mark the entry as a declare target 'link' entry.
+ OMPTargetGlobalVarEntryLink = 0x1,
+ };
+
+ /// Device global variable entries info.
+ class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
+ /// Size of the global variable.
+ CharUnits VarSize;
+ llvm::GlobalValue::LinkageTypes Linkage;
+
+ public:
+ OffloadEntryInfoDeviceGlobalVar()
+ : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
+ explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
+ OMPTargetGlobalVarEntryKind Flags)
+ : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
+ explicit OffloadEntryInfoDeviceGlobalVar(
+ unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
+ OMPTargetGlobalVarEntryKind Flags,
+ llvm::GlobalValue::LinkageTypes Linkage)
+ : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
+ VarSize(VarSize), Linkage(Linkage) {
+ setAddress(Addr);
+ }
+
+ CharUnits getVarSize() const { return VarSize; }
+ void setVarSize(CharUnits Size) { VarSize = Size; }
+ llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
+ void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
+ static bool classof(const OffloadEntryInfo *Info) {
+ return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
+ }
+ };
+
+ /// Initialize device global variable entry.
+ void initializeDeviceGlobalVarEntryInfo(StringRef Name,
+ OMPTargetGlobalVarEntryKind Flags,
+ unsigned Order);
+
+ /// Register device global variable entry.
+ void
+ registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
+ CharUnits VarSize,
+ OMPTargetGlobalVarEntryKind Flags,
+ llvm::GlobalValue::LinkageTypes Linkage);
+ /// Checks if the variable with the given name has been registered already.
+ bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
+ return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
+ }
+ /// Applies action \a Action on all registered entries.
+ typedef llvm::function_ref<void(StringRef,
+ const OffloadEntryInfoDeviceGlobalVar &)>
+ OffloadDeviceGlobalVarEntryInfoActTy;
+ void actOnDeviceGlobalVarEntriesInfo(
+ const OffloadDeviceGlobalVarEntryInfoActTy &Action);
+
private:
// Storage for target region entries kind. The storage is to be indexed by
// file ID, device ID, parent function name and line number.
@@ -484,75 +591,79 @@ private:
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
+ /// Storage for device global variable entries. The storage is indexed by
+ /// the mangled name of the variable.
+ typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
+ OffloadEntriesDeviceGlobalVarTy;
+ OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
- /// \brief Creates and registers offloading binary descriptor for the current
+ bool ShouldMarkAsGlobal = true;
+ llvm::SmallDenseSet<const FunctionDecl *> AlreadyEmittedTargetFunctions;
+
+ /// Creates and registers offloading binary descriptor for the current
/// compilation unit. The function that does the registration is returned.
llvm::Function *createOffloadingBinaryDescriptorRegistration();
- /// \brief Creates all the offload entries in the current compilation unit
+ /// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
- /// \brief Loads all the offload entries information from the host IR
+ /// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
- /// \brief Returns __tgt_offload_entry type.
+ /// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
- /// \brief Returns __tgt_device_image type.
+ /// Returns __tgt_device_image type.
QualType getTgtDeviceImageQTy();
- /// \brief Returns __tgt_bin_desc type.
+ /// Returns __tgt_bin_desc type.
QualType getTgtBinaryDescriptorQTy();
- /// \brief Start scanning from statement \a S and and emit all target regions
+ /// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
- /// \brief Build type kmp_routine_entry_t (if not built yet).
+ /// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
- /// \brief Returns pointer to kmpc_micro type.
+ /// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
- /// \brief Returns specified OpenMP runtime function.
+ /// Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::Constant *createRuntimeFunction(unsigned Function);
- /// \brief Returns __kmpc_for_static_init_* runtime function for the specified
+ /// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
- /// \brief Returns __kmpc_dispatch_init_* runtime function for the specified
+ /// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createDispatchInitFunction(unsigned IVSize, bool IVSigned);
- /// \brief Returns __kmpc_dispatch_next_* runtime function for the specified
+ /// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createDispatchNextFunction(unsigned IVSize, bool IVSigned);
- /// \brief Returns __kmpc_dispatch_fini_* runtime function for the specified
+ /// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
- /// \brief If the specified mangled name is not in the module, create and
+ /// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
- /// \brief Emits address of the word in a memory where current thread id is
- /// stored.
- virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
-
- /// \brief Gets (if variable with the given name already exist) or creates
+ /// Gets (if a variable with the given name already exists) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
@@ -561,10 +672,13 @@ private:
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name);
- /// \brief Set of threadprivate variables with the generated initializer.
+ /// Set of threadprivate variables with the generated initializer.
llvm::SmallPtrSet<const VarDecl *, 4> ThreadPrivateWithDefinition;
- /// \brief Emits initialization code for the threadprivate variables.
+ /// Set of declare target variables with the generated initializer.
+ llvm::SmallPtrSet<const VarDecl *, 4> DeclareTargetWithDefinition;
+
+ /// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
@@ -574,7 +688,7 @@ private:
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
- /// \brief Returns corresponding lock object for the specified critical region
+ /// Returns the corresponding lock object for the specified critical region
/// name. If the lock object does not exist, it is created; otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
@@ -586,7 +700,7 @@ private:
llvm::Value *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
- RecordDecl *KmpTaskTQTyRD = nullptr;
+ const RecordDecl *KmpTaskTQTyRD = nullptr;
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
@@ -617,10 +731,14 @@ private:
Address Shareds, const OMPTaskDataTy &Data);
public:
- explicit CGOpenMPRuntime(CodeGenModule &CGM);
+ explicit CGOpenMPRuntime(CodeGenModule &CGM)
+ : CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
+ /// Get the platform-specific name separator.
+ std::string getName(ArrayRef<StringRef> Parts) const;
+
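+ // Illustrative sketch (editorial; not part of this patch): getName() is
+ // assumed to join its parts with the separators passed to the constructor
+ // above ("." and "." for the host runtime), so a call such as
+ //
+ //   std::string N = getName({"__kmpc", "global", "ctor"});
+ //
+ // would yield ".__kmpc.global.ctor"; device runtimes can pass different
+ // separators for targets where '.' is not valid in symbol names.
+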
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
@@ -628,7 +746,7 @@ public:
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
- /// \brief Emits outlined function for the specified OpenMP parallel directive
+ /// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
@@ -640,7 +758,7 @@ public:
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
- /// \brief Emits outlined function for the specified OpenMP teams directive
+ /// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
@@ -652,7 +770,7 @@ public:
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
- /// \brief Emits outlined function for the OpenMP task directive \a D. This
+ /// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
@@ -673,11 +791,11 @@ public:
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
- /// \brief Cleans up references to the objects in finished function.
+ /// Cleans up references to the objects in finished function.
///
- void functionFinished(CodeGenFunction &CGF);
+ virtual void functionFinished(CodeGenFunction &CGF);
- /// \brief Emits code for parallel or serial call of the \a OutlinedFn with
+ /// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
@@ -692,7 +810,7 @@ public:
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
- /// \brief Emits a critical region.
+ /// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
@@ -702,24 +820,24 @@ public:
SourceLocation Loc,
const Expr *Hint = nullptr);
- /// \brief Emits a master region.
+ /// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
- /// \brief Emits code for a taskyield directive.
+ /// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
- /// \brief Emit a taskgroup region.
+ /// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
- /// \brief Emits a single region.
+ /// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
@@ -730,14 +848,14 @@ public:
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
- /// \brief Emit an ordered region.
+ /// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
- /// \brief Emit an implicit/explicit barrier for OpenMP threads.
+ /// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if checks for cancellation barriers need to be emitted.
@@ -750,7 +868,7 @@ public:
bool EmitChecks = true,
bool ForceSimpleCall = false);
- /// \brief Check if the specified \a ScheduleKind is static non-chunked.
+ /// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
@@ -758,7 +876,7 @@ public:
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
- /// \brief Check if the specified \a ScheduleKind is static non-chunked.
+ /// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
@@ -766,7 +884,7 @@ public:
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
- /// \brief Check if the specified \a ScheduleKind is dynamic.
+ /// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
@@ -839,7 +957,7 @@ public:
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
- /// \brief Call the appropriate runtime routine to initialize it before start
+ /// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in the case of a static schedule, when the user did not
@@ -870,7 +988,7 @@ public:
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
- /// \brief Call the appropriate runtime routine to notify that we finished
+ /// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
@@ -882,7 +1000,7 @@ public:
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
- /// \brief Call the appropriate runtime routine to notify that we finished
+ /// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
@@ -911,7 +1029,7 @@ public:
Address IL, Address LB,
Address UB, Address ST);
- /// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
+ /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
@@ -919,13 +1037,13 @@ public:
llvm::Value *NumThreads,
SourceLocation Loc);
- /// \brief Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
+ /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
OpenMPProcBindClauseKind ProcBind,
SourceLocation Loc);
- /// \brief Returns address of the threadprivate variable for the current
+ /// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
@@ -936,7 +1054,11 @@ public:
Address VDAddr,
SourceLocation Loc);
- /// \brief Emit a code for initialization of threadprivate variable. It emits
+ /// Returns the address of the variable marked as declare target with link
+ /// clause.
+ virtual Address getAddrOfDeclareTargetLink(const VarDecl *VD);
+
+ /// Emit code for the initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds an initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
@@ -949,6 +1071,14 @@ public:
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
+ /// Emit code for the initialization of a declare target variable.
+ /// \param VD Declare target variable.
+ /// \param Addr Address of the global variable \a VD.
+ /// \param PerformInit true if initialization expression is not constant.
+ virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
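+ // Illustrative sketch (editorial; not part of this patch): the two
+ // declare-target hooks above back OpenMP 4.5 'declare target ... link'
+ // variables, roughly:
+ //
+ //   int Gbl = 1;
+ //   #pragma omp declare target link(Gbl)
+ //
+ //   #pragma omp target
+ //   { Gbl += 1; } // the device references Gbl through the link pointer
+ //                 // returned by getAddrOfDeclareTargetLink for its decl.
+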
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
@@ -957,12 +1087,12 @@ public:
QualType VarType,
StringRef Name);
- /// \brief Emit flush of the variables specified in 'omp flush' directive.
+ /// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc);
- /// \brief Emit task region for the task directive. The task region is
+ /// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
@@ -1029,7 +1159,7 @@ public:
llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
- /// \brief Emit code for the directive that does not require outlining.
+ /// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
@@ -1048,7 +1178,8 @@ public:
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
- llvm::Value *emitReductionFunction(CodeGenModule &CGM, llvm::Type *ArgsType,
+ llvm::Value *emitReductionFunction(CodeGenModule &CGM, SourceLocation Loc,
+ llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
@@ -1066,7 +1197,7 @@ public:
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
- /// \brief Emit a code for reduction clause. Next code should be emitted for
+ /// Emit code for the reduction clause. The next code should be emitted for
/// reduction:
/// \code
///
@@ -1160,10 +1291,10 @@ public:
llvm::Value *ReductionsPtr,
LValue SharedLVal);
- /// \brief Emit code for 'taskwait' directive.
+ /// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
- /// \brief Emit code for 'cancellation point' construct.
+ /// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
@@ -1171,7 +1302,7 @@ public:
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
- /// \brief Emit code for 'cancel' construct.
+ /// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
@@ -1180,7 +1311,7 @@ public:
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
- /// \brief Emit outilined function for 'target' directive.
+ /// Emit outilined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
@@ -1196,7 +1327,7 @@ public:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
- /// \brief Emit the target offloading code associated with \a D. The emitted
+ /// Emit the target offloading code associated with \a D. The emitted
/// code attempts to offload the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
@@ -1206,36 +1337,39 @@ public:
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
- /// \param CapturedVars Values captured in the current region.
virtual void emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
llvm::Value *OutlinedFn,
llvm::Value *OutlinedFnID, const Expr *IfCond,
- const Expr *Device,
- ArrayRef<llvm::Value *> CapturedVars);
+ const Expr *Device);
- /// \brief Emit the target regions enclosed in \a GD function definition or
+ /// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
- /// \brief Emit the global variable if it is a valid device global variable.
+ /// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
- /// \brief Emit the global \a GD if it is meaningful for the target. Returns
+ /// Checks if the provided variable declaration \a VD is a declare target
+ /// variable and registers it when emitting code for the host.
+ virtual void registerTargetGlobalVariable(const VarDecl *VD,
+ llvm::Constant *Addr);
+
+ /// Emit the global \a GD if it is meaningful for the target. Returns true
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
- /// \brief Creates the offloading descriptor in the event any target region
+ /// Creates the offloading descriptor in the event any target region
/// was emitted in the current module and returns the function that registers
/// it.
virtual llvm::Function *emitRegistrationFunction();
- /// \brief Emits code for teams call of the \a OutlinedFn with
+ /// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
@@ -1248,7 +1382,7 @@ public:
SourceLocation Loc, llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
- /// \brief Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
+ /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
@@ -1296,7 +1430,7 @@ public:
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
};
- /// \brief Emit the target data mapping code associated with \a D.
+ /// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
@@ -1310,7 +1444,7 @@ public:
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
- /// \brief Emit the data mapping/movement code associated with the directive
+ /// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
@@ -1341,7 +1475,7 @@ public:
/// Translates the native parameter of outlined function if this is required
/// for target.
- /// \param FD Field decl from captured record for the paramater.
+ /// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
@@ -1362,6 +1496,582 @@ public:
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
+
+ /// Emits OpenMP-specific function prolog.
+ /// Required for device constructs.
+ virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) {}
+
+ /// Gets the OpenMP-specific address of the local variable.
+ virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD);
+
+ /// Marks the declaration as already emitted for the device code and returns
+ /// true if it was already marked, false otherwise.
+ bool markAsGlobalTarget(GlobalDecl GD);
+
+};
+
+/// Class that supports the emission of SIMD-only code.
+class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
+public:
+ explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
+ ~CGOpenMPSIMDRuntime() override {}
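+
+ // Illustrative sketch (editorial; not part of this patch): a CodeGenModule
+ // would be expected to select this runtime when only SIMD codegen is
+ // requested (e.g. under -fopenmp-simd); a factory of roughly this shape is
+ // assumed:
+ //
+ //   CGOpenMPRuntime *createOpenMPRuntime(CodeGenModule &CGM) {
+ //     if (CGM.getLangOpts().OpenMPSimd)
+ //       return new CGOpenMPSIMDRuntime(CGM); // no libomp calls emitted
+ //     return new CGOpenMPRuntime(CGM);
+ //   }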
+
+ /// Emits outlined function for the specified OpenMP parallel directive
+ /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
+ /// kmp_int32 BoundID, struct context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+ /// \param InnermostKind Kind of innermost directive (for simple directives it
+ /// is a directive itself, for combined - its innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ llvm::Value *
+ emitParallelOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
+
+ /// Emits outlined function for the specified OpenMP teams directive
+ /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
+ /// kmp_int32 BoundID, struct context_vars*).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+ /// \param InnermostKind Kind of innermost directive (for simple directives it
+ /// is a directive itself, for combined - its innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ llvm::Value *
+ emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar,
+ OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
+
+ /// Emits outlined function for the OpenMP task directive \a D. This
+ /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
+ /// TaskT).
+ /// \param D OpenMP directive.
+ /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+ /// \param PartIDVar Variable for partition id in the current OpenMP untied
+ /// task region.
+ /// \param TaskTVar Variable for task_t argument.
+ /// \param InnermostKind Kind of innermost directive (for simple directives it
+ /// is a directive itself, for combined - its innermost directive).
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ /// \param Tied true if task is generated for tied task, false otherwise.
+ /// \param NumberOfParts Number of parts in untied task. Ignored for tied
+ /// tasks.
+ ///
+ llvm::Value *emitTaskOutlinedFunction(
+ const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
+ const VarDecl *PartIDVar, const VarDecl *TaskTVar,
+ OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
+ bool Tied, unsigned &NumberOfParts) override;
+
+ /// Emits code for parallel or serial call of the \a OutlinedFn with
+ /// variables captured in a record whose address is stored in \a
+ /// CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ ///
+ void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond) override;
+
+ /// Emits a critical region.
+ /// \param CriticalName Name of the critical region.
+ /// \param CriticalOpGen Generator for the statement associated with the given
+ /// critical region.
+ /// \param Hint Value of the 'hint' clause (optional).
+ void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen,
+ SourceLocation Loc,
+ const Expr *Hint = nullptr) override;
+
+ /// Emits a master region.
+ /// \param MasterOpGen Generator for the statement associated with the given
+ /// master region.
+ void emitMasterRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MasterOpGen,
+ SourceLocation Loc) override;
+
+ /// Emits code for a taskyield directive.
+ void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
+
+ /// Emit a taskgroup region.
+ /// \param TaskgroupOpGen Generator for the statement associated with the
+ /// given taskgroup region.
+ void emitTaskgroupRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &TaskgroupOpGen,
+ SourceLocation Loc) override;
+
+ /// Emits a single region.
+ /// \param SingleOpGen Generator for the statement associated with the given
+ /// single region.
+ void emitSingleRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
+ ArrayRef<const Expr *> CopyprivateVars,
+ ArrayRef<const Expr *> DestExprs,
+ ArrayRef<const Expr *> SrcExprs,
+ ArrayRef<const Expr *> AssignmentOps) override;
+
+ /// Emit an ordered region.
+ /// \param OrderedOpGen Generator for the statement associated with the given
+ /// ordered region.
+ void emitOrderedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &OrderedOpGen,
+ SourceLocation Loc, bool IsThreads) override;
+
+ /// Emit an implicit/explicit barrier for OpenMP threads.
+ /// \param Kind Directive for which this implicit barrier call must be
+ /// generated. Must be OMPD_barrier for explicit barrier generation.
+ /// \param EmitChecks true if checks for cancellation barriers need to be
+ /// emitted.
+ /// \param ForceSimpleCall true if a simple barrier call must be emitted,
+ /// false if the runtime class decides which one to emit (simple or with
+ /// cancellation checks).
+ ///
+ void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks = true,
+ bool ForceSimpleCall = false) override;
+
+ /// This is used for non-static scheduled types and when the ordered
+ /// clause is present on the loop construct.
+ /// Depending on the loop schedule, it is necessary to call some runtime
+ /// routine before start of the OpenMP loop to get the loop upper / lower
+ /// bounds \a LB and \a UB and stride \a ST.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ /// \param Ordered true if loop is ordered, false otherwise.
+ /// \param DispatchValues struct containing llvm values for lower bound, upper
+ /// bound, and chunk expression.
+ /// If the chunk expression is the default (nullptr), a chunk of 1 is used.
+ ///
+ void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
+ const OpenMPScheduleTy &ScheduleKind,
+ unsigned IVSize, bool IVSigned, bool Ordered,
+ const DispatchRTInput &DispatchValues) override;
+
+ /// Call the appropriate runtime routine to initialize it before start
+ /// of loop.
+ ///
+ /// This is used only in the case of a static schedule, when the user did
+ /// not specify an ordered clause on the loop construct.
+ /// Depending on the loop schedule, it is necessary to call some runtime
+ /// routine before start of the OpenMP loop to get the loop upper / lower
+ /// bounds LB and UB and stride ST.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param DKind Kind of the directive.
+ /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
+ /// \param Values Input arguments for the construct.
+ ///
+ void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind DKind,
+ const OpenMPScheduleTy &ScheduleKind,
+ const StaticRTInput &Values) override;
+
+ /// Call the appropriate runtime routine to initialize it before start of
+ /// the distribute loop.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
+ /// \param Values Input arguments for the construct.
+ ///
+ void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDistScheduleClauseKind SchedKind,
+ const StaticRTInput &Values) override;
+
+ /// Call the appropriate runtime routine to notify that we finished
+ /// iteration of the ordered loop with the dynamic scheduling.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ ///
+ void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
+ unsigned IVSize, bool IVSigned) override;
+
+ /// Call the appropriate runtime routine to notify that we finished
+ /// all the work with current loop.
+ ///
+ /// \param CGF Reference to current CodeGenFunction.
+ /// \param Loc Clang source location.
+ /// \param DKind Kind of the directive for which the static finish is emitted.
+ ///
+ void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind DKind) override;
+
+ /// Call __kmpc_dispatch_next(
+ /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
+ /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
+ /// kmp_int[32|64] *p_stride);
+ /// \param IVSize Size of the iteration variable in bits.
+ /// \param IVSigned Sign of the iteration variable.
+ /// \param IL Address of the output variable in which the flag of the
+ /// last iteration is returned.
+ /// \param LB Address of the output variable in which the lower iteration
+ /// number is returned.
+ /// \param UB Address of the output variable in which the upper iteration
+ /// number is returned.
+ /// \param ST Address of the output variable in which the stride value is
+ /// returned.
+ llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
+ unsigned IVSize, bool IVSigned, Address IL,
+ Address LB, Address UB, Address ST) override;
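+
+ // Illustrative sketch (editorial; not part of this patch): a dynamically
+ // scheduled loop is driven by repeated dispatch calls of the documented
+ // form, e.g. with the 32-bit signed variant:
+ //
+ //   int32_t Last, LB, UB, ST;
+ //   while (__kmpc_dispatch_next_4(Loc, Tid, &Last, &LB, &UB, &ST))
+ //     for (int32_t I = LB; I <= UB; I += ST)
+ //       Body(I);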
+
+ /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
+ /// clause.
+ /// \param NumThreads An integer value of threads.
+ void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
+ SourceLocation Loc) override;
+
+ /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
+ /// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
+ void emitProcBindClause(CodeGenFunction &CGF,
+ OpenMPProcBindClauseKind ProcBind,
+ SourceLocation Loc) override;
+
+ /// Returns address of the threadprivate variable for the current
+ /// thread.
+ /// \param VD Threadprivate variable.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Loc Location of the reference to threadprivate var.
+ /// \return Address of the threadprivate variable for the current thread.
+ Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
+ Address VDAddr, SourceLocation Loc) override;
+
+ /// Emit code for the initialization of a threadprivate variable. It emits
+ /// a call to the runtime library which adds an initial value to the newly
+ /// created threadprivate variable (if it is not constant) and registers a
+ /// destructor for the variable (if any).
+ /// \param VD Threadprivate variable.
+ /// \param VDAddr Address of the global variable \a VD.
+ /// \param Loc Location of threadprivate declaration.
+ /// \param PerformInit true if initialization expression is not constant.
+ llvm::Function *
+ emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
+ SourceLocation Loc, bool PerformInit,
+ CodeGenFunction *CGF = nullptr) override;
+
+ /// Creates artificial threadprivate variable with name \p Name and type \p
+ /// VarType.
+ /// \param VarType Type of the artificial threadprivate variable.
+ /// \param Name Name of the artificial threadprivate variable.
+ Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
+ QualType VarType,
+ StringRef Name) override;
+
+ /// Emit flush of the variables specified in 'omp flush' directive.
+ /// \param Vars List of variables to flush.
+ void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
+ SourceLocation Loc) override;
+
+ /// Emit task region for the task directive. The task region is
+ /// emitted in several steps:
+ /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
+ /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
+ /// function:
+ /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
+ /// TaskFunction(gtid, tt->part_id, tt->shareds);
+ /// return 0;
+ /// }
+ /// 2. Copy a list of shared variables to field shareds of the resulting
+ /// structure kmp_task_t returned by the previous call (if any).
+ /// 3. Copy a pointer to destructions function to field destructions of the
+ /// resulting structure kmp_task_t.
+ /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
+ /// kmp_task_t *new_task), where new_task is a resulting structure from
+ /// previous items.
+ /// \param D Current task directive.
+ /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
+ /// /*part_id*/, captured_struct */*__context*/);
+ /// \param SharedsTy A type which contains references to the shared variables.
+ /// \param Shareds Context with the list of shared variables from the \p
+ /// TaskFunction.
+ /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
+ /// otherwise.
+ /// \param Data Additional data for task generation like tiedness, final
+ /// state, list of privates etc.
+ void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPExecutableDirective &D, llvm::Value *TaskFunction,
+ QualType SharedsTy, Address Shareds, const Expr *IfCond,
+ const OMPTaskDataTy &Data) override;
+
+ /// Emit task region for the taskloop directive. The taskloop region is
+ /// emitted in several steps:
+ /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
+ /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+ /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
+ /// function:
+ /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
+ /// TaskFunction(gtid, tt->part_id, tt->shareds);
+ /// return 0;
+ /// }
+ /// 2. Copy a list of shared variables to field shareds of the resulting
+ /// structure kmp_task_t returned by the previous call (if any).
+ /// 3. Copy a pointer to destructions function to field destructions of the
+ /// resulting structure kmp_task_t.
+ /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
+ /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
+ /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where new_task
+ /// is a resulting structure from previous items.
+ /// \param D Current task directive.
+ /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
+ /// /*part_id*/, captured_struct */*__context*/);
+ /// \param SharedsTy A type which contains references to the shared variables.
+ /// \param Shareds Context with the list of shared variables from the \p
+ /// TaskFunction.
+ /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
+ /// otherwise.
+ /// \param Data Additional data for task generation like tiedness, final
+ /// state, list of privates etc.
+ void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPLoopDirective &D, llvm::Value *TaskFunction,
+ QualType SharedsTy, Address Shareds, const Expr *IfCond,
+ const OMPTaskDataTy &Data) override;
+
+ /// Emit code for the reduction clause. The following code should be
+ /// emitted for the reduction:
+ /// \code
+ ///
+ /// static kmp_critical_name lock = { 0 };
+ ///
+ /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
+ /// ...
+ /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
+ /// ...
+ /// }
+ ///
+ /// ...
+ /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
+ /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
+ /// RedList, reduce_func, &<lock>)) {
+ /// case 1:
+ /// ...
+ /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
+ /// ...
+ /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
+ /// break;
+ /// case 2:
+ /// ...
+ /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
+ /// ...
+ /// break;
+ /// default:;
+ /// }
+ /// \endcode
+ ///
+ /// \param Privates List of private copies for original reduction arguments.
+ /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
+ /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
+ /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
+ /// or 'operator binop(LHS, RHS)'.
+ /// \param Options List of options for reduction codegen:
+ /// WithNowait true if parent directive has also nowait clause, false
+ /// otherwise.
+ /// SimpleReduction Emit reduction operation only. Used for omp simd
+ /// directive on the host.
+ /// ReductionKind The kind of reduction to perform.
+ void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps,
+ ReductionOptionsTy Options) override;
+
+ /// Emit code for the initialization of the task reduction clause. The
+ /// following code should be emitted for the reduction:
+ /// \code
+ ///
+ /// _task_red_item_t red_data[n];
+ /// ...
+ /// red_data[i].shar = &origs[i];
+ /// red_data[i].size = sizeof(origs[i]);
+ /// red_data[i].f_init = (void*)RedInit<i>;
+ /// red_data[i].f_fini = (void*)RedDest<i>;
+ /// red_data[i].f_comb = (void*)RedOp<i>;
+ /// red_data[i].flags = <Flag_i>;
+ /// ...
+ /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
+ /// \endcode
+ ///
+ /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
+ /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
+ /// \param Data Additional data for task generation like tiedness, final
+ /// state, list of privates, reductions etc.
+ llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ const OMPTaskDataTy &Data) override;
+
+ /// Required to resolve existing problems in the runtime. Emits threadprivate
+ /// variables to store the size of the VLAs/array sections for
+ /// initializer/combiner/finalizer functions, and emits a threadprivate
+ /// variable to store the pointer to the original reduction item for the
+ /// custom initializer defined by the declare reduction construct.
+ /// \param RCG Allows reuse of existing data for the reductions.
+ /// \param N Reduction item for which fixups must be emitted.
+ void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
+ ReductionCodeGen &RCG, unsigned N) override;
+
+ /// Get the address of `void *` type of the private copy of the reduction
+ /// item specified by the \p SharedLVal.
+ /// \param ReductionsPtr Pointer to the reduction data returned by the
+ /// emitTaskReductionInit function.
+ /// \param SharedLVal Address of the original reduction item.
+ Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
+ llvm::Value *ReductionsPtr,
+ LValue SharedLVal) override;
+
+ /// Emit code for 'taskwait' directive.
+ void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
+
+ /// Emit code for 'cancellation point' construct.
+ /// \param CancelRegion Region kind for which the cancellation point must be
+ /// emitted.
+ ///
+ void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind CancelRegion) override;
+
+ /// Emit code for 'cancel' construct.
+ /// \param IfCond Condition in the associated 'if' clause, if it was
+ /// specified, nullptr otherwise.
+ /// \param CancelRegion Region kind for which the cancel must be emitted.
+ ///
+ void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const Expr *IfCond,
+ OpenMPDirectiveKind CancelRegion) override;
+
+ /// Emit outlined function for 'target' directive.
+ /// \param D Directive to emit.
+ /// \param ParentName Name of the function that encloses the target region.
+ /// \param OutlinedFn Outlined function value to be defined by this call.
+ /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+ /// \param IsOffloadEntry True if the outlined function is an offload entry.
+ /// \param CodeGen Code generation sequence for the \a D directive.
+ /// An outlined function may not be an entry if, e.g. the if clause always
+ /// evaluates to false.
+ void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+ StringRef ParentName,
+ llvm::Function *&OutlinedFn,
+ llvm::Constant *&OutlinedFnID,
+ bool IsOffloadEntry,
+ const RegionCodeGenTy &CodeGen) override;
+
+ /// Emit the target offloading code associated with \a D. The emitted
+ /// code attempts to offload the execution to the device; in the event of
+ /// a failure it executes the host version outlined in \a OutlinedFn.
+ /// \param D Directive to emit.
+ /// \param OutlinedFn Host version of the code to be offloaded.
+ /// \param OutlinedFnID ID of host version of the code to be offloaded.
+ /// \param IfCond Expression evaluated in if clause associated with the target
+ /// directive, or null if no if clause is used.
+ /// \param Device Expression evaluated in device clause associated with the
+ /// target directive, or null if no device clause is used.
+ void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::Value *OutlinedFn, llvm::Value *OutlinedFnID,
+ const Expr *IfCond, const Expr *Device) override;
+
+ /// Emit the target regions enclosed in \a GD function definition or
+ /// the function itself in case it is a valid device function. Returns true if
+ /// \a GD was dealt with successfully.
+ /// \param GD Function to scan.
+ bool emitTargetFunctions(GlobalDecl GD) override;
+
+ /// Emit the global variable if it is a valid device global variable.
+ /// Returns true if \a GD was dealt with successfully.
+ /// \param GD Variable declaration to emit.
+ bool emitTargetGlobalVariable(GlobalDecl GD) override;
+
+ /// Emit the global \a GD if it is meaningful for the target. Returns true
+ /// if it was emitted successfully.
+ /// \param GD Global to scan.
+ bool emitTargetGlobal(GlobalDecl GD) override;
+
+ /// Creates the offloading descriptor in the event any target region
+ /// was emitted in the current module and returns the function that registers
+ /// it.
+ llvm::Function *emitRegistrationFunction() override;
+
+ /// Emits code for teams call of the \a OutlinedFn with
+ /// variables captured in a record whose address is stored in \a
+ /// CapturedStruct.
+ /// \param OutlinedFn Outlined function to be run by team masters. Type of
+ /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
+ /// \param CapturedVars A pointer to the record with the references to
+ /// variables used in \a OutlinedFn function.
+ ///
+ void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ SourceLocation Loc, llvm::Value *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars) override;
+
+ /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
+ /// for num_teams clause.
+ /// \param NumTeams An integer expression of teams.
+ /// \param ThreadLimit An integer expression of threads.
+ void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
+ const Expr *ThreadLimit, SourceLocation Loc) override;
+
+ /// Emit the target data mapping code associated with \a D.
+ /// \param D Directive to emit.
+ /// \param IfCond Expression evaluated in if clause associated with the
+ /// target directive, or null if no if clause is used.
+ /// \param Device Expression evaluated in device clause associated with the
+ /// target directive, or null if no device clause is used.
+ /// \param Info A record used to store information that needs to be preserved
+ /// until the region is closed.
+ void emitTargetDataCalls(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D, const Expr *IfCond,
+ const Expr *Device, const RegionCodeGenTy &CodeGen,
+ TargetDataInfo &Info) override;
+
+ /// Emit the data mapping/movement code associated with the directive
+ /// \a D that should be of the form 'target [{enter|exit} data | update]'.
+ /// \param D Directive to emit.
+ /// \param IfCond Expression evaluated in if clause associated with the target
+ /// directive, or null if no if clause is used.
+ /// \param Device Expression evaluated in device clause associated with the
+ /// target directive, or null if no device clause is used.
+ void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ const Expr *IfCond,
+ const Expr *Device) override;
+
+ /// Emit initialization for doacross loop nesting support.
+ /// \param D Loop-based construct used in doacross nesting construct.
+ void emitDoacrossInit(CodeGenFunction &CGF,
+ const OMPLoopDirective &D) override;
+
+ /// Emit code for doacross ordered directive with 'depend' clause.
+ /// \param C 'depend' clause with 'sink|source' dependency kind.
+ void emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDependClause *C) override;
+
+ /// Translates the native parameter of outlined function if this is required
+ /// for target.
+ /// \param FD Field decl from captured record for the parameter.
+ /// \param NativeParam Parameter itself.
+ const VarDecl *translateParameter(const FieldDecl *FD,
+ const VarDecl *NativeParam) const override;
+
+ /// Gets the address of the native argument based on the address of the
+ /// target-specific parameter.
+ /// \param NativeParam Parameter itself.
+ /// \param TargetParam Corresponding target-specific parameter.
+ Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
+ const VarDecl *TargetParam) const override;
};
} // namespace CodeGen
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index 7b2993cfd38d..036b5371fe0b 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -13,33 +13,35 @@
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeNVPTX.h"
-#include "clang/AST/DeclOpenMP.h"
#include "CodeGenFunction.h"
+#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
using namespace CodeGen;
namespace {
enum OpenMPRTLFunctionNVPTX {
- /// \brief Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
+ /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
/// int16_t RequiresOMPRuntime);
OMPRTL_NVPTX__kmpc_kernel_init,
- /// \brief Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
+ /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
OMPRTL_NVPTX__kmpc_kernel_deinit,
- /// \brief Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
+ /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
/// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
OMPRTL_NVPTX__kmpc_spmd_kernel_init,
- /// \brief Call to void __kmpc_spmd_kernel_deinit();
+ /// Call to void __kmpc_spmd_kernel_deinit();
OMPRTL_NVPTX__kmpc_spmd_kernel_deinit,
- /// \brief Call to void __kmpc_kernel_prepare_parallel(void
- /// *outlined_function, void ***args, kmp_int32 nArgs, int16_t
+ /// Call to void __kmpc_kernel_prepare_parallel(void
+ /// *outlined_function, int16_t
/// IsOMPRuntimeInitialized);
OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
- /// \brief Call to bool __kmpc_kernel_parallel(void **outlined_function, void
- /// ***args, int16_t IsOMPRuntimeInitialized);
+ /// Call to bool __kmpc_kernel_parallel(void **outlined_function,
+ /// int16_t IsOMPRuntimeInitialized);
OMPRTL_NVPTX__kmpc_kernel_parallel,
- /// \brief Call to void __kmpc_kernel_end_parallel();
+ /// Call to void __kmpc_kernel_end_parallel();
OMPRTL_NVPTX__kmpc_kernel_end_parallel,
/// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
/// global_tid);
@@ -47,19 +49,25 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
/// global_tid);
OMPRTL_NVPTX__kmpc_end_serialized_parallel,
- /// \brief Call to int32_t __kmpc_shuffle_int32(int32_t element,
+ /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
/// int16_t lane_offset, int16_t warp_size);
OMPRTL_NVPTX__kmpc_shuffle_int32,
- /// \brief Call to int64_t __kmpc_shuffle_int64(int64_t element,
+ /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
/// int16_t lane_offset, int16_t warp_size);
OMPRTL_NVPTX__kmpc_shuffle_int64,
- /// \brief Call to __kmpc_nvptx_parallel_reduce_nowait(kmp_int32
+ /// Call to __kmpc_nvptx_parallel_reduce_nowait(kmp_int32
/// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
/// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
/// lane_offset, int16_t shortCircuit),
/// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
OMPRTL_NVPTX__kmpc_parallel_reduce_nowait,
- /// \brief Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
+ /// Call to __kmpc_nvptx_simd_reduce_nowait(kmp_int32
+ /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
+ /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
+ /// lane_offset, int16_t shortCircuit),
+ /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
+ OMPRTL_NVPTX__kmpc_simd_reduce_nowait,
+ /// Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
/// int32_t num_vars, size_t reduce_size, void *reduce_data,
/// void (*kmp_ShuffleReductFctPtr)(void *rhs, int16_t lane_id, int16_t
/// lane_offset, int16_t shortCircuit),
@@ -69,17 +77,38 @@ enum OpenMPRTLFunctionNVPTX {
/// void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad, int32_t
/// index, int32_t width, int32_t reduce))
OMPRTL_NVPTX__kmpc_teams_reduce_nowait,
- /// \brief Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
- OMPRTL_NVPTX__kmpc_end_reduce_nowait
+ /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
+ OMPRTL_NVPTX__kmpc_end_reduce_nowait,
+ /// Call to void __kmpc_data_sharing_init_stack();
+ OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
+ /// Call to void __kmpc_data_sharing_init_stack_spmd();
+ OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
+ /// Call to void* __kmpc_data_sharing_push_stack(size_t size,
+ /// int16_t UseSharedMemory);
+ OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
+ /// Call to void __kmpc_data_sharing_pop_stack(void *a);
+ OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
+ /// Call to void __kmpc_begin_sharing_variables(void ***args,
+ /// size_t n_args);
+ OMPRTL_NVPTX__kmpc_begin_sharing_variables,
+ /// Call to void __kmpc_end_sharing_variables();
+ OMPRTL_NVPTX__kmpc_end_sharing_variables,
+ /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
+ OMPRTL_NVPTX__kmpc_get_shared_variables,
+ /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
+ /// global_tid);
+ OMPRTL_NVPTX__kmpc_parallel_level,
+ /// Call to int8_t __kmpc_is_spmd_exec_mode();
+ OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
};
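// Illustrative sketch (editorial; not part of this patch): the data-sharing
// entries above are paired per region; the generated sequence is roughly
//
//   void *Frame = __kmpc_data_sharing_push_stack(Size, /*UseSharedMemory=*/0);
//   ... // escaped locals live in the pushed frame for the region's lifetime
//   __kmpc_data_sharing_pop_stack(Frame);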
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
- llvm::Value *EnterCallee;
+ llvm::Value *EnterCallee = nullptr;
ArrayRef<llvm::Value *> EnterArgs;
- llvm::Value *ExitCallee;
+ llvm::Value *ExitCallee = nullptr;
ArrayRef<llvm::Value *> ExitArgs;
- bool Conditional;
+ bool Conditional = false;
llvm::BasicBlock *ContBlock = nullptr;
public:
@@ -109,21 +138,21 @@ public:
}
};
-// A class to track the execution mode when codegening directives within
-// a target region. The appropriate mode (generic/spmd) is set on entry
-// to the target region and used by containing directives such as 'parallel'
-// to emit optimized code.
+/// A class to track the execution mode when codegening directives within
+/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
+/// to the target region and used by containing directives such as 'parallel'
+/// to emit optimized code.
class ExecutionModeRAII {
private:
CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;
public:
- ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode,
- CGOpenMPRuntimeNVPTX::ExecutionMode NewMode)
+ ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode, bool IsSPMD)
: Mode(Mode) {
SavedMode = Mode;
- Mode = NewMode;
+ Mode = IsSPMD ? CGOpenMPRuntimeNVPTX::EM_SPMD
+ : CGOpenMPRuntimeNVPTX::EM_NonSPMD;
}
~ExecutionModeRAII() { Mode = SavedMode; }
};
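// Illustrative sketch (editorial; not part of this patch): emitters are
// expected to scope the mode with this RAII object, e.g.
//
//   ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/true);
//   // ... emit the SPMD kernel body; the saved mode is restored on scope
//   // exit by the destructor.
//
// where CurrentExecutionMode stands in for the runtime's mode-tracking
// member (the exact name is assumed here).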
@@ -149,6 +178,353 @@ enum NamedBarrier : unsigned {
/// barrier.
NB_Parallel = 1,
};
+
+/// Get the list of variables that can escape their declaration context.
+class CheckVarsEscapingDeclContext final
+ : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
+ CodeGenFunction &CGF;
+ llvm::SetVector<const ValueDecl *> EscapedDecls;
+ llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
+ llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
+ RecordDecl *GlobalizedRD = nullptr;
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
+ bool AllEscaped = false;
+ bool IsForCombinedParallelRegion = false;
+
+ static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+ isDeclareTargetDeclaration(const ValueDecl *VD) {
+ for (const Decl *D : VD->redecls()) {
+ if (!D->hasAttrs())
+ continue;
+ if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getMapType();
+ }
+ return llvm::None;
+ }
+
+ void markAsEscaped(const ValueDecl *VD) {
+ // Do not globalize declare target variables.
+ if (!isa<VarDecl>(VD) || isDeclareTargetDeclaration(VD))
+ return;
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ // Variables captured by value must be globalized.
+ if (auto *CSI = CGF.CapturedStmtInfo) {
+ if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
+ // Check if we need to capture the variable that was already captured
+ // by value in the outer region.
+ if (!IsForCombinedParallelRegion) {
+ if (!FD->hasAttrs())
+ return;
+ const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
+ if (!Attr)
+ return;
+ if (!isOpenMPPrivate(
+ static_cast<OpenMPClauseKind>(Attr->getCaptureKind())) ||
+ Attr->getCaptureKind() == OMPC_map)
+ return;
+ }
+ if (!FD->getType()->isReferenceType()) {
+ assert(!VD->getType()->isVariablyModifiedType() &&
+ "Parameter captured by value with variably modified type");
+ EscapedParameters.insert(VD);
+ } else if (!IsForCombinedParallelRegion) {
+ return;
+ }
+ }
+ }
+ if ((!CGF.CapturedStmtInfo ||
+ (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
+ VD->getType()->isReferenceType())
+ // Do not globalize variables with reference type.
+ return;
+ if (VD->getType()->isVariablyModifiedType())
+ EscapedVariableLengthDecls.insert(VD);
+ else
+ EscapedDecls.insert(VD);
+ }
+
+ void VisitValueDecl(const ValueDecl *VD) {
+ if (VD->getType()->isLValueReferenceType())
+ markAsEscaped(VD);
+ if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
+ if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = VD->getType()->isLValueReferenceType();
+ Visit(VarD->getInit());
+ AllEscaped = SavedAllEscaped;
+ }
+ }
+ }
+ void VisitOpenMPCapturedStmt(const CapturedStmt *S,
+ ArrayRef<OMPClause *> Clauses,
+ bool IsCombinedParallelRegion) {
+ if (!S)
+ return;
+ for (const CapturedStmt::Capture &C : S->captures()) {
+ if (C.capturesVariable() && !C.capturesVariableByCopy()) {
+ const ValueDecl *VD = C.getCapturedVar();
+ bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
+ if (IsCombinedParallelRegion) {
+ // Check if the variable is privatized in the combined construct and
+ // whether those private copies must be shared in the inner parallel
+ // directive.
+ IsForCombinedParallelRegion = false;
+ for (const OMPClause *C : Clauses) {
+ if (!isOpenMPPrivate(C->getClauseKind()) ||
+ C->getClauseKind() == OMPC_reduction ||
+ C->getClauseKind() == OMPC_linear ||
+ C->getClauseKind() == OMPC_private)
+ continue;
+ ArrayRef<const Expr *> Vars;
+ if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
+ Vars = PC->getVarRefs();
+ else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
+ Vars = PC->getVarRefs();
+ else
+ llvm_unreachable("Unexpected clause.");
+ for (const auto *E : Vars) {
+ const Decl *D =
+ cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
+ if (D == VD->getCanonicalDecl()) {
+ IsForCombinedParallelRegion = true;
+ break;
+ }
+ }
+ if (IsForCombinedParallelRegion)
+ break;
+ }
+ }
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
+ }
+ }
+ }
+
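+ // Illustrative sketch (editorial; not part of this patch): the
+ // combined-parallel handling above covers source such as
+ //
+ //   #pragma omp target teams distribute parallel for firstprivate(a)
+ //   for (int i = 0; i < N; ++i)
+ //     use(a);
+ //
+ // where the firstprivate copy of 'a' made by the combined construct must
+ // still be shared with, and therefore globalized for, the inner parallel
+ // region.
+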
+ typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
+ static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
+ return P1.first > P2.first;
+ }
+
+ void buildRecordForGlobalizedVars() {
+ assert(!GlobalizedRD &&
+ "Record for globalized variables is built already.");
+ if (EscapedDecls.empty())
+ return;
+ ASTContext &C = CGF.getContext();
+ SmallVector<VarsDataTy, 4> GlobalizedVars;
+ for (const ValueDecl *D : EscapedDecls)
+ GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
+ std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
+ stable_sort_comparator);
+ // Build struct _globalized_locals_ty {
+ // /* globalized vars */
+ // };
+ GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
+ GlobalizedRD->startDefinition();
+ for (const auto &Pair : GlobalizedVars) {
+ const ValueDecl *VD = Pair.second;
+ QualType Type = VD->getType();
+ if (Type->isLValueReferenceType())
+ Type = C.getPointerType(Type.getNonReferenceType());
+ else
+ Type = Type.getNonReferenceType();
+ SourceLocation Loc = VD->getLocation();
+ auto *Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ GlobalizedRD->addDecl(Field);
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
+ E(VD->getAttrs().end());
+ I != E; ++I)
+ Field->addAttr(*I);
+ }
+ MappedDeclsFields.try_emplace(VD, Field);
+ }
+ GlobalizedRD->completeDefinition();
+ }
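+  // For example (a sketch, not emitted verbatim): for escaped locals
+  //   double D; int I; char C;
+  // the code above builds roughly
+  //   struct _globalized_locals_ty { double D; int I; char C; };
+  // with the fields ordered by decreasing alignment.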
+
+public:
+ CheckVarsEscapingDeclContext(CodeGenFunction &CGF) : CGF(CGF) {}
+ virtual ~CheckVarsEscapingDeclContext() = default;
+ void VisitDeclStmt(const DeclStmt *S) {
+ if (!S)
+ return;
+ for (const Decl *D : S->decls())
+ if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
+ VisitValueDecl(VD);
+ }
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
+ if (!D)
+ return;
+ if (!D->hasAssociatedStmt())
+ return;
+ if (const auto *S =
+ dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
+ // Do not analyze directives that do not actually require capturing,
+ // like `omp for` or `omp simd` directives.
+ llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
+ if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
+ VisitStmt(S->getCapturedStmt());
+ return;
+ }
+ VisitOpenMPCapturedStmt(
+ S, D->clauses(),
+ CaptureRegions.back() == OMPD_parallel &&
+ isOpenMPDistributeDirective(D->getDirectiveKind()));
+ }
+ }
+ void VisitCapturedStmt(const CapturedStmt *S) {
+ if (!S)
+ return;
+ for (const CapturedStmt::Capture &C : S->captures()) {
+ if (C.capturesVariable() && !C.capturesVariableByCopy()) {
+ const ValueDecl *VD = C.getCapturedVar();
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ }
+ }
+ }
+ void VisitLambdaExpr(const LambdaExpr *E) {
+ if (!E)
+ return;
+ for (const LambdaCapture &C : E->captures()) {
+ if (C.capturesVariable()) {
+ if (C.getCaptureKind() == LCK_ByRef) {
+ const ValueDecl *VD = C.getCapturedVar();
+ markAsEscaped(VD);
+ if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ }
+ }
+ }
+ }
+ void VisitBlockExpr(const BlockExpr *E) {
+ if (!E)
+ return;
+ for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
+ if (C.isByRef()) {
+ const VarDecl *VD = C.getVariable();
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
+ VisitValueDecl(VD);
+ }
+ }
+ }
+ void VisitCallExpr(const CallExpr *E) {
+ if (!E)
+ return;
+ for (const Expr *Arg : E->arguments()) {
+ if (!Arg)
+ continue;
+ if (Arg->isLValue()) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = true;
+ Visit(Arg);
+ AllEscaped = SavedAllEscaped;
+ } else {
+ Visit(Arg);
+ }
+ }
+ Visit(E->getCallee());
+ }
+ void VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (!E)
+ return;
+ const ValueDecl *VD = E->getDecl();
+ if (AllEscaped)
+ markAsEscaped(VD);
+ if (isa<OMPCapturedExprDecl>(VD))
+ VisitValueDecl(VD);
+ else if (const auto *VarD = dyn_cast<VarDecl>(VD))
+ if (VarD->isInitCapture())
+ VisitValueDecl(VD);
+ }
+ void VisitUnaryOperator(const UnaryOperator *E) {
+ if (!E)
+ return;
+ if (E->getOpcode() == UO_AddrOf) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = true;
+ Visit(E->getSubExpr());
+ AllEscaped = SavedAllEscaped;
+ } else {
+ Visit(E->getSubExpr());
+ }
+ }
+ void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ if (!E)
+ return;
+ if (E->getCastKind() == CK_ArrayToPointerDecay) {
+ const bool SavedAllEscaped = AllEscaped;
+ AllEscaped = true;
+ Visit(E->getSubExpr());
+ AllEscaped = SavedAllEscaped;
+ } else {
+ Visit(E->getSubExpr());
+ }
+ }
+ void VisitExpr(const Expr *E) {
+ if (!E)
+ return;
+ bool SavedAllEscaped = AllEscaped;
+ if (!E->isLValue())
+ AllEscaped = false;
+ for (const Stmt *Child : E->children())
+ if (Child)
+ Visit(Child);
+ AllEscaped = SavedAllEscaped;
+ }
+ void VisitStmt(const Stmt *S) {
+ if (!S)
+ return;
+ for (const Stmt *Child : S->children())
+ if (Child)
+ Visit(Child);
+ }
+
+  /// Returns the record that handles all the escaped local variables and is
+  /// used instead of their original storage.
+ const RecordDecl *getGlobalizedRecord() {
+ if (!GlobalizedRD)
+ buildRecordForGlobalizedVars();
+ return GlobalizedRD;
+ }
+
+ /// Returns the field in the globalized record for the escaped variable.
+ const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
+ assert(GlobalizedRD &&
+ "Record for globalized variables must be generated already.");
+ auto I = MappedDeclsFields.find(VD);
+ if (I == MappedDeclsFields.end())
+ return nullptr;
+ return I->getSecond();
+ }
+
+ /// Returns the list of the escaped local variables/parameters.
+ ArrayRef<const ValueDecl *> getEscapedDecls() const {
+ return EscapedDecls.getArrayRef();
+ }
+
+  /// Returns the set of escaped local variables that are actually parameters
+  /// passed by value.
+ const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
+ return EscapedParameters;
+ }
+
+ /// Returns the list of the escaped variables with the variably modified
+ /// types.
+ ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
+ return EscapedVariableLengthDecls.getArrayRef();
+ }
+};
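+
+// A minimal usage sketch, assuming the usual ConstStmtVisitor interface (the
+// real call sites appear later in this file):
+//   CheckVarsEscapingDeclContext VarChecker(CGF);
+//   VarChecker.Visit(Body);
+//   const RecordDecl *GlobalizedRD = VarChecker.getGlobalizedRecord();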
} // anonymous namespace
/// Get the GPU warp size.
@@ -223,12 +599,12 @@ static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) {
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
- bool IsInSpmdExecutionMode = false) {
+ bool IsInSPMDExecutionMode = false) {
CGBuilderTy &Bld = CGF.Builder;
- return IsInSpmdExecutionMode
+ return IsInSPMDExecutionMode
? getNVPTXNumThreads(CGF)
- : Bld.CreateSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
- "thread_limit");
+ : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
+ "thread_limit");
}
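+// For example, with 128 threads per CTA and a warp size of 32, the generic
+// (non-SPMD) path above yields thread_limit = 128 - 32 = 96, reserving the
+// last warp for the master; SPMD mode uses all 128 threads.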
/// Get the thread id of the OMP master thread.
@@ -243,96 +619,295 @@ static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
llvm::Value *NumThreads = getNVPTXNumThreads(CGF);
// We assume that the warp size is a power of 2.
- llvm::Value *Mask = Bld.CreateSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));
+ llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));
- return Bld.CreateAnd(Bld.CreateSub(NumThreads, Bld.getInt32(1)),
+ return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
Bld.CreateNot(Mask), "master_tid");
}
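+// E.g. with 128 threads and warp size 32 the expression above gives
+// master_tid = (128 - 1) & ~(32 - 1) = 96, the first thread of the last warp.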
CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
- CodeGenModule &CGM)
- : WorkerFn(nullptr), CGFI(nullptr) {
+ CodeGenModule &CGM, SourceLocation Loc)
+ : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
+ Loc(Loc) {
createWorkerFunction(CGM);
}
void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
CodeGenModule &CGM) {
  // Create a worker function with no arguments.
- CGFI = &CGM.getTypes().arrangeNullaryFunction();
WorkerFn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage,
- /* placeholder */ "_worker", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
+ CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
+ /*placeholder=*/"_worker", &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
+ WorkerFn->setDoesNotRecurse();
}
-bool CGOpenMPRuntimeNVPTX::isInSpmdExecutionMode() const {
- return CurrentExecutionMode == CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd;
+CGOpenMPRuntimeNVPTX::ExecutionMode
+CGOpenMPRuntimeNVPTX::getExecutionMode() const {
+ return CurrentExecutionMode;
+}
+
+static CGOpenMPRuntimeNVPTX::DataSharingMode
+getDataSharingMode(CodeGenModule &CGM) {
+ return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA
+ : CGOpenMPRuntimeNVPTX::Generic;
+}
+
+/// Checks if \p Body is a \a CompoundStmt and, if it has exactly one child
+/// statement, returns that child; otherwise returns \p Body itself.
+static const Stmt *getSingleCompoundChild(const Stmt *Body) {
+ if (const auto *C = dyn_cast<CompoundStmt>(Body))
+ if (C->size() == 1)
+ return C->body_front();
+ return Body;
+}
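+// E.g. for a captured body of the form `{ #pragma omp parallel ... }` the
+// helper above returns the parallel directive itself rather than the
+// enclosing CompoundStmt.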
+
+/// Checks if the parallel directive has an 'if' clause with a non-constant or
+/// false condition, or a 'num_threads' clause that strictly specifies the
+/// number of threads; such directives must be run in non-SPMD mode.
+static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ if (D.hasClausesOfKind<OMPNumThreadsClause>())
+ return true;
+ for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
+ OpenMPDirectiveKind NameModifier = C->getNameModifier();
+ if (NameModifier != OMPD_parallel && NameModifier != OMPD_unknown)
+ continue;
+ const Expr *Cond = C->getCondition();
+ bool Result;
+ if (!Cond->EvaluateAsBooleanCondition(Result, Ctx) || !Result)
+ return true;
+ }
+ return false;
}
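+// E.g. both of the following force non-SPMD execution:
+//   #pragma omp target parallel num_threads(n)
+//   #pragma omp target parallel if(parallel: dynamically_evaluated_cond)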
-static CGOpenMPRuntimeNVPTX::ExecutionMode
-getExecutionModeForDirective(CodeGenModule &CGM,
- const OMPExecutableDirective &D) {
+/// Checks for an inner (nested) SPMD construct, if any.
+static bool hasNestedSPMDDirective(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ const auto *CS = D.getInnermostCapturedStmt();
+ const auto *Body = CS->getCapturedStmt()->IgnoreContainers();
+ const Stmt *ChildStmt = getSingleCompoundChild(Body);
+
+ if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
+ switch (D.getDirectiveKind()) {
+ case OMPD_target:
+ if (isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
+ return true;
+ if (DKind == OMPD_teams || DKind == OMPD_teams_distribute) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
+ if (!Body)
+ return false;
+ ChildStmt = getSingleCompoundChild(Body);
+ if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NND))
+ return true;
+ if (DKind == OMPD_distribute) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
+ if (!Body)
+ return false;
+ ChildStmt = getSingleCompoundChild(Body);
+ if (!ChildStmt)
+ return false;
+ if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ return isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NND);
+ }
+ }
+ }
+ }
+ return false;
+ case OMPD_target_teams:
+ if (isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
+ return true;
+ if (DKind == OMPD_distribute) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
+ if (!Body)
+ return false;
+ ChildStmt = getSingleCompoundChild(Body);
+ if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ return isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NND);
+ }
+ }
+ return false;
+ case OMPD_target_teams_distribute:
+ return isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
+ case OMPD_target_simd:
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_unknown:
+ llvm_unreachable("Unexpected directive.");
+ }
+ }
+
+ return false;
+}
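+// E.g. the following nest is detected as SPMD-capable, because the parallel
+// directive is the single child at every level and carries no if or
+// num_threads clause:
+//   #pragma omp target
+//   #pragma omp teams
+//   #pragma omp parallel for
+//   for (...) ...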
+
+static bool supportsSPMDExecutionMode(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
switch (DirectiveKind) {
case OMPD_target:
case OMPD_target_teams:
- return CGOpenMPRuntimeNVPTX::ExecutionMode::Generic;
+ case OMPD_target_teams_distribute:
+ return hasNestedSPMDDirective(Ctx, D);
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
- return CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd;
- default:
- llvm_unreachable("Unsupported directive on NVPTX device.");
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ return !hasParallelIfNumThreadsClause(Ctx, D);
+ case OMPD_target_simd:
+ case OMPD_target_teams_distribute_simd:
+ return false;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_unknown:
+ break;
}
- llvm_unreachable("Unsupported directive on NVPTX device.");
+ llvm_unreachable(
+ "Unknown programming model for OpenMP directive on NVPTX target.");
}
-void CGOpenMPRuntimeNVPTX::emitGenericKernel(const OMPExecutableDirective &D,
+void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionModeRAII ModeRAII(CurrentExecutionMode,
- CGOpenMPRuntimeNVPTX::ExecutionMode::Generic);
+ ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/false);
EntryFunctionState EST;
- WorkerFunctionState WST(CGM);
+ WorkerFunctionState WST(CGM, D.getLocStart());
Work.clear();
WrapperFunctionsMap.clear();
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
- CGOpenMPRuntimeNVPTX &RT;
CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
public:
- NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
- CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
+ NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
- : RT(RT), EST(EST), WST(WST) {}
+ : EST(EST), WST(WST) {}
void Enter(CodeGenFunction &CGF) override {
- RT.emitGenericEntryHeader(CGF, EST, WST);
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
+ .emitNonSPMDEntryHeader(CGF, EST, WST);
}
void Exit(CodeGenFunction &CGF) override {
- RT.emitGenericEntryFooter(CGF, EST);
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
+ .emitNonSPMDEntryFooter(CGF, EST);
}
- } Action(*this, EST, WST);
+ } Action(EST, WST);
CodeGen.setAction(Action);
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
- // Create the worker function
- emitWorkerFunction(WST);
-
// Now change the name of the worker function to correspond to this target
// region's entry function.
- WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
+ WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
+
+ // Create the worker function
+ emitWorkerFunction(WST);
}
// Setup NVPTX threads for master-worker OpenMP scheme.
-void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
+void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
EntryFunctionState &EST,
WorkerFunctionState &WST) {
CGBuilderTy &Bld = CGF.Builder;
@@ -342,20 +917,22 @@ void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
EST.ExitBB = CGF.createBasicBlock(".exit");
- auto *IsWorker =
+ llvm::Value *IsWorker =
Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
CGF.EmitBlock(WorkerBB);
- emitCall(CGF, WST.WorkerFn);
+ emitCall(CGF, WST.Loc, WST.WorkerFn);
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(MasterCheckBB);
- auto *IsMaster =
+ llvm::Value *IsMaster =
Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
CGF.EmitBlock(MasterBB);
+ IsInTargetMasterThreadRegion = true;
+ // SEQUENTIAL (MASTER) REGION START
// First action in sequential region:
// Initialize the state of the OpenMP runtime library on the GPU.
// TODO: Optimize runtime initialization and pass in correct value.
@@ -363,10 +940,23 @@ void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
Bld.getInt16(/*RequiresOMPRuntime=*/1)};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
+
+ // For data sharing, we need to initialize the stack.
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_init_stack));
+
+ emitGenericVarsProlog(CGF, WST.Loc);
}
-void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
+void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
+ IsInTargetMasterThreadRegion = false;
+ if (!CGF.HaveInsertPoint())
+ return;
+
+ emitGenericVarsEpilog(CGF);
+
if (!EST.ExitBB)
EST.ExitBB = CGF.createBasicBlock(".exit");
@@ -388,14 +978,13 @@ void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
EST.ExitBB = nullptr;
}
-void CGOpenMPRuntimeNVPTX::emitSpmdKernel(const OMPExecutableDirective &D,
+void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionModeRAII ModeRAII(CurrentExecutionMode,
- CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd);
+ ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/true);
EntryFunctionState EST;
// Emit target region as a standalone region.
@@ -410,10 +999,10 @@ void CGOpenMPRuntimeNVPTX::emitSpmdKernel(const OMPExecutableDirective &D,
const OMPExecutableDirective &D)
: RT(RT), EST(EST), D(D) {}
void Enter(CodeGenFunction &CGF) override {
- RT.emitSpmdEntryHeader(CGF, EST, D);
+ RT.emitSPMDEntryHeader(CGF, EST, D);
}
void Exit(CodeGenFunction &CGF) override {
- RT.emitSpmdEntryFooter(CGF, EST);
+ RT.emitSPMDEntryFooter(CGF, EST);
}
} Action(*this, EST, D);
CodeGen.setAction(Action);
@@ -421,10 +1010,10 @@ void CGOpenMPRuntimeNVPTX::emitSpmdKernel(const OMPExecutableDirective &D,
IsOffloadEntry, CodeGen);
}
-void CGOpenMPRuntimeNVPTX::emitSpmdEntryHeader(
+void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
CodeGenFunction &CGF, EntryFunctionState &EST,
const OMPExecutableDirective &D) {
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
// Setup BBs in entry function.
llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
@@ -433,18 +1022,30 @@ void CGOpenMPRuntimeNVPTX::emitSpmdEntryHeader(
// Initialize the OMP state in the runtime; called by all active threads.
// TODO: Set RequiresOMPRuntime and RequiresDataSharing parameters
// based on code analysis of the target region.
- llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSpmdExecutionMode=*/true),
+ llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
/*RequiresOMPRuntime=*/Bld.getInt16(1),
/*RequiresDataSharing=*/Bld.getInt16(1)};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
+
+ // For data sharing, we need to initialize the stack.
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
+
CGF.EmitBranch(ExecuteBB);
CGF.EmitBlock(ExecuteBB);
+
+ IsInTargetMasterThreadRegion = true;
}
-void CGOpenMPRuntimeNVPTX::emitSpmdEntryFooter(CodeGenFunction &CGF,
+void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
+ IsInTargetMasterThreadRegion = false;
+ if (!CGF.HaveInsertPoint())
+ return;
+
if (!EST.ExitBB)
EST.ExitBB = CGF.createBasicBlock(".exit");
@@ -468,19 +1069,21 @@ void CGOpenMPRuntimeNVPTX::emitSpmdEntryFooter(CodeGenFunction &CGF,
// 'generic', the runtime reserves one warp for the master, otherwise, all
// warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
- CGOpenMPRuntimeNVPTX::ExecutionMode Mode) {
- (void)new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::WeakAnyLinkage,
- llvm::ConstantInt::get(CGM.Int8Ty, Mode), Name + Twine("_exec_mode"));
+ bool Mode) {
+ auto *GVMode =
+ new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::WeakAnyLinkage,
+ llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
+ Twine(Name, "_exec_mode"));
+ CGM.addCompilerUsedGlobal(GVMode);
}
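+// The emitted marker is roughly (a sketch; the actual symbol name is derived
+// from the kernel name):
+//   @<kernel>_exec_mode = weak constant i8 0   ; 0 == SPMD, 1 == generic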
void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
ASTContext &Ctx = CGM.getContext();
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
+ CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
+ WST.Loc, WST.Loc);
emitWorkerLoop(CGF, WST);
CGF.FinishFunction();
}
@@ -519,19 +1122,16 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
- // Set up shared arguments
- Address SharedArgs =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrPtrTy, "shared_args");
// TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {WorkFn.getPointer(), SharedArgs.getPointer(),
+ llvm::Value *Args[] = {WorkFn.getPointer(),
/*RequiresOMPRuntime=*/Bld.getInt16(1)};
llvm::Value *Ret = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
// On termination condition (workid == 0), exit loop.
- llvm::Value *ShouldTerminate =
- Bld.CreateIsNull(Bld.CreateLoad(WorkFn), "should_terminate");
+ llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
+ llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
// Activate requested workers.
@@ -543,13 +1143,10 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
// Signal start of parallel region.
CGF.EmitBlock(ExecuteBB);
- // Current context
- ASTContext &Ctx = CGF.getContext();
-
// Process work items: outlined parallel functions.
- for (auto *W : Work) {
+ for (llvm::Function *W : Work) {
// Try to match this outlined function.
- auto *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
+ llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
llvm::Value *WorkFnMatch =
Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
@@ -562,23 +1159,33 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.EmitBlock(ExecuteFNBB);
// Insert call to work function via shared wrapper. The shared
- // wrapper takes exactly three arguments:
+ // wrapper takes two arguments:
// - the parallelism level;
- // - the master thread ID;
- // - the list of references to shared arguments.
- //
- // TODO: Assert that the function is a wrapper function.s
- Address Capture = CGF.EmitLoadOfPointer(SharedArgs,
- Ctx.getPointerType(
- Ctx.getPointerType(Ctx.VoidPtrTy)).castAs<PointerType>());
- emitCall(CGF, W, {Bld.getInt16(/*ParallelLevel=*/0),
- getMasterThreadID(CGF), Capture.getPointer()});
+ // - the thread ID;
+ emitCall(CGF, WST.Loc, W,
+ {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
// Go to end of parallel region.
CGF.EmitBranch(TerminateBB);
CGF.EmitBlock(CheckNextBB);
}
+  // Default case: call the outlined function through a pointer if the target
+ // region makes a declare target call that may contain an orphaned parallel
+ // directive.
+ auto *ParallelFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
+ /*isVarArg=*/false)
+ ->getPointerTo();
+ llvm::Value *WorkFnCast = Bld.CreateBitCast(WorkID, ParallelFnTy);
+ // Insert call to work function via shared wrapper. The shared
+ // wrapper takes two arguments:
+ // - the parallelism level;
+ // - the thread ID;
+ emitCall(CGF, WST.Loc, WorkFnCast,
+ {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
+ // Go to end of parallel region.
+ CGF.EmitBranch(TerminateBB);
// Signal end of parallel region.
CGF.EmitBlock(TerminateBB);
@@ -597,7 +1204,7 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
CGF.EmitBlock(ExitBB);
}
-/// \brief Returns specified OpenMP runtime function for the current OpenMP
+/// Returns the specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
@@ -609,7 +1216,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
// Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
// RequiresOMPRuntime);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
break;
@@ -617,7 +1224,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
case OMPRTL_NVPTX__kmpc_kernel_deinit: {
// Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
llvm::Type *TypeParams[] = {CGM.Int16Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
break;
@@ -626,44 +1233,40 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
// Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
break;
}
case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit: {
// Build void __kmpc_spmd_kernel_deinit();
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
/// Build void __kmpc_kernel_prepare_parallel(
- /// void *outlined_function, void ***args, kmp_int32 nArgs, int16_t
- /// IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int8PtrTy,
- CGM.Int8PtrPtrTy->getPointerTo(0), CGM.Int32Ty,
- CGM.Int16Ty};
- llvm::FunctionType *FnTy =
+ /// void *outlined_function, int16_t IsOMPRuntimeInitialized);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrTy, CGM.Int16Ty};
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_parallel: {
- /// Build bool __kmpc_kernel_parallel(void **outlined_function, void
- /// ***args, int16_t IsOMPRuntimeInitialized);
- llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy,
- CGM.Int8PtrPtrTy->getPointerTo(0), CGM.Int16Ty};
+ /// Build bool __kmpc_kernel_parallel(void **outlined_function,
+ /// int16_t IsOMPRuntimeInitialized);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy, CGM.Int16Ty};
llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
/// Build void __kmpc_kernel_end_parallel();
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
break;
@@ -672,7 +1275,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
// Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
break;
@@ -681,7 +1284,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
// Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
break;
@@ -690,7 +1293,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
// Build int32_t __kmpc_shuffle_int32(int32_t element,
// int16_t lane_offset, int16_t warp_size);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
break;
@@ -699,7 +1302,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
// Build int64_t __kmpc_shuffle_int64(int64_t element,
// int16_t lane_offset, int16_t warp_size);
llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
break;
@@ -725,12 +1328,39 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
CGM.VoidPtrTy,
ShuffleReduceFnTy->getPointerTo(),
InterWarpCopyFnTy->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait");
break;
}
+ case OMPRTL_NVPTX__kmpc_simd_reduce_nowait: {
+    // Build int32_t __kmpc_nvptx_simd_reduce_nowait(kmp_int32 global_tid,
+    // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
+    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
+    // lane_offset, int16_t AlgorithmVersion),
+ // void (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
+ llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
+ CGM.Int16Ty, CGM.Int16Ty};
+ auto *ShuffleReduceFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
+ auto *InterWarpCopyFnTy =
+ llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
+ /*isVarArg=*/false);
+ llvm::Type *TypeParams[] = {CGM.Int32Ty,
+ CGM.Int32Ty,
+ CGM.SizeTy,
+ CGM.VoidPtrTy,
+ ShuffleReduceFnTy->getPointerTo(),
+ InterWarpCopyFnTy->getPointerTo()};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(
+ FnTy, /*Name=*/"__kmpc_nvptx_simd_reduce_nowait");
+ break;
+ }
case OMPRTL_NVPTX__kmpc_teams_reduce_nowait: {
// Build int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
// int32_t num_vars, size_t reduce_size, void *reduce_data,
@@ -768,7 +1398,7 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
InterWarpCopyFnTy->getPointerTo(),
CopyToScratchpadFnTy->getPointerTo(),
LoadReduceFnTy->getPointerTo()};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait");
@@ -777,32 +1407,103 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
// Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
llvm::Type *TypeParams[] = {CGM.Int32Ty};
- llvm::FunctionType *FnTy =
+ auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
break;
}
+ case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
+ /// Build void __kmpc_data_sharing_init_stack();
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
+ /// Build void __kmpc_data_sharing_init_stack_spmd();
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
+ // Build void *__kmpc_data_sharing_push_stack(size_t size,
+ // int16_t UseSharedMemory);
+ llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(
+ FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
+ // Build void __kmpc_data_sharing_pop_stack(void *a);
+ llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy,
+ /*Name=*/"__kmpc_data_sharing_pop_stack");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
+ /// Build void __kmpc_begin_sharing_variables(void ***args,
+ /// size_t n_args);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
+ /// Build void __kmpc_end_sharing_variables();
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_get_shared_variables: {
+ /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
+ llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_parallel_level: {
+ // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
+ // Build int8_t __kmpc_is_spmd_exec_mode();
+ auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
+ break;
+ }
}
return RTLFn;
}
void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
llvm::Constant *Addr,
- uint64_t Size, int32_t) {
- auto *F = dyn_cast<llvm::Function>(Addr);
+ uint64_t Size, int32_t,
+ llvm::GlobalValue::LinkageTypes) {
// TODO: Add support for global variables on the device after declare target
// support.
- if (!F)
+ if (!isa<llvm::Function>(Addr))
return;
- llvm::Module *M = F->getParent();
- llvm::LLVMContext &Ctx = M->getContext();
+ llvm::Module &M = CGM.getModule();
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
// Get "nvvm.annotations" metadata node
- llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
+ llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, "kernel"),
+ llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
// Append metadata to nvvm.annotations
@@ -818,27 +1519,19 @@ void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
assert(!ParentName.empty() && "Invalid target region parent name!");
- CGOpenMPRuntimeNVPTX::ExecutionMode Mode =
- getExecutionModeForDirective(CGM, D);
- switch (Mode) {
- case CGOpenMPRuntimeNVPTX::ExecutionMode::Generic:
- emitGenericKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
- CodeGen);
- break;
- case CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd:
- emitSpmdKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
+ bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
+ if (Mode)
+ emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
CodeGen);
- break;
- case CGOpenMPRuntimeNVPTX::ExecutionMode::Unknown:
- llvm_unreachable(
- "Unknown programming model for OpenMP directive on NVPTX target.");
- }
+ else
+ emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
+ CodeGen);
setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
- : CGOpenMPRuntime(CGM), CurrentExecutionMode(ExecutionMode::Unknown) {
+ : CGOpenMPRuntime(CGM, "_", "$") {
if (!CGM.getLangOpts().OpenMPIsDevice)
llvm_unreachable("OpenMP NVPTX can only handle device code.");
}
@@ -846,9 +1539,8 @@ CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
OpenMPProcBindClauseKind ProcBind,
SourceLocation Loc) {
- // Do nothing in case of Spmd mode and L0 parallel.
- // TODO: If in Spmd mode and L1 parallel emit the clause.
- if (isInSpmdExecutionMode())
+ // Do nothing in case of SPMD mode and L0 parallel.
+ if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
return;
CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
@@ -857,9 +1549,8 @@ void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
- // Do nothing in case of Spmd mode and L0 parallel.
- // TODO: If in Spmd mode and L1 parallel emit the clause.
- if (isInSpmdExecutionMode())
+ // Do nothing in case of SPMD mode and L0 parallel.
+ if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
return;
CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
@@ -873,13 +1564,33 @@ void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ // Emit target region as a standalone region.
+ class NVPTXPrePostActionTy : public PrePostActionTy {
+ bool &IsInParallelRegion;
+ bool PrevIsInParallelRegion;
- auto *OutlinedFun = cast<llvm::Function>(
- CGOpenMPRuntime::emitParallelOutlinedFunction(
+ public:
+ NVPTXPrePostActionTy(bool &IsInParallelRegion)
+ : IsInParallelRegion(IsInParallelRegion) {}
+ void Enter(CodeGenFunction &CGF) override {
+ PrevIsInParallelRegion = IsInParallelRegion;
+ IsInParallelRegion = true;
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ IsInParallelRegion = PrevIsInParallelRegion;
+ }
+ } Action(IsInParallelRegion);
+ CodeGen.setAction(Action);
+ bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
+ IsInTargetMasterThreadRegion = false;
+ auto *OutlinedFun =
+ cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen));
- if (!isInSpmdExecutionMode()) {
+ IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
+ if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
+ !IsInParallelRegion) {
llvm::Function *WrapperFun =
- createDataSharingWrapper(OutlinedFun, D);
+ createParallelDataSharingWrapper(OutlinedFun, D);
WrapperFunctionsMap[OutlinedFun] = WrapperFun;
}
@@ -889,7 +1600,24 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ SourceLocation Loc = D.getLocStart();
+ // Emit target region as a standalone region.
+ class NVPTXPrePostActionTy : public PrePostActionTy {
+ SourceLocation &Loc;
+
+ public:
+ NVPTXPrePostActionTy(SourceLocation &Loc) : Loc(Loc) {}
+ void Enter(CodeGenFunction &CGF) override {
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
+ .emitGenericVarsProlog(CGF, Loc);
+ }
+ void Exit(CodeGenFunction &CGF) override {
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
+ .emitGenericVarsEpilog(CGF);
+ }
+ } Action(Loc);
+ CodeGen.setAction(Action);
llvm::Value *OutlinedFunVal = CGOpenMPRuntime::emitTeamsOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen);
llvm::Function *OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
@@ -900,6 +1628,119 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
return OutlinedFun;
}
+void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
+ return;
+
+ CGBuilderTy &Bld = CGF.Builder;
+
+ const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I == FunctionGlobalizedDecls.end())
+ return;
+ if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
+ QualType RecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
+
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(RecTy).getQuantity();
+ unsigned GlobalRecordSize =
+ CGM.getContext().getTypeSizeInChars(RecTy).getQuantity();
+ GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
+ // TODO: allow the usage of shared memory to be controlled by
+ // the user, for now, default to global.
+ llvm::Value *GlobalRecordSizeArg[] = {
+ llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
+ CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
+ GlobalRecordSizeArg);
+ llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, CGF.ConvertTypeForMem(RecTy)->getPointerTo());
+ LValue Base =
+ CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, RecTy);
+ I->getSecond().GlobalRecordAddr = GlobalRecValue;
+
+ // Emit the "global alloca" which is a GEP from the global declaration
+ // record using the pointer returned by the runtime.
+ for (auto &Rec : I->getSecond().LocalVarData) {
+ bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
+ llvm::Value *ParValue;
+ if (EscapedParam) {
+ const auto *VD = cast<VarDecl>(Rec.first);
+ LValue ParLVal =
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
+ ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
+ }
+ const FieldDecl *FD = Rec.second.first;
+ LValue VarAddr = CGF.EmitLValueForField(Base, FD);
+ Rec.second.second = VarAddr.getAddress();
+ if (EscapedParam) {
+ const auto *VD = cast<VarDecl>(Rec.first);
+ CGF.EmitStoreOfScalar(ParValue, VarAddr);
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
+ }
+ }
+ }
+ for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Value *Size = CGF.getTypeSize(VD->getType());
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ Size = Bld.CreateNUWAdd(
+ Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
+ Size = Bld.CreateUDiv(Size, AlignVal);
+ Size = Bld.CreateNUWMul(Size, AlignVal);
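+    // The computation above rounds Size up to the next multiple of the
+    // declared alignment: Size = ((Size + Align - 1) / Align) * Align.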
+ // TODO: allow the usage of shared memory to be controlled by
+ // the user, for now, default to global.
+ llvm::Value *GlobalRecordSizeArg[] = {
+ Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
+ GlobalRecordSizeArg);
+ llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
+ LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
+ CGM.getContext().getDeclAlign(VD),
+ AlignmentSource::Decl);
+ I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
+ Base.getAddress());
+ I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
+ }
+ I->getSecond().MappedParams->apply(CGF);
+}
+
+void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
+ return;
+
+ const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I != FunctionGlobalizedDecls.end()) {
+ I->getSecond().MappedParams->restore(CGF);
+ if (!CGF.HaveInsertPoint())
+ return;
+ for (llvm::Value *Addr :
+ llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
+ Addr);
+ }
+ if (I->getSecond().GlobalRecordAddr) {
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
+ I->getSecond().GlobalRecordAddr);
+ }
+ }
+}
+
void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc,
@@ -908,12 +1749,12 @@ void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
if (!CGF.HaveInsertPoint())
return;
- Address ZeroAddr =
- CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
- /*Name*/ ".zero.addr");
+ Address ZeroAddr = CGF.CreateMemTemp(
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
+ /*Name*/ ".zero.addr");
CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
OutlinedFnArgs.push_back(ZeroAddr.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
@@ -925,66 +1766,102 @@ void CGOpenMPRuntimeNVPTX::emitParallelCall(
if (!CGF.HaveInsertPoint())
return;
- if (isInSpmdExecutionMode())
- emitSpmdParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
+ if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
+ emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
else
- emitGenericParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
+ emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}
-void CGOpenMPRuntimeNVPTX::emitGenericParallelCall(
+void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
- llvm::Function *WFn = WrapperFunctionsMap[Fn];
- assert(WFn && "Wrapper function does not exist!");
// Force inline this outlined function at its call site.
Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
- auto &&L0ParallelGen = [this, WFn, &CapturedVars](CodeGenFunction &CGF,
- PrePostActionTy &) {
- CGBuilderTy &Bld = CGF.Builder;
+ Address ZeroAddr = CGF.CreateMemTemp(CGF.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/32, /*Signed=*/1),
+ ".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ // ThreadId for serialized parallels is 0.
+ Address ThreadIDAddr = ZeroAddr;
+ auto &&CodeGen = [this, Fn, CapturedVars, Loc, ZeroAddr, &ThreadIDAddr](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+ emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
+ };
+ auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ RegionCodeGenTy RCG(CodeGen);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *Args[] = {RTLoc, ThreadID};
+ NVPTXActionTy Action(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
+ Args,
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
+ Args);
+ RCG.setAction(Action);
+ RCG(CGF);
+ };
+
+ auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Function *WFn = WrapperFunctionsMap[Fn];
+ assert(WFn && "Wrapper function does not exist!");
llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
+ // Prepare for parallel region. Indicate the outlined function.
+ llvm::Value *Args[] = {ID, /*RequiresOMPRuntime=*/Bld.getInt16(1)};
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
+ Args);
+
+ // Create a private scope that will globalize the arguments
+ // passed from the outside of the target region.
+ CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
+
+    // There's something to share.
if (!CapturedVars.empty()) {
- // There's somehting to share, add the attribute
- CGF.CurFn->addFnAttr("has-nvptx-shared-depot");
// Prepare for parallel region. Indicate the outlined function.
Address SharedArgs =
- CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy,
- "shared_args");
+ CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {ID, SharedArgsPtr,
- Bld.getInt32(CapturedVars.size()),
- /*RequiresOMPRuntime=*/Bld.getInt16(1)};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
- Args);
+ llvm::Value *DataSharingArgs[] = {
+ SharedArgsPtr,
+ llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_begin_sharing_variables),
+ DataSharingArgs);
+ // Store variable address in a list of references to pass to workers.
unsigned Idx = 0;
ASTContext &Ctx = CGF.getContext();
+ Address SharedArgListAddress = CGF.EmitLoadOfPointer(
+ SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
+ .castAs<PointerType>());
for (llvm::Value *V : CapturedVars) {
- Address Dst = Bld.CreateConstInBoundsGEP(
- CGF.EmitLoadOfPointer(SharedArgs,
- Ctx.getPointerType(
- Ctx.getPointerType(Ctx.VoidPtrTy)).castAs<PointerType>()),
- Idx, CGF.getPointerSize());
- llvm::Value *PtrV = Bld.CreateBitCast(V, CGF.VoidPtrTy);
+ Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
+ CGF.getPointerSize());
+ llvm::Value *PtrV;
+ if (V->getType()->isIntegerTy())
+ PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
+ else
+ PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
- Ctx.getPointerType(Ctx.VoidPtrTy));
- Idx++;
+ Ctx.getPointerType(Ctx.VoidPtrTy));
+ ++Idx;
}
- } else {
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {
- ID, llvm::ConstantPointerNull::get(CGF.VoidPtrPtrTy->getPointerTo(0)),
- /*nArgs=*/Bld.getInt32(0), /*RequiresOMPRuntime=*/Bld.getInt16(1)};
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
- Args);
}
// Activate workers. This barrier is used by the master to signal
@@ -999,96 +1876,332 @@ void CGOpenMPRuntimeNVPTX::emitGenericParallelCall(
// The master waits at this barrier until all workers are done.
syncCTAThreads(CGF);
+ if (!CapturedVars.empty())
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
+
// Remember for post-processing in worker loop.
Work.emplace_back(WFn);
};
- auto *RTLoc = emitUpdateLocation(CGF, Loc);
- auto *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *Args[] = {RTLoc, ThreadID};
-
- auto &&SeqGen = [this, Fn, &CapturedVars, &Args, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
- auto &&CodeGen = [this, Fn, &CapturedVars, Loc](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
- Action.Enter(CGF);
-
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(
- llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
- OutlinedFnArgs.push_back(
- llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
- };
-
+ auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen, &CodeGen,
+ &ThreadIDAddr](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
- Args,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
- Args);
- RCG.setAction(Action);
- RCG(CGF);
+ if (IsInParallelRegion) {
+ SeqGen(CGF, Action);
+ } else if (IsInTargetMasterThreadRegion) {
+ L0ParallelGen(CGF, Action);
+ } else if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_NonSPMD) {
+ RCG(CGF);
+ } else {
+ // Check for master and then parallelism:
+ // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
+ // Serialized execution.
+ // } else if (master) {
+ // Worker call.
+ // } else {
+ // Outlined function call.
+ // }
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+ llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
+ llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
+ llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
+ llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
+ Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ParallelCheckBB);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *PL = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
+ {RTLoc, ThreadID});
+ llvm::Value *Res = Bld.CreateIsNotNull(PL);
+ Bld.CreateCondBr(Res, SeqBB, MasterCheckBB);
+ CGF.EmitBlock(SeqBB);
+ SeqGen(CGF, Action);
+ CGF.EmitBranch(ExitBB);
+ // There is no need to emit a line number for an unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(MasterCheckBB);
+ llvm::BasicBlock *MasterThenBB = CGF.createBasicBlock("master.then");
+ llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
+ llvm::Value *IsMaster =
+ Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
+ Bld.CreateCondBr(IsMaster, MasterThenBB, ElseBlock);
+ CGF.EmitBlock(MasterThenBB);
+ L0ParallelGen(CGF, Action);
+ CGF.EmitBranch(ExitBB);
+ // There is no need to emit a line number for an unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ElseBlock);
+ // In the worker we need to use the real thread id.
+ ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
+ RCG(CGF);
+ // There is no need to emit a line number for an unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ // Emit the continuation block for code after the if.
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+ }
};
- if (IfCond)
- emitOMPIfClause(CGF, IfCond, L0ParallelGen, SeqGen);
- else {
+ if (IfCond) {
+ emitOMPIfClause(CGF, IfCond, LNParallelGen, SeqGen);
+ } else {
CodeGenFunction::RunCleanupsScope Scope(CGF);
- RegionCodeGenTy ThenRCG(L0ParallelGen);
+ RegionCodeGenTy ThenRCG(LNParallelGen);
ThenRCG(CGF);
}
}
-void CGOpenMPRuntimeNVPTX::emitSpmdParallelCall(
+void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
// Just call the outlined function to execute the parallel region.
// OutlinedFn(&GTid, &zero, CapturedStruct);
//
- // TODO: Do something with IfCond when support for the 'if' clause
- // is added on Spmd target directives.
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(
- llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
- OutlinedFnArgs.push_back(
- llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
+
+ Address ZeroAddr = CGF.CreateMemTemp(CGF.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/32, /*Signed=*/1),
+ ".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ // ThreadId for serialized parallels is 0.
+ Address ThreadIDAddr = ZeroAddr;
+ auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, ZeroAddr,
+ &ThreadIDAddr](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+
+ llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
+ OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
+ OutlinedFnArgs.push_back(ZeroAddr.getPointer());
+ OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
+ emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
+ };
+ auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+
+ RegionCodeGenTy RCG(CodeGen);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *Args[] = {RTLoc, ThreadID};
+
+ NVPTXActionTy Action(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
+ Args,
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
+ Args);
+ RCG.setAction(Action);
+ RCG(CGF);
+ };
+
+ if (IsInTargetMasterThreadRegion) {
+ // In the worker we need to use the real thread id.
+ ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
+ RegionCodeGenTy RCG(CodeGen);
+ RCG(CGF);
+ } else {
+ // If we are not in the target region, it is definitely L2 parallelism or
+ // more, because in SPMD mode we always have an L1 parallel level, so we
+ // don't need to check for orphaned directives.
+ RegionCodeGenTy RCG(SeqGen);
+ RCG(CGF);
+ }
+}
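+
+// In pseudo-code, the lowering above is roughly (a sketch; the emitted IR
+// differs in details):
+//
+//   if (in target master thread region)
+//     outlined(&gtid, &zero, captured...);
+//   else {                                    // L2+ parallelism
+//     __kmpc_serialized_parallel(loc, gtid);
+//     outlined(&zero, &zero, captured...);    // serialized: thread id is 0
+//     __kmpc_end_serialized_parallel(loc, gtid);
+//   }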
+
+void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
+ CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
+ const Expr *Hint) {
+ llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
+ llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
+ llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
+
+ // Fetch team-local id of the thread.
+ llvm::Value *ThreadID = getNVPTXThreadID(CGF);
+
+ // Get the width of the team.
+ llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);
+
+ // Initialize the counter variable for the loop.
+ QualType Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
+ Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
+ LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
+ /*isInit=*/true);
+
+ // This block checks whether the loop counter exceeds the upper bound.
+ CGF.EmitBlock(LoopBB);
+ llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
+ llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
+ CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
+
+ // This block tests which single thread should execute the region, and
+ // which threads should go straight to the synchronisation point.
+ CGF.EmitBlock(TestBB);
+ CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
+ llvm::Value *CmpThreadToCounter =
+ CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
+ CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
+
+ // This block emits the body of the critical region.
+ CGF.EmitBlock(BodyBB);
+
+ // Output the critical statement.
+ CriticalOpGen(CGF);
+
+ // After the body surrounded by the critical region, the single executing
+ // thread will jump to the synchronisation point.
+ // There, the block waits for all threads in the current team to finish,
+ // then increments the counter variable and returns to the loop.
+ CGF.EmitBlock(SyncBB);
+ getNVPTXCTABarrier(CGF);
+
+ llvm::Value *IncCounterVal =
+ CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
+ CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
+ CGF.EmitBranch(LoopBB);
+
+ // Block that is reached when all threads in the team complete the region.
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
+}
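+
+// In pseudo-code, the emitted critical region is roughly (a sketch):
+//
+//   for (counter = 0; counter < team_width; ++counter) {
+//     if (thread_id == counter)
+//       <critical body>;
+//     barrier();                              // whole-CTA barrier
+//   }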
+
+/// Cast value to the specified type.
+static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
+ QualType ValTy, QualType CastTy,
+ SourceLocation Loc) {
+ assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
+ "Cast type must sized.");
+ assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
+ "Val type must sized.");
+ llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
+ if (ValTy == CastTy)
+ return Val;
+ if (CGF.getContext().getTypeSizeInChars(ValTy) ==
+ CGF.getContext().getTypeSizeInChars(CastTy))
+ return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
+ if (CastTy->isIntegerType() && ValTy->isIntegerType())
+ return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
+ CastTy->hasSignedIntegerRepresentation());
+ Address CastItem = CGF.CreateMemTemp(CastTy);
+ Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
+ CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
+ return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
}
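+
+// The fallback above is, in pseudo-code (a sketch): store Val into a
+// CastTy-sized temporary through a ValTy-typed view and reload it as CastTy:
+//
+//   CastTy Tmp;
+//   *(ValTy *)&Tmp = Val;
+//   return Tmp;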
/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp.
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
- QualType ElemTy,
llvm::Value *Elem,
- llvm::Value *Offset) {
- auto &CGM = CGF.CGM;
- auto &C = CGM.getContext();
- auto &Bld = CGF.Builder;
+ QualType ElemType,
+ llvm::Value *Offset,
+ SourceLocation Loc) {
+ CodeGenModule &CGM = CGF.CGM;
+ CGBuilderTy &Bld = CGF.Builder;
CGOpenMPRuntimeNVPTX &RT =
*(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));
- unsigned Size = CGM.getContext().getTypeSizeInChars(ElemTy).getQuantity();
- assert(Size <= 8 && "Unsupported bitwidth in shuffle instruction.");
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
+ assert(Size.getQuantity() <= 8 &&
+ "Unsupported bitwidth in shuffle instruction.");
- OpenMPRTLFunctionNVPTX ShuffleFn = Size <= 4
+ OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
? OMPRTL_NVPTX__kmpc_shuffle_int32
: OMPRTL_NVPTX__kmpc_shuffle_int64;
// Cast all types to 32- or 64-bit values before calling shuffle routines.
- auto CastTy = Size <= 4 ? CGM.Int32Ty : CGM.Int64Ty;
- auto *ElemCast = Bld.CreateSExtOrBitCast(Elem, CastTy);
- auto *WarpSize = CGF.EmitScalarConversion(
- getNVPTXWarpSize(CGF), C.getIntTypeForBitwidth(32, /* Signed */ true),
- C.getIntTypeForBitwidth(16, /* Signed */ true), SourceLocation());
+ QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
+ Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
+ llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
+ llvm::Value *WarpSize =
+ Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
- auto *ShuffledVal =
- CGF.EmitRuntimeCall(RT.createNVPTXRuntimeFunction(ShuffleFn),
- {ElemCast, Offset, WarpSize});
+ llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
+ RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});
- return Bld.CreateTruncOrBitCast(ShuffledVal, CGF.ConvertTypeForMem(ElemTy));
+ return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
+}
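+
+// In pseudo-code (a sketch): elements of up to 4 bytes take the 32-bit entry
+// point, elements of 5..8 bytes the 64-bit one:
+//
+//   res = __kmpc_shuffle_int32((int32_t)elem, offset, warpsize);  // <= 4
+//   res = __kmpc_shuffle_int64((int64_t)elem, offset, warpsize);  // 5..8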
+
+static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
+ Address DestAddr, QualType ElemType,
+ llvm::Value *Offset, SourceLocation Loc) {
+ CGBuilderTy &Bld = CGF.Builder;
+
+ CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
+ // Create the loop over the big sized data.
+ // ptr = (void*)Elem;
+ // ptrEnd = (void*) Elem + 1;
+ // Step = 8;
+ // while (ptr + Step < ptrEnd)
+ // shuffle((int64_t)*ptr);
+ // Step = 4;
+ // while (ptr + Step < ptrEnd)
+ // shuffle((int32_t)*ptr);
+ // ...
+ Address ElemPtr = DestAddr;
+ Address Ptr = SrcAddr;
+ Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Bld.CreateConstGEP(SrcAddr, 1, Size), CGF.VoidPtrTy);
+ for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
+ if (Size < CharUnits::fromQuantity(IntSize))
+ continue;
+ QualType IntType = CGF.getContext().getIntTypeForBitwidth(
+ CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
+ /*Signed=*/1);
+ llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
+ Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
+ ElemPtr =
+ Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
+ if (Size.getQuantity() / IntSize > 1) {
+ llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
+ llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
+ CGF.EmitBlock(PreCondBB);
+ llvm::PHINode *PhiSrc =
+ Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
+ PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
+ llvm::PHINode *PhiDest =
+ Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
+ PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
+ Ptr = Address(PhiSrc, Ptr.getAlignment());
+ ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
+ llvm::Value *PtrDiff = Bld.CreatePtrDiff(
+ PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Ptr.getPointer(), CGF.VoidPtrTy));
+ Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
+ ThenBB, ExitBB);
+ CGF.EmitBlock(ThenBB);
+ llvm::Value *Res = createRuntimeShuffleFunction(
+ CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
+ IntType, Offset, Loc);
+ CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
+ Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
+ ElemPtr =
+ Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
+ PhiSrc->addIncoming(Ptr.getPointer(), ThenBB);
+ PhiDest->addIncoming(ElemPtr.getPointer(), ThenBB);
+ CGF.EmitBranch(PreCondBB);
+ CGF.EmitBlock(ExitBB);
+ } else {
+ llvm::Value *Res = createRuntimeShuffleFunction(
+ CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
+ IntType, Offset, Loc);
+ CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
+ Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
+ ElemPtr =
+ Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
+ }
+ Size = Size % IntSize;
+ }
}
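+
+// For example (a sketch), a 7-byte element is moved as one 4-byte, one 2-byte
+// and one 1-byte shuffle: the 8-byte step is skipped because 7 < 8, and Size
+// shrinks 7 -> 3 -> 1 -> 0 as IntSize steps through 4, 2 and 1.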
namespace {
@@ -1119,19 +2232,19 @@ static void emitReductionListCopy(
ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
- auto &CGM = CGF.CGM;
- auto &C = CGM.getContext();
- auto &Bld = CGF.Builder;
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &C = CGM.getContext();
+ CGBuilderTy &Bld = CGF.Builder;
- auto *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
- auto *ScratchpadIndex = CopyOptions.ScratchpadIndex;
- auto *ScratchpadWidth = CopyOptions.ScratchpadWidth;
+ llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
+ llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
+ llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
// Iterates, element-by-element, through the source Reduce list and
// makes a copy.
unsigned Idx = 0;
unsigned Size = Privates.size();
- for (auto &Private : Privates) {
+ for (const Expr *Private : Privates) {
Address SrcElementAddr = Address::invalid();
Address DestElementAddr = Address::invalid();
Address DestElementPtrAddr = Address::invalid();
@@ -1150,10 +2263,9 @@ static void emitReductionListCopy(
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr =
Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
- llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
- SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- SrcElementAddr =
- Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
// Step 1.2: Create a temporary to store the element in the destination
// Reduce list.
@@ -1169,62 +2281,49 @@ static void emitReductionListCopy(
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr =
Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
- llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
- SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- SrcElementAddr =
- Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
// Step 1.2: Get the address for dest element. The destination
// element has already been created on the thread's stack.
DestElementPtrAddr =
Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
- llvm::Value *DestElementPtr =
- CGF.EmitLoadOfScalar(DestElementPtrAddr, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation());
- Address DestElemAddr =
- Address(DestElementPtr, C.getTypeAlignInChars(Private->getType()));
- DestElementAddr = Bld.CreateElementBitCast(
- DestElemAddr, CGF.ConvertTypeForMem(Private->getType()));
+ DestElementAddr = CGF.EmitLoadOfPointer(
+ DestElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
break;
}
case ThreadToScratchpad: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr =
Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
- llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
- SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- SrcElementAddr =
- Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ SrcElementAddr = CGF.EmitLoadOfPointer(
+ SrcElementPtrAddr,
+ C.getPointerType(Private->getType())->castAs<PointerType>());
// Step 1.2: Get the address for dest element:
// address = base + index * ElementSizeInChars.
- unsigned ElementSizeInChars =
- C.getTypeSizeInChars(Private->getType()).getQuantity();
- auto *CurrentOffset =
- Bld.CreateMul(llvm::ConstantInt::get(CGM.SizeTy, ElementSizeInChars),
- ScratchpadIndex);
- auto *ScratchPadElemAbsolutePtrVal =
- Bld.CreateAdd(DestBase.getPointer(), CurrentOffset);
+ llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
+ llvm::Value *CurrentOffset =
+ Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
+ llvm::Value *ScratchPadElemAbsolutePtrVal =
+ Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
ScratchPadElemAbsolutePtrVal =
Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- Address ScratchpadPtr =
- Address(ScratchPadElemAbsolutePtrVal,
- C.getTypeAlignInChars(Private->getType()));
- DestElementAddr = Bld.CreateElementBitCast(
- ScratchpadPtr, CGF.ConvertTypeForMem(Private->getType()));
+ DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
+ C.getTypeAlignInChars(Private->getType()));
IncrScratchpadDest = true;
break;
}
case ScratchpadToThread: {
// Step 1.1: Get the address for the src element in the scratchpad.
// address = base + index * ElementSizeInChars.
- unsigned ElementSizeInChars =
- C.getTypeSizeInChars(Private->getType()).getQuantity();
- auto *CurrentOffset =
- Bld.CreateMul(llvm::ConstantInt::get(CGM.SizeTy, ElementSizeInChars),
- ScratchpadIndex);
- auto *ScratchPadElemAbsolutePtrVal =
- Bld.CreateAdd(SrcBase.getPointer(), CurrentOffset);
+ llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
+ llvm::Value *CurrentOffset =
+ Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
+ llvm::Value *ScratchPadElemAbsolutePtrVal =
+ Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
ScratchPadElemAbsolutePtrVal =
Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
@@ -1246,21 +2345,30 @@ static void emitReductionListCopy(
// element as this is required in all directions
SrcElementAddr = Bld.CreateElementBitCast(
SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
- llvm::Value *Elem =
- CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
- Private->getType(), SourceLocation());
+ DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
+ SrcElementAddr.getElementType());
// Now that all active lanes have read the element in the
// Reduce list, shuffle over the value from the remote lane.
if (ShuffleInElement) {
- Elem = createRuntimeShuffleFunction(CGF, Private->getType(), Elem,
- RemoteLaneOffset);
+ shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
+ RemoteLaneOffset, Private->getExprLoc());
+ } else {
+ if (Private->getType()->isScalarType()) {
+ llvm::Value *Elem =
+ CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
+ Private->getType(), Private->getExprLoc());
+ // Store the source element value to the dest element address.
+ CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
+ Private->getType());
+ } else {
+ CGF.EmitAggregateCopy(
+ CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
+ CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
+ Private->getType(), AggValueSlot::DoesNotOverlap);
+ }
}
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
- Private->getType());
-
// Step 3.1: Modify reference in dest Reduce list as needed.
// Modifying the reference in Reduce list to point to the newly
// created element. The element is live in the current function
@@ -1279,22 +2387,20 @@ static void emitReductionListCopy(
if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
llvm::Value *ScratchpadBasePtr =
IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
- unsigned ElementSizeInChars =
- C.getTypeSizeInChars(Private->getType()).getQuantity();
- ScratchpadBasePtr = Bld.CreateAdd(
+ llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
+ ScratchpadBasePtr = Bld.CreateNUWAdd(
ScratchpadBasePtr,
- Bld.CreateMul(ScratchpadWidth, llvm::ConstantInt::get(
- CGM.SizeTy, ElementSizeInChars)));
+ Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
// Take care of global memory alignment for performance
- ScratchpadBasePtr = Bld.CreateSub(ScratchpadBasePtr,
- llvm::ConstantInt::get(CGM.SizeTy, 1));
- ScratchpadBasePtr = Bld.CreateSDiv(
+ ScratchpadBasePtr = Bld.CreateNUWSub(
+ ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
+ ScratchpadBasePtr = Bld.CreateUDiv(
ScratchpadBasePtr,
llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
- ScratchpadBasePtr = Bld.CreateAdd(ScratchpadBasePtr,
- llvm::ConstantInt::get(CGM.SizeTy, 1));
- ScratchpadBasePtr = Bld.CreateMul(
+ ScratchpadBasePtr = Bld.CreateNUWAdd(
+ ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
+ ScratchpadBasePtr = Bld.CreateNUWMul(
ScratchpadBasePtr,
llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
@@ -1304,7 +2410,7 @@ static void emitReductionListCopy(
SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
}
- Idx++;
+ ++Idx;
}
}
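+
+// In pseudo-code, the scratchpad addressing above is roughly (a sketch):
+//
+//   elem = base + index * sizeof(elem_type);           // per element
+//   base = align_up(base + width * sizeof(elem_type),  // between elements
+//                   GlobalMemoryAlignment);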
@@ -1319,27 +2425,31 @@ static void emitReductionListCopy(
/// local = local @ remote
/// else
/// local = remote
-static llvm::Value *
-emitReduceScratchpadFunction(CodeGenModule &CGM,
- ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Value *ReduceFn) {
- auto &C = CGM.getContext();
- auto Int32Ty = C.getIntTypeForBitwidth(32, /* Signed */ true);
+static llvm::Value *emitReduceScratchpadFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
// Destination of the copy.
- ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
// Base address of the scratchpad array, with each element storing a
// Reduce list per team.
- ImplicitParamDecl ScratchPadArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
// A source index into the scratchpad array.
- ImplicitParamDecl IndexArg(C, Int32Ty, ImplicitParamDecl::Other);
+ ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
+ ImplicitParamDecl::Other);
// Row width of an element in the scratchpad array, typically
// the number of teams.
- ImplicitParamDecl WidthArg(C, Int32Ty, ImplicitParamDecl::Other);
+ ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
+ ImplicitParamDecl::Other);
// If should_reduce == 1, then it's load AND reduce,
// If should_reduce == 0 (or otherwise), then it only loads (+ copy).
// The latter case is used for initialization.
- ImplicitParamDecl ShouldReduceArg(C, Int32Ty, ImplicitParamDecl::Other);
+ ImplicitParamDecl ShouldReduceArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ Int32Ty, ImplicitParamDecl::Other);
FunctionArgList Args;
Args.push_back(&ReduceListArg);
@@ -1348,47 +2458,44 @@ emitReduceScratchpadFunction(CodeGenModule &CGM,
Args.push_back(&WidthArg);
Args.push_back(&ShouldReduceArg);
- auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
"_omp_reduction_load_and_reduce", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- // We don't need debug information in this function as nothing here refers to
- // user code.
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
// Get local Reduce list pointer.
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address ReduceListAddr(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation()),
+ C.VoidPtrTy, Loc),
CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
CGF.getPointerAlign());
Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
- AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
- llvm::Value *IndexVal =
- Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false,
- Int32Ty, SourceLocation()),
- CGM.SizeTy, /*isSigned=*/true);
+ llvm::Value *IndexVal = Bld.CreateIntCast(
+ CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
+ CGM.SizeTy, /*isSigned=*/true);
Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
- llvm::Value *WidthVal =
- Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
- Int32Ty, SourceLocation()),
- CGM.SizeTy, /*isSigned=*/true);
+ llvm::Value *WidthVal = Bld.CreateIntCast(
+ CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false, Int32Ty, Loc),
+ CGM.SizeTy, /*isSigned=*/true);
Address AddrShouldReduceArg = CGF.GetAddrOfLocalVar(&ShouldReduceArg);
llvm::Value *ShouldReduceVal = CGF.EmitLoadOfScalar(
- AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, SourceLocation());
+ AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, Loc);
// The absolute pointer to the base address of the next element to copy.
llvm::Value *CumulativeElemBasePtr =
@@ -1411,7 +2518,7 @@ emitReduceScratchpadFunction(CodeGenModule &CGM,
llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
- auto CondReduce = Bld.CreateICmpEQ(ShouldReduceVal, Bld.getInt32(1));
+ llvm::Value *CondReduce = Bld.CreateIsNotNull(ShouldReduceVal);
Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
CGF.EmitBlock(ThenBB);
@@ -1421,7 +2528,8 @@ emitReduceScratchpadFunction(CodeGenModule &CGM,
ReduceListAddr.getPointer(), CGF.VoidPtrTy);
llvm::Value *RemoteDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
RemoteReduceList.getPointer(), CGF.VoidPtrTy);
- CGF.EmitCallOrInvoke(ReduceFn, {LocalDataPtr, RemoteDataPtr});
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {LocalDataPtr, RemoteDataPtr});
Bld.CreateBr(MergeBB);
CGF.EmitBlock(ElseBB);
@@ -1445,22 +2553,27 @@ emitReduceScratchpadFunction(CodeGenModule &CGM,
///
static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy) {
+ QualType ReductionArrayTy,
+ SourceLocation Loc) {
- auto &C = CGM.getContext();
- auto Int32Ty = C.getIntTypeForBitwidth(32, /* Signed */ true);
+ ASTContext &C = CGM.getContext();
+ QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
// Source of the copy.
- ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
// Base address of the scratchpad array, with each element storing a
// Reduce list per team.
- ImplicitParamDecl ScratchPadArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
// A destination index into the scratchpad array, typically the team
// identifier.
- ImplicitParamDecl IndexArg(C, Int32Ty, ImplicitParamDecl::Other);
+ ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
+ ImplicitParamDecl::Other);
// Row width of an element in the scratchpad array, typically
// the number of teams.
- ImplicitParamDecl WidthArg(C, Int32Ty, ImplicitParamDecl::Other);
+ ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
+ ImplicitParamDecl::Other);
FunctionArgList Args;
Args.push_back(&ReduceListArg);
@@ -1468,36 +2581,34 @@ static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
Args.push_back(&IndexArg);
Args.push_back(&WidthArg);
- auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
"_omp_reduction_copy_to_scratchpad", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- // We don't need debug information in this function as nothing here refers to
- // user code.
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address SrcDataAddr(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation()),
+ C.VoidPtrTy, Loc),
CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
CGF.getPointerAlign());
Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
- AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
- llvm::Value *IndexVal =
- Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false,
- Int32Ty, SourceLocation()),
- CGF.SizeTy, /*isSigned=*/true);
+ llvm::Value *IndexVal = Bld.CreateIntCast(
+ CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
+ CGF.SizeTy, /*isSigned=*/true);
Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
llvm::Value *WidthVal =
@@ -1534,35 +2645,36 @@ static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
/// sync
static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy) {
- auto &C = CGM.getContext();
- auto &M = CGM.getModule();
+ QualType ReductionArrayTy,
+ SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
+ llvm::Module &M = CGM.getModule();
// ReduceList: thread local Reduce list.
// At the stage of the computation when this function is called, partially
// aggregated values reside in the first lane of every active warp.
- ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
// NumWarps: number of warps active in the parallel region. This could
// be smaller than 32 (max warps in a CTA) for partial block reduction.
- ImplicitParamDecl NumWarpsArg(C,
+ ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getIntTypeForBitwidth(32, /* Signed */ true),
ImplicitParamDecl::Other);
FunctionArgList Args;
Args.push_back(&ReduceListArg);
Args.push_back(&NumWarpsArg);
- auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
"_omp_reduction_inter_warp_copy_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- // We don't need debug information in this function as nothing here refers to
- // user code.
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
// This array is used as a medium to transfer, one reduce element at a time,
// the data from the first lane of every warp to lanes in the first warp
@@ -1571,7 +2683,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// for reduced latency, as well as to have a distinct copy for concurrently
// executing target regions. The array is declared with common linkage so
// as to be shared across compilation units.
- const char *TransferMediumName =
+ StringRef TransferMediumName =
"__openmp_nvptx_data_transfer_temporary_storage";
llvm::GlobalVariable *TransferMedium =
M.getGlobalVariable(TransferMediumName);
@@ -1584,14 +2696,15 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Constant::getNullValue(Ty), TransferMediumName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
SharedAddressSpace);
+ CGM.addCompilerUsedGlobal(TransferMedium);
}
// Get the CUDA thread id of the current OpenMP thread on the GPU.
- auto *ThreadID = getNVPTXThreadID(CGF);
+ llvm::Value *ThreadID = getNVPTXThreadID(CGF);
// nvptx_lane_id = nvptx_id % warpsize
- auto *LaneID = getNVPTXLaneID(CGF);
+ llvm::Value *LaneID = getNVPTXLaneID(CGF);
// nvptx_warp_id = nvptx_id / warpsize
- auto *WarpID = getNVPTXWarpID(CGF);
+ llvm::Value *WarpID = getNVPTXWarpID(CGF);
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address LocalReduceList(
@@ -1602,7 +2715,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
CGF.getPointerAlign());
unsigned Idx = 0;
- for (auto &Private : Privates) {
+ for (const Expr *Private : Privates) {
//
// Warp master copies reduce element to transfer medium in __shared__
// memory.
@@ -1612,8 +2725,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
// if (lane_id == 0)
- auto IsWarpMaster =
- Bld.CreateICmpEQ(LaneID, Bld.getInt32(0), "warp_master");
+ llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
CGF.EmitBlock(ThenBB);
@@ -1627,9 +2739,6 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
ElemPtr = Bld.CreateElementBitCast(
ElemPtr, CGF.ConvertTypeForMem(Private->getType()));
- // elem = *elemptr
- llvm::Value *Elem = CGF.EmitLoadOfScalar(
- ElemPtr, /*Volatile=*/false, Private->getType(), SourceLocation());
// Get pointer to location in transfer medium.
// MediumPtr = &medium[warp_id]
@@ -1641,8 +2750,19 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
MediumPtr = Bld.CreateElementBitCast(
MediumPtr, CGF.ConvertTypeForMem(Private->getType()));
+ // elem = *elemptr
// *MediumPtr = elem
- Bld.CreateStore(Elem, MediumPtr);
+ if (Private->getType()->isScalarType()) {
+ llvm::Value *Elem = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
+ Private->getType(), Loc);
+ // Store the source element value to the dest element address.
+ CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/false,
+ Private->getType());
+ } else {
+ CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
+ CGF.MakeAddrLValue(MediumPtr, Private->getType()),
+ Private->getType(), AggValueSlot::DoesNotOverlap);
+ }
Bld.CreateBr(MergeBB);
@@ -1655,7 +2775,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, SourceLocation());
- auto *NumActiveThreads = Bld.CreateNSWMul(
+ llvm::Value *NumActiveThreads = Bld.CreateNSWMul(
NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads");
// named_barrier_sync(ParallelBarrierID, num_active_threads)
syncParallelThreads(CGF, NumActiveThreads);
@@ -1668,7 +2788,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
// Up to 32 threads in warp 0 are active.
- auto IsActiveThread =
+ llvm::Value *IsActiveThread =
Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
@@ -1682,8 +2802,6 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// SrcMediumVal = *SrcMediumPtr;
SrcMediumPtr = Bld.CreateElementBitCast(
SrcMediumPtr, CGF.ConvertTypeForMem(Private->getType()));
- llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
- SrcMediumPtr, /*Volatile=*/false, Private->getType(), SourceLocation());
// TargetElemPtr = (type[i]*)(SrcDataAddr[i])
Address TargetElemPtrPtr =
@@ -1696,8 +2814,17 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
TargetElemPtr, CGF.ConvertTypeForMem(Private->getType()));
// *TargetElemPtr = SrcMediumVal;
- CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
- Private->getType());
+ if (Private->getType()->isScalarType()) {
+ llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
+ SrcMediumPtr, /*Volatile=*/false, Private->getType(), Loc);
+ CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
+ Private->getType());
+ } else {
+ CGF.EmitAggregateCopy(
+ CGF.MakeAddrLValue(SrcMediumPtr, Private->getType()),
+ CGF.MakeAddrLValue(TargetElemPtr, Private->getType()),
+ Private->getType(), AggValueSlot::DoesNotOverlap);
+ }
Bld.CreateBr(W0MergeBB);
CGF.EmitBlock(W0ElseBB);
@@ -1708,7 +2835,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// While warp 0 copies values from transfer medium, all other warps must
// wait.
syncParallelThreads(CGF, NumActiveThreads);
- Idx++;
+ ++Idx;
}
CGF.FinishFunction();
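+
+// In pseudo-code, each element is moved through shared memory roughly as
+// follows (a sketch):
+//
+//   if (lane_id == 0)                     // warp master publishes
+//     medium[warp_id] = reduce_list[i];
+//   named_barrier(num_active_threads);
+//   if (thread_id < num_warps)            // first warp gathers
+//     reduce_list[i] = medium[thread_id];
+//   named_barrier(num_active_threads);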
@@ -1781,39 +2908,40 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
/// (2k+1)th thread is ignored in the value aggregation. Therefore
/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
/// that the contiguity assumption still holds.
-static llvm::Value *
-emitShuffleAndReduceFunction(CodeGenModule &CGM,
- ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Value *ReduceFn) {
- auto &C = CGM.getContext();
+static llvm::Value *emitShuffleAndReduceFunction(
+ CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
+ QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
+ ASTContext &C = CGM.getContext();
// Thread local Reduce list used to host the values of data to be reduced.
- ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.VoidPtrTy, ImplicitParamDecl::Other);
// Current lane id; could be logical.
- ImplicitParamDecl LaneIDArg(C, C.ShortTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
+ ImplicitParamDecl::Other);
// Offset of the remote source lane relative to the current lane.
- ImplicitParamDecl RemoteLaneOffsetArg(C, C.ShortTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.ShortTy, ImplicitParamDecl::Other);
// Algorithm version. This is expected to be known at compile time.
- ImplicitParamDecl AlgoVerArg(C, C.ShortTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
+ C.ShortTy, ImplicitParamDecl::Other);
FunctionArgList Args;
Args.push_back(&ReduceListArg);
Args.push_back(&LaneIDArg);
Args.push_back(&RemoteLaneOffsetArg);
Args.push_back(&AlgoVerArg);
- auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
+ const CGFunctionInfo &CGFI =
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
"_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
- // We don't need debug information in this function as nothing here refers to
- // user code.
- CGF.disableDebugInfo();
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
+ CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
- auto &Bld = CGF.Builder;
+ CGBuilderTy &Bld = CGF.Builder;
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address LocalReduceList(
@@ -1870,21 +2998,19 @@ emitShuffleAndReduceFunction(CodeGenModule &CGM,
// When AlgoVer==2, the third conjunction has only the second part to be
// evaluated during runtime. The other conjunctions evaluate to false
// at compile time.
- auto CondAlgo0 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(0));
+ llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
- auto Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
- auto CondAlgo1 = Bld.CreateAnd(
+ llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
+ llvm::Value *CondAlgo1 = Bld.CreateAnd(
Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
- auto Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
- auto CondAlgo2 = Bld.CreateAnd(
- Algo2,
- Bld.CreateICmpEQ(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1)),
- Bld.getInt16(0)));
+ llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
+ llvm::Value *CondAlgo2 = Bld.CreateAnd(
+ Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
CondAlgo2 = Bld.CreateAnd(
CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
- auto CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
+ llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
@@ -1898,7 +3024,8 @@ emitShuffleAndReduceFunction(CodeGenModule &CGM,
LocalReduceList.getPointer(), CGF.VoidPtrTy);
llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
RemoteReduceList.getPointer(), CGF.VoidPtrTy);
- CGF.EmitCallOrInvoke(ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
+ CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
Bld.CreateBr(MergeBB);
CGF.EmitBlock(ElseBB);
@@ -1909,7 +3036,7 @@ emitShuffleAndReduceFunction(CodeGenModule &CGM,
// if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
// Reduce list.
Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
- auto CondCopy = Bld.CreateAnd(
+ llvm::Value *CondCopy = Bld.CreateAnd(
Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
@@ -2182,16 +3309,22 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
- // FIXME: Add support for simd reduction.
- assert((TeamsReduction || ParallelReduction) &&
+ bool SimdReduction = isOpenMPSimdDirective(Options.ReductionKind);
+ assert((TeamsReduction || ParallelReduction || SimdReduction) &&
"Invalid reduction selection in emitReduction.");
- auto &C = CGM.getContext();
+ if (Options.SimpleReduction) {
+ CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
+ ReductionOps, Options);
+ return;
+ }
+
+ ASTContext &C = CGM.getContext();
// 1. Build a list of reduction variables.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
auto Size = RHSExprs.size();
- for (auto *E : Privates) {
+ for (const Expr *E : Privates) {
if (E->getType()->isVariablyModifiedType())
// Reserve place for array size.
++Size;
@@ -2219,7 +3352,7 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
llvm::Value *Size = CGF.Builder.CreateIntCast(
CGF.getVLASize(
CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .first,
+ .NumElts,
CGF.SizeTy, /*isSigned=*/false);
CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
Elem);
@@ -2227,41 +3360,44 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
}
// 2. Emit reduce_func().
- auto *ReductionFn = emitReductionFunction(
- CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ReductionFn = emitReductionFunction(
+ CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
// 4. Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
// RedList, shuffle_reduce_func, interwarp_copy_func);
- auto *ThreadId = getThreadID(CGF, Loc);
- auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ llvm::Value *ThreadId = getThreadID(CGF, Loc);
+ llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
- auto *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn);
- auto *InterWarpCopyFn =
- emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy);
+ llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
+ CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
+ llvm::Value *InterWarpCopyFn =
+ emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
- llvm::Value *Res = nullptr;
- if (ParallelReduction) {
- llvm::Value *Args[] = {ThreadId,
- CGF.Builder.getInt32(RHSExprs.size()),
- ReductionArrayTySize,
- RL,
- ShuffleAndReduceFn,
- InterWarpCopyFn};
+ llvm::Value *Args[] = {ThreadId,
+ CGF.Builder.getInt32(RHSExprs.size()),
+ ReductionArrayTySize,
+ RL,
+ ShuffleAndReduceFn,
+ InterWarpCopyFn};
+ llvm::Value *Res = nullptr;
+ if (ParallelReduction)
Res = CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
Args);
- }
+ else if (SimdReduction)
+ Res = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_simd_reduce_nowait),
+ Args);
if (TeamsReduction) {
- auto *ScratchPadCopyFn =
- emitCopyToScratchpad(CGM, Privates, ReductionArrayTy);
- auto *LoadAndReduceFn = emitReduceScratchpadFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn);
+ llvm::Value *ScratchPadCopyFn =
+ emitCopyToScratchpad(CGM, Privates, ReductionArrayTy, Loc);
+ llvm::Value *LoadAndReduceFn = emitReduceScratchpadFunction(
+ CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
llvm::Value *Args[] = {ThreadId,
CGF.Builder.getInt32(RHSExprs.size()),
@@ -2277,25 +3413,26 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
}
// 5. Build switch(res)
- auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
- auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);
+ llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
+ llvm::SwitchInst *SwInst =
+ CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);
// 6. Build case 1: where we have reduced values in the master
// thread in each team.
// __kmpc_end_reduce{_nowait}(<gtid>);
// break;
- auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
+ llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
CGF.EmitBlock(Case1BB);
// Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
llvm::Value *EndArgs[] = {ThreadId};
- auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps,
+ auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
this](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto IPriv = Privates.begin();
auto ILHS = LHSExprs.begin();
auto IRHS = RHSExprs.begin();
- for (auto *E : ReductionOps) {
+ for (const Expr *E : ReductionOps) {
emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
++IPriv;
@@ -2334,11 +3471,10 @@ CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
enum { NVPTX_local_addr = 5 };
QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
ArgType = QC.apply(CGM.getContext(), ArgType);
- if (isa<ImplicitParamDecl>(NativeParam)) {
+ if (isa<ImplicitParamDecl>(NativeParam))
return ImplicitParamDecl::Create(
CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
- }
return ParmVarDecl::Create(
CGM.getContext(),
const_cast<DeclContext *>(NativeParam->getDeclContext()),
@@ -2397,8 +3533,8 @@ void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
continue;
}
llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NativeArg, NativeArg->getType()->getPointerElementType()->getPointerTo(
- /*AddrSpace=*/0));
+ NativeArg,
+ NativeArg->getType()->getPointerElementType()->getPointerTo());
TargetArgs.emplace_back(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
}
@@ -2409,10 +3545,10 @@ void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
/// and controls the arguments which are passed to this function.
/// The wrapper ensures that the outlined function is called
/// with the correct arguments when data is shared.
-llvm::Function *CGOpenMPRuntimeNVPTX::createDataSharingWrapper(
+llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
ASTContext &Ctx = CGM.getContext();
- const auto &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+ const auto &CS = *D.getCapturedStmt(OMPD_parallel);
// Create a function that takes as argument the source thread.
FunctionArgList WrapperArgs;
@@ -2420,76 +3556,200 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createDataSharingWrapper(
Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
QualType Int32QTy =
Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
- QualType Int32PtrQTy = Ctx.getPointerType(Int32QTy);
- QualType VoidPtrPtrQTy = Ctx.getPointerType(Ctx.VoidPtrTy);
- ImplicitParamDecl ParallelLevelArg(Ctx, Int16QTy, ImplicitParamDecl::Other);
- ImplicitParamDecl WrapperArg(Ctx, Int32QTy, ImplicitParamDecl::Other);
- ImplicitParamDecl SharedArgsList(Ctx, VoidPtrPtrQTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getLocStart(),
+ /*Id=*/nullptr, Int16QTy,
+ ImplicitParamDecl::Other);
+ ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getLocStart(),
+ /*Id=*/nullptr, Int32QTy,
+ ImplicitParamDecl::Other);
WrapperArgs.emplace_back(&ParallelLevelArg);
WrapperArgs.emplace_back(&WrapperArg);
- WrapperArgs.emplace_back(&SharedArgsList);
- auto &CGFI =
+ const CGFunctionInfo &CGFI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- OutlinedParallelFn->getName() + "_wrapper", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
+ Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
- CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs);
+ CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
+ D.getLocStart(), D.getLocStart());
const auto *RD = CS.getCapturedRecordDecl();
auto CurField = RD->field_begin();
+ Address ZeroAddr = CGF.CreateMemTemp(
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
+ /*Name*/ ".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
// Get the array of arguments.
SmallVector<llvm::Value *, 8> Args;
- // TODO: suppport SIMD and pass actual values
- Args.emplace_back(llvm::ConstantPointerNull::get(
- CGM.Int32Ty->getPointerTo()));
- Args.emplace_back(llvm::ConstantPointerNull::get(
- CGM.Int32Ty->getPointerTo()));
+ Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
+ Args.emplace_back(ZeroAddr.getPointer());
CGBuilderTy &Bld = CGF.Builder;
auto CI = CS.capture_begin();
- // Load the start of the array
- auto SharedArgs =
- CGF.EmitLoadOfPointer(CGF.GetAddrOfLocalVar(&SharedArgsList),
- VoidPtrPtrQTy->castAs<PointerType>());
-
- // For each captured variable
- for (unsigned I = 0; I < CS.capture_size(); ++I, ++CI, ++CurField) {
- // Name of captured variable
- StringRef Name;
- if (CI->capturesThis())
- Name = "this";
- else
- Name = CI->getCapturedVar()->getName();
-
- // We retrieve the CLANG type of the argument. We use it to create
- // an alloca which will give us the LLVM type.
- QualType ElemTy = CurField->getType();
- // If this is a capture by copy the element type has to be the pointer to
- // the data.
- if (CI->capturesVariableByCopy())
- ElemTy = Ctx.getPointerType(ElemTy);
-
- // Get shared address of the captured variable.
- Address ArgAddress = Bld.CreateConstInBoundsGEP(
- SharedArgs, I, CGF.getPointerSize());
- Address TypedArgAddress = Bld.CreateBitCast(
- ArgAddress, CGF.ConvertTypeForMem(Ctx.getPointerType(ElemTy)));
- llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedArgAddress,
- /*Volatile=*/false, Int32PtrQTy, SourceLocation());
- Args.emplace_back(Arg);
- }
-
- emitCall(CGF, OutlinedParallelFn, Args);
+ // Use global memory for data sharing.
+ // Handle passing of global args to workers.
+ Address GlobalArgs =
+ CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
+ llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
+ llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
+ DataSharingArgs);
+
+ // Retrieve the shared variables from the list of references returned
+ // by the runtime. Pass the variables to the outlined function.
+ Address SharedArgListAddress = Address::invalid();
+ if (CS.capture_size() > 0 ||
+ isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
+ SharedArgListAddress = CGF.EmitLoadOfPointer(
+ GlobalArgs, CGF.getContext()
+ .getPointerType(CGF.getContext().getPointerType(
+ CGF.getContext().VoidPtrTy))
+ .castAs<PointerType>());
+ }
+ unsigned Idx = 0;
+ if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
+ Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
+ CGF.getPointerSize());
+ Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Src, CGF.SizeTy->getPointerTo());
+ llvm::Value *LB = CGF.EmitLoadOfScalar(
+ TypedAddress,
+ /*Volatile=*/false,
+ CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
+ cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
+ Args.emplace_back(LB);
+ ++Idx;
+ Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx,
+ CGF.getPointerSize());
+ TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Src, CGF.SizeTy->getPointerTo());
+ llvm::Value *UB = CGF.EmitLoadOfScalar(
+ TypedAddress,
+ /*Volatile=*/false,
+ CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
+ cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
+ Args.emplace_back(UB);
+ ++Idx;
+ }
+ if (CS.capture_size() > 0) {
+ ASTContext &CGFContext = CGF.getContext();
+ for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
+ QualType ElemTy = CurField->getType();
+ Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx,
+ CGF.getPointerSize());
+ Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
+ llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
+ /*Volatile=*/false,
+ CGFContext.getPointerType(ElemTy),
+ CI->getLocation());
+ if (CI->capturesVariableByCopy() &&
+ !CI->getCapturedVar()->getType()->isAnyPointerType()) {
+ Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
+ CI->getLocation());
+ }
+ Args.emplace_back(Arg);
+ }
+ }
+
+ emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedParallelFn, Args);
CGF.FinishFunction();
return Fn;
}
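+
+// In pseudo-code, the generated wrapper is roughly (a sketch; names are
+// illustrative):
+//
+//   void outlined_wrapper(int16_t parallel_level, int32_t gtid) {
+//     void **args;
+//     __kmpc_get_shared_variables(&args);
+//     // loop bounds first, if the directive shares them, then captures
+//     outlined(&gtid, &zero, args[0], args[1], ...);
+//   }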
+
+void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
+ const Decl *D) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
+ return;
+
+ assert(D && "Expected function or captured|block decl.");
+ assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
+ "Function is registered already.");
+ const Stmt *Body = nullptr;
+ bool NeedToDelayGlobalization = false;
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ Body = FD->getBody();
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
+ Body = BD->getBody();
+ } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
+ Body = CD->getBody();
+ NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
+ }
+ if (!Body)
+ return;
+ CheckVarsEscapingDeclContext VarChecker(CGF);
+ VarChecker.Visit(Body);
+ const RecordDecl *GlobalizedVarsRecord = VarChecker.getGlobalizedRecord();
+ ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
+ VarChecker.getEscapedVariableLengthDecls();
+ if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
+ return;
+ auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
+ I->getSecond().MappedParams =
+ llvm::make_unique<CodeGenFunction::OMPMapVars>();
+ I->getSecond().GlobalRecord = GlobalizedVarsRecord;
+ I->getSecond().EscapedParameters.insert(
+ VarChecker.getEscapedParameters().begin(),
+ VarChecker.getEscapedParameters().end());
+ I->getSecond().EscapedVariableLengthDecls.append(
+ EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
+ DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
+ for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
+ assert(VD->isCanonicalDecl() && "Expected canonical declaration");
+ const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
+ Data.insert(std::make_pair(VD, std::make_pair(FD, Address::invalid())));
+ }
+ if (!NeedToDelayGlobalization) {
+ emitGenericVarsProlog(CGF, D->getLocStart());
+ struct GlobalizationScope final : EHScopeStack::Cleanup {
+ GlobalizationScope() = default;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
+ .emitGenericVarsEpilog(CGF);
+ }
+ };
+ CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
+ }
+}
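
A minimal example of what the prolog above globalizes (assuming Generic data-sharing mode on the device): a local declared by the master thread but touched from a nested parallel region escapes its declaration context, so it must live in storage the workers can reach.

    // Sketch: 'a' escapes into the parallel region, so it is moved
    // into the globalized record instead of the master's private frame.
    #pragma omp target teams
    {
      int a = 0;          // declared in the master thread's scope...
      #pragma omp parallel
      {
        a += 1;           // ...but read and written by the workers
      }
    }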
+
+Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
+ return Address::invalid();
+
+ VD = VD->getCanonicalDecl();
+ auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I == FunctionGlobalizedDecls.end())
+ return Address::invalid();
+ auto VDI = I->getSecond().LocalVarData.find(VD);
+ if (VDI != I->getSecond().LocalVarData.end())
+ return VDI->second.second;
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
+ E(VD->attr_end());
+ IT != E; ++IT) {
+ auto VDI = I->getSecond().LocalVarData.find(
+ cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
+ ->getCanonicalDecl());
+ if (VDI != I->getSecond().LocalVarData.end())
+ return VDI->second.second;
+ }
+ }
+ return Address::invalid();
+}
+
+void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
+ FunctionGlobalizedDecls.erase(CGF.CurFn);
+ CGOpenMPRuntime::functionFinished(CGF);
+}
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index 5d13408318a5..f83e99f8a3b7 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -24,8 +24,18 @@ namespace clang {
namespace CodeGen {
class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime {
+public:
+ /// Defines the execution mode.
+ enum ExecutionMode {
+ /// SPMD execution mode (all threads are worker threads).
+ EM_SPMD,
+ /// Non-SPMD execution mode (1 master thread, others are workers).
+ EM_NonSPMD,
+ /// Unknown execution mode (orphaned directive).
+ EM_Unknown,
+ };
private:
- // Parallel outlined function work for workers to execute.
+ /// Parallel outlined function work for workers to execute.
llvm::SmallVector<llvm::Function *, 16> Work;
struct EntryFunctionState {
@@ -35,48 +45,56 @@ private:
class WorkerFunctionState {
public:
llvm::Function *WorkerFn;
- const CGFunctionInfo *CGFI;
+ const CGFunctionInfo &CGFI;
+ SourceLocation Loc;
- WorkerFunctionState(CodeGenModule &CGM);
+ WorkerFunctionState(CodeGenModule &CGM, SourceLocation Loc);
private:
void createWorkerFunction(CodeGenModule &CGM);
};
- bool isInSpmdExecutionMode() const;
+ ExecutionMode getExecutionMode() const;
- /// \brief Emit the worker function for the current target region.
+ /// Emit the worker function for the current target region.
void emitWorkerFunction(WorkerFunctionState &WST);
- /// \brief Helper for worker function. Emit body of worker loop.
+ /// Helper for worker function. Emit body of worker loop.
void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
- /// \brief Helper for generic target entry function. Guide the master and
+ /// Helper for non-SPMD target entry function. Guide the master and
/// worker threads to their respective locations.
- void emitGenericEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
+ void emitNonSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
WorkerFunctionState &WST);
- /// \brief Signal termination of OMP execution for generic target entry
+ /// Signal termination of OMP execution for non-SPMD target entry
/// function.
- void emitGenericEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
+ void emitNonSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
+
+ /// Helper for generic variables globalization prolog.
+ void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc);
+
+ /// Helper for generic variables globalization epilog.
+ void emitGenericVarsEpilog(CodeGenFunction &CGF);
- /// \brief Helper for Spmd mode target directive's entry function.
- void emitSpmdEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
+ /// Helper for SPMD mode target directive's entry function.
+ void emitSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
const OMPExecutableDirective &D);
- /// \brief Signal termination of Spmd mode execution.
- void emitSpmdEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
+ /// Signal termination of SPMD mode execution.
+ void emitSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
//
// Base class overrides.
//
- /// \brief Creates offloading entry for the provided entry ID \a ID,
+ /// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
- uint64_t Size, int32_t Flags = 0) override;
+ uint64_t Size, int32_t Flags,
+ llvm::GlobalValue::LinkageTypes Linkage) override;
- /// \brief Emit outlined function specialized for the Fork-Join
+ /// Emit outlined function specialized for the Fork-Join
/// programming model for applicable target directives on the NVPTX device.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
@@ -85,12 +103,12 @@ private:
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
- void emitGenericKernel(const OMPExecutableDirective &D, StringRef ParentName,
+ void emitNonSPMDKernel(const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
- /// \brief Emit outlined function specialized for the Single Program
+ /// Emit outlined function specialized for the Single Program
/// Multiple Data programming model for applicable target directives on the
/// NVPTX device.
/// \param D Directive to emit.
@@ -101,12 +119,12 @@ private:
/// \param CodeGen Object containing the target statements.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
- void emitSpmdKernel(const OMPExecutableDirective &D, StringRef ParentName,
+ void emitSPMDKernel(const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID, bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
- /// \brief Emit outlined function for 'target' directive on the NVPTX
+ /// Emit outlined function for 'target' directive on the NVPTX
/// device.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
@@ -122,22 +140,22 @@ private:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
- /// \brief Emits code for parallel or serial call of the \a OutlinedFn with
+ /// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
- /// This call is for the Generic Execution Mode.
+ /// This call is for the Non-SPMD Execution Mode.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
- void emitGenericParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ void emitNonSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
- /// \brief Emits code for parallel or serial call of the \a OutlinedFn with
+ /// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// This call is for a parallel directive within an SPMD target directive.
@@ -148,13 +166,13 @@ private:
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
- void emitSpmdParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+ void emitSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
protected:
- /// \brief Get the function name of an outlined region.
+ /// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
StringRef getOutlinedHelperName() const override {
@@ -164,13 +182,13 @@ protected:
public:
explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
- /// \brief Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
+ /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
OpenMPProcBindClauseKind ProcBind,
SourceLocation Loc) override;
- /// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
+ /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
@@ -178,7 +196,7 @@ public:
llvm::Value *NumThreads,
SourceLocation Loc) override;
- /// \brief This function ought to emit, in the general case, a call to
+ /// This function ought to emit, in the general case, a call to
// the OpenMP runtime kmpc_push_num_teams. In the NVPTX backend it is not
// needed as these numbers are obtained through the PTX grid and block
// configuration.
/// \param NumTeams An integer expression of teams.
@@ -186,7 +204,7 @@ public:
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
- /// \brief Emits inlined function for the specified OpenMP parallel
+ /// Emits inlined function for the specified OpenMP parallel
// directive.
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
@@ -201,7 +219,7 @@ public:
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
- /// \brief Emits inlined function for the specified OpenMP teams
+ /// Emits inlined function for the specified OpenMP teams
// directive.
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
@@ -216,7 +234,7 @@ public:
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
- /// \brief Emits code for teams call of the \a OutlinedFn with
+ /// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
@@ -228,7 +246,7 @@ public:
SourceLocation Loc, llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
- /// \brief Emits code for parallel or serial call of the \a OutlinedFn with
+ /// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
@@ -242,6 +260,16 @@ public:
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
+ /// Emits a critical region.
+ /// \param CriticalName Name of the critical region.
+ /// \param CriticalOpGen Generator for the statement associated with the given
+ /// critical region.
+ /// \param Hint Value of the 'hint' clause (optional).
+ void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
+ const RegionCodeGenTy &CriticalOpGen,
+ SourceLocation Loc,
+ const Expr *Hint = nullptr) override;
+
/// Emit code for the reduction clause.
///
/// \param Privates List of private copies for original reduction arguments.
@@ -270,7 +298,7 @@ public:
/// Translates the native parameter of outlined function if this is required
/// for target.
- /// \param FD Field decl from captured record for the paramater.
+ /// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
@@ -288,23 +316,41 @@ public:
CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const override;
- /// Target codegen is specialized based on two programming models: the
- /// 'generic' fork-join model of OpenMP, and a more GPU efficient 'spmd'
- /// model for constructs like 'target parallel' that support it.
- enum ExecutionMode {
- /// Single Program Multiple Data.
- Spmd,
- /// Generic codegen to support fork-join model.
+ /// Emits OpenMP-specific function prolog.
+ /// Required for device constructs.
+ void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D) override;
+
+ /// Gets the OpenMP-specific address of the local variable.
+ Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+ const VarDecl *VD) override;
+
+ /// Target codegen is specialized based on two data-sharing modes: CUDA, in
+ /// which the local variables are actually thread-local globals, and Generic, in
+ /// which the local variables are placed in global memory if they may escape
+ /// their declaration context.
+ enum DataSharingMode {
+ /// CUDA data sharing mode.
+ CUDA,
+ /// Generic data-sharing mode.
Generic,
- Unknown,
};
+ /// Cleans up references to the objects in the finished function.
+ void functionFinished(CodeGenFunction &CGF) override;
+
private:
- // Track the execution mode when codegening directives within a target
- // region. The appropriate mode (generic/spmd) is set on entry to the
- // target region and used by containing directives such as 'parallel'
- // to emit optimized code.
- ExecutionMode CurrentExecutionMode;
+ /// Track the execution mode when codegening directives within a target
+ /// region. The appropriate mode (SPMD/non-SPMD) is set on entry to the
+ /// target region and used by containing directives such as 'parallel'
+ /// to emit optimized code.
+ ExecutionMode CurrentExecutionMode = EM_Unknown;
+
+ /// True if we're emitting the code for the target region and the next
+ /// parallel region is definitely at level 0 (L0).
+ bool IsInTargetMasterThreadRegion = false;
+ /// True if we're definitely inside a parallel region.
+ bool IsInParallelRegion = false;
/// Map between an outlined function and its wrapper.
llvm::DenseMap<llvm::Function *, llvm::Function *> WrapperFunctionsMap;
@@ -313,9 +359,26 @@ private:
/// and controls the parameters which are passed to this function.
/// The wrapper ensures that the outlined function is called
/// with the correct arguments when data is shared.
- llvm::Function *
- createDataSharingWrapper(llvm::Function *OutlinedParallelFn,
- const OMPExecutableDirective &D);
+ llvm::Function *createParallelDataSharingWrapper(
+ llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D);
+
+ /// The map of local variables to their addresses in the global memory.
+ using DeclToAddrMapTy = llvm::MapVector<const Decl *,
+ std::pair<const FieldDecl *, Address>>;
+ /// Set of the parameters passed by value escaping OpenMP context.
+ using EscapedParamsTy = llvm::SmallPtrSet<const Decl *, 4>;
+ struct FunctionData {
+ DeclToAddrMapTy LocalVarData;
+ EscapedParamsTy EscapedParameters;
+ llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
+ llvm::SmallVector<llvm::Value *, 4> EscapedVariableLengthDeclsAddrs;
+ const RecordDecl *GlobalRecord = nullptr;
+ llvm::Value *GlobalRecordAddr = nullptr;
+ std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
+ };
+ /// Maps the function to the list of globalized variables with their
+ /// addresses.
+ llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
};
} // CodeGen namespace.
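
As a reading aid for the EM_SPMD/EM_NonSPMD split above (an interpretation, not part of the patch): a combined construct like 'target parallel for' can run every GPU thread as a worker from the start, while a bare 'target' region keeps one master thread that only wakes the workers at the nested parallel.

    #pragma omp target parallel for   // EM_SPMD: all threads run the loop
    for (int i = 0; i < n; ++i)
      a[i] = b[i] + 1;

    #pragma omp target                // EM_NonSPMD: master runs this part,
    {
      setup();
      #pragma omp parallel            // workers are activated only here
      work();
    }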
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 7b9c27d1d772..41084294ab9a 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -23,7 +23,7 @@ namespace llvm {
namespace clang {
namespace CodeGen {
-/// \brief Structure with information about how a bitfield should be accessed.
+/// Structure with information about how a bitfield should be accessed.
///
/// Often we layout a sequence of bitfields as a contiguous sequence of bits.
/// When the AST record layout does this, we represent it in the LLVM IR's type
@@ -92,7 +92,7 @@ struct CGBitFieldInfo {
void print(raw_ostream &OS) const;
void dump() const;
- /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// Given a bit-field decl, build an appropriate helper object for
/// accessing that field (which is expected to have the given offset and
/// size).
static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types,
@@ -156,31 +156,31 @@ public:
IsZeroInitializable(IsZeroInitializable),
IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
- /// \brief Return the "complete object" LLVM type associated with
+ /// Return the "complete object" LLVM type associated with
/// this record.
llvm::StructType *getLLVMType() const {
return CompleteObjectType;
}
- /// \brief Return the "base subobject" LLVM type associated with
+ /// Return the "base subobject" LLVM type associated with
/// this record.
llvm::StructType *getBaseSubobjectLLVMType() const {
return BaseSubobjectType;
}
- /// \brief Check whether this struct can be C++ zero-initialized
+ /// Check whether this struct can be C++ zero-initialized
/// with a zeroinitializer.
bool isZeroInitializable() const {
return IsZeroInitializable;
}
- /// \brief Check whether this struct can be C++ zero-initialized
+ /// Check whether this struct can be C++ zero-initialized
/// with a zeroinitializer when considered as a base subobject.
bool isZeroInitializableAsBase() const {
return IsZeroInitializableAsBase;
}
- /// \brief Return llvm::StructType element number that corresponds to the
+ /// Return llvm::StructType element number that corresponds to the
/// field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const {
FD = FD->getCanonicalDecl();
@@ -193,14 +193,14 @@ public:
return NonVirtualBases.lookup(RD);
}
- /// \brief Return the LLVM field index corresponding to the given
+ /// Return the LLVM field index corresponding to the given
/// virtual base. Only valid when operating on the complete object.
unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
return CompleteObjectVirtualBases.lookup(base);
}
- /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ /// Return the BitFieldInfo that corresponds to the field FD.
const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
FD = FD->getCanonicalDecl();
assert(FD->isBitField() && "Invalid call for non-bit-field decl!");
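
A short usage sketch of the CGRecordLayout queries documented above (hypothetical driver code; assumes a live CodeGenTypes instance):

    void inspect(clang::CodeGen::CodeGenTypes &Types,
                 const clang::RecordDecl *RD, const clang::FieldDecl *FD) {
      const clang::CodeGen::CGRecordLayout &RL = Types.getCGRecordLayout(RD);
      unsigned Idx = RL.getLLVMFieldNo(FD); // element index in the LLVM struct
      (void)Idx;
      if (FD->isBitField()) {
        // Storage geometry (offset, size, signedness) for bit-field access:
        const clang::CodeGen::CGBitFieldInfo &Info = RL.getBitFieldInfo(FD);
        (void)Info;
      }
    }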
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 1644ab4c0725..4ee6c8e71457 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -62,7 +62,7 @@ namespace {
/// because LLVM reads from the complete type it can generate incorrect code
/// if we do not clip the tail padding off of the bitfield in the complete
/// layout. This introduces a somewhat awkward extra unnecessary clip stage.
-/// The location of the clip is stored internally as a sentinal of type
+/// The location of the clip is stored internally as a sentinel of type
/// SCISSOR. If LLVM were updated to read base types (which it probably
/// should because locations of things such as VBases are bogus in the llvm
/// type anyway) then we could eliminate the SCISSOR.
@@ -74,7 +74,7 @@ namespace {
struct CGRecordLowering {
// MemberInfo is a helper structure that contains information about a record
// member. In additional to the standard member types, there exists a
- // sentinal member type that ensures correct rounding.
+ // sentinel member type that ensures correct rounding.
struct MemberInfo {
CharUnits Offset;
enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
@@ -95,7 +95,7 @@ struct CGRecordLowering {
// The constructor.
CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
// Short helper routines.
- /// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
+ /// Constructs a MemberInfo instance from an offset and llvm::Type *.
MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
return MemberInfo(Offset, MemberInfo::Field, Data);
}
@@ -118,19 +118,19 @@ struct CGRecordLowering {
return !Context.getTargetInfo().getCXXABI().isMicrosoft();
}
- /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
+ /// Wraps llvm::Type::getIntNTy with some implicit arguments.
llvm::Type *getIntNType(uint64_t NumBits) {
return llvm::Type::getIntNTy(Types.getLLVMContext(),
(unsigned)llvm::alignTo(NumBits, 8));
}
- /// \brief Gets an llvm type of size NumBytes and alignment 1.
+ /// Gets an llvm type of size NumBytes and alignment 1.
llvm::Type *getByteArrayType(CharUnits NumBytes) {
assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
return NumBytes == CharUnits::One() ? Type :
(llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
}
- /// \brief Gets the storage type for a field decl and handles storage
+ /// Gets the storage type for a field decl and handles storage
/// for itanium bitfields that are smaller than their declared type.
llvm::Type *getStorageType(const FieldDecl *FD) {
llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
@@ -139,7 +139,7 @@ struct CGRecordLowering {
return getIntNType(std::min(FD->getBitWidthValue(Context),
(unsigned)Context.toBits(getSize(Type))));
}
- /// \brief Gets the llvm Basesubobject type from a CXXRecordDecl.
+ /// Gets the llvm Basesubobject type from a CXXRecordDecl.
llvm::Type *getStorageType(const CXXRecordDecl *RD) {
return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
}
@@ -168,7 +168,7 @@ struct CGRecordLowering {
// Layout routines.
void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
llvm::Type *StorageType);
- /// \brief Lowers an ASTRecordLayout to a llvm type.
+ /// Lowers an ASTRecordLayout to a llvm type.
void lower(bool NonVirtualBaseType);
void lowerUnion();
void accumulateFields();
@@ -177,18 +177,18 @@ struct CGRecordLowering {
void accumulateBases();
void accumulateVPtrs();
void accumulateVBases();
- /// \brief Recursively searches all of the bases to find out if a vbase is
+ /// Recursively searches all of the bases to find out if a vbase is
/// not the primary vbase of some base class.
bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
void calculateZeroInit();
- /// \brief Lowers bitfield storage types to I8 arrays for bitfields with tail
+ /// Lowers bitfield storage types to I8 arrays for bitfields with tail
/// padding that is or can potentially be used.
void clipTailPadding();
- /// \brief Determines if we need a packed llvm struct.
+ /// Determines if we need a packed llvm struct.
void determinePacked(bool NVBaseType);
- /// \brief Inserts padding everwhere it's needed.
+ /// Inserts padding everywhere it's needed.
void insertPadding();
- /// \brief Fills out the structures that are ultimately consumed.
+ /// Fills out the structures that are ultimately consumed.
void fillOutputFields();
// Input memoization fields.
CodeGenTypes &Types;
@@ -214,12 +214,13 @@ private:
};
} // namespace {
-CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed)
- : Types(Types), Context(Types.getContext()), D(D),
- RD(dyn_cast<CXXRecordDecl>(D)),
- Layout(Types.getContext().getASTRecordLayout(D)),
- DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
- IsZeroInitializableAsBase(true), Packed(Packed) {}
+CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
+ bool Packed)
+ : Types(Types), Context(Types.getContext()), D(D),
+ RD(dyn_cast<CXXRecordDecl>(D)),
+ Layout(Types.getContext().getASTRecordLayout(D)),
+ DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
+ IsZeroInitializableAsBase(true), Packed(Packed) {}
void CGRecordLowering::setBitFieldInfo(
const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
@@ -294,8 +295,7 @@ void CGRecordLowering::lowerUnion() {
// been doing and cause lit tests to change.
for (const auto *Field : D->fields()) {
if (Field->isBitField()) {
- // Skip 0 sized bitfields.
- if (Field->getBitWidthValue(Context) == 0)
+ if (Field->isZeroLengthBitField(Context))
continue;
llvm::Type *FieldType = getStorageType(Field);
if (LayoutSize < getSize(FieldType))
@@ -380,7 +380,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
for (; Field != FieldEnd; ++Field) {
uint64_t BitOffset = getFieldBitOffset(*Field);
// Zero-width bitfields end runs.
- if (Field->getBitWidthValue(Context) == 0) {
+ if (Field->isZeroLengthBitField(Context)) {
Run = FieldEnd;
continue;
}
@@ -404,19 +404,20 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
return;
}
- // Check if current Field is better as a single field run. When current field
+ // Check if OffsetInRecord is better as a single field run. When OffsetInRecord
// has legal integer width, and its bitfield offset is naturally aligned, it
// is better to make the bitfield a separate storage component so that it can
// be accessed directly at lower cost.
- auto IsBetterAsSingleFieldRun = [&](RecordDecl::field_iterator Field) {
+ auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
+ uint64_t StartBitOffset) {
if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
return false;
- unsigned Width = Field->getBitWidthValue(Context);
- if (!DataLayout.isLegalInteger(Width))
+ if (!DataLayout.isLegalInteger(OffsetInRecord))
return false;
- // Make sure Field is natually aligned if it is treated as an IType integer.
- if (getFieldBitOffset(*Field) %
- Context.toBits(getAlignment(getIntNType(Width))) !=
+ // Make sure StartBitOffset is naturally aligned if it is treated as an
+ // IType integer.
+ if (StartBitOffset %
+ Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
0)
return false;
return true;
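
A sketch of when this predicate fires under -ffine-grained-bitfield-accesses (the option behind FineGrainedBitfieldAccesses): a run whose total width is a legal integer and whose start offset is naturally aligned becomes its own storage unit.

    // Assuming Itanium-style layout; offsets are illustrative.
    struct S {
      unsigned a : 32;  // offset 0, width 32: legal i32, naturally aligned,
                        // so it gets a separate storage unit
      unsigned b : 32;  // offset 32: likewise its own i32 unit, allowing
    };                  // direct, narrower loads and stores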
@@ -431,26 +432,31 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
if (Field == FieldEnd)
break;
// Any non-zero-length bitfield can start a new run.
- if (Field->getBitWidthValue(Context) != 0) {
+ if (!Field->isZeroLengthBitField(Context)) {
Run = Field;
StartBitOffset = getFieldBitOffset(*Field);
Tail = StartBitOffset + Field->getBitWidthValue(Context);
- StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Run);
+ StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
+ StartBitOffset);
}
++Field;
continue;
}
// If the start field of a new run is better as a single run, or
- // if current field is better as a single run, or
- // if current field has zero width bitfield, or
+ // if the current field (or consecutive fields) is better as a single run, or
+ // if the current field is a zero-width bitfield and either
+ // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
+ // true, or
// if the offset of current field is inconsistent with the offset of
// previous field plus its offset,
// skip the block below and go ahead to emit the storage.
// Otherwise, try to add bitfields to the run.
if (!StartFieldAsSingleRun && Field != FieldEnd &&
- !IsBetterAsSingleFieldRun(Field) &&
- Field->getBitWidthValue(Context) != 0 &&
+ !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
+ (!Field->isZeroLengthBitField(Context) ||
+ (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+ !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
Tail == getFieldBitOffset(*Field)) {
Tail += Field->getBitWidthValue(Context);
++Field;
@@ -626,7 +632,7 @@ void CGRecordLowering::determinePacked(bool NVBaseType) {
// non-virtual sub-object and an unpacked complete object or vice versa.
if (NVSize % NVAlignment)
Packed = true;
- // Update the alignment of the sentinal.
+ // Update the alignment of the sentinel.
if (!Packed)
Members.back().Data = getIntNType(Context.toBits(Alignment));
}
@@ -785,8 +791,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
}
// Verify that the LLVM and AST field offsets agree.
- llvm::StructType *ST =
- dyn_cast<llvm::StructType>(RL->getLLVMType());
+ llvm::StructType *ST = RL->getLLVMType();
const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
@@ -808,7 +813,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
continue;
// Don't inspect zero-length bitfields.
- if (FD->getBitWidthValue(getContext()) == 0)
+ if (FD->isZeroLengthBitField(getContext()))
continue;
const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
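
For intuition about the isZeroLengthBitField checks introduced above: a zero-length bit-field contributes no storage and is never loaded or stored; it only ends the current run so the next bit-field starts a fresh one.

    struct S {
      unsigned a : 3;
      unsigned   : 0;   // zero-length: ends the current run; whether it also
                        // realigns 'b' depends on the target's zero-length
      unsigned b : 5;   // bit-field alignment rules queried in the code above
    };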
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 91fa49a46ef1..79662ec0099f 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -74,6 +74,15 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
// Generate a stoppoint if we are emitting debug info.
EmitStopPoint(S);
+ // Ignore all OpenMP directives except for simd if OpenMP simd-only mode is
+ // enabled.
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
+ if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
+ EmitSimpleOMPExecutableDirective(*D);
+ return;
+ }
+ }
+
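+  // For example, in simd-only mode (presumably enabled by -fopenmp-simd,
+  // which sets LangOpts.OpenMPSimd) only the vectorization half of a
+  // combined directive is honored:
+  //
+  //   #pragma omp parallel for simd   // 'parallel for' is ignored here;
+  //   for (int i = 0; i < n; ++i)     // the loop is still emitted as a
+  //     a[i] += b[i];                 // simd loop via the call above
+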
switch (S->getStmtClass()) {
case Stmt::NoStmtClass:
case Stmt::CXXCatchStmtClass:
@@ -599,7 +608,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
EmitStmt(S.getInit());
if (S.getConditionVariable())
- EmitAutoVarDecl(*S.getConditionVariable());
+ EmitDecl(*S.getConditionVariable());
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
@@ -696,7 +705,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
- EmitAutoVarDecl(*S.getConditionVariable());
+ EmitDecl(*S.getConditionVariable());
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
@@ -768,11 +777,6 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// Emit the body of the loop.
llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- const SourceRange &R = S.getSourceRange();
- LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
- SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()));
-
EmitBlockWithFallThrough(LoopBody, &S);
{
RunCleanupsScope BodyScope(*this);
@@ -781,6 +785,11 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
EmitBlock(LoopCond.getBlock());
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
+ SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
+
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
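
The reordering above matters for loop attributes: LoopStack.push now runs while the condition block is current, so the llvm.loop metadata lands on the back-edge branch emitted at do.cond rather than on branches inside the body. A small case it affects (assuming loop pragmas apply here, as they do for do-statements):

    #pragma clang loop unroll(disable)
    do {
      body();
    } while (cond());   // the branch emitted for this condition is the
                        // back edge that carries the llvm.loop metadata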
@@ -856,7 +865,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// If the for statement has a condition scope, emit the local variable
// declaration.
if (S.getConditionVariable()) {
- EmitAutoVarDecl(*S.getConditionVariable());
+ EmitDecl(*S.getConditionVariable());
}
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
@@ -996,7 +1005,9 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
if (RV.isScalar()) {
Builder.CreateStore(RV.getScalarVal(), ReturnValue);
} else if (RV.isAggregate()) {
- EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
+ LValue Dest = MakeAddrLValue(ReturnValue, Ty);
+ LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
+ EmitAggregateCopy(Dest, Src, Ty, overlapForReturnValue());
} else {
EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
/*init*/ true);
@@ -1026,7 +1037,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
Builder.ClearInsertionPoint();
}
- // Emit the result value, even if unused, to evalute the side effects.
+ // Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
// Treat block literals in a return expression as if they appeared
@@ -1074,11 +1085,12 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
/*isInit*/ true);
break;
case TEK_Aggregate:
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
- Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ EmitAggExpr(RV, AggValueSlot::forAddr(
+ ReturnValue, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ overlapForReturnValue()));
break;
}
}
@@ -1563,7 +1575,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// Emit the condition variable if needed inside the entire cleanup scope
// used by this special case for constant folded switches.
if (S.getConditionVariable())
- EmitAutoVarDecl(*S.getConditionVariable());
+ EmitDecl(*S.getConditionVariable());
// At this point, we are no longer "within" a switch instance, so
// we can temporarily enforce this to ensure that any embedded case
@@ -1592,7 +1604,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
EmitStmt(S.getInit());
if (S.getConditionVariable())
- EmitAutoVarDecl(*S.getConditionVariable());
+ EmitDecl(*S.getConditionVariable());
llvm::Value *CondV = EmitScalarExpr(S.getCond());
// Create basic block to hold stuff that comes after switch
@@ -1915,7 +1927,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Simplify the output constraint.
std::string OutputConstraint(S.getOutputConstraint(i));
OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
- getTarget());
+ getTarget(), &OutputConstraintInfos);
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
@@ -2122,7 +2134,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
/* IsAlignStack */ false, AsmDialect);
- llvm::CallInst *Result = Builder.CreateCall(IA, Args);
+ llvm::CallInst *Result =
+ Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
Result->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind);
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index f9861735832b..0d343f84c71f 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -29,12 +29,13 @@ namespace {
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
for (const auto *C : S.clauses()) {
- if (auto *CPI = OMPClauseWithPreInit::get(C)) {
- if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
+ if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>())
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
CGF.EmitVarDecl(cast<VarDecl>(*I));
- else {
+ } else {
CodeGenFunction::AutoVarEmission Emission =
CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
CGF.EmitAutoVarCleanups(Emission);
@@ -53,34 +54,35 @@ class OMPLexicalScope : public CodeGenFunction::LexicalScope {
}
public:
- OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
- bool AsInlined = false, bool EmitPreInitStmt = true)
+ OMPLexicalScope(
+ CodeGenFunction &CGF, const OMPExecutableDirective &S,
+ const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
+ const bool EmitPreInitStmt = true)
: CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
InlinedShareds(CGF) {
if (EmitPreInitStmt)
emitPreInitStmt(CGF, S);
- if (AsInlined) {
- if (S.hasAssociatedStmt()) {
- auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
- for (auto &C : CS->captures()) {
- if (C.capturesVariable() || C.capturesVariableByCopy()) {
- auto *VD = C.getCapturedVar();
- assert(VD == VD->getCanonicalDecl() &&
- "Canonical decl must be captured.");
- DeclRefExpr DRE(const_cast<VarDecl *>(VD),
- isCapturedVar(CGF, VD) ||
- (CGF.CapturedStmtInfo &&
- InlinedShareds.isGlobalVarCaptured(VD)),
- VD->getType().getNonReferenceType(), VK_LValue,
- SourceLocation());
- InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
- return CGF.EmitLValue(&DRE).getAddress();
- });
- }
- }
- (void)InlinedShareds.Privatize();
+ if (!CapturedRegion.hasValue())
+ return;
+ assert(S.hasAssociatedStmt() &&
+ "Expected associated statement for inlined directive.");
+ const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
+ for (const auto &C : CS->captures()) {
+ if (C.capturesVariable() || C.capturesVariableByCopy()) {
+ auto *VD = C.getCapturedVar();
+ assert(VD == VD->getCanonicalDecl() &&
+ "Canonical decl must be captured.");
+ DeclRefExpr DRE(
+ const_cast<VarDecl *>(VD),
+ isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
+ InlinedShareds.isGlobalVarCaptured(VD)),
+ VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
+ InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
+ return CGF.EmitLValue(&DRE).getAddress();
+ });
}
}
+ (void)InlinedShareds.Privatize();
}
};
@@ -96,9 +98,8 @@ class OMPParallelScope final : public OMPLexicalScope {
public:
OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
- : OMPLexicalScope(CGF, S,
- /*AsInlined=*/false,
- /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
+ : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
+ EmitPreInitStmt(S)) {}
};
/// Lexical scope for OpenMP teams construct, that handles correct codegen
@@ -112,29 +113,26 @@ class OMPTeamsScope final : public OMPLexicalScope {
public:
OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
- : OMPLexicalScope(CGF, S,
- /*AsInlined=*/false,
- /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
+ : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
+ EmitPreInitStmt(S)) {}
};
/// Private scope for OpenMP loop-based directives, that supports capturing
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
- CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
- for (auto *E : S.counters()) {
+ CodeGenFunction::OMPMapVars PreCondVars;
+ for (const auto *E : S.counters()) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- (void)PreCondScope.addPrivate(VD, [&CGF, VD]() {
- return CGF.CreateMemTemp(VD->getType().getNonReferenceType());
- });
+ (void)PreCondVars.setVarAddr(
+ CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
}
- (void)PreCondScope.Privatize();
- if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
- if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
- for (const auto *I : PreInits->decls())
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- }
+ (void)PreCondVars.apply(CGF);
+ if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
+ for (const auto *I : PreInits->decls())
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
}
+ PreCondVars.restore(CGF);
}
public:
@@ -144,6 +142,72 @@ public:
}
};
+class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
+ CodeGenFunction::OMPPrivateScope InlinedShareds;
+
+ static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
+ return CGF.LambdaCaptureFields.lookup(VD) ||
+ (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
+ (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
+ cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
+ }
+
+public:
+ OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
+ : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
+ InlinedShareds(CGF) {
+ for (const auto *C : S.clauses()) {
+ if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
+ }
+ }
+ }
+ } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
+ for (const Expr *E : UDP->varlists()) {
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
+ }
+ }
+ if (!isOpenMPSimdDirective(S.getDirectiveKind()))
+ CGF.EmitOMPPrivateClause(S, InlinedShareds);
+ if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
+ if (const Expr *E = TG->getReductionRef())
+ CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
+ }
+ const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
+ while (CS) {
+ for (auto &C : CS->captures()) {
+ if (C.capturesVariable() || C.capturesVariableByCopy()) {
+ auto *VD = C.getCapturedVar();
+ assert(VD == VD->getCanonicalDecl() &&
+ "Canonical decl must be captured.");
+ DeclRefExpr DRE(const_cast<VarDecl *>(VD),
+ isCapturedVar(CGF, VD) ||
+ (CGF.CapturedStmtInfo &&
+ InlinedShareds.isGlobalVarCaptured(VD)),
+ VD->getType().getNonReferenceType(), VK_LValue,
+ C.getLocation());
+ InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
+ return CGF.EmitLValue(&DRE).getAddress();
+ });
+ }
+ }
+ CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
+ }
+ (void)InlinedShareds.Privatize();
+ }
+};
+
} // namespace
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
@@ -151,8 +215,8 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
const RegionCodeGenTy &CodeGen);
LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
- if (auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
- if (auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
+ if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
OrigVD = OrigVD->getCanonicalDecl();
bool IsCaptured =
LambdaCaptureFields.lookup(OrigVD) ||
@@ -167,23 +231,23 @@ LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
}
llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
- auto &C = getContext();
+ ASTContext &C = getContext();
llvm::Value *Size = nullptr;
auto SizeInChars = C.getTypeSizeInChars(Ty);
if (SizeInChars.isZero()) {
// getTypeSizeInChars() returns 0 for a VLA.
- while (auto *VAT = C.getAsVariableArrayType(Ty)) {
- llvm::Value *ArraySize;
- std::tie(ArraySize, Ty) = getVLASize(VAT);
- Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
+ while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
+ VlaSizePair VlaSize = getVLASize(VAT);
+ Ty = VlaSize.Type;
+ Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
+ : VlaSize.NumElts;
}
SizeInChars = C.getTypeSizeInChars(Ty);
if (SizeInChars.isZero())
return llvm::ConstantInt::get(SizeTy, /*V=*/0);
- Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
- } else
- Size = CGM.getSize(SizeInChars);
- return Size;
+ return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
+ }
+ return CGM.getSize(SizeInChars);
}
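
A sketch of what the VLA loop above computes: each variable dimension multiplies into Size with no-unsigned-wrap arithmetic until a constant-sized element type remains, whose byte size supplies the final factor.

    void f(int n, int m) {
      int a[n][m][4];
      // getTypeSize(typeof(a)) conceptually builds:
      //   Size = n * m;               // variable dims, NUW multiplies
      //   Size = Size * sizeof(int[4]);  // remaining constant-sized part
    }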
void CodeGenFunction::GenerateOpenMPCapturedVars(
@@ -195,27 +259,26 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
E = S.capture_init_end();
I != E; ++I, ++CurField, ++CurCap) {
if (CurField->hasCapturedVLAType()) {
- auto VAT = CurField->getCapturedVLAType();
- auto *Val = VLASizeMap[VAT->getSizeExpr()];
+ const VariableArrayType *VAT = CurField->getCapturedVLAType();
+ llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
CapturedVars.push_back(Val);
- } else if (CurCap->capturesThis())
+ } else if (CurCap->capturesThis()) {
CapturedVars.push_back(CXXThisValue);
- else if (CurCap->capturesVariableByCopy()) {
- llvm::Value *CV =
- EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();
+ } else if (CurCap->capturesVariableByCopy()) {
+ llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());
// If the field is not a pointer, we need to save the actual value
// and load it as a void pointer.
if (!CurField->getType()->isAnyPointerType()) {
- auto &Ctx = getContext();
- auto DstAddr = CreateMemTemp(
+ ASTContext &Ctx = getContext();
+ Address DstAddr = CreateMemTemp(
Ctx.getUIntPtrType(),
- Twine(CurCap->getCapturedVar()->getName()) + ".casted");
+ Twine(CurCap->getCapturedVar()->getName(), ".casted"));
LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());
- auto *SrcAddrVal = EmitScalarConversion(
+ llvm::Value *SrcAddrVal = EmitScalarConversion(
DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
- Ctx.getPointerType(CurField->getType()), SourceLocation());
+ Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
LValue SrcLV =
MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());
@@ -223,7 +286,7 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
EmitStoreThroughLValue(RValue::get(CV), SrcLV);
// Load the value using the destination type pointer.
- CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
+ CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
}
CapturedVars.push_back(CV);
} else {
@@ -233,15 +296,16 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
}
}
-static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
- StringRef Name, LValue AddrLV,
+static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
+ QualType DstType, StringRef Name,
+ LValue AddrLV,
bool isReferenceType = false) {
ASTContext &Ctx = CGF.getContext();
- auto *CastedPtr = CGF.EmitScalarConversion(
+ llvm::Value *CastedPtr = CGF.EmitScalarConversion(
AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
- Ctx.getPointerType(DstType), SourceLocation());
- auto TmpAddr =
+ Ctx.getPointerType(DstType), Loc);
+ Address TmpAddr =
CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
.getAddress();
@@ -249,27 +313,26 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
// reference instead of the reference of the value.
if (isReferenceType) {
QualType RefType = Ctx.getLValueReferenceType(DstType);
- auto *RefVal = TmpAddr.getPointer();
- TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
- auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
- CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
+ llvm::Value *RefVal = TmpAddr.getPointer();
+ TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name, ".ref"));
+ LValue TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
+ CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit=*/true);
}
return TmpAddr;
}
static QualType getCanonicalParamType(ASTContext &C, QualType T) {
- if (T->isLValueReferenceType()) {
+ if (T->isLValueReferenceType())
return C.getLValueReferenceType(
getCanonicalParamType(C, T.getNonReferenceType()),
/*SpelledAsLValue=*/false);
- }
if (T->isPointerType())
return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
- if (auto *A = T->getAsArrayTypeUnsafe()) {
- if (auto *VLA = dyn_cast<VariableArrayType>(A))
+ if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
+ if (const auto *VLA = dyn_cast<VariableArrayType>(A))
return getCanonicalParamType(C, VLA->getElementType());
- else if (!A->isVariablyModifiedType())
+ if (!A->isVariablyModifiedType())
return C.getCanonicalType(T);
}
return C.getCanonicalParamType(T);
@@ -329,7 +392,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI)),
SC_Static, /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
}
- for (auto *FD : RD->fields()) {
+ for (const FieldDecl *FD : RD->fields()) {
QualType ArgType = FD->getType();
IdentifierInfo *II = nullptr;
VarDecl *CapVar = nullptr;
@@ -339,18 +402,17 @@ static llvm::Function *emitOutlinedFunctionPrologue(
// uintptr. This is necessary given that the runtime library is only able to
// deal with pointers. We can pass in the same way the VLA type sizes to the
// outlined function.
- if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
- I->capturesVariableArrayType()) {
- if (FO.UIntPtrCastRequired)
- ArgType = Ctx.getUIntPtrType();
- }
+ if (FO.UIntPtrCastRequired &&
+ ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
+ I->capturesVariableArrayType()))
+ ArgType = Ctx.getUIntPtrType();
if (I->capturesVariable() || I->capturesVariableByCopy()) {
CapVar = I->getCapturedVar();
II = CapVar->getIdentifier();
- } else if (I->capturesThis())
+ } else if (I->capturesThis()) {
II = &Ctx.Idents.get("this");
- else {
+ } else {
assert(I->capturesVariableArrayType());
II = &Ctx.Idents.get("vla");
}
@@ -387,19 +449,20 @@ static llvm::Function *emitOutlinedFunctionPrologue(
CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::Function *F =
+ auto *F =
llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
FO.FunctionName, &CGM.getModule());
CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
if (CD->isNothrow())
F->setDoesNotThrow();
+ F->setDoesNotRecurse();
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
FO.S->getLocStart(), CD->getBody()->getLocStart());
unsigned Cnt = CD->getContextParamPosition();
I = FO.S->captures().begin();
- for (auto *FD : RD->fields()) {
+ for (const FieldDecl *FD : RD->fields()) {
// Do not map arguments if we emit function with non-original types.
Address LocalAddr(Address::invalid());
if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
@@ -431,23 +494,23 @@ static llvm::Function *emitOutlinedFunctionPrologue(
AlignmentSource::Decl);
if (FD->hasCapturedVLAType()) {
if (FO.UIntPtrCastRequired) {
- ArgLVal = CGF.MakeAddrLValue(castValueFromUintptr(CGF, FD->getType(),
- Args[Cnt]->getName(),
- ArgLVal),
- FD->getType(), AlignmentSource::Decl);
+ ArgLVal = CGF.MakeAddrLValue(
+ castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
+ Args[Cnt]->getName(), ArgLVal),
+ FD->getType(), AlignmentSource::Decl);
}
- auto *ExprArg =
- CGF.EmitLoadOfLValue(ArgLVal, SourceLocation()).getScalarVal();
- auto VAT = FD->getCapturedVLAType();
- VLASizes.insert({Args[Cnt], {VAT->getSizeExpr(), ExprArg}});
+ llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
+ const VariableArrayType *VAT = FD->getCapturedVLAType();
+ VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
} else if (I->capturesVariable()) {
- auto *Var = I->getCapturedVar();
+ const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
Address ArgAddr = ArgLVal.getAddress();
if (!VarTy->isReferenceType()) {
if (ArgLVal.getType()->isLValueReferenceType()) {
ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
- } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
+ } else if (!VarTy->isVariablyModifiedType() ||
+ !VarTy->isPointerType()) {
assert(ArgLVal.getType()->isPointerType());
ArgAddr = CGF.EmitLoadOfPointer(
ArgAddr, ArgLVal.getType()->castAs<PointerType>());
@@ -461,20 +524,19 @@ static llvm::Function *emitOutlinedFunctionPrologue(
} else if (I->capturesVariableByCopy()) {
assert(!FD->getType()->isAnyPointerType() &&
"Not expecting a captured pointer.");
- auto *Var = I->getCapturedVar();
+ const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
LocalAddrs.insert(
{Args[Cnt],
- {Var,
- FO.UIntPtrCastRequired
- ? castValueFromUintptr(CGF, FD->getType(), Args[Cnt]->getName(),
- ArgLVal, VarTy->isReferenceType())
- : ArgLVal.getAddress()}});
+ {Var, FO.UIntPtrCastRequired
+ ? castValueFromUintptr(CGF, I->getLocation(),
+ FD->getType(), Args[Cnt]->getName(),
+ ArgLVal, VarTy->isReferenceType())
+ : ArgLVal.getAddress()}});
} else {
// If 'this' is captured, load it into CXXThisValue.
assert(I->capturesThis());
- CXXThisValue = CGF.EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation())
- .getScalarVal();
+ CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
}
++Cnt;
@@ -524,6 +586,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
/*RegisterCastedArgsOnly=*/true,
CapturedStmtInfo->getHelperName());
CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
+ WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
Args.clear();
LocalAddrs.clear();
VLASizes.clear();
@@ -539,16 +602,16 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
I->second.second,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
- CallArg = WrapperCGF.EmitLoadOfScalar(LV, SourceLocation());
+ CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getLocStart());
} else {
auto EI = VLASizes.find(Arg);
- if (EI != VLASizes.end())
+ if (EI != VLASizes.end()) {
CallArg = EI->second.second;
- else {
+ } else {
LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
Arg->getType(),
AlignmentSource::Decl);
- CallArg = WrapperCGF.EmitLoadOfScalar(LV, SourceLocation());
+ CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getLocStart());
}
}
CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
@@ -564,28 +627,28 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
Address DestAddr, Address SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(Address, Address)> &CopyGen) {
+ const llvm::function_ref<void(Address, Address)> CopyGen) {
// Perform element-by-element initialization.
QualType ElementTy;
// Drill down to the base element type on both arrays.
- auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
- auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
+ const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
+ llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
- auto SrcBegin = SrcAddr.getPointer();
- auto DestBegin = DestAddr.getPointer();
+ llvm::Value *SrcBegin = SrcAddr.getPointer();
+ llvm::Value *DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
+ llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
// The basic structure here is a while-do loop.
- auto BodyBB = createBasicBlock("omp.arraycpy.body");
- auto DoneBB = createBasicBlock("omp.arraycpy.done");
- auto IsEmpty =
+ llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
+ llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
+ llvm::Value *IsEmpty =
Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
// Enter the loop body, making that address the current address.
- auto EntryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
EmitBlock(BodyBB);
CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
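// The block structure emitted here is a guarded do-while copy loop; roughly
// (using the block names created above):
//
//   entry:             %isempty = icmp eq %dest.begin, %dest.end
//                      br %isempty, label %omp.arraycpy.done,
//                                   label %omp.arraycpy.body
//   omp.arraycpy.body: PHIs for the current dest/src elements; CopyGen(...);
//                      advance both element pointers by one;
//                      br %done, label %omp.arraycpy.done,
//                                label %omp.arraycpy.body
//   omp.arraycpy.done: fall through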
@@ -608,12 +671,12 @@ void CodeGenFunction::EmitOMPAggregateAssign(
CopyGen(DestElementCurrent, SrcElementCurrent);
// Shift the address forward by one element.
- auto DestElementNext = Builder.CreateConstGEP1_32(
+ llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
- auto SrcElementNext = Builder.CreateConstGEP1_32(
+ llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
// Check whether we've reached the end.
- auto Done =
+ llvm::Value *Done =
Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
Builder.CreateCondBr(Done, DoneBB, BodyBB);
DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
@@ -627,10 +690,12 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
Address SrcAddr, const VarDecl *DestVD,
const VarDecl *SrcVD, const Expr *Copy) {
if (OriginalType->isArrayType()) {
- auto *BO = dyn_cast<BinaryOperator>(Copy);
+ const auto *BO = dyn_cast<BinaryOperator>(Copy);
if (BO && BO->getOpcode() == BO_Assign) {
// Perform simple memcpy for simple copying.
- EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
+ LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
+ LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
+ EmitAggregateAssign(Dest, Src, OriginalType);
} else {
      // For arrays with complex element types, perform element-by-element
      // copying.
@@ -641,11 +706,8 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
// destination and source variables to corresponding array
// elements.
CodeGenFunction::OMPPrivateScope Remap(*this);
- Remap.addPrivate(DestVD, [DestElement]() -> Address {
- return DestElement;
- });
- Remap.addPrivate(
- SrcVD, [SrcElement]() -> Address { return SrcElement; });
+ Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
+ Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
(void)Remap.Privatize();
EmitIgnoredExpr(Copy);
});
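// A hedged source-level illustration of the two branches: for an array of a
// trivially assignable type the Copy expression is a plain '=', so the
// memcpy path is taken; a user-provided operator= turns Copy into a call
// expression, forcing the element-by-element path with per-element remapping:
//
//   struct T { T &operator=(const T &) { return *this; } };
//   int A[8]; // memcpy path, e.g. via lastprivate(A)
//   T   B[8]; // element-by-element path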
@@ -653,8 +715,8 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
} else {
// Remap pseudo source variable to private copy.
CodeGenFunction::OMPPrivateScope Remap(*this);
- Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
- Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
+ Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
+ Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
(void)Remap.Privatize();
// Emit copying of the whole variable.
EmitIgnoredExpr(Copy);
@@ -673,17 +735,21 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
}
llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
- CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
+ llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
+ getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
+ // Force emission of the firstprivate copy if the directive does not emit an
+ // outlined function, e.g. omp for, omp simd, omp distribute, etc.
+ bool MustEmitFirstprivateCopy =
+ CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
auto IRef = C->varlist_begin();
auto InitsRef = C->inits().begin();
- for (auto IInit : C->private_copies()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *IInit : C->private_copies()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
bool ThisFirstprivateIsLastprivate =
Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
- auto *CapFD = CapturesInfo.lookup(OrigVD);
- auto *FD = CapturedStmtInfo->lookup(OrigVD);
- if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
+ const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
+ if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
!FD->getType()->isReferenceType()) {
EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
++IRef;
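// A directive whose only capture region is OMPD_unknown (stand-alone
// 'omp for', 'omp simd', 'omp distribute', ...) is emitted inline rather
// than outlined, so the captured-field shortcut above cannot apply and the
// copy is forced. A sketch of source that takes the copy path (assuming
// -fopenmp; names are illustrative):
//
//   int X = 0;
//   #pragma omp for firstprivate(X)
//   for (int I = 0; I < N; ++I)
//     use(X); // X is a real per-thread copy here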
@@ -693,54 +759,61 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
FirstprivateIsLastprivate =
FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ const auto *VDInit =
+ cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
bool IsRegistered;
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- Address OriginalAddr = EmitLValue(&DRE).getAddress();
+ LValue OriginalLVal = EmitLValue(&DRE);
QualType Type = VD->getType();
if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in the current
          // captured region.
- IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
- auto Emission = EmitAutoVarAlloca(*VD);
- auto *Init = VD->getInit();
- if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
- // Perform simple memcpy.
- EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
- Type);
- } else {
- EmitOMPAggregateAssign(
- Emission.getAllocatedAddress(), OriginalAddr, Type,
- [this, VDInit, Init](Address DestElement,
- Address SrcElement) {
- // Clean up any temporaries needed by the initialization.
- RunCleanupsScope InitScope(*this);
- // Emit initialization for single element.
- setAddrOfLocalVar(VDInit, SrcElement);
- EmitAnyExprToMem(Init, DestElement,
- Init->getType().getQualifiers(),
- /*IsInitializer*/ false);
- LocalDeclMap.erase(VDInit);
- });
- }
- EmitAutoVarCleanups(Emission);
- return Emission.getAllocatedAddress();
- });
+ IsRegistered = PrivateScope.addPrivate(
+ OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
+ AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
+ const Expr *Init = VD->getInit();
+ if (!isa<CXXConstructExpr>(Init) ||
+ isTrivialInitializer(Init)) {
+ // Perform simple memcpy.
+ LValue Dest =
+ MakeAddrLValue(Emission.getAllocatedAddress(), Type);
+ EmitAggregateAssign(Dest, OriginalLVal, Type);
+ } else {
+ EmitOMPAggregateAssign(
+ Emission.getAllocatedAddress(), OriginalLVal.getAddress(),
+ Type,
+ [this, VDInit, Init](Address DestElement,
+ Address SrcElement) {
+ // Clean up any temporaries needed by the
+ // initialization.
+ RunCleanupsScope InitScope(*this);
+ // Emit initialization for single element.
+ setAddrOfLocalVar(VDInit, SrcElement);
+ EmitAnyExprToMem(Init, DestElement,
+ Init->getType().getQualifiers(),
+ /*IsInitializer*/ false);
+ LocalDeclMap.erase(VDInit);
+ });
+ }
+ EmitAutoVarCleanups(Emission);
+ return Emission.getAllocatedAddress();
+ });
} else {
- IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
- // Emit private VarDecl with copy init.
- // Remap temp VDInit variable to the address of the original
- // variable
- // (for proper handling of captured global variables).
- setAddrOfLocalVar(VDInit, OriginalAddr);
- EmitDecl(*VD);
- LocalDeclMap.erase(VDInit);
- return GetAddrOfLocalVar(VD);
- });
+ Address OriginalAddr = OriginalLVal.getAddress();
+ IsRegistered = PrivateScope.addPrivate(
+ OrigVD, [this, VDInit, OriginalAddr, VD]() {
+ // Emit private VarDecl with copy init.
+ // Remap temp VDInit variable to the address of the original
+ // variable (for proper handling of captured global variables).
+ setAddrOfLocalVar(VDInit, OriginalAddr);
+ EmitDecl(*VD);
+ LocalDeclMap.erase(VDInit);
+ return GetAddrOfLocalVar(VD);
+ });
}
assert(IsRegistered &&
"firstprivate var already registered as private");
@@ -762,16 +835,15 @@ void CodeGenFunction::EmitOMPPrivateClause(
llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
auto IRef = C->varlist_begin();
- for (auto IInit : C->private_copies()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *IInit : C->private_copies()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
- auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [&]() -> Address {
- // Emit private VarDecl with copy init.
- EmitDecl(*VD);
- return GetAddrOfLocalVar(VD);
- });
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
+ // Emit private VarDecl with copy init.
+ EmitDecl(*VD);
+ return GetAddrOfLocalVar(VD);
+ });
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -794,8 +866,8 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
auto IRef = C->varlist_begin();
auto ISrcRef = C->source_exprs().begin();
auto IDestRef = C->destination_exprs().begin();
- for (auto *AssignOp : C->assignment_ops()) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *AssignOp : C->assignment_ops()) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
QualType Type = VD->getType();
if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
// Get the address of the master variable. If we are emitting code with
@@ -826,12 +898,15 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
Builder.CreateCondBr(
Builder.CreateICmpNE(
Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
- Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy)),
+ Builder.CreatePtrToInt(PrivateAddr.getPointer(),
+ CGM.IntPtrTy)),
CopyBegin, CopyEnd);
EmitBlock(CopyBegin);
}
- auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
- auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ const auto *SrcVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
+ const auto *DestVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
}
++IRef;
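// For reference, 'copyin' broadcasts the master thread's threadprivate value
// to the other threads' copies; the ptrtoint comparison above skips the copy
// on the master thread, where the two addresses coincide. Source-level
// sketch (assuming -fopenmp):
//
//   int TP;
//   #pragma omp threadprivate(TP)
//   void g() {
//     TP = 42;
//     #pragma omp parallel copyin(TP)
//     { /* every thread observes TP == 42 */ }
//   }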
@@ -854,8 +929,8 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
bool HasAtLeastOneLastprivate = false;
llvm::DenseSet<const VarDecl *> SIMDLCVs;
if (isOpenMPSimdDirective(D.getDirectiveKind())) {
- auto *LoopDirective = cast<OMPLoopDirective>(&D);
- for (auto *C : LoopDirective->counters()) {
+ const auto *LoopDirective = cast<OMPLoopDirective>(&D);
+ for (const Expr *C : LoopDirective->counters()) {
SIMDLCVs.insert(
cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
}
@@ -863,19 +938,21 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
HasAtLeastOneLastprivate = true;
- if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
+ if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
+ !getLangOpts().OpenMPSimd)
break;
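// Under -fopenmp-simd (LangOpts.OpenMPSimd) a taskloop is lowered as a plain
// simd loop with no runtime support, so the early exit above no longer
// applies and lastprivate copies are initialized here as for any other loop,
// e.g. for:
//
//   #pragma omp taskloop simd lastprivate(X)
//   for (int I = 0; I < N; ++I) X = I;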
auto IRef = C->varlist_begin();
auto IDestRef = C->destination_exprs().begin();
- for (auto *IInit : C->private_copies()) {
+ for (const Expr *IInit : C->private_copies()) {
// Keep the address of the original variable for future update at the end
// of the loop.
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
- auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
+ const auto *DestVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
DeclRefExpr DRE(
const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
@@ -887,8 +964,8 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
// not generated. Initialization of this variable will happen in codegen
      // for the 'firstprivate' clause.
if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
@@ -926,10 +1003,10 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
}
llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
- if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
+ if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
auto IC = LoopDirective->counters().begin();
- for (auto F : LoopDirective->finals()) {
- auto *D =
+ for (const Expr *F : LoopDirective->finals()) {
+ const auto *D =
cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
if (NoFinals)
AlreadyEmittedVars.insert(D);
@@ -942,23 +1019,26 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
auto IRef = C->varlist_begin();
auto ISrcRef = C->source_exprs().begin();
auto IDestRef = C->destination_exprs().begin();
- for (auto *AssignOp : C->assignment_ops()) {
- auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *AssignOp : C->assignment_ops()) {
+ const auto *PrivateVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
QualType Type = PrivateVD->getType();
- auto *CanonicalVD = PrivateVD->getCanonicalDecl();
+ const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable of a
        // loop-based directive, update its value before copying it back to
        // the original variable.
- if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
+ if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
EmitIgnoredExpr(FinalExpr);
- auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
- auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+ const auto *SrcVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
+ const auto *DestVD =
+ cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// Get the address of the original variable.
Address OriginalAddr = GetAddrOfLocalVar(DestVD);
// Get the address of the private variable.
Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
- if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
+ if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
PrivateAddr =
Address(Builder.CreateLoad(PrivateAddr),
getNaturalTypeAlignment(RefTy->getPointeeType()));
@@ -968,7 +1048,7 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
++ISrcRef;
++IDestRef;
}
- if (auto *PostUpdate = C->getPostUpdateExpr())
+ if (const Expr *PostUpdate = C->getPostUpdateExpr())
EmitIgnoredExpr(PostUpdate);
}
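// When the lastprivate variable is also the loop counter, its final value is
// recomputed (via the expression stored in LoopCountersAndUpdates) before
// the copy-back, so after a canonical loop the original variable holds the
// first value past the last iteration:
//
//   int I;
//   #pragma omp for lastprivate(I)
//   for (I = 0; I < N; ++I) ;
//   // here I == N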
if (IsLastIterCond)
@@ -990,7 +1070,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
auto IRed = C->reduction_ops().begin();
auto ILHS = C->lhs_exprs().begin();
auto IRHS = C->rhs_exprs().begin();
- for (const auto *Ref : C->varlists()) {
+ for (const Expr *Ref : C->varlists()) {
Shareds.emplace_back(Ref);
Privates.emplace_back(*IPriv);
ReductionOps.emplace_back(*IRed);
@@ -1007,12 +1087,12 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
auto ILHS = LHSs.begin();
auto IRHS = RHSs.begin();
auto IPriv = Privates.begin();
- for (const auto *IRef : Shareds) {
- auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
+ for (const Expr *IRef : Shareds) {
+ const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
// Emit private VarDecl with reduction init.
RedCG.emitSharedLValue(*this, Count);
RedCG.emitAggregateType(*this, Count);
- auto Emission = EmitAutoVarAlloca(*PrivateVD);
+ AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
RedCG.getSharedLValue(Count),
[&Emission](CodeGenFunction &CGF) {
@@ -1023,32 +1103,31 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
Address BaseAddr = RedCG.adjustPrivateAddress(
*this, Count, Emission.getAllocatedAddress());
bool IsRegistered = PrivateScope.addPrivate(
- RedCG.getBaseDecl(Count), [BaseAddr]() -> Address { return BaseAddr; });
+ RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
- auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
QualType Type = PrivateVD->getType();
bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() -> Address {
+ PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
return RedCG.getSharedLValue(Count).getAddress();
});
- PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
- return GetAddrOfLocalVar(PrivateVD);
- });
+ PrivateScope.addPrivate(
+ RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
} else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
isa<ArraySubscriptExpr>(IRef)) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() -> Address {
+ PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
return RedCG.getSharedLValue(Count).getAddress();
});
- PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
+ PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
ConvertTypeForMem(RHSVD->getType()),
"rhs.begin");
@@ -1063,10 +1142,9 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
OriginalAddr = Builder.CreateElementBitCast(
OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
}
+ PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
PrivateScope.addPrivate(
- LHSVD, [OriginalAddr]() -> Address { return OriginalAddr; });
- PrivateScope.addPrivate(
- RHSVD, [this, PrivateVD, RHSVD, IsArray]() -> Address {
+ RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
return IsArray
? Builder.CreateElementBitCast(
GetAddrOfLocalVar(PrivateVD),
@@ -1100,9 +1178,8 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
if (HasAtLeastOneReduction) {
bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
- D.getDirectiveKind() == OMPD_simd;
- bool SimpleReduction = D.getDirectiveKind() == OMPD_simd ||
- D.getDirectiveKind() == OMPD_distribute_simd;
+ ReductionKind == OMPD_simd;
+ bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit a nowait reduction if the nowait clause is present or the
    // directive is a parallel directive (it always has an implicit barrier).
CGM.getOpenMPRuntime().emitReduction(
@@ -1113,17 +1190,17 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
static void emitPostUpdateForReductionClause(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
if (!CGF.HaveInsertPoint())
return;
llvm::BasicBlock *DoneBB = nullptr;
for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
- if (auto *PostUpdate = C->getPostUpdateExpr()) {
+ if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
if (!DoneBB) {
- if (auto *Cond = CondGen(CGF)) {
+ if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit a conditional
          // block if one was requested.
- auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
CGF.EmitBlock(ThenBB);
@@ -1151,12 +1228,14 @@ static void emitCommonOMPParallelDirective(
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
- auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
- S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ llvm::Value *OutlinedFn =
+ CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
+ S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
- /*IgnoreResultAssign*/ true);
+ llvm::Value *NumThreads =
+ CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
+ /*IgnoreResultAssign=*/true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
CGF, NumThreads, NumThreadsClause->getLocStart());
}
@@ -1192,7 +1271,8 @@ static void emitEmptyBoundParameters(CodeGenFunction &,
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
// Emit parallel region as a standalone region.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
bool Copyins = CGF.EmitOMPCopyinClause(S);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
@@ -1207,34 +1287,33 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
emitEmptyBoundParameters);
- emitPostUpdateForReductionClause(
- *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
}
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
JumpDest LoopExit) {
RunCleanupsScope BodyScope(*this);
// Update counters values on current iteration.
- for (auto I : D.updates()) {
- EmitIgnoredExpr(I);
- }
+ for (const Expr *UE : D.updates())
+ EmitIgnoredExpr(UE);
// Update the linear variables.
  // In distribute directives only loop counters may be marked as linear; no
  // need to generate code for them.
if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
- for (auto *U : C->updates())
- EmitIgnoredExpr(U);
+ for (const Expr *UE : C->updates())
+ EmitIgnoredExpr(UE);
}
}
// On a continue in the body, jump to the end.
- auto Continue = getJumpDestInCurrentScope("omp.body.continue");
+ JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
// Emit loop body.
EmitStmt(D.getBody());
@@ -1246,24 +1325,24 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
void CodeGenFunction::EmitOMPInnerLoop(
const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
- const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
- const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
+ const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
+ const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
// Start the loop with a block that tests the condition.
auto CondBlock = createBasicBlock("omp.inner.for.cond");
EmitBlock(CondBlock);
- const SourceRange &R = S.getSourceRange();
+ const SourceRange R = S.getSourceRange();
LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
- auto ExitBlock = LoopExit.getBlock();
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (RequiresCleanup)
ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
- auto LoopBody = createBasicBlock("omp.inner.for.body");
+ llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
// Emit condition.
EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
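// The control flow being assembled (block names as created above):
//
//   omp.inner.for.cond: evaluate LoopCond;
//                       br body, exit (or a cleanup block first)
//   omp.inner.for.body: BodyGen(*this)
//   omp.inner.for.inc:  IncExpr; PostIncGen; br cond
//   omp.inner.for.end:  loop exit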
@@ -1276,7 +1355,7 @@ void CodeGenFunction::EmitOMPInnerLoop(
incrementProfileCounter(&S);
// Create a block for the increment.
- auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
+ JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
BodyGen(*this);
@@ -1298,12 +1377,13 @@ bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
// Emit inits for the linear variables.
bool HasLinears = false;
for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
- for (auto *Init : C->inits()) {
+ for (const Expr *Init : C->inits()) {
HasLinears = true;
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
- if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
+ if (const auto *Ref =
+ dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
- auto *OrigVD = cast<VarDecl>(Ref->getDecl());
+ const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
VD->getInit()->getType(), VK_LValue,
@@ -1312,13 +1392,14 @@ bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
VD->getType()),
/*capturedByInit=*/false);
EmitAutoVarCleanups(Emission);
- } else
+ } else {
EmitVarDecl(*VD);
+ }
}
// Emit the linear steps for the linear clauses.
// If a step is not constant, it is pre-calculated before the loop.
- if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
- if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
+ if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
+ if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
// Emit calculation of the linear step.
EmitIgnoredExpr(CS);
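// Source-level sketch of what this initializes (assuming -fopenmp): a linear
// variable starts from its value before the loop and advances by the given
// step each iteration, with the step precomputed here when it is not a
// constant:
//
//   int J = 0, Step = 2;
//   #pragma omp for linear(J : Step)
//   for (int I = 0; I < N; ++I)
//     use(J); // J == 2 * I on iteration I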
@@ -1329,36 +1410,36 @@ bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
void CodeGenFunction::EmitOMPLinearClauseFinal(
const OMPLoopDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
if (!HaveInsertPoint())
return;
llvm::BasicBlock *DoneBB = nullptr;
// Emit the final values of the linear variables.
for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
auto IC = C->varlist_begin();
- for (auto *F : C->finals()) {
+ for (const Expr *F : C->finals()) {
if (!DoneBB) {
- if (auto *Cond = CondGen(*this)) {
+ if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit a conditional
          // block if one was requested.
- auto *ThenBB = createBasicBlock(".omp.linear.pu");
+ llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
DoneBB = createBasicBlock(".omp.linear.pu.done");
Builder.CreateCondBr(Cond, ThenBB, DoneBB);
EmitBlock(ThenBB);
}
}
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
Address OrigAddr = EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(*this);
- VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
+ VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
++IC;
}
- if (auto *PostUpdate = C->getPostUpdateExpr())
+ if (const Expr *PostUpdate = C->getPostUpdateExpr())
EmitIgnoredExpr(PostUpdate);
}
if (DoneBB)
@@ -1371,12 +1452,12 @@ static void emitAlignedClause(CodeGenFunction &CGF,
return;
for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
unsigned ClauseAlignment = 0;
- if (auto AlignmentExpr = Clause->getAlignment()) {
- auto AlignmentCI =
+ if (const Expr *AlignmentExpr = Clause->getAlignment()) {
+ auto *AlignmentCI =
cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
}
- for (auto E : Clause->varlists()) {
+ for (const Expr *E : Clause->varlists()) {
unsigned Alignment = ClauseAlignment;
if (Alignment == 0) {
// OpenMP [2.8.1, Description]
@@ -1403,28 +1484,28 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
if (!HaveInsertPoint())
return;
auto I = S.private_counters().begin();
- for (auto *E : S.counters()) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
- (void)LoopScope.addPrivate(VD, [&]() -> Address {
- // Emit var without initialization.
- if (!LocalDeclMap.count(PrivateVD)) {
- auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
- EmitAutoVarCleanups(VarEmission);
- }
- DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
- /*RefersToEnclosingVariableOrCapture=*/false,
- (*I)->getType(), VK_LValue, (*I)->getExprLoc());
- return EmitLValue(&DRE).getAddress();
+ for (const Expr *E : S.counters()) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
+ // Emit the variable without initialization.
+ AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
+ EmitAutoVarCleanups(VarEmission);
+ LocalDeclMap.erase(PrivateVD);
+ (void)LoopScope.addPrivate(VD, [&VarEmission]() {
+ return VarEmission.getAllocatedAddress();
});
if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
VD->hasGlobalStorage()) {
- (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
+ (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
DeclRefExpr DRE(const_cast<VarDecl *>(VD),
LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
E->getType(), VK_LValue, E->getExprLoc());
return EmitLValue(&DRE).getAddress();
});
+ } else {
+ (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
+ return VarEmission.getAllocatedAddress();
+ });
}
++I;
}
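// The private counter is now allocated exactly once, up front, and the same
// allocation is handed out from both addPrivate callbacks; the
// LocalDeclMap.erase(PrivateVD) above appears to drop the temporary mapping
// created by EmitAutoVarAlloca so that only the privatization scope decides
// how the counter is addressed.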
@@ -1440,7 +1521,7 @@ static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
(void)PreCondScope.Privatize();
// Get initial values of real counters.
- for (auto I : S.inits()) {
+ for (const Expr *I : S.inits()) {
CGF.EmitIgnoredExpr(I);
}
}
@@ -1454,20 +1535,20 @@ void CodeGenFunction::EmitOMPLinearClause(
return;
llvm::DenseSet<const VarDecl *> SIMDLCVs;
if (isOpenMPSimdDirective(D.getDirectiveKind())) {
- auto *LoopDirective = cast<OMPLoopDirective>(&D);
- for (auto *C : LoopDirective->counters()) {
+ const auto *LoopDirective = cast<OMPLoopDirective>(&D);
+ for (const Expr *C : LoopDirective->counters()) {
SIMDLCVs.insert(
cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
}
}
for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
auto CurPrivate = C->privates().begin();
- for (auto *E : C->varlists()) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- auto *PrivateVD =
+ for (const Expr *E : C->varlists()) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
- bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
+ bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
// Emit private VarDecl with copy init.
EmitVarDecl(*PrivateVD);
return GetAddrOfLocalVar(PrivateVD);
@@ -1475,8 +1556,9 @@ void CodeGenFunction::EmitOMPLinearClause(
assert(IsRegistered && "linear var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
- } else
+ } else {
EmitVarDecl(*PrivateVD);
+ }
++CurPrivate;
}
}
@@ -1490,7 +1572,7 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
- llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
// the memory instructions parallel, because loop-carried
@@ -1500,12 +1582,12 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
} else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
- llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
// the memory instructions parallel, because loop-carried
// dependences of 'safelen' iterations are possible.
- CGF.LoopStack.setParallel(false);
+ CGF.LoopStack.setParallel(/*Enable=*/false);
}
}
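// For reference: 'simdlen(N)' is only a preferred-width hint, while
// 'safelen(N)' bounds how far apart concurrently executed iterations may be,
// so loop-carried dependences beyond that distance remain legal and the
// memory accesses must not be marked parallel. A classic use (assuming
// -fopenmp):
//
//   #pragma omp simd safelen(4)
//   for (int I = 4; I < N; ++I)
//     A[I] = A[I - 4] + 1; // distance-4 dependence, safe at width <= 4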
@@ -1513,46 +1595,45 @@ void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
bool IsMonotonic) {
// Walk clauses and process safelen/lastprivate.
LoopStack.setParallel(!IsMonotonic);
- LoopStack.setVectorizeEnable(true);
+ LoopStack.setVectorizeEnable();
emitSimdlenSafelenClause(*this, D, IsMonotonic);
}
void CodeGenFunction::EmitOMPSimdFinal(
const OMPLoopDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
if (!HaveInsertPoint())
return;
llvm::BasicBlock *DoneBB = nullptr;
auto IC = D.counters().begin();
auto IPC = D.private_counters().begin();
- for (auto F : D.finals()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
- auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
- auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
+ for (const Expr *F : D.finals()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
+ const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
+ const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
OrigVD->hasGlobalStorage() || CED) {
if (!DoneBB) {
- if (auto *Cond = CondGen(*this)) {
+ if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit a conditional
          // block if one was requested.
- auto *ThenBB = createBasicBlock(".omp.final.then");
+ llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
DoneBB = createBasicBlock(".omp.final.done");
Builder.CreateCondBr(Cond, ThenBB, DoneBB);
EmitBlock(ThenBB);
}
}
Address OrigAddr = Address::invalid();
- if (CED)
+ if (CED) {
OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
- else {
+ } else {
DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
/*RefersToEnclosingVariableOrCapture=*/false,
(*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
OrigAddr = EmitLValue(&DRE).getAddress();
}
OMPPrivateScope VarScope(*this);
- VarScope.addPrivate(OrigVD,
- [OrigAddr]() -> Address { return OrigAddr; });
+ VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
}
@@ -1570,6 +1651,14 @@ static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
CGF.EmitStopPoint(&S);
}
+/// Emit a helper variable and return corresponding lvalue.
+static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
+ const DeclRefExpr *Helper) {
+ auto VDecl = cast<VarDecl>(Helper->getDecl());
+ CGF.EmitVarDecl(*VDecl);
+ return CGF.EmitLValue(Helper);
+}
+
static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
PrePostActionTy &Action) {
Action.Enter(CGF);
@@ -1581,6 +1670,12 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
// <Final counter/linear vars updates>;
// }
//
+ if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
+ isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
+ isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
+ (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
+ (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
+ }
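// When the simd region is part of a worksharing, distribute, or taskloop
// construct (e.g. 'omp for simd' built under -fopenmp-simd), the loop
// init/condition expressions produced by Sema reference the lower/upper
// bound variables, so they are emitted here even though the inlined simd
// lowering does not otherwise use them.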
// Emit: if (PreCond) - begin.
// If the condition constant folds and can be elided, avoid emitting the
@@ -1591,7 +1686,7 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
if (!CondConstant)
return;
} else {
- auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
+ llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
ContBlock = CGF.createBasicBlock("simd.if.end");
emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
CGF.getProfileCount(&S));
@@ -1601,14 +1696,14 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
// Emit the loop iteration variable.
const Expr *IVExpr = S.getIterationVariable();
- const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
+ const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
CGF.EmitVarDecl(*IVDecl);
CGF.EmitIgnoredExpr(S.getInit());
  // Emit the iteration count variable.
  // If it is not a variable, Sema decided to calculate the iteration count on
// each iteration (e.g., it is foldable into a constant).
- if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
// Emit calculation of the iterations count.
CGF.EmitIgnoredExpr(S.getCalcLastIteration());
@@ -1633,17 +1728,15 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitStopPoint(&S);
},
[](CodeGenFunction &) {});
- CGF.EmitOMPSimdFinal(
- S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
// Emit final copy of the lastprivate variables at the end of loops.
if (HasLastprivateClause)
CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
- emitPostUpdateForReductionClause(
- CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
}
- CGF.EmitOMPLinearClauseFinal(
- S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
// Emit: if (PreCond) - end.
if (ContBlock) {
CGF.EmitBranch(ContBlock);
@@ -1655,7 +1748,7 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
emitOMPSimdRegion(CGF, S, Action);
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
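// OMPLexicalScope now takes the kind of the enclosing capture region instead
// of the old /*AsInlined=*/true flag: OMPD_unknown for plainly inlined
// directives like this one, and OMPD_parallel further down where the scope
// must expose the captures of a combined parallel region.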
@@ -1665,18 +1758,18 @@ void CodeGenFunction::EmitOMPOuterLoop(
const CodeGenFunction::OMPLoopArguments &LoopArgs,
const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
- auto &RT = CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
const Expr *IVExpr = S.getIterationVariable();
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
- auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
+ JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
// Start the loop with a block that tests the condition.
- auto CondBlock = createBasicBlock("omp.dispatch.cond");
+ llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
EmitBlock(CondBlock);
- const SourceRange &R = S.getSourceRange();
+ const SourceRange R = S.getSourceRange();
LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -1698,11 +1791,11 @@ void CodeGenFunction::EmitOMPOuterLoop(
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
- auto ExitBlock = LoopExit.getBlock();
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (LoopScope.requiresCleanups())
ExitBlock = createBasicBlock("omp.dispatch.cleanup");
- auto LoopBody = createBasicBlock("omp.dispatch.body");
+ llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
@@ -1716,7 +1809,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
EmitIgnoredExpr(LoopArgs.Init);
// Create a block for the increment.
- auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
+ JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
// Generate !llvm.loop.parallel metadata for loads and stores for loops
@@ -1769,7 +1862,7 @@ void CodeGenFunction::EmitOMPForOuterLoop(
const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
const OMPLoopArguments &LoopArgs,
const CodeGenDispatchBoundsTy &CGDispatchBounds) {
- auto &RT = CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
const bool DynamicOrOrdered =
@@ -1835,7 +1928,8 @@ void CodeGenFunction::EmitOMPForOuterLoop(
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
if (DynamicOrOrdered) {
- auto DispatchBounds = CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
+ const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
+ CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
llvm::Value *LBVal = DispatchBounds.first;
llvm::Value *UBVal = DispatchBounds.second;
CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
@@ -1878,7 +1972,7 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
const CodeGenLoopTy &CodeGenLoopContent) {
- auto &RT = CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
// Emit outer loop.
// Same behavior as a OMPForOuterLoop, except that schedule cannot be
@@ -1933,14 +2027,6 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
emitEmptyOrdered);
}
-/// Emit a helper variable and return corresponding lvalue.
-static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
- const DeclRefExpr *Helper) {
- auto VDecl = cast<VarDecl>(Helper->getDecl());
- CGF.EmitVarDecl(*VDecl);
- return CGF.EmitLValue(Helper);
-}
-
static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
const OMPExecutableDirective &S) {
@@ -1958,14 +2044,18 @@ emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
// the current ones.
LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
- llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(PrevLB, SourceLocation());
+ llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
+ PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
PrevLBVal = CGF.EmitScalarConversion(
PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
- LS.getIterationVariable()->getType(), SourceLocation());
- llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(PrevUB, SourceLocation());
+ LS.getIterationVariable()->getType(),
+ LS.getPrevLowerBoundVariable()->getExprLoc());
+ llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
+ PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
PrevUBVal = CGF.EmitScalarConversion(
PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
- LS.getIterationVariable()->getType(), SourceLocation());
+ LS.getIterationVariable()->getType(),
+ LS.getPrevUpperBoundVariable()->getExprLoc());
CGF.EmitStoreOfScalar(PrevLBVal, LB);
CGF.EmitStoreOfScalar(PrevUBVal, UB);
@@ -1991,10 +2081,10 @@ emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
// is not normalized as each team only executes its own assigned
  // distribute chunk.
QualType IteratorTy = IVExpr->getType();
- llvm::Value *LBVal = CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy,
- SourceLocation());
- llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy,
- SourceLocation());
+ llvm::Value *LBVal =
+ CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getLocStart());
+ llvm::Value *UBVal =
+ CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getLocStart());
return {LBVal, UBVal};
}
@@ -2004,13 +2094,13 @@ static void emitDistributeParallelForDistributeInnerBoundParams(
const auto &Dir = cast<OMPLoopDirective>(S);
LValue LB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
- auto LBCast = CGF.Builder.CreateIntCast(
+ llvm::Value *LBCast = CGF.Builder.CreateIntCast(
CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(LBCast);
LValue UB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
- auto UBCast = CGF.Builder.CreateIntCast(
+ llvm::Value *UBCast = CGF.Builder.CreateIntCast(
CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(UBCast);
}
@@ -2020,7 +2110,8 @@ emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
const OMPLoopDirective &S,
CodeGenFunction::JumpDest LoopExit) {
auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
bool HasCancel = false;
if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
@@ -2051,7 +2142,7 @@ void CodeGenFunction::EmitOMPDistributeParallelForDirective(
CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
S.getDistInc());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_parallel);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}
@@ -2061,7 +2152,7 @@ void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
S.getDistInc());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_parallel);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}
@@ -2070,7 +2161,7 @@ void CodeGenFunction::EmitOMPDistributeSimdDirective(
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
@@ -2096,28 +2187,6 @@ void CodeGenFunction::EmitOMPTargetSimdDirective(
emitCommonOMPTargetDirective(*this, S, CodeGen);
}
-void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
- const OMPTargetTeamsDistributeParallelForDirective &S) {
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
- CGM.getOpenMPRuntime().emitInlinedDirective(
- *this, OMPD_target_teams_distribute_parallel_for,
- [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitStmt(
- cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- });
-}
-
-void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
- const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
- CGM.getOpenMPRuntime().emitInlinedDirective(
- *this, OMPD_target_teams_distribute_parallel_for_simd,
- [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitStmt(
- cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
- });
-}
-
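// The two stubs deleted above used to emit the captured statement inline for
// 'target teams distribute parallel for [simd]'; their removal suggests real
// codegen for these combined directives is provided elsewhere in this patch.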
namespace {
struct ScheduleKindModifiersTy {
OpenMPScheduleClauseKind Kind;
@@ -2135,20 +2204,20 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
const CodeGenLoopBoundsTy &CodeGenLoopBounds,
const CodeGenDispatchBoundsTy &CGDispatchBounds) {
// Emit the loop iteration variable.
- auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
- auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
+ const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
+ const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
EmitVarDecl(*IVDecl);
  // Emit the iteration count variable.
  // If it is not a variable, Sema decided to calculate the iteration count on each
// iteration (e.g., it is foldable into a constant).
- if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
// Emit calculation of the iterations count.
EmitIgnoredExpr(S.getCalcLastIteration());
}
- auto &RT = CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
bool HasLastprivateClause;
// Check pre-condition.
@@ -2163,7 +2232,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
if (!CondConstant)
return false;
} else {
- auto *ThenBlock = createBasicBlock("omp.precond.then");
+ llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
ContBlock = createBasicBlock("omp.precond.end");
emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
getProfileCount(&S));
@@ -2171,8 +2240,9 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
incrementProfileCounter(&S);
}
+ RunCleanupsScope DoacrossCleanupScope(*this);
bool Ordered = false;
- if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
+ if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
if (OrderedClause->getNumForLoops())
RT.emitDoacrossInit(*this, S);
else
@@ -2213,11 +2283,11 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// Detect the loop schedule kind and chunk.
llvm::Value *Chunk = nullptr;
OpenMPScheduleTy ScheduleKind;
- if (auto *C = S.getSingleClause<OMPScheduleClause>()) {
+ if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
ScheduleKind.Schedule = C->getScheduleKind();
ScheduleKind.M1 = C->getFirstScheduleModifier();
ScheduleKind.M2 = C->getSecondScheduleModifier();
- if (const auto *Ch = C->getChunkSize()) {
+ if (const Expr *Ch = C->getChunkSize()) {
Chunk = EmitScalarExpr(Ch);
Chunk = EmitScalarConversion(Chunk, Ch->getType(),
S.getIterationVariable()->getType(),
@@ -2245,7 +2315,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
UB.getAddress(), ST.getAddress());
RT.emitForStaticInit(*this, S.getLocStart(), S.getDirectiveKind(),
ScheduleKind, StaticInit);
- auto LoopExit =
+ JumpDest LoopExit =
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(S.getEnsureUpperBound());
@@ -2282,7 +2352,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
}
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
EmitOMPSimdFinal(S,
- [&](CodeGenFunction &CGF) -> llvm::Value * {
+ [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart()));
});
@@ -2293,7 +2363,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
: /*Parallel only*/ OMPD_parallel);
// Emit post-update of the reduction variables if IsLastIter != 0.
emitPostUpdateForReductionClause(
- *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
+ *this, S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart()));
});
@@ -2303,14 +2373,15 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
S, isOpenMPSimdDirective(S.getDirectiveKind()),
Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
}
- EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
+ EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart()));
});
+ DoacrossCleanupScope.ForceCleanup();
// We're now done with the loop, so jump to the continuation block.
if (ContBlock) {
EmitBranch(ContBlock);
- EmitBlock(ContBlock, true);
+ EmitBlock(ContBlock, /*IsFinished=*/true);
}
}
return HasLastprivateClause;
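// DoacrossCleanupScope appears to guarantee that the cleanup registered by
// emitDoacrossInit (the doacross finalization call) is emitted by
// ForceCleanup() before the branch to the continuation block, rather than
// being deferred past it.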
@@ -2321,7 +2392,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
/// of the associated 'for' or 'distribute' loop.
static std::pair<LValue, LValue>
emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
- const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
+ const auto &LS = cast<OMPLoopDirective>(S);
LValue LB =
EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
LValue UB =
@@ -2336,7 +2407,7 @@ emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
static std::pair<llvm::Value *, llvm::Value *>
emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
Address LB, Address UB) {
- const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
+ const auto &LS = cast<OMPLoopDirective>(S);
const Expr *IVExpr = LS.getIterationVariable();
const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
@@ -2354,15 +2425,14 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
emitDispatchForLoopBounds);
};
{
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
S.hasCancel());
}
// Emit an implicit barrier at the end.
- if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
- }
}
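// The implicit barrier is skipped only when 'nowait' is present and no
// lastprivate copy-back requires the other threads to have finished:
//
//   #pragma omp for nowait
//   for (int I = 0; I < N; ++I)
//     work(I); // no barrier emitted after the loop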
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
@@ -2374,38 +2444,39 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
emitDispatchForLoopBounds);
};
{
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
// Emit an implicit barrier at the end.
- if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
- }
}
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
const Twine &Name,
llvm::Value *Init = nullptr) {
- auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
+ LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
if (Init)
CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
return LVal;
}
void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
- auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
- auto *CS = dyn_cast<CompoundStmt>(Stmt);
+ const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
+ const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
bool HasLastprivates = false;
- auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
- PrePostActionTy &) {
- auto &C = CGF.CGM.getContext();
- auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ auto &&CodeGen = [&S, CapturedStmt, CS,
+ &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
+ ASTContext &C = CGF.getContext();
+ QualType KmpInt32Ty =
+ C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit the helper variable initializers.
LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
CGF.Builder.getInt32(0));
- auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1)
- : CGF.Builder.getInt32(0);
+ llvm::ConstantInt *GlobalUBVal = CS != nullptr
+ ? CGF.Builder.getInt32(CS->size() - 1)
+ : CGF.Builder.getInt32(0);
LValue UB =
createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
@@ -2423,8 +2494,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
OK_Ordinary, S.getLocStart(), FPOptions());
// Increment for loop counter.
UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
- S.getLocStart());
- auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) {
+ S.getLocStart(), true);
+ auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
// case 0:
@@ -2436,13 +2507,13 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// break;
// }
// .omp.sections.exit:
- auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
- auto *SwitchStmt = CGF.Builder.CreateSwitch(
- CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
- CS == nullptr ? 1 : CS->size());
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
+ llvm::SwitchInst *SwitchStmt =
+ CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getLocStart()),
+ ExitBB, CS == nullptr ? 1 : CS->size());
if (CS) {
unsigned CaseNumber = 0;
- for (auto *SubStmt : CS->children()) {
+ for (const Stmt *SubStmt : CS->children()) {
auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
CGF.EmitBlock(CaseBB);
SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
@@ -2451,10 +2522,10 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
++CaseNumber;
}
} else {
- auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
+ llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
CGF.EmitBlock(CaseBB);
SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
- CGF.EmitStmt(Stmt);
+ CGF.EmitStmt(CapturedStmt);
CGF.EmitBranch(ExitBB);
}
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
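// Illustrative input (assumed example): a construct such as
//   #pragma omp sections
//   {
//   #pragma omp section
//     foo();
//   #pragma omp section
//     bar();
//   }
// reaches the code above with CS holding two sub-statements, producing a
// two-case switch over IV with .omp.sections.exit as the default target.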
@@ -2483,8 +2554,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getLocStart(), S.getDirectiveKind(), ScheduleKind, StaticInit);
// UB = min(UB, GlobalUB);
- auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
- auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
+ llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
+ llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB;
@@ -2500,11 +2571,10 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
// Emit post-update of the reduction variables if IsLastIter != 0.
- emitPostUpdateForReductionClause(
- CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
- return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
- });
+ emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
+ return CGF.Builder.CreateIsNotNull(
+ CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ });
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivates)
@@ -2535,7 +2605,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
{
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
EmitSections(S);
}
// Emit an implicit barrier at the end.
@@ -2547,9 +2617,9 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
S.hasCancel());
}
@@ -2578,10 +2648,10 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
(void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
CGF.EmitOMPPrivateClause(S, SingleScope);
(void)SingleScope.Privatize();
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
{
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
CopyprivateVars, DestExprs,
SrcExprs, AssignmentOps);
@@ -2598,21 +2668,21 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}
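// For illustration: given
//   #pragma omp master
//   { Work(); }
// emitMasterRegion guards the captured statement with the runtime's
// __kmpc_master/__kmpc_end_master pair so only the master thread executes it.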
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
- Expr *Hint = nullptr;
- if (auto *HintClause = S.getSingleClause<OMPHintClause>())
+ const Expr *Hint = nullptr;
+ if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
Hint = HintClause->getHint();
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitCriticalRegion(*this,
S.getDirectiveName().getAsString(),
CodeGen, S.getLocStart(), Hint);
@@ -2622,7 +2692,8 @@ void CodeGenFunction::EmitOMPParallelForDirective(
const OMPParallelForDirective &S) {
  // Emit the directive as a combined directive consisting of two implicit
  // directives: 'parallel' and 'for'.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
emitDispatchForLoopBounds);
@@ -2635,7 +2706,8 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
const OMPParallelForSimdDirective &S) {
  // Emit the directive as a combined directive consisting of two implicit
  // directives: 'parallel' and 'for simd'.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
emitDispatchForLoopBounds);
};
@@ -2647,27 +2719,28 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective &S) {
  // Emit the directive as a combined directive consisting of two implicit
  // directives: 'parallel' and 'sections'.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
CGF.EmitSections(S);
};
emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
emitEmptyBoundParameters);
}
-void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
- const RegionCodeGenTy &BodyGen,
- const TaskGenTy &TaskGen,
- OMPTaskDataTy &Data) {
+void CodeGenFunction::EmitOMPTaskBasedDirective(
+ const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
+ const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
+ OMPTaskDataTy &Data) {
// Emit outlined function for task construct.
- auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
- auto *I = CS->getCapturedDecl()->param_begin();
- auto *PartId = std::next(I);
- auto *TaskT = std::next(I, 4);
+ const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
+ auto I = CS->getCapturedDecl()->param_begin();
+ auto PartId = std::next(I);
+ auto TaskT = std::next(I, 4);
// Check if the task is final
if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
- auto *Cond = Clause->getCondition();
+ const Expr *Cond = Clause->getCondition();
bool CondConstant;
if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
Data.Final.setInt(CondConstant);
@@ -2679,7 +2752,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
}
// Check if the task has 'priority' clause.
if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
- auto *Prio = Clause->getPriority();
+ const Expr *Prio = Clause->getPriority();
Data.Priority.setInt(/*IntVal=*/true);
Data.Priority.setPointer(EmitScalarConversion(
EmitScalarExpr(Prio), Prio->getType(),
@@ -2692,8 +2765,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
// Get list of private variables.
for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
auto IRef = C->varlist_begin();
- for (auto *IInit : C->private_copies()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *IInit : C->private_copies()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
Data.PrivateVars.push_back(*IRef);
Data.PrivateCopies.push_back(IInit);
@@ -2706,8 +2779,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
auto IRef = C->varlist_begin();
auto IElemInitRef = C->inits().begin();
- for (auto *IInit : C->private_copies()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *IInit : C->private_copies()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
Data.FirstprivateVars.push_back(*IRef);
Data.FirstprivateCopies.push_back(IInit);
@@ -2722,8 +2795,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
auto IRef = C->varlist_begin();
auto ID = C->destination_exprs().begin();
- for (auto *IInit : C->private_copies()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+ for (const Expr *IInit : C->private_copies()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
Data.LastprivateVars.push_back(*IRef);
Data.LastprivateCopies.push_back(IInit);
@@ -2742,7 +2815,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
auto IRed = C->reduction_ops().begin();
auto ILHS = C->lhs_exprs().begin();
auto IRHS = C->rhs_exprs().begin();
- for (const auto *Ref : C->varlists()) {
+ for (const Expr *Ref : C->varlists()) {
Data.ReductionVars.emplace_back(Ref);
Data.ReductionCopies.emplace_back(*IPriv);
Data.ReductionOps.emplace_back(*IRed);
@@ -2758,50 +2831,51 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
*this, S.getLocStart(), LHSs, RHSs, Data);
// Build list of dependences.
for (const auto *C : S.getClausesOfKind<OMPDependClause>())
- for (auto *IRef : C->varlists())
- Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
- auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
+ for (const Expr *IRef : C->varlists())
+ Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
+ auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
+ CapturedRegion](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty()) {
enum { PrivatesParam = 2, CopyFnParam = 3 };
- auto *CopyFn = CGF.Builder.CreateLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
- auto *PrivatesPtr = CGF.Builder.CreateLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
+ llvm::Value *CopyFn = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
+ llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
+ CS->getCapturedDecl()->getParam(PrivatesParam)));
// Map privates.
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
CallArgs.push_back(PrivatesPtr);
- for (auto *E : Data.PrivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ for (const Expr *E : Data.PrivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Address PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
- PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
+ PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
- for (auto *E : Data.FirstprivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ for (const Expr *E : Data.FirstprivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
- PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
+ PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
- for (auto *E : Data.LastprivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ for (const Expr *E : Data.LastprivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".lastpriv.ptr.addr");
- PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
+ PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(),
CopyFn, CallArgs);
- for (auto &&Pair : LastprivateDstsOrigs) {
- auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
+ for (const auto &Pair : LastprivateDstsOrigs) {
+ const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
DeclRefExpr DRE(
const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
@@ -2811,14 +2885,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
return CGF.EmitLValue(&DRE).getAddress();
});
}
- for (auto &&Pair : PrivatePtrs) {
+ for (const auto &Pair : PrivatePtrs) {
Address Replacement(CGF.Builder.CreateLoad(Pair.second),
CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
}
}
if (Data.Reductions) {
- OMPLexicalScope LexScope(CGF, S, /*AsInlined=*/true);
+ OMPLexicalScope LexScope(CGF, S, CapturedRegion);
ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
Data.ReductionOps);
llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
@@ -2826,6 +2900,11 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
RedCG.emitSharedLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
+ // FIXME: This must be removed once the runtime library is fixed.
+ // Emit required threadprivate variables for
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
+ RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getLocStart(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement =
@@ -2833,16 +2912,11 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
Replacement.getPointer(), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(
Data.ReductionCopies[Cnt]->getType()),
- SourceLocation()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
Scope.addPrivate(RedCG.getBaseDecl(Cnt),
[Replacement]() { return Replacement; });
- // FIXME: This must removed once the runtime library is fixed.
- // Emit required threadprivate variables for
- // initilizer/combiner/finalizer.
- CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
- RedCG, Cnt);
}
}
// Privatize all private variables except for in_reduction items.
@@ -2855,7 +2929,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
auto IPriv = C->privates().begin();
auto IRed = C->reduction_ops().begin();
auto ITD = C->taskgroup_descriptors().begin();
- for (const auto *Ref : C->varlists()) {
+ for (const Expr *Ref : C->varlists()) {
InRedVars.emplace_back(Ref);
InRedPrivs.emplace_back(*IPriv);
InRedOps.emplace_back(*IRed);
@@ -2875,24 +2949,25 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
RedCG.emitAggregateType(CGF, Cnt);
      // The taskgroup descriptor variable is always implicitly firstprivate
      // and privatized already during processing of the firstprivates.
- llvm::Value *ReductionsPtr = CGF.EmitLoadOfScalar(
- CGF.EmitLValue(TaskgroupDescriptors[Cnt]), SourceLocation());
+ // FIXME: This must be removed once the runtime library is fixed.
+ // Emit required threadprivate variables for
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
+ RedCG, Cnt);
+ llvm::Value *ReductionsPtr =
+ CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
+ TaskgroupDescriptors[Cnt]->getExprLoc());
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
CGF, S.getLocStart(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
CGF.EmitScalarConversion(
Replacement.getPointer(), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
- SourceLocation()),
+ InRedPrivs[Cnt]->getExprLoc()),
Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
[Replacement]() { return Replacement; });
- // FIXME: This must removed once the runtime library is fixed.
- // Emit required threadprivate variables for
- // initilizer/combiner/finalizer.
- CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
- RedCG, Cnt);
}
}
(void)InRedScope.Privatize();
@@ -2900,7 +2975,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
Action.Enter(CGF);
BodyGen(CGF);
};
- auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
+ llvm::Value *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
Data.NumberOfParts);
OMPLexicalScope Scope(*this, S);
@@ -2909,27 +2984,24 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
- QualType Ty, CapturedDecl *CD) {
- auto *OrigVD = ImplicitParamDecl::Create(
- C, CD, SourceLocation(), /*Id=*/nullptr, Ty, ImplicitParamDecl::Other);
- auto *OrigRef =
- DeclRefExpr::Create(C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
- /*RefersToEnclosingVariableOrCapture=*/false,
- SourceLocation(), Ty, VK_LValue);
- auto *PrivateVD = ImplicitParamDecl::Create(
- C, CD, SourceLocation(), /*Id=*/nullptr, Ty, ImplicitParamDecl::Other);
+ QualType Ty, CapturedDecl *CD,
+ SourceLocation Loc) {
+ auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
+ ImplicitParamDecl::Other);
+ auto *OrigRef = DeclRefExpr::Create(
+ C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
+ /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
+ auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
+ ImplicitParamDecl::Other);
auto *PrivateRef = DeclRefExpr::Create(
C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
- /*RefersToEnclosingVariableOrCapture=*/false, SourceLocation(), Ty,
- VK_LValue);
+ /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
QualType ElemType = C.getBaseElementType(Ty);
- auto *InitVD =
- ImplicitParamDecl::Create(C, CD, SourceLocation(), /*Id=*/nullptr,
- ElemType, ImplicitParamDecl::Other);
- auto *InitRef =
- DeclRefExpr::Create(C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
- /*RefersToEnclosingVariableOrCapture=*/false,
- SourceLocation(), ElemType, VK_LValue);
+ auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
+ ImplicitParamDecl::Other);
+ auto *InitRef = DeclRefExpr::Create(
+ C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
+ /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
PrivateVD->setInitStyle(VarDecl::CInit);
PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
InitRef, /*BasePath=*/nullptr,
@@ -2944,12 +3016,12 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
OMPTargetDataInfo &InputInfo) {
// Emit outlined function for task construct.
- auto CS = S.getCapturedStmt(OMPD_task);
- auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
- auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
- auto *I = CS->getCapturedDecl()->param_begin();
- auto *PartId = std::next(I);
- auto *TaskT = std::next(I, 4);
+ const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
+ Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ auto I = CS->getCapturedDecl()->param_begin();
+ auto PartId = std::next(I);
+ auto TaskT = std::next(I, 4);
OMPTaskDataTy Data;
// The task is not final.
Data.Final.setInt(/*IntVal=*/false);
@@ -2976,14 +3048,15 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
QualType BaseAndPointersType = getContext().getConstantArrayType(
getContext().VoidPtrTy, ArrSize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- BPVD = createImplicitFirstprivateForType(getContext(), Data,
- BaseAndPointersType, CD);
- PVD = createImplicitFirstprivateForType(getContext(), Data,
- BaseAndPointersType, CD);
+ BPVD = createImplicitFirstprivateForType(
+ getContext(), Data, BaseAndPointersType, CD, S.getLocStart());
+ PVD = createImplicitFirstprivateForType(
+ getContext(), Data, BaseAndPointersType, CD, S.getLocStart());
QualType SizesType = getContext().getConstantArrayType(
getContext().getSizeType(), ArrSize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD);
+ SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
+ S.getLocStart());
TargetScope.addPrivate(
BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
TargetScope.addPrivate(PVD,
@@ -2994,33 +3067,33 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
(void)TargetScope.Privatize();
// Build list of dependences.
for (const auto *C : S.getClausesOfKind<OMPDependClause>())
- for (auto *IRef : C->varlists())
- Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
+ for (const Expr *IRef : C->varlists())
+ Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
&InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
if (!Data.FirstprivateVars.empty()) {
enum { PrivatesParam = 2, CopyFnParam = 3 };
- auto *CopyFn = CGF.Builder.CreateLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)));
- auto *PrivatesPtr = CGF.Builder.CreateLoad(
- CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)));
+ llvm::Value *CopyFn = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
+ llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
+ CS->getCapturedDecl()->getParam(PrivatesParam)));
// Map privates.
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
CallArgs.push_back(PrivatesPtr);
- for (auto *E : Data.FirstprivateVars) {
- auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ for (const Expr *E : Data.FirstprivateVars) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Address PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
".firstpriv.ptr.addr");
- PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
+ PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(),
CopyFn, CallArgs);
- for (auto &&Pair : PrivatePtrs) {
+ for (const auto &Pair : PrivatePtrs) {
Address Replacement(CGF.Builder.CreateLoad(Pair.second),
CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
@@ -3028,19 +3101,20 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
}
// Privatize all private variables except for in_reduction items.
(void)Scope.Privatize();
- InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
- CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0, CGF.getPointerSize());
- InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
- CGF.GetAddrOfLocalVar(PVD), /*Index=*/0, CGF.getPointerSize());
- InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
- CGF.GetAddrOfLocalVar(SVD), /*Index=*/0, CGF.getSizeSize());
+ if (InputInfo.NumberOfTargetItems > 0) {
+ InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
+ CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0, CGF.getPointerSize());
+ InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
+ CGF.GetAddrOfLocalVar(PVD), /*Index=*/0, CGF.getPointerSize());
+ InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
+ CGF.GetAddrOfLocalVar(SVD), /*Index=*/0, CGF.getSizeSize());
+ }
Action.Enter(CGF);
- OMPLexicalScope LexScope(CGF, S, /*AsInlined=*/true,
- /*EmitPreInitStmt=*/false);
+ OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
BodyGen(CGF);
};
- auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
+ llvm::Value *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
Data.NumberOfParts);
llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
@@ -3054,9 +3128,9 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
- auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
- auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
- auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
+ Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
@@ -3079,7 +3153,7 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
SharedsTy, CapturedStruct, IfCond,
Data);
};
- EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
+ EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}
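// For illustration (assumed example): given a directive such as
//   #pragma omp task depend(in : A) firstprivate(B)
//   Work(A, B);
// EmitOMPTaskBasedDirective gathers the clauses into Data, and TaskGen
// lowers the outlined function into the runtime's task allocation and
// enqueue calls.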
void CodeGenFunction::EmitOMPTaskyieldDirective(
@@ -3108,7 +3182,7 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
auto IRed = C->reduction_ops().begin();
auto ILHS = C->lhs_exprs().begin();
auto IRHS = C->rhs_exprs().begin();
- for (const auto *Ref : C->varlists()) {
+ for (const Expr *Ref : C->varlists()) {
Data.ReductionVars.emplace_back(Ref);
Data.ReductionCopies.emplace_back(*IPriv);
Data.ReductionOps.emplace_back(*IRed);
@@ -3128,40 +3202,42 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
/*Volatile=*/false, E->getType());
}
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
- CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
- if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) {
- return llvm::makeArrayRef(FlushClause->varlist_begin(),
- FlushClause->varlist_end());
- }
- return llvm::None;
- }(), S.getLocStart());
+ CGM.getOpenMPRuntime().emitFlush(
+ *this,
+ [&S]() -> ArrayRef<const Expr *> {
+ if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
+ return llvm::makeArrayRef(FlushClause->varlist_begin(),
+ FlushClause->varlist_end());
+ return llvm::None;
+ }(),
+ S.getLocStart());
}
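// A minimal sketch of the immediately-invoked lambda idiom used above to
// compute the flush list (restating the code, not adding behavior):
//   ArrayRef<const Expr *> Vars = [&S]() -> ArrayRef<const Expr *> {
//     if (const auto *FC = S.getSingleClause<OMPFlushClause>())
//       return llvm::makeArrayRef(FC->varlist_begin(), FC->varlist_end());
//     return llvm::None;
//   }();
// Capturing S explicitly keeps the lambda's dependencies visible.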
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
const CodeGenLoopTy &CodeGenLoop,
Expr *IncExpr) {
// Emit the loop iteration variable.
- auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
- auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
+ const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
+ const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
EmitVarDecl(*IVDecl);
  // Emit the iteration count variable.
  // If it is not a variable, Sema decided to calculate the iteration count on
  // each iteration (e.g., it is foldable into a constant).
- if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
// Emit calculation of the iterations count.
EmitIgnoredExpr(S.getCalcLastIteration());
}
- auto &RT = CGM.getOpenMPRuntime();
+ CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
bool HasLastprivateClause = false;
// Check pre-condition.
@@ -3176,7 +3252,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
if (!CondConstant)
return;
} else {
- auto *ThenBlock = createBasicBlock("omp.precond.then");
+ llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
ContBlock = createBasicBlock("omp.precond.end");
emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
getProfileCount(&S));
@@ -3225,9 +3301,9 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// Detect the distribute schedule kind and chunk.
llvm::Value *Chunk = nullptr;
OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
- if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
+ if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
ScheduleKind = C->getDistScheduleKind();
- if (const auto *Ch = C->getChunkSize()) {
+ if (const Expr *Ch = C->getChunkSize()) {
Chunk = EmitScalarExpr(Ch);
Chunk = EmitScalarConversion(Chunk, Ch->getType(),
S.getIterationVariable()->getType(),
@@ -3254,7 +3330,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
LB.getAddress(), UB.getAddress(), ST.getAddress());
RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
StaticInit);
- auto LoopExit =
+ JumpDest LoopExit =
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
@@ -3265,9 +3341,10 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
? S.getCombinedInit()
: S.getInit());
- Expr *Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
- ? S.getCombinedCond()
- : S.getCond();
+ const Expr *Cond =
+ isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
+ ? S.getCombinedCond()
+ : S.getCond();
// for distribute alone, codegen
// while (idx <= UB) { BODY; ++idx; }
@@ -3291,31 +3368,35 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
CodeGenLoop);
}
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- EmitOMPSimdFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * {
+ EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart()));
});
}
- OpenMPDirectiveKind ReductionKind = OMPD_unknown;
- if (isOpenMPParallelDirective(S.getDirectiveKind()) &&
- isOpenMPSimdDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_parallel_for_simd;
- } else if (isOpenMPParallelDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_parallel_for;
- } else if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_simd;
- } else if (!isOpenMPTeamsDirective(S.getDirectiveKind()) &&
- S.hasClausesOfKind<OMPReductionClause>()) {
- llvm_unreachable(
- "No reduction clauses is allowed in distribute directive.");
+ if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
+ !isOpenMPParallelDirective(S.getDirectiveKind()) &&
+ !isOpenMPTeamsDirective(S.getDirectiveKind())) {
+ OpenMPDirectiveKind ReductionKind = OMPD_unknown;
+ if (isOpenMPParallelDirective(S.getDirectiveKind()) &&
+ isOpenMPSimdDirective(S.getDirectiveKind())) {
+ ReductionKind = OMPD_parallel_for_simd;
+ } else if (isOpenMPParallelDirective(S.getDirectiveKind())) {
+ ReductionKind = OMPD_parallel_for;
+ } else if (isOpenMPSimdDirective(S.getDirectiveKind())) {
+ ReductionKind = OMPD_simd;
+ } else if (!isOpenMPTeamsDirective(S.getDirectiveKind()) &&
+ S.hasClausesOfKind<OMPReductionClause>()) {
+ llvm_unreachable(
+ "No reduction clauses is allowed in distribute directive.");
+ }
+ EmitOMPReductionClauseFinal(S, ReductionKind);
+ // Emit post-update of the reduction variables if IsLastIter != 0.
+ emitPostUpdateForReductionClause(
+ *this, S, [IL, &S](CodeGenFunction &CGF) {
+ return CGF.Builder.CreateIsNotNull(
+ CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ });
}
- EmitOMPReductionClauseFinal(S, ReductionKind);
- // Emit post-update of the reduction variables if IsLastIter != 0.
- emitPostUpdateForReductionClause(
- *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * {
- return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
- });
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivateClause) {
EmitOMPLastprivateClauseFinal(
@@ -3335,10 +3416,9 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
void CodeGenFunction::EmitOMPDistributeDirective(
const OMPDistributeDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
-
CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}
@@ -3347,34 +3427,35 @@ static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
CGF.CapturedStmtInfo = &CapStmtInfo;
- auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
- Fn->addFnAttr(llvm::Attribute::NoInline);
+ llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S);
+ Fn->setDoesNotRecurse();
return Fn;
}
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
- if (!S.getAssociatedStmt()) {
+ if (S.hasClausesOfKind<OMPDependClause>()) {
+ assert(!S.getAssociatedStmt() &&
+ "No associated statement must be in ordered depend construct.");
for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
return;
}
- auto *C = S.getSingleClause<OMPSIMDClause>();
+ const auto *C = S.getSingleClause<OMPSIMDClause>();
auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
PrePostActionTy &Action) {
+ const CapturedStmt *CS = S.getInnermostCapturedStmt();
if (C) {
- auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
- auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
+ llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(),
OutlinedFn, CapturedVars);
} else {
Action.Enter(CGF);
- CGF.EmitStmt(
- cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(CS->getCapturedStmt());
}
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
}
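// For illustration: a stand-alone form such as
//   #pragma omp ordered depend(sink : I - 1)
// has depend clauses and no associated statement, so it takes the
// emitDoacrossOrdered path above, while the block form
//   #pragma omp ordered
//   { Body(); }
// goes through emitOrderedRegion.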
@@ -3384,11 +3465,10 @@ static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
assert(CGF.hasScalarEvaluationKind(DestType) &&
"DestType must have scalar evaluation kind.");
assert(!Val.isAggregate() && "Must be a scalar or complex.");
- return Val.isScalar()
- ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
- Loc)
- : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
- DestType, Loc);
+ return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
+ DestType, Loc)
+ : CGF.EmitComplexToScalarConversion(
+ Val.getComplexVal(), SrcType, DestType, Loc);
}
static CodeGenFunction::ComplexPairTy
@@ -3399,15 +3479,17 @@ convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
CodeGenFunction::ComplexPairTy ComplexVal;
if (Val.isScalar()) {
// Convert the input element to the element type of the complex.
- auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
- auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
- DestElementType, Loc);
+ QualType DestElementType =
+ DestType->castAs<ComplexType>()->getElementType();
+ llvm::Value *ScalarVal = CGF.EmitScalarConversion(
+ Val.getScalarVal(), SrcType, DestElementType, Loc);
ComplexVal = CodeGenFunction::ComplexPairTy(
ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
} else {
assert(Val.isComplex() && "Must be a scalar or complex.");
- auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
- auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
+ QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
+ QualType DestElementType =
+ DestType->castAs<ComplexType>()->getElementType();
ComplexVal.first = CGF.EmitScalarConversion(
Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
ComplexVal.second = CGF.EmitScalarConversion(
@@ -3446,7 +3528,7 @@ void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
}
}
-static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *V,
SourceLocation Loc) {
// v = x;
@@ -3470,7 +3552,7 @@ static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
}
-static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *E,
SourceLocation Loc) {
// x = expr;
@@ -3489,7 +3571,7 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
BinaryOperatorKind BO,
llvm::AtomicOrdering AO,
bool IsXLHSInRHSPart) {
- auto &Context = CGF.CGM.getContext();
+ ASTContext &Context = CGF.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // expression for 'x' is simple, and atomics are allowed for the given type
  // on the target platform.
@@ -3567,20 +3649,21 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
case BO_Comma:
llvm_unreachable("Unsupported atomic update operation");
}
- auto *UpdateVal = Update.getScalarVal();
+ llvm::Value *UpdateVal = Update.getScalarVal();
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
UpdateVal = CGF.Builder.CreateIntCast(
IC, X.getAddress().getElementType(),
X.getType()->hasSignedIntegerRepresentation());
}
- auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
+ llvm::Value *Res =
+ CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
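// An illustrative mapping, assuming an integer 'X' for which the target
// allows lock-free atomics:
//   #pragma omp atomic update
//   X += 1;
// is matched by emitOMPAtomicRMW above as BO_Add and lowered to roughly
//   atomicrmw add i32* %X, i32 1 monotonic
// (sequentially consistent ordering when 'seq_cst' is present).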
std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
- const llvm::function_ref<RValue(RValue)> &CommonGen) {
+ const llvm::function_ref<RValue(RValue)> CommonGen) {
// Update expressions are allowed to have the following forms:
// x binop= expr; -> xrval + expr;
// x++, ++x -> xrval + 1;
@@ -3601,13 +3684,13 @@ std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
return Res;
}
-static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
"Update expr in 'atomic update' must be a binary operator.");
- auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+ const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
// Update expressions are allowed to have the following forms:
// x binop= expr; -> xrval + expr;
// x++, ++x -> xrval + 1;
@@ -3617,18 +3700,18 @@ static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
- auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic;
- auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
- auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
- auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
- auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
- auto Gen =
- [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
- CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
- CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
- return CGF.EmitAnyExpr(UE);
- };
+ llvm::AtomicOrdering AO = IsSeqCst
+ ? llvm::AtomicOrdering::SequentiallyConsistent
+ : llvm::AtomicOrdering::Monotonic;
+ const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+ const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+ const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+ const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+ auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
+ CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+ CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
+ return CGF.EmitAnyExpr(UE);
+ };
(void)CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
// OpenMP, 2.12.6, atomic Construct
@@ -3656,7 +3739,7 @@ static RValue convertToType(CodeGenFunction &CGF, RValue Value,
llvm_unreachable("Must be a scalar or complex.");
}
-static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
+static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
bool IsPostfixUpdate, const Expr *V,
const Expr *X, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
@@ -3667,27 +3750,28 @@ static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
LValue VLValue = CGF.EmitLValue(V);
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
- auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent
- : llvm::AtomicOrdering::Monotonic;
+ llvm::AtomicOrdering AO = IsSeqCst
+ ? llvm::AtomicOrdering::SequentiallyConsistent
+ : llvm::AtomicOrdering::Monotonic;
QualType NewVValType;
if (UE) {
// 'x' is updated with some additional value.
assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
"Update expr in 'atomic capture' must be a binary operator.");
- auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+ const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
// Update expressions are allowed to have the following forms:
// x binop= expr; -> xrval + expr;
// x++, ++x -> xrval + 1;
// x--, --x -> xrval - 1;
// x = x binop expr; -> xrval binop expr
// x = expr Op x; - > expr binop xrval;
- auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
- auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
- auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+ const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+ const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+ const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
NewVValType = XRValExpr->getType();
- auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+ const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
- IsPostfixUpdate](RValue XRValue) -> RValue {
+ IsPostfixUpdate](RValue XRValue) {
CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
RValue Res = CGF.EmitAnyExpr(UE);
@@ -3714,7 +3798,7 @@ static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
NewVValType = X->getType().getNonReferenceType();
ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
X->getType().getNonReferenceType(), Loc);
- auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) -> RValue {
+ auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
NewVVal = XRValue;
return ExprRValue;
};
@@ -3737,24 +3821,24 @@ static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
-static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
+static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
bool IsSeqCst, bool IsPostfixUpdate,
const Expr *X, const Expr *V, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
- EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
+ emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
break;
case OMPC_write:
- EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
+ emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
break;
case OMPC_unknown:
case OMPC_update:
- EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
+ emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
break;
case OMPC_capture:
- EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
+ emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
case OMPC_if:
@@ -3810,7 +3894,7 @@ static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>();
OpenMPClauseKind Kind = OMPC_unknown;
- for (auto *C : S.clauses()) {
+ for (const OMPClause *C : S.clauses()) {
// Find first clause (skip seq_cst clause, if it is first).
if (C->getClauseKind() != OMPC_seq_cst) {
Kind = C->getClauseKind();
@@ -3818,28 +3902,25 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
}
- const auto *CS =
- S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
+ const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS))
enterFullExpression(EWC);
- }
// Processing for statements under 'atomic capture'.
if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
- for (const auto *C : Compound->body()) {
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
+ for (const Stmt *C : Compound->body()) {
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(C))
enterFullExpression(EWC);
- }
}
}
auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF,
PrePostActionTy &) {
CGF.EmitStopPoint(CS);
- EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
+ emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
S.getV(), S.getExpr(), S.getUpdateExpr(),
S.isXLHSInRHSPart(), S.getLocStart());
};
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}
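// For illustration: given
//   #pragma omp atomic capture seq_cst
//   { V = X; X += 1; }
// the clause scan above yields Kind == OMPC_capture with IsSeqCst == true,
// and emitOMPAtomicExpr dispatches to emitOMPAtomicCaptureExpr.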
@@ -3848,7 +3929,16 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
const RegionCodeGenTy &CodeGen) {
assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
CodeGenModule &CGM = CGF.CGM;
- const CapturedStmt &CS = *S.getCapturedStmt(OMPD_target);
+
+ // On device emit this construct as inlined code.
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ OMPLexicalScope Scope(CGF, S, OMPD_target);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
+ });
+ return;
+ }
llvm::Function *Fn = nullptr;
llvm::Constant *FnID = nullptr;
@@ -3865,9 +3955,8 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
// Check if we have any device clause associated with the directive.
const Expr *Device = nullptr;
- if (auto *C = S.getSingleClause<OMPDeviceClause>()) {
+ if (auto *C = S.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
- }
// Check if we have an if clause whose conditional always evaluates to false
// or if we do not have any targets specified. If so the target region is not
@@ -3885,9 +3974,9 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
StringRef ParentName;
// In case we have Ctors/Dtors we use the complete type variant to produce
// the mangling of the device outlined kernel.
- if (auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
+ if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
- else if (auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
+ else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
else
ParentName =
@@ -3896,22 +3985,19 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
// Emit target region as a standalone region.
CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
IsOffloadEntry, CodeGen);
- OMPLexicalScope Scope(CGF, S);
- llvm::SmallVector<llvm::Value *, 16> CapturedVars;
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
- CapturedVars);
+ OMPLexicalScope Scope(CGF, S, OMPD_task);
+ CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device);
}
static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
PrePostActionTy &Action) {
+ Action.Enter(CGF);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
CGF.EmitOMPPrivateClause(S, PrivateScope);
(void)PrivateScope.Privatize();
- Action.Enter(CGF);
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
}
void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
@@ -3940,14 +4026,15 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
- auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
- S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ llvm::Value *OutlinedFn =
+ CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
+ S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
- const OMPNumTeamsClause *NT = S.getSingleClause<OMPNumTeamsClause>();
- const OMPThreadLimitClause *TL = S.getSingleClause<OMPThreadLimitClause>();
+ const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
+ const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
if (NT || TL) {
- Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr;
- Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr;
+ const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
+ const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;
CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
S.getLocStart());
@@ -3962,18 +4049,19 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
// Emit teams region as a standalone region.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
- emitPostUpdateForReductionClause(
- *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
}
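// For illustration, assuming a directive like
//   #pragma omp teams num_teams(4) thread_limit(8)
// emitCommonOMPTeamsDirective forwards both clause expressions to
// emitNumTeamsClause before emitting the outlined teams region.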
static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
@@ -3982,18 +4070,18 @@ static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
Action.Enter(CGF);
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
- Action.Enter(CGF);
CGF.EmitStmt(CS->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
- emitPostUpdateForReductionClause(
- CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
}
void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
@@ -4028,7 +4116,8 @@ emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4073,7 +4162,8 @@ static void emitTargetTeamsDistributeSimdRegion(
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4117,7 +4207,8 @@ void CodeGenFunction::EmitOMPTeamsDistributeDirective(
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4138,7 +4229,8 @@ void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4160,7 +4252,8 @@ void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4182,7 +4275,8 @@ void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
// Emit teams region as a standalone region.
auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4195,6 +4289,109 @@ void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
[](CodeGenFunction &) { return nullptr; });
}
+static void emitTargetTeamsDistributeParallelForRegion(
+ CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
+ S.getDistInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+
+ emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
+ CodeGenTeams);
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDistributeParallelForDirective &S) {
+ // Emit SPMD target teams distribute parallel for region as a standalone
+ // region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
+ };
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ // Emit target region as a standalone region.
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
+ S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
+ assert(Fn && Addr && "Target device function emission failed.");
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
+ const OMPTargetTeamsDistributeParallelForDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
+ };
+ emitCommonOMPTargetDirective(*this, S, CodeGen);
+}
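+
For orientation, the emitTargetTeamsDistributeParallelForRegion path added above handles combined constructs of the following shape; a hypothetical example, not taken from the patch:

    double sum = 0.0;
    #pragma omp target teams distribute parallel for \
        map(to : a[0:n]) map(tofrom : sum) reduction(+ : sum)
    for (int i = 0; i < n; ++i)
      sum += a[i];

The codegen splits the single directive back apart: a teams region (CodeGenTeams) wrapping an inlined distribute region (CodeGenDistribute), whose chunks each emit an inner parallel for via emitInnerParallelForWhenCombined.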
+
+static void emitTargetTeamsDistributeParallelForSimdRegion(
+ CodeGenFunction &CGF,
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
+ S.getDistInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+
+ emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
+ CodeGenTeams);
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
+ // Emit SPMD target teams distribute parallel for simd region as a standalone
+ // region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
+ };
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ // Emit target region as a standalone region.
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
+ S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
+ assert(Fn && Addr && "Target device function emission failed.");
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
+ };
+ emitCommonOMPTargetDirective(*this, S, CodeGen);
+}
+
void CodeGenFunction::EmitOMPCancellationPointDirective(
const OMPCancellationPointDirective &S) {
CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
@@ -4234,19 +4431,19 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
const auto &C = cast<OMPUseDevicePtrClause>(NC);
auto OrigVarIt = C.varlist_begin();
auto InitIt = C.inits().begin();
- for (auto PvtVarIt : C.private_copies()) {
- auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
- auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
- auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
+ for (const Expr *PvtVarIt : C.private_copies()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
+ const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
+ const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
// In order to identify the right initializer we need to match the
// declaration used by the mapping logic. In some cases we may get
// OMPCapturedExprDecl that refers to the original declaration.
const ValueDecl *MatchingVD = OrigVD;
- if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
// OMPCapturedExprDecls are used to privatize fields of the current
// structure.
- auto *ME = cast<MemberExpr>(OED->getInit());
+ const auto *ME = cast<MemberExpr>(OED->getInit());
assert(isa<CXXThisExpr>(ME->getBase()) &&
"Base should be the current struct!");
MatchingVD = ME->getMemberDecl();
@@ -4258,7 +4455,9 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
if (InitAddrIt == CaptureDeviceAddrMap.end())
continue;
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
+ InitAddrIt, InitVD,
+ PvtVD]() {
// Initialize the temporary initialization variable with the address we
// get from the runtime library. We have to cast the source address
// because it is always a void *. References are materialized in the
@@ -4275,7 +4474,7 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
EmitDecl(*PvtVD);
// The initialization variables reached its purpose in the emission
- // ofthe previous declaration, so we don't need it anymore.
+ // of the previous declaration, so we don't need it anymore.
LocalDeclMap.erase(InitVD);
// Return the address of the private variable.
@@ -4312,13 +4511,12 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitStmt(
- cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
- // Codegen that selects wheather to generate the privatization code or not.
+ // Codegen that selects whether to generate the privatization code or not.
auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
&InnermostCodeGen](CodeGenFunction &CGF,
PrePostActionTy &Action) {
@@ -4337,8 +4535,9 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
Info.CaptureDeviceAddrMap);
(void)PrivateScope.Privatize();
RCG(CGF);
- } else
+ } else {
RCG(CGF);
+ }
};
// Forward the provided action to the privatization codegen.
@@ -4364,12 +4563,12 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
// Check if we have any if clause associated with the directive.
const Expr *IfCond = nullptr;
- if (auto *C = S.getSingleClause<OMPIfClause>())
+ if (const auto *C = S.getSingleClause<OMPIfClause>())
IfCond = C->getCondition();
// Check if we have any device clause associated with the directive.
const Expr *Device = nullptr;
- if (auto *C = S.getSingleClause<OMPDeviceClause>())
+ if (const auto *C = S.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
// Set the action to signal privatization of device pointers.
@@ -4389,15 +4588,15 @@ void CodeGenFunction::EmitOMPTargetEnterDataDirective(
// Check if we have any if clause associated with the directive.
const Expr *IfCond = nullptr;
- if (auto *C = S.getSingleClause<OMPIfClause>())
+ if (const auto *C = S.getSingleClause<OMPIfClause>())
IfCond = C->getCondition();
// Check if we have any device clause associated with the directive.
const Expr *Device = nullptr;
- if (auto *C = S.getSingleClause<OMPDeviceClause>())
+ if (const auto *C = S.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_task);
CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
@@ -4410,15 +4609,15 @@ void CodeGenFunction::EmitOMPTargetExitDataDirective(
// Check if we have any if clause associated with the directive.
const Expr *IfCond = nullptr;
- if (auto *C = S.getSingleClause<OMPIfClause>())
+ if (const auto *C = S.getSingleClause<OMPIfClause>())
IfCond = C->getCondition();
// Check if we have any device clause associated with the directive.
const Expr *Device = nullptr;
- if (auto *C = S.getSingleClause<OMPDeviceClause>())
+ if (const auto *C = S.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_task);
CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
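
The switch from an inlined lexical scope to an OMPD_task-style one matches the tasking semantics these standalone data directives can carry; a hypothetical example:

    // With nowait/depend, 'target enter data' behaves as a deferred task,
    // so its clause expressions are captured the way task captures are.
    #pragma omp target enter data map(to : a[0:n]) depend(in : a) nowait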
@@ -4426,9 +4625,10 @@ static void emitTargetParallelRegion(CodeGenFunction &CGF,
const OMPTargetParallelDirective &S,
PrePostActionTy &Action) {
// Get the captured statement associated with the 'parallel' region.
- auto *CS = S.getCapturedStmt(OMPD_parallel);
+ const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
Action.Enter(CGF);
- auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
CGF.EmitOMPPrivateClause(S, PrivateScope);
@@ -4440,8 +4640,8 @@ static void emitTargetParallelRegion(CodeGenFunction &CGF,
};
emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
emitEmptyBoundParameters);
- emitPostUpdateForReductionClause(
- CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
}
void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
@@ -4472,7 +4672,8 @@ static void emitTargetParallelForRegion(CodeGenFunction &CGF,
Action.Enter(CGF);
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'for' directive.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
CodeGenFunction::OMPCancelStackRAII CancelRegion(
CGF, OMPD_target_parallel_for, S.hasCancel());
CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
@@ -4512,7 +4713,8 @@ emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
Action.Enter(CGF);
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'for' directive.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
emitDispatchForLoopBounds);
};
@@ -4547,17 +4749,17 @@ void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
const ImplicitParamDecl *PVD,
CodeGenFunction::OMPPrivateScope &Privates) {
- auto *VDecl = cast<VarDecl>(Helper->getDecl());
- Privates.addPrivate(
- VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); });
+ const auto *VDecl = cast<VarDecl>(Helper->getDecl());
+ Privates.addPrivate(VDecl,
+ [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
}
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
// Emit outlined function for task construct.
- auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
- auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
- auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
+ Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
+ QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
@@ -4600,7 +4802,7 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
if (!CondConstant)
return;
} else {
- auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
+ llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
ContBlock = CGF.createBasicBlock("taskloop.if.end");
emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
CGF.getProfileCount(&S));
@@ -4631,14 +4833,14 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
(void)LoopScope.Privatize();
// Emit the loop iteration variable.
const Expr *IVExpr = S.getIterationVariable();
- const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
+ const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
CGF.EmitVarDecl(*IVDecl);
CGF.EmitIgnoredExpr(S.getInit());
// Emit the iteration count variable.
// If it is not a variable, Sema decided to calculate the iteration count on
// each iteration (e.g., it is foldable into a constant).
- if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
+ if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
// Emit calculation of the iterations count.
CGF.EmitIgnoredExpr(S.getCalcLastIteration());
@@ -4668,7 +4870,8 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
const OMPTaskDataTy &Data) {
- auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
+ &Data](CodeGenFunction &CGF, PrePostActionTy &) {
OMPLoopScope PreInitScope(CGF, S);
CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
OutlinedFn, SharedsTy,
@@ -4677,15 +4880,16 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
CodeGen);
};
- if (Data.Nogroup)
- EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
- else {
+ if (Data.Nogroup) {
+ EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
+ } else {
CGM.getOpenMPRuntime().emitTaskgroupRegion(
*this,
[&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
PrePostActionTy &Action) {
Action.Enter(CGF);
- CGF.EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data);
+ CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
+ Data);
},
S.getLocStart());
}
@@ -4710,14 +4914,44 @@ void CodeGenFunction::EmitOMPTargetUpdateDirective(
// Check if we have any if clause associated with the directive.
const Expr *IfCond = nullptr;
- if (auto *C = S.getSingleClause<OMPIfClause>())
+ if (const auto *C = S.getSingleClause<OMPIfClause>())
IfCond = C->getCondition();
// Check if we have any device clause associated with the directive.
const Expr *Device = nullptr;
- if (auto *C = S.getSingleClause<OMPDeviceClause>())
+ if (const auto *C = S.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ OMPLexicalScope Scope(*this, S, OMPD_task);
CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
+
+void CodeGenFunction::EmitSimpleOMPExecutableDirective(
+ const OMPExecutableDirective &D) {
+ if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
+ return;
+ auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ if (isOpenMPSimdDirective(D.getDirectiveKind())) {
+ emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
+ } else {
+ if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
+ for (const Expr *E : LD->counters()) {
+ if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
+ cast<DeclRefExpr>(E)->getDecl())) {
+ // Emit only those that were not explicitly referenced in clauses.
+ if (!CGF.LocalDeclMap.count(VD))
+ CGF.EmitVarDecl(*VD);
+ }
+ }
+ }
+ CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
+ }
+ };
+ OMPSimdLexicalScope Scope(*this, D);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this,
+ isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
+ : D.getDirectiveKind(),
+ CodeGen);
+}
+
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 78928d04220d..41c8c943f54d 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -100,7 +100,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName()));
// Set the right visibility.
- CGM.setGlobalVisibility(VTT, RD, ForDefinition);
+ CGM.setGVProperties(VTT, RD);
}
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 2d9bf3bce926..5a2ec65f7763 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -31,29 +31,12 @@ using namespace CodeGen;
CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
: CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}
-llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
- const ThunkInfo &Thunk) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // Compute the mangled name.
- SmallString<256> Name;
- llvm::raw_svector_ostream Out(Name);
- if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
- getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
- Thunk.This, Out);
- else
- getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
-
- llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
- return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
+llvm::Constant *CodeGenModule::GetAddrOfThunk(StringRef Name, llvm::Type *FnTy,
+ GlobalDecl GD) {
+ return GetOrCreateLLVMFunction(Name, FnTy, GD, /*ForVTable=*/true,
/*DontDefer=*/true, /*IsThunk=*/true);
}
-static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
- const ThunkInfo &Thunk, llvm::Function *Fn) {
- CGM.setGlobalVisibility(Fn, MD, ForDefinition);
-}
-
static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
llvm::Function *ThunkFn, bool ForVTable,
GlobalDecl GD) {
@@ -62,8 +45,12 @@ static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
!Thunk.Return.isEmpty());
// Set the right visibility.
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- setThunkVisibility(CGM, MD, Thunk, ThunkFn);
+ CGM.setGVProperties(ThunkFn, GD);
+
+ if (!CGM.getCXXABI().exportThunk()) {
+ ThunkFn->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ ThunkFn->setDSOLocal(true);
+ }
if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
@@ -236,7 +223,8 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
}
void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
- const CGFunctionInfo &FnInfo) {
+ const CGFunctionInfo &FnInfo,
+ bool IsUnprototyped) {
assert(!CurGD.getDecl() && "CurGD was already set!");
CurGD = GD;
CurFuncIsThunk = true;
@@ -245,21 +233,28 @@ void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
QualType ThisType = MD->getThisType(getContext());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- QualType ResultType = CGM.getCXXABI().HasThisReturn(GD)
- ? ThisType
- : CGM.getCXXABI().hasMostDerivedReturn(GD)
- ? CGM.getContext().VoidPtrTy
- : FPT->getReturnType();
+ QualType ResultType;
+ if (IsUnprototyped)
+ ResultType = CGM.getContext().VoidTy;
+ else if (CGM.getCXXABI().HasThisReturn(GD))
+ ResultType = ThisType;
+ else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
+ ResultType = CGM.getContext().VoidPtrTy;
+ else
+ ResultType = FPT->getReturnType();
FunctionArgList FunctionArgs;
// Create the implicit 'this' parameter declaration.
CGM.getCXXABI().buildThisParam(*this, FunctionArgs);
- // Add the rest of the parameters.
- FunctionArgs.append(MD->param_begin(), MD->param_end());
+ // Add the rest of the parameters, if we have a prototype to work with.
+ if (!IsUnprototyped) {
+ FunctionArgs.append(MD->param_begin(), MD->param_end());
- if (isa<CXXDestructorDecl>(MD))
- CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);
+ if (isa<CXXDestructorDecl>(MD))
+ CGM.getCXXABI().addImplicitStructorParams(*this, ResultType,
+ FunctionArgs);
+ }
// Start defining the function.
auto NL = ApplyDebugLocation::CreateEmpty(*this);
@@ -285,7 +280,8 @@ void CodeGenFunction::FinishThunk() {
}
void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
- const ThunkInfo *Thunk) {
+ const ThunkInfo *Thunk,
+ bool IsUnprototyped) {
assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
"Please use a new CGF for this thunk");
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());
@@ -296,13 +292,17 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
*this, LoadCXXThisAddress(), Thunk->This)
: LoadCXXThis();
- if (CurFnInfo->usesInAlloca()) {
+ if (CurFnInfo->usesInAlloca() || IsUnprototyped) {
// We don't handle return adjusting thunks, because they require us to call
// the copy constructor. For now, fall through and pretend the return
// adjustment was empty so we don't crash.
if (Thunk && !Thunk->Return.isEmpty()) {
- CGM.ErrorUnsupported(
- MD, "non-trivial argument copy for return-adjusting thunk");
+ if (IsUnprototyped)
+ CGM.ErrorUnsupported(
+ MD, "return-adjusting thunk with incomplete parameter type");
+ else
+ CGM.ErrorUnsupported(
+ MD, "non-trivial argument copy for return-adjusting thunk");
}
EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
return;
@@ -429,55 +429,98 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
}
void CodeGenFunction::generateThunk(llvm::Function *Fn,
- const CGFunctionInfo &FnInfo,
- GlobalDecl GD, const ThunkInfo &Thunk) {
- StartThunk(Fn, GD, FnInfo);
+ const CGFunctionInfo &FnInfo, GlobalDecl GD,
+ const ThunkInfo &Thunk,
+ bool IsUnprototyped) {
+ StartThunk(Fn, GD, FnInfo, IsUnprototyped);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- // Get our callee.
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
+ // Get our callee. Use a placeholder type if this method is unprototyped so
+ // that CodeGenModule doesn't try to set attributes.
+ llvm::Type *Ty;
+ if (IsUnprototyped)
+ Ty = llvm::StructType::get(getLLVMContext());
+ else
+ Ty = CGM.getTypes().GetFunctionType(FnInfo);
+
llvm::Constant *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ // Fix up the function type for an unprototyped musttail call.
+ if (IsUnprototyped)
+ Callee = llvm::ConstantExpr::getBitCast(Callee, Fn->getType());
+
// Make the call and return the result.
- EmitCallAndReturnForThunk(Callee, &Thunk);
+ EmitCallAndReturnForThunk(Callee, &Thunk, IsUnprototyped);
}
-void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
- bool ForVTable) {
- const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
+static bool shouldEmitVTableThunk(CodeGenModule &CGM, const CXXMethodDecl *MD,
+ bool IsUnprototyped, bool ForVTable) {
+ // Always emit thunks in the MS C++ ABI. We cannot rely on other TUs to
+ // provide thunks for us.
+ if (CGM.getTarget().getCXXABI().isMicrosoft())
+ return true;
- // FIXME: re-use FnInfo in this computation.
- llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
- llvm::GlobalValue *Entry;
+ // In the Itanium C++ ABI, vtable thunks are provided by TUs that provide
+ // definitions of the main method. Therefore, emitting thunks with the vtable
+ // is purely an optimization. Emit the thunk if optimizations are enabled and
+ // all of the parameter types are complete.
+ if (ForVTable)
+ return CGM.getCodeGenOpts().OptimizationLevel && !IsUnprototyped;
- // Strip off a bitcast if we got one back.
- if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
- assert(CE->getOpcode() == llvm::Instruction::BitCast);
- Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
- } else {
- Entry = cast<llvm::GlobalValue>(C);
- }
+ // Always emit thunks along with the method definition.
+ return true;
+}
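
As a reminder of when vtable thunks arise at all, multiple inheritance is the classic case; a hypothetical example:

    struct A { virtual void f(); };
    struct B { virtual void g(); };
    struct C : A, B {
      void g() override;  // The entry for C::g in B's vtable-in-C is a
    };                    // thunk that adjusts 'this' from B* back to C*.

Under the Itanium ABI, any TU that defines C::g also provides that thunk, which is why emitting it alongside the vtable can be treated as a pure optimization above.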
- // There's already a declaration with the same name, check if it has the same
- // type or if we need to replace it.
- if (Entry->getType()->getElementType() !=
- CGM.getTypes().GetFunctionTypeForVTable(GD)) {
- llvm::GlobalValue *OldThunkFn = Entry;
+llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
+ const ThunkInfo &TI,
+ bool ForVTable) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- // If the types mismatch then we have to rewrite the definition.
- assert(OldThunkFn->isDeclaration() &&
- "Shouldn't replace non-declaration");
+ // First, get a declaration. Compute the mangled name. Don't worry about
+ // getting the function prototype right, since we may only need this
+ // declaration to fill in a vtable slot.
+ SmallString<256> Name;
+ MangleContext &MCtx = CGM.getCXXABI().getMangleContext();
+ llvm::raw_svector_ostream Out(Name);
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+ MCtx.mangleCXXDtorThunk(DD, GD.getDtorType(), TI.This, Out);
+ else
+ MCtx.mangleThunk(MD, TI, Out);
+ llvm::Type *ThunkVTableTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
+ llvm::Constant *Thunk = CGM.GetAddrOfThunk(Name, ThunkVTableTy, GD);
+
+ // If we don't need to emit a definition, return this declaration as is.
+ bool IsUnprototyped = !CGM.getTypes().isFuncTypeConvertible(
+ MD->getType()->castAs<FunctionType>());
+ if (!shouldEmitVTableThunk(CGM, MD, IsUnprototyped, ForVTable))
+ return Thunk;
+
+ // Arrange a function prototype appropriate for a function definition. In some
+ // cases in the MS ABI, we may need to build an unprototyped musttail thunk.
+ const CGFunctionInfo &FnInfo =
+ IsUnprototyped ? CGM.getTypes().arrangeUnprototypedMustTailThunk(MD)
+ : CGM.getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *ThunkFnTy = CGM.getTypes().GetFunctionType(FnInfo);
+
+ // If the type of the underlying GlobalValue is wrong, we'll have to replace
+ // it. It should be a declaration.
+ llvm::Function *ThunkFn = cast<llvm::Function>(Thunk->stripPointerCasts());
+ if (ThunkFn->getFunctionType() != ThunkFnTy) {
+ llvm::GlobalValue *OldThunkFn = ThunkFn;
+
+ assert(OldThunkFn->isDeclaration() && "Shouldn't replace non-declaration");
// Remove the name from the old thunk function and get a new thunk.
OldThunkFn->setName(StringRef());
- Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));
+ ThunkFn = llvm::Function::Create(ThunkFnTy, llvm::Function::ExternalLinkage,
+ Name.str(), &CGM.getModule());
+ CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
// If needed, replace the old thunk with a bitcast.
if (!OldThunkFn->use_empty()) {
llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ llvm::ConstantExpr::getBitCast(ThunkFn, OldThunkFn->getType());
OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
}
@@ -485,61 +528,48 @@ void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
OldThunkFn->eraseFromParent();
}
- llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;
if (!ThunkFn->isDeclaration()) {
if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
// There is already a thunk emitted for this function, do nothing.
- return;
+ return ThunkFn;
}
- setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
- return;
+ setThunkProperties(CGM, TI, ThunkFn, ForVTable, GD);
+ return ThunkFn;
}
+ // If this will be unprototyped, add the "thunk" attribute so that LLVM knows
+ // that the return type is meaningless. These thunks can be used to call
+ // functions with differing return types, and the caller is required to cast
+ // the prototype appropriately to extract the correct value.
+ if (IsUnprototyped)
+ ThunkFn->addFnAttr("thunk");
+
CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);
- if (ThunkFn->isVarArg()) {
+ if (!IsUnprototyped && ThunkFn->isVarArg()) {
// Varargs thunks are special; we can't just generate a call because
// we can't copy the varargs. Our implementation is rather
// expensive/sucky at the moment, so don't generate the thunk unless
// we have to.
// FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
if (UseAvailableExternallyLinkage)
- return;
- ThunkFn =
- CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
+ return ThunkFn;
+ ThunkFn = CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD,
+ TI);
} else {
// Normal thunk body generation.
- CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk);
+ CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, TI, IsUnprototyped);
}
- setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
-}
-
-void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
- const ThunkInfo &Thunk) {
- // If the ABI has key functions, only the TU with the key function should emit
- // the thunk. However, we can allow inlining of thunks if we emit them with
- // available_externally linkage together with vtables when optimizations are
- // enabled.
- if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
- !CGM.getCodeGenOpts().OptimizationLevel)
- return;
-
- // We can't emit thunks for member functions with incomplete types.
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- if (!CGM.getTypes().isFuncTypeConvertible(
- MD->getType()->castAs<FunctionType>()))
- return;
-
- emitThunk(GD, Thunk, /*ForVTable=*/true);
+ setThunkProperties(CGM, TI, ThunkFn, ForVTable, GD);
+ return ThunkFn;
}
-void CodeGenVTables::EmitThunks(GlobalDecl GD)
-{
+void CodeGenVTables::EmitThunks(GlobalDecl GD) {
const CXXMethodDecl *MD =
cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
@@ -554,7 +584,7 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
return;
for (const ThunkInfo& Thunk : *ThunkInfoVector)
- emitThunk(GD, Thunk, /*ForVTable=*/false);
+ maybeEmitThunk(GD, Thunk, /*ForVTable=*/false);
}
void CodeGenVTables::addVTableComponent(
@@ -647,9 +677,8 @@ void CodeGenVTables::addVTableComponent(
layout.vtable_thunks()[nextVTableThunkIndex].first == idx) {
auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;
- maybeEmitThunkForVTable(GD, thunkInfo);
nextVTableThunkIndex++;
- fnPtr = CGM.GetAddrOfThunk(GD, thunkInfo);
+ fnPtr = maybeEmitThunk(GD, thunkInfo, /*ForVTable=*/true);
// Otherwise we can use the method definition directly.
} else {
@@ -730,7 +759,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, VTType, Linkage);
- CGM.setGlobalVisibility(VTable, RD, ForDefinition);
+ CGM.setGVProperties(VTable, RD);
// V-tables are always unnamed_addr.
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
@@ -845,7 +874,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
llvm_unreachable("Invalid TemplateSpecializationKind!");
}
-/// This is a callback from Sema to tell us that that a particular vtable is
+/// This is a callback from Sema to tell us that a particular vtable is
/// required to be emitted in this translation unit.
///
/// This is only called for vtables that _must_ be emitted (mainly due to key
@@ -983,31 +1012,29 @@ void CodeGenModule::EmitVTableTypeMetadata(llvm::GlobalVariable *VTable,
CharUnits PointerWidth =
Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- typedef std::pair<const CXXRecordDecl *, unsigned> BSEntry;
- std::vector<BSEntry> BitsetEntries;
- // Create a bit set entry for each address point.
+ typedef std::pair<const CXXRecordDecl *, unsigned> AddressPoint;
+ std::vector<AddressPoint> AddressPoints;
for (auto &&AP : VTLayout.getAddressPoints())
- BitsetEntries.push_back(
- std::make_pair(AP.first.getBase(),
- VTLayout.getVTableOffset(AP.second.VTableIndex) +
- AP.second.AddressPointIndex));
-
- // Sort the bit set entries for determinism.
- std::sort(BitsetEntries.begin(), BitsetEntries.end(),
- [this](const BSEntry &E1, const BSEntry &E2) {
- if (&E1 == &E2)
+ AddressPoints.push_back(std::make_pair(
+ AP.first.getBase(), VTLayout.getVTableOffset(AP.second.VTableIndex) +
+ AP.second.AddressPointIndex));
+
+ // Sort the address points for determinism.
+ llvm::sort(AddressPoints.begin(), AddressPoints.end(),
+ [this](const AddressPoint &AP1, const AddressPoint &AP2) {
+ if (&AP1 == &AP2)
return false;
std::string S1;
llvm::raw_string_ostream O1(S1);
getCXXABI().getMangleContext().mangleTypeName(
- QualType(E1.first->getTypeForDecl(), 0), O1);
+ QualType(AP1.first->getTypeForDecl(), 0), O1);
O1.flush();
std::string S2;
llvm::raw_string_ostream O2(S2);
getCXXABI().getMangleContext().mangleTypeName(
- QualType(E2.first->getTypeForDecl(), 0), O2);
+ QualType(AP2.first->getTypeForDecl(), 0), O2);
O2.flush();
if (S1 < S2)
@@ -1015,10 +1042,26 @@ void CodeGenModule::EmitVTableTypeMetadata(llvm::GlobalVariable *VTable,
if (S1 != S2)
return false;
- return E1.second < E2.second;
+ return AP1.second < AP2.second;
});
- for (auto BitsetEntry : BitsetEntries)
- AddVTableTypeMetadata(VTable, PointerWidth * BitsetEntry.second,
- BitsetEntry.first);
+ ArrayRef<VTableComponent> Comps = VTLayout.vtable_components();
+ for (auto AP : AddressPoints) {
+ // Create type metadata for the address point.
+ AddVTableTypeMetadata(VTable, PointerWidth * AP.second, AP.first);
+
+ // The class associated with each address point could also potentially be
+ // used for indirect calls via a member function pointer, so we need to
+ // annotate the address of each function pointer with the appropriate member
+ // function pointer type.
+ for (unsigned I = 0; I != Comps.size(); ++I) {
+ if (Comps[I].getKind() != VTableComponent::CK_FunctionPointer)
+ continue;
+ llvm::Metadata *MD = CreateMetadataIdentifierForVirtualMemPtrType(
+ Context.getMemberPointerType(
+ Comps[I].getFunctionDecl()->getType(),
+ Context.getRecordType(AP.first).getTypePtr()));
+ VTable->addTypeMetadata((PointerWidth * I).getQuantity(), MD);
+ }
+ }
}
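
The per-slot member-pointer metadata added above is what lets control-flow-integrity checks (e.g. -fsanitize=cfi-mfcall) validate indirect calls of the following shape; a hypothetical example:

    struct S { virtual int f(int); };

    int call(S *s, int (S::*pmf)(int)) {
      return (s->*pmf)(1);  // A virtual member-pointer call dispatches
    }                       // through a vtable slot, so the slot's address
                            // must carry member-function-pointer type
                            // metadata for the check to accept it.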
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index b92212c368a9..a11474a15ea4 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -57,12 +57,10 @@ class CodeGenVTables {
/// Cache for the deleted virtual member call function.
llvm::Constant *DeletedVirtualFn = nullptr;
- /// emitThunk - Emit a single thunk.
- void emitThunk(GlobalDecl GD, const ThunkInfo &Thunk, bool ForVTable);
-
- /// maybeEmitThunkForVTable - Emit the given thunk for the vtable if needed by
- /// the ABI.
- void maybeEmitThunkForVTable(GlobalDecl GD, const ThunkInfo &Thunk);
+ /// Get the address of a thunk and emit it if necessary.
+ llvm::Constant *maybeEmitThunk(GlobalDecl GD,
+ const ThunkInfo &ThunkAdjustments,
+ bool ForVTable);
void addVTableComponent(ConstantArrayBuilder &builder,
const VTableLayout &layout, unsigned idx,
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 7d07ea4516c9..418bda1f41bb 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -193,7 +193,7 @@ class LValue {
// The alignment to use when accessing this lvalue. (For vector elements,
// this is the alignment of the whole vector.)
- int64_t Alignment;
+ unsigned Alignment;
// Objective-C's ivar
bool Ivar:1;
@@ -215,13 +215,13 @@ class LValue {
// to make the default bitfield pattern all-zeroes.
bool ImpreciseLifetime : 1;
- LValueBaseInfo BaseInfo;
- TBAAAccessInfo TBAAInfo;
-
// This flag shows if a nontemporal load/stores should be used when accessing
// this lvalue.
bool Nontemporal : 1;
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+
Expr *BaseIvarExp;
private:
@@ -231,7 +231,10 @@ private:
"initializing l-value with zero alignment!");
this->Type = Type;
this->Quals = Quals;
- this->Alignment = Alignment.getQuantity();
+ const unsigned MaxAlign = 1U << 31;
+ this->Alignment = Alignment.getQuantity() <= MaxAlign
+ ? Alignment.getQuantity()
+ : MaxAlign;
assert(this->Alignment == Alignment.getQuantity() &&
"Alignment exceeds allowed max!");
this->BaseInfo = BaseInfo;
@@ -398,7 +401,7 @@ public:
return R;
}
- /// \brief Create a new object to represent a bit-field access.
+ /// Create a new object to represent a bit-field access.
///
/// \param Addr - The base address of the bit-field sequence this
/// bit-field refers to.
@@ -469,17 +472,25 @@ class AggValueSlot {
/// evaluating an expression which constructs such an object.
bool AliasedFlag : 1;
+ /// This is set to true if the tail padding of this slot might overlap
+ /// another object that may have already been initialized (and whose
+ /// value must be preserved by this initialization). If so, we may only
+ /// store up to the dsize of the type. Otherwise we can widen stores to
+ /// the size of the type.
+ bool OverlapFlag : 1;
+
public:
enum IsAliased_t { IsNotAliased, IsAliased };
enum IsDestructed_t { IsNotDestructed, IsDestructed };
enum IsZeroed_t { IsNotZeroed, IsZeroed };
+ enum Overlap_t { DoesNotOverlap, MayOverlap };
enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
return forAddr(Address::invalid(), Qualifiers(), IsNotDestructed,
- DoesNotNeedGCBarriers, IsNotAliased);
+ DoesNotNeedGCBarriers, IsNotAliased, DoesNotOverlap);
}
/// forAddr - Make a slot for an aggregate value.
@@ -497,6 +508,7 @@ public:
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
+ Overlap_t mayOverlap,
IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
if (addr.isValid()) {
@@ -511,6 +523,7 @@ public:
AV.ObjCGCFlag = needsGC;
AV.ZeroedFlag = isZeroed;
AV.AliasedFlag = isAliased;
+ AV.OverlapFlag = mayOverlap;
return AV;
}
@@ -518,9 +531,10 @@ public:
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
+ Overlap_t mayOverlap,
IsZeroed_t isZeroed = IsNotZeroed) {
- return forAddr(LV.getAddress(),
- LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
+ return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC,
+ isAliased, mayOverlap, isZeroed);
}
IsDestructed_t isExternallyDestructed() const {
@@ -568,6 +582,10 @@ public:
return IsAliased_t(AliasedFlag);
}
+ Overlap_t mayOverlap() const {
+ return Overlap_t(OverlapFlag);
+ }
+
RValue asRValue() const {
if (isIgnored()) {
return RValue::getIgnored();
@@ -580,6 +598,14 @@ public:
IsZeroed_t isZeroed() const {
return IsZeroed_t(ZeroedFlag);
}
+
+ /// Get the preferred size to use when storing a value to this slot. This
+ /// is the type size unless that might overlap another object, in which
+ /// case it's the dsize.
+ CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const {
+ return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).first
+ : Ctx.getTypeSizeInChars(Type);
+ }
};
} // end namespace CodeGen
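
The size/dsize distinction that getPreferredSize draws matters when a base subobject's tail padding is reused; a hypothetical Itanium-ABI example on an LP64 target:

    struct Base {
      virtual ~Base();
      int *p;  // offset 8
      int i;   // offset 16; dsize(Base) == 20, sizeof(Base) == 24
    };
    struct Derived : Base {
      int j;   // laid out in Base's tail padding, at offset 20
    };

Initializing the Base subobject inside a Derived may only store dsize(Base) == 20 bytes; widening the store to sizeof(Base) == 24 would clobber Derived::j, which is exactly the case MayOverlap guards.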
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 84248cc64719..2a0f4f0e83ec 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -7,6 +7,7 @@ set(LLVM_LINK_COMPONENTS
Coverage
IPO
IRReader
+ AggressiveInstCombine
InstCombine
Instrumentation
LTO
@@ -31,6 +32,10 @@ if (CLANG_BUILT_STANDALONE)
set(codegen_deps)
endif()
+if (MSVC)
+ set_source_files_properties(CodeGenModule.cpp PROPERTIES COMPILE_FLAGS /bigobj)
+endif()
+
add_clang_library(clangCodeGen
BackendUtil.cpp
CGAtomic.cpp
@@ -56,6 +61,7 @@ add_clang_library(clangCodeGen
CGExprScalar.cpp
CGGPUBuiltin.cpp
CGLoopInfo.cpp
+ CGNonTrivialStruct.cpp
CGObjC.cpp
CGObjCGNU.cpp
CGObjCMac.cpp
@@ -93,7 +99,6 @@ add_clang_library(clangCodeGen
LINK_LIBS
clangAnalysis
clangAST
- clangAnalysis
clangBasic
clangFrontend
clangLex
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 6ca69d63cdce..7ca55070d4a0 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -126,7 +126,7 @@ namespace clang {
Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)) {
- llvm::TimePassesIsEnabled = TimePasses;
+ FrontendTimesIsEnabled = TimePasses;
}
llvm::Module *getModule() const { return Gen->GetModule(); }
std::unique_ptr<llvm::Module> takeModule() {
@@ -144,12 +144,12 @@ namespace clang {
Context = &Ctx;
- if (llvm::TimePassesIsEnabled)
+ if (FrontendTimesIsEnabled)
LLVMIRGeneration.startTimer();
Gen->Initialize(Ctx);
- if (llvm::TimePassesIsEnabled)
+ if (FrontendTimesIsEnabled)
LLVMIRGeneration.stopTimer();
}
@@ -159,7 +159,7 @@ namespace clang {
"LLVM IR generation of declaration");
// Recurse.
- if (llvm::TimePassesIsEnabled) {
+ if (FrontendTimesIsEnabled) {
LLVMIRGenerationRefCount += 1;
if (LLVMIRGenerationRefCount == 1)
LLVMIRGeneration.startTimer();
@@ -167,7 +167,7 @@ namespace clang {
Gen->HandleTopLevelDecl(D);
- if (llvm::TimePassesIsEnabled) {
+ if (FrontendTimesIsEnabled) {
LLVMIRGenerationRefCount -= 1;
if (LLVMIRGenerationRefCount == 0)
LLVMIRGeneration.stopTimer();
@@ -180,12 +180,12 @@ namespace clang {
PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
Context->getSourceManager(),
"LLVM IR generation of inline function");
- if (llvm::TimePassesIsEnabled)
+ if (FrontendTimesIsEnabled)
LLVMIRGeneration.startTimer();
Gen->HandleInlineFunctionDefinition(D);
- if (llvm::TimePassesIsEnabled)
+ if (FrontendTimesIsEnabled)
LLVMIRGeneration.stopTimer();
}
@@ -227,7 +227,7 @@ namespace clang {
void HandleTranslationUnit(ASTContext &C) override {
{
PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
- if (llvm::TimePassesIsEnabled) {
+ if (FrontendTimesIsEnabled) {
LLVMIRGenerationRefCount += 1;
if (LLVMIRGenerationRefCount == 1)
LLVMIRGeneration.startTimer();
@@ -235,13 +235,13 @@ namespace clang {
Gen->HandleTranslationUnit(C);
- if (llvm::TimePassesIsEnabled) {
+ if (FrontendTimesIsEnabled) {
LLVMIRGenerationRefCount -= 1;
if (LLVMIRGenerationRefCount == 0)
LLVMIRGeneration.stopTimer();
}
- IRGenFinished = true;
+ IRGenFinished = true;
}
// Silently ignore if we weren't initialized for some reason.
@@ -341,17 +341,17 @@ namespace clang {
SourceLocation LocCookie);
void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
- /// \brief Specialized handler for InlineAsm diagnostic.
+ /// Specialized handler for InlineAsm diagnostic.
/// \return True if the diagnostic has been successfully reported, false
/// otherwise.
bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
- /// \brief Specialized handler for StackSize diagnostic.
+ /// Specialized handler for StackSize diagnostic.
/// \return True if the diagnostic has been successfully reported, false
/// otherwise.
bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
- /// \brief Specialized handler for unsupported backend feature diagnostic.
+ /// Specialized handler for unsupported backend feature diagnostic.
void UnsupportedDiagHandler(const llvm::DiagnosticInfoUnsupported &D);
- /// \brief Specialized handlers for optimization remarks.
+ /// Specialized handlers for optimization remarks.
/// Note that these handlers only accept remarks and they always handle
/// them.
void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D,
@@ -697,7 +697,7 @@ void BackendConsumer::OptimizationFailureHandler(
EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
}
-/// \brief This function is invoked when the backend needs
+/// This function is invoked when the backend needs
/// to report something to the user.
void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
unsigned DiagID = diag::err_fe_inline_asm;
@@ -846,7 +846,10 @@ GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) {
std::unique_ptr<ASTConsumer>
CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
BackendAction BA = static_cast<BackendAction>(Act);
- std::unique_ptr<raw_pwrite_stream> OS = GetOutputStream(CI, InFile, BA);
+ std::unique_ptr<raw_pwrite_stream> OS = CI.takeOutputStream();
+ if (!OS)
+ OS = GetOutputStream(CI, InFile, BA);
+
if (BA != Backend_EmitNothing && !OS)
return nullptr;
@@ -947,12 +950,21 @@ std::unique_ptr<llvm::Module> CodeGenAction::loadModule(MemoryBufferRef MBRef) {
return {};
};
- Expected<llvm::BitcodeModule> BMOrErr = FindThinLTOModule(MBRef);
- if (!BMOrErr)
- return DiagErrors(BMOrErr.takeError());
-
+ Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
+ if (!BMsOrErr)
+ return DiagErrors(BMsOrErr.takeError());
+ BitcodeModule *Bm = FindThinLTOModule(*BMsOrErr);
+ // We have nothing to do if the file contains no ThinLTO module. This is
+ // possible if ThinLTO compilation was not able to split the module. The
+ // content of the file was already processed by indexing and will be passed
+ // to the linker using the merged object file.
+ if (!Bm) {
+ auto M = llvm::make_unique<llvm::Module>("empty", *VMContext);
+ M->setTargetTriple(CI.getTargetOpts().Triple);
+ return M;
+ }
Expected<std::unique_ptr<llvm::Module>> MOrErr =
- BMOrErr->parseModule(*VMContext);
+ Bm->parseModule(*VMContext);
if (!MOrErr)
return DiagErrors(MOrErr.takeError());
return std::move(*MOrErr);
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 9dbd7cc3fcbf..3c582688e91e 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -65,25 +65,9 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
- CurFn(nullptr), ReturnValue(Address::invalid()),
- CapturedStmtInfo(nullptr), SanOpts(CGM.getLangOpts().Sanitize),
- IsSanitizerScope(false), CurFuncIsThunk(false), AutoreleaseResult(false),
- SawAsmBlock(false), IsOutlinedSEHHelper(false), BlockInfo(nullptr),
- BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
- NormalCleanupDest(nullptr), NextCleanupDestIndex(1),
- FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
- EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
- DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
- PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
- CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
- NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
- CXXABIThisValue(nullptr), CXXThisValue(nullptr),
- CXXStructorImplicitParamDecl(nullptr),
- CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
- CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
- TerminateHandler(nullptr), TrapBB(nullptr),
- ShouldEmitLifetimeMarkers(
- shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
+ SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
+ PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
+ CGM.getCodeGenOpts(), CGM.getLangOpts())) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
@@ -419,6 +403,9 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
EmitIfUsed(*this, TerminateHandler);
EmitIfUsed(*this, UnreachableBlock);
+ for (const auto &FuncletAndParent : TerminateFunclets)
+ EmitIfUsed(*this, FuncletAndParent.second);
+
if (CGM.getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
@@ -436,11 +423,17 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// if compiled with no optimizations. We do it for coroutines, as the lifetime
// of the CleanupDestSlot alloca makes correct coroutine frame building very
// difficult.
- if (NormalCleanupDest && isCoroutine()) {
+ if (NormalCleanupDest.isValid() && isCoroutine()) {
llvm::DominatorTree DT(*CurFn);
- llvm::PromoteMemToReg(NormalCleanupDest, DT);
- NormalCleanupDest = nullptr;
+ llvm::PromoteMemToReg(
+ cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
+ NormalCleanupDest = Address::invalid();
}
+
+ // Add the required-vector-width attribute.
+ if (LargestVectorWidth != 0)
+ CurFn->addFnAttr("min-legal-vector-width",
+ llvm::utostr(LargestVectorWidth));
}
/// ShouldInstrumentFunction - Return true if the current function should be
@@ -462,9 +455,19 @@ bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
}
/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
-/// the __xray_customevent(...) builin calls, when doing XRay instrumentation.
+/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
- return CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents;
+ return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
+ (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
+ CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
+ XRayInstrKind::Custom);
+}
+
+bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
+ return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
+ (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
+ CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
+ XRayInstrKind::Typed);
}
llvm::Constant *
@@ -842,14 +845,24 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (D) {
// Apply the no_sanitize* attributes to SanOpts.
- for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
- SanOpts.Mask &= ~Attr->getMask();
+ for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
+ SanitizerMask mask = Attr->getMask();
+ SanOpts.Mask &= ~mask;
+ if (mask & SanitizerKind::Address)
+ SanOpts.set(SanitizerKind::KernelAddress, false);
+ if (mask & SanitizerKind::KernelAddress)
+ SanOpts.set(SanitizerKind::Address, false);
+ if (mask & SanitizerKind::HWAddress)
+ SanOpts.set(SanitizerKind::KernelHWAddress, false);
+ if (mask & SanitizerKind::KernelHWAddress)
+ SanOpts.set(SanitizerKind::HWAddress, false);
+ }
}
// Apply sanitizer attributes to the function.
if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
- if (SanOpts.hasOneOf(SanitizerKind::HWAddress))
+ if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
if (SanOpts.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
@@ -857,6 +870,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
if (SanOpts.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
+ if (SanOpts.has(SanitizerKind::ShadowCallStack))
+ Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
+
+ // Apply fuzzing attribute to the function.
+ if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
+ Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
// Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
// .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
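
In source terms, the new pairing means one no_sanitize attribute now disables both the user and kernel flavor of a sanitizer; a hypothetical example:

    // Under either -fsanitize=address or -fsanitize=kernel-address this
    // function is skipped: clearing one flavor now clears its sibling too,
    // since both map to the same SanitizeAddress IR attribute below.
    __attribute__((no_sanitize("address")))
    void touches_redzone(char *p) { p[-1] = 0; }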
@@ -884,7 +903,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
}
// Apply xray attributes to the function (as a string, for now)
- if (D && ShouldXRayInstrumentFunction()) {
+ bool InstrumentXray = ShouldXRayInstrumentFunction() &&
+ CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::Function);
+ if (D && InstrumentXray) {
if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
if (XRayAttr->alwaysXRayInstrument())
Fn->addFnAttr("function-instrument", "xray-always");
@@ -921,8 +943,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
+ // Remove any (C++17) exception specifications, to allow calling e.g. a
+ // noexcept function through a non-noexcept pointer.
+ auto ProtoTy =
+ getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
+ EST_None);
llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
+ CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
llvm::Constant *FTRTTIConstEncoded =
EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
llvm::Constant *PrologueStructElems[] = {PrologueSig,
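
The exception-specification stripping above keeps the -fsanitize=function prologue data from rejecting a call pattern that C++17 makes legal; a hypothetical example:

    void f() noexcept;
    void (*fp)() = f;  // OK since C++17: a noexcept function pointer
                       // converts to a non-noexcept one.
    fp();              // Checked against f's prologue RTTI; both sides must
                       // encode plain 'void ()' for the call to pass.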
@@ -987,7 +1014,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
ArgTypes.push_back(VD->getType());
QualType FnType = getContext().getFunctionType(
RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
- DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
+ DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
+ Builder);
}
if (ShouldInstrumentFunction()) {
@@ -1006,10 +1034,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
// The attribute "counting-function" is set to mcount function name which is
// architecture dependent.
if (CGM.getCodeGenOpts().InstrumentForProfiling) {
- if (CGM.getCodeGenOpts().CallFEntry)
- Fn->addFnAttr("fentry-call", "true");
- else {
- if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
+ // Calls to fentry/mcount should not be generated if function has
+ // the no_instrument_function attribute.
+ if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
+ if (CGM.getCodeGenOpts().CallFEntry)
+ Fn->addFnAttr("fentry-call", "true");
+ else {
Fn->addFnAttr("instrument-function-entry-inlined",
getTarget().getMCountName());
}
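
Concretely, the reordered check means no_instrument_function now suppresses -mfentry instrumentation as well, not just the mcount path; a hypothetical example:

    // Built with -pg -mfentry, this function no longer receives the
    // "fentry-call" attribute, so no __fentry__ call is inserted.
    __attribute__((no_instrument_function))
    void hot_path(void) {}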
@@ -1055,6 +1085,11 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
EmitStartEHSpec(CurCodeDecl);
PrologueCleanupDepth = EHStack.stable_begin();
+
+ // Emit OpenMP specific initialization of the device functions.
+ if (getLangOpts().OpenMP && CurCodeDecl)
+ CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
+
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
@@ -1108,8 +1143,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
// may have a static invoker function, which may call this operator with
// a null 'this' pointer.
if (isLambdaCallOperator(MD) &&
- cast<CXXRecordDecl>(MD->getParent())->getLambdaCaptureDefault() ==
- LCD_None)
+ MD->getParent()->getLambdaCaptureDefault() == LCD_None)
SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
@@ -1141,6 +1175,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
// Emit a location at the end of the prologue.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, StartLoc);
+
+ // TODO: Do we need to handle this in two places like we do with
+ // target-features/target-cpu?
+ if (CurFuncDecl)
+ if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
+ LargestVectorWidth = VecWidth->getVectorWidth();
}
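
A sketch of the input this reads (hypothetical user code; in practice the attribute is usually attached by x86 intrinsic headers rather than written by hand):

    __attribute__((__min_vector_width__(512))) // seeds LargestVectorWidth = 512
    void uses_zmm(void) { /* 512-bit vector code */ }
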
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
@@ -1748,12 +1788,9 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
- QualType eltType;
- llvm::Value *numElts;
- std::tie(numElts, eltType) = getVLASize(vlaType);
-
- SizeVal = numElts;
- CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
+ auto VlaSize = getVLASize(vlaType);
+ SizeVal = VlaSize.NumElts;
+ CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
if (!eltSize.isOne())
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
vla = vlaType;
@@ -1836,7 +1873,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// this is the size of the VLA in bytes, not its size in elements.
llvm::Value *numVLAElements = nullptr;
if (isa<VariableArrayType>(arrayType)) {
- numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
+ numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
// Walk into all VLAs. This doesn't require changes to addr,
// which has type T* where T is the first non-VLA element type.
@@ -1917,14 +1954,13 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
return numElements;
}
-std::pair<llvm::Value*, QualType>
-CodeGenFunction::getVLASize(QualType type) {
+CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
assert(vla && "type was not a variable array type!");
return getVLASize(vla);
}
-std::pair<llvm::Value*, QualType>
+CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
// The number of elements so far; always size_t.
llvm::Value *numElements = nullptr;
@@ -1945,7 +1981,22 @@ CodeGenFunction::getVLASize(const VariableArrayType *type) {
}
} while ((type = getContext().getAsVariableArrayType(elementType)));
- return std::pair<llvm::Value*,QualType>(numElements, elementType);
+ return { numElements, elementType };
+}
+
+CodeGenFunction::VlaSizePair
+CodeGenFunction::getVLAElements1D(QualType type) {
+ const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLAElements1D(vla);
+}
+
+CodeGenFunction::VlaSizePair
+CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
+ llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
+ assert(VlaSize && "no size for VLA!");
+ assert(VlaSize->getType() == SizeTy);
+ return { VlaSize, Vla->getElementType() };
}
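
A before/after sketch of a call site (illustrative, not taken from this patch): the named fields of VlaSizePair replace positional std::pair access.

    // Before: llvm::Value *N = getVLASize(VlaTy).first;
    auto VlaSize = CGF.getVLASize(VlaTy);
    llvm::Value *N = VlaSize.NumElts; // element count, always of size_t type
    QualType EltTy = VlaSize.Type;    // largest non-variably-sized element type
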
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
@@ -2228,7 +2279,7 @@ static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
return std::all_of(
ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
SmallVector<StringRef, 1> OrFeatures;
- Feature.split(OrFeatures, "|");
+ Feature.split(OrFeatures, '|');
return std::any_of(OrFeatures.begin(), OrFeatures.end(),
[&](StringRef Feature) {
if (!CallerFeatureMap.lookup(Feature)) {
@@ -2266,17 +2317,28 @@ void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
// Return if the builtin doesn't have any required features.
if (!FeatureList || StringRef(FeatureList) == "")
return;
- StringRef(FeatureList).split(ReqFeatures, ",");
+ StringRef(FeatureList).split(ReqFeatures, ',');
if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
<< TargetDecl->getDeclName()
<< CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
- } else if (TargetDecl->hasAttr<TargetAttr>()) {
+ } else if (TargetDecl->hasAttr<TargetAttr>() ||
+ TargetDecl->hasAttr<CPUSpecificAttr>()) {
// Get the required features for the callee.
+
+ const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
+ TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);
+
SmallVector<StringRef, 1> ReqFeatures;
llvm::StringMap<bool> CalleeFeatureMap;
CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
+
+ for (const auto &F : ParsedAttr.Features) {
+ if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
+ ReqFeatures.push_back(StringRef(F).substr(1));
+ }
+
for (const auto &F : CalleeFeatureMap) {
// Only positive features are "required".
if (F.getValue())
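
Roughly the mismatch being validated (hypothetical user code; in practice the check runs for builtin calls and, e.g., always_inline callees):

    // The callee's target attribute contributes +avx2 to ReqFeatures; a
    // caller without avx2 then fails hasRequiredFeatures and is diagnosed.
    __attribute__((target("avx2"), always_inline)) inline void callee(void) {}
    void caller(void) { callee(); }
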
@@ -2297,6 +2359,99 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
CGM.getSanStats().create(IRB, SSK);
}
+llvm::Value *CodeGenFunction::FormResolverCondition(
+ const TargetMultiVersionResolverOption &RO) {
+ llvm::Value *TrueCondition = nullptr;
+ if (!RO.ParsedAttribute.Architecture.empty())
+ TrueCondition = EmitX86CpuIs(RO.ParsedAttribute.Architecture);
+
+ if (!RO.ParsedAttribute.Features.empty()) {
+ SmallVector<StringRef, 8> FeatureList;
+ llvm::for_each(RO.ParsedAttribute.Features,
+ [&FeatureList](const std::string &Feature) {
+ FeatureList.push_back(StringRef{Feature}.substr(1));
+ });
+ llvm::Value *FeatureCmp = EmitX86CpuSupports(FeatureList);
+ TrueCondition = TrueCondition ? Builder.CreateAnd(TrueCondition, FeatureCmp)
+ : FeatureCmp;
+ }
+ return TrueCondition;
+}
+
+void CodeGenFunction::EmitTargetMultiVersionResolver(
+ llvm::Function *Resolver,
+ ArrayRef<TargetMultiVersionResolverOption> Options) {
+ assert((getContext().getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::x86 ||
+ getContext().getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::x86_64) &&
+ "Only implemented for x86 targets");
+
+  // The resolver function's entry block.
+ llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
+ Builder.SetInsertPoint(CurBlock);
+ EmitX86CpuInit();
+
+ llvm::Function *DefaultFunc = nullptr;
+ for (const TargetMultiVersionResolverOption &RO : Options) {
+ Builder.SetInsertPoint(CurBlock);
+ llvm::Value *TrueCondition = FormResolverCondition(RO);
+
+ if (!TrueCondition) {
+ DefaultFunc = RO.Function;
+ } else {
+ llvm::BasicBlock *RetBlock = createBasicBlock("ro_ret", Resolver);
+ llvm::IRBuilder<> RetBuilder(RetBlock);
+ RetBuilder.CreateRet(RO.Function);
+ CurBlock = createBasicBlock("ro_else", Resolver);
+ Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
+ }
+ }
+
+ assert(DefaultFunc && "No default version?");
+  // Emit the default version's return from the final fallthrough block.
+ Builder.SetInsertPoint(CurBlock);
+ Builder.CreateRet(DefaultFunc);
+}
+
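
An illustrative shape of the emitted resolver (a sketch with hypothetical versions of foo; block names match the code above):

    // __attribute__((target("arch=skylake"))) int foo(void); // cpu_is check
    // __attribute__((target("avx2")))         int foo(void); // cpu_supports check
    // __attribute__((target("default")))      int foo(void); // no condition
    //
    // entry:    <EmitX86CpuInit>
    //           br (cpu_is "skylake"), ro_ret, ro_else
    // ro_ret:   ret @foo.arch_skylake
    // ro_else:  br (cpu_supports "avx2"), ro_ret1, ro_else1
    // ro_ret1:  ret @foo.avx2
    // ro_else1: ret @foo.default
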
+void CodeGenFunction::EmitCPUDispatchMultiVersionResolver(
+ llvm::Function *Resolver,
+ ArrayRef<CPUDispatchMultiVersionResolverOption> Options) {
+ assert((getContext().getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::x86 ||
+ getContext().getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::x86_64) &&
+ "Only implemented for x86 targets");
+
+  // The resolver function's entry block.
+ llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
+ Builder.SetInsertPoint(CurBlock);
+ EmitX86CpuInit();
+
+ for (const CPUDispatchMultiVersionResolverOption &RO : Options) {
+ Builder.SetInsertPoint(CurBlock);
+
+    // The "generic" case (feature mask 0) is the catch-all.
+ if (RO.FeatureMask == 0) {
+ Builder.CreateRet(RO.Function);
+ return;
+ }
+ llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
+ llvm::IRBuilder<> RetBuilder(RetBlock);
+ RetBuilder.CreateRet(RO.Function);
+ CurBlock = createBasicBlock("resolver_else", Resolver);
+ llvm::Value *TrueCondition = EmitX86CpuSupports(RO.FeatureMask);
+ Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
+ }
+
+ Builder.SetInsertPoint(CurBlock);
+ llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+}
+
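
A hypothetical cpu_dispatch use (ICC-compatible syntax) and the resolver it produces, sketched:

    // __attribute__((cpu_specific(ivybridge))) void f(void) {}
    // __attribute__((cpu_specific(generic)))   void f(void) {}
    // __attribute__((cpu_dispatch(ivybridge, generic))) void f(void);
    //
    // resolver_entry:  br (cpu_supports <ivybridge mask>), resolver_return,
    //                     resolver_else
    // resolver_return: ret @f.ivybridge
    // resolver_else:   ret @f.generic   ; FeatureMask == 0, the catch-all
    // Without a mask-0 option, the trailing block traps instead of returning.
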
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
if (CGDebugInfo *DI = getDebugInfo())
return DI->SourceLocToDebugLoc(Location);
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index dd4c2e43ef64..79870ed59c96 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -34,6 +34,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
@@ -137,6 +138,88 @@ enum SanitizerHandler {
#undef SANITIZER_CHECK
};
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
+};
+
+/// A partial specialization of DominatingPointer for pointer types
+/// whose pointees might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for Address.
+template <> struct DominatingValue<Address> {
+ typedef Address type;
+
+ struct saved_type {
+ DominatingLLVMValue::saved_type SavedValue;
+ CharUnits Alignment;
+ };
+
+ static bool needsSaving(type value) {
+ return DominatingLLVMValue::needsSaving(value.getPointer());
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return { DominatingLLVMValue::save(CGF, value.getPointer()),
+ value.getAlignment() };
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
+ value.Alignment);
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ unsigned K : 3;
+ unsigned Align : 29;
+ saved_type(llvm::Value *v, Kind k, unsigned a = 0)
+ : Value(v), K(k), Align(a) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGCleanup.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
+
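
A condensed sketch of how these traits are consumed (illustrative; CGF is the active CodeGenFunction): a conditionally-emitted cleanup saves each argument up front and rebuilds it when the cleanup is finally emitted.

    llvm::Value *V = /* possibly an Instruction outside the entry block */;
    DominatingLLVMValue::saved_type S = DominatingLLVMValue::save(CGF, V);
    // ... control may branch; the cleanup is emitted on a later path ...
    V = DominatingLLVMValue::restore(CGF, S); // reloads the spill if one was made
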
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
@@ -200,7 +283,7 @@ public:
Address UB)>
CodeGenDispatchBoundsTy;
- /// \brief CGBuilder insert helper. This function is called after an
+ /// CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
@@ -213,7 +296,7 @@ public:
const Decl *CurCodeDecl;
const CGFunctionInfo *CurFnInfo;
QualType FnRetTy;
- llvm::Function *CurFn;
+ llvm::Function *CurFn = nullptr;
// Holds coroutine data if the current function is a coroutine. We use a
// wrapper to manage its lifetime, so that we don't have to define CGCoroData
@@ -241,7 +324,7 @@ public:
/// ReturnValue - The temporary alloca to hold the return
/// value. This is invalid iff the function has no return value.
- Address ReturnValue;
+ Address ReturnValue = Address::invalid();
/// Return true if a label was seen in the current scope.
bool hasLabelBeenSeenInCurrentScope() const {
@@ -254,7 +337,7 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
- /// \brief API for captured statement code generation.
+ /// API for captured statement code generation.
class CGCapturedStmtInfo {
public:
explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
@@ -282,10 +365,10 @@ public:
CapturedRegionKind getKind() const { return Kind; }
virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
- // \brief Retrieve the value of the context parameter.
+ // Retrieve the value of the context parameter.
virtual llvm::Value *getContextValue() const { return ThisValue; }
- /// \brief Lookup the captured field decl for a variable.
+ /// Lookup the captured field decl for a variable.
virtual const FieldDecl *lookup(const VarDecl *VD) const {
return CaptureFields.lookup(VD->getCanonicalDecl());
}
@@ -297,32 +380,32 @@ public:
return true;
}
- /// \brief Emit the captured statement body.
+ /// Emit the captured statement body.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
CGF.incrementProfileCounter(S);
CGF.EmitStmt(S);
}
- /// \brief Get the name of the capture helper.
+ /// Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
private:
- /// \brief The kind of captured statement being generated.
+ /// The kind of captured statement being generated.
CapturedRegionKind Kind;
- /// \brief Keep the map between VarDecl and FieldDecl.
+ /// Keep the map between VarDecl and FieldDecl.
llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
- /// \brief The base address of the captured record, passed in as the first
+ /// The base address of the captured record, passed in as the first
/// argument of the parallel region function.
llvm::Value *ThisValue;
- /// \brief Captured 'this' type.
+ /// Captured 'this' type.
FieldDecl *CXXThisFieldDecl;
};
- CGCapturedStmtInfo *CapturedStmtInfo;
+ CGCapturedStmtInfo *CapturedStmtInfo = nullptr;
- /// \brief RAII for correct setting/restoring of CapturedStmtInfo.
+ /// RAII for correct setting/restoring of CapturedStmtInfo.
class CGCapturedStmtRAII {
private:
CodeGenFunction &CGF;
@@ -361,13 +444,13 @@ public:
}
};
- /// \brief Sanitizers enabled for this function.
+ /// Sanitizers enabled for this function.
SanitizerSet SanOpts;
- /// \brief True if CodeGen currently emits code implementing sanitizer checks.
- bool IsSanitizerScope;
+ /// True if CodeGen currently emits code implementing sanitizer checks.
+ bool IsSanitizerScope = false;
- /// \brief RAII object to set/unset CodeGenFunction::IsSanitizerScope.
+ /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
class SanitizerScope {
CodeGenFunction *CGF;
public:
@@ -377,28 +460,28 @@ public:
/// In C++, whether we are code generating a thunk. This controls whether we
/// should emit cleanups.
- bool CurFuncIsThunk;
+ bool CurFuncIsThunk = false;
/// In ARC, whether we should autorelease the return value.
- bool AutoreleaseResult;
+ bool AutoreleaseResult = false;
/// Whether we processed a Microsoft-style asm block during CodeGen. These can
/// potentially set the return value.
- bool SawAsmBlock;
+ bool SawAsmBlock = false;
const FunctionDecl *CurSEHParent = nullptr;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
- bool IsOutlinedSEHHelper;
+ bool IsOutlinedSEHHelper = false;
- const CodeGen::CGBlockInfo *BlockInfo;
- llvm::Value *BlockPointer;
+ const CodeGen::CGBlockInfo *BlockInfo = nullptr;
+ llvm::Value *BlockPointer = nullptr;
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
- FieldDecl *LambdaThisCaptureField;
+ FieldDecl *LambdaThisCaptureField = nullptr;
- /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
@@ -426,30 +509,33 @@ public:
/// The size of the following cleanup object.
unsigned Size;
/// The kind of cleanup to push: a value from the CleanupKind enumeration.
- CleanupKind Kind;
+ unsigned Kind : 31;
+ /// Whether this is a conditional cleanup.
+ unsigned IsConditional : 1;
size_t getSize() const { return Size; }
- CleanupKind getKind() const { return Kind; }
+ CleanupKind getKind() const { return (CleanupKind)Kind; }
+ bool isConditional() const { return IsConditional; }
};
/// i32s containing the indexes of the cleanup destinations.
- llvm::AllocaInst *NormalCleanupDest;
+ Address NormalCleanupDest = Address::invalid();
- unsigned NextCleanupDestIndex;
+ unsigned NextCleanupDestIndex = 1;
/// FirstBlockInfo - The head of a singly-linked-list of block layouts.
- CGBlockInfo *FirstBlockInfo;
+ CGBlockInfo *FirstBlockInfo = nullptr;
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
- llvm::BasicBlock *EHResumeBlock;
+ llvm::BasicBlock *EHResumeBlock = nullptr;
/// The exception slot. All landing pads write the current exception pointer
/// into this alloca.
- llvm::Value *ExceptionSlot;
+ llvm::Value *ExceptionSlot = nullptr;
/// The selector slot. Under the MandatoryCleanup model, all landing pads
/// write the current selector value into this alloca.
- llvm::AllocaInst *EHSelectorSlot;
+ llvm::AllocaInst *EHSelectorSlot = nullptr;
/// A stack of exception code slots. Entering an __except block pushes a slot
/// on the stack and leaving pops one. The __exception_code() intrinsic loads
@@ -524,28 +610,52 @@ public:
initFullExprCleanup();
}
- /// \brief Queue a cleanup to be pushed after finishing the current
+ /// Queue a cleanup to be pushed after finishing the current
/// full-expression.
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
- assert(!isInConditionalBranch() && "can't defer conditional cleanup");
+ if (!isInConditionalBranch())
+ return pushCleanupAfterFullExprImpl<T>(Kind, Address::invalid(), A...);
+
+ Address ActiveFlag = createCleanupActiveFlag();
+ assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
+ "cleanup active flag should never need saving");
- LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
+ typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
+ SavedTuple Saved{saveValueInCond(A)...};
+
+ typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
+ pushCleanupAfterFullExprImpl<CleanupType>(Kind, ActiveFlag, Saved);
+ }
+
+ template <class T, class... As>
+ void pushCleanupAfterFullExprImpl(CleanupKind Kind, Address ActiveFlag,
+ As... A) {
+ LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
+ ActiveFlag.isValid()};
size_t OldSize = LifetimeExtendedCleanupStack.size();
LifetimeExtendedCleanupStack.resize(
- LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
+ LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
+ (Header.IsConditional ? sizeof(ActiveFlag) : 0));
static_assert(sizeof(Header) % alignof(T) == 0,
"Cleanup will be allocated on misaligned address");
char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
new (Buffer) LifetimeExtendedCleanupHeader(Header);
new (Buffer + sizeof(Header)) T(A...);
+ if (Header.IsConditional)
+ new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
}
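
The resulting layout of one entry on LifetimeExtendedCleanupStack is, schematically:

    [ Header | T (Header.Size bytes) | Address (active flag) ]

The trailing Address is present only when Header.IsConditional is set.
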
- /// Set up the last cleaup that was pushed as a conditional
+ /// Set up the last cleanup that was pushed as a conditional
/// full-expression cleanup.
- void initFullExprCleanup();
+ void initFullExprCleanup() {
+ initFullExprCleanupWithFlag(createCleanupActiveFlag());
+ }
+
+ void initFullExprCleanupWithFlag(Address ActiveFlag);
+ Address createCleanupActiveFlag();
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
@@ -583,10 +693,10 @@ public:
void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
llvm::Instruction *DominatingIP);
- /// \brief Enters a new scope for capturing cleanups, all of which
+ /// Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- EHScopeStack::stable_iterator CleanupStackDepth;
+ EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
size_t LifetimeExtendedCleanupStackSize;
bool OldDidCallStackSave;
protected:
@@ -600,7 +710,7 @@ public:
CodeGenFunction& CGF;
public:
- /// \brief Enter a new cleanup scope.
+ /// Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
: PerformCleanup(true), CGF(CGF)
{
@@ -609,20 +719,22 @@ public:
CGF.LifetimeExtendedCleanupStack.size();
OldDidCallStackSave = CGF.DidCallStackSave;
CGF.DidCallStackSave = false;
+ OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
+ CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
}
- /// \brief Exit this cleanup scope, emitting any accumulated cleanups.
+ /// Exit this cleanup scope, emitting any accumulated cleanups.
~RunCleanupsScope() {
if (PerformCleanup)
ForceCleanup();
}
- /// \brief Determine whether this scope requires any cleanups.
+ /// Determine whether this scope requires any cleanups.
bool requiresCleanups() const {
return CGF.EHStack.stable_begin() != CleanupStackDepth;
}
- /// \brief Force the emission of cleanups now, instead of waiting
+ /// Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
/// \param ValuesToReload - A list of values that need to be available at
/// the insertion point after cleanup emission. If cleanup emission created
@@ -634,9 +746,14 @@ public:
CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
ValuesToReload);
PerformCleanup = false;
+ CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
}
};
+ // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
+ EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
+ EHScopeStack::stable_end();
+
class LexicalScope : public RunCleanupsScope {
SourceRange Range;
SmallVector<const LabelDecl*, 4> Labels;
@@ -646,7 +763,7 @@ public:
void operator=(const LexicalScope &) = delete;
public:
- /// \brief Enter a new cleanup scope.
+ /// Enter a new cleanup scope.
explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
: RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
CGF.CurLexicalScope = this;
@@ -659,7 +776,7 @@ public:
Labels.push_back(label);
}
- /// \brief Exit this cleanup scope, emitting any accumulated
+ /// Exit this cleanup scope, emitting any accumulated
/// cleanups.
~LexicalScope() {
if (CGDebugInfo *DI = CGF.getDebugInfo())
@@ -673,7 +790,7 @@ public:
}
}
- /// \brief Force the emission of cleanups now, instead of waiting
+ /// Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
void ForceCleanup() {
CGF.CurLexicalScope = ParentScope;
@@ -692,57 +809,107 @@ public:
typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;
- /// \brief The scope used to remap some variables as private in the OpenMP
- /// loop body (or other captured region emitted without outlining), and to
- /// restore old vars back on exit.
- class OMPPrivateScope : public RunCleanupsScope {
+ /// The class used to assign some variables some temporarily addresses.
+ class OMPMapVars {
DeclMapTy SavedLocals;
- DeclMapTy SavedPrivates;
-
- private:
- OMPPrivateScope(const OMPPrivateScope &) = delete;
- void operator=(const OMPPrivateScope &) = delete;
+ DeclMapTy SavedTempAddresses;
+ OMPMapVars(const OMPMapVars &) = delete;
+ void operator=(const OMPMapVars &) = delete;
public:
- /// \brief Enter a new OpenMP private scope.
- explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
-
- /// \brief Registers \a LocalVD variable as a private and apply \a
- /// PrivateGen function for it to generate corresponding private variable.
- /// \a PrivateGen returns an address of the generated private variable.
- /// \return true if the variable is registered as private, false if it has
- /// been privatized already.
- bool
- addPrivate(const VarDecl *LocalVD,
- llvm::function_ref<Address()> PrivateGen) {
- assert(PerformCleanup && "adding private to dead scope");
+ explicit OMPMapVars() = default;
+ ~OMPMapVars() {
+      assert(SavedLocals.empty() && "Did not restore original addresses.");
+    }
+ /// Sets the address of the variable \p LocalVD to be \p TempAddr in
+ /// function \p CGF.
+    /// \return true if the variable's address was newly set, false if it had
+    /// already been set earlier.
+ bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
+ Address TempAddr) {
LocalVD = LocalVD->getCanonicalDecl();
// Only save it once.
if (SavedLocals.count(LocalVD)) return false;
// Copy the existing local entry to SavedLocals.
auto it = CGF.LocalDeclMap.find(LocalVD);
- if (it != CGF.LocalDeclMap.end()) {
- SavedLocals.insert({LocalVD, it->second});
- } else {
- SavedLocals.insert({LocalVD, Address::invalid()});
- }
+ if (it != CGF.LocalDeclMap.end())
+ SavedLocals.try_emplace(LocalVD, it->second);
+ else
+ SavedLocals.try_emplace(LocalVD, Address::invalid());
// Generate the private entry.
- Address Addr = PrivateGen();
QualType VarTy = LocalVD->getType();
if (VarTy->isReferenceType()) {
Address Temp = CGF.CreateMemTemp(VarTy);
- CGF.Builder.CreateStore(Addr.getPointer(), Temp);
- Addr = Temp;
+ CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
+ TempAddr = Temp;
}
- SavedPrivates.insert({LocalVD, Addr});
+ SavedTempAddresses.try_emplace(LocalVD, TempAddr);
return true;
}
- /// \brief Privatizes local variables previously registered as private.
+    /// Applies the new addresses to the affected variables.
+    /// \return true if at least one variable now uses a new address, false
+    /// otherwise.
+ bool apply(CodeGenFunction &CGF) {
+ copyInto(SavedTempAddresses, CGF.LocalDeclMap);
+ SavedTempAddresses.clear();
+ return !SavedLocals.empty();
+ }
+
+ /// Restores original addresses of the variables.
+ void restore(CodeGenFunction &CGF) {
+ if (!SavedLocals.empty()) {
+ copyInto(SavedLocals, CGF.LocalDeclMap);
+ SavedLocals.clear();
+ }
+ }
+
+ private:
+ /// Copy all the entries in the source map over the corresponding
+ /// entries in the destination, which must exist.
+ static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
+ for (auto &Pair : Src) {
+ if (!Pair.second.isValid()) {
+ Dest.erase(Pair.first);
+ continue;
+ }
+
+ auto I = Dest.find(Pair.first);
+ if (I != Dest.end())
+ I->second = Pair.second;
+ else
+ Dest.insert(Pair);
+ }
+ }
+ };
+
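
A usage sketch (mirroring how OMPPrivateScope drives it below):

    OMPMapVars Map;
    Map.setVarAddr(CGF, VD, PrivateAddr); // save old mapping, stage the new one
    Map.apply(CGF);     // LocalDeclMap now resolves VD to PrivateAddr
    // ... emit the region body with the private copy ...
    Map.restore(CGF);   // LocalDeclMap resolves VD to its original address again
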
+ /// The scope used to remap some variables as private in the OpenMP loop body
+ /// (or other captured region emitted without outlining), and to restore old
+ /// vars back on exit.
+ class OMPPrivateScope : public RunCleanupsScope {
+ OMPMapVars MappedVars;
+ OMPPrivateScope(const OMPPrivateScope &) = delete;
+ void operator=(const OMPPrivateScope &) = delete;
+
+ public:
+ /// Enter a new OpenMP private scope.
+ explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
+
+    /// Registers \p LocalVD as a private variable and applies the \p
+    /// PrivateGen function to generate the corresponding private copy. \p
+    /// PrivateGen returns the address of the generated private variable.
+    /// \return true if the variable is registered as private, false if it has
+    /// already been privatized.
+ bool addPrivate(const VarDecl *LocalVD,
+ const llvm::function_ref<Address()> PrivateGen) {
+ assert(PerformCleanup && "adding private to dead scope");
+ return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
+ }
+
+ /// Privatizes local variables previously registered as private.
/// Registration is separate from the actual privatization to allow
/// initializers use values of the original variables, not the private one.
/// This is important, for example, if the private variable is a class
@@ -750,19 +917,14 @@ public:
/// variables. But at initialization original variables must be used, not
/// private copies.
/// \return true if at least one variable was privatized, false otherwise.
- bool Privatize() {
- copyInto(SavedPrivates, CGF.LocalDeclMap);
- SavedPrivates.clear();
- return !SavedLocals.empty();
- }
+ bool Privatize() { return MappedVars.apply(CGF); }
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
- copyInto(SavedLocals, CGF.LocalDeclMap);
- SavedLocals.clear();
+ MappedVars.restore(CGF);
}
- /// \brief Exit scope - all the mapped variables are restored.
+ /// Exit scope - all the mapped variables are restored.
~OMPPrivateScope() {
if (PerformCleanup)
ForceCleanup();
@@ -773,34 +935,15 @@ public:
VD = VD->getCanonicalDecl();
return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
}
-
- private:
- /// Copy all the entries in the source map over the corresponding
- /// entries in the destination, which must exist.
- static void copyInto(const DeclMapTy &src, DeclMapTy &dest) {
- for (auto &pair : src) {
- if (!pair.second.isValid()) {
- dest.erase(pair.first);
- continue;
- }
-
- auto it = dest.find(pair.first);
- if (it != dest.end()) {
- it->second = pair.second;
- } else {
- dest.insert(pair);
- }
- }
- }
};
- /// \brief Takes the old cleanup stack size and emits the cleanup blocks
+ /// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
void
PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
std::initializer_list<llvm::Value **> ValuesToReload = {});
- /// \brief Takes the old cleanup stack size and emits the cleanup blocks
+ /// Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added, then adds all lifetime-extended cleanups from
/// the given position to the stack.
void
@@ -843,7 +986,8 @@ public:
llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
- llvm::BasicBlock *getMSVCDispatchBlock(EHScopeStack::stable_iterator scope);
+ llvm::BasicBlock *
+ getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
class ConditionalEvaluation {
@@ -1052,22 +1196,27 @@ public:
private:
CGDebugInfo *DebugInfo;
- bool DisableDebugInfo;
+ bool DisableDebugInfo = false;
/// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
/// calling llvm.stacksave for multiple VLAs in the same scope.
- bool DidCallStackSave;
+ bool DidCallStackSave = false;
/// IndirectBranch - The first time an indirect goto is seen we create a block
/// with an indirect branch. Every time we see the address of a label taken,
/// we add the label to the indirect goto. Every subsequent indirect goto is
/// codegen'd as a jump to the IndirectBranch's basic block.
- llvm::IndirectBrInst *IndirectBranch;
+ llvm::IndirectBrInst *IndirectBranch = nullptr;
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
DeclMapTy LocalDeclMap;
+ // Keep track of the cleanups for callee-destructed parameters pushed to the
+ // cleanup stack so that they can be deactivated later.
+ llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
+ CalleeDestructedParamCleanups;
+
/// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
/// will contain a mapping from said ParmVarDecl to its implicit "object_size"
/// parameter.
@@ -1119,7 +1268,7 @@ private:
/// Emits exit block with special codegen procedure specific for the related
/// OpenMP construct + emits code for normal construct cleanup.
void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
- const llvm::function_ref<void(CodeGenFunction &)> &CodeGen) {
+ const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
assert(CGF.getOMPCancelDestination(Kind).isValid());
assert(CGF.HaveInsertPoint());
@@ -1207,13 +1356,13 @@ private:
/// SwitchInsn - This is nearest current switch instruction. It is null if
/// current context is not in a switch.
- llvm::SwitchInst *SwitchInsn;
+ llvm::SwitchInst *SwitchInsn = nullptr;
/// The branch weights of SwitchInsn when doing instrumentation based PGO.
- SmallVector<uint64_t, 16> *SwitchWeights;
+ SmallVector<uint64_t, 16> *SwitchWeights = nullptr;
/// CaseRangeBlock - This block holds if condition check for last case
/// statement range in current switch instruction.
- llvm::BasicBlock *CaseRangeBlock;
+ llvm::BasicBlock *CaseRangeBlock = nullptr;
/// OpaqueLValues - Keeps track of the current set of opaque value
/// expressions.
@@ -1230,13 +1379,13 @@ private:
/// A block containing a single 'unreachable' instruction. Created
/// lazily by getUnreachableBlock().
- llvm::BasicBlock *UnreachableBlock;
+ llvm::BasicBlock *UnreachableBlock = nullptr;
/// Counts of the number return expressions in the function.
- unsigned NumReturnExprs;
+ unsigned NumReturnExprs = 0;
/// Count the number of simple (constant) return expressions in the function.
- unsigned NumSimpleReturnExprs;
+ unsigned NumSimpleReturnExprs = 0;
/// The last regular (non-return) debug location (breakpoint) in the function.
SourceLocation LastStopPoint;
@@ -1356,9 +1505,9 @@ public:
private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
- ImplicitParamDecl *CXXABIThisDecl;
- llvm::Value *CXXABIThisValue;
- llvm::Value *CXXThisValue;
+ ImplicitParamDecl *CXXABIThisDecl = nullptr;
+ llvm::Value *CXXABIThisValue = nullptr;
+ llvm::Value *CXXThisValue = nullptr;
CharUnits CXXABIThisAlignment;
CharUnits CXXThisAlignment;
@@ -1376,16 +1525,16 @@ private:
/// CXXStructorImplicitParamDecl - When generating code for a constructor or
/// destructor, this will hold the implicit argument (e.g. VTT).
- ImplicitParamDecl *CXXStructorImplicitParamDecl;
- llvm::Value *CXXStructorImplicitParamValue;
+ ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
+ llvm::Value *CXXStructorImplicitParamValue = nullptr;
/// OutermostConditional - Points to the outermost active
/// conditional control. This is used so that we know if a
/// temporary should be destroyed conditionally.
- ConditionalEvaluation *OutermostConditional;
+ ConditionalEvaluation *OutermostConditional = nullptr;
/// The current lexical scope.
- LexicalScope *CurLexicalScope;
+ LexicalScope *CurLexicalScope = nullptr;
/// The current source location that should be used for exception
/// handling code.
@@ -1416,14 +1565,21 @@ private:
CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}
- llvm::BasicBlock *TerminateLandingPad;
- llvm::BasicBlock *TerminateHandler;
- llvm::BasicBlock *TrapBB;
+ llvm::BasicBlock *TerminateLandingPad = nullptr;
+ llvm::BasicBlock *TerminateHandler = nullptr;
+ llvm::BasicBlock *TrapBB = nullptr;
+
+ /// Terminate funclets keyed by parent funclet pad.
+ llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;
+
+  /// Largest vector width used in this function. Will be used to create a
+  /// function attribute.
+ unsigned LargestVectorWidth = 0;
/// True if we need emit the life-time markers.
const bool ShouldEmitLifetimeMarkers;
- /// Add OpenCL kernel arg metadata and the kernel attribute meatadata to
+ /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
/// the function metadata.
void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn);
@@ -1532,6 +1688,7 @@ public:
return false;
case QualType::DK_cxx_destructor:
case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
return getLangOpts().Exceptions;
case QualType::DK_objc_strong_lifetime:
return getLangOpts().Exceptions &&
@@ -1579,10 +1736,7 @@ public:
/// \return an LLVM value which is a pointer to a struct which contains
/// information about the block, including the block invoke function, the
/// captured variables, etc.
- /// \param InvokeF will contain the block invoke function if it is not
- /// nullptr.
- llvm::Value *EmitBlockLiteral(const BlockExpr *,
- llvm::Function **InvokeF = nullptr);
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
static void destroyBlockInfos(CGBlockInfo *info);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
@@ -1604,7 +1758,25 @@ public:
class AutoVarEmission;
void emitByrefStructureInit(const AutoVarEmission &emission);
- void enterByrefCleanup(const AutoVarEmission &emission);
+
+ /// Enter a cleanup to destroy a __block variable. Note that this
+ /// cleanup should be a no-op if the variable hasn't left the stack
+ /// yet; if a cleanup is required for the variable itself, that needs
+ /// to be done externally.
+ ///
+ /// \param Kind Cleanup kind.
+ ///
+ /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
+ /// structure that will be passed to _Block_object_dispose. When
+ /// \p LoadBlockVarAddr is true, the address of the field of the block
+ /// structure that holds the address of the __block structure.
+ ///
+ /// \param Flags The flag that will be passed to _Block_object_dispose.
+ ///
+ /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
+ /// \p Addr to get the address of the __block structure.
+ void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
+ bool LoadBlockVarAddr);
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
llvm::Value *ptr);
@@ -1627,7 +1799,7 @@ public:
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
- /// \brief Emit code for the start of a function.
+ /// Emit code for the start of a function.
/// \param Loc The location to be associated with the function.
/// \param StartLoc The location of the function body.
void StartFunction(GlobalDecl GD,
@@ -1653,7 +1825,7 @@ public:
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
void EmitAsanPrologueOrEpilogue(bool Prologue);
- /// \brief Emit the unified return block, trying to avoid its emission when
+ /// Emit the unified return block, trying to avoid its emission when
/// possible.
/// \return The debug location of the user written return statement if the
  /// return block is avoided.
@@ -1664,10 +1836,10 @@ public:
void FinishFunction(SourceLocation EndLoc=SourceLocation());
void StartThunk(llvm::Function *Fn, GlobalDecl GD,
- const CGFunctionInfo &FnInfo);
+ const CGFunctionInfo &FnInfo, bool IsUnprototyped);
- void EmitCallAndReturnForThunk(llvm::Constant *Callee,
- const ThunkInfo *Thunk);
+ void EmitCallAndReturnForThunk(llvm::Constant *Callee, const ThunkInfo *Thunk,
+ bool IsUnprototyped);
void FinishThunk();
@@ -1677,7 +1849,8 @@ public:
/// Generate a thunk for the given method.
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
- GlobalDecl GD, const ThunkInfo &Thunk);
+ GlobalDecl GD, const ThunkInfo &Thunk,
+ bool IsUnprototyped);
llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
@@ -1688,7 +1861,7 @@ public:
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
- /// Struct with all informations about dynamic [sub]class needed to set vptr.
+ /// Struct with all information about dynamic [sub]class needed to set vptr.
struct VPtr {
BaseSubobject Base;
const CXXRecordDecl *NearestVBase;
@@ -1723,9 +1896,11 @@ public:
CFITCK_DerivedCast,
CFITCK_UnrelatedCast,
CFITCK_ICall,
+ CFITCK_NVMFCall,
+ CFITCK_VMFCall,
};
- /// \brief Derived is the presumed address of an object of type T after a
+ /// Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
@@ -1775,6 +1950,10 @@ public:
/// XRay custom event handling calls.
bool AlwaysEmitXRayCustomEvents() const;
+ /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
+ /// XRay typed event handling calls.
+ bool AlwaysEmitXRayTypedEvents() const;
+
/// Encode an address into a form suitable for use in a function prologue.
llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
llvm::Constant *Addr);
@@ -1808,6 +1987,10 @@ public:
/// getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock *getTerminateLandingPad();
+  /// getTerminateFunclet - Return a cleanup funclet that just calls
+ /// terminate.
+ llvm::BasicBlock *getTerminateFunclet();
+
/// getTerminateHandler - Return a handler (not a landing pad, just
/// a catch handler) that just calls terminate. This is used when
/// a terminate scope encloses a try.
@@ -1841,11 +2024,7 @@ public:
llvm::BasicBlock *createBasicBlock(const Twine &name = "",
llvm::Function *parent = nullptr,
llvm::BasicBlock *before = nullptr) {
-#ifdef NDEBUG
- return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
-#else
return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
-#endif
}
/// getBasicBlockForLabel - Return the LLVM basicblock that the specified
@@ -1975,15 +2154,20 @@ public:
/// to the stack.
///
/// Because the address of a temporary is often exposed to the program in
- /// various ways, this function will perform the cast by default. The cast
- /// may be avoided by passing false as \p CastToDefaultAddrSpace; this is
+ /// various ways, this function will perform the cast. The original alloca
+ /// instruction is returned through \p Alloca if it is not nullptr.
+ ///
+  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
/// more efficient if the caller knows that the address will not be exposed.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr);
Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr,
- bool CastToDefaultAddrSpace = true);
+ Address *Alloca = nullptr);
+ Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr);
/// CreateDefaultAlignedTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
@@ -2018,12 +2202,18 @@ public:
Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
- /// appropriate alignment. Cast it to the default address space if
- /// \p CastToDefaultAddrSpace is true.
+  /// appropriate alignment, and cast it to the default address space. Returns
+  /// the original alloca instruction through \p Alloca if it is not nullptr.
Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
- bool CastToDefaultAddrSpace = true);
+ Address *Alloca = nullptr);
Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
- bool CastToDefaultAddrSpace = true);
+ Address *Alloca = nullptr);
+
+  /// CreateMemTempWithoutCast - Create a temporary memory object of the given
+  /// type, with appropriate alignment, without casting it to the default
+  /// address space.
+ Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
+ Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
+ const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
@@ -2032,7 +2222,8 @@ public:
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap);
}
/// Emit a cast to void* in the appropriate address space.
@@ -2089,31 +2280,52 @@ public:
}
return false;
}
- /// EmitAggregateCopy - Emit an aggregate assignment.
- ///
- /// The difference to EmitAggregateCopy is that tail padding is not copied.
- /// This is required for correctness when assigning non-POD structures in C++.
- void EmitAggregateAssign(Address DestPtr, Address SrcPtr,
- QualType EltTy) {
+
+ /// Determine whether a return value slot may overlap some other object.
+ AggValueSlot::Overlap_t overlapForReturnValue() {
+ // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
+ // class subobjects. These cases may need to be revisited depending on the
+ // resolution of the relevant core issue.
+ return AggValueSlot::DoesNotOverlap;
+ }
+
+ /// Determine whether a field initialization may overlap some other object.
+ AggValueSlot::Overlap_t overlapForFieldInit(const FieldDecl *FD) {
+ // FIXME: These cases can result in overlap as a result of P0840R0's
+ // [[no_unique_address]] attribute. We can still infer NoOverlap in the
+ // presence of that attribute if the field is within the nvsize of its
+ // containing class, because non-virtual subobjects are initialized in
+ // address order.
+ return AggValueSlot::DoesNotOverlap;
+ }
+
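
A hypothetical example of the overlap the FIXME anticipates: with P0840R0's [[no_unique_address]], a later member may be laid out in a preceding field's tail padding, so copying that field's full size could clobber its neighbor.

    struct A { int i; char c; };   // 3 bytes of tail padding
    struct B {
      [[no_unique_address]] A a;
      char reused[3];              // may occupy a's tail padding
    };
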
+ /// Determine whether a base class initialization may overlap some other
+ /// object.
+ AggValueSlot::Overlap_t overlapForBaseInit(const CXXRecordDecl *RD,
+ const CXXRecordDecl *BaseRD,
+ bool IsVirtual);
+
+ /// Emit an aggregate assignment.
+ void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
bool IsVolatile = hasVolatileMember(EltTy);
- EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, true);
+ EmitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
}
- void EmitAggregateCopyCtor(Address DestPtr, Address SrcPtr,
- QualType DestTy, QualType SrcTy) {
- EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
- /*IsAssignment=*/false);
+ void EmitAggregateCopyCtor(LValue Dest, LValue Src,
+ AggValueSlot::Overlap_t MayOverlap) {
+ EmitAggregateCopy(Dest, Src, Src.getType(), MayOverlap);
}
/// EmitAggregateCopy - Emit an aggregate copy.
///
- /// \param isVolatile - True iff either the source or the destination is
- /// volatile.
- /// \param isAssignment - If false, allow padding to be copied. This often
- /// yields more efficient.
- void EmitAggregateCopy(Address DestPtr, Address SrcPtr,
- QualType EltTy, bool isVolatile=false,
- bool isAssignment = false);
+ /// \param isVolatile \c true iff either the source or the destination is
+ /// volatile.
+ /// \param MayOverlap Whether the tail padding of the destination might be
+ /// occupied by some other object. More efficient code can often be
+ /// generated if not.
+ void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
+ AggValueSlot::Overlap_t MayOverlap,
+ bool isVolatile = false);
/// GetAddrOfLocalVar - Return the address of a local variable.
Address GetAddrOfLocalVar(const VarDecl *VD) {
@@ -2123,27 +2335,13 @@ public:
return it->second;
}
- /// getOpaqueLValueMapping - Given an opaque value expression (which
- /// must be mapped to an l-value), return its mapping.
- const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
- assert(OpaqueValueMapping::shouldBindAsLValue(e));
+ /// Given an opaque value expression, return its LValue mapping if it exists,
+ /// otherwise create one.
+ LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e);
- llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
- it = OpaqueLValues.find(e);
- assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
- return it->second;
- }
-
- /// getOpaqueRValueMapping - Given an opaque value expression (which
- /// must be mapped to an r-value), return its mapping.
- const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
- assert(!OpaqueValueMapping::shouldBindAsLValue(e));
-
- llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
- it = OpaqueRValues.find(e);
- assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
- return it->second;
- }
+ /// Given an opaque value expression, return its RValue mapping if it exists,
+ /// otherwise create one.
+ RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e);
/// Get the index of the current ArrayInitLoopExpr, if any.
llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
@@ -2193,12 +2391,24 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitVariablyModifiedType(QualType Ty);
- /// getVLASize - Returns an LLVM value that corresponds to the size,
+ struct VlaSizePair {
+ llvm::Value *NumElts;
+ QualType Type;
+
+ VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
+ };
+
+ /// Return the number of elements for a single dimension
+  /// of the given array type.
+ VlaSizePair getVLAElements1D(const VariableArrayType *vla);
+ VlaSizePair getVLAElements1D(QualType vla);
+
+ /// Returns an LLVM value that corresponds to the size,
/// in non-variably-sized elements, of a variable length array type,
  /// plus the largest non-variably-sized element type. Assumes that
/// the type has already been emitted with EmitVariablyModifiedType.
- std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
- std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
+ VlaSizePair getVLASize(const VariableArrayType *vla);
+ VlaSizePair getVLASize(QualType vla);
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for an C++ member function.
@@ -2279,11 +2489,14 @@ public:
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- Address This, const CXXConstructExpr *E);
+ Address This, const CXXConstructExpr *E,
+ AggValueSlot::Overlap_t Overlap);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
- Address This, CallArgList &Args);
+ Address This, CallArgList &Args,
+ AggValueSlot::Overlap_t Overlap,
+ SourceLocation Loc);
/// Emit assumption load for all bases. Requires to be be called only on
/// most-derived class and not under construction of the object.
@@ -2333,13 +2546,13 @@ public:
CharUnits CookieSize = CharUnits());
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
- const Expr *Arg, bool IsDelete);
+ const CallExpr *TheCallExpr, bool IsDelete);
llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
- /// \brief Situations in which we might emit a check for the suitability of a
+ /// Situations in which we might emit a check for the suitability of a
/// pointer or glvalue.
enum TypeCheckKind {
/// Checking the operand of a load. Must be suitably sized and aligned.
@@ -2383,17 +2596,17 @@ public:
/// Determine whether the pointer type check \p TCK requires a vptr check.
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);
- /// \brief Whether any type-checking sanitizers are enabled. If \c false,
+ /// Whether any type-checking sanitizers are enabled. If \c false,
/// calls to EmitTypeCheck can be skipped.
bool sanitizePerformTypeCheck() const;
- /// \brief Emit a check that \p V is the address of storage of the
+ /// Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
QualType Type, CharUnits Alignment = CharUnits::Zero(),
SanitizerSet SkippedChecks = SanitizerSet());
- /// \brief Emit a check that \p Base points into an array object, which
+ /// Emit a check that \p Base points into an array object, which
  /// we can access at index \p Index. \p Accessed should be \c false if
  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
@@ -2434,7 +2647,7 @@ public:
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
- /// \brief Determine whether the given initializer is trivial in the sense
+ /// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool isTrivialInitializer(const Expr *Init);
@@ -2448,7 +2661,9 @@ public:
const VarDecl *Variable;
- /// The address of the alloca. Invalid if the variable was emitted
+    /// The address of the alloca for languages with an explicit address
+    /// space (e.g. OpenCL), or the alloca cast to a generic pointer for
+    /// address-space-agnostic languages (e.g. C++). Invalid if the variable was emitted
/// as a global constant.
Address Addr;
@@ -2464,13 +2679,19 @@ public:
/// Non-null if we should use lifetime annotations.
llvm::Value *SizeForLifetimeMarkers;
+ /// Address with original alloca instruction. Invalid if the variable was
+ /// emitted as a global constant.
+ Address AllocaAddr;
+
struct Invalid {};
- AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {}
+ AutoVarEmission(Invalid)
+ : Variable(nullptr), Addr(Address::invalid()),
+ AllocaAddr(Address::invalid()) {}
AutoVarEmission(const VarDecl &variable)
- : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
- IsByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr) {}
+ : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
+ IsByRef(false), IsConstantAggregate(false),
+ SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -2486,11 +2707,15 @@ public:
}
/// Returns the raw, allocated address, which is not necessarily
- /// the address of the object itself.
+    /// the address of the object itself. It is cast to the default
+    /// address space for address-space-agnostic languages.
Address getAllocatedAddress() const {
return Addr;
}
+ /// Returns the address for the original alloca instruction.
+ Address getOriginalAllocatedAddress() const { return AllocaAddr; }
+
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
@@ -2506,6 +2731,15 @@ public:
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
QualType::DestructionKind dtorKind);
+ /// Emits the alloca and debug information for the size expressions for each
+  /// dimension of an array. It registers the association between each
+  /// (1-dimensional) QualType and its size expression's debug node, so
+  /// that CGDebugInfo can
+ /// reference this node when creating the DISubrange object to describe the
+ /// array types.
+ void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
+ const VarDecl &D,
+ bool EmitDebugInfo);
+
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
@@ -2655,6 +2889,9 @@ public:
llvm::Value *EmitSEHExceptionInfo();
llvm::Value *EmitSEHAbnormalTermination();
+ /// Emit simple code for OpenMP directives in Simd-only mode.
+ void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);
+
/// Scan the outlined statement for captures from the parent function. For
/// each capture, mark the capture as escaped and emit a call to
/// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
@@ -2697,7 +2934,7 @@ public:
SmallVectorImpl<llvm::Value *> &CapturedVars);
void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
SourceLocation Loc);
- /// \brief Perform element by element copying of arrays with type \a
+ /// Perform element by element copying of arrays with type \a
/// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
/// generated by \a CopyGen.
///
@@ -2708,8 +2945,8 @@ public:
/// to another single array element.
void EmitOMPAggregateAssign(
Address DestAddr, Address SrcAddr, QualType OriginalType,
- const llvm::function_ref<void(Address, Address)> &CopyGen);
- /// \brief Emit proper copying of data from one variable to another.
+ const llvm::function_ref<void(Address, Address)> CopyGen);
+ /// Emit proper copying of data from one variable to another.
///
/// \param OriginalType Original type of the copied variables.
/// \param DestAddr Destination address.
@@ -2724,7 +2961,7 @@ public:
Address DestAddr, Address SrcAddr,
const VarDecl *DestVD, const VarDecl *SrcVD,
const Expr *Copy);
- /// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
+ /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
/// \a X = \a E \a BO \a E.
///
/// \param X Value to be updated.
@@ -2740,7 +2977,7 @@ public:
std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
- const llvm::function_ref<RValue(RValue)> &CommonGen);
+ const llvm::function_ref<RValue(RValue)> CommonGen);
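For illustration, the two source-level shapes this handles (hypothetical user code):

    #pragma omp atomic update
    x = x + e; // X = X BO E
    #pragma omp atomic update
    x = e + x; // X = E BO X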
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
@@ -2748,7 +2985,7 @@ public:
void EmitOMPUseDevicePtrClause(
const OMPClause &C, OMPPrivateScope &PrivateScope,
const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
- /// \brief Emit code for copyin clause in \a D directive. The next code is
+ /// Emit code for the copyin clause in directive \a D. The following code is
/// generated at the start of outlined functions for directives:
/// \code
/// threadprivate_var1 = master_threadprivate_var1;
@@ -2760,7 +2997,7 @@ public:
/// \param D OpenMP directive possibly with 'copyin' clause(s).
/// \returns true if at least one copyin variable is found, false otherwise.
bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
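Illustrative source that reaches this path (sketch; use() is a placeholder):

    int gv;
    #pragma omp threadprivate(gv)
    void work() {
      #pragma omp parallel copyin(gv) // each thread's copy of gv is
      use(gv);                        // initialized from the master's copy
    }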
- /// \brief Emit initial code for lastprivate variables. If some variable is
+ /// Emit initial code for lastprivate variables. If some variable is
/// not also firstprivate, then the default initialization is used. Otherwise
/// initialization of this variable is performed by the
/// EmitOMPFirstprivateClause method.
@@ -2773,7 +3010,7 @@ public:
/// otherwise.
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
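Illustrative source (f is a placeholder):

    #pragma omp parallel for lastprivate(x)
    for (int i = 0; i < n; ++i)
      x = f(i); // after the loop, the original x holds the value from the
                // sequentially last iteration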
- /// \brief Emit final copying of lastprivate values to original variables at
+ /// Emit final copying of lastprivate values to original variables at
/// the end of the worksharing or simd directive.
///
/// \param D Directive that has at least one 'lastprivate' clause.
@@ -2791,8 +3028,8 @@ public:
/// linear clause.
void EmitOMPLinearClauseFinal(
const OMPLoopDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen);
- /// \brief Emit initial code for reduction variables. Creates reduction copies
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
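Illustrative source for a linear variable (sketch): p advances by its step on every iteration, and its final value is copied back to the original variable after the loop:

    #pragma omp simd linear(p : 1)
    for (int i = 0; i < n; ++i)
      *p++ = i;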
+ /// Emit initial code for reduction variables. Creates reduction copies
/// and initializes them with the values according to the OpenMP standard.
///
/// \param D Directive (possibly) with the 'reduction' clause.
@@ -2801,14 +3038,14 @@ public:
///
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
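Illustrative source:

    int sum = 0;
    #pragma omp parallel for reduction(+ : sum)
    for (int i = 0; i < n; ++i)
      sum += a[i]; // each thread works on a private copy initialized to 0;
                   // the copies are combined into sum at the end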
- /// \brief Emit final update of reduction values to original variables at
+ /// Emit final update of reduction values to original variables at
/// the end of the directive.
///
/// \param D Directive that has at least one 'reduction' clause.
/// \param ReductionKind The kind of reduction to perform.
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
const OpenMPDirectiveKind ReductionKind);
- /// \brief Emit initial code for linear variables. Creates private copies
+ /// Emit initial code for linear variables. Creates private copies
/// and initializes them with the values according to the OpenMP standard.
///
/// \param D Directive (possibly) with the 'linear' clause.
@@ -2821,6 +3058,7 @@ public:
const OMPTaskDataTy & /*Data*/)>
TaskGenTy;
void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
+ const OpenMPDirectiveKind CapturedRegion,
const RegionCodeGenTy &BodyGen,
const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
struct OMPTargetDataInfo {
@@ -2930,7 +3168,16 @@ public:
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
StringRef ParentName,
const OMPTargetSimdDirective &S);
- /// \brief Emit inner loop of the worksharing/simd construct.
+ /// Emit device code for the target teams distribute parallel for simd
+ /// directive.
+ static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDistributeParallelForSimdDirective &S);
+
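+ /// Emit device code for the target teams distribute parallel for
+ /// directive.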
+ static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsDistributeParallelForDirective &S);
+ /// Emit inner loop of the worksharing/simd construct.
///
/// \param S Directive, for which the inner loop must be emitted.
/// \param RequiresCleanup true if the directive has some associated private
@@ -2943,8 +3190,8 @@ public:
void EmitOMPInnerLoop(
const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
- const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
- const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen);
+ const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
+ const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
/// Emit initial code for loop counters of loop-based directives.
@@ -2954,7 +3201,7 @@ public:
/// Helper for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
- /// \brief Emit code for the worksharing loop-based directive.
+ /// Emit code for the worksharing loop-based directive.
/// \return true if this construct has any lastprivate clause, false
/// otherwise.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
@@ -2969,17 +3216,14 @@ public:
void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
void EmitOMPSimdFinal(
const OMPLoopDirective &D,
- const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen);
+ const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
/// Emits the lvalue for the expression with possibly captured variable.
LValue EmitOMPSharedLValue(const Expr *E);
private:
- /// Helpers for blocks. Returns invoke function by \p InvokeF if it is not
- /// nullptr. It should be called without \p InvokeF if the caller does not
- /// need invoke function to be returned.
- llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info,
- llvm::Function **InvokeF = nullptr);
+ /// Helpers for blocks.
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
/// struct with the values to be passed to the OpenMP loop-related functions
struct OMPLoopArguments {
@@ -3030,7 +3274,7 @@ private:
OMPPrivateScope &LoopScope,
const OMPLoopArguments &LoopArgs,
const CodeGenLoopTy &CodeGenLoopContent);
- /// \brief Emit code for sections directive.
+ /// Emit code for sections directive.
void EmitSections(const OMPExecutableDirective &S);
public:
@@ -3071,7 +3315,7 @@ public:
///
LValue EmitLValue(const Expr *E);
- /// \brief Same as EmitLValue but additionally we generate checking code to
+ /// Same as EmitLValue but additionally we generate checking code to
/// guard against undefined behavior. This is only suitable when we know
/// that the address will be used to access the object.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
@@ -3332,6 +3576,9 @@ public:
ArrayRef<llvm::Value*> args,
const Twine &name = "");
+ SmallVector<llvm::OperandBundleDef, 1>
+ getBundlesForFunclet(llvm::Value *Callee);
+
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name = "");
@@ -3351,6 +3598,16 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
+ // These functions emit calls to the special functions of non-trivial C
+ // structs.
+ void defaultInitNonTrivialCStructVar(LValue Dst);
+ void callCStructDefaultConstructor(LValue Dst);
+ void callCStructDestructor(LValue Dst);
+ void callCStructCopyConstructor(LValue Dst, LValue Src);
+ void callCStructMoveConstructor(LValue Dst, LValue Src);
+ void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
+ void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
+
RValue
EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
const CGCallee &Callee,
@@ -3424,6 +3681,10 @@ public:
SmallVectorImpl<llvm::Value *> &Ops,
Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch);
+
+ llvm::Value *EmitISOVolatileLoad(const CallExpr *E);
+ llvm::Value *EmitISOVolatileStore(const CallExpr *E);
+
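These lower MSVC-compatible builtins; an illustrative use (builtin names per the MS extension, g is a placeholder):

    int v = __iso_volatile_load32(&g);  // volatile-ordered access even when
    __iso_volatile_store32(&g, v + 1);  // the operand is not declared
                                        // volatile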
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
@@ -3482,6 +3743,8 @@ public:
llvm::Value *EmitARCLoadWeak(Address addr);
llvm::Value *EmitARCLoadWeakRetained(Address addr);
llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
+ void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
+ void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
void EmitARCCopyWeak(Address dst, Address src);
void EmitARCMoveWeak(Address dst, Address src);
llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
@@ -3525,6 +3788,7 @@ public:
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
static Destroyer emitARCIntrinsicUse;
+ static Destroyer destroyNonTrivialCStruct;
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
llvm::Value *EmitObjCAutoreleasePoolPush();
@@ -3532,7 +3796,7 @@ public:
void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
- /// \brief Emits a reference binding to the passed in expression.
+ /// Emits a reference binding to the passed in expression.
RValue EmitReferenceBindingToExpr(const Expr *E);
//===--------------------------------------------------------------------===//
@@ -3610,6 +3874,9 @@ public:
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
llvm::Constant *addr);
+ /// Call atexit() with function dtorStub.
+ void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
+
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
/// possible to prove that an initialization will be done exactly
@@ -3746,26 +4013,26 @@ public:
/// enabled, a runtime check specified by \p Kind is also emitted.
llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
- /// \brief Emit a description of a type in a format suitable for passing to
+ /// Emit a description of a type in a format suitable for passing to
/// a runtime sanitizer handler.
llvm::Constant *EmitCheckTypeDescriptor(QualType T);
- /// \brief Convert a value into a format suitable for passing to a runtime
+ /// Convert a value into a format suitable for passing to a runtime
/// sanitizer handler.
llvm::Value *EmitCheckValue(llvm::Value *V);
- /// \brief Emit a description of a source location in a format suitable for
+ /// Emit a description of a source location in a format suitable for
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
- /// \brief Create a basic block that will call a handler function in a
+ /// Create a basic block that will call a handler function in a
/// sanitizer runtime with the provided arguments, and create a conditional
/// branch to it.
void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs);
- /// \brief Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
+ /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
/// if Cond is false.
void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
llvm::ConstantInt *TypeId, llvm::Value *Ptr,
@@ -3775,21 +4042,21 @@ public:
/// checking is enabled. Otherwise, just emit an unreachable instruction.
void EmitUnreachable(SourceLocation Loc);
- /// \brief Create a basic block that will call the trap intrinsic, and emit a
+ /// Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
void EmitTrapCheck(llvm::Value *Checked);
- /// \brief Emit a call to trap or debugtrap and attach function attribute
+ /// Emit a call to trap or debugtrap and attach function attribute
/// "trap-func-name" if specified.
llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
- /// \brief Emit a stub for the cross-DSO CFI check function.
+ /// Emit a stub for the cross-DSO CFI check function.
void EmitCfiCheckStub();
- /// \brief Emit a cross-DSO CFI failure handling function.
+ /// Emit a cross-DSO CFI failure handling function.
void EmitCfiCheckFail();
- /// \brief Create a check for a function parameter that may potentially be
+ /// Create a check for a function parameter that may potentially be
/// declared as non-null.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
AbstractCallee AC, unsigned ParmNum);
@@ -3829,10 +4096,10 @@ private:
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
SmallVectorImpl<llvm::Value *>::iterator &AI);
- /// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
+ /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
/// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
- void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
@@ -3844,7 +4111,7 @@ private:
std::string &ConstraintStr,
SourceLocation Loc);
- /// \brief Attempts to statically evaluate the object size of E. If that
+ /// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
/// pass_object_size aware.
///
@@ -3853,7 +4120,7 @@ private:
llvm::IntegerType *ResType,
llvm::Value *EmittedE);
- /// \brief Emits the size of E, as required by __builtin_object_size. This
+ /// Emits the size of E, as required by __builtin_object_size. This
/// function is aware of pass_object_size parameters, and will act accordingly
/// if E is a parameter with the pass_object_size attribute.
llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
@@ -3973,6 +4240,48 @@ public:
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
+ struct TargetMultiVersionResolverOption {
+ llvm::Function *Function;
+ TargetAttr::ParsedTargetAttr ParsedAttribute;
+ unsigned Priority;
+ TargetMultiVersionResolverOption(
+ const TargetInfo &TargInfo, llvm::Function *F,
+ const clang::TargetAttr::ParsedTargetAttr &PT)
+ : Function(F), ParsedAttribute(PT), Priority(0u) {
+ for (StringRef Feat : PT.Features)
+ Priority = std::max(Priority,
+ TargInfo.multiVersionSortPriority(Feat.substr(1)));
+
+ if (!PT.Architecture.empty())
+ Priority = std::max(Priority,
+ TargInfo.multiVersionSortPriority(PT.Architecture));
+ }
+
+ bool operator>(const TargetMultiVersionResolverOption &Other) const {
+ return Priority > Other.Priority;
+ }
+ };
+ void EmitTargetMultiVersionResolver(
+ llvm::Function *Resolver,
+ ArrayRef<TargetMultiVersionResolverOption> Options);
+
+ struct CPUDispatchMultiVersionResolverOption {
+ llvm::Function *Function;
+ // Note: EmitX86CPUSupports only has 32 bits available, so we store the mask
+ // as 32 bits here. When 64-bit support is added to __builtin_cpu_supports,
+ // this can be extended to 64 bits.
+ uint32_t FeatureMask;
+ CPUDispatchMultiVersionResolverOption(llvm::Function *F, uint64_t Mask)
+ : Function(F), FeatureMask(static_cast<uint32_t>(Mask)) {}
+ bool operator>(const CPUDispatchMultiVersionResolverOption &Other) const {
+ return FeatureMask > Other.FeatureMask;
+ }
+ };
+ void EmitCPUDispatchMultiVersionResolver(
+ llvm::Function *Resolver,
+ ArrayRef<CPUDispatchMultiVersionResolverOption> Options);
+ static uint32_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
+
private:
QualType getVarArgType(const Expr *Arg);
@@ -3988,110 +4297,35 @@ private:
llvm::Value *EmitX86CpuIs(StringRef CPUStr);
llvm::Value *EmitX86CpuSupports(const CallExpr *E);
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
+ llvm::Value *EmitX86CpuSupports(uint32_t Mask);
llvm::Value *EmitX86CpuInit();
+ llvm::Value *
+ FormResolverCondition(const TargetMultiVersionResolverOption &RO);
};
-/// Helper class with most of the code for saving a value for a
-/// conditional expression cleanup.
-struct DominatingLLVMValue {
- typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
-
- /// Answer whether the given value needs extra work to be saved.
- static bool needsSaving(llvm::Value *value) {
- // If it's not an instruction, we don't need to save.
- if (!isa<llvm::Instruction>(value)) return false;
-
- // If it's an instruction in the entry block, we don't need to save.
- llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
- return (block != &block->getParent()->getEntryBlock());
- }
-
- /// Try to save the given value.
- static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
- if (!needsSaving(value)) return saved_type(value, false);
-
- // Otherwise, we need an alloca.
- auto align = CharUnits::fromQuantity(
- CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
- Address alloca =
- CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
- CGF.Builder.CreateStore(value, alloca);
-
- return saved_type(alloca.getPointer(), true);
- }
-
- static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
- // If the value says it wasn't saved, trust that it's still dominating.
- if (!value.getInt()) return value.getPointer();
-
- // Otherwise, it should be an alloca instruction, as set up in save().
- auto alloca = cast<llvm::AllocaInst>(value.getPointer());
- return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
- }
-};
-
-/// A partial specialization of DominatingValue for llvm::Values that
-/// might be llvm::Instructions.
-template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
- typedef T *type;
- static type restore(CodeGenFunction &CGF, saved_type value) {
- return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
- }
-};
-
-/// A specialization of DominatingValue for Address.
-template <> struct DominatingValue<Address> {
- typedef Address type;
-
- struct saved_type {
- DominatingLLVMValue::saved_type SavedValue;
- CharUnits Alignment;
- };
-
- static bool needsSaving(type value) {
- return DominatingLLVMValue::needsSaving(value.getPointer());
- }
- static saved_type save(CodeGenFunction &CGF, type value) {
- return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getAlignment() };
- }
- static type restore(CodeGenFunction &CGF, saved_type value) {
- return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.Alignment);
- }
-};
-
-/// A specialization of DominatingValue for RValue.
-template <> struct DominatingValue<RValue> {
- typedef RValue type;
- class saved_type {
- enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
- AggregateAddress, ComplexAddress };
+inline DominatingLLVMValue::saved_type
+DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
- llvm::Value *Value;
- unsigned K : 3;
- unsigned Align : 29;
- saved_type(llvm::Value *v, Kind k, unsigned a = 0)
- : Value(v), K(k), Align(a) {}
+ // Otherwise, we need an alloca.
+ auto align = CharUnits::fromQuantity(
+ CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ Address alloca =
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
- public:
- static bool needsSaving(RValue value);
- static saved_type save(CodeGenFunction &CGF, RValue value);
- RValue restore(CodeGenFunction &CGF);
+ return saved_type(alloca.getPointer(), true);
+}
- // implementations in CGCleanup.cpp
- };
+inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
+ saved_type value) {
+ // If the value says it wasn't saved, trust that it's still dominating.
+ if (!value.getInt()) return value.getPointer();
- static bool needsSaving(type value) {
- return saved_type::needsSaving(value);
- }
- static saved_type save(CodeGenFunction &CGF, type value) {
- return saved_type::save(CGF, value);
- }
- static type restore(CodeGenFunction &CGF, saved_type value) {
- return value.restore(CGF);
- }
-};
+ // Otherwise, it should be an alloca instruction, as set up in save().
+ auto alloca = cast<llvm::AllocaInst>(value.getPointer());
+ return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
+}
} // end namespace CodeGen
} // end namespace clang
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 5bdf81aaf66e..ecdf78d4b347 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -123,7 +123,6 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
- BuiltinCC = getTargetCodeGenInfo().getABIInfo().getBuiltinCC();
if (LangOpts.ObjC1)
createObjCRuntime();
@@ -208,7 +207,10 @@ void CodeGenModule::createOpenMPRuntime() {
OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
break;
default:
- OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
+ if (LangOpts.OpenMPSimd)
+ OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
+ else
+ OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
break;
}
}
@@ -392,26 +394,29 @@ void CodeGenModule::Release() {
applyGlobalValReplacements();
applyReplacements();
checkAliases();
+ emitMultiVersionFunctions();
EmitCXXGlobalInitFunc();
EmitCXXGlobalDtorFunc();
+ registerGlobalDtorsWithAtExit();
EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
CUDARuntime) {
- if (llvm::Function *CudaCtorFunction = CUDARuntime->makeModuleCtorFunction())
+ if (llvm::Function *CudaCtorFunction =
+ CUDARuntime->makeModuleCtorFunction())
AddGlobalCtor(CudaCtorFunction);
- if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
- AddGlobalDtor(CudaDtorFunction);
}
- if (OpenMPRuntime)
+ if (OpenMPRuntime) {
if (llvm::Function *OpenMPRegistrationFunction =
OpenMPRuntime->emitRegistrationFunction()) {
auto ComdatKey = OpenMPRegistrationFunction->hasComdat() ?
OpenMPRegistrationFunction : nullptr;
AddGlobalCtor(OpenMPRegistrationFunction, 0, ComdatKey);
}
+ OpenMPRuntime->clear();
+ }
if (PGOReader) {
getModule().setProfileSummary(PGOReader->getSummary().getMD(VMContext));
if (PGOStats.hasDiagnostics())
@@ -453,6 +458,10 @@ void CodeGenModule::Release() {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
}
+ if (CodeGenOpts.ControlFlowGuard) {
+ // We want function ID tables for Control Flow Guard.
+ getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
+ }
if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
// We don't support mixing LTO units with different StrictVTablePointers
// FIXME: we could support it by stripping all the information introduced
@@ -498,12 +507,26 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
}
+ if (CodeGenOpts.CFProtectionReturn &&
+ Target.checkCFProtectionReturnSupported(getDiags())) {
+ // Indicate that we want to instrument return control flow protection.
+ getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
+ 1);
+ }
+
+ if (CodeGenOpts.CFProtectionBranch &&
+ Target.checkCFProtectionBranchSupported(getDiags())) {
+ // Indicate that we want to instrument branch control flow protection.
+ getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
+ 1);
+ }
+
if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
// Indicate whether __nvvm_reflect should be configured to flush denormal
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
// property.)
getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
- LangOpts.CUDADeviceFlushDenormalsToZero ? 1 : 0);
+ CodeGenOpts.FlushDenorm ? 1 : 0);
}
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
@@ -533,6 +556,9 @@ void CodeGenModule::Release() {
getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
}
+ if (CodeGenOpts.NoPLT)
+ getModule().setRtLibUseGOT();
+
SimplifyPersonality();
if (getCodeGenOpts().EmitDeclMetadata)
@@ -544,7 +570,8 @@ void CodeGenModule::Release() {
if (DebugInfo)
DebugInfo->finalize();
- EmitVersionIdentMetadata();
+ if (getCodeGenOpts().EmitVersionIdentMetadata)
+ EmitVersionIdentMetadata();
EmitTargetMetadata();
}
@@ -580,13 +607,9 @@ llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
}
TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
- // Pointee values may have incomplete types, but they shall never be
- // dereferenced.
- if (AccessType->isIncompleteType())
- return TBAAAccessInfo::getIncompleteInfo();
-
- uint64_t Size = Context.getTypeSizeInChars(AccessType).getQuantity();
- return TBAAAccessInfo(getTBAATypeInfo(AccessType), Size);
+ if (!TBAA)
+ return TBAAAccessInfo();
+ return TBAA->getAccessInfo(AccessType);
}
TBAAAccessInfo
@@ -629,6 +652,14 @@ CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
}
+TBAAAccessInfo
+CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
+ TBAAAccessInfo SrcInfo) {
+ if (!TBAA)
+ return TBAAAccessInfo();
+ return TBAA->mergeTBAAInfoForMemoryTransfer(DestInfo, SrcInfo);
+}
+
void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
TBAAAccessInfo TBAAInfo) {
if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
@@ -670,21 +701,129 @@ llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
}
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
- const NamedDecl *D,
- ForDefinition_t IsForDefinition) const {
+ const NamedDecl *D) const {
+ if (GV->hasDLLImportStorageClass())
+ return;
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
return;
}
-
+ if (!D)
+ return;
// Set visibility for definitions.
LinkageInfo LV = D->getLinkageAndVisibility();
- if (LV.isVisibilityExplicit() ||
- (IsForDefinition && !GV->hasAvailableExternallyLinkage()))
+ if (LV.isVisibilityExplicit() || !GV->isDeclarationForLinker())
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
+static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
+ llvm::GlobalValue *GV) {
+ if (GV->hasLocalLinkage())
+ return true;
+
+ if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
+ return true;
+
+ // DLLImport explicitly marks the GV as external.
+ if (GV->hasDLLImportStorageClass())
+ return false;
+
+ const llvm::Triple &TT = CGM.getTriple();
+ // Every other GV is local on COFF.
+ // Make an exception for Windows OS in the triple: some firmware builds use
+ // *-win32-macho triples. This (accidentally?) produced Windows relocations
+ // without GOT tables in older clang versions; keep this behaviour.
+ // FIXME: even thread local variables?
+ if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
+ return true;
+
+ // Only handle COFF and ELF for now.
+ if (!TT.isOSBinFormatELF())
+ return false;
+
+ // If this is not an executable, don't assume anything is local.
+ const auto &CGOpts = CGM.getCodeGenOpts();
+ llvm::Reloc::Model RM = CGOpts.RelocationModel;
+ const auto &LOpts = CGM.getLangOpts();
+ if (RM != llvm::Reloc::Static && !LOpts.PIE)
+ return false;
+
+ // A definition cannot be preempted from an executable.
+ if (!GV->isDeclarationForLinker())
+ return true;
+
+ // Most PIC code sequences that assume that a symbol is local cannot produce
+ // a 0 if it turns out the symbol is undefined. While this is ABI- and
+ // relocation-dependent, it seems worth handling here.
+ if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
+ return false;
+
+ // PPC has no copy relocations and cannot use a plt entry as a symbol address.
+ llvm::Triple::ArchType Arch = TT.getArch();
+ if (Arch == llvm::Triple::ppc || Arch == llvm::Triple::ppc64 ||
+ Arch == llvm::Triple::ppc64le)
+ return false;
+
+ // If we can use copy relocations we can assume it is local.
+ if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
+ if (!Var->isThreadLocal() &&
+ (RM == llvm::Reloc::Static || CGOpts.PIECopyRelocations))
+ return true;
+
+ // If we can use a plt entry as the symbol address we can assume it
+ // is local.
+ // FIXME: This should work for PIE, but the gold linker doesn't support it.
+ if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
+ return true;
+
+ // Otherwise don't assume it is local.
+ return false;
+}
+
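Illustrative consequence (sketch, not from the patch), when compiling an executable with the static relocation model:

    int def = 0;     // a definition cannot be preempted -> assumed dso_local
    extern int decl; // a declaration is only assumed local if a copy
                     // relocation or a PLT entry can stand in for it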
+void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
+ GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
+}
+
+void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
+ GlobalDecl GD) const {
+ const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
+ // C++ destructors have a few C++ ABI specific special cases.
+ if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
+ getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
+ return;
+ }
+ setDLLImportDLLExport(GV, D);
+}
+
+void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
+ if (D && D->isExternallyVisible()) {
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
+ }
+}
+
+void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
+ GlobalDecl GD) const {
+ setDLLImportDLLExport(GV, GD);
+ setGlobalVisibilityAndLocal(GV, dyn_cast<NamedDecl>(GD.getDecl()));
+}
+
+void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
+ setDLLImportDLLExport(GV, D);
+ setGlobalVisibilityAndLocal(GV, D);
+}
+
+void CodeGenModule::setGlobalVisibilityAndLocal(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
+ setGlobalVisibility(GV, D);
+ setDSOLocal(GV);
+}
+
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
.Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
@@ -722,36 +861,68 @@ void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
GV->setThreadLocalMode(TLM);
}
-StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
- GlobalDecl CanonicalGD = GD.getCanonicalDecl();
+static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
+ StringRef Name) {
+ const TargetInfo &Target = CGM.getTarget();
+ return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
+}
- // Some ABIs don't have constructor variants. Make sure that base and
- // complete constructors get mangled the same.
- if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
- if (!getTarget().getCXXABI().hasConstructorVariants()) {
- CXXCtorType OrigCtorType = GD.getCtorType();
- assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
- if (OrigCtorType == Ctor_Base)
- CanonicalGD = GlobalDecl(CD, Ctor_Complete);
- }
+static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
+ const CPUSpecificAttr *Attr,
+ raw_ostream &Out) {
+ // cpu_specific gets the current name, dispatch gets the resolver.
+ if (Attr)
+ Out << getCPUSpecificMangling(CGM, Attr->getCurCPUName()->getName());
+ else
+ Out << ".resolver";
+}
+
+static void AppendTargetMangling(const CodeGenModule &CGM,
+ const TargetAttr *Attr, raw_ostream &Out) {
+ if (Attr->isDefaultVersion())
+ return;
+
+ Out << '.';
+ const TargetInfo &Target = CGM.getTarget();
+ TargetAttr::ParsedTargetAttr Info =
+ Attr->parse([&Target](StringRef LHS, StringRef RHS) {
+ // Multiversioning doesn't allow "no-${feature}", so we can
+ // only have "+" prefixes here.
+ assert(LHS.startswith("+") && RHS.startswith("+") &&
+ "Features should always have a prefix.");
+ return Target.multiVersionSortPriority(LHS.substr(1)) >
+ Target.multiVersionSortPriority(RHS.substr(1));
+ });
+
+ bool IsFirst = true;
+
+ if (!Info.Architecture.empty()) {
+ IsFirst = false;
+ Out << "arch_" << Info.Architecture;
}
- auto FoundName = MangledDeclNames.find(CanonicalGD);
- if (FoundName != MangledDeclNames.end())
- return FoundName->second;
+ for (StringRef Feat : Info.Features) {
+ if (!IsFirst)
+ Out << '_';
+ IsFirst = false;
+ Out << Feat.substr(1);
+ }
+}
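Working through the scheme above (illustrative declarations; the exact feature order follows multiVersionSortPriority):

    // __attribute__((target("arch=sandybridge"))) int foo(); -> foo.arch_sandybridge
    // __attribute__((target("avx2,bmi")))         int foo(); -> e.g. foo.avx2_bmi
    // __attribute__((target("default")))          int foo(); -> foo (no suffix)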
- const auto *ND = cast<NamedDecl>(GD.getDecl());
+static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
+ const NamedDecl *ND,
+ bool OmitMultiVersionMangling = false) {
SmallString<256> Buffer;
- StringRef Str;
- if (getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
+ llvm::raw_svector_ostream Out(Buffer);
+ MangleContext &MC = CGM.getCXXABI().getMangleContext();
+ if (MC.shouldMangleDeclName(ND)) {
llvm::raw_svector_ostream Out(Buffer);
if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
- getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
+ MC.mangleCXXCtor(D, GD.getCtorType(), Out);
else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
- getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
+ MC.mangleCXXDtor(D, GD.getDtorType(), Out);
else
- getCXXABI().getMangleContext().mangleName(ND, Out);
- Str = Out.str();
+ MC.mangleName(ND, Out);
} else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
@@ -761,14 +932,103 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
llvm::raw_svector_ostream Out(Buffer);
Out << "__regcall3__" << II->getName();
- Str = Out.str();
} else {
- Str = II->getName();
+ Out << II->getName();
+ }
+ }
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
+ if (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion())
+ AppendCPUSpecificCPUDispatchMangling(
+ CGM, FD->getAttr<CPUSpecificAttr>(), Out);
+ else
+ AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
+ }
+
+ return Out.str();
+}
+
+void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
+ const FunctionDecl *FD) {
+ if (!FD->isMultiVersion())
+ return;
+
+ // Get the name of what this would be without the 'target' attribute. This
+ // allows us to look up the version that was emitted when this wasn't a
+ // multiversion function.
+ std::string NonTargetName =
+ getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
+ GlobalDecl OtherGD;
+ if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
+ assert(OtherGD.getCanonicalDecl()
+ .getDecl()
+ ->getAsFunction()
+ ->isMultiVersion() &&
+ "Other GD should now be a multiversioned function");
+ // OtherFD is the version of this function that was mangled BEFORE
+ // becoming a MultiVersion function. It potentially needs to be updated.
+ const FunctionDecl *OtherFD =
+ OtherGD.getCanonicalDecl().getDecl()->getAsFunction();
+ std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
+ // This is so that if the initial version was already the 'default'
+ // version, we don't try to update it.
+ if (OtherName != NonTargetName) {
+ // Remove instead of erase, since others may have stored the StringRef
+ // to this.
+ const auto ExistingRecord = Manglings.find(NonTargetName);
+ if (ExistingRecord != std::end(Manglings))
+ Manglings.remove(&(*ExistingRecord));
+ auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
+ MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
+ if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
+ Entry->setName(OtherName);
+ }
+ }
+}
+
+StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
+ GlobalDecl CanonicalGD = GD.getCanonicalDecl();
+
+ // Some ABIs don't have constructor variants. Make sure that base and
+ // complete constructors get mangled the same.
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
+ if (!getTarget().getCXXABI().hasConstructorVariants()) {
+ CXXCtorType OrigCtorType = GD.getCtorType();
+ assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
+ if (OrigCtorType == Ctor_Base)
+ CanonicalGD = GlobalDecl(CD, Ctor_Complete);
}
}
+ const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl());
+ // Since CPUSpecific can require multiple emits per decl, store the manglings
+ // separately.
+ if (FD &&
+ (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion())) {
+ const auto *SD = FD->getAttr<CPUSpecificAttr>();
+
+ std::pair<GlobalDecl, unsigned> SpecCanonicalGD{
+ CanonicalGD,
+ SD ? SD->ActiveArgIndex : std::numeric_limits<unsigned>::max()};
+
+ auto FoundName = CPUSpecificMangledDeclNames.find(SpecCanonicalGD);
+ if (FoundName != CPUSpecificMangledDeclNames.end())
+ return FoundName->second;
+
+ auto Result = CPUSpecificManglings.insert(
+ std::make_pair(getMangledNameImpl(*this, GD, FD), SpecCanonicalGD));
+ return CPUSpecificMangledDeclNames[SpecCanonicalGD] = Result.first->first();
+ }
+
+ auto FoundName = MangledDeclNames.find(CanonicalGD);
+ if (FoundName != MangledDeclNames.end())
+ return FoundName->second;
+
// Keep the first result in the case of a mangling collision.
- auto Result = Manglings.insert(std::make_pair(Str, GD));
+ const auto *ND = cast<NamedDecl>(GD.getDecl());
+ auto Result =
+ Manglings.insert(std::make_pair(getMangledNameImpl(*this, GD, ND), GD));
return MangledDeclNames[CanonicalGD] = Result.first->first();
}
@@ -808,6 +1068,11 @@ void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
/// AddGlobalDtor - Add a function to the list that will be called
/// when the module is unloaded.
void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
+ if (CodeGenOpts.RegisterGlobalDtorsWithAtExit) {
+ DtorsUsingAtExit[Priority].push_back(Dtor);
+ return;
+ }
+
// FIXME: Type coercion of void()* types.
GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
}
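Sketch of the effect (assuming the option is driven by -fregister-global-dtors-with-atexit):

    struct S { ~S(); };
    S s; // instead of an entry in @llvm.global_dtors, a synthesized
         // initializer calls atexit() with a stub that destroys s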
@@ -855,14 +1120,8 @@ CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
- if (isa<CXXDestructorDecl>(D) &&
- getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
- GD.getDtorType())) {
- // Destructor variants in the Microsoft C++ ABI are always internal or
- // linkonce_odr thunks emitted on an as-needed basis.
- return Linkage == GVA_Internal ? llvm::GlobalValue::InternalLinkage
- : llvm::GlobalValue::LinkOnceODRLinkage;
- }
+ if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
+ return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
if (isa<CXXConstructorDecl>(D) &&
cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
@@ -876,25 +1135,6 @@ CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
return getLLVMLinkageForDeclarator(D, Linkage, /*isConstantVariable=*/false);
}
-void CodeGenModule::setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F) {
- const auto *FD = cast<FunctionDecl>(GD.getDecl());
-
- if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(FD)) {
- if (getCXXABI().useThunkForDtorVariant(Dtor, GD.getDtorType())) {
- // Don't dllexport/import destructor thunks.
- F->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
- return;
- }
- }
-
- if (FD->hasAttr<DLLImportAttr>())
- F->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
- else if (FD->hasAttr<DLLExportAttr>())
- F->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
- else
- F->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
-}
-
llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
if (!MDS) return nullptr;
@@ -902,11 +1142,6 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
-void CodeGenModule::setFunctionDefinitionAttributes(const FunctionDecl *D,
- llvm::Function *F) {
- setNonAliasAttributes(D, F);
-}
-
void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
const CGFunctionInfo &Info,
llvm::Function *F) {
@@ -937,6 +1172,34 @@ static bool hasUnwindExceptions(const LangOptions &LangOpts) {
return true;
}
+static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
+ const CXXMethodDecl *MD) {
+ // Check that the type metadata can ever actually be used by a call.
+ if (!CGM.getCodeGenOpts().LTOUnit ||
+ !CGM.HasHiddenLTOVisibility(MD->getParent()))
+ return false;
+
+ // Only functions whose address can be taken with a member function pointer
+ // need this sort of type metadata.
+ return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
+ !isa<CXXDestructorDecl>(MD);
+}
+
+std::vector<const CXXRecordDecl *>
+CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
+ llvm::SetVector<const CXXRecordDecl *> MostBases;
+
+ std::function<void (const CXXRecordDecl *)> CollectMostBases;
+ CollectMostBases = [&](const CXXRecordDecl *RD) {
+ if (RD->getNumBases() == 0)
+ MostBases.insert(RD);
+ for (const CXXBaseSpecifier &B : RD->bases())
+ CollectMostBases(B.getType()->getAsCXXRecordDecl());
+ };
+ CollectMostBases(RD);
+ return MostBases.takeVector();
+}
+
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
llvm::AttrBuilder B;
@@ -947,12 +1210,14 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
- if (LangOpts.getStackProtector() == LangOptions::SSPOn)
- B.addAttribute(llvm::Attribute::StackProtect);
- else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
- B.addAttribute(llvm::Attribute::StackProtectStrong);
- else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
- B.addAttribute(llvm::Attribute::StackProtectReq);
+ if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ B.addAttribute(llvm::Attribute::StackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+ B.addAttribute(llvm::Attribute::StackProtectStrong);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ B.addAttribute(llvm::Attribute::StackProtectReq);
+ }
if (!D) {
// If we don't have a declaration to control inlining, the function isn't
@@ -1044,6 +1309,10 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (alignment)
F->setAlignment(alignment);
+ if (!D->hasAttr<AlignedAttr>())
+ if (LangOpts.FunctionAlignment)
+ F->setAlignment(1 << LangOpts.FunctionAlignment);
+
// Some C++ ABIs require 2-byte alignment for member functions, in order to
// reserve a bit for differentiating between virtual and non-virtual member
// functions. If the current target's C++ ABI requires this and this is a
@@ -1056,13 +1325,26 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// In the cross-dso CFI mode, we want !type attributes on definitions only.
if (CodeGenOpts.SanitizeCfiCrossDso)
if (auto *FD = dyn_cast<FunctionDecl>(D))
- CreateFunctionTypeMetadata(FD, F);
+ CreateFunctionTypeMetadataForIcall(FD, F);
+
+ // Emit type metadata on member functions for member function pointer checks.
+ // These are only ever necessary on definitions; we're guaranteed that the
+ // definition will be present in the LTO unit as a result of LTO visibility.
+ auto *MD = dyn_cast<CXXMethodDecl>(D);
+ if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
+ for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
+ llvm::Metadata *Id =
+ CreateMetadataIdentifierForType(Context.getMemberPointerType(
+ MD->getType(), Context.getRecordType(Base).getTypePtr()));
+ F->addTypeMetadata(0, Id);
+ }
+ }
}
-void CodeGenModule::SetCommonAttributes(const Decl *D,
- llvm::GlobalValue *GV) {
- if (const auto *ND = dyn_cast_or_null<NamedDecl>(D))
- setGlobalVisibility(GV, ND, ForDefinition);
+void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
+ const Decl *D = GD.getDecl();
+ if (dyn_cast_or_null<NamedDecl>(D))
+ setGVProperties(GV, GD);
else
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
@@ -1070,19 +1352,59 @@ void CodeGenModule::SetCommonAttributes(const Decl *D,
addUsedGlobal(GV);
}
-void CodeGenModule::setAliasAttributes(const Decl *D,
- llvm::GlobalValue *GV) {
- SetCommonAttributes(D, GV);
+bool CodeGenModule::GetCPUAndFeaturesAttributes(const Decl *D,
+ llvm::AttrBuilder &Attrs) {
+ // Add target-cpu and target-features attributes to functions. If
+ // we have a decl for the function and it has a target attribute then
+ // parse that and add it to the feature set.
+ StringRef TargetCPU = getTarget().getTargetOpts().CPU;
+ std::vector<std::string> Features;
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ FD = FD ? FD->getMostRecentDecl() : FD;
+ const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
+ const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
+ bool AddedAttr = false;
+ if (TD || SD) {
+ llvm::StringMap<bool> FeatureMap;
+ getFunctionFeatureMap(FeatureMap, FD);
+
+ // Produce the canonical string for this set of features.
+ for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
+ Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
+
+ // Now add the target-cpu and target-features to the function.
+ // While we populated the feature map above, we still need to
+ // get and parse the target attribute so we can get the cpu for
+ // the function.
+ if (TD) {
+ TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
+ if (ParsedAttr.Architecture != "" &&
+ getTarget().isValidCPUName(ParsedAttr.Architecture))
+ TargetCPU = ParsedAttr.Architecture;
+ }
+ } else {
+ // Otherwise just add the existing target cpu and target features to the
+ // function.
+ Features = getTarget().getTargetOpts().Features;
+ }
+
+ if (TargetCPU != "") {
+ Attrs.addAttribute("target-cpu", TargetCPU);
+ AddedAttr = true;
+ }
+ if (!Features.empty()) {
+ llvm::sort(Features.begin(), Features.end());
+ Attrs.addAttribute("target-features", llvm::join(Features, ","));
+ AddedAttr = true;
+ }
- // Process the dllexport attribute based on whether the original definition
- // (not necessarily the aliasee) was exported.
- if (D->hasAttr<DLLExportAttr>())
- GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ return AddedAttr;
}
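Illustrative effect on the emitted attributes (exact strings are target- and flag-dependent):

    // __attribute__((target("avx2"))) void f() {}
    // yields IR function attributes roughly like:
    //   "target-cpu"="x86-64" "target-features"="+avx,+avx2,..."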
-void CodeGenModule::setNonAliasAttributes(const Decl *D,
+void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
llvm::GlobalObject *GO) {
- SetCommonAttributes(D, GO);
+ const Decl *D = GD.getDecl();
+ SetCommonAttributes(GD, GO);
if (D) {
if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
@@ -1096,55 +1418,60 @@ void CodeGenModule::setNonAliasAttributes(const Decl *D,
if (auto *F = dyn_cast<llvm::Function>(GO)) {
if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
- if (!D->getAttr<SectionAttr>())
- F->addFnAttr("implicit-section-name", SA->getName());
+ if (!D->getAttr<SectionAttr>())
+ F->addFnAttr("implicit-section-name", SA->getName());
+
+ llvm::AttrBuilder Attrs;
+ if (GetCPUAndFeaturesAttributes(D, Attrs)) {
+ // We know that GetCPUAndFeaturesAttributes will always have the
+ // newest set, since it has the newest possible FunctionDecl, so the
+ // new ones should replace the old.
+ F->removeFnAttr("target-cpu");
+ F->removeFnAttr("target-features");
+ F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
+ }
}
-
- if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+
+ if (const auto *CSA = D->getAttr<CodeSegAttr>())
+ GO->setSection(CSA->getName());
+ else if (const auto *SA = D->getAttr<SectionAttr>())
GO->setSection(SA->getName());
}
- getTargetCodeGenInfo().setTargetAttributes(D, GO, *this, ForDefinition);
+ getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
}
-void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
llvm::Function *F,
const CGFunctionInfo &FI) {
+ const Decl *D = GD.getDecl();
SetLLVMFunctionAttributes(D, FI, F);
SetLLVMFunctionAttributesForDefinition(D, F);
F->setLinkage(llvm::Function::InternalLinkage);
- setNonAliasAttributes(D, F);
+ setNonAliasAttributes(GD, F);
}
-static void setLinkageForGV(llvm::GlobalValue *GV,
- const NamedDecl *ND) {
+static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
// Set linkage and visibility in case we never see a definition.
LinkageInfo LV = ND->getLinkageAndVisibility();
- if (!isExternallyVisible(LV.getLinkage())) {
- // Don't set internal linkage on declarations.
- } else {
- if (ND->hasAttr<DLLImportAttr>()) {
- GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
- GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- } else if (ND->hasAttr<DLLExportAttr>()) {
- GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
- } else if (ND->hasAttr<WeakAttr>() || ND->isWeakImported()) {
- // "extern_weak" is overloaded in LLVM; we probably should have
- // separate linkage types for this.
- GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
- }
- }
+ // Don't set internal linkage on declarations.
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ if (isExternallyVisible(LV.getLinkage()) &&
+ (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
-void CodeGenModule::CreateFunctionTypeMetadata(const FunctionDecl *FD,
- llvm::Function *F) {
+void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
+ llvm::Function *F) {
// Only if we are checking indirect calls.
if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
return;
- // Non-static class methods are handled via vtable pointer checks elsewhere.
+ // Non-static class methods are handled via vtable or member function pointer
+ // checks elsewhere.
if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
return;
@@ -1168,8 +1495,7 @@ void CodeGenModule::CreateFunctionTypeMetadata(const FunctionDecl *FD,
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
bool IsIncompleteFunction,
- bool IsThunk,
- ForDefinition_t IsForDefinition) {
+ bool IsThunk) {
if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
// If this is an intrinsic function, set the function's attributes
@@ -1183,9 +1509,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
if (!IsIncompleteFunction) {
SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
// Setup target-specific attributes.
- if (!IsForDefinition)
- getTargetCodeGenInfo().setTargetAttributes(FD, F, *this,
- NotForDefinition);
+ if (F->isDeclaration())
+ getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
}
// Add the Returned attribute for "this", except for iOS 5 and earlier
@@ -1204,14 +1529,12 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
// overridden by a definition.
setLinkageForGV(F, FD);
- setGlobalVisibility(F, FD, NotForDefinition);
-
- if (FD->getAttr<PragmaClangTextSectionAttr>()) {
- F->addFnAttr("implicit-section-name");
- }
+ setGVProperties(F, FD);
- if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
- F->setSection(SA->getName());
+ if (const auto *CSA = FD->getAttr<CodeSegAttr>())
+ F->setSection(CSA->getName());
+ else if (const auto *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
if (FD->isReplaceableGlobalAllocationFunction()) {
// A replaceable global allocation function does not act like a builtin by
@@ -1238,7 +1561,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
// Don't emit entries for function declarations in the cross-DSO mode. This
// is handled with better precision by the receiving DSO.
if (!CodeGenOpts.SanitizeCfiCrossDso)
- CreateFunctionTypeMetadata(FD, F);
+ CreateFunctionTypeMetadataForIcall(FD, F);
if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
@@ -1299,6 +1622,12 @@ void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
+void CodeGenModule::AddELFLibDirective(StringRef Lib) {
+ auto &C = getLLVMContext();
+ LinkerOptionsMetadata.push_back(llvm::MDNode::get(
+ C, {llvm::MDString::get(C, "lib"), llvm::MDString::get(C, Lib)}));
+}
+
void CodeGenModule::AddDependentLib(StringRef Lib) {
llvm::SmallString<24> Opt;
getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
@@ -1306,7 +1635,7 @@ void CodeGenModule::AddDependentLib(StringRef Lib) {
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
-/// \brief Add link options implied by the given module, including modules
+/// Add link options implied by the given module, including modules
/// it depends on, using a postorder walk.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
SmallVectorImpl<llvm::MDNode *> &Metadata,
@@ -1325,6 +1654,12 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
// Add linker options to link against the libraries/frameworks
// described by this module.
llvm::LLVMContext &Context = CGM.getLLVMContext();
+
+ // For modules that use export_as for linking, use that module
+ // name instead.
+ if (Mod->UseExportAsModuleLinkName)
+ return;
+
for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
// Link against a framework. Frameworks are currently Darwin only, so we
// don't to ask TargetCodeGenInfo for the spelling of the linker option.
@@ -1586,7 +1921,8 @@ bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
StringRef Category) const {
// For now globals can be blacklisted only in ASan and KASan.
const SanitizerMask EnabledAsanMask = LangOpts.Sanitize.Mask &
- (SanitizerKind::Address | SanitizerKind::KernelAddress | SanitizerKind::HWAddress);
+ (SanitizerKind::Address | SanitizerKind::KernelAddress |
+ SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress);
if (!EnabledAsanMask)
return false;
const auto &SanitizerBL = getContext().getSanitizerBlacklist();
@@ -1615,9 +1951,10 @@ bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
StringRef Category) const {
if (!LangOpts.XRayInstrument)
return false;
+
const auto &XRayFilter = getContext().getXRayFilter();
using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
- auto Attr = XRayFunctionFilter::ImbueAttribute::NONE;
+ auto Attr = ImbueAttr::NONE;
if (Loc.isValid())
Attr = XRayFilter.shouldImbueLocation(Loc, Category);
if (Attr == ImbueAttr::NONE)
@@ -1662,7 +1999,8 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
// If OpenMP is enabled and threadprivates must be generated like TLS, delay
// codegen for global variables, because they may be marked as threadprivate.
if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
- getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global))
+ getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
+ !isTypeConstant(Global->getType(), false))
return false;
return true;
@@ -1691,6 +2029,7 @@ ConstantAddress CodeGenModule::GetAddrOfUuidDescriptor(
/*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+ setDSOLocal(GV);
return ConstantAddress(GV, Alignment);
}
@@ -1742,6 +2081,10 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (Global->hasAttr<IFuncAttr>())
return emitIFuncDefinition(GD);
+ // If this is a cpu_dispatch multiversion function, emit the resolver.
+ if (Global->hasAttr<CPUDispatchAttr>())
+ return emitCPUDispatchDefinition(GD);
+
// If this is CUDA, be selective about which declarations we emit.
if (LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
@@ -2058,6 +2401,124 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Function *NewFn);
+void CodeGenModule::emitMultiVersionFunctions() {
+ for (GlobalDecl GD : MultiVersionFuncs) {
+ SmallVector<CodeGenFunction::TargetMultiVersionResolverOption, 10> Options;
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ getContext().forEachMultiversionedFunctionVersion(
+ FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
+ GlobalDecl CurGD{
+ (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (CurFD->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI =
+ getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
+ }
+ assert(Func && "This should have just been created");
+ }
+ Options.emplace_back(getTarget(), cast<llvm::Function>(Func),
+ CurFD->getAttr<TargetAttr>()->parse());
+ });
+
+ llvm::Function *ResolverFunc = cast<llvm::Function>(
+ GetGlobalValue((getMangledName(GD) + ".resolver").str()));
+ if (supportsCOMDAT())
+ ResolverFunc->setComdat(
+ getModule().getOrInsertComdat(ResolverFunc->getName()));
+ std::stable_sort(
+ Options.begin(), Options.end(),
+ std::greater<CodeGenFunction::TargetMultiVersionResolverOption>());
+ CodeGenFunction CGF(*this);
+ CGF.EmitTargetMultiVersionResolver(ResolverFunc, Options);
+ }
+}
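
For reference, a minimal sketch of the target-multiversioning source this resolver emission serves (names are illustrative, not from the patch):

  __attribute__((target("avx2")))    int mv_add(int x) { return x + x; }
  __attribute__((target("default"))) int mv_add(int x) { return x * 2; }

  int call_mv(int x) { return mv_add(x); } // resolved through <mangled>.ifunc

The options are stable-sorted by descending priority, so the resolver tests the most specific target first.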
+
+void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ assert(FD && "Not a FunctionDecl?");
+ const auto *DD = FD->getAttr<CPUDispatchAttr>();
+ assert(DD && "Not a cpu_dispatch Function?");
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(FD->getType());
+
+ StringRef ResolverName = getMangledName(GD);
+ llvm::Type *ResolverType = llvm::FunctionType::get(
+ llvm::PointerType::get(DeclTy,
+ Context.getTargetAddressSpace(FD->getType())),
+ false);
+ auto *ResolverFunc = cast<llvm::Function>(
+ GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
+ /*ForVTable=*/false));
+
+ SmallVector<CodeGenFunction::CPUDispatchMultiVersionResolverOption, 10>
+ Options;
+ const TargetInfo &Target = getTarget();
+ for (const IdentifierInfo *II : DD->cpus()) {
+ // Get the name of the target function so we can look it up/create it.
+ std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
+ getCPUSpecificMangling(*this, II->getName());
+ llvm::Constant *Func = GetOrCreateLLVMFunction(
+ MangledName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/false,
+ /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
+ llvm::SmallVector<StringRef, 32> Features;
+ Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
+ llvm::transform(Features, Features.begin(),
+ [](StringRef Str) { return Str.substr(1); });
+ Features.erase(std::remove_if(
+ Features.begin(), Features.end(), [&Target](StringRef Feat) {
+ return !Target.validateCpuSupports(Feat);
+ }), Features.end());
+ Options.emplace_back(cast<llvm::Function>(Func),
+ CodeGenFunction::GetX86CpuSupportsMask(Features));
+ }
+
+ llvm::sort(
+ Options.begin(), Options.end(),
+ std::greater<CodeGenFunction::CPUDispatchMultiVersionResolverOption>());
+ CodeGenFunction CGF(*this);
+ CGF.EmitCPUDispatchMultiVersionResolver(ResolverFunc, Options);
+}
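
A sketch of the ICC-compatible construct handled here; the cpu names must be ones the target knows, and the function name is illustrative:

  __attribute__((cpu_specific(ivybridge))) void do_work(void) { /* tuned    */ }
  __attribute__((cpu_specific(generic)))   void do_work(void) { /* fallback */ }
  __attribute__((cpu_dispatch(ivybridge, generic))) void do_work(void) {}

Each cpu in the dispatch list becomes one resolver option, keyed by GetX86CpuSupportsMask over the features that validateCpuSupports accepts.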
+
+/// If an ifunc for the specified mangled name is not in the module, create and
+/// return an llvm IFunc Function with the specified type.
+llvm::Constant *
+CodeGenModule::GetOrCreateMultiVersionIFunc(GlobalDecl GD, llvm::Type *DeclTy,
+ const FunctionDecl *FD) {
+ std::string MangledName =
+ getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
+ std::string IFuncName = MangledName + ".ifunc";
+ if (llvm::GlobalValue *IFuncGV = GetGlobalValue(IFuncName))
+ return IFuncGV;
+
+ // Since this is the first time we've created this IFunc, make sure
+ // that we put this multiversioned function into the list to be
+ // replaced later if necessary (target multiversioning only).
+ if (!FD->isCPUDispatchMultiVersion() && !FD->isCPUSpecificMultiVersion())
+ MultiVersionFuncs.push_back(GD);
+
+ std::string ResolverName = MangledName + ".resolver";
+ llvm::Type *ResolverType = llvm::FunctionType::get(
+ llvm::PointerType::get(DeclTy,
+ Context.getTargetAddressSpace(FD->getType())),
+ false);
+ llvm::Constant *Resolver =
+ GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
+ /*ForVTable=*/false);
+ llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
+ DeclTy, 0, llvm::Function::ExternalLinkage, "", Resolver, &getModule());
+ GIF->setName(IFuncName);
+ SetCommonAttributes(FD, GIF);
+
+ return GIF;
+}
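
The emitted ifunc/resolver pair behaves much like the hand-written GNU ifunc below (a sketch; the real resolver is named <mangled>.resolver and its body is filled in later):

  static int add_fast(int x) { return x + x; }
  static int add_slow(int x) { return x * 2; }

  extern "C" void *resolve_add(void) {
    __builtin_cpu_init(); // needed before __builtin_cpu_supports in a resolver
    return __builtin_cpu_supports("avx2") ? (void *)add_fast
                                          : (void *)add_slow;
  }
  __attribute__((ifunc("resolve_add"))) int add(int x);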
+
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
@@ -2071,6 +2532,33 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
+ // Any attempt to use a multiversioned function should retrieve the ifunc
+ // instead; name mangling handles the rest of the changes.
+ if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
+ // For the device mark the function as one that should be emitted.
+ if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
+ !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
+ !DontDefer && !IsForDefinition) {
+ const FunctionDecl *FDDef = FD->getDefinition();
+ GlobalDecl GDDef;
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
+ GDDef = GlobalDecl(CD, GD.getCtorType());
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
+ GDDef = GlobalDecl(DD, GD.getDtorType());
+ else
+ GDDef = GlobalDecl(FDDef);
+ addDeferredDeclToEmit(GDDef);
+ }
+
+ if (FD->isMultiVersion()) {
+ const auto *TA = FD->getAttr<TargetAttr>();
+ if (TA && TA->isDefaultVersion())
+ UpdateMultiVersionNames(GD, FD);
+ if (!IsForDefinition)
+ return GetOrCreateMultiVersionIFunc(GD, Ty, FD);
+ }
+ }
+
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -2081,8 +2569,10 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ setDSOLocal(Entry);
+ }
// If there are two attempts to define the same mangled name, issue an
// error.
@@ -2094,8 +2584,8 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
(GD.getCanonicalDecl().getDecl() !=
OtherGD.getCanonicalDecl().getDecl()) &&
DiagnosedConflictingDefinitions.insert(GD).second) {
- getDiags().Report(D->getLocation(),
- diag::err_duplicate_mangled_name);
+ getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
+ << MangledName;
getDiags().Report(OtherGD.getDecl()->getLocation(),
diag::note_previous_definition);
}
@@ -2157,8 +2647,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
assert(F->getName() == MangledName && "name was uniqued!");
if (D)
- SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk,
- IsForDefinition);
+ SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
if (ExtraAttrs.hasAttributes(llvm::AttributeList::FunctionIndex)) {
llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
F->addAttributes(llvm::AttributeList::FunctionIndex, B);
@@ -2234,6 +2723,16 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
Ty = getTypes().ConvertFunctionType(CanonTy, FD);
}
+ // Devirtualized destructor calls may come through here instead of via
+ // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
+ // of the complete destructor when necessary.
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
+ if (getTarget().getCXXABI().isMicrosoft() &&
+ GD.getDtorType() == Dtor_Complete &&
+ DD->getParent()->getNumVBases() == 0)
+ GD = GlobalDecl(DD, Dtor_Base);
+ }
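+ // A sketch of the case this covers (hypothetical types): 'final' lets the
+ // call devirtualize, and with no virtual bases the MS ABI complete
+ // destructor is equivalent to the base destructor:
+ //
+ //   struct Base { virtual ~Base(); };
+ //   struct Widget final : Base { ~Widget() override; };
+ //   void destroy(Widget *w) { w->~Widget(); } // Dtor_Complete -> Dtor_Base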
+
StringRef MangledName = getMangledName(GD);
return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
/*IsThunk=*/false, llvm::AttributeList(),
@@ -2255,7 +2754,7 @@ GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
// Demangle the premangled name from getTerminateFn()
IdentifierInfo &CXXII =
- (Name == "_ZSt9terminatev" || Name == "\01?terminate@@YAXXZ")
+ (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
? C.Idents.get("terminate")
: C.Idents.get(Name);
@@ -2302,6 +2801,7 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
F->setLinkage(llvm::GlobalValue::ExternalLinkage);
}
}
+ setDSOLocal(F);
}
}
@@ -2313,13 +2813,7 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
llvm::Constant *
CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy, StringRef Name,
llvm::AttributeList ExtraAttrs) {
- llvm::Constant *C =
- GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
- /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
- if (auto *F = dyn_cast<llvm::Function>(C))
- if (F->empty())
- F->setCallingConv(getBuiltinCC());
- return C;
+ return CreateRuntimeFunction(FTy, Name, ExtraAttrs, true);
}
/// isTypeConstant - Determine whether an object of this type can be emitted
@@ -2350,7 +2844,7 @@ bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the global when it is first created.
///
-/// If IsForDefinition is true, it is guranteed that an actual global with
+/// If IsForDefinition is true, it is guaranteed that an actual global with
/// type Ty will be returned, not conversion of a variable with the same
/// mangled name but some other type.
llvm::Constant *
@@ -2370,6 +2864,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
+ getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
+
if (Entry->getType() == Ty)
return Entry;
@@ -2386,8 +2883,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
(OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
OtherD->hasInit() &&
DiagnosedConflictingDefinitions.insert(D).second) {
- getDiags().Report(D->getLocation(),
- diag::err_duplicate_mangled_name);
+ getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
+ << MangledName;
getDiags().Report(OtherGD.getDecl()->getLocation(),
diag::note_previous_definition);
}
@@ -2438,6 +2935,9 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
// Handle things which are present even on external declarations.
if (D) {
+ if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
+ getOpenMPRuntime().registerTargetGlobalVariable(D, GV);
+
// FIXME: This code is overly simple and should be merged with other global
// handling.
GV->setConstant(isTypeConstant(D->getType(), false));
@@ -2445,7 +2945,6 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
setLinkageForGV(GV, D);
- setGlobalVisibility(GV, D, NotForDefinition);
if (D->getTLSKind()) {
if (D->getTLSKind() == VarDecl::TLS_Dynamic)
@@ -2453,6 +2952,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
setTLSMode(GV, *D);
}
+ setGVProperties(GV, D);
+
// If required by the ABI, treat declarations of static data members with
// inline initializers as definitions.
if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
@@ -2501,7 +3002,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
GetAddrOfGlobalVar(D, InitType, IsForDefinition));
// Erase the old global, since it is no longer used.
- cast<llvm::GlobalValue>(GV)->eraseFromParent();
+ GV->eraseFromParent();
GV = NewGV;
} else {
GV->setInitializer(Init);
@@ -2602,7 +3103,7 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
-/// normal requested type would be. If IsForDefinition is true, it is guranteed
+/// normal requested type would be. If IsForDefinition is true, it is guaranteed
/// that an actual global with type Ty will be returned, not conversion of a
/// variable with the same mangled name but some other type.
llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
@@ -2625,7 +3126,10 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name) {
- return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr);
+ auto *Ret =
+ GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr);
+ setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
+ return Ret;
}
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
@@ -2679,6 +3183,39 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
}
+LangAS CodeGenModule::getStringLiteralAddressSpace() const {
+ // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
+ if (LangOpts.OpenCL)
+ return LangAS::opencl_constant;
+ if (auto AS = getTarget().getConstantAddressSpace())
+ return AS.getValue();
+ return LangAS::Default;
+}
+
+// In address space agnostic languages, string literals are in the default
+// address space in the AST. However, certain targets (e.g. amdgcn) request
+// that they be emitted in the constant address space in LLVM IR. To stay
+// consistent with the rest of the AST, string literal global variables in
+// the constant address space need to be cast to the default address space
+// before being put into the address map and referenced by other parts of
+// CodeGen.
+// In OpenCL, string literals are in the constant address space in the AST,
+// so they must not be cast to the default address space.
+static llvm::Constant *
+castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
+ llvm::GlobalVariable *GV) {
+ llvm::Constant *Cast = GV;
+ if (!CGM.getLangOpts().OpenCL) {
+ if (auto AS = CGM.getTarget().getConstantAddressSpace()) {
+ if (AS != LangAS::Default)
+ Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
+ CGM, GV, AS.getValue(), LangAS::Default,
+ GV->getValueType()->getPointerTo(
+ CGM.getContext().getTargetAddressSpace(LangAS::Default)));
+ }
+ }
+ return Cast;
+}
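
For instance, on a target such as amdgcn the literal below is emitted as a constant-address-space global, and the helper above casts its address back so the function still returns an ordinary default-address-space const char * (a sketch; the function name is illustrative):

  const char *greeting() { return "hello"; }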
+
template<typename SomeDecl>
void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
llvm::GlobalValue *GV) {
@@ -2753,6 +3290,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
if (getLangOpts().OpenCL && ASTTy->isSamplerT())
return;
+ // If this is an OpenMP device, check whether it is legal to emit this
+ // global normally.
+ if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
+ OpenMPRuntime->emitTargetGlobalVariable(D))
+ return;
+
llvm::Constant *Init = nullptr;
CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
bool NeedsGlobalCtor = false;
@@ -2989,7 +3532,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
return true;
// A variable cannot be both common and exist in a section.
- // We dont try to determine which is the right section in the front-end.
+ // We don't try to determine which is the right section in the front-end.
// If no specialized section name is applicable, it will resort to default.
if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
D->hasAttr<PragmaClangDataSectionAttr>() ||
@@ -3261,18 +3804,18 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
// declarations).
auto *Fn = cast<llvm::Function>(GV);
setFunctionLinkage(GD, Fn);
- setFunctionDLLStorageClass(GD, Fn);
// FIXME: this is redundant with part of setFunctionDefinitionAttributes
- setGlobalVisibility(Fn, D, ForDefinition);
+ setGVProperties(Fn, GD);
MaybeHandleStaticInExternC(D, Fn);
+
maybeSetTrivialComdat(*D, *Fn);
CodeGenFunction(*this).GenerateCode(D, Fn, FI);
- setFunctionDefinitionAttributes(D, Fn);
+ setNonAliasAttributes(GD, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
@@ -3281,6 +3824,15 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
AddGlobalDtor(Fn, DA->getPriority());
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, Fn);
+
+ if (D->isCPUSpecificMultiVersion()) {
+ auto *Spec = D->getAttr<CPUSpecificAttr>();
+ // If there is another specific version we need to emit, do so here.
+ if (Spec->ActiveArgIndex + 1 < Spec->cpus_size()) {
+ ++Spec->ActiveArgIndex;
+ EmitGlobalFunctionDefinition(GD, nullptr);
+ }
+ }
}
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
@@ -3356,7 +3908,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
if (VD->getTLSKind())
setTLSMode(GA, *VD);
- setAliasAttributes(D, GA);
+ SetCommonAttributes(GD, GA);
}
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
@@ -3377,7 +3929,8 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
GlobalDecl OtherGD;
if (lookupRepresentativeDecl(MangledName, OtherGD) &&
DiagnosedConflictingDefinitions.insert(GD).second) {
- Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name);
+ Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
+ << MangledName;
Diags.Report(OtherGD.getDecl()->getLocation(),
diag::note_previous_definition);
}
@@ -3415,7 +3968,7 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
} else
GIF->setName(MangledName);
- SetCommonAttributes(D, GIF);
+ SetCommonAttributes(GD, GIF);
}
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
@@ -3477,14 +4030,13 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
if (!CFConstantStringClassRef) {
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
- llvm::Constant *GV =
- CreateRuntimeVariable(Ty, "__CFConstantStringClassReference");
+ llvm::GlobalValue *GV = cast<llvm::GlobalValue>(
+ CreateRuntimeVariable(Ty, "__CFConstantStringClassReference"));
if (getTriple().isOSBinFormatCOFF()) {
IdentifierInfo &II = getContext().Idents.get(GV->getName());
TranslationUnitDecl *TUDecl = getContext().getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
- llvm::GlobalValue *CGV = cast<llvm::GlobalValue>(GV);
const VarDecl *VD = nullptr;
for (const auto &Result : DC->lookup(&II))
@@ -3492,13 +4044,14 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
break;
if (!VD || !VD->hasAttr<DLLExportAttr>()) {
- CGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- CGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
} else {
- CGV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- CGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
}
}
+ setDSOLocal(GV);
// Decay array -> ptr
CFConstantStringClassRef =
@@ -3666,10 +4219,8 @@ static llvm::GlobalVariable *
GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
CodeGenModule &CGM, StringRef GlobalName,
CharUnits Alignment) {
- // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
- unsigned AddrSpace = 0;
- if (CGM.getLangOpts().OpenCL)
- AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
+ unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
+ CGM.getStringLiteralAddressSpace());
llvm::Module &M = CGM.getModule();
// Create a global variable for this string
@@ -3682,6 +4233,7 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
GV->setComdat(M.getOrInsertComdat(GV->getName()));
}
+ CGM.setDSOLocal(GV);
return GV;
}
@@ -3730,7 +4282,9 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
QualType());
- return ConstantAddress(GV, Alignment);
+
+ return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
+ Alignment);
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
@@ -3774,7 +4328,9 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
GlobalName, Alignment);
if (Entry)
*Entry = GV;
- return ConstantAddress(GV, Alignment);
+
+ return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
+ Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
@@ -3847,7 +4403,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
// Temporaries defined inside a class get linkonce_odr linkage because the
- // class can be defined in multipe translation units.
+ // class can be defined in multiple translation units.
Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
} else {
// There is no need for this temporary to have external linkage if the
@@ -3860,7 +4416,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
if (emitter) emitter->finalize(GV);
- setGlobalVisibility(GV, VD, ForDefinition);
+ setGVProperties(GV, VD);
GV->setAlignment(Align.getQuantity());
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
@@ -3997,18 +4553,13 @@ void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Ignore dependent declarations.
- if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+ if (D->isTemplated())
return;
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function:
- // Skip function templates
- if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
- cast<FunctionDecl>(D)->isLateTemplateParsed())
- return;
-
EmitGlobal(cast<FunctionDecl>(D));
// Always provide some coverage mapping
// even for the functions that aren't emitted.
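
The isTemplated() check covers what the per-kind guards removed in this function used to: declarations like these are skipped, and only their instantiations reach EmitGlobal (a sketch):

  template <typename T> T twice(T x) { return x + x; } // FunctionTemplateDecl
  template <typename T> T zero = T();                  // VarTemplateDecl (C++14)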
@@ -4021,10 +4572,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Var:
case Decl::Decomposition:
- // Skip variable templates
- if (cast<VarDecl>(D)->getDescribedVarTemplate())
- return;
- LLVM_FALLTHROUGH;
case Decl::VarTemplateSpecialization:
EmitGlobal(cast<VarDecl>(D));
if (auto *DD = dyn_cast<DecompositionDecl>(D))
@@ -4083,16 +4630,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
return;
case Decl::CXXConstructor:
- // Skip function templates
- if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
- cast<FunctionDecl>(D)->isLateTemplateParsed())
- return;
-
getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
case Decl::CXXDestructor:
- if (cast<FunctionDecl>(D)->isLateTemplateParsed())
- return;
getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
break;
@@ -4152,7 +4692,11 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
AppendLinkerOptions(PCD->getArg());
break;
case PCK_Lib:
- AddDependentLib(PCD->getArg());
+ if (getTarget().getTriple().isOSBinFormatELF() &&
+ !getTarget().getTriple().isPS4())
+ AddELFLibDirective(PCD->getArg());
+ else
+ AddDependentLib(PCD->getArg());
break;
case PCK_Compiler:
case PCK_ExeStr:
@@ -4358,9 +4902,7 @@ static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
/// to such functions with an unmangled name from inline assembly within the
/// same translation unit.
void CodeGenModule::EmitStaticExternCAliases() {
- // Don't do anything if we're generating CUDA device code -- the NVPTX
- // assembly target doesn't support aliases.
- if (Context.getTargetInfo().getTriple().isNVPTX())
+ if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
return;
for (auto &I : StaticExternCValues) {
IdentifierInfo *Name = I.first;
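
A sketch of the situation this loop handles (hypothetical names): the definition gets an internal mangled symbol, and the alias restores the name that inline asm expects:

  extern "C" {
    static int counter_impl(void) { return 42; } // emitted as e.g. _ZL12counter_implv
  }
  // Inline asm elsewhere in the TU can still refer to "counter_impl".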
@@ -4504,7 +5046,7 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if (!ForEH && !getLangOpts().RTTI)
+ if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice)
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
@@ -4515,6 +5057,9 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
}
void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
+ // Do not emit threadprivates in simd-only mode.
+ if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
+ return;
for (auto RefExpr : D->varlists()) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
bool PerformInit =
@@ -4529,8 +5074,10 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
}
}
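
A sketch of the directive this emits, now skipped entirely under -fopenmp-simd:

  static int tls_counter = 0;
  #pragma omp threadprivate(tls_counter)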
-llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
- llvm::Metadata *&InternalId = MetadataIdMap[T.getCanonicalType()];
+llvm::Metadata *
+CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
+ StringRef Suffix) {
+ llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
if (InternalId)
return InternalId;
@@ -4538,6 +5085,7 @@ llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
std::string OutName;
llvm::raw_string_ostream Out(OutName);
getCXXABI().getMangleContext().mangleTypeName(T, Out);
+ Out << Suffix;
InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
} else {
@@ -4548,6 +5096,15 @@ llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
return InternalId;
}
+llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
+ return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
+}
+
+llvm::Metadata *
+CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
+ return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
+}
+
// Generalize pointer types to a void pointer with the qualifiers of the
// originally pointed-to type, e.g. 'const char *' and 'char * const *'
// generalize to 'const void *' while 'char *' and 'const char **' generalize to
@@ -4581,25 +5138,8 @@ static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
}
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
- T = GeneralizeFunctionType(getContext(), T);
-
- llvm::Metadata *&InternalId = GeneralizedMetadataIdMap[T.getCanonicalType()];
- if (InternalId)
- return InternalId;
-
- if (isExternallyVisible(T->getLinkage())) {
- std::string OutName;
- llvm::raw_string_ostream Out(OutName);
- getCXXABI().getMangleContext().mangleTypeName(T, Out);
- Out << ".generalized";
-
- InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
- } else {
- InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
- llvm::ArrayRef<llvm::Metadata *>());
- }
-
- return InternalId;
+ return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
+ GeneralizedMetadataIdMap, ".generalized");
}
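
For example, under CFI-icall with generalized pointers, both prototypes below share one ".generalized" identifier, since 'const char *' generalizes to 'const void *' (a sketch):

  void log_str(const char *msg);
  void log_any(const void *buf); // same generalized type id as log_str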
/// Returns whether this module needs the "all-vtables" type identifier.
@@ -4634,22 +5174,28 @@ void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
}
}
+TargetAttr::ParsedTargetAttr
+CodeGenModule::filterFunctionTargetAttrs(const TargetAttr *TD) {
+ assert(TD != nullptr);
+ TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
+
+ ParsedAttr.Features.erase(
+ llvm::remove_if(ParsedAttr.Features,
+ [&](const std::string &Feat) {
+ return !Target.isValidFeatureName(
+ StringRef{Feat}.substr(1));
+ }),
+ ParsedAttr.Features.end());
+ return ParsedAttr;
+}
+
// Fills in the supplied string map with the set of target features for the
// passed in function.
void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
const FunctionDecl *FD) {
StringRef TargetCPU = Target.getTargetOpts().CPU;
if (const auto *TD = FD->getAttr<TargetAttr>()) {
- // If we have a TargetAttr build up the feature map based on that.
- TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
-
- ParsedAttr.Features.erase(
- llvm::remove_if(ParsedAttr.Features,
- [&](const std::string &Feat) {
- return !Target.isValidFeatureName(
- StringRef{Feat}.substr(1));
- }),
- ParsedAttr.Features.end());
+ TargetAttr::ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);
// Make a copy of the features as passed on the command line into the
// beginning of the additional features from the function to override.
@@ -4667,6 +5213,12 @@ void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
// the attribute.
Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU,
ParsedAttr.Features);
+ } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
+ llvm::SmallVector<StringRef, 32> FeaturesTmp;
+ Target.getCPUSpecificCPUDispatchFeatures(SD->getCurCPUName()->getName(),
+ FeaturesTmp);
+ std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
+ Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU, Features);
} else {
Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU,
Target.getTargetOpts().Features);
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 22c4463b2c81..ee64ed4f2ae2 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -324,6 +324,10 @@ private:
/// is defined once we get to the end of the translation unit.
std::vector<GlobalDecl> Aliases;
+ /// List of multiversion functions that have to be emitted. Used to make sure
+ /// we properly emit the iFunc.
+ std::vector<GlobalDecl> MultiVersionFuncs;
+
typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy;
ReplacementsTy Replacements;
@@ -362,6 +366,13 @@ private:
llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
+ // An ordered map of canonical GlobalDecls paired with the cpu-index for
+ // cpu-specific name manglings.
+ llvm::MapVector<std::pair<GlobalDecl, unsigned>, StringRef>
+ CPUSpecificMangledDeclNames;
+ llvm::StringMap<std::pair<GlobalDecl, unsigned>, llvm::BumpPtrAllocator>
+ CPUSpecificManglings;
+
/// Global annotations.
std::vector<llvm::Constant*> Annotations;
@@ -387,10 +398,10 @@ private:
llvm::GlobalValue *> StaticExternCMap;
StaticExternCMap StaticExternCValues;
- /// \brief thread_local variables defined or used in this TU.
+ /// thread_local variables defined or used in this TU.
std::vector<const VarDecl *> CXXThreadLocals;
- /// \brief thread_local variables with initializers that need to run
+ /// thread_local variables with initializers that need to run
/// before any thread_local variable in this TU is odr-used.
std::vector<llvm::Function *> CXXThreadLocalInits;
std::vector<const VarDecl *> CXXThreadLocalInitVars;
@@ -421,14 +432,14 @@ private:
/// Global destructor functions and arguments that need to run on termination.
std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>> CXXGlobalDtors;
- /// \brief The complete set of modules that has been imported.
+ /// The complete set of modules that has been imported.
llvm::SetVector<clang::Module *> ImportedModules;
- /// \brief The set of modules for which the module initializers
+ /// The set of modules for which the module initializers
/// have been emitted.
llvm::SmallPtrSet<clang::Module *, 16> EmittedModuleInitializers;
- /// \brief A vector of metadata strings.
+ /// A vector of metadata strings.
SmallVector<llvm::MDNode *, 16> LinkerOptionsMetadata;
/// @name Cache for Objective-C runtime types
@@ -438,7 +449,7 @@ private:
/// int * but is actually an Obj-C class pointer.
llvm::WeakTrackingVH CFConstantStringClassRef;
- /// \brief The type used to describe the state of a fast enumeration in
+ /// The type used to describe the state of a fast enumeration in
/// Objective-C's for..in loop.
QualType ObjCFastEnumerationStateType;
@@ -499,6 +510,7 @@ private:
/// MDNodes.
typedef llvm::DenseMap<QualType, llvm::Metadata *> MetadataTypeMap;
MetadataTypeMap MetadataIdMap;
+ MetadataTypeMap VirtualMetadataIdMap;
MetadataTypeMap GeneralizedMetadataIdMap;
public:
@@ -685,6 +697,11 @@ public:
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
TBAAAccessInfo InfoB);
+ /// mergeTBAAInfoForMemoryTransfer - Get merged TBAA information for the
+ /// purposes of memory transfer calls.
+ TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
+ TBAAAccessInfo SrcInfo);
+
/// getTBAAInfoForSubobject - Get TBAA information for an access with a given
/// base lvalue.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType) {
@@ -710,8 +727,19 @@ public:
llvm::ConstantInt *getSize(CharUnits numChars);
/// Set the visibility for the given LLVM GlobalValue.
- void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D,
- ForDefinition_t IsForDefinition) const;
+ void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
+ void setGlobalVisibilityAndLocal(llvm::GlobalValue *GV,
+ const NamedDecl *D) const;
+
+ void setDSOLocal(llvm::GlobalValue *GV) const;
+
+ void setDLLImportDLLExport(llvm::GlobalValue *GV, GlobalDecl D) const;
+ void setDLLImportDLLExport(llvm::GlobalValue *GV, const NamedDecl *D) const;
+ /// Set visibility, dllimport/dllexport and dso_local.
+ /// This must be called after dllimport/dllexport is set.
+ void setGVProperties(llvm::GlobalValue *GV, GlobalDecl GD) const;
+ void setGVProperties(llvm::GlobalValue *GV, const NamedDecl *D) const;
/// Set the TLS mode for the given LLVM GlobalValue for the thread-local
/// variable declaration D.
@@ -757,7 +785,7 @@ public:
/// Return the llvm::Constant for the address of the given global variable.
/// If Ty is non-null and if the global doesn't exist, then it will be created
/// with the specified type instead of whatever the normal requested type
- /// would be. If IsForDefinition is true, it is guranteed that an actual
+ /// would be. If IsForDefinition is true, it is guaranteed that an actual
/// global with type Ty will be returned, not conversion of a variable with
/// the same mangled name but some other type.
llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
@@ -765,6 +793,13 @@ public:
ForDefinition_t IsForDefinition
= NotForDefinition);
+ /// Return the AST address space of a string literal, which is used to emit
+ /// the string literal as a global variable in LLVM IR.
+ /// Note: This is not necessarily the address space of the string literal
+ /// in the AST. For an address space agnostic language, e.g. C++, string
+ /// literals in the AST are always in the default address space.
+ LangAS getStringLiteralAddressSpace() const;
+
/// Return the address of the given function. If Ty is non-null, then this
/// function will use the specified type if it has to create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = nullptr,
@@ -780,7 +815,8 @@ public:
ConstantAddress GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);
/// Get the address of the thunk for the given global decl.
- llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+ llvm::Constant *GetAddrOfThunk(StringRef Name, llvm::Type *FnTy,
+ GlobalDecl GD);
/// Get a reference to the target of VD.
ConstantAddress GetWeakRefReference(const ValueDecl *VD);
@@ -879,12 +915,12 @@ public:
void setAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *CLE,
llvm::GlobalVariable *GV);
- /// \brief Returns a pointer to a global variable representing a temporary
+ /// Returns a pointer to a global variable representing a temporary
/// with static or thread storage duration.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
const Expr *Inner);
- /// \brief Retrieve the record type that describes the state of an
+ /// Retrieve the record type that describes the state of an
/// Objective-C fast enumeration loop (for..in).
QualType getObjCFastEnumerationStateType();
@@ -912,22 +948,22 @@ public:
/// Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
- /// \brief Stored a deferred empty coverage mapping for an unused
+ /// Store a deferred empty coverage mapping for an unused
/// and thus uninstrumented top level declaration.
void AddDeferredUnusedCoverageMapping(Decl *D);
- /// \brief Remove the deferred empty coverage mapping as this
+ /// Remove the deferred empty coverage mapping as this
/// declaration is actually instrumented.
void ClearUnusedCoverageMapping(const Decl *D);
- /// \brief Emit all the deferred coverage mappings
+ /// Emit all the deferred coverage mappings
/// for the uninstrumented functions.
void EmitDeferredUnusedCoverageMappings();
/// Tell the consumer that this variable has been instantiated.
void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
- /// \brief If the declaration has internal linkage but is inside an
+ /// If the declaration has internal linkage but is inside an
/// extern "C" linkage specification, prepare to emit an alias for it
/// to the expected name.
template<typename SomeDecl>
@@ -976,7 +1012,7 @@ public:
llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
- /// \brief Emit type info if type of an expression is a variably modified
+ /// Emit type info if type of an expression is a variably modified
/// type. Also emit proper debug info for cast types.
void EmitExplicitCastExprType(const ExplicitCastExpr *E,
CodeGenFunction *CGF = nullptr);
@@ -1002,7 +1038,7 @@ public:
/// Set the attributes on the LLVM function for the given decl and function
/// info. This applies attributes necessary for handling the ABI as well as
/// user specified attributes like section.
- void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F,
const CGFunctionInfo &FI);
/// Set the LLVM function attributes (sext, zext, etc).
@@ -1061,6 +1097,10 @@ public:
/// It's up to you to ensure that this is safe.
void AddDefaultFnAttrs(llvm::Function &F);
+ /// Parses the target attributes passed in, and returns only the ones that are
+ /// valid feature names.
+ TargetAttr::ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD);
+
// Fills in the supplied string map with the set of target features for the
// passed in function.
void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
@@ -1075,25 +1115,24 @@ public:
void RefreshTypeCacheForClass(const CXXRecordDecl *Class);
- /// \brief Appends Opts to the "llvm.linker.options" metadata value.
+ /// Appends Opts to the "llvm.linker.options" metadata value.
void AppendLinkerOptions(StringRef Opts);
- /// \brief Appends a detect mismatch command to the linker options.
+ /// Appends a detect mismatch command to the linker options.
void AddDetectMismatch(StringRef Name, StringRef Value);
- /// \brief Appends a dependent lib to the "llvm.linker.options" metadata
+ /// Appends a dependent lib to the "llvm.linker.options" metadata
/// value.
void AddDependentLib(StringRef Lib);
+ void AddELFLibDirective(StringRef Lib);
+
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD);
void setFunctionLinkage(GlobalDecl GD, llvm::Function *F) {
F->setLinkage(getFunctionLinkage(GD));
}
- /// Set the DLL storage class on F.
- void setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F);
-
/// Return the appropriate linkage for the vtable, VTT, and type information
/// of the given class.
llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD);
@@ -1158,40 +1197,29 @@ public:
DeferredVTables.push_back(RD);
}
- /// Emit code for a singal global function or var decl. Forward declarations
+ /// Emit code for a single global function or var decl. Forward declarations
/// are emitted lazily.
void EmitGlobal(GlobalDecl D);
- bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
- /// Set attributes for a global definition.
- void setFunctionDefinitionAttributes(const FunctionDecl *D,
- llvm::Function *F);
-
llvm::GlobalValue *GetGlobalValue(StringRef Ref);
/// Set attributes which are common to any form of a global definition (alias,
/// Objective-C method, function, global variable).
///
/// NOTE: This should only be called for definitions.
- void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
-
- /// Set attributes which must be preserved by an alias. This includes common
- /// attributes (i.e. it includes a call to SetCommonAttributes).
- ///
- /// NOTE: This should only be called for definitions.
- void setAliasAttributes(const Decl *D, llvm::GlobalValue *GV);
+ void SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV);
void addReplacement(StringRef Name, llvm::Constant *C);
void addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C);
- /// \brief Emit a code for threadprivate directive.
+ /// Emit code for a threadprivate directive.
/// \param D Threadprivate declaration.
void EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);
- /// \brief Emit a code for declare reduction construct.
+ /// Emit code for a declare reduction construct.
void EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
CodeGenFunction *CGF = nullptr);
@@ -1212,13 +1240,18 @@ public:
/// internal identifiers).
llvm::Metadata *CreateMetadataIdentifierForType(QualType T);
+ /// Create a metadata identifier that is intended to be used to check virtual
+ /// calls via a member function pointer.
+ llvm::Metadata *CreateMetadataIdentifierForVirtualMemPtrType(QualType T);
+
/// Create a metadata identifier for the generalization of the given type.
/// This may either be an MDString (for external identifiers) or a distinct
/// unnamed MDNode (for internal identifiers).
llvm::Metadata *CreateMetadataIdentifierGeneralized(QualType T);
/// Create and attach type metadata to the given function.
- void CreateFunctionTypeMetadata(const FunctionDecl *FD, llvm::Function *F);
+ void CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
+ llvm::Function *F);
/// Returns whether this module needs the "all-vtables" type identifier.
bool NeedAllVtablesTypeId() const;
@@ -1227,7 +1260,15 @@ public:
void AddVTableTypeMetadata(llvm::GlobalVariable *VTable, CharUnits Offset,
const CXXRecordDecl *RD);
- /// \brief Get the declaration of std::terminate for the platform.
+ /// Return a vector of most-base classes for RD. This is used to implement
+ /// control flow integrity checks for member function pointers.
+ ///
+ /// A most-base class of a class C is defined as a recursive base class of C,
+ /// including C itself, that does not have any bases.
+ std::vector<const CXXRecordDecl *>
+ getMostBaseClasses(const CXXRecordDecl *RD);
+
+ /// Get the declaration of std::terminate for the platform.
llvm::Constant *getTerminateFn();
llvm::SanitizerStatReport &getSanStats();
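
Example for getMostBaseClasses above (a hypothetical hierarchy): for D, the most-base classes are A and B, the recursive bases that themselves have no bases:

  struct A {};
  struct B {};
  struct C : A {};
  struct D : C, B {}; // most-base classes of D: {A, B}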
@@ -1247,18 +1288,24 @@ private:
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
ForDefinition_t IsForDefinition = NotForDefinition);
+ llvm::Constant *GetOrCreateMultiVersionIFunc(GlobalDecl GD,
+ llvm::Type *DeclTy,
+ const FunctionDecl *FD);
+ void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
+
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *PTy,
const VarDecl *D,
ForDefinition_t IsForDefinition
= NotForDefinition);
- void setNonAliasAttributes(const Decl *D, llvm::GlobalObject *GO);
+ bool GetCPUAndFeaturesAttributes(const Decl *D,
+ llvm::AttrBuilder &AttrBuilder);
+ void setNonAliasAttributes(GlobalDecl GD, llvm::GlobalObject *GO);
/// Set function attributes for a function declaration.
void SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
- bool IsIncompleteFunction, bool IsThunk,
- ForDefinition_t IsForDefinition);
+ bool IsIncompleteFunction, bool IsThunk);
void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = nullptr);
@@ -1266,6 +1313,7 @@ private:
void EmitGlobalVarDefinition(const VarDecl *D, bool IsTentative = false);
void EmitAliasDefinition(GlobalDecl GD);
void emitIFuncDefinition(GlobalDecl GD);
+ void emitCPUDispatchDefinition(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
@@ -1274,7 +1322,7 @@ private:
void EmitDeclContext(const DeclContext *DC);
void EmitLinkageSpec(const LinkageSpecDecl *D);
- /// \brief Emit the function that initializes C++ thread_local variables.
+ /// Emit the function that initializes C++ thread_local variables.
void EmitCXXThreadLocalInitFunc();
/// Emit the function that initializes C++ globals.
@@ -1319,6 +1367,14 @@ private:
void checkAliases();
+ std::map<int, llvm::TinyPtrVector<llvm::Function *>> DtorsUsingAtExit;
+
+ /// Register functions annotated with __attribute__((destructor)) using
+ /// __cxa_atexit, if it is available, or atexit otherwise.
+ void registerGlobalDtorsWithAtExit();
+
+ void emitMultiVersionFunctions();
+
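
registerGlobalDtorsWithAtExit covers functions like the sketch below, registering them through __cxa_atexit when available and atexit otherwise:

  __attribute__((destructor(201))) static void teardown(void) { /* cleanup */ }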
/// Emit any vtables which we deferred and still have a use for.
void EmitDeferredVTables();
@@ -1329,16 +1385,16 @@ private:
/// Emit the llvm.used and llvm.compiler.used metadata.
void emitLLVMUsed();
- /// \brief Emit the link options introduced by imported modules.
+ /// Emit the link options introduced by imported modules.
void EmitModuleLinkOptions();
- /// \brief Emit aliases for internal-linkage declarations inside "C" language
+ /// Emit aliases for internal-linkage declarations inside "C" language
/// linkage specifications, giving them the "expected" name where possible.
void EmitStaticExternCAliases();
void EmitDeclMetadata();
- /// \brief Emit the Clang version as llvm.ident metadata.
+ /// Emit the Clang version as llvm.ident metadata.
void EmitVersionIdentMetadata();
/// Emits target specific Metadata for global declarations.
@@ -1373,6 +1429,9 @@ private:
void ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
bool AttrOnCallSite,
llvm::AttrBuilder &FuncAttrs);
+
+ llvm::Metadata *CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
+ StringRef Suffix);
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index 295893c64fbc..c8c2a1b956b8 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -58,7 +58,7 @@ enum PGOHashVersion : unsigned {
};
namespace {
-/// \brief Stable hasher for PGO region counters.
+/// Stable hasher for PGO region counters.
///
/// PGOHash produces a stable hash of a given function's control flow.
///
@@ -79,7 +79,7 @@ class PGOHash {
static const unsigned TooBig = 1u << NumBitsPerType;
public:
- /// \brief Hash values for AST nodes.
+ /// Hash values for AST nodes.
///
/// Distinct values for AST nodes that have region counters attached.
///
@@ -978,7 +978,7 @@ void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
RegionCounts = ProfRecord->Counts;
}
-/// \brief Calculate what to divide by to scale weights.
+/// Calculate what to divide by to scale weights.
///
/// Given the maximum weight, calculate a divisor that will scale all the
/// weights to strictly less than UINT32_MAX.
@@ -986,7 +986,7 @@ static uint64_t calculateWeightScale(uint64_t MaxWeight) {
return MaxWeight < UINT32_MAX ? 1 : MaxWeight / UINT32_MAX + 1;
}
-/// \brief Scale an individual branch weight (and add 1).
+/// Scale an individual branch weight (and add 1).
///
/// Scale a 64-bit weight down to 32-bits using \c Scale.
///
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index ad473032db17..ec48231e5247 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -215,6 +215,19 @@ llvm::MDNode *CodeGenTBAA::getTypeInfo(QualType QTy) {
return MetadataCache[Ty] = TypeNode;
}
+TBAAAccessInfo CodeGenTBAA::getAccessInfo(QualType AccessType) {
+ // Pointee values may have incomplete types, but they shall never be
+ // dereferenced.
+ if (AccessType->isIncompleteType())
+ return TBAAAccessInfo::getIncompleteInfo();
+
+ if (TypeHasMayAlias(AccessType))
+ return TBAAAccessInfo::getMayAliasInfo();
+
+ uint64_t Size = Context.getTypeSizeInChars(AccessType).getQuantity();
+ return TBAAAccessInfo(getTypeInfo(AccessType), Size);
+}
+
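
In source terms, the three branches above correspond roughly to (a sketch):

  struct Opaque;                                   // incomplete -> getIncompleteInfo()
  typedef int AliasInt __attribute__((may_alias)); // -> getMayAliasInfo()
  int Plain;                                       // -> type node for 'int', size 4 (typical)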
TBAAAccessInfo CodeGenTBAA::getVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
llvm::DataLayout DL(&Module);
unsigned Size = DL.getPointerTypeSize(VTablePtrType);
@@ -391,3 +404,21 @@ CodeGenTBAA::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
// access type regardless of their base types.
return TBAAAccessInfo::getMayAliasInfo();
}
+
+TBAAAccessInfo
+CodeGenTBAA::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
+ TBAAAccessInfo SrcInfo) {
+ if (DestInfo == SrcInfo)
+ return DestInfo;
+
+ if (!DestInfo || !SrcInfo)
+ return TBAAAccessInfo();
+
+ if (DestInfo.isMayAlias() || SrcInfo.isMayAlias())
+ return TBAAAccessInfo::getMayAliasInfo();
+
+ // TODO: Implement the rest of the logic here. For example, two accesses
+ // with the same final access types result in an access to an object of
+ // that final access type, regardless of their base types.
+ return TBAAAccessInfo::getMayAliasInfo();
+}
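
The main client is aggregate copies that lower to memory-transfer intrinsics; a sketch of the consuming case, where the copy keeps 'Point' TBAA only if both sides carry matching info:

  struct Point { float x, y; };
  void assign(Point &dst, const Point &src) { dst = src; } // lowers to llvm.memcpy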
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index a5b1f66bcd1a..86ba407c05c6 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -177,6 +177,10 @@ public:
/// given type.
llvm::MDNode *getTypeInfo(QualType QTy);
+ /// getAccessInfo - Get TBAA information that describes an access to
+ /// an object of the given type.
+ TBAAAccessInfo getAccessInfo(QualType AccessType);
+
/// getVTablePtrAccessInfo - Get the TBAA information that describes an
/// access to a virtual table pointer.
TBAAAccessInfo getVTablePtrAccessInfo(llvm::Type *VTablePtrType);
@@ -201,6 +205,11 @@ public:
/// purpose of conditional operator.
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
TBAAAccessInfo InfoB);
+
+ /// mergeTBAAInfoForMemoryTransfer - Get merged TBAA information for the
+ /// purpose of memory transfer calls.
+ TBAAAccessInfo mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
+ TBAAAccessInfo SrcInfo);
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CodeGenTypeCache.h b/lib/CodeGen/CodeGenTypeCache.h
index fb096ac89987..901aed6c00b2 100644
--- a/lib/CodeGen/CodeGenTypeCache.h
+++ b/lib/CodeGen/CodeGenTypeCache.h
@@ -112,8 +112,6 @@ struct CodeGenTypeCache {
llvm::CallingConv::ID RuntimeCC;
llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
- llvm::CallingConv::ID BuiltinCC;
- llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; }
LangAS getASTAllocaAddressSpace() const { return ASTAllocaAddressSpace; }
};
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 529a13b7adc8..16ec1dd301aa 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -437,8 +437,33 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::ULongLong:
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
+ case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
ResultType = llvm::IntegerType::get(getLLVMContext(),
static_cast<unsigned>(Context.getTypeSize(T)));
break;
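
Char8 is C++20's char8_t; the rest are the Embedded-C (TR 18037) fixed-point types, all of which lower to plain integers of their bit-width. A sketch (requires -ffixed-point; widths shown are common defaults):

  _Accum a = 1.5k;                   // typically i32
  unsigned short _Fract f = 0.25uhr; // typically i8
  _Sat long _Accum s = 100000.0lk;   // saturating; typically i64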
@@ -767,7 +792,7 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
// Records are non-zero-initializable if they contain any
// non-zero-initializable subobjects.
if (const RecordType *RT = T->getAs<RecordType>()) {
- auto RD = cast<RecordDecl>(RT->getDecl());
+ const RecordDecl *RD = RT->getDecl();
return isZeroInitializable(RD);
}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index d082342bf592..fb8d31684290 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -184,7 +184,7 @@ public:
/// ConvertType - Convert type T into a llvm::Type.
llvm::Type *ConvertType(QualType T);
- /// \brief Converts the GlobalDecl into an llvm::Type. This should be used
+ /// Converts the GlobalDecl into an llvm::Type. This should be used
/// when we know the target of the function we want to convert. This is
/// because some functions (explicitly, those with pass_object_size
/// parameters) may not have the same signature as their type portrays, and
@@ -225,7 +225,7 @@ public:
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
- /// \brief Remove stale types from the type cache when an inheritance model
+ /// Remove stale types from the type cache when an inheritance model
/// gets assigned to a class.
void RefreshTypeCacheForClass(const CXXRecordDecl *RD);
@@ -313,7 +313,8 @@ public:
const FunctionProtoType *type,
RequiredArgs required,
unsigned numPrefixArgs);
- const CGFunctionInfo &arrangeMSMemberPointerThunk(const CXXMethodDecl *MD);
+ const CGFunctionInfo &
+ arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD);
const CGFunctionInfo &arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT);
const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
@@ -334,7 +335,7 @@ public:
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs args);
- /// \brief Compute a new LLVM record layout object for the given record.
+ /// Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
llvm::StructType *Ty);
diff --git a/lib/CodeGen/ConstantEmitter.h b/lib/CodeGen/ConstantEmitter.h
index 90c9fcd8cf81..b4d1b65743c7 100644
--- a/lib/CodeGen/ConstantEmitter.h
+++ b/lib/CodeGen/ConstantEmitter.h
@@ -50,7 +50,7 @@ public:
: CGM(CGM), CGF(CGF) {}
/// Initialize this emission in the context of the given function.
- /// Use this if the expression might contain contextaul references like
+ /// Use this if the expression might contain contextual references like
/// block addresses or PredefinedExprs.
ConstantEmitter(CodeGenFunction &CGF)
: CGM(CGF.CGM), CGF(&CGF) {}
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index 89a30dc7040c..2d8446463594 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -35,14 +35,14 @@ void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range, SourceLocation) {
namespace {
-/// \brief A region of source code that can be mapped to a counter.
+/// A region of source code that can be mapped to a counter.
class SourceMappingRegion {
Counter Count;
- /// \brief The region's starting location.
+ /// The region's starting location.
Optional<SourceLocation> LocStart;
- /// \brief The region's ending location.
+ /// The region's ending location.
Optional<SourceLocation> LocEnd;
/// Whether this region should be emitted after its parent is emitted.
@@ -74,7 +74,10 @@ public:
bool hasEndLoc() const { return LocEnd.hasValue(); }
- void setEndLoc(SourceLocation Loc) { LocEnd = Loc; }
+ void setEndLoc(SourceLocation Loc) {
+ assert(Loc.isValid() && "Setting an invalid end location");
+ LocEnd = Loc;
+ }
SourceLocation getEndLoc() const {
assert(LocEnd && "Region has no end location");
@@ -123,7 +126,7 @@ struct SpellingRegion {
}
};
-/// \brief Provides the common functionality for the different
+/// Provides the common functionality for the different
/// coverage mapping region builders.
class CoverageMappingBuilder {
public:
@@ -132,17 +135,17 @@ public:
const LangOptions &LangOpts;
private:
- /// \brief Map of clang's FileIDs to IDs used for coverage mapping.
+ /// Map of clang's FileIDs to IDs used for coverage mapping.
llvm::SmallDenseMap<FileID, std::pair<unsigned, SourceLocation>, 8>
FileIDMapping;
public:
- /// \brief The coverage mapping regions for this function
+ /// The coverage mapping regions for this function
llvm::SmallVector<CounterMappingRegion, 32> MappingRegions;
- /// \brief The source mapping regions for this function.
+ /// The source mapping regions for this function.
std::vector<SourceMappingRegion> SourceRegions;
- /// \brief A set of regions which can be used as a filter.
+ /// A set of regions which can be used as a filter.
///
/// It is produced by emitExpansionRegions() and is used in
/// emitSourceRegions() to suppress producing code regions if
@@ -154,7 +157,7 @@ public:
const LangOptions &LangOpts)
: CVM(CVM), SM(SM), LangOpts(LangOpts) {}
- /// \brief Return the precise end location for the given token.
+ /// Return the precise end location for the given token.
SourceLocation getPreciseTokenLocEnd(SourceLocation Loc) {
// We avoid getLocForEndOfToken here, because it doesn't do what we want for
// macro locations, which we just treat as expanded files.
@@ -163,14 +166,14 @@ public:
return Loc.getLocWithOffset(TokLen);
}
- /// \brief Return the start location of an included file or expanded macro.
+ /// Return the start location of an included file or expanded macro.
SourceLocation getStartOfFileOrMacro(SourceLocation Loc) {
if (Loc.isMacroID())
return Loc.getLocWithOffset(-SM.getFileOffset(Loc));
return SM.getLocForStartOfFile(SM.getFileID(Loc));
}
- /// \brief Return the end location of an included file or expanded macro.
+ /// Return the end location of an included file or expanded macro.
SourceLocation getEndOfFileOrMacro(SourceLocation Loc) {
if (Loc.isMacroID())
return Loc.getLocWithOffset(SM.getFileIDSize(SM.getFileID(Loc)) -
@@ -178,18 +181,18 @@ public:
return SM.getLocForEndOfFile(SM.getFileID(Loc));
}
- /// \brief Find out where the current file is included or macro is expanded.
+ /// Find out where the current file is included or macro is expanded.
SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc) {
- return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).first
+ return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).getBegin()
: SM.getIncludeLoc(SM.getFileID(Loc));
}
- /// \brief Return true if \c Loc is a location in a built-in macro.
+ /// Return true if \c Loc is a location in a built-in macro.
bool isInBuiltin(SourceLocation Loc) {
return SM.getBufferName(SM.getSpellingLoc(Loc)) == "<built-in>";
}
- /// \brief Check whether \c Loc is included or expanded from \c Parent.
+ /// Check whether \c Loc is included or expanded from \c Parent.
bool isNestedIn(SourceLocation Loc, FileID Parent) {
do {
Loc = getIncludeOrExpansionLoc(Loc);
@@ -199,23 +202,23 @@ public:
return true;
}
- /// \brief Get the start of \c S ignoring macro arguments and builtin macros.
+ /// Get the start of \c S ignoring macro arguments and builtin macros.
SourceLocation getStart(const Stmt *S) {
SourceLocation Loc = S->getLocStart();
while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc))
- Loc = SM.getImmediateExpansionRange(Loc).first;
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
return Loc;
}
- /// \brief Get the end of \c S ignoring macro arguments and builtin macros.
+ /// Get the end of \c S ignoring macro arguments and builtin macros.
SourceLocation getEnd(const Stmt *S) {
SourceLocation Loc = S->getLocEnd();
while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc))
- Loc = SM.getImmediateExpansionRange(Loc).first;
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
return getPreciseTokenLocEnd(Loc);
}
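[Editor's note] An illustrative case for getStart/getEnd above (assumed, not from the patch): for code spelled inside a macro argument, the walk lands on the expansion site rather than the macro definition, so the coverage region is attributed where the macro is used:

    #define SQUARE(x) ((x) * (x))
    int f(int n) {
      return SQUARE(n + 1);   // region bounds resolve to this line,
    }                         // not to the '#define'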
- /// \brief Find the set of files we have regions for and assign IDs
+ /// Find the set of files we have regions for and assign IDs
///
/// Fills \c Mapping with the virtual file mapping needed to write out
/// coverage and collects the necessary file information to emit source and
@@ -255,7 +258,7 @@ public:
}
}
- /// \brief Get the coverage mapping file ID for \c Loc.
+ /// Get the coverage mapping file ID for \c Loc.
///
/// If no such file ID exists, return None.
Optional<unsigned> getCoverageFileID(SourceLocation Loc) {
@@ -265,7 +268,7 @@ public:
return None;
}
- /// \brief Gather all the regions that were skipped by the preprocessor
+ /// Gather all the regions that were skipped by the preprocessor
/// using the constructs like #if.
void gatherSkippedRegions() {
/// An array of the minimum lineStarts and the maximum lineEnds
@@ -295,14 +298,14 @@ public:
auto Region = CounterMappingRegion::makeSkipped(
*CovFileID, SR.LineStart, SR.ColumnStart, SR.LineEnd, SR.ColumnEnd);
// Make sure that we only collect the regions that are inside
- // the souce code of this function.
+ // the source code of this function.
if (Region.LineStart >= FileLineRanges[*CovFileID].first &&
Region.LineEnd <= FileLineRanges[*CovFileID].second)
MappingRegions.push_back(Region);
}
}
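[Editor's note] A minimal example of the skipped ranges gathered here (sketch): the preprocessor reports the `#if 0` block, and the range check above keeps it only if it falls inside this function's line range:

    int g(int x) {
    #if 0
      helper(x);   // lies in a skipped region, so it is not
    #endif         // reported as uncovered code
      return x;
    }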
- /// \brief Generate the coverage counter mapping regions from collected
+ /// Generate the coverage counter mapping regions from collected
/// source regions.
void emitSourceRegions(const SourceRegionFilter &Filter) {
for (const auto &Region : SourceRegions) {
@@ -347,7 +350,7 @@ public:
}
}
- /// \brief Generate expansion regions for each virtual file we've seen.
+ /// Generate expansion regions for each virtual file we've seen.
SourceRegionFilter emitExpansionRegions() {
SourceRegionFilter Filter;
for (const auto &FM : FileIDMapping) {
@@ -377,7 +380,7 @@ public:
}
};
-/// \brief Creates unreachable coverage regions for the functions that
+/// Creates unreachable coverage regions for the functions that
/// are not emitted.
struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
EmptyCoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM,
@@ -411,7 +414,7 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
SourceRegions.emplace_back(Counter(), Start, End);
}
- /// \brief Write the mapping data to the output stream
+ /// Write the mapping data to the output stream
void write(llvm::raw_ostream &OS) {
SmallVector<unsigned, 16> FileIDMapping;
gatherFileIDs(FileIDMapping);
@@ -425,15 +428,15 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
}
};
-/// \brief A StmtVisitor that creates coverage mapping regions which map
+/// A StmtVisitor that creates coverage mapping regions which map
/// from the source code locations to the PGO counters.
struct CounterCoverageMappingBuilder
: public CoverageMappingBuilder,
public ConstStmtVisitor<CounterCoverageMappingBuilder> {
- /// \brief The map of statements to count values.
+ /// The map of statements to count values.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
- /// \brief A stack of currently live regions.
+ /// A stack of currently live regions.
std::vector<SourceMappingRegion> RegionStack;
/// The currently deferred region: its end location and count can be set once
@@ -442,7 +445,7 @@ struct CounterCoverageMappingBuilder
CounterExpressionBuilder Builder;
- /// \brief A location in the most recently visited file or macro.
+ /// A location in the most recently visited file or macro.
///
/// This is used to adjust the active source regions appropriately when
/// expressions cross file or macro boundaries.
@@ -451,12 +454,12 @@ struct CounterCoverageMappingBuilder
/// Location of the last terminated region.
Optional<std::pair<SourceLocation, size_t>> LastTerminatedRegion;
- /// \brief Return a counter for the subtraction of \c RHS from \c LHS
+ /// Return a counter for the subtraction of \c RHS from \c LHS
Counter subtractCounters(Counter LHS, Counter RHS) {
return Builder.subtract(LHS, RHS);
}
- /// \brief Return a counter for the sum of \c LHS and \c RHS.
+ /// Return a counter for the sum of \c LHS and \c RHS.
Counter addCounters(Counter LHS, Counter RHS) {
return Builder.add(LHS, RHS);
}
@@ -465,14 +468,14 @@ struct CounterCoverageMappingBuilder
return addCounters(addCounters(C1, C2), C3);
}
- /// \brief Return the region counter for the given statement.
+ /// Return the region counter for the given statement.
///
/// This should only be called on statements that have a dedicated counter.
Counter getRegionCounter(const Stmt *S) {
return Counter::getCounter(CounterMap[S]);
}
- /// \brief Push a region onto the stack.
+ /// Push a region onto the stack.
///
/// Returns the index on the stack where the region was pushed. This can be
/// used with popRegions to exit a "scope", ending the region that was pushed.
@@ -549,7 +552,7 @@ struct CounterCoverageMappingBuilder
completeDeferred(Count, DeferredEndLoc);
}
- /// \brief Pop regions from the stack into the function's list of regions.
+ /// Pop regions from the stack into the function's list of regions.
///
/// Adds all regions from \c ParentIndex to the top of the stack to the
/// function's \c SourceRegions.
@@ -616,13 +619,13 @@ struct CounterCoverageMappingBuilder
assert(!ParentOfDeferredRegion && "Deferred region with no parent");
}
- /// \brief Return the currently active region.
+ /// Return the currently active region.
SourceMappingRegion &getRegion() {
assert(!RegionStack.empty() && "statement has no region");
return RegionStack.back();
}
- /// \brief Propagate counts through the children of \c S.
+ /// Propagate counts through the children of \c S.
Counter propagateCounts(Counter TopCount, const Stmt *S) {
SourceLocation StartLoc = getStart(S);
SourceLocation EndLoc = getEnd(S);
@@ -639,7 +642,7 @@ struct CounterCoverageMappingBuilder
return ExitCount;
}
- /// \brief Check whether a region with bounds \c StartLoc and \c EndLoc
+ /// Check whether a region with bounds \c StartLoc and \c EndLoc
/// is already added to \c SourceRegions.
bool isRegionAlreadyAdded(SourceLocation StartLoc, SourceLocation EndLoc) {
return SourceRegions.rend() !=
@@ -650,7 +653,7 @@ struct CounterCoverageMappingBuilder
});
}
- /// \brief Adjust the most recently visited location to \c EndLoc.
+ /// Adjust the most recently visited location to \c EndLoc.
///
/// This should be used after visiting any statements in non-source order.
void adjustForOutOfOrderTraversal(SourceLocation EndLoc) {
@@ -667,7 +670,7 @@ struct CounterCoverageMappingBuilder
MostRecentLocation = getIncludeOrExpansionLoc(MostRecentLocation);
}
- /// \brief Adjust regions and state when \c NewLoc exits a file.
+ /// Adjust regions and state when \c NewLoc exits a file.
///
/// If moving from our most recently tracked location to \c NewLoc exits any
/// files, this adjusts our current region stack and creates the file regions
@@ -734,7 +737,7 @@ struct CounterCoverageMappingBuilder
MostRecentLocation = NewLoc;
}
- /// \brief Ensure that \c S is included in the current region.
+ /// Ensure that \c S is included in the current region.
void extendRegion(const Stmt *S) {
SourceMappingRegion &Region = getRegion();
SourceLocation StartLoc = getStart(S);
@@ -746,7 +749,7 @@ struct CounterCoverageMappingBuilder
completeDeferred(Region.getCounter(), StartLoc);
}
- /// \brief Mark \c S as a terminator, starting a zero region.
+ /// Mark \c S as a terminator, starting a zero region.
void terminateRegion(const Stmt *S) {
extendRegion(S);
SourceMappingRegion &Region = getRegion();
@@ -791,7 +794,7 @@ struct CounterCoverageMappingBuilder
popRegions(Index);
}
- /// \brief Keep counts of breaks and continues inside loops.
+ /// Keep counts of breaks and continues inside loops.
struct BreakContinue {
Counter BreakCount;
Counter ContinueCount;
@@ -805,7 +808,7 @@ struct CounterCoverageMappingBuilder
: CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap),
DeferredRegion(None) {}
- /// \brief Write the mapping data to the output stream
+ /// Write the mapping data to the output stream
void write(llvm::raw_ostream &OS) {
llvm::SmallVector<unsigned, 8> VirtualFileMapping;
gatherFileIDs(VirtualFileMapping);
@@ -831,22 +834,6 @@ struct CounterCoverageMappingBuilder
handleFileExit(getEnd(S));
}
- /// Determine whether the final deferred region emitted in \p Body should be
- /// discarded.
- static bool discardFinalDeferredRegionInDecl(Stmt *Body) {
- if (auto *CS = dyn_cast<CompoundStmt>(Body)) {
- Stmt *LastStmt = CS->body_back();
- if (auto *IfElse = dyn_cast<IfStmt>(LastStmt)) {
- if (auto *Else = dyn_cast_or_null<CompoundStmt>(IfElse->getElse()))
- LastStmt = Else->body_back();
- else
- LastStmt = IfElse->getElse();
- }
- return dyn_cast_or_null<ReturnStmt>(LastStmt);
- }
- return false;
- }
-
void VisitDecl(const Decl *D) {
assert(!DeferredRegion && "Deferred region never completed");
@@ -856,17 +843,13 @@ struct CounterCoverageMappingBuilder
if (Body && SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
return;
- Counter ExitCount = propagateCounts(getRegionCounter(Body), Body);
+ propagateCounts(getRegionCounter(Body), Body);
assert(RegionStack.empty() && "Regions entered but never exited");
- if (DeferredRegion) {
- // Complete (or discard) any deferred regions introduced by the last
- // statement.
- if (discardFinalDeferredRegionInDecl(Body))
- DeferredRegion = None;
- else
- popRegions(completeDeferred(ExitCount, getEnd(Body)));
- }
+ // Discard the last uncompleted deferred region in a decl, if one exists.
+ // This prevents lines at the end of a function containing only whitespace
+ // or closing braces from being marked as uncovered.
+ DeferredRegion = None;
}
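[Editor's note] An illustrative function for the new behavior (hypothetical): the deferred region opened by the final statement used to cover the trailing lines, which could surface as an uncovered run of whitespace and braces:

    int h(bool b) {
      if (b)
        return 1;
      return 0;
      // with the deferred region discarded, the closing brace below no
      // longer gets its own (potentially zero-count) region
    }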
void VisitReturnStmt(const ReturnStmt *S) {
@@ -889,6 +872,7 @@ struct CounterCoverageMappingBuilder
Counter LabelCount = getRegionCounter(S);
SourceLocation Start = getStart(S);
completeTopLevelDeferredRegion(LabelCount, Start);
+ completeDeferred(LabelCount, Start);
// We can't extendRegion here or we risk overlapping with our new region.
handleFileExit(Start);
pushRegion(LabelCount, Start);
@@ -979,20 +963,28 @@ struct CounterCoverageMappingBuilder
Counter ParentCount = getRegion().getCounter();
Counter BodyCount = getRegionCounter(S);
+ // The loop increment may contain a break or continue.
+ if (S->getInc())
+ BreakContinueStack.emplace_back();
+
// Handle the body first so that we can get the backedge count.
- BreakContinueStack.push_back(BreakContinue());
+ BreakContinueStack.emplace_back();
extendRegion(S->getBody());
Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
- BreakContinue BC = BreakContinueStack.pop_back_val();
+ BreakContinue BodyBC = BreakContinueStack.pop_back_val();
// The increment is essentially part of the body but it needs to include
// the count for all the continue statements.
- if (const Stmt *Inc = S->getInc())
- propagateCounts(addCounters(BackedgeCount, BC.ContinueCount), Inc);
+ BreakContinue IncrementBC;
+ if (const Stmt *Inc = S->getInc()) {
+ propagateCounts(addCounters(BackedgeCount, BodyBC.ContinueCount), Inc);
+ IncrementBC = BreakContinueStack.pop_back_val();
+ }
// Go back to handle the condition.
- Counter CondCount =
- addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
+ Counter CondCount = addCounters(
+ addCounters(ParentCount, BackedgeCount, BodyBC.ContinueCount),
+ IncrementBC.ContinueCount);
if (const Expr *Cond = S->getCond()) {
propagateCounts(CondCount, Cond);
adjustForOutOfOrderTraversal(getEnd(S));
@@ -1004,8 +996,8 @@ struct CounterCoverageMappingBuilder
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
- Counter OutCount =
- addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
+ Counter OutCount = addCounters(BodyBC.BreakCount, IncrementBC.BreakCount,
+ subtractCounters(CondCount, BodyCount));
if (OutCount != ParentCount)
pushRegion(OutCount);
}
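[Editor's note] A hedged sketch of code that exercises the new IncrementBC frame; getting break or continue into an increment requires a GNU statement expression, which Clang accepts (example assumed, not quoted from the patch's tests):

    extern bool stop;
    void use(int);
    void loop() {
      for (int i = 0; i < 10; ({ if (stop) break; ++i; })) {
        use(i);   // BodyBC covers the body; IncrementBC picks up the
      }           // 'break' in the increment for CondCount/OutCount
    }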
@@ -1361,8 +1353,7 @@ void CoverageMappingModuleGen::emit() {
// and coverage mappings is a multiple of 8.
if (size_t Rem = OS.str().size() % 8) {
CoverageMappingSize += 8 - Rem;
- for (size_t I = 0, S = 8 - Rem; I < S; ++I)
- OS << '\0';
+ OS.write_zeros(8 - Rem);
}
auto *FilenamesAndMappingsVal =
llvm::ConstantDataArray::getString(Ctx, OS.str(), false);
diff --git a/lib/CodeGen/CoverageMappingGen.h b/lib/CodeGen/CoverageMappingGen.h
index d07ed5ebcf2b..b08ad896d7a5 100644
--- a/lib/CodeGen/CoverageMappingGen.h
+++ b/lib/CodeGen/CoverageMappingGen.h
@@ -31,7 +31,7 @@ class Preprocessor;
class Decl;
class Stmt;
-/// \brief Stores additional source code information like skipped ranges which
+/// Stores additional source code information like skipped ranges which
/// is required by the coverage mapping generator and is obtained from
/// the preprocessor.
class CoverageSourceInfo : public PPCallbacks {
@@ -46,7 +46,7 @@ namespace CodeGen {
class CodeGenModule;
-/// \brief Organizes the cross-function state that is used while generating
+/// Organizes the cross-function state that is used while generating
/// code coverage mapping data.
class CoverageMappingModuleGen {
CodeGenModule &CGM;
@@ -65,7 +65,7 @@ public:
return SourceInfo;
}
- /// \brief Add a function's coverage mapping record to the collection of the
+ /// Add a function's coverage mapping record to the collection of the
/// function mapping records.
void addFunctionMappingRecord(llvm::GlobalVariable *FunctionName,
StringRef FunctionNameValue,
@@ -73,15 +73,15 @@ public:
const std::string &CoverageMapping,
bool IsUsed = true);
- /// \brief Emit the coverage mapping data for a translation unit.
+ /// Emit the coverage mapping data for a translation unit.
void emit();
- /// \brief Return the coverage mapping translation unit file id
+ /// Return the coverage mapping translation unit file id
/// for the given file.
unsigned getFileID(const FileEntry *File);
};
-/// \brief Organizes the per-function state that is used while generating
+/// Organizes the per-function state that is used while generating
/// code coverage mapping data.
class CoverageMappingGen {
CoverageMappingModuleGen &CVM;
@@ -99,12 +99,12 @@ public:
llvm::DenseMap<const Stmt *, unsigned> *CounterMap)
: CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap) {}
- /// \brief Emit the coverage mapping data which maps the regions of
+ /// Emit the coverage mapping data which maps the regions of
/// code to counters that will be used to find the execution
/// counts for those regions.
void emitCounterMapping(const Decl *D, llvm::raw_ostream &OS);
- /// \brief Emit the coverage mapping data for an unused function.
+ /// Emit the coverage mapping data for an unused function.
/// It creates mapping regions with the counter of zero.
void emitEmptyMapping(const Decl *D, llvm::raw_ostream &OS);
};
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index c375b82ea936..16fdd1c16a1d 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -31,9 +31,11 @@
#include "clang/AST/StmtCXX.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/ScopedPrinter.h"
using namespace clang;
using namespace CodeGen;
@@ -63,13 +65,6 @@ public:
bool classifyReturnType(CGFunctionInfo &FI) const override;
bool passClassIndirect(const CXXRecordDecl *RD) const {
- // Clang <= 4 used the pre-C++11 rule, which ignores move operations.
- // The PS4 platform ABI follows the behavior of Clang 3.2.
- if (CGM.getCodeGenOpts().getClangABICompat() <=
- CodeGenOptions::ClangABI::Ver4 ||
- CGM.getTriple().getOS() == llvm::Triple::PS4)
- return RD->hasNonTrivialDestructor() ||
- RD->hasNonTrivialCopyConstructor();
return !canCopyArgument(RD);
}
@@ -187,8 +182,7 @@ public:
emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn) override;
- void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport);
- void EmitFundamentalRTTIDescriptors(bool DLLExport);
+ void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty,
@@ -300,16 +294,11 @@ public:
// linkage together with vtables when needed.
if (ForVTable && !Thunk->hasLocalLinkage())
Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
-
- // Propagate dllexport storage, to enable the linker to generate import
- // thunks as necessary (e.g. when a parent class has a key function and a
- // child class doesn't, and the construction vtable for the parent in the
- // child needs to reference the parent's thunks).
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- if (MD->hasAttr<DLLExportAttr>())
- Thunk->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ CGM.setGVProperties(Thunk, GD);
}
+ bool exportThunk() override { return true; }
+
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
@@ -480,6 +469,7 @@ public:
explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
: ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
/*UseARMGuardVarABI=*/true) {}
+ void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
private:
bool HasThisReturn(GlobalDecl GD) const override {
@@ -632,13 +622,53 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
}
- VTable = Builder.CreateGEP(VTable, VTableOffset);
+ // Compute the address of the virtual function pointer.
+ llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
+
+ // Check the address of the function pointer if CFI on member function
+ // pointers is enabled.
+ llvm::Constant *CheckSourceLocation;
+ llvm::Constant *CheckTypeDesc;
+ bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
+ CGM.HasHiddenLTOVisibility(RD);
+ if (ShouldEmitCFICheck) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
+ CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getLocStart());
+ CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
+ llvm::Constant *StaticData[] = {
+ llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
+ CheckSourceLocation,
+ CheckTypeDesc,
+ };
+
+ llvm::Metadata *MD =
+ CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
+ llvm::Value *TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
+
+ llvm::Value *TypeTest = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::type_test), {VFPAddr, TypeId});
+
+ if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
+ CGF.EmitTrapCheck(TypeTest);
+ } else {
+ llvm::Value *AllVtables = llvm::MetadataAsValue::get(
+ CGM.getLLVMContext(),
+ llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
+ llvm::Value *ValidVtable = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
+ CGF.EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIMFCall),
+ SanitizerHandler::CFICheckFail, StaticData,
+ {VTable, ValidVtable});
+ }
+
+ FnVirtual = Builder.GetInsertBlock();
+ }
// Load the virtual function to call.
- VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
- llvm::Value *VirtualFn =
- Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(),
- "memptr.virtualfn");
+ VFPAddr = Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
+ llvm::Value *VirtualFn = Builder.CreateAlignedLoad(
+ VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
CGF.EmitBranch(FnEnd);
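[Editor's note] For orientation, a hedged example of a call the new CFIMFCall check instruments; with -fsanitize=cfi-mfcall, the virtual path tests the vtable slot address against the type metadata for the member pointer type before loading from it:

    struct B { virtual void f(); };
    void call(B *b, void (B::*mfp)()) {
      (b->*mfp)();   // llvm.type.test on the slot address; traps or reports
    }                // a CFI check failure on a mismatch, per the options above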
// In the non-virtual path, the function pointer is actually a
@@ -647,6 +677,43 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *NonVirtualFn =
Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+ // Check the function pointer if CFI on member function pointers is enabled.
+ if (ShouldEmitCFICheck) {
+ CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ if (RD->hasDefinition()) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
+ llvm::Constant *StaticData[] = {
+ llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
+ CheckSourceLocation,
+ CheckTypeDesc,
+ };
+
+ llvm::Value *Bit = Builder.getFalse();
+ llvm::Value *CastedNonVirtualFn =
+ Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
+ for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
+ getContext().getMemberPointerType(
+ MPT->getPointeeType(),
+ getContext().getRecordType(Base).getTypePtr()));
+ llvm::Value *TypeId =
+ llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
+
+ llvm::Value *TypeTest =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
+ {CastedNonVirtualFn, TypeId});
+ Bit = Builder.CreateOr(Bit, TypeTest);
+ }
+
+ CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
+ SanitizerHandler::CFICheckFail, StaticData,
+ {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
+
+ FnNonVirtual = Builder.GetInsertBlock();
+ }
+ }
+
// We're done.
CGF.EmitBlock(FnEnd);
llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
@@ -836,7 +903,6 @@ ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
CharUnits ThisAdjustment) {
assert(MD->isInstance() && "Member function must not be static!");
- MD = MD->getCanonicalDecl();
CodeGenTypes &Types = CGM.getTypes();
@@ -1182,7 +1248,7 @@ static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}
-/// \brief Compute the src2dst_offset hint as described in the
+/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
const CXXRecordDecl *Src,
@@ -1448,7 +1514,7 @@ void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
return;
/// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
- /// adjustments are required, becuase they are all handled by thunks.
+ /// adjustments are required, because they are all handled by thunks.
setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
/// Initialize the 'vtt' slot if needed.
@@ -1479,8 +1545,7 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
- Args.insert(Args.begin() + 1,
- CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
+ Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
return AddedStructorArgs::prefix(1); // Added one arg.
}
@@ -1531,7 +1596,7 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
// Set the right visibility.
- CGM.setGlobalVisibility(VTable, RD, ForDefinition);
+ CGM.setGVProperties(VTable, RD);
// Use pointer alignment for the vtable. Otherwise we would align them based
// on the size of the initializer which doesn't make sense as only single
@@ -1548,7 +1613,7 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
DC->getParent()->isTranslationUnit())
- EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());
+ EmitFundamentalRTTIDescriptors(RD);
if (!VTable->isDeclarationForLinker())
CGM.EmitVTableTypeMetadata(VTable, VTLayout);
@@ -1641,12 +1706,8 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VTableType, llvm::GlobalValue::ExternalLinkage);
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- CGM.setGlobalVisibility(VTable, RD, NotForDefinition);
- if (RD->hasAttr<DLLImportAttr>())
- VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- else if (RD->hasAttr<DLLExportAttr>())
- VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ CGM.setGVProperties(VTable, RD);
return VTable;
}
@@ -1656,7 +1717,6 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
- GD = GD.getCanonicalDecl();
Ty = Ty->getPointerTo()->getPointerTo();
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
@@ -1690,7 +1750,7 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = VFuncLoad;
}
- CGCallee Callee(MethodDecl, VFunc);
+ CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
return Callee;
}
@@ -1702,10 +1762,9 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
Dtor, getFromDtorType(DtorType));
- llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
+ llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
CGCallee Callee =
- getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
- CE ? CE->getLocStart() : SourceLocation());
+ CGCallee::forVirtual(CE, GlobalDecl(Dtor, DtorType), This, Ty);
CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
This.getPointer(), /*ImplicitParam=*/nullptr,
@@ -1725,11 +1784,19 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
if (CGM.getLangOpts().AppleKext)
return false;
- // If we don't have any not emitted inline virtual function, and if vtable is
- // not hidden, then we are safe to emit available_externally copy of vtable.
+ // If the vtable is hidden, then it is not safe to emit an
+ // available_externally copy of the vtable.
+ if (isVTableHidden(RD))
+ return false;
+
+ if (CGM.getCodeGenOpts().ForceEmitVTables)
+ return true;
+
+ // If there are no inline virtual functions that may be left unemitted, we
+ // are safe to emit an available_externally copy of the vtable.
// FIXME: we can still emit a copy of the vtable if we
// can emit definitions of the inline functions.
- return !hasAnyUnusedVirtualInlineFunction(RD) && !isVTableHidden(RD);
+ return !hasAnyUnusedVirtualInlineFunction(RD);
}
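[Editor's note] An illustrative class that qualifies (sketch): every virtual function is inline, so an available_externally copy of the vtable never references a definition we could not also emit:

    struct S {
      virtual int f() { return 1; }   // inline, emittable in any TU
      virtual int g() { return 2; }
    };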
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
@@ -1848,7 +1915,8 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// Handle the array cookie specially in ASan.
if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
- expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
+ (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
+ CGM.getCodeGenOpts().SanitizeAddressPoisonClassMemberArrayNewCookie)) {
// The store to the CookiePtr does not need to be instrumented.
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
@@ -2052,6 +2120,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
false, var->getLinkage(),
llvm::ConstantInt::get(guardTy, 0),
guardName.str());
+ guard->setDSOLocal(var->isDSOLocal());
guard->setVisibility(var->getVisibility());
// If the variable is thread-local, so is its guard variable.
guard->setThreadLocalMode(var->getThreadLocalMode());
@@ -2211,6 +2280,13 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ if (!addr)
+ // addr is null when we are trying to register a dtor annotated with
+ // __attribute__((destructor)) in a constructor function. Using null here is
+ // okay because this argument is just passed back to the destructor
+ // function.
+ addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+
llvm::Value *args[] = {
llvm::ConstantExpr::getBitCast(dtor, dtorTy),
llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
@@ -2219,6 +2295,48 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
CGF.EmitNounwindRuntimeCall(atexit, args);
}
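[Editor's note] A hedged sketch of the null-addr case described above; for a function marked __attribute__((destructor)) there is no object being destroyed, so a null pointer is simply threaded through to the callback:

    // int __cxa_atexit(void (*f)(void *), void *p, void *d);
    __attribute__((destructor)) static void teardown(void);
    // registered roughly as: __cxa_atexit((void (*)(void *))teardown,
    //                                     /*p=*/nullptr, &__dso_handle);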
+void CodeGenModule::registerGlobalDtorsWithAtExit() {
+ for (const auto I : DtorsUsingAtExit) {
+ int Priority = I.first;
+ const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
+
+ // Create a function that registers destructors that have the same priority.
+ //
+ // Since constructor functions are run in non-descending order of their
+ // priorities, destructors are registered in non-descending order of their
+ // priorities, and since destructor functions are run in the reverse order
+ // of their registration, destructor functions are run in non-ascending
+ // order of their priorities.
+ CodeGenFunction CGF(*this);
+ std::string GlobalInitFnName =
+ std::string("__GLOBAL_init_") + llvm::to_string(Priority);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
+ FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
+ SourceLocation());
+ ASTContext &Ctx = getContext();
+ FunctionDecl *FD = FunctionDecl::Create(
+ Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get(GlobalInitFnName), Ctx.VoidTy, nullptr, SC_Static,
+ false, false);
+ CGF.StartFunction(GlobalDecl(FD), getContext().VoidTy, GlobalInitFn,
+ getTypes().arrangeNullaryFunction(), FunctionArgList(),
+ SourceLocation(), SourceLocation());
+
+ for (auto *Dtor : Dtors) {
+ // Register the destructor function calling __cxa_atexit if it is
+ // available. Otherwise fall back on calling atexit.
+ if (getCodeGenOpts().CXAAtExit)
+ emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
+ else
+ CGF.registerGlobalDtorWithAtExit(Dtor);
+ }
+
+ CGF.FinishFunction();
+ AddGlobalCtor(GlobalInitFn, Priority, nullptr);
+ }
+}
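[Editor's note] A usage sketch (hypothetical names): each priority gets its own __GLOBAL_init_<N> registration function, and the atexit LIFO ordering then runs higher-priority destructors first:

    __attribute__((destructor(101))) static void late_teardown(void);
    __attribute__((destructor(202))) static void early_teardown(void);
    // __GLOBAL_init_101 runs before __GLOBAL_init_202 at startup, so at
    // exit early_teardown (202) runs before late_teardown (101).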
+
/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
const VarDecl &D,
@@ -2407,8 +2525,10 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
}
- if (Init)
+ if (Init) {
Init->setVisibility(Var->getVisibility());
+ Init->setDSOLocal(Var->isDSOLocal());
+ }
llvm::LLVMContext &Context = CGM.getModule().getContext();
llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
@@ -2416,8 +2536,12 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
if (InitIsInitFunc) {
if (Init) {
llvm::CallInst *CallVal = Builder.CreateCall(Init);
- if (isThreadWrapperReplaceable(VD, CGM))
+ if (isThreadWrapperReplaceable(VD, CGM)) {
CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
+ llvm::Function *Fn =
+ cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
+ Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
+ }
}
} else {
// Don't know whether we have an init function. Call it if it exists.
@@ -2574,12 +2698,16 @@ public:
BCTI_Public = 0x2
};
+ /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
+ /// link to an existing RTTI descriptor if one already exists.
+ llvm::Constant *BuildTypeInfo(QualType Ty);
+
/// BuildTypeInfo - Build the RTTI type info struct for the given type.
- ///
- /// \param Force - true to force the creation of this RTTI value
- /// \param DLLExport - true to mark the RTTI value as DLLExport
- llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
- bool DLLExport = false);
+ llvm::Constant *BuildTypeInfo(
+ QualType Ty,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::GlobalValue::VisibilityTypes Visibility,
+ llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
@@ -2622,11 +2750,8 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
/*Constant=*/true,
llvm::GlobalValue::ExternalLinkage, nullptr,
Name);
- if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (RD->hasAttr<DLLImportAttr>())
- GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
- }
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ CGM.setGVProperties(GV, RD);
}
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
@@ -2673,6 +2798,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::LongDouble:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
case BuiltinType::Int128:
@@ -2687,6 +2813,30 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
case BuiltinType::OCLReserveID:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
return false;
case BuiltinType::Dependent:
@@ -2761,6 +2911,11 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
// N.B. We must always emit the RTTI data ourselves if there exists a key
// function.
bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
+
+ // Don't import the RTTI but emit it locally.
+ if (CGM.getTriple().isWindowsGNUEnvironment() && IsDLLImport)
+ return false;
+
if (CGM.getVTables().isVTableExternal(RD))
return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
? false
@@ -2953,6 +3108,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm::Constant *VTable =
CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+ CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
@@ -2966,7 +3122,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
Fields.push_back(VTable);
}
-/// \brief Return the linkage that the type info and type info name constants
+/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
QualType Ty) {
@@ -3020,8 +3176,7 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
llvm_unreachable("Invalid linkage!");
}
-llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
- bool DLLExport) {
+llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
// We want to operate on the canonical type.
Ty = Ty.getCanonicalType();
@@ -3039,17 +3194,41 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
}
// Check if there is already an external RTTI descriptor for this type.
- bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
- if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
+ if (IsStandardLibraryRTTIDescriptor(Ty) ||
+ ShouldUseExternalRTTIDescriptor(CGM, Ty))
return GetAddrOfExternalRTTIDescriptor(Ty);
// Emit the standard library with external linkage.
- llvm::GlobalVariable::LinkageTypes Linkage;
- if (IsStdLib)
- Linkage = llvm::GlobalValue::ExternalLinkage;
+ llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // Give the type_info object and name the formal visibility of the
+ // type itself.
+ llvm::GlobalValue::VisibilityTypes llvmVisibility;
+ if (llvm::GlobalValue::isLocalLinkage(Linkage))
+ // If the linkage is local, only default visibility makes sense.
+ llvmVisibility = llvm::GlobalValue::DefaultVisibility;
+ else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
+ ItaniumCXXABI::RUK_NonUniqueHidden)
+ llvmVisibility = llvm::GlobalValue::HiddenVisibility;
else
- Linkage = getTypeInfoLinkage(CGM, Ty);
+ llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
+
+ llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
+ llvm::GlobalValue::DefaultStorageClass;
+ if (CGM.getTriple().isWindowsItaniumEnvironment()) {
+ auto RD = Ty->getAsCXXRecordDecl();
+ if (RD && RD->hasAttr<DLLExportAttr>())
+ DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
+ }
+ return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
+}
+
+llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
+ QualType Ty,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::GlobalValue::VisibilityTypes Visibility,
+ llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
// Add the vtable pointer.
BuildVTablePointer(cast<Type>(Ty));
@@ -3163,7 +3342,11 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
llvm::Module &M = CGM.getModule();
+ llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(M, Init->getType(),
/*Constant=*/true, Linkage, Init, Name);
@@ -3195,37 +3378,14 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
// All of this is to say that it's important that both the type_info
// object and the type_info name be uniqued when weakly emitted.
- // Give the type_info object and name the formal visibility of the
- // type itself.
- llvm::GlobalValue::VisibilityTypes llvmVisibility;
- if (llvm::GlobalValue::isLocalLinkage(Linkage))
- // If the linkage is local, only default visibility makes sense.
- llvmVisibility = llvm::GlobalValue::DefaultVisibility;
- else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
- llvmVisibility = llvm::GlobalValue::HiddenVisibility;
- else
- llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
+ TypeName->setVisibility(Visibility);
+ CGM.setDSOLocal(TypeName);
- TypeName->setVisibility(llvmVisibility);
- GV->setVisibility(llvmVisibility);
+ GV->setVisibility(Visibility);
+ CGM.setDSOLocal(GV);
- if (CGM.getTriple().isWindowsItaniumEnvironment()) {
- auto RD = Ty->getAsCXXRecordDecl();
- if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
- TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- } else if (RD && RD->hasAttr<DLLImportAttr>() &&
- ShouldUseExternalRTTIDescriptor(CGM, Ty)) {
- TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
-
- // Because the typename and the typeinfo are DLL import, convert them to
- // declarations rather than definitions. The initializers still need to
- // be constructed to calculate the type for the declarations.
- TypeName->setInitializer(nullptr);
- GV->setInitializer(nullptr);
- }
- }
+ TypeName->setDLLStorageClass(DLLStorageClass);
+ GV->setDLLStorageClass(DLLStorageClass);
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
@@ -3433,11 +3593,9 @@ static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
if (auto *Proto = Type->getAs<FunctionProtoType>()) {
- if (Proto->isNothrow(Ctx)) {
+ if (Proto->isNothrow()) {
Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
- Type = Ctx.getFunctionType(
- Proto->getReturnType(), Proto->getParamTypes(),
- Proto->getExtProtoInfo().withExceptionSpec(EST_None));
+ Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
}
}
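[Editor's note] An illustrative consequence of the flag handling above (hedged): the noexcept-ness of the pointee is stripped from the canonical type and carried in PTI_Noexcept instead, which lets a C++17 handler for the non-noexcept pointer type still match:

    void thrower(void (*fp)() noexcept) {
      throw fp;   // RTTI: pointer to 'void ()' plus PTI_Noexcept, so
    }             // 'catch (void (*)())' can match the thrown pointer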
@@ -3502,18 +3660,7 @@ llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}
-void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type,
- bool DLLExport) {
- QualType PointerType = getContext().getPointerType(Type);
- QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
- ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport);
- ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true,
- DLLExport);
- ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true,
- DLLExport);
-}
-
-void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
+void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
// Types added here must also be added to TypeInfoIsInStandardLibrary.
QualType FundamentalTypes[] = {
getContext().VoidTy, getContext().NullPtrTy,
@@ -3527,10 +3674,24 @@ void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
getContext().UnsignedInt128Ty, getContext().HalfTy,
getContext().FloatTy, getContext().DoubleTy,
getContext().LongDoubleTy, getContext().Float128Ty,
- getContext().Char16Ty, getContext().Char32Ty
+ getContext().Char8Ty, getContext().Char16Ty,
+ getContext().Char32Ty
};
- for (const QualType &FundamentalType : FundamentalTypes)
- EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
+ llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
+ RD->hasAttr<DLLExportAttr>()
+ ? llvm::GlobalValue::DLLExportStorageClass
+ : llvm::GlobalValue::DefaultStorageClass;
+ llvm::GlobalValue::VisibilityTypes Visibility =
+ CodeGenModule::GetLLVMVisibility(RD->getVisibility());
+ for (const QualType &FundamentalType : FundamentalTypes) {
+ QualType PointerType = getContext().getPointerType(FundamentalType);
+ QualType PointerTypeConst = getContext().getPointerType(
+ FundamentalType.withConst());
+ for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(
+ Type, llvm::GlobalValue::ExternalLinkage,
+ Visibility, DLLStorageClass);
+ }
}
/// What sort of uniqueness rules should we use for the RTTI for the
@@ -3583,12 +3744,22 @@ static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
}
llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
- if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
- return StructorCodegen::RAUW;
+ // All discardable structors can be RAUWed, but we don't want to do that in
+ // unoptimized code, as that would make the complete structor symbol
+ // disappear entirely, which degrades the debugging experience.
+ // Symbols with private linkage can be safely aliased, so we special-case
+ // them here.
+ if (llvm::GlobalValue::isLocalLinkage(Linkage))
+ return CGM.getCodeGenOpts().OptimizationLevel > 0 ? StructorCodegen::RAUW
+ : StructorCodegen::Alias;
+ // Linkonce structors can be neither aliased nor placed in a comdat, so
+ // these need to be emitted separately.
// FIXME: Should we allow available_externally aliases?
- if (!llvm::GlobalAlias::isValidLinkage(Linkage))
- return StructorCodegen::RAUW;
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) ||
+ !llvm::GlobalAlias::isValidLinkage(Linkage))
+ return CGM.getCodeGenOpts().OptimizationLevel > 0 ? StructorCodegen::RAUW
+ : StructorCodegen::Emit;
if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
// Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
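[Editor's note] For orientation, a hedged sketch of the Alias strategy selected above: when the complete and base variants of a destructor are identical, the complete-object symbol becomes an alias of the base-object one instead of being RAUWed away, so a D1 symbol stays visible to debuggers at -O0:

    struct T { ~T(); };   // no virtual bases: D1 and D2 are identical
    // _ZN1TD1Ev (complete dtor) = alias of _ZN1TD2Ev (base dtor)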
@@ -3616,6 +3787,9 @@ static void emitConstructorDestructorAlias(CodeGenModule &CGM,
// Create the alias with no name.
auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
+ // Constructors and destructors are always unnamed_addr.
+ Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+
// Switch any previous uses to the alias.
if (Entry) {
assert(Entry->getType() == Aliasee->getType() &&
@@ -3628,7 +3802,7 @@ static void emitConstructorDestructorAlias(CodeGenModule &CGM,
}
// Finally, set up the alias with its proper name and attributes.
- CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+ CGM.SetCommonAttributes(AliasDecl, Alias);
}
void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
@@ -3904,7 +4078,9 @@ static void InitCatchParam(CodeGenFunction &CGF,
llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
caughtExnAlignment);
- CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
+ LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
+ LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
+ CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
return;
}
@@ -3931,7 +4107,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
AggValueSlot::forAddr(ParamAddr, Qualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
@@ -4051,3 +4228,11 @@ ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) {
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
+
+void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *C) {
+ if (CGF.getTarget().hasFeature("exception-handling"))
+ CGF.EHStack.pushCleanup<CatchRetScope>(
+ NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
+ ItaniumCXXABI::emitBeginCatch(CGF, C);
+}
diff --git a/lib/CodeGen/MacroPPCallbacks.cpp b/lib/CodeGen/MacroPPCallbacks.cpp
index a6f21d8ddcfb..48dea7d54b1e 100644
--- a/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/lib/CodeGen/MacroPPCallbacks.cpp
@@ -178,7 +178,8 @@ void MacroPPCallbacks::FileChanged(SourceLocation Loc, FileChangeReason Reason,
void MacroPPCallbacks::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
bool IsAngled, CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath, const Module *Imported) {
+ StringRef SearchPath, StringRef RelativePath, const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) {
// Record the line location of the current included file.
LastHashLoc = HashLoc;
diff --git a/lib/CodeGen/MacroPPCallbacks.h b/lib/CodeGen/MacroPPCallbacks.h
index e117f96f47df..48c67e2d36ad 100644
--- a/lib/CodeGen/MacroPPCallbacks.h
+++ b/lib/CodeGen/MacroPPCallbacks.h
@@ -101,7 +101,8 @@ public:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override;
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override;
/// Hook called whenever a macro definition is seen.
void MacroDefined(const Token &MacroNameTok,
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index ffb3681c2585..81ed05059546 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -216,13 +216,20 @@ public:
return DT != Dtor_Base;
}
+ void setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const override;
+
+ llvm::GlobalValue::LinkageTypes
+ getCXXDestructorLinkage(GVALinkage Linkage, const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const override;
+
void EmitCXXDestructors(const CXXDestructorDecl *D) override;
const CXXRecordDecl *
getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
- MD = MD->getCanonicalDecl();
if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
- MicrosoftVTableContext::MethodVFTableLocation ML =
+ MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
// The vbases might be ordered differently in the final overrider object
// and the complete object, so the "this" argument may sometimes point to
@@ -357,9 +364,6 @@ public:
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
GlobalDecl GD, bool ReturnAdjustment) override {
- // Never dllimport/dllexport thunks.
- Thunk->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
-
GVALinkage Linkage =
getContext().GetGVALinkageForFunction(cast<FunctionDecl>(GD.getDecl()));
@@ -371,6 +375,8 @@ public:
Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage);
}
+ bool exportThunk() override { return false; }
+
llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
const ThisAdjustment &TA) override;
@@ -516,10 +522,12 @@ public:
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name))
return GV;
- return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty,
- /*isConstant=*/true,
- llvm::GlobalValue::ExternalLinkage,
- /*Initializer=*/nullptr, Name);
+ auto *GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty,
+ /*isConstant=*/true,
+ llvm::GlobalValue::ExternalLinkage,
+ /*Initializer=*/nullptr, Name);
+ CGM.setDSOLocal(GV);
+ return GV;
}
llvm::Constant *getImageRelativeConstant(llvm::Constant *PtrVal) {
@@ -558,7 +566,7 @@ private:
GetNullMemberPointerFields(const MemberPointerType *MPT,
llvm::SmallVectorImpl<llvm::Constant *> &fields);
- /// \brief Shared code for virtual base adjustment. Returns the offset from
+ /// Shared code for virtual base adjustment. Returns the offset from
/// the vbptr to the virtual base. Optionally returns the address of the
/// vbptr itself.
llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
@@ -582,14 +590,14 @@ private:
performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy);
- /// \brief Performs a full virtual base adjustment. Used to dereference
+ /// Performs a full virtual base adjustment. Used to dereference
/// pointers to members of virtual bases.
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E,
const CXXRecordDecl *RD, Address Base,
llvm::Value *VirtualBaseAdjustmentOffset,
llvm::Value *VBPtrOffset /* optional */);
- /// \brief Emits a full member pointer with the fields common to data and
+ /// Emits a full member pointer with the fields common to data and
/// function member pointers.
llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField,
bool IsMemberFunction,
@@ -600,16 +608,15 @@ private:
bool MemberPointerConstantIsNull(const MemberPointerType *MPT,
llvm::Constant *MP);
- /// \brief - Initialize all vbptrs of 'this' with RD as the complete type.
+ /// Initialize all vbptrs of 'this' with RD as the complete type.
void EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD);
- /// \brief Caching wrapper around VBTableBuilder::enumerateVBTables().
+ /// Caching wrapper around VBTableBuilder::enumerateVBTables().
const VBTableGlobals &enumerateVBTables(const CXXRecordDecl *RD);
- /// \brief Generate a thunk for calling a virtual member function MD.
- llvm::Function *EmitVirtualMemPtrThunk(
- const CXXMethodDecl *MD,
- const MicrosoftVTableContext::MethodVFTableLocation &ML);
+ /// Generate a thunk for calling a virtual member function MD.
+ llvm::Function *EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
+ const MethodVFTableLocation &ML);
public:
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
@@ -753,15 +760,15 @@ private:
typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy;
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy;
typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalValue *> VFTablesMapTy;
- /// \brief All the vftables that have been referenced.
+ /// All the vftables that have been referenced.
VFTablesMapTy VFTablesMap;
VTablesMapTy VTablesMap;
- /// \brief This set holds the record decls we've deferred vtable emission for.
+ /// This set holds the record decls we've deferred vtable emission for.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> DeferredVFTables;
- /// \brief All the vbtables which have been referenced.
+ /// All the vbtables which have been referenced.
llvm::DenseMap<const CXXRecordDecl *, VBTableGlobals> VBTablesMap;
/// Info on the global variable used to guard initialization of static locals.
@@ -820,45 +827,8 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
return RAA_Default;
case llvm::Triple::x86_64:
- // If a class has a destructor, we'd really like to pass it indirectly
- // because it allows us to elide copies. Unfortunately, MSVC makes that
- // impossible for small types, which it will pass in a single register or
- // stack slot. Most objects with dtors are large-ish, so handle that early.
- // We can't call out all large objects as being indirect because there are
- // multiple x64 calling conventions and the C++ ABI code shouldn't dictate
- // how we pass large POD types.
- //
- // Note: This permits small classes with nontrivial destructors to be
- // passed in registers, which is non-conforming.
- if (RD->hasNonTrivialDestructor() &&
- getContext().getTypeSize(RD->getTypeForDecl()) > 64)
- return RAA_Indirect;
-
- // If a class has at least one non-deleted, trivial copy constructor, it
- // is passed according to the C ABI. Otherwise, it is passed indirectly.
- //
- // Note: This permits classes with non-trivial copy or move ctors to be
- // passed in registers, so long as they *also* have a trivial copy ctor,
- // which is non-conforming.
- if (RD->needsImplicitCopyConstructor()) {
- // If the copy ctor has not yet been declared, we can read its triviality
- // off the AST.
- if (!RD->defaultedCopyConstructorIsDeleted() &&
- RD->hasTrivialCopyConstructor())
- return RAA_Default;
- } else {
- // Otherwise, we need to find the copy constructor(s) and ask.
- for (const CXXConstructorDecl *CD : RD->ctors()) {
- if (CD->isCopyConstructor()) {
- // We had at least one nondeleted trivial copy ctor. Return directly.
- if (!CD->isDeleted() && CD->isTrivial())
- return RAA_Default;
- }
- }
- }
-
- // We have no trivial, non-deleted copy constructor.
- return RAA_Indirect;
+ case llvm::Triple::aarch64:
+ return !canCopyArgument(RD) ? RAA_Indirect : RAA_Default;
}
llvm_unreachable("invalid enum");
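A minimal sketch of the rule the deleted comment block spelled out, now expressed
through canCopyArgument(RD); the type names here are hypothetical:

  struct PassDirect {       // trivially copyable, trivial dtor
    int a, b;               //   -> RAA_Default: passed in registers/stack
  };
  struct PassIndirect {     // non-trivial dtor, wider than 64 bits
    int a, b, c;
    ~PassIndirect();        //   -> RAA_Indirect: caller passes a pointer
  };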
@@ -890,20 +860,6 @@ void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
CGF.EmitRuntimeCallOrInvoke(Fn, Args);
}
-namespace {
-struct CatchRetScope final : EHScopeStack::Cleanup {
- llvm::CatchPadInst *CPI;
-
- CatchRetScope(llvm::CatchPadInst *CPI) : CPI(CPI) {}
-
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- llvm::BasicBlock *BB = CGF.createBasicBlock("catchret.dest");
- CGF.Builder.CreateCatchRet(CPI, BB);
- CGF.EmitBlock(BB);
- }
-};
-}
-
void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
const CXXCatchStmt *S) {
// In the MS ABI, the runtime handles the copy, and the catch handler is
@@ -1105,10 +1061,22 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
// the second parameter.
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod());
+
+ // aarch64-windows requires that instance methods use X1 for the return
+  // address. So for aarch64-windows we do not mark the return as SRet.
+ FI.getReturnInfo().setSuppressSRet(CGM.getTarget().getTriple().getArch() ==
+ llvm::Triple::aarch64);
return true;
} else if (!RD->isPOD()) {
// If it's a free function, non-POD types are returned indirectly.
FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+
+ // aarch64-windows requires that non-POD, non-instance returns use X0 for
+ // the return address. So for aarch64-windows we do not mark the return as
+ // SRet.
+ FI.getReturnInfo().setSuppressSRet(CGM.getTarget().getTriple().getArch() ==
+ llvm::Triple::aarch64);
return true;
}
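A sketch of the shape this affects, with hypothetical types: on aarch64-windows
the hidden return slot for such a call travels in X1 (after 'this' in X0), so
the indirect return is deliberately not tagged 'sret':

  struct Big { Big(const Big &); int v[4]; };
  struct C { Big get(); };   // instance method returning indirectly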
@@ -1182,15 +1150,16 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
unsigned AS = getThisAddress(CGF).getAddressSpace();
llvm::Value *Int8This = nullptr; // Initialize lazily.
- for (VBOffsets::const_iterator I = VBaseMap.begin(), E = VBaseMap.end();
- I != E; ++I) {
+ for (const CXXBaseSpecifier &S : RD->vbases()) {
+ const CXXRecordDecl *VBase = S.getType()->getAsCXXRecordDecl();
+ auto I = VBaseMap.find(VBase);
+ assert(I != VBaseMap.end());
if (!I->second.hasVtorDisp())
continue;
llvm::Value *VBaseOffset =
- GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, I->first);
- uint64_t ConstantVBaseOffset =
- Layout.getVBaseClassOffset(I->first).getQuantity();
+ GetVirtualBaseClassOffset(CGF, getThisAddress(CGF), RD, VBase);
+ uint64_t ConstantVBaseOffset = I->second.VBaseOffset.getQuantity();
// vtorDisp_for_vbase = vbptr[vbase_idx] - offsetof(RD, vbase).
llvm::Value *VtorDispValue = Builder.CreateSub(
@@ -1233,7 +1202,7 @@ void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) {
llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure);
Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
- Fn->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ CGM.setGVProperties(Fn, D);
}
}
@@ -1295,6 +1264,52 @@ MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
return Added;
}
+void MicrosoftCXXABI::setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const {
+ // Deleting destructor variants are never imported or exported. Give them the
+ // default storage class.
+ if (DT == Dtor_Deleting) {
+ GV->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ } else {
+ const NamedDecl *ND = Dtor;
+ CGM.setDLLImportDLLExport(GV, ND);
+ }
+}
+
+llvm::GlobalValue::LinkageTypes MicrosoftCXXABI::getCXXDestructorLinkage(
+ GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
+ // Internal things are always internal, regardless of attributes. After this,
+ // we know the thunk is externally visible.
+ if (Linkage == GVA_Internal)
+ return llvm::GlobalValue::InternalLinkage;
+
+ switch (DT) {
+ case Dtor_Base:
+    // The base destructor most closely tracks the user-declared destructor,
+    // so we delegate back to the normal declarator case.
+ return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
+ /*isConstantVariable=*/false);
+ case Dtor_Complete:
+ // The complete destructor is like an inline function, but it may be
+ // imported and therefore must be exported as well. This requires changing
+ // the linkage if a DLL attribute is present.
+ if (Dtor->hasAttr<DLLExportAttr>())
+ return llvm::GlobalValue::WeakODRLinkage;
+ if (Dtor->hasAttr<DLLImportAttr>())
+ return llvm::GlobalValue::AvailableExternallyLinkage;
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ case Dtor_Deleting:
+ // Deleting destructors are like inline functions. They have vague linkage
+ // and are emitted everywhere they are used. They are internal if the class
+ // is internal.
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ case Dtor_Comdat:
+ llvm_unreachable("MS C++ ABI does not support comdat dtors");
+ }
+ llvm_unreachable("invalid dtor type");
+}
+
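As a sketch of the mapping above for a hypothetical dllexported class:

  struct __declspec(dllexport) S { ~S(); };
  // Dtor_Base     -> linkage of the declarator (normal dllexport handling)
  // Dtor_Complete -> WeakODRLinkage: must be emitted and exported
  // Dtor_Deleting -> LinkOnceODRLinkage: vague linkage, never exported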
void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
// The TU defining a dtor is only guaranteed to emit a base destructor. All
// other destructor variants are delegating thunks.
@@ -1303,10 +1318,8 @@ void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
CharUnits
MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
- GD = GD.getCanonicalDecl();
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- GlobalDecl LookupGD = GD;
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
// Complete destructors take a pointer to the complete object as a
// parameter, thus don't need this adjustment.
@@ -1315,11 +1328,11 @@ MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) {
// There's no Dtor_Base in vftable but it shares the this adjustment with
// the deleting one, so look it up instead.
- LookupGD = GlobalDecl(DD, Dtor_Deleting);
+ GD = GlobalDecl(DD, Dtor_Deleting);
}
- MicrosoftVTableContext::MethodVFTableLocation ML =
- CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
+ MethodVFTableLocation ML =
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
CharUnits Adjustment = ML.VFPtrOffset;
// Normal virtual instance methods need to adjust from the vfptr that first
@@ -1353,7 +1366,6 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
return CGF.Builder.CreateConstByteGEP(This, Adjustment);
}
- GD = GD.getCanonicalDecl();
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
GlobalDecl LookupGD = GD;
@@ -1367,7 +1379,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
// with the base one, so look up the deleting one instead.
LookupGD = GlobalDecl(DD, Dtor_Deleting);
}
- MicrosoftVTableContext::MethodVFTableLocation ML =
+ MethodVFTableLocation ML =
CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
CharUnits StaticOffset = ML.VFPtrOffset;
@@ -1523,8 +1535,7 @@ CGCXXABI::AddedStructorArgs MicrosoftCXXABI::addImplicitConstructorArgs(
}
RValue RV = RValue::get(MostDerivedArg);
if (FPT->isVariadic()) {
- Args.insert(Args.begin() + 1,
- CallArg(RV, getContext().IntTy, /*needscopy=*/false));
+ Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy));
return AddedStructorArgs::prefix(1);
}
Args.add(RV, getContext().IntTy);
@@ -1535,6 +1546,12 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This) {
+ // Use the base destructor variant in place of the complete destructor variant
+ // if the class has no virtual bases. This effectively implements some of the
+ // -mconstructor-aliases optimization, but as part of the MS C++ ABI.
+ if (Type == Dtor_Complete && DD->getParent()->getNumVBases() == 0)
+ Type = Dtor_Base;
+
CGCallee Callee = CGCallee::forDirect(
CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
DD);
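An illustration of the substitution above, with hypothetical types:

  struct A { ~A(); };               // no vbases: complete call -> base variant
  struct B : virtual A { ~B(); };   // has a vbase: complete variant kept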
@@ -1817,7 +1834,6 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
- GD = GD.getCanonicalDecl();
CGBuilderTy &Builder = CGF.Builder;
Ty = Ty->getPointerTo()->getPointerTo();
@@ -1828,8 +1844,7 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
- MicrosoftVTableContext::MethodVFTableLocation ML =
- VFTContext.getMethodVFTableLocation(GD);
+ MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
// Compute the identity of the most derived class whose virtual table is
// located at the MethodVFTableLocation ML.
@@ -1857,7 +1872,7 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
- CGCallee Callee(MethodDecl, VFunc);
+ CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
return Callee;
}
@@ -1872,9 +1887,8 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
GlobalDecl GD(Dtor, Dtor_Deleting);
const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
Dtor, StructorType::Deleting);
- llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
- CGCallee Callee = getVirtualFunctionPointer(
- CGF, GD, This, Ty, CE ? CE->getLocStart() : SourceLocation());
+ llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
+ CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
ASTContext &Context = getContext();
llvm::Value *ImplicitParam = llvm::ConstantInt::get(
@@ -1915,23 +1929,24 @@ MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) {
return VBGlobals;
}
-llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
- const CXXMethodDecl *MD,
- const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+llvm::Function *
+MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
+ const MethodVFTableLocation &ML) {
assert(!isa<CXXConstructorDecl>(MD) && !isa<CXXDestructorDecl>(MD) &&
"can't form pointers to ctors or virtual dtors");
// Calculate the mangled name.
SmallString<256> ThunkName;
llvm::raw_svector_ostream Out(ThunkName);
- getMangleContext().mangleVirtualMemPtrThunk(MD, Out);
+ getMangleContext().mangleVirtualMemPtrThunk(MD, ML, Out);
// If the thunk has been generated previously, just return it.
if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
return cast<llvm::Function>(GV);
// Create the llvm::Function.
- const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSMemberPointerThunk(MD);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeUnprototypedMustTailThunk(MD);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Function *ThunkFn =
llvm::Function::Create(ThunkTy, llvm::Function::ExternalLinkage,
@@ -2716,9 +2731,8 @@ llvm::Constant *
MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
assert(MD->isInstance() && "Member function must not be static!");
- MD = MD->getCanonicalDecl();
CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
- const CXXRecordDecl *RD = MD->getParent()->getMostRecentDecl();
+ const CXXRecordDecl *RD = MD->getParent()->getMostRecentNonInjectedDecl();
CodeGenTypes &Types = CGM.getTypes();
unsigned VBTableIndex = 0;
@@ -2738,8 +2752,7 @@ MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
FirstField = CGM.GetAddrOfFunction(MD, Ty);
} else {
auto &VTableContext = CGM.getMicrosoftVTableContext();
- MicrosoftVTableContext::MethodVFTableLocation ML =
- VTableContext.getMethodVFTableLocation(MD);
+ MethodVFTableLocation ML = VTableContext.getMethodVFTableLocation(MD);
FirstField = EmitVirtualMemPtrThunk(MD, ML);
// Include the vfptr adjustment if the method is in a non-primary vftable.
NonVirtualBaseAdjustment += ML.VFPtrOffset;
@@ -3336,14 +3349,14 @@ CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
// a reference to the TypeInfo for the type and a reference to the
// CompleteHierarchyDescriptor for the type.
//
-// ClassHieararchyDescriptor: Contains information about a class hierarchy.
+// ClassHierarchyDescriptor: Contains information about a class hierarchy.
// Used during dynamic_cast to walk a class hierarchy. References a base
// class array and the size of said array.
//
// BaseClassArray: Contains a list of classes in a hierarchy. BaseClassArray is
// somewhat of a misnomer because the most derived class is also in the list
// as well as multiple copies of virtual bases (if they occur multiple times
-// in the hiearchy.) The BaseClassArray contains one BaseClassDescriptor for
+// in the hierarchy.) The BaseClassArray contains one BaseClassDescriptor for
// every path in the hierarchy, in pre-order depth first order. Note, we do
// not declare a specific llvm type for BaseClassArray, it's merely an array
// of BaseClassDescriptor pointers.
@@ -3356,7 +3369,7 @@ CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
// mangled into them so they can be aggressively deduplicated by the linker.
static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) {
- StringRef MangledName("\01??_7type_info@@6B@");
+ StringRef MangledName("??_7type_info@@6B@");
if (auto VTable = CGM.getModule().getNamedGlobal(MangledName))
return VTable;
return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
@@ -3367,7 +3380,7 @@ static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) {
namespace {
-/// \brief A Helper struct that stores information about a class in a class
+/// A helper struct that stores information about a class in a class
/// hierarchy. The information stored in these structs is used during
/// the generation of ClassHierarchyDescriptors and BaseClassDescriptors.
// During RTTI creation, MSRTTIClasses are stored in a contiguous array with
@@ -3394,7 +3407,7 @@ struct MSRTTIClass {
uint32_t Flags, NumBases, OffsetInVBase;
};
-/// \brief Recursively initialize the base class array.
+/// Recursively initialize the base class array.
uint32_t MSRTTIClass::initialize(const MSRTTIClass *Parent,
const CXXBaseSpecifier *Specifier) {
Flags = HasHierarchyDescriptor;
@@ -3441,7 +3454,7 @@ static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) {
llvm_unreachable("Invalid linkage!");
}
-/// \brief An ephemeral helper class for building MS RTTI types. It caches some
+/// An ephemeral helper class for building MS RTTI types. It caches some
/// calls to the module and information about the most derived class in a
/// hierarchy.
struct MSRTTIBuilder {
@@ -3474,7 +3487,7 @@ struct MSRTTIBuilder {
} // namespace
-/// \brief Recursively serializes a class hierarchy in pre-order depth first
+/// Recursively serializes a class hierarchy in pre-order depth first
/// order.
static void serializeClassHierarchy(SmallVectorImpl<MSRTTIClass> &Classes,
const CXXRecordDecl *RD) {
@@ -3483,7 +3496,7 @@ static void serializeClassHierarchy(SmallVectorImpl<MSRTTIClass> &Classes,
serializeClassHierarchy(Classes, Base.getType()->getAsCXXRecordDecl());
}
-/// \brief Find ambiguity among base classes.
+/// Find ambiguity among base classes.
static void
detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) {
llvm::SmallPtrSet<const CXXRecordDecl *, 8> VirtualBases;
@@ -3749,7 +3762,7 @@ MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
Flags};
}
-/// \brief Gets a TypeDescriptor. Returns a llvm::Constant * rather than a
+/// Gets a TypeDescriptor. Returns a llvm::Constant * rather than a
/// llvm::GlobalVariable * because different type descriptors have different
/// types, and need to be abstracted. They are abstracted by casting the
/// address to an Int8PtrTy.
@@ -3791,7 +3804,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
}
-/// \brief Gets or a creates a Microsoft CompleteObjectLocator.
+/// Gets or creates a Microsoft CompleteObjectLocator.
llvm::GlobalVariable *
MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
const VPtrInfo &Info) {
@@ -3808,19 +3821,12 @@ static void emitCXXConstructor(CodeGenModule &CGM,
static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor,
StructorType dtorType) {
- // The complete destructor is equivalent to the base destructor for
- // classes with no virtual bases, so try to emit it as an alias.
- if (!dtor->getParent()->getNumVBases() &&
- (dtorType == StructorType::Complete || dtorType == StructorType::Base)) {
- bool ProducedAlias = !CGM.TryEmitDefinitionAsAlias(
- GlobalDecl(dtor, Dtor_Complete), GlobalDecl(dtor, Dtor_Base));
- if (ProducedAlias) {
- if (dtorType == StructorType::Complete)
- return;
- if (dtor->isVirtual())
- CGM.getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete));
- }
- }
+ // Emit the base destructor if the base and complete (vbase) destructors are
+ // equivalent. This effectively implements -mconstructor-aliases as part of
+ // the ABI.
+ if (dtorType == StructorType::Complete &&
+ dtor->getParent()->getNumVBases() == 0)
+ dtorType = StructorType::Base;
// The base destructor is equivalent to the base destructor of its
// base class if there is exactly one non-virtual base class with a
@@ -3898,7 +3904,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
SourceLocation(),
&getContext().Idents.get("is_most_derived"),
getContext().IntTy, ImplicitParamDecl::Other);
- // Only add the parameter to the list if thie class has virtual bases.
+ // Only add the parameter to the list if the class has virtual bases.
if (RD->getNumVBases() > 0)
FunctionArgs.push_back(&IsMostDerived);
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index d0760b9cc2a6..c164cec5d942 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -71,9 +71,8 @@ class PCHContainerGenerator : public ASTConsumer {
}
bool VisitImportDecl(ImportDecl *D) {
- auto *Import = cast<ImportDecl>(D);
- if (!Import->getImportedOwningModule())
- DI.EmitImportDecl(*Import);
+ if (!D->getImportedOwningModule())
+ DI.EmitImportDecl(*D);
return true;
}
@@ -229,6 +228,11 @@ public:
Builder->getModuleDebugInfo()->completeRequiredType(RD);
}
+ void HandleImplicitImportDecl(ImportDecl *D) override {
+ if (!D->getImportedOwningModule())
+ Builder->getModuleDebugInfo()->EmitImportDecl(*D);
+ }
+
/// Emit a container holding the serialized AST.
void HandleTranslationUnit(ASTContext &Ctx) override {
assert(M && VMContext && Builder);
@@ -286,7 +290,7 @@ public:
else
ASTSym->setSection("__clangast");
- DEBUG({
+ LLVM_DEBUG({
// Print the IR for the PCH container to the debug output.
llvm::SmallString<0> Buffer;
clang::EmitBackendOutput(
diff --git a/lib/CodeGen/SanitizerMetadata.cpp b/lib/CodeGen/SanitizerMetadata.cpp
index f891cfbe4bb2..23cf9e490828 100644
--- a/lib/CodeGen/SanitizerMetadata.cpp
+++ b/lib/CodeGen/SanitizerMetadata.cpp
@@ -27,7 +27,8 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
bool IsBlacklisted) {
if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress))
+ SanitizerKind::HWAddress |
+ SanitizerKind::KernelHWAddress))
return;
IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
@@ -60,7 +61,8 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
const VarDecl &D, bool IsDynInit) {
if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress))
+ SanitizerKind::HWAddress |
+ SanitizerKind::KernelHWAddress))
return;
std::string QualName;
llvm::raw_string_ostream OS(QualName);
@@ -79,7 +81,8 @@ void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
// instrumentation.
if (CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress))
+ SanitizerKind::HWAddress |
+ SanitizerKind::KernelHWAddress))
reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true);
}
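For reference, the new SanitizerKind corresponds to the -fsanitize=kernel-hwaddress
driver flag; a file-scope global such as the following (hypothetical) one is what
gets reported through this path when that sanitizer is enabled:

  int buffer[64];   // registered via reportGlobalToASan under kernel-hwaddress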
diff --git a/lib/CodeGen/SwiftCallingConv.cpp b/lib/CodeGen/SwiftCallingConv.cpp
index fc8e36d2c599..3673a5597eac 100644
--- a/lib/CodeGen/SwiftCallingConv.cpp
+++ b/lib/CodeGen/SwiftCallingConv.cpp
@@ -579,11 +579,9 @@ bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
// Empty types don't need to be passed indirectly.
if (Entries.empty()) return false;
- CharUnits totalSize = Entries.back().End;
-
// Avoid copying the array of types when there's just a single element.
if (Entries.size() == 1) {
- return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(totalSize,
+ return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
Entries.back().Type,
asReturnValue);
}
@@ -593,8 +591,14 @@ bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
for (auto &entry : Entries) {
componentTys.push_back(entry.Type);
}
- return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(totalSize,
- componentTys,
+ return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
+ asReturnValue);
+}
+
+bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
+ ArrayRef<llvm::Type*> componentTys,
+ bool asReturnValue) {
+ return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
asReturnValue);
}
@@ -736,24 +740,12 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
components.append(numElts, eltTy);
}
-bool swiftcall::shouldPassCXXRecordIndirectly(CodeGenModule &CGM,
- const CXXRecordDecl *record) {
- // Following a recommendation from Richard Smith, pass a C++ type
- // indirectly only if the destructor is non-trivial or *all* of the
- // copy/move constructors are deleted or non-trivial.
-
- if (record->hasNonTrivialDestructor())
- return true;
-
- // It would be nice if this were summarized on the CXXRecordDecl.
- for (auto ctor : record->ctors()) {
- if (ctor->isCopyOrMoveConstructor() && !ctor->isDeleted() &&
- ctor->isTrivial()) {
- return false;
- }
- }
-
- return true;
+bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
+ const RecordDecl *record) {
+ // FIXME: should we not rely on the standard computation in Sema, just in
+ // case we want to diverge from the platform ABI (e.g. on targets where
+ // that uses the MSVC rule)?
+ return !record->canPassInRegisters();
}
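A hypothetical record that Sema marks as not passable in registers, which the
Swift lowering above now forces indirect for the same reason the C++ ABI does:

  struct NonTrivial {
    NonTrivial(const NonTrivial &);   // user-provided copy ctor
    ~NonTrivial();                    //   -> canPassInRegisters() == false
  };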
static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
@@ -775,10 +767,8 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
auto record = recordType->getDecl();
auto &layout = CGM.getContext().getASTRecordLayout(record);
- if (auto cxxRecord = dyn_cast<CXXRecordDecl>(record)) {
- if (shouldPassCXXRecordIndirectly(CGM, cxxRecord))
- return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);
- }
+ if (mustPassRecordIndirectly(CGM, record))
+ return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);
SwiftAggLowering lowering(CGM);
lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 4b8006428f8f..fa9b0a27af28 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -140,8 +140,11 @@ bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
CGCXXABI &CXXABI) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
+ if (!RD) {
+ if (!RT->getDecl()->canPassInRegisters())
+ return CGCXXABI::RAA_Indirect;
return CGCXXABI::RAA_Default;
+ }
return CXXABI.getRecordArgABI(RD);
}
@@ -153,6 +156,20 @@ static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
return getRecordArgABI(RT, CXXABI);
}
+static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
+ const ABIInfo &Info) {
+ QualType Ty = FI.getReturnType();
+
+ if (const auto *RT = Ty->getAs<RecordType>())
+ if (!isa<CXXRecordDecl>(RT->getDecl()) &&
+ !RT->getDecl()->canPassInRegisters()) {
+ FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
+ return true;
+ }
+
+ return CXXABI.classifyReturnType(FI);
+}
+
/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
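The classic shape this handles, sketched with hypothetical names; the union is
classified as if the argument were declared with its first member's type:

  typedef union {
    int *ip;
    const volatile int *cvip;
  } arg_t __attribute__((transparent_union));
  void f(arg_t a);   // lowered as if declared void f(int *)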
@@ -201,10 +218,6 @@ bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return false;
}
-bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
- return false;
-}
-
LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
@@ -682,8 +695,8 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
@@ -697,8 +710,8 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
@@ -734,9 +747,18 @@ class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
+ Fn->addFnAttr("no-prototype");
+ }
+ }
};
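In C, the declarations below sketch the two cases (names hypothetical); only the
unprototyped, bodiless one receives the new attribute:

  void legacy();       // no prototype, no body -> fn attr "no-prototype"
  void modern(void);   // prototyped            -> no attribute added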
-/// \brief Classify argument of given type \p Ty.
+/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
@@ -831,7 +853,7 @@ Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}
-/// \brief Classify argument of given type \p Ty.
+/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -845,8 +867,8 @@ ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect();
}
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
@@ -861,8 +883,8 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
@@ -932,7 +954,7 @@ static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//
-/// \brief Similar to llvm::CCState, but for Clang.
+/// Similar to llvm::CCState, but for Clang.
struct CCState {
CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
@@ -985,14 +1007,14 @@ class X86_32ABIInfo : public SwiftABIInfo {
ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
- /// \brief Return the alignment to use for the given type on the stack.
+ /// Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
- /// \brief Updates the number of available free registers, returns
+ /// Updates the number of available free registers, returns
/// true if any registers were allocated.
bool updateFreeRegs(QualType Ty, CCState &State) const;
@@ -1002,7 +1024,7 @@ class X86_32ABIInfo : public SwiftABIInfo {
bool canExpandIndirectArgument(QualType Ty) const;
- /// \brief Rewrite the function info so that all memory arguments use
+ /// Rewrite the function info so that all memory arguments use
/// inalloca.
void rewriteWithInAlloca(CGFunctionInfo &FI) const;
@@ -1028,8 +1050,7 @@ public:
IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
DefaultNumRegisterParameters(NumRegisterParameters) {}
- bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type*> scalars,
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
// LLVM's x86-32 lowering currently only assigns up to three
// integer registers and three fp registers. Oddly, it'll use up to
@@ -1057,8 +1078,7 @@ public:
const llvm::Triple &Triple, const CodeGenOptions &Opts);
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &CGM) const override;
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
// Darwin uses different dwarf register numbers for EH.
@@ -1404,8 +1424,8 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
@@ -1677,8 +1697,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
if (Ty->isPromotableIntegerType()) {
if (InReg)
- return ABIArgInfo::getExtendInReg();
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtendInReg(Ty);
+ return ABIArgInfo::getExtend(Ty);
}
if (InReg)
@@ -1755,7 +1775,7 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
} else
State.FreeRegs = DefaultNumRegisterParameters;
- if (!getCXXABI().classifyReturnType(FI)) {
+ if (!::classifyReturnType(getCXXABI(), FI, *this)) {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
} else if (FI.getReturnInfo().isIndirect()) {
// The C++ ABI is not aware of register usage, so we have to check if the
@@ -1925,19 +1945,13 @@ bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
}
void X86_32TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const {
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ if (GV->isDeclaration())
return;
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- // Get the LLVM function.
llvm::Function *Fn = cast<llvm::Function>(GV);
-
- // Now add the 'alignstack' attribute with a value of 16.
- llvm::AttrBuilder B;
- B.addStackAlignmentAttr(16);
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ Fn->addFnAttr("stackrealign");
}
if (FD->hasAttr<AnyX86InterruptAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
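The source attribute that reaches the path above is force_align_arg_pointer; it
now lowers to the single string attribute "stackrealign" rather than
alignstack(16), e.g. for a hypothetical function:

  __attribute__((force_align_arg_pointer)) void callback(void);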
@@ -2121,8 +2135,8 @@ class X86_64ABIInfo : public SwiftABIInfo {
/// classify it as INTEGER (for compatibility with older clang compilers).
bool classifyIntegerMMXAsSSE() const {
// Clang <= 3.8 did not do this.
- if (getCodeGenOpts().getClangABICompat() <=
- CodeGenOptions::ClangABI::Ver3_8)
+ if (getContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver3_8)
return false;
const llvm::Triple &Triple = getTarget().getTriple();
@@ -2168,8 +2182,7 @@ public:
return Has64BitPointers;
}
- bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type*> scalars,
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
@@ -2201,8 +2214,7 @@ public:
return isX86VectorCallAggregateSmallEnough(NumMembers);
}
- bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type *> scalars,
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
@@ -2286,19 +2298,13 @@ public:
}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override {
- if (!IsForDefinition)
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
return;
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- // Get the LLVM function.
- auto *Fn = cast<llvm::Function>(GV);
-
- // Now add the 'alignstack' attribute with a value of 16.
- llvm::AttrBuilder B;
- B.addStackAlignmentAttr(16);
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->addFnAttr("stackrealign");
}
if (FD->hasAttr<AnyX86InterruptAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
@@ -2346,8 +2352,7 @@ public:
Win32StructABI, NumRegisterParameters, false) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &CGM) const override;
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
@@ -2362,26 +2367,24 @@ public:
}
};
-static void addStackProbeSizeTargetAttribute(const Decl *D,
- llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) {
- if (D && isa<FunctionDecl>(D)) {
- if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
+static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) {
+ if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
+ if (CGM.getCodeGenOpts().StackProbeSize != 4096)
Fn->addFnAttr("stack-probe-size",
llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
- }
+ if (CGM.getCodeGenOpts().NoStackArgProbe)
+ Fn->addFnAttr("no-stack-arg-probe");
}
}
void WinX86_32TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const {
- X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
return;
- addStackProbeSizeTargetAttribute(D, GV, CGM);
+ addStackProbeTargetAttributes(D, GV, CGM);
}
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -2391,8 +2394,7 @@ public:
: TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &CGM) const override;
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
@@ -2422,20 +2424,14 @@ public:
};
void WinX86_64TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const {
- TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
return;
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- // Get the LLVM function.
- auto *Fn = cast<llvm::Function>(GV);
-
- // Now add the 'alignstack' attribute with a value of 16.
- llvm::AttrBuilder B;
- B.addStackAlignmentAttr(16);
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->addFnAttr("stackrealign");
}
if (FD->hasAttr<AnyX86InterruptAttr>()) {
llvm::Function *Fn = cast<llvm::Function>(GV);
@@ -2443,7 +2439,7 @@ void WinX86_64TargetCodeGenInfo::setTargetAttributes(
}
}
- addStackProbeSizeTargetAttribute(D, GV, CGM);
+ addStackProbeTargetAttributes(D, GV, CGM);
}
}
@@ -2868,8 +2864,8 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
return getNaturalAlignIndirect(Ty);
@@ -2901,8 +2897,8 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -3271,7 +3267,7 @@ classifyReturnType(QualType RetTy) const {
if (RetTy->isIntegralOrEnumerationType() &&
RetTy->isPromotableIntegerType())
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtend(RetTy);
}
break;
@@ -3416,7 +3412,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
if (Ty->isIntegralOrEnumerationType() &&
Ty->isPromotableIntegerType())
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtend(Ty);
}
break;
@@ -3543,14 +3539,24 @@ ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
+ const unsigned CallingConv = FI.getCallingConvention();
+ // It is possible to force Win64 calling convention on any x86_64 target by
+  // using __attribute__((ms_abi)). In that case, to emit Win64-compatible
+  // code, delegate this call to WinX86_64ABIInfo::computeInfo.
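+  // For instance, a hypothetical declaration such as
+  //   __attribute__((ms_abi)) void f(int, ...);
+  // is now classified exactly as it would be on x86_64-pc-windows-msvc.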
+ if (CallingConv == llvm::CallingConv::Win64) {
+ WinX86_64ABIInfo Win64ABIInfo(CGT);
+ Win64ABIInfo.computeInfo(FI);
+ return;
+ }
+
+ bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
// Keep track of the number of assigned registers.
unsigned FreeIntRegs = IsRegCall ? 11 : 6;
unsigned FreeSSERegs = IsRegCall ? 16 : 8;
unsigned NeededInt, NeededSSE;
- if (!getCXXABI().classifyReturnType(FI)) {
+ if (!::classifyReturnType(getCXXABI(), FI, *this)) {
if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
!FI.getReturnType()->getTypePtr()->isUnionType()) {
FI.getReturnInfo() =
@@ -3797,17 +3803,18 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address RegAddrHi =
CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
CharUnits::fromQuantity(16));
- llvm::Type *DoubleTy = CGF.DoubleTy;
- llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy);
+ llvm::Type *ST = AI.canHaveCoerceToType()
+ ? AI.getCoerceToType()
+ : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
llvm::Value *V;
Address Tmp = CGF.CreateMemTemp(Ty);
Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
- V = CGF.Builder.CreateLoad(
- CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
+ RegAddrLo, ST->getStructElementType(0)));
CGF.Builder.CreateStore(V,
CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
- V = CGF.Builder.CreateLoad(
- CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
+ RegAddrHi, ST->getStructElementType(1)));
CGF.Builder.CreateStore(V,
CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
@@ -3941,7 +3948,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
// extended.
const BuiltinType *BT = Ty->getAs<BuiltinType>();
if (BT && BT->getKind() == BuiltinType::Bool)
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtend(Ty);
// Mingw64 GCC uses the old 80 bit extended precision floating point unit. It
// passes them indirectly through memory.
@@ -4289,7 +4296,7 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
-class PPC64_SVR4_ABIInfo : public ABIInfo {
+class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
public:
enum ABIKind {
ELFv1 = 0,
@@ -4333,7 +4340,7 @@ private:
public:
PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
bool SoftFloatABI)
- : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
+ : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
IsSoftFloatABI(SoftFloatABI) {}
bool isPromotableTypeForABI(QualType Ty) const;
@@ -4376,6 +4383,15 @@ public:
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
+
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
+ bool asReturnValue) const override {
+ return occupiesMoreThan(CGT, scalars, /*total*/ 4);
+ }
+
+ bool isSwiftErrorInRegister() const override {
+ return false;
+ }
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -4543,7 +4559,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
// For compatibility with GCC, ignore empty bitfields in C++ mode.
if (getContext().getLangOpts().CPlusPlus &&
- FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+ FD->isZeroLengthBitField(getContext()))
continue;
uint64_t FldMembers;
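The C++-mode GCC-compatibility case above, sketched with a hypothetical type;
the zero-width bitfield is skipped, so the record stays homogeneous:

  struct S { float a; int : 0; float b; };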
@@ -4603,7 +4619,9 @@ bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::Float ||
BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble) {
+ BT->getKind() == BuiltinType::LongDouble ||
+ (getContext().getTargetInfo().hasFloat128Type() &&
+ (BT->getKind() == BuiltinType::Float128))) {
if (IsSoftFloatABI)
return false;
return true;
@@ -4618,10 +4636,13 @@ bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base, uint64_t Members) const {
- // Vector types require one register, floating point types require one
- // or two registers depending on their size.
+ // Vector and fp128 types require one register, other floating point types
+ // require one or two registers depending on their size.
uint32_t NumRegs =
- Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
+ ((getContext().getTargetInfo().hasFloat128Type() &&
+ Base->isFloat128Type()) ||
+ Base->isVectorType()) ? 1
+ : (getContext().getTypeSize(Base) + 63) / 64;
// Homogeneous Aggregates may occupy at most 8 registers.
return Members * NumRegs <= 8;
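Assuming a target where hasFloat128Type() holds, each fp128 member now costs a
single register when sizing a homogeneous aggregate, e.g. for a hypothetical type:

  struct HFA { __float128 x, y; };   // 2 members x 1 register = 2, within 8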
@@ -4694,8 +4715,8 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
/*Realign=*/TyAlign > ABIAlign);
}
- return (isPromotableTypeForABI(Ty) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo
@@ -4749,8 +4770,8 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
return getNaturalAlignIndirect(RetTy);
}
- return (isPromotableTypeForABI(RetTy) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
@@ -4899,7 +4920,7 @@ private:
bool isIllegalVectorType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
+ if (!::classifyReturnType(getCXXABI(), FI, *this))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &it : FI.arguments())
@@ -4922,8 +4943,7 @@ private:
Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
- bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type*> scalars,
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
@@ -5002,7 +5022,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
return (Ty->isPromotableIntegerType() && isDarwinPCS()
- ? ABIArgInfo::getExtend()
+ ? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
}
@@ -5072,7 +5092,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() && isDarwinPCS()
- ? ABIArgInfo::getExtend()
+ ? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect());
}
@@ -5521,8 +5541,7 @@ private:
llvm::CallingConv::ID getABIDefaultCC() const;
void setCCs();
- bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type*> scalars,
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
@@ -5565,9 +5584,8 @@ public:
}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override {
- if (!IsForDefinition)
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
return;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
@@ -5610,8 +5628,7 @@ public:
: ARMTargetCodeGenInfo(CGT, K) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &CGM) const override;
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
@@ -5625,17 +5642,16 @@ public:
};
void WindowsARMTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const {
- ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
return;
- addStackProbeSizeTargetAttribute(D, GV, CGM);
+ addStackProbeTargetAttributes(D, GV, CGM);
}
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!getCXXABI().classifyReturnType(FI))
+ if (!::classifyReturnType(getCXXABI(), FI, *this))
FI.getReturnInfo() =
classifyReturnType(FI.getReturnType(), FI.isVariadic());
@@ -5682,18 +5698,6 @@ void ARMABIInfo::setCCs() {
llvm::CallingConv::ID abiCC = getABIDefaultCC();
if (abiCC != getLLVMDefaultCC())
RuntimeCC = abiCC;
-
- // AAPCS apparently requires runtime support functions to be soft-float, but
- // that's almost certainly for historic reasons (Thumb1 not supporting VFP
- // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
-
- // The Run-time ABI for the ARM Architecture section 4.1.2 requires
- // AEABI-complying FP helper functions to use the base AAPCS.
- // These AEABI functions are expanded in the ARM llvm backend, all the builtin
- // support functions emitted by clang such as the _Complex helpers follow the
- // abiCC.
- if (abiCC != getLLVMDefaultCC())
- BuiltinCC = abiCC;
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
@@ -5730,10 +5734,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
- // __fp16 gets passed as if it were an int or float, but with the top 16 bits
- // unspecified. This is not done for OpenCL as it handles the half type
- // natively, and does not need to interwork with AAPCS code.
- if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
+  // _Float16 and __fp16 get passed as if they were an int or float, but with
+ // the top 16 bits unspecified. This is not done for OpenCL as it handles the
+ // half type natively, and does not need to interwork with AAPCS code.
+ if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
+ !getContext().getLangOpts().NativeHalfArgsAndReturns) {
llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
llvm::Type::getFloatTy(getVMContext()) :
llvm::Type::getInt32Ty(getVMContext());
@@ -5746,7 +5751,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
Ty = EnumTy->getDecl()->getIntegerType();
}
- return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
}
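Both spellings of the 16-bit float now take this path (hypothetical
declarations); the value travels as an i32 or a float with the top 16 bits
unspecified:

  void takes_fp16(__fp16 h);
  void takes_float16(_Float16 h);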
@@ -5928,10 +5933,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
return getNaturalAlignIndirect(RetTy);
}
- // __fp16 gets returned as if it were an int or float, but with the top 16
- // bits unspecified. This is not done for OpenCL as it handles the half type
- // natively, and does not need to interwork with AAPCS code.
- if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
+  // _Float16 and __fp16 get returned as if they were an int or float, but with
+ // the top 16 bits unspecified. This is not done for OpenCL as it handles the
+ // half type natively, and does not need to interwork with AAPCS code.
+ if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
+ !getContext().getLangOpts().NativeHalfArgsAndReturns) {
llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
llvm::Type::getFloatTy(getVMContext()) :
llvm::Type::getInt32Ty(getVMContext());
@@ -5943,7 +5949,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+ return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect();
}
@@ -6155,8 +6161,8 @@ public:
: TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &M) const override;
+ bool shouldEmitStaticExternCAliases() const override;
private:
// Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
@@ -6176,8 +6182,8 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
@@ -6189,8 +6195,8 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty))
return getNaturalAlignIndirect(Ty, /* byval */ true);
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -6212,9 +6218,8 @@ Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const {
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
return;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
@@ -6279,6 +6284,10 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
// Append metadata to nvvm.annotations
MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
+
+bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
+ return false;
+}
}
//===----------------------------------------------------------------------===//
@@ -6313,8 +6322,7 @@ public:
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
- bool shouldPassIndirectlyForSwift(CharUnits totalSize,
- ArrayRef<llvm::Type*> scalars,
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
@@ -6402,7 +6410,7 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
// Unlike isSingleElementStruct(), empty structure and array fields
// do count. So do anonymous bitfields that aren't zero-sized.
if (getContext().getLangOpts().CPlusPlus &&
- FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
+ FD->isZeroLengthBitField(getContext()))
continue;
// Unlike isSingleElementStruct(), arrays do not count.
@@ -6586,8 +6594,8 @@ ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
return getNaturalAlignIndirect(RetTy);
- return (isPromotableIntegerType(RetTy) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
@@ -6597,7 +6605,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Integers and enums are extended to full register width.
if (isPromotableIntegerType(Ty))
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtend(Ty);
// Handle vector types and vector-like structure types. Note that
// as opposed to float-like structure types, we do not allow any
@@ -6651,16 +6659,14 @@ public:
MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &M) const override;
};
}
void MSP430TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const {
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
return;
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
@@ -6705,7 +6711,7 @@ public:
void computeInfo(CGFunctionInfo &FI) const override;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
- bool shouldSignExtUnsignedType(QualType Ty) const override;
+ ABIArgInfo extendType(QualType Ty) const;
};
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -6720,8 +6726,7 @@ public:
}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override {
+ CodeGen::CodeGenModule &CGM) const override {
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
llvm::Function *Fn = cast<llvm::Function>(GV);
@@ -6732,7 +6737,7 @@ public:
Fn->addFnAttr("short-call");
// Other attributes do not have a meaning for declarations.
- if (!IsForDefinition)
+ if (GV->isDeclaration())
return;
if (FD->hasAttr<Mips16Attr>()) {
@@ -6898,7 +6903,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
// All integral types are promoted to the GPR width.
if (Ty->isIntegralOrEnumerationType())
- return ABIArgInfo::getExtend();
+ return extendType(Ty);
return ABIArgInfo::getDirect(
nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
@@ -6980,8 +6985,8 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -7047,14 +7052,14 @@ Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Addr;
}
-bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
+ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
int TySize = getContext().getTypeSize(Ty);
// MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
- return true;
+ return ABIArgInfo::getSignExtend(Ty);
- return false;
+ return ABIArgInfo::getExtend(Ty);
}
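// [Editor's note] A sketch (not in the patch) of the value representation the
// MIPS64 rule mandates: an unsigned 32-bit argument occupies its 64-bit GPR
// in sign-extended form, which is what ABIArgInfo::getSignExtend(Ty) requests
// from the backend. The helper name below is hypothetical.
#include <cstdint>
static uint64_t mips64ExtendU32(uint32_t V) {
  // 0xFFFFFFFFu is materialized as 0xFFFFFFFFFFFFFFFF in the register,
  // not 0x00000000FFFFFFFF.
  return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(V)));
}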
bool
@@ -7096,9 +7101,8 @@ public:
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM,
- ForDefinition_t IsForDefinition) const override {
- if (!IsForDefinition)
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
return;
const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
@@ -7127,14 +7131,12 @@ public:
: DefaultTargetCodeGenInfo(CGT) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &M) const override;
};
void TCETargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const {
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
return;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) return;
@@ -7227,8 +7229,8 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
@@ -7265,8 +7267,8 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
}
if (isEmptyRecord(getContext(), RetTy, true))
@@ -7409,7 +7411,7 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
if (Ty->isPromotableIntegerType()) {
if (InReg)
return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtend(Ty);
}
if (InReg)
return ABIArgInfo::getDirectInReg();
@@ -7639,8 +7641,7 @@ public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const override;
+ CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
@@ -7658,13 +7659,14 @@ public:
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
llvm::Value *BlockLiteral) const override;
+ bool shouldEmitStaticExternCAliases() const override;
+ void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const {
- if (!IsForDefinition)
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
return;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
@@ -7674,6 +7676,11 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const auto *ReqdWGS = M.getLangOpts().OpenCL ?
FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
+
+ if (M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>() &&
+ (M.getTriple().getOS() == llvm::Triple::AMDHSA))
+ F->addFnAttr("amdgpu-implicitarg-num-bytes", "48");
+
const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
if (ReqdWGS || FlatWGS) {
unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
@@ -7785,6 +7792,16 @@ AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
return C.getOrInsertSyncScopeID(Name);
}
+bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
+ return false;
+}
+
+void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
+ const FunctionType *&FT) const {
+ FT = getABIInfo().getContext().adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+}
+
//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
@@ -7991,7 +8008,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// Integer types smaller than a register are extended.
if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend();
+ return ABIArgInfo::getExtend(Ty);
// Other non-aggregates go in registers.
if (!isAggregateTypeForABI(Ty))
@@ -8521,7 +8538,7 @@ static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
// The ABI requires unions to be sorted but not structures.
// See FieldEncoding::operator< for sort algorithm.
if (RT->isUnionType())
- std::sort(FE.begin(), FE.end());
+ llvm::sort(FE.begin(), FE.end());
// We can now complete the TypeString.
unsigned E = FE.size();
for (unsigned I = 0; I != E; ++I) {
@@ -8565,7 +8582,7 @@ static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
EnumEnc += '}';
FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
}
- std::sort(FE.begin(), FE.end());
+ llvm::sort(FE.begin(), FE.end());
unsigned E = FE.size();
for (unsigned I = 0; I != E; ++I) {
if (I)
@@ -8780,6 +8797,203 @@ static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
return false;
}
+//===----------------------------------------------------------------------===//
+// RISCV ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RISCVABIInfo : public DefaultABIInfo {
+private:
+ unsigned XLen; // Size of the integer ('x') registers in bits.
+ static const int NumArgGPRs = 8;
+
+public:
+ RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
+ : DefaultABIInfo(CGT), XLen(XLen) {}
+
+ // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+ // non-virtual, but computeInfo is virtual, so we override it.
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed,
+ int &ArgGPRsLeft) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ ABIArgInfo extendType(QualType Ty) const;
+};
+} // end anonymous namespace
+
+void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ // IsRetIndirect is true if classifyArgumentType indicated the value should
+ // be passed indirectly or if the type size is greater than 2*XLen. For
+ // example, fp128 is passed directly in LLVM IR, relying on the backend
+ // lowering code to rewrite the argument list and pass it indirectly on RV32.
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect ||
+ getContext().getTypeSize(RetTy) > (2 * XLen);
+
+ // We must track the number of GPRs used in order to conform to the RISC-V
+ // ABI, as integer scalars passed in registers should have signext/zeroext
+ // when promoted, but are anyext if passed on the stack. As GPR usage is
+ // different for variadic arguments, we must also track whether we are
+ // examining a vararg or not.
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int NumFixedArgs = FI.getNumRequiredArgs();
+
+ int ArgNum = 0;
+ for (auto &ArgInfo : FI.arguments()) {
+ bool IsFixed = ArgNum < NumFixedArgs;
+ ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft);
+ ArgNum++;
+ }
+}
+
+ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+ int &ArgGPRsLeft) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+ bool MustUseStack = false;
+ // Determine the number of GPRs needed to pass the current argument
+ // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
+ // register pairs, so may consume 3 registers.
+ int NeededArgGPRs = 1;
+ if (!IsFixed && NeededAlign == 2 * XLen)
+ NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
+ else if (Size > XLen && Size <= 2 * XLen)
+ NeededArgGPRs = 2;
+
+ if (NeededArgGPRs > ArgGPRsLeft) {
+ MustUseStack = true;
+ NeededArgGPRs = ArgGPRsLeft;
+ }
+
+ ArgGPRsLeft -= NeededArgGPRs;
+
+ if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width, unless passed on the
+ // stack.
+ if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
+ return extendType(Ty);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ // Aggregates which are <= 2*XLen will be passed in registers if possible,
+ // so coerce to integers.
+ if (Size <= 2 * XLen) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+
+ // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
+ // required, and a 2-element XLen array if only XLen alignment is required.
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else if (Alignment == 2 * XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), 2 * XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), 2));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
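// [Editor's note] The GPR-counting rule above, condensed into a standalone
// sketch for readability (hypothetical helper, not in the patch). Size and
// NeededAlign are in bits, as returned by ASTContext.
#include <cstdint>
static int neededArgGPRs(uint64_t Size, uint64_t NeededAlign, bool IsFixed,
                         int ArgGPRsLeft, unsigned XLen) {
  // A 2*XLen-aligned vararg must start in an even-numbered register, so it
  // may consume an extra "padding" register when an odd number remain.
  if (!IsFixed && NeededAlign == 2 * XLen)
    return 2 + (ArgGPRsLeft % 2);
  if (Size > XLen && Size <= 2 * XLen)
    return 2;
  return 1;
}
// Note computeInfo above: when the return is indirect, the sret pointer
// consumes the first GPR, so ArgGPRsLeft starts at NumArgGPRs - 1.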
+
+ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft);
+}
+
+Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
+ }
+
+ std::pair<CharUnits, CharUnits> SizeAndAlign =
+ getContext().getTypeInfoInChars(Ty);
+
+ // Arguments bigger than 2*XLen bytes are passed indirectly.
+ bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
+ SlotSize, /*AllowHigherAlign=*/true);
+}
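// [Editor's note] Sketch of the indirect-vararg threshold above, specialized
// to RV32 (XLen == 32, SlotSize == 4 bytes); hypothetical helper only.
static bool isVAArgIndirectRV32(unsigned SizeInBytes) {
  const unsigned SlotSize = 4;        // one GPR-sized va_list slot
  return SizeInBytes > 2 * SlotSize;  // wider than two slots: via pointer
}
// e.g. an 8-byte struct is read from two adjacent slots, while a 16-byte
// one is fetched through a pointer stored in a single slot.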
+
+ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+ // RV64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+ return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
+class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
+ : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case RISCVInterruptAttr::user: Kind = "user"; break;
+ case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
+ case RISCVInterruptAttr::machine: Kind = "machine"; break;
+ }
+
+ auto *Fn = cast<llvm::Function>(GV);
+
+ Fn->addFnAttr("interrupt", Kind);
+ }
+};
+} // namespace
//===----------------------------------------------------------------------===//
// Driver code
@@ -8894,6 +9108,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
+ case llvm::Triple::riscv32:
+ return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 32));
+ case llvm::Triple::riscv64:
+ return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 64));
+
case llvm::Triple::systemz: {
bool HasVector = getTarget().getABI() == "vector";
return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index d745e420c4a5..b530260ea48f 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -57,8 +57,7 @@ public:
/// setTargetAttributes - Provides a convenient hook to handle extra
/// target-specific attributes for the given global.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M,
- ForDefinition_t IsForDefinition) const {}
+ CodeGen::CodeGenModule &M) const {}
/// emitTargetMD - Provides a convenient hook to handle extra
/// target-specific metadata for the given global.
@@ -267,7 +266,7 @@ public:
virtual llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
llvm::LLVMContext &C) const;
- /// Inteface class for filling custom fields of a block literal for OpenCL.
+ /// Interface class for filling custom fields of a block literal for OpenCL.
class TargetOpenCLBlockHelper {
public:
typedef std::pair<llvm::Value *, StringRef> ValueTy;
@@ -297,6 +296,13 @@ public:
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
llvm::Value *BlockLiteral) const;
+
+ /// \return true if the target supports alias from the unmangled name to the
+ /// mangled name of functions declared within an extern "C" region and marked
+ /// as 'used', and having internal linkage.
+ virtual bool shouldEmitStaticExternCAliases() const { return true; }
+
+ virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
};
} // namespace CodeGen
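// [Editor's note] The pattern shouldEmitStaticExternCAliases() governs, as a
// sketch (assumption: the usual CUDA static extern "C" case; the function
// name is illustrative):
extern "C" {
  // Internal linkage inside an extern "C" region, kept alive via 'used':
  // targets returning true get an unmangled alias to the uniqued internal
  // symbol; AMDGPU returns false above and suppresses that alias.
  __attribute__((used)) static void staticExternCFn() {}
}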
diff --git a/lib/CodeGen/VarBypassDetector.cpp b/lib/CodeGen/VarBypassDetector.cpp
index cfb93d6a9fcc..2f8a591a3e7f 100644
--- a/lib/CodeGen/VarBypassDetector.cpp
+++ b/lib/CodeGen/VarBypassDetector.cpp
@@ -95,7 +95,7 @@ bool VarBypassDetector::BuildScopeInformation(const Stmt *S,
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
case Stmt::LabelStmtClass:
- llvm_unreachable("the loop bellow handles labels and cases");
+ llvm_unreachable("the loop below handles labels and cases");
break;
default:
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index 85e466a4409d..99d588d9c009 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -1,4 +1,4 @@
-//===--- Action.cpp - Abstract compilation steps --------------------------===//
+//===- Action.cpp - Abstract compilation steps ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,16 +8,15 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Action.h"
-#include "clang/Driver/ToolChain.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Regex.h"
#include <cassert>
-using namespace clang::driver;
+#include <string>
+
+using namespace clang;
+using namespace driver;
using namespace llvm::opt;
-Action::~Action() {}
+Action::~Action() = default;
const char *Action::getClassName(ActionClass AC) {
switch (AC) {
@@ -97,16 +96,23 @@ std::string Action::getOffloadingKindPrefix() const {
return "device-cuda";
case OFK_OpenMP:
return "device-openmp";
+ case OFK_HIP:
+ return "device-hip";
// TODO: Add other programming models here.
}
if (!ActiveOffloadKindMask)
- return "";
+ return {};
std::string Res("host");
+ assert(!((ActiveOffloadKindMask & OFK_Cuda) &&
+ (ActiveOffloadKindMask & OFK_HIP)) &&
+ "Cannot offload CUDA and HIP at the same time");
if (ActiveOffloadKindMask & OFK_Cuda)
Res += "-cuda";
+ if (ActiveOffloadKindMask & OFK_HIP)
+ Res += "-hip";
if (ActiveOffloadKindMask & OFK_OpenMP)
Res += "-openmp";
@@ -119,11 +125,11 @@ std::string Action::getOffloadingKindPrefix() const {
/// for each offloading kind.
std::string
Action::GetOffloadingFileNamePrefix(OffloadKind Kind,
- llvm::StringRef NormalizedTriple,
+ StringRef NormalizedTriple,
bool CreatePrefixForHost) {
// Don't generate prefix for host actions unless required.
if (!CreatePrefixForHost && (Kind == OFK_None || Kind == OFK_Host))
- return "";
+ return {};
std::string Res("-");
Res += GetOffloadKindName(Kind);
@@ -134,7 +140,7 @@ Action::GetOffloadingFileNamePrefix(OffloadKind Kind,
/// Return a string with the offload kind name. If that is not defined, we
/// assume 'host'.
-llvm::StringRef Action::GetOffloadKindName(OffloadKind Kind) {
+StringRef Action::GetOffloadKindName(OffloadKind Kind) {
switch (Kind) {
case OFK_None:
case OFK_Host:
@@ -143,6 +149,8 @@ llvm::StringRef Action::GetOffloadKindName(OffloadKind Kind) {
return "cuda";
case OFK_OpenMP:
return "openmp";
+ case OFK_HIP:
+ return "hip";
// TODO: Add other programming models here.
}
@@ -153,12 +161,11 @@ llvm::StringRef Action::GetOffloadKindName(OffloadKind Kind) {
void InputAction::anchor() {}
InputAction::InputAction(const Arg &_Input, types::ID _Type)
- : Action(InputClass, _Type), Input(_Input) {
-}
+ : Action(InputClass, _Type), Input(_Input) {}
void BindArchAction::anchor() {}
-BindArchAction::BindArchAction(Action *Input, llvm::StringRef ArchName)
+BindArchAction::BindArchAction(Action *Input, StringRef ArchName)
: Action(BindArchClass, Input), ArchName(ArchName) {}
void OffloadAction::anchor() {}
@@ -300,8 +307,7 @@ JobAction::JobAction(ActionClass Kind, Action *Input, types::ID Type)
: Action(Kind, Input, Type) {}
JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
- : Action(Kind, Inputs, Type) {
-}
+ : Action(Kind, Inputs, Type) {}
void PreprocessJobAction::anchor() {}
@@ -341,20 +347,17 @@ AssembleJobAction::AssembleJobAction(Action *Input, types::ID OutputType)
void LinkJobAction::anchor() {}
LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type)
- : JobAction(LinkJobClass, Inputs, Type) {
-}
+ : JobAction(LinkJobClass, Inputs, Type) {}
void LipoJobAction::anchor() {}
LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type)
- : JobAction(LipoJobClass, Inputs, Type) {
-}
+ : JobAction(LipoJobClass, Inputs, Type) {}
void DsymutilJobAction::anchor() {}
DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type)
- : JobAction(DsymutilJobClass, Inputs, Type) {
-}
+ : JobAction(DsymutilJobClass, Inputs, Type) {}
void VerifyJobAction::anchor() {}
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 5bf91f2be981..471ffe0f1b00 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -25,6 +25,7 @@ add_clang_library(clangDriver
ToolChains/Arch/ARM.cpp
ToolChains/Arch/Mips.cpp
ToolChains/Arch/PPC.cpp
+ ToolChains/Arch/RISCV.cpp
ToolChains/Arch/Sparc.cpp
ToolChains/Arch/SystemZ.cpp
ToolChains/Arch/X86.cpp
@@ -44,6 +45,7 @@ add_clang_library(clangDriver
ToolChains/Fuchsia.cpp
ToolChains/Gnu.cpp
ToolChains/Haiku.cpp
+ ToolChains/HIP.cpp
ToolChains/Hexagon.cpp
ToolChains/Linux.cpp
ToolChains/MipsLinux.cpp
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index 645da5059587..ca2525dd07fb 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -1,4 +1,4 @@
-//===--- Compilation.cpp - Compilation Task Implementation ----------------===//
+//===- Compilation.cpp - Compilation Task Implementation ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,32 +8,48 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Compilation.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Util.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Option/Option.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <string>
+#include <system_error>
+#include <utility>
-using namespace clang::driver;
using namespace clang;
+using namespace driver;
using namespace llvm::opt;
Compilation::Compilation(const Driver &D, const ToolChain &_DefaultToolChain,
InputArgList *_Args, DerivedArgList *_TranslatedArgs,
bool ContainsError)
- : TheDriver(D), DefaultToolChain(_DefaultToolChain), ActiveOffloadMask(0u),
- Args(_Args), TranslatedArgs(_TranslatedArgs), ForDiagnostics(false),
- ContainsError(ContainsError) {
+ : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args),
+ TranslatedArgs(_TranslatedArgs), ContainsError(ContainsError) {
// The offloading host toolchain is the default toolchain.
OrderedOffloadingToolchains.insert(
std::make_pair(Action::OFK_Host, &DefaultToolChain));
}
Compilation::~Compilation() {
+ // Remove temporary files. This must be done before arguments are freed, as
+ // the file names might be derived from the input arguments.
+ if (!TheDriver.isSaveTempsEnabled() && !ForceKeepTempFiles)
+ CleanupFileList(TempFiles);
+
delete TranslatedArgs;
delete Args;
@@ -74,9 +90,8 @@ Compilation::getArgsForToolChain(const ToolChain *TC, StringRef BoundArch,
}
// Add allocated arguments to the final DAL.
- for (auto ArgPtr : AllocatedArgs) {
+ for (auto ArgPtr : AllocatedArgs)
Entry->AddSynthesizedArg(ArgPtr);
- }
}
return *Entry;
@@ -105,7 +120,7 @@ bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
// so we don't need to check again.
if (IssueErrors)
- getDriver().Diag(clang::diag::err_drv_unable_to_remove_file)
+ getDriver().Diag(diag::err_drv_unable_to_remove_file)
<< EC.message();
return false;
}
@@ -115,9 +130,8 @@ bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
bool Compilation::CleanupFileList(const ArgStringList &Files,
bool IssueErrors) const {
bool Success = true;
- for (ArgStringList::const_iterator
- it = Files.begin(), ie = Files.end(); it != ie; ++it)
- Success &= CleanupFile(*it, IssueErrors);
+ for (const auto &File: Files)
+ Success &= CleanupFile(File, IssueErrors);
return Success;
}
@@ -125,14 +139,12 @@ bool Compilation::CleanupFileMap(const ArgStringMap &Files,
const JobAction *JA,
bool IssueErrors) const {
bool Success = true;
- for (ArgStringMap::const_iterator
- it = Files.begin(), ie = Files.end(); it != ie; ++it) {
-
+ for (const auto &File : Files) {
// If specified, only delete the files associated with the JobAction.
// Otherwise, delete all files in the map.
- if (JA && it->first != JA)
+ if (JA && File.first != JA)
continue;
- Success &= CleanupFile(it->second, IssueErrors);
+ Success &= CleanupFile(File.second, IssueErrors);
}
return Success;
}
@@ -151,7 +163,7 @@ int Compilation::ExecuteCommand(const Command &C,
llvm::sys::fs::F_Append |
llvm::sys::fs::F_Text);
if (EC) {
- getDriver().Diag(clang::diag::err_drv_cc_print_options_failure)
+ getDriver().Diag(diag::err_drv_cc_print_options_failure)
<< EC.message();
FailingCommand = &C;
delete OS;
@@ -173,7 +185,7 @@ int Compilation::ExecuteCommand(const Command &C,
int Res = C.Execute(Redirects, &Error, &ExecutionFailed);
if (!Error.empty()) {
assert(Res && "Error string set with 0 result code!");
- getDriver().Diag(clang::diag::err_drv_command_failure) << Error;
+ getDriver().Diag(diag::err_drv_command_failure) << Error;
}
if (Res)
@@ -186,21 +198,20 @@ using FailingCommandList = SmallVectorImpl<std::pair<int, const Command *>>;
static bool ActionFailed(const Action *A,
const FailingCommandList &FailingCommands) {
-
if (FailingCommands.empty())
return false;
- // CUDA can have the same input source code compiled multiple times so do not
- // compiled again if there are already failures. It is OK to abort the CUDA
- // pipeline on errors.
- if (A->isOffloading(Action::OFK_Cuda))
+ // CUDA/HIP can have the same input source code compiled multiple times, so
+ // do not compile it again if there are already failures. It is OK to abort
+ // the CUDA/HIP pipeline on errors.
+ if (A->isOffloading(Action::OFK_Cuda) || A->isOffloading(Action::OFK_HIP))
return true;
for (const auto &CI : FailingCommands)
if (A == &(CI.second->getSource()))
return true;
- for (const Action *AI : A->inputs())
+ for (const auto *AI : A->inputs())
if (ActionFailed(AI, FailingCommands))
return true;
@@ -239,6 +250,10 @@ void Compilation::initCompilationForDiagnostics() {
AllActions.clear();
Jobs.clear();
+ // Remove temporary files.
+ if (!TheDriver.isSaveTempsEnabled() && !ForceKeepTempFiles)
+ CleanupFileList(TempFiles);
+
// Clear temporary/results file lists.
TempFiles.clear();
ResultFiles.clear();
@@ -256,6 +271,9 @@ void Compilation::initCompilationForDiagnostics() {
// Redirect stdout/stderr to /dev/null.
Redirects = {None, {""}, {""}};
+
+ // Temporary files added by diagnostics should be kept.
+ ForceKeepTempFiles = true;
}
StringRef Compilation::getSysRoot() const {
diff --git a/lib/Driver/Distro.cpp b/lib/Driver/Distro.cpp
index f15c919b9aae..2c4d44faf8d0 100644
--- a/lib/Driver/Distro.cpp
+++ b/lib/Driver/Distro.cpp
@@ -24,7 +24,7 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
StringRef Data = File.get()->getBuffer();
SmallVector<StringRef, 16> Lines;
Data.split(Lines, "\n");
- Distro::DistroType Version = Distro::UnknownDistro;
+ Distro::DistroType Version = Distro::UnknownDistro;
for (StringRef Line : Lines)
if (Version == Distro::UnknownDistro && Line.startswith("DISTRIB_CODENAME="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
@@ -49,6 +49,7 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
.Case("zesty", Distro::UbuntuZesty)
.Case("artful", Distro::UbuntuArtful)
.Case("bionic", Distro::UbuntuBionic)
+ .Case("cosmic", Distro::UbuntuCosmic)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 9ae33b80f889..1dfcacc75ea5 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -12,6 +12,7 @@
#include "ToolChains/AMDGPU.h"
#include "ToolChains/AVR.h"
#include "ToolChains/Ananas.h"
+#include "ToolChains/BareMetal.h"
#include "ToolChains/Clang.h"
#include "ToolChains/CloudABI.h"
#include "ToolChains/Contiki.h"
@@ -22,15 +23,15 @@
#include "ToolChains/FreeBSD.h"
#include "ToolChains/Fuchsia.h"
#include "ToolChains/Gnu.h"
-#include "ToolChains/BareMetal.h"
+#include "ToolChains/HIP.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
#include "ToolChains/Lanai.h"
#include "ToolChains/Linux.h"
+#include "ToolChains/MSVC.h"
#include "ToolChains/MinGW.h"
#include "ToolChains/Minix.h"
#include "ToolChains/MipsLinux.h"
-#include "ToolChains/MSVC.h"
#include "ToolChains/Myriad.h"
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
@@ -57,17 +58,20 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/StringSaver.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -81,19 +85,20 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
-Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
+Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags,
IntrusiveRefCntPtr<vfs::FileSystem> VFS)
: Opts(createDriverOptTable()), Diags(Diags), VFS(std::move(VFS)),
Mode(GCCMode), SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
LTOMode(LTOK_None), ClangExecutable(ClangExecutable),
- SysRoot(DEFAULT_SYSROOT),
- DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr),
- CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr),
- CCCPrintBindings(false), CCPrintHeaders(false), CCLogDiagnostics(false),
- CCGenDiagnostics(false), DefaultTargetTriple(DefaultTargetTriple),
- CCCGenericGCCName(""), CheckInputsExist(true), CCCUsePCH(true),
- GenReproducer(false), SuppressMissingInputWarning(false) {
+ SysRoot(DEFAULT_SYSROOT), DriverTitle("clang LLVM compiler"),
+ CCPrintOptionsFilename(nullptr), CCPrintHeadersFilename(nullptr),
+ CCLogDiagnosticsFilename(nullptr), CCCPrintBindings(false),
+ CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
+ CCGenDiagnostics(false), TargetTriple(TargetTriple),
+ CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
+ CCCUsePCH(true), GenReproducer(false),
+ SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
@@ -103,6 +108,13 @@ Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
Dir = llvm::sys::path::parent_path(ClangExecutable);
InstalledDir = Dir; // Provide a sensible default installed dir.
+#if defined(CLANG_CONFIG_FILE_SYSTEM_DIR)
+ SystemConfigDir = CLANG_CONFIG_FILE_SYSTEM_DIR;
+#endif
+#if defined(CLANG_CONFIG_FILE_USER_DIR)
+ UserConfigDir = CLANG_CONFIG_FILE_USER_DIR;
+#endif
+
// Compute the path to the resource directory.
StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
SmallString<128> P(Dir);
@@ -119,11 +131,12 @@ Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
void Driver::ParseDriverMode(StringRef ProgramName,
ArrayRef<const char *> Args) {
- ClangNameParts = ToolChain::getTargetAndModeFromProgramName(ProgramName);
+ if (ClangNameParts.isEmpty())
+ ClangNameParts = ToolChain::getTargetAndModeFromProgramName(ProgramName);
setDriverModeFromOption(ClangNameParts.DriverMode);
for (const char *ArgPtr : Args) {
- // Ingore nullptrs, they are response file's EOL markers
+ // Ignore nullptrs, they are the response file's EOL markers.
if (ArgPtr == nullptr)
continue;
const StringRef Arg = ArgPtr;
@@ -138,15 +151,13 @@ void Driver::setDriverModeFromOption(StringRef Opt) {
return;
StringRef Value = Opt.drop_front(OptName.size());
- const unsigned M = llvm::StringSwitch<unsigned>(Value)
- .Case("gcc", GCCMode)
- .Case("g++", GXXMode)
- .Case("cpp", CPPMode)
- .Case("cl", CLMode)
- .Default(~0U);
-
- if (M != ~0U)
- Mode = static_cast<DriverMode>(M);
+ if (auto M = llvm::StringSwitch<llvm::Optional<DriverMode>>(Value)
+ .Case("gcc", GCCMode)
+ .Case("g++", GXXMode)
+ .Case("cpp", CPPMode)
+ .Case("cl", CLMode)
+ .Default(None))
+ Mode = *M;
else
Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
}
@@ -178,9 +189,19 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
// Check for unsupported options.
for (const Arg *A : Args) {
if (A->getOption().hasFlag(options::Unsupported)) {
- Diag(diag::err_drv_unsupported_opt) << A->getAsString(Args);
- ContainsError |= Diags.getDiagnosticLevel(diag::err_drv_unsupported_opt,
- SourceLocation()) >
+ unsigned DiagID;
+ auto ArgString = A->getAsString(Args);
+ std::string Nearest;
+ if (getOpts().findNearest(
+ ArgString, Nearest, IncludedFlagsBitmask,
+ ExcludedFlagsBitmask | options::Unsupported) > 1) {
+ DiagID = diag::err_drv_unsupported_opt;
+ Diag(DiagID) << ArgString;
+ } else {
+ DiagID = diag::err_drv_unsupported_opt_with_suggestion;
+ Diag(DiagID) << ArgString << Nearest;
+ }
+ ContainsError |= Diags.getDiagnosticLevel(DiagID, SourceLocation()) >
DiagnosticsEngine::Warning;
continue;
}
@@ -195,11 +216,20 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
}
for (const Arg *A : Args.filtered(options::OPT_UNKNOWN)) {
- auto ID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl
- : diag::err_drv_unknown_argument;
-
- Diags.Report(ID) << A->getAsString(Args);
- ContainsError |= Diags.getDiagnosticLevel(ID, SourceLocation()) >
+ unsigned DiagID;
+ auto ArgString = A->getAsString(Args);
+ std::string Nearest;
+ if (getOpts().findNearest(
+ ArgString, Nearest, IncludedFlagsBitmask, ExcludedFlagsBitmask) > 1) {
+ DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl
+ : diag::err_drv_unknown_argument;
+ Diags.Report(DiagID) << ArgString;
+ } else {
+ DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl_with_suggestion
+ : diag::err_drv_unknown_argument_with_suggestion;
+ Diags.Report(DiagID) << ArgString << Nearest;
+ }
+ ContainsError |= Diags.getDiagnosticLevel(DiagID, SourceLocation()) >
DiagnosticsEngine::Warning;
}
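// [Editor's note] Illustrative effect of the findNearest() gating above, as
// a sketch; the exact diagnostic wording is approximate. A near-miss such as
// "-fsyntax-onyl" yields:
//   error: unknown argument '-fsyntax-onyl', did you mean '-fsyntax-only'?
// while any spelling farther than edit distance 1 keeps the plain
// "unknown argument" / "unsupported option" error without a suggestion.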
@@ -256,11 +286,12 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
}
static Arg *MakeInputArg(DerivedArgList &Args, OptTable &Opts,
- StringRef Value) {
+ StringRef Value, bool Claim = true) {
Arg *A = new Arg(Opts.getOption(options::OPT_INPUT), Value,
Args.getBaseArgs().MakeIndex(Value), Value.data());
Args.AddSynthesizedArg(A);
- A->claim();
+ if (Claim)
+ A->claim();
return A;
}
@@ -328,7 +359,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
if (A->getOption().matches(options::OPT__DASH_DASH)) {
A->claim();
for (StringRef Val : A->getValues())
- DAL->append(MakeInputArg(*DAL, *Opts, Val));
+ DAL->append(MakeInputArg(*DAL, *Opts, Val, false));
continue;
}
@@ -353,23 +384,23 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
return DAL;
}
-/// \brief Compute target triple from args.
+/// Compute target triple from args.
///
/// This routine provides the logic to compute a target triple from various
/// args passed to the driver and the default triple string.
static llvm::Triple computeTargetTriple(const Driver &D,
- StringRef DefaultTargetTriple,
+ StringRef TargetTriple,
const ArgList &Args,
StringRef DarwinArchName = "") {
// FIXME: Already done in Compilation *Driver::BuildCompilation
if (const Arg *A = Args.getLastArg(options::OPT_target))
- DefaultTargetTriple = A->getValue();
+ TargetTriple = A->getValue();
- llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+ llvm::Triple Target(llvm::Triple::normalize(TargetTriple));
// Handle Apple-specific options available here.
if (Target.isOSBinFormatMachO()) {
- // If an explict Darwin arch name is given, that trumps all.
+ // If an explicit Darwin arch name is given, that trumps all.
if (!DarwinArchName.empty()) {
tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
return Target;
@@ -452,7 +483,7 @@ static llvm::Triple computeTargetTriple(const Driver &D,
return Target;
}
-// \brief Parse the LTO options and record the type of LTO compilation
+// Parse the LTO options and record the type of LTO compilation
// based on which -f(no-)?lto(=.*)? option occurs last.
void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
LTOMode = LTOK_None;
@@ -508,24 +539,55 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
InputList &Inputs) {
//
- // CUDA
+ // CUDA/HIP
//
- // We need to generate a CUDA toolchain if any of the inputs has a CUDA type.
- if (llvm::any_of(Inputs, [](std::pair<types::ID, const llvm::opt::Arg *> &I) {
+ // We need to generate a CUDA/HIP toolchain if any of the inputs has a CUDA
+ // or HIP type. However, mixed CUDA/HIP compilation is not supported.
+ bool IsCuda =
+ llvm::any_of(Inputs, [](std::pair<types::ID, const llvm::opt::Arg *> &I) {
return types::isCuda(I.first);
- })) {
+ });
+ bool IsHIP =
+ llvm::any_of(Inputs,
+ [](std::pair<types::ID, const llvm::opt::Arg *> &I) {
+ return types::isHIP(I.first);
+ }) ||
+ C.getInputArgs().hasArg(options::OPT_hip_link);
+ if (IsCuda && IsHIP) {
+ Diag(clang::diag::err_drv_mix_cuda_hip);
+ return;
+ }
+ if (IsCuda) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
- llvm::Triple CudaTriple(HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda"
- : "nvptx-nvidia-cuda");
- // Use the CUDA and host triples as the key into the ToolChains map, because
- // the device toolchain we create depends on both.
+ StringRef DeviceTripleStr;
+ auto OFK = Action::OFK_Cuda;
+ DeviceTripleStr =
+ HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda" : "nvptx-nvidia-cuda";
+ llvm::Triple CudaTriple(DeviceTripleStr);
+ // Use the CUDA and host triples as the key into the ToolChains map,
+ // because the device toolchain we create depends on both.
auto &CudaTC = ToolChains[CudaTriple.str() + "/" + HostTriple.str()];
if (!CudaTC) {
CudaTC = llvm::make_unique<toolchains::CudaToolChain>(
- *this, CudaTriple, *HostTC, C.getInputArgs(), Action::OFK_Cuda);
+ *this, CudaTriple, *HostTC, C.getInputArgs(), OFK);
+ }
+ C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
+ } else if (IsHIP) {
+ const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
+ const llvm::Triple &HostTriple = HostTC->getTriple();
+ StringRef DeviceTripleStr;
+ auto OFK = Action::OFK_HIP;
+ DeviceTripleStr = "amdgcn-amd-amdhsa";
+ llvm::Triple HIPTriple(DeviceTripleStr);
+ // Use the HIP and host triples as the key into the ToolChains map,
+ // because the device toolchain we create depends on both.
+ auto &HIPTC = ToolChains[HIPTriple.str() + "/" + HostTriple.str()];
+ if (!HIPTC) {
+ HIPTC = llvm::make_unique<toolchains::HIPToolChain>(
+ *this, HIPTriple, *HostTC, C.getInputArgs());
}
- C.addOffloadDeviceToolChain(CudaTC.get(), Action::OFK_Cuda);
+ C.addOffloadDeviceToolChain(HIPTC.get(), OFK);
}
//
@@ -600,6 +662,216 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
//
}
+/// Searches the given directories for the specified file.
+///
+/// \param[out] FilePath File path, if the file was found.
+/// \param[in] Dirs Directories used for the search.
+/// \param[in] FileName Name of the file to search for.
+/// \return True if the file was found.
+///
+/// Looks for the file specified by FileName sequentially in the directories
+/// specified by Dirs.
+///
+static bool searchForFile(SmallVectorImpl<char> &FilePath,
+ ArrayRef<std::string> Dirs,
+ StringRef FileName) {
+ SmallString<128> WPath;
+ for (const StringRef &Dir : Dirs) {
+ if (Dir.empty())
+ continue;
+ WPath.clear();
+ llvm::sys::path::append(WPath, Dir, FileName);
+ llvm::sys::path::native(WPath);
+ if (llvm::sys::fs::is_regular_file(WPath)) {
+ FilePath = std::move(WPath);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Driver::readConfigFile(StringRef FileName) {
+ // Try reading the given file.
+ SmallVector<const char *, 32> NewCfgArgs;
+ if (!llvm::cl::readConfigFile(FileName, Saver, NewCfgArgs)) {
+ Diag(diag::err_drv_cannot_read_config_file) << FileName;
+ return true;
+ }
+
+ // Read options from config file.
+ llvm::SmallString<128> CfgFileName(FileName);
+ llvm::sys::path::native(CfgFileName);
+ ConfigFile = CfgFileName.str();
+ bool ContainErrors;
+ CfgOptions = llvm::make_unique<InputArgList>(
+ ParseArgStrings(NewCfgArgs, ContainErrors));
+ if (ContainErrors) {
+ CfgOptions.reset();
+ return true;
+ }
+
+ if (CfgOptions->hasArg(options::OPT_config)) {
+ CfgOptions.reset();
+ Diag(diag::err_drv_nested_config_file);
+ return true;
+ }
+
+ // Claim all arguments that come from a configuration file so that the driver
+ // does not warn on any that are unused.
+ for (Arg *A : *CfgOptions)
+ A->claim();
+ return false;
+}
+
+bool Driver::loadConfigFile() {
+ std::string CfgFileName;
+ bool FileSpecifiedExplicitly = false;
+
+ // Process options that change search path for config files.
+ if (CLOptions) {
+ if (CLOptions->hasArg(options::OPT_config_system_dir_EQ)) {
+ SmallString<128> CfgDir;
+ CfgDir.append(
+ CLOptions->getLastArgValue(options::OPT_config_system_dir_EQ));
+ if (!CfgDir.empty()) {
+ if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
+ SystemConfigDir.clear();
+ else
+ SystemConfigDir = std::string(CfgDir.begin(), CfgDir.end());
+ }
+ }
+ if (CLOptions->hasArg(options::OPT_config_user_dir_EQ)) {
+ SmallString<128> CfgDir;
+ CfgDir.append(
+ CLOptions->getLastArgValue(options::OPT_config_user_dir_EQ));
+ if (!CfgDir.empty()) {
+ if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
+ UserConfigDir.clear();
+ else
+ UserConfigDir = std::string(CfgDir.begin(), CfgDir.end());
+ }
+ }
+ }
+
+ // First try to find config file specified in command line.
+ if (CLOptions) {
+ std::vector<std::string> ConfigFiles =
+ CLOptions->getAllArgValues(options::OPT_config);
+ if (ConfigFiles.size() > 1) {
+ Diag(diag::err_drv_duplicate_config);
+ return true;
+ }
+
+ if (!ConfigFiles.empty()) {
+ CfgFileName = ConfigFiles.front();
+ assert(!CfgFileName.empty());
+
+ // If argument contains directory separator, treat it as a path to
+ // configuration file.
+ if (llvm::sys::path::has_parent_path(CfgFileName)) {
+ SmallString<128> CfgFilePath;
+ if (llvm::sys::path::is_relative(CfgFileName))
+ llvm::sys::fs::current_path(CfgFilePath);
+ llvm::sys::path::append(CfgFilePath, CfgFileName);
+ if (!llvm::sys::fs::is_regular_file(CfgFilePath)) {
+ Diag(diag::err_drv_config_file_not_exist) << CfgFilePath;
+ return true;
+ }
+ return readConfigFile(CfgFilePath);
+ }
+
+ FileSpecifiedExplicitly = true;
+ }
+ }
+
+ // If config file is not specified explicitly, try to deduce configuration
+ // from executable name. For instance, an executable 'armv7l-clang' will
+ // search for config file 'armv7l-clang.cfg'.
+ if (CfgFileName.empty() && !ClangNameParts.TargetPrefix.empty())
+ CfgFileName = ClangNameParts.TargetPrefix + '-' + ClangNameParts.ModeSuffix;
+
+ if (CfgFileName.empty())
+ return false;
+
+ // Determine architecture part of the file name, if it is present.
+ StringRef CfgFileArch = CfgFileName;
+ size_t ArchPrefixLen = CfgFileArch.find('-');
+ if (ArchPrefixLen == StringRef::npos)
+ ArchPrefixLen = CfgFileArch.size();
+ llvm::Triple CfgTriple;
+ CfgFileArch = CfgFileArch.take_front(ArchPrefixLen);
+ CfgTriple = llvm::Triple(llvm::Triple::normalize(CfgFileArch));
+ if (CfgTriple.getArch() == llvm::Triple::ArchType::UnknownArch)
+ ArchPrefixLen = 0;
+
+ if (!StringRef(CfgFileName).endswith(".cfg"))
+ CfgFileName += ".cfg";
+
+ // If the config file name starts with an architecture name and command line
+ // options redefine the architecture (with options like -m32, -LE, etc.), try
+ // finding a new config file with that architecture.
+ SmallString<128> FixedConfigFile;
+ size_t FixedArchPrefixLen = 0;
+ if (ArchPrefixLen) {
+ // Get architecture name from config file name like 'i386.cfg' or
+ // 'armv7l-clang.cfg'.
+ // Check whether command line options change the effective triple.
+ llvm::Triple EffectiveTriple = computeTargetTriple(*this,
+ CfgTriple.getTriple(), *CLOptions);
+ if (CfgTriple.getArch() != EffectiveTriple.getArch()) {
+ FixedConfigFile = EffectiveTriple.getArchName();
+ FixedArchPrefixLen = FixedConfigFile.size();
+ // Append the rest of original file name so that file name transforms
+ // like: i386-clang.cfg -> x86_64-clang.cfg.
+ if (ArchPrefixLen < CfgFileName.size())
+ FixedConfigFile += CfgFileName.substr(ArchPrefixLen);
+ }
+ }
+
+ // Prepare list of directories where config file is searched for.
+ SmallVector<std::string, 3> CfgFileSearchDirs;
+ CfgFileSearchDirs.push_back(UserConfigDir);
+ CfgFileSearchDirs.push_back(SystemConfigDir);
+ CfgFileSearchDirs.push_back(Dir);
+
+ // Try to find config file. First try file with corrected architecture.
+ llvm::SmallString<128> CfgFilePath;
+ if (!FixedConfigFile.empty()) {
+ if (searchForFile(CfgFilePath, CfgFileSearchDirs, FixedConfigFile))
+ return readConfigFile(CfgFilePath);
+ // If 'x86_64-clang.cfg' was not found, try 'x86_64.cfg'.
+ FixedConfigFile.resize(FixedArchPrefixLen);
+ FixedConfigFile.append(".cfg");
+ if (searchForFile(CfgFilePath, CfgFileSearchDirs, FixedConfigFile))
+ return readConfigFile(CfgFilePath);
+ }
+
+ // Then try original file name.
+ if (searchForFile(CfgFilePath, CfgFileSearchDirs, CfgFileName))
+ return readConfigFile(CfgFilePath);
+
+ // Finally try removing driver mode part: 'x86_64-clang.cfg' -> 'x86_64.cfg'.
+ if (!ClangNameParts.ModeSuffix.empty() &&
+ !ClangNameParts.TargetPrefix.empty()) {
+ CfgFileName.assign(ClangNameParts.TargetPrefix);
+ CfgFileName.append(".cfg");
+ if (searchForFile(CfgFilePath, CfgFileSearchDirs, CfgFileName))
+ return readConfigFile(CfgFilePath);
+ }
+
+ // Report error but only if config file was specified explicitly, by option
+ // --config. If it was deduced from executable name, it is not an error.
+ if (FileSpecifiedExplicitly) {
+ Diag(diag::err_drv_config_file_not_found) << CfgFileName;
+ for (const std::string &SearchDir : CfgFileSearchDirs)
+ if (!SearchDir.empty())
+ Diag(diag::note_drv_config_file_searched_in) << SearchDir;
+ return true;
+ }
+
+ return false;
+}
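// [Editor's note] Worked example of the lookup implemented above, condensed
// into a hypothetical helper (not in the patch). Assuming the binary is named
// "i386-clang" and -m64 changes the effective arch to x86_64, the candidates
// are tried in this order, each in UserConfigDir, SystemConfigDir, then Dir:
//   x86_64-clang.cfg, x86_64.cfg, i386-clang.cfg, i386.cfg
#include <string>
#include <vector>
static std::vector<std::string>
configFileCandidates(const std::string &Deduced,       // e.g. "i386-clang"
                     const std::string &EffectiveArch, // e.g. "x86_64"
                     const std::string &TargetPrefix) { // e.g. "i386"
  std::vector<std::string> C;
  std::string::size_type Dash = Deduced.find('-');
  std::string Arch = Deduced.substr(0, Dash);
  std::string Rest = Dash == std::string::npos ? "" : Deduced.substr(Dash);
  if (!EffectiveArch.empty() && EffectiveArch != Arch) {
    C.push_back(EffectiveArch + Rest + ".cfg"); // arch prefix rewritten
    C.push_back(EffectiveArch + ".cfg");        // mode suffix dropped
  }
  C.push_back(Deduced + ".cfg");                // original deduced name
  if (!TargetPrefix.empty())
    C.push_back(TargetPrefix + ".cfg");         // original, suffix dropped
  return C;
}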
+
Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
llvm::PrettyStackTraceString CrashInfo("Compilation construction");
@@ -623,12 +895,38 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// FIXME: What are we going to do with -V and -b?
+ // Arguments specified in command line.
+ bool ContainsError;
+ CLOptions = llvm::make_unique<InputArgList>(
+ ParseArgStrings(ArgList.slice(1), ContainsError));
+
+ // Try parsing configuration file.
+ if (!ContainsError)
+ ContainsError = loadConfigFile();
+ bool HasConfigFile = !ContainsError && (CfgOptions.get() != nullptr);
+
+ // All arguments, from both config file and command line.
+ InputArgList Args = std::move(HasConfigFile ? std::move(*CfgOptions)
+ : std::move(*CLOptions));
+ if (HasConfigFile)
+ for (auto *Opt : *CLOptions) {
+ if (Opt->getOption().matches(options::OPT_config))
+ continue;
+ unsigned Index = Args.MakeIndex(Opt->getSpelling());
+ const Arg *BaseArg = &Opt->getBaseArg();
+ if (BaseArg == Opt)
+ BaseArg = nullptr;
+ Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Opt->getSpelling(),
+ Index, BaseArg);
+ Copy->getValues() = Opt->getValues();
+ if (Opt->isClaimed())
+ Copy->claim();
+ Args.append(Copy);
+ }
+
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
- bool ContainsError;
- InputArgList Args = ParseArgStrings(ArgList.slice(1), ContainsError);
-
// Silence driver warnings if requested
Diags.setIgnoreAllWarnings(Args.hasArg(options::OPT_w));
@@ -653,19 +951,19 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
options::OPT_fno_crash_diagnostics,
!!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
- // FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
+ // FIXME: TargetTriple is used by the target-prefixed calls to as/ld
// and getToolChain is const.
if (IsCLMode()) {
// clang-cl targets MSVC-style Win32.
- llvm::Triple T(DefaultTargetTriple);
+ llvm::Triple T(TargetTriple);
T.setOS(llvm::Triple::Win32);
T.setVendor(llvm::Triple::PC);
T.setEnvironment(llvm::Triple::MSVC);
T.setObjectFormat(llvm::Triple::COFF);
- DefaultTargetTriple = T.str();
+ TargetTriple = T.str();
}
if (const Arg *A = Args.getLastArg(options::OPT_target))
- DefaultTargetTriple = A->getValue();
+ TargetTriple = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
Dir = InstalledDir = A->getValue();
for (const Arg *A : Args.filtered(options::OPT_B)) {
@@ -713,7 +1011,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// Owned by the host.
const ToolChain &TC = getToolChain(
- *UArgs, computeTargetTriple(*this, DefaultTargetTriple, *UArgs));
+ *UArgs, computeTargetTriple(*this, TargetTriple, *UArgs));
// The compilation takes ownership of Args.
Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs,
@@ -851,8 +1149,9 @@ bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
// When clang crashes, produce diagnostic information including the fully
// preprocessed source file(s). Request that the developer attach the
// diagnostic information to a bug report.
-void Driver::generateCompilationDiagnostics(Compilation &C,
- const Command &FailingCommand) {
+void Driver::generateCompilationDiagnostics(
+ Compilation &C, const Command &FailingCommand,
+ StringRef AdditionalInformation, CompilationDiagnosticReport *Report) {
if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
return;
@@ -954,9 +1253,6 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
// If any of the preprocessing commands failed, clean up and exit.
if (!FailingCommands.empty()) {
- if (!isSaveTempsEnabled())
- C.CleanupFileList(C.getTempFiles(), true);
-
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s).";
return;
@@ -978,6 +1274,8 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
SmallString<128> ReproCrashFilename;
for (const char *TempFile : TempFiles) {
Diag(clang::diag::note_drv_command_failed_diag_msg) << TempFile;
+ if (Report)
+ Report->TemporaryFiles.push_back(TempFile);
if (ReproCrashFilename.empty()) {
ReproCrashFilename = TempFile;
llvm::sys::path::replace_extension(ReproCrashFilename, ".crash");
@@ -993,12 +1291,13 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
// Assume associated files are based off of the first temporary file.
CrashReportInfo CrashInfo(TempFiles[0], VFS);
- std::string Script = CrashInfo.Filename.rsplit('.').first.str() + ".sh";
+ llvm::SmallString<128> Script(CrashInfo.Filename);
+ llvm::sys::path::replace_extension(Script, "sh");
std::error_code EC;
- llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::F_Excl);
+ llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::CD_CreateNew);
if (EC) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Error generating run script: " + Script + " " + EC.message();
+ << "Error generating run script: " << Script << " " << EC.message();
} else {
ScriptOS << "# Crash reproducer for " << getClangFullVersion() << "\n"
<< "# Driver args: ";
@@ -1006,6 +1305,11 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
ScriptOS << "# Original command: ";
Cmd.Print(ScriptOS, "\n", /*Quote=*/true);
Cmd.Print(ScriptOS, "\n", /*Quote=*/true, &CrashInfo);
+ if (!AdditionalInformation.empty())
+ ScriptOS << "\n# Additional information: " << AdditionalInformation
+ << "\n";
+ if (Report)
+ Report->TemporaryFiles.push_back(Script.str());
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
@@ -1066,9 +1370,6 @@ int Driver::ExecuteCompilation(
C.ExecuteJobs(C.getJobs(), FailingCommands);
- // Remove temp files.
- C.CleanupFileList(C.getTempFiles());
-
// If the command succeeded, we are done.
if (FailingCommands.empty())
return 0;
@@ -1144,6 +1445,10 @@ void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
// Print out the install directory.
OS << "InstalledDir: " << InstalledDir << '\n';
+
+ // If configuration file was used, print its path.
+ if (!ConfigFile.empty())
+ OS << "Configuration file: " << ConfigFile << '\n';
}
/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
@@ -1155,53 +1460,66 @@ static void PrintDiagnosticCategories(raw_ostream &OS) {
OS << i << ',' << DiagnosticIDs::getCategoryNameFromID(i) << '\n';
}
-void Driver::handleAutocompletions(StringRef PassedFlags) const {
+void Driver::HandleAutocompletions(StringRef PassedFlags) const {
+ if (PassedFlags == "")
+ return;
// Print out all options that start with a given argument. This is used for
// shell autocompletion.
std::vector<std::string> SuggestedCompletions;
+ std::vector<std::string> Flags;
unsigned short DisableFlags =
options::NoDriverOption | options::Unsupported | options::Ignored;
- // We want to show cc1-only options only when clang is invoked as "clang
- // -cc1". When clang is invoked as "clang -cc1", we add "#" to the beginning
- // of an --autocomplete option so that the clang driver can distinguish
- // whether it is requested to show cc1-only options or not.
- if (PassedFlags.size() > 0 && PassedFlags[0] == '#') {
+
+ // Parse PassedFlags by "," as all the command-line flags are passed to this
+ // function separated by ",".
+ StringRef TargetFlags = PassedFlags;
+ while (TargetFlags != "") {
+ StringRef CurFlag;
+ std::tie(CurFlag, TargetFlags) = TargetFlags.split(",");
+ Flags.push_back(std::string(CurFlag));
+ }
+
+ // We want to show cc1-only options only when clang is invoked with -cc1 or
+ // -Xclang.
+ if (std::find(Flags.begin(), Flags.end(), "-Xclang") != Flags.end() ||
+ std::find(Flags.begin(), Flags.end(), "-cc1") != Flags.end())
DisableFlags &= ~options::NoDriverOption;
- PassedFlags = PassedFlags.substr(1);
+
+ StringRef Cur;
+ Cur = Flags.at(Flags.size() - 1);
+ StringRef Prev;
+ if (Flags.size() >= 2) {
+ Prev = Flags.at(Flags.size() - 2);
+ SuggestedCompletions = Opts->suggestValueCompletions(Prev, Cur);
}
- if (PassedFlags.find(',') == StringRef::npos) {
+ if (SuggestedCompletions.empty())
+ SuggestedCompletions = Opts->suggestValueCompletions(Cur, "");
+
+ if (SuggestedCompletions.empty()) {
// If the flag is in the form of "--autocomplete=-foo",
// we were requested to print out all option names that start with "-foo".
// For example, "--autocomplete=-fsyn" is expanded to "-fsyntax-only".
- SuggestedCompletions = Opts->findByPrefix(PassedFlags, DisableFlags);
+ SuggestedCompletions = Opts->findByPrefix(Cur, DisableFlags);
// We have to query the -W flags manually as they're not in the OptTable.
// TODO: Find a good way to add them to OptTable instead and then remove
// this code.
for (StringRef S : DiagnosticIDs::getDiagnosticFlags())
- if (S.startswith(PassedFlags))
+ if (S.startswith(Cur))
SuggestedCompletions.push_back(S);
- } else {
- // If the flag is in the form of "--autocomplete=foo,bar", we were
- // requested to print out all option values for "-foo" that start with
- // "bar". For example,
- // "--autocomplete=-stdlib=,l" is expanded to "libc++" and "libstdc++".
- StringRef Option, Arg;
- std::tie(Option, Arg) = PassedFlags.split(',');
- SuggestedCompletions = Opts->suggestValueCompletions(Option, Arg);
}
// Sort the autocomplete candidates so that shells print them out in a
// deterministic order. We could sort in any way, but we chose
// case-insensitive sorting for consistency with the -help option
// which prints out options in the case-insensitive alphabetical order.
- std::sort(SuggestedCompletions.begin(), SuggestedCompletions.end(),
- [](StringRef A, StringRef B) {
- if (int X = A.compare_lower(B))
- return X < 0;
- return A.compare(B) > 0;
+ llvm::sort(SuggestedCompletions.begin(), SuggestedCompletions.end(),
+ [](StringRef A, StringRef B) {
+ if (int X = A.compare_lower(B))
+ return X < 0;
+ return A.compare(B) > 0;
});
llvm::outs() << llvm::join(SuggestedCompletions, "\n") << '\n';
@@ -1250,6 +1568,15 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
SuppressMissingInputWarning = true;
}
+ if (C.getArgs().hasArg(options::OPT_v)) {
+ if (!SystemConfigDir.empty())
+ llvm::errs() << "System configuration file directory: "
+ << SystemConfigDir << "\n";
+ if (!UserConfigDir.empty())
+ llvm::errs() << "User configuration file directory: "
+ << UserConfigDir << "\n";
+ }
+
const ToolChain &TC = C.getDefaultToolChain();
if (C.getArgs().hasArg(options::OPT_v))
@@ -1295,13 +1622,19 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
- llvm::outs() << GetProgramPath(A->getValue(), TC) << "\n";
+ StringRef ProgName = A->getValue();
+
+ // An empty program name cannot have a path.
+ if (!ProgName.empty())
+ llvm::outs() << GetProgramPath(ProgName, TC);
+
+ llvm::outs() << "\n";
return false;
}
if (Arg *A = C.getArgs().getLastArg(options::OPT_autocomplete)) {
StringRef PassedFlags = A->getValue();
- handleAutocompletions(PassedFlags);
+ HandleAutocompletions(PassedFlags);
return false;
}
@@ -1427,7 +1760,7 @@ void Driver::PrintActions(const Compilation &C) const {
PrintActions1(C, A, Ids);
}
-/// \brief Check whether the given input tree contains any compilation or
+/// Check whether the given input tree contains any compilation or
/// assembly actions.
static bool ContainsCompileOrAssembleAction(const Action *A) {
if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A) ||
@@ -1528,7 +1861,7 @@ void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
}
}
-/// \brief Check that the file referenced by Value exists. If it doesn't,
+/// Check that the file referenced by Value exists. If it doesn't,
/// issue a diagnostic and return false.
static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
StringRef Value, types::ID Ty) {
@@ -1823,9 +2156,10 @@ class OffloadingActionBuilder final {
}
};
- /// \brief CUDA action builder. It injects device code in the host backend
- /// action.
- class CudaActionBuilder final : public DeviceActionBuilder {
+ /// Base class for CUDA/HIP action builder. It injects device code in
+ /// the host backend action.
+ class CudaActionBuilderBase : public DeviceActionBuilder {
+ protected:
/// Flags to signal if the user requested host-only or device-only
/// compilation.
bool CompileHostOnly = false;
@@ -1842,115 +2176,11 @@ class OffloadingActionBuilder final {
/// Flag that is set to true if this builder acted on the current input.
bool IsActive = false;
-
public:
- CudaActionBuilder(Compilation &C, DerivedArgList &Args,
- const Driver::InputList &Inputs)
- : DeviceActionBuilder(C, Args, Inputs, Action::OFK_Cuda) {}
-
- ActionBuilderReturnCode
- getDeviceDependences(OffloadAction::DeviceDependences &DA,
- phases::ID CurPhase, phases::ID FinalPhase,
- PhasesTy &Phases) override {
- if (!IsActive)
- return ABRT_Inactive;
-
- // If we don't have more CUDA actions, we don't have any dependences to
- // create for the host.
- if (CudaDeviceActions.empty())
- return ABRT_Success;
-
- assert(CudaDeviceActions.size() == GpuArchList.size() &&
- "Expecting one action per GPU architecture.");
- assert(!CompileHostOnly &&
- "Not expecting CUDA actions in host-only compilation.");
-
- // If we are generating code for the device or we are in a backend phase,
- // we attempt to generate the fat binary. We compile each arch to ptx and
- // assemble to cubin, then feed the cubin *and* the ptx into a device
- // "link" action, which uses fatbinary to combine these cubins into one
- // fatbin. The fatbin is then an input to the host action if not in
- // device-only mode.
- if (CompileDeviceOnly || CurPhase == phases::Backend) {
- ActionList DeviceActions;
- for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
- // Produce the device action from the current phase up to the assemble
- // phase.
- for (auto Ph : Phases) {
- // Skip the phases that were already dealt with.
- if (Ph < CurPhase)
- continue;
- // We have to be consistent with the host final phase.
- if (Ph > FinalPhase)
- break;
-
- CudaDeviceActions[I] = C.getDriver().ConstructPhaseAction(
- C, Args, Ph, CudaDeviceActions[I]);
-
- if (Ph == phases::Assemble)
- break;
- }
-
- // If we didn't reach the assemble phase, we can't generate the fat
- // binary. We don't need to generate the fat binary if we are not in
- // device-only mode.
- if (!isa<AssembleJobAction>(CudaDeviceActions[I]) ||
- CompileDeviceOnly)
- continue;
-
- Action *AssembleAction = CudaDeviceActions[I];
- assert(AssembleAction->getType() == types::TY_Object);
- assert(AssembleAction->getInputs().size() == 1);
-
- Action *BackendAction = AssembleAction->getInputs()[0];
- assert(BackendAction->getType() == types::TY_PP_Asm);
-
- for (auto &A : {AssembleAction, BackendAction}) {
- OffloadAction::DeviceDependences DDep;
- DDep.add(*A, *ToolChains.front(), CudaArchToString(GpuArchList[I]),
- Action::OFK_Cuda);
- DeviceActions.push_back(
- C.MakeAction<OffloadAction>(DDep, A->getType()));
- }
- }
-
- // We generate the fat binary if we have device input actions.
- if (!DeviceActions.empty()) {
- CudaFatBinary =
- C.MakeAction<LinkJobAction>(DeviceActions, types::TY_CUDA_FATBIN);
-
- if (!CompileDeviceOnly) {
- DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
- Action::OFK_Cuda);
- // Clear the fat binary, it is already a dependence to an host
- // action.
- CudaFatBinary = nullptr;
- }
-
- // Remove the CUDA actions as they are already connected to an host
- // action or fat binary.
- CudaDeviceActions.clear();
- }
-
- // We avoid creating host action in device-only mode.
- return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
- } else if (CurPhase > phases::Backend) {
- // If we are past the backend phase and still have a device action, we
- // don't have to do anything as this action is already a device
- // top-level action.
- return ABRT_Success;
- }
-
- assert(CurPhase < phases::Backend && "Generating single CUDA "
- "instructions should only occur "
- "before the backend phase!");
-
- // By default, we produce an action for each device arch.
- for (Action *&A : CudaDeviceActions)
- A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
-
- return ABRT_Success;
- }
+ CudaActionBuilderBase(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs,
+ Action::OffloadKind OFKind)
+ : DeviceActionBuilder(C, Args, Inputs, OFKind) {}
ActionBuilderReturnCode addDeviceDepences(Action *HostAction) override {
// While generating code for CUDA, we only depend on the host input action
@@ -1963,9 +2193,10 @@ class OffloadingActionBuilder final {
assert(!GpuArchList.empty() &&
"We should have at least one GPU architecture.");
- // If the host input is not CUDA, we don't need to bother about this
- // input.
- if (IA->getType() != types::TY_CUDA) {
+ // If the host input is not CUDA or HIP, we don't need to bother about
+ // this input.
+ if (IA->getType() != types::TY_CUDA &&
+ IA->getType() != types::TY_HIP) {
// The builder will ignore this input.
IsActive = false;
return ABRT_Inactive;
@@ -1978,10 +2209,24 @@ class OffloadingActionBuilder final {
return ABRT_Success;
// Replicate inputs for each GPU architecture.
- for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
- CudaDeviceActions.push_back(C.MakeAction<InputAction>(
- IA->getInputArg(), types::TY_CUDA_DEVICE));
+ auto Ty = IA->getType() == types::TY_HIP ? types::TY_HIP_DEVICE
+ : types::TY_CUDA_DEVICE;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ CudaDeviceActions.push_back(
+ C.MakeAction<InputAction>(IA->getInputArg(), Ty));
+ }
+
+ return ABRT_Success;
+ }
+ // If this is an unbundling action, use it as-is for each CUDA toolchain.
+ if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
+ CudaDeviceActions.clear();
+ for (auto Arch : GpuArchList) {
+ CudaDeviceActions.push_back(UA);
+ UA->registerDependentActionInfo(ToolChains[0], CudaArchToString(Arch),
+ AssociatedOffloadKind);
+ }
return ABRT_Success;
}
@@ -1993,7 +2238,7 @@ class OffloadingActionBuilder final {
auto AddTopLevel = [&](Action *A, CudaArch BoundArch) {
OffloadAction::DeviceDependences Dep;
Dep.add(*A, *ToolChains.front(), CudaArchToString(BoundArch),
- Action::OFK_Cuda);
+ AssociatedOffloadKind);
AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
};
@@ -2022,21 +2267,35 @@ class OffloadingActionBuilder final {
}
bool initialize() override {
+ assert(AssociatedOffloadKind == Action::OFK_Cuda ||
+ AssociatedOffloadKind == Action::OFK_HIP);
+
// We don't need to support CUDA.
- if (!C.hasOffloadToolChain<Action::OFK_Cuda>())
+ if (AssociatedOffloadKind == Action::OFK_Cuda &&
+ !C.hasOffloadToolChain<Action::OFK_Cuda>())
+ return false;
+
+ // We don't need to support HIP.
+ if (AssociatedOffloadKind == Action::OFK_HIP &&
+ !C.hasOffloadToolChain<Action::OFK_HIP>())
return false;
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "No toolchain for host compilation.");
- if (HostTC->getTriple().isNVPTX()) {
- // We do not support targeting NVPTX for host compilation. Throw
+ if (HostTC->getTriple().isNVPTX() ||
+ HostTC->getTriple().getArch() == llvm::Triple::amdgcn) {
+ // We do not support targeting NVPTX/AMDGCN for host compilation. Throw
// an error and abort pipeline construction early so we don't trip
// asserts that assume device-side compilation.
- C.getDriver().Diag(diag::err_drv_cuda_nvptx_host);
+ C.getDriver().Diag(diag::err_drv_cuda_host_arch)
+ << HostTC->getTriple().getArchName();
return true;
}
- ToolChains.push_back(C.getSingleOffloadToolChain<Action::OFK_Cuda>());
+ ToolChains.push_back(
+ AssociatedOffloadKind == Action::OFK_Cuda
+ ? C.getSingleOffloadToolChain<Action::OFK_Cuda>()
+ : C.getSingleOffloadToolChain<Action::OFK_HIP>());
Arg *PartialCompilationArg = Args.getLastArg(
options::OPT_cuda_host_only, options::OPT_cuda_device_only,
@@ -2089,6 +2348,187 @@ class OffloadingActionBuilder final {
}
};
+ /// CUDA action builder. It injects device code in the host backend
+ /// action.
+ class CudaActionBuilder final : public CudaActionBuilderBase {
+ public:
+ CudaActionBuilder(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs)
+ : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_Cuda) {}
+
+ ActionBuilderReturnCode
+ getDeviceDependences(OffloadAction::DeviceDependences &DA,
+ phases::ID CurPhase, phases::ID FinalPhase,
+ PhasesTy &Phases) override {
+ if (!IsActive)
+ return ABRT_Inactive;
+
+ // If we don't have more CUDA actions, we don't have any dependences to
+ // create for the host.
+ if (CudaDeviceActions.empty())
+ return ABRT_Success;
+
+ assert(CudaDeviceActions.size() == GpuArchList.size() &&
+ "Expecting one action per GPU architecture.");
+ assert(!CompileHostOnly &&
+ "Not expecting CUDA actions in host-only compilation.");
+
+ // If we are generating code for the device or we are in a backend phase,
+ // we attempt to generate the fat binary. We compile each arch to ptx and
+ // assemble to cubin, then feed the cubin *and* the ptx into a device
+ // "link" action, which uses fatbinary to combine these cubins into one
+ // fatbin. The fatbin is then an input to the host action if not in
+ // device-only mode.
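+ // Per GPU arch the action graph is roughly:
+ //   .cu --compile--> PTX --assemble--> cubin
+ //   {PTX, cubin} --device link (fatbinary)--> fatbin --> host action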
+ if (CompileDeviceOnly || CurPhase == phases::Backend) {
+ ActionList DeviceActions;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ // Produce the device action from the current phase up to the assemble
+ // phase.
+ for (auto Ph : Phases) {
+ // Skip the phases that were already dealt with.
+ if (Ph < CurPhase)
+ continue;
+ // We have to be consistent with the host final phase.
+ if (Ph > FinalPhase)
+ break;
+
+ CudaDeviceActions[I] = C.getDriver().ConstructPhaseAction(
+ C, Args, Ph, CudaDeviceActions[I], Action::OFK_Cuda);
+
+ if (Ph == phases::Assemble)
+ break;
+ }
+
+ // If we didn't reach the assemble phase, we can't generate the fat
+ // binary. We don't need to generate the fat binary if we are not in
+ // device-only mode.
+ if (!isa<AssembleJobAction>(CudaDeviceActions[I]) ||
+ CompileDeviceOnly)
+ continue;
+
+ Action *AssembleAction = CudaDeviceActions[I];
+ assert(AssembleAction->getType() == types::TY_Object);
+ assert(AssembleAction->getInputs().size() == 1);
+
+ Action *BackendAction = AssembleAction->getInputs()[0];
+ assert(BackendAction->getType() == types::TY_PP_Asm);
+
+ for (auto &A : {AssembleAction, BackendAction}) {
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, *ToolChains.front(), CudaArchToString(GpuArchList[I]),
+ Action::OFK_Cuda);
+ DeviceActions.push_back(
+ C.MakeAction<OffloadAction>(DDep, A->getType()));
+ }
+ }
+
+ // We generate the fat binary if we have device input actions.
+ if (!DeviceActions.empty()) {
+ CudaFatBinary =
+ C.MakeAction<LinkJobAction>(DeviceActions, types::TY_CUDA_FATBIN);
+
+ if (!CompileDeviceOnly) {
+ DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
+ Action::OFK_Cuda);
+ // Clear the fat binary, it is already a dependence to a host
+ // action.
+ CudaFatBinary = nullptr;
+ }
+
+ // Remove the CUDA actions as they are already connected to a host
+ // action or fat binary.
+ CudaDeviceActions.clear();
+ }
+
+ // We avoid creating host action in device-only mode.
+ return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
+ } else if (CurPhase > phases::Backend) {
+ // If we are past the backend phase and still have a device action, we
+ // don't have to do anything as this action is already a device
+ // top-level action.
+ return ABRT_Success;
+ }
+
+ assert(CurPhase < phases::Backend && "Generating single CUDA "
+ "instructions should only occur "
+ "before the backend phase!");
+
+ // By default, we produce an action for each device arch.
+ for (Action *&A : CudaDeviceActions)
+ A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
+
+ return ABRT_Success;
+ }
+ };
+ /// HIP action builder. It injects device code in the host backend
+ /// action.
+ class HIPActionBuilder final : public CudaActionBuilderBase {
+ /// The linker inputs obtained for each device arch.
+ SmallVector<ActionList, 8> DeviceLinkerInputs;
+
+ public:
+ HIPActionBuilder(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs)
+ : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {}
+
+ bool canUseBundlerUnbundler() const override { return true; }
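+
+ // HIP inputs may arrive bundled; allow the offload bundler/unbundler in
+ // the action graph (see the OffloadUnbundlingJobAction handling in the
+ // base class).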
+
+ ActionBuilderReturnCode
+ getDeviceDependences(OffloadAction::DeviceDependences &DA,
+ phases::ID CurPhase, phases::ID FinalPhase,
+ PhasesTy &Phases) override {
+ // amdgcn does not support linking of object files, therefore we skip
+ // backend and assemble phases to output LLVM IR.
+ if (CudaDeviceActions.empty() || CurPhase == phases::Backend ||
+ CurPhase == phases::Assemble)
+ return ABRT_Success;
+
+ assert((CurPhase == phases::Link ||
+ CudaDeviceActions.size() == GpuArchList.size()) &&
+ "Expecting one action per GPU architecture.");
+ assert(!CompileHostOnly &&
+ "Not expecting CUDA actions in host-only compilation.");
+
+ // Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
+ // This happens for each device action originating from each input file.
+ // Later on, device actions in DeviceLinkerInputs are used to create
+ // device link actions in appendLinkDependences and the created device
+ // link actions are passed to the offload action as device dependence.
+ if (CurPhase == phases::Link) {
+ DeviceLinkerInputs.resize(CudaDeviceActions.size());
+ auto LI = DeviceLinkerInputs.begin();
+ for (auto *A : CudaDeviceActions) {
+ LI->push_back(A);
+ ++LI;
+ }
+
+ // We will pass the device actions as a host dependence, so we don't
+ // need to do anything else with them.
+ CudaDeviceActions.clear();
+ return ABRT_Success;
+ }
+
+ // By default, we produce an action for each device arch.
+ for (Action *&A : CudaDeviceActions)
+ A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A,
+ AssociatedOffloadKind);
+
+ return ABRT_Success;
+ }
+
+ void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {
+ // Append a new link action for each device.
+ unsigned I = 0;
+ for (auto &LI : DeviceLinkerInputs) {
+ auto *DeviceLinkAction =
+ C.MakeAction<LinkJobAction>(LI, types::TY_Image);
+ DA.add(*DeviceLinkAction, *ToolChains[0],
+ CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ ++I;
+ }
+ }
+ };
+
/// OpenMP action builder. The host bitcode is passed to the device frontend
/// and all the device linked images are passed to the host link phase.
class OpenMPActionBuilder final : public DeviceActionBuilder {
@@ -2255,6 +2695,9 @@ public:
// Create a specialized builder for CUDA.
SpecializedBuilders.push_back(new CudaActionBuilder(C, Args, Inputs));
+ // Create a specialized builder for HIP.
+ SpecializedBuilders.push_back(new HIPActionBuilder(C, Args, Inputs));
+
// Create a specialized builder for OpenMP.
SpecializedBuilders.push_back(new OpenMPActionBuilder(C, Args, Inputs));
@@ -2549,22 +2992,6 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Args.eraseArg(options::OPT__SLASH_Yu);
YcArg = YuArg = nullptr;
}
- if (YcArg || YuArg) {
- StringRef Val = YcArg ? YcArg->getValue() : YuArg->getValue();
- bool FoundMatchingInclude = false;
- for (const Arg *Inc : Args.filtered(options::OPT_include)) {
- // FIXME: Do case-insensitive matching and consider / and \ as equal.
- if (Inc->getValue() == Val)
- FoundMatchingInclude = true;
- }
- if (!FoundMatchingInclude) {
- Diag(clang::diag::warn_drv_ycyu_no_fi_arg_clang_cl)
- << (YcArg ? YcArg : YuArg)->getSpelling();
- Args.eraseArg(options::OPT__SLASH_Yc);
- Args.eraseArg(options::OPT__SLASH_Yu);
- YcArg = YuArg = nullptr;
- }
- }
if (YcArg && Inputs.size() > 1) {
Diag(clang::diag::warn_drv_yc_multiple_inputs_clang_cl);
Args.eraseArg(options::OPT__SLASH_Yc);
@@ -2597,6 +3024,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// this compilation, warn the user about it.
phases::ID InitialPhase = PL[0];
if (InitialPhase > FinalPhase) {
+ if (InputArg->isClaimed())
+ continue;
+
// Claim here to avoid the more general unused warning.
InputArg->claim();
@@ -2631,11 +3061,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
const types::ID HeaderType = lookupHeaderTypeForSourceType(InputType);
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PCHPL;
types::getCompilationPhases(HeaderType, PCHPL);
- Arg *PchInputArg = MakeInputArg(Args, *Opts, YcArg->getValue());
-
// Build the pipeline for the pch file.
Action *ClangClPch =
- C.MakeAction<InputAction>(*PchInputArg, HeaderType);
+ C.MakeAction<InputAction>(*InputArg, HeaderType);
for (phases::ID Phase : PCHPL)
ClangClPch = ConstructPhaseAction(C, Args, Phase, ClangClPch);
assert(ClangClPch);
@@ -2643,6 +3071,8 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// The driver currently exits after the first failed command. This
// relies on that behavior, to make sure if the pch generation fails,
// the main compilation won't run.
+ // FIXME: If the main compilation fails, the PCH generation should
+ // probably not be considered successful either.
}
}
@@ -2725,8 +3155,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Args.ClaimAllArgs(options::OPT_cuda_compile_host_device);
}
-Action *Driver::ConstructPhaseAction(Compilation &C, const ArgList &Args,
- phases::ID Phase, Action *Input) const {
+Action *Driver::ConstructPhaseAction(
+ Compilation &C, const ArgList &Args, phases::ID Phase, Action *Input,
+ Action::OffloadKind TargetDeviceOffloadKind) const {
llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
// Some types skip the assembler phase (e.g., llvm-bc), but we can't
@@ -2788,7 +3219,7 @@ Action *Driver::ConstructPhaseAction(Compilation &C, const ArgList &Args,
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
- if (isUsingLTO()) {
+ if (isUsingLTO() && TargetDeviceOffloadKind == Action::OFK_None) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
@@ -3084,19 +3515,34 @@ class ToolSelector final {
const Tool *combineBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
const ActionList *&Inputs,
ActionList &CollapsedOffloadAction) {
- if (ActionInfo.size() < 2 || !canCollapsePreprocessorAction())
+ if (ActionInfo.size() < 2)
return nullptr;
auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[0].JA);
auto *CJ = dyn_cast<CompileJobAction>(ActionInfo[1].JA);
if (!BJ || !CJ)
return nullptr;
+ // Check if the initial input (to the compile job or its predecessor if one
+ // exists) is LLVM bitcode. In that case, no preprocessor step is required
+ // and we can still collapse the compile and backend jobs when we have
+ // -save-temps. I.e. there is no need for a separate compile job just to
+ // emit unoptimized bitcode.
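+ // e.g. with "-save-temps" and a .bc input there is no source to preprocess
+ // or compile, so the compile and backend jobs can still be combined.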
+ bool InputIsBitcode = true;
+ for (size_t i = 1; i < ActionInfo.size(); i++)
+ if (ActionInfo[i].JA->getType() != types::TY_LLVM_BC &&
+ ActionInfo[i].JA->getType() != types::TY_LTO_BC) {
+ InputIsBitcode = false;
+ break;
+ }
+ if (!InputIsBitcode && !canCollapsePreprocessorAction())
+ return nullptr;
+
// Get compiler tool.
const Tool *T = TC.SelectTool(*CJ);
if (!T)
return nullptr;
- if (T->canEmitIR() && (SaveTemps || EmbedBitcode))
+ if (T->canEmitIR() && ((SaveTemps && !InputIsBitcode) || EmbedBitcode))
return nullptr;
Inputs = &CJ->getInputs();
@@ -3312,7 +3758,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
if (!ArchName.empty())
TC = &getToolChain(C.getArgs(),
- computeTargetTriple(*this, DefaultTargetTriple,
+ computeTargetTriple(*this, TargetTriple,
C.getArgs(), ArchName));
else
TC = &C.getDefaultToolChain();
@@ -3407,18 +3853,30 @@ InputInfo Driver::BuildJobsForActionNoCache(
UI.DependentToolChain->getTriple().normalize(),
/*CreatePrefixForHost=*/true);
auto CurI = InputInfo(
- UA, GetNamedOutputPath(C, *UA, BaseInput, UI.DependentBoundArch,
- /*AtTopLevel=*/false, MultipleArchs,
- OffloadingPrefix),
+ UA,
+ GetNamedOutputPath(C, *UA, BaseInput, UI.DependentBoundArch,
+ /*AtTopLevel=*/false,
+ MultipleArchs ||
+ UI.DependentOffloadKind == Action::OFK_HIP,
+ OffloadingPrefix),
BaseInput);
// Save the unbundling result.
UnbundlingResults.push_back(CurI);
// Get the unique string identifier for this dependence and cache the
// result.
- CachedResults[{A, GetTriplePlusArchString(
- UI.DependentToolChain, BoundArch,
- UI.DependentOffloadKind)}] = CurI;
+ StringRef Arch;
+ if (TargetDeviceOffloadKind == Action::OFK_HIP) {
+ if (UI.DependentOffloadKind == Action::OFK_Host)
+ Arch = StringRef();
+ else
+ Arch = UI.DependentBoundArch;
+ } else
+ Arch = BoundArch;
+
+ CachedResults[{A, GetTriplePlusArchString(UI.DependentToolChain, Arch,
+ UI.DependentOffloadKind)}] =
+ CurI;
}
// Now that we have all the results generated, select the one that should be
@@ -3478,11 +3936,11 @@ InputInfo Driver::BuildJobsForActionNoCache(
}
const char *Driver::getDefaultImageName() const {
- llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+ llvm::Triple Target(llvm::Triple::normalize(TargetTriple));
return Target.isOSWindows() ? "a.exe" : "a.out";
}
-/// \brief Create output filename based on ArgValue, which could either be a
+/// Create output filename based on ArgValue, which could either be a
/// full filename, filename without extension, or a directory. If ArgValue
/// does not provide a filename, then use BaseName, and use the extension
/// suitable for FileType.
@@ -3540,8 +3998,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
}
// Default to writing to stdout?
- if (AtTopLevel && !CCGenDiagnostics &&
- (isa<PreprocessJobAction>(JA) || JA.getType() == types::TY_ModuleFile))
+ if (AtTopLevel && !CCGenDiagnostics && isa<PreprocessJobAction>(JA))
return "-";
// Is this the assembly listing for /FA?
@@ -3562,8 +4019,22 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
CCGenDiagnostics) {
StringRef Name = llvm::sys::path::filename(BaseInput);
std::pair<StringRef, StringRef> Split = Name.split('.');
- std::string TmpName = GetTemporaryPath(
- Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
+ SmallString<128> TmpName;
+ const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
+ Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_dir);
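+ // When generating crash diagnostics and -fcrash-diagnostics-dir is given,
+ // create the temporary file in that directory rather than the default
+ // temporary-file location.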
+ if (CCGenDiagnostics && A) {
+ SmallString<128> CrashDirectory(A->getValue());
+ llvm::sys::path::append(CrashDirectory, Split.first);
+ const char *Middle = Suffix ? "-%%%%%%." : "-%%%%%%";
+ std::error_code EC =
+ llvm::sys::fs::createUniqueFile(CrashDirectory + Middle + Suffix, TmpName);
+ if (EC) {
+ Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return "";
+ }
+ } else {
+ TmpName = GetTemporaryPath(Split.first, Suffix);
+ }
return C.addTempFile(C.getArgs().MakeArgString(TmpName));
}
@@ -3717,14 +4188,14 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
void Driver::generatePrefixedToolNames(
StringRef Tool, const ToolChain &TC,
SmallVectorImpl<std::string> &Names) const {
- // FIXME: Needs a better variable than DefaultTargetTriple
- Names.emplace_back((DefaultTargetTriple + "-" + Tool).str());
+ // FIXME: Needs a better variable than TargetTriple
+ Names.emplace_back((TargetTriple + "-" + Tool).str());
Names.emplace_back(Tool);
// Allow the discovery of tools prefixed with LLVM's default target triple.
- std::string LLVMDefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
- if (LLVMDefaultTargetTriple != DefaultTargetTriple)
- Names.emplace_back((LLVMDefaultTargetTriple + "-" + Tool).str());
+ std::string DefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
+ if (DefaultTargetTriple != TargetTriple)
+ Names.emplace_back((DefaultTargetTriple + "-" + Tool).str());
}
static bool ScanDirForExecutable(SmallString<128> &Dir,
@@ -3795,6 +4266,9 @@ std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
// extension of .pch is assumed. "
if (!llvm::sys::path::has_extension(Output))
Output += ".pch";
+ } else if (Arg *YcArg = C.getArgs().getLastArg(options::OPT__SLASH_Yc)) {
+ Output = YcArg->getValue();
+ llvm::sys::path::replace_extension(Output, ".pch");
} else {
Output = BaseName;
llvm::sys::path::replace_extension(Output, ".pch");
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index 765c05752d8f..bd1a9bd8e3eb 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -1,4 +1,4 @@
-//===--- Job.cpp - Command to Execute -------------------------------------===//
+//===- Job.cpp - Command to Execute ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,14 +9,14 @@
#include "clang/Driver/Job.h"
#include "InputInfo.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
@@ -24,23 +24,27 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <cassert>
-using namespace clang::driver;
-using llvm::raw_ostream;
-using llvm::StringRef;
-using llvm::ArrayRef;
+#include <cstddef>
+#include <string>
+#include <system_error>
+#include <utility>
+
+using namespace clang;
+using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
const char *Executable, const ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs)
: Source(Source), Creator(Creator), Executable(Executable),
- Arguments(Arguments), ResponseFile(nullptr) {
+ Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
InputFilenames.push_back(II.getFilename());
}
-/// @brief Check if the compiler flag in question should be skipped when
+/// Check if the compiler flag in question should be skipped when
/// emitting a reproducer. Also track how many arguments it has and if the
/// option is some kind of include path.
static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
@@ -67,7 +71,7 @@ static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
.Cases("-iframework", "-include-pch", true)
.Default(false);
if (IsInclude)
- return HaveCrashVFS ? false : true;
+ return !HaveCrashVFS;
// The remaining flags are treated as a single argument.
@@ -86,7 +90,7 @@ static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
StringRef FlagRef(Flag);
IsInclude = FlagRef.startswith("-F") || FlagRef.startswith("-I");
if (IsInclude)
- return HaveCrashVFS ? false : true;
+ return !HaveCrashVFS;
if (FlagRef.startswith("-fmodules-cache-path="))
return true;
@@ -104,7 +108,7 @@ void Command::printArg(raw_ostream &OS, StringRef Arg, bool Quote) {
// Quote and escape. This isn't really complete, but good enough.
OS << '"';
- for (const char c : Arg) {
+ for (const auto c : Arg) {
if (c == '"' || c == '\\' || c == '$')
OS << '\\';
OS << c;
@@ -115,7 +119,7 @@ void Command::printArg(raw_ostream &OS, StringRef Arg, bool Quote) {
void Command::writeResponseFile(raw_ostream &OS) const {
// In a file list, we only write the set of inputs to the response file
if (Creator.getResponseFilesSupport() == Tool::RF_FileList) {
- for (const char *Arg : InputFileList) {
+ for (const auto *Arg : InputFileList) {
OS << Arg << '\n';
}
return;
@@ -124,7 +128,7 @@ void Command::writeResponseFile(raw_ostream &OS) const {
// In regular response files, we send all arguments to the response file.
// Wrapping all arguments in double quotes ensures that both Unix tools and
// Windows tools understand the response file.
- for (const char *Arg : Arguments) {
+ for (const auto *Arg : Arguments) {
OS << '"';
for (; *Arg != '\0'; Arg++) {
@@ -150,13 +154,13 @@ void Command::buildArgvForResponseFile(
}
llvm::StringSet<> Inputs;
- for (const char *InputName : InputFileList)
+ for (const auto *InputName : InputFileList)
Inputs.insert(InputName);
Out.push_back(Executable);
// In a file list, build args vector ignoring parameters that will go in the
// response file (elements of the InputFileList vector)
bool FirstInput = true;
- for (const char *Arg : Arguments) {
+ for (const auto *Arg : Arguments) {
if (Inputs.count(Arg) == 0) {
Out.push_back(Arg);
} else if (FirstInput) {
@@ -167,13 +171,14 @@ void Command::buildArgvForResponseFile(
}
}
-/// @brief Rewrite relative include-like flag paths to absolute ones.
+/// Rewrite relative include-like flag paths to absolute ones.
static void
rewriteIncludes(const llvm::ArrayRef<const char *> &Args, size_t Idx,
size_t NumArgs,
llvm::SmallVectorImpl<llvm::SmallString<128>> &IncFlags) {
using namespace llvm;
using namespace sys;
+
auto getAbsPath = [](StringRef InInc, SmallVectorImpl<char> &OutInc) -> bool {
if (path::is_absolute(InInc)) // Nothing to do here...
return false;
@@ -212,8 +217,8 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
OS << ' ';
printArg(OS, Executable, /*Quote=*/true);
- llvm::ArrayRef<const char *> Args = Arguments;
- llvm::SmallVector<const char *, 128> ArgsRespFile;
+ ArrayRef<const char *> Args = Arguments;
+ SmallVector<const char *, 128> ArgsRespFile;
if (ResponseFile != nullptr) {
buildArgvForResponseFile(ArgsRespFile);
Args = ArrayRef<const char *>(ArgsRespFile).slice(1); // no executable name
@@ -312,13 +317,13 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const {
SmallVector<const char*, 128> Argv;
- const char **Envp;
- if (Environment.empty()) {
- Envp = nullptr;
- } else {
+ Optional<ArrayRef<StringRef>> Env;
+ std::vector<StringRef> ArgvVectorStorage;
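+ // ExecuteAndWait now takes the environment as ArrayRef<StringRef>; the
+ // vector keeps the converted StringRefs alive while Env points into it.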
+ if (!Environment.empty()) {
assert(Environment.back() == nullptr &&
"Environment vector should be null-terminated by now");
- Envp = const_cast<const char **>(Environment.data());
+ ArgvVectorStorage = llvm::toStringRefArray(Environment.data());
+ Env = makeArrayRef(ArgvVectorStorage);
}
if (ResponseFile == nullptr) {
@@ -326,8 +331,9 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
Argv.append(Arguments.begin(), Arguments.end());
Argv.push_back(nullptr);
+ auto Args = llvm::toStringRefArray(Argv.data());
return llvm::sys::ExecuteAndWait(
- Executable, Argv.data(), Envp, Redirects, /*secondsToWait*/ 0,
+ Executable, Args, Env, Redirects, /*secondsToWait*/ 0,
/*memoryLimit*/ 0, ErrMsg, ExecutionFailed);
}
@@ -352,7 +358,8 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
return -1;
}
- return llvm::sys::ExecuteAndWait(Executable, Argv.data(), Envp, Redirects,
+ auto Args = llvm::toStringRefArray(Argv.data());
+ return llvm::sys::ExecuteAndWait(Executable, Args, Env, Redirects,
/*secondsToWait*/ 0,
/*memoryLimit*/ 0, ErrMsg, ExecutionFailed);
}
diff --git a/lib/Driver/Multilib.cpp b/lib/Driver/Multilib.cpp
index 16a81603b31e..178a60db60e5 100644
--- a/lib/Driver/Multilib.cpp
+++ b/lib/Driver/Multilib.cpp
@@ -1,4 +1,4 @@
-//===--- Multilib.cpp - Multilib Implementation ---------------------------===//
+//===- Multilib.cpp - Multilib Implementation -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,25 +8,22 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Multilib.h"
-#include "ToolChains/CommonArgs.h"
-#include "clang/Driver/Options.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/Option/Arg.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Option/OptTable.h"
-#include "llvm/Option/Option.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
-#include "llvm/Support/YAMLParser.h"
-#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <string>
-using namespace clang::driver;
using namespace clang;
-using namespace llvm::opt;
+using namespace driver;
using namespace llvm::sys;
/// normalize Segment to "/foo/bar" or "".
@@ -34,7 +31,7 @@ static void normalizePathSegment(std::string &Segment) {
StringRef seg = Segment;
// Prune trailing "/" or "./"
- while (1) {
+ while (true) {
StringRef last = path::filename(seg);
if (last != ".")
break;
@@ -42,7 +39,7 @@ static void normalizePathSegment(std::string &Segment) {
}
if (seg.empty() || seg == "/") {
- Segment = "";
+ Segment.clear();
return;
}
@@ -198,8 +195,8 @@ MultilibSet &MultilibSet::Either(ArrayRef<Multilib> MultilibSegments) {
Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
MultilibSegments.end());
else {
- for (const Multilib &New : MultilibSegments) {
- for (const Multilib &Base : *this) {
+ for (const auto &New : MultilibSegments) {
+ for (const auto &Base : *this) {
Multilib MO = compose(Base, New);
if (MO.isValid())
Composed.push_back(MO);
@@ -262,7 +259,7 @@ bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
return false;
}, Multilibs);
- if (Filtered.size() == 0)
+ if (Filtered.empty())
return false;
if (Filtered.size() == 1) {
M = Filtered[0];
@@ -279,7 +276,7 @@ LLVM_DUMP_METHOD void MultilibSet::dump() const {
}
void MultilibSet::print(raw_ostream &OS) const {
- for (const Multilib &M : *this)
+ for (const auto &M : *this)
OS << M << "\n";
}
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index 3c985a1f71d7..bdc17d11c92b 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
+#include "llvm/Support/TargetParser.h"
#include <memory>
using namespace clang;
@@ -30,20 +31,22 @@ enum : SanitizerMask {
NeedsUbsanCxxRt = Vptr | CFI,
NotAllowedWithTrap = Vptr,
NotAllowedWithMinimalRuntime = Vptr,
- RequiresPIE = DataFlow | Scudo,
+ RequiresPIE = DataFlow | HWAddress | Scudo,
NeedsUnwindTables = Address | HWAddress | Thread | Memory | DataFlow,
- SupportsCoverage = Address | HWAddress | KernelAddress | Memory | Leak |
- Undefined | Integer | Nullability | DataFlow | Fuzzer |
- FuzzerNoLink,
+ SupportsCoverage = Address | HWAddress | KernelAddress | KernelHWAddress |
+ Memory | Leak | Undefined | Integer | Nullability |
+ DataFlow | Fuzzer | FuzzerNoLink,
RecoverableByDefault = Undefined | Integer | Nullability,
Unrecoverable = Unreachable | Return,
+ AlwaysRecoverable = KernelAddress | KernelHWAddress,
LegacyFsanitizeRecoverMask = Undefined | Integer,
NeedsLTO = CFI,
TrappingSupported = (Undefined & ~Vptr) | UnsignedIntegerOverflow |
Nullability | LocalBounds | CFI,
TrappingDefault = CFI,
- CFIClasses = CFIVCall | CFINVCall | CFIDerivedCast | CFIUnrelatedCast,
- CompatibleWithMinimalRuntime = TrappingSupported,
+ CFIClasses =
+ CFIVCall | CFINVCall | CFIMFCall | CFIDerivedCast | CFIUnrelatedCast,
+ CompatibleWithMinimalRuntime = TrappingSupported | Scudo,
};
enum CoverageFeature {
@@ -92,31 +95,32 @@ static std::string describeSanitizeArg(const llvm::opt::Arg *A,
/// Sanitizers set.
static std::string toString(const clang::SanitizerSet &Sanitizers);
-static bool getDefaultBlacklist(const Driver &D, SanitizerMask Kinds,
- std::string &BLPath) {
- const char *BlacklistFile = nullptr;
- if (Kinds & Address)
- BlacklistFile = "asan_blacklist.txt";
- else if (Kinds & HWAddress)
- BlacklistFile = "hwasan_blacklist.txt";
- else if (Kinds & Memory)
- BlacklistFile = "msan_blacklist.txt";
- else if (Kinds & Thread)
- BlacklistFile = "tsan_blacklist.txt";
- else if (Kinds & DataFlow)
- BlacklistFile = "dfsan_abilist.txt";
- else if (Kinds & CFI)
- BlacklistFile = "cfi_blacklist.txt";
- else if (Kinds & (Undefined | Integer | Nullability))
- BlacklistFile = "ubsan_blacklist.txt";
-
- if (BlacklistFile) {
+static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
+ std::vector<std::string> &BlacklistFiles) {
+ struct Blacklist {
+ const char *File;
+ SanitizerMask Mask;
+ } Blacklists[] = {{"asan_blacklist.txt", Address},
+ {"hwasan_blacklist.txt", HWAddress},
+ {"msan_blacklist.txt", Memory},
+ {"tsan_blacklist.txt", Thread},
+ {"dfsan_abilist.txt", DataFlow},
+ {"cfi_blacklist.txt", CFI},
+ {"ubsan_blacklist.txt", Undefined | Integer | Nullability}};
+
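+ // Append each blacklist whose sanitizer is enabled and whose file exists
+ // under <ResourceDir>/share; a missing cfi_blacklist.txt is a hard error.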
+ for (auto BL : Blacklists) {
+ if (!(Kinds & BL.Mask))
+ continue;
+
clang::SmallString<64> Path(D.ResourceDir);
- llvm::sys::path::append(Path, BlacklistFile);
- BLPath = Path.str();
- return true;
+ llvm::sys::path::append(Path, "share", BL.File);
+ if (llvm::sys::fs::exists(Path))
+ BlacklistFiles.push_back(Path.str());
+ else if (BL.Mask == CFI)
+ // If cfi_blacklist.txt cannot be found in the resource dir, the driver
+ // should fail.
+ D.Diag(clang::diag::err_drv_no_such_file) << Path;
}
- return false;
}
/// Sets group bits for every group that has at least one representative already
@@ -176,7 +180,8 @@ static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
bool SanitizerArgs::needsUbsanRt() const {
// All of these include ubsan.
if (needsAsanRt() || needsMsanRt() || needsHwasanRt() || needsTsanRt() ||
- needsDfsanRt() || needsLsanRt() || needsCfiDiagRt() || needsScudoRt())
+ needsDfsanRt() || needsLsanRt() || needsCfiDiagRt() ||
+ (needsScudoRt() && !requiresMinimalRuntime()))
return false;
return (Sanitizers.Mask & NeedsUbsanRt & ~TrapSanitizers.Mask) ||
@@ -215,6 +220,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Used to deduplicate diagnostics.
SanitizerMask Kinds = 0;
const SanitizerMask Supported = setGroupBits(TC.getSupportedSanitizers());
+
+ CfiCrossDso = Args.hasFlag(options::OPT_fsanitize_cfi_cross_dso,
+ options::OPT_fno_sanitize_cfi_cross_dso, false);
+
ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
const Driver &D = TC.getDriver();
@@ -274,6 +283,24 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Add &= ~NotAllowedWithMinimalRuntime;
}
+ // FIXME: Make CFI on member function calls compatible with cross-DSO CFI.
+ // There are currently two problems:
+ // - Virtual function call checks need to pass a pointer to the function
+ // address to llvm.type.test and a pointer to the address point to the
+ // diagnostic function. Currently we pass the same pointer to both
+ // places.
+ // - Non-virtual function call checks may need to check multiple type
+ // identifiers.
+ // Fixing both of those may require changes to the cross-DSO CFI
+ // interface.
+ if (CfiCrossDso && (Add & CFIMFCall & ~DiagnosedKinds)) {
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize=cfi-mfcall"
+ << "-fsanitize-cfi-cross-dso";
+ Add &= ~CFIMFCall;
+ DiagnosedKinds |= CFIMFCall;
+ }
+
if (SanitizerMask KindsToDiagnose = Add & ~Supported & ~DiagnosedKinds) {
std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
D.Diag(diag::err_drv_unsupported_opt_for_target)
@@ -285,19 +312,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Test for -fno-rtti + explicit -fsanitizer=vptr before expanding groups
// so we don't error out if -fno-rtti and -fsanitize=undefined were
// passed.
- if (Add & Vptr &&
- (RTTIMode == ToolChain::RM_DisabledImplicitly ||
- RTTIMode == ToolChain::RM_DisabledExplicitly)) {
- if (RTTIMode == ToolChain::RM_DisabledImplicitly)
- // Warn about not having rtti enabled if the vptr sanitizer is
- // explicitly enabled
- D.Diag(diag::warn_drv_disabling_vptr_no_rtti_default);
- else {
- const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg();
- assert(NoRTTIArg &&
- "RTTI disabled explicitly but we have no argument!");
+ if ((Add & Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
+ if (const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg()) {
+ assert(NoRTTIArg->getOption().matches(options::OPT_fno_rtti) &&
+ "RTTI disabled without -fno-rtti option?");
+ // The user explicitly passed -fno-rtti with -fsanitize=vptr, but
+ // the vptr sanitizer requires RTTI, so this is a user error.
D.Diag(diag::err_drv_argument_not_allowed_with)
<< "-fsanitize=vptr" << NoRTTIArg->getAsString(Args);
+ } else {
+ // The vptr sanitizer requires RTTI, but RTTI is disabled (by
+ // default). Warn that the vptr sanitizer is being disabled.
+ D.Diag(diag::warn_drv_disabling_vptr_no_rtti_default);
}
// Take out the Vptr sanitizer from the enabled sanitizers
@@ -313,6 +339,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (MinimalRuntime) {
Add &= ~NotAllowedWithMinimalRuntime;
}
+ if (CfiCrossDso)
+ Add &= ~CFIMFCall;
Add &= Supported;
if (Add & Fuzzer)
@@ -335,14 +363,41 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
}
+ std::pair<SanitizerMask, SanitizerMask> IncompatibleGroups[] = {
+ std::make_pair(Address, Thread | Memory),
+ std::make_pair(Thread, Memory),
+ std::make_pair(Leak, Thread | Memory),
+ std::make_pair(KernelAddress, Address | Leak | Thread | Memory),
+ std::make_pair(HWAddress, Address | Thread | Memory | KernelAddress),
+ std::make_pair(Efficiency, Address | HWAddress | Leak | Thread | Memory |
+ KernelAddress),
+ std::make_pair(Scudo, Address | HWAddress | Leak | Thread | Memory |
+ KernelAddress | Efficiency),
+ std::make_pair(SafeStack, Address | HWAddress | Leak | Thread | Memory |
+ KernelAddress | Efficiency),
+ std::make_pair(ShadowCallStack, Address | HWAddress | Leak | Thread |
+ Memory | KernelAddress | Efficiency |
+ SafeStack),
+ std::make_pair(KernelHWAddress, Address | HWAddress | Leak | Thread |
+ Memory | KernelAddress | Efficiency |
+ SafeStack | ShadowCallStack)};
+
// Enable toolchain specific default sanitizers if not explicitly disabled.
- Kinds |= TC.getDefaultSanitizers() & ~AllRemove;
+ SanitizerMask Default = TC.getDefaultSanitizers() & ~AllRemove;
+
+ // Disable default sanitizers that are incompatible with explicitly requested
+ // ones.
+ for (auto G : IncompatibleGroups) {
+ SanitizerMask Group = G.first;
+ if ((Default & Group) && (Kinds & G.second))
+ Default &= ~Group;
+ }
+
+ Kinds |= Default;
// We disable the vptr sanitizer if it was enabled by group expansion but RTTI
// is disabled.
- if ((Kinds & Vptr) &&
- (RTTIMode == ToolChain::RM_DisabledImplicitly ||
- RTTIMode == ToolChain::RM_DisabledExplicitly)) {
+ if ((Kinds & Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
Kinds &= ~Vptr;
}
@@ -352,6 +407,15 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
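+ // ShadowCallStack on AArch64 keeps the shadow stack pointer in x18, so x18
+ // must be reserved, either by the platform default or via -ffixed-x18.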
+ if ((Kinds & ShadowCallStack) &&
+ TC.getTriple().getArch() == llvm::Triple::aarch64 &&
+ !llvm::AArch64::isX18ReservedByDefault(TC.getTriple()) &&
+ !Args.hasArg(options::OPT_ffixed_x18)) {
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << lastArgumentForMask(D, Args, Kinds & ShadowCallStack)
+ << "-ffixed-x18";
+ }
+
// Report error if there are non-trapping sanitizers that require
// c++abi-specific parts of UBSan runtime, and they are not provided by the
// toolchain. We don't have a good way to check the latter, so we just
@@ -372,16 +436,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
// Warn about incompatible groups of sanitizers.
- std::pair<SanitizerMask, SanitizerMask> IncompatibleGroups[] = {
- std::make_pair(Address, Thread | Memory),
- std::make_pair(Thread, Memory),
- std::make_pair(Leak, Thread | Memory),
- std::make_pair(KernelAddress, Address | Leak | Thread | Memory),
- std::make_pair(HWAddress, Address | Thread | Memory | KernelAddress),
- std::make_pair(Efficiency, Address | HWAddress | Leak | Thread | Memory |
- KernelAddress),
- std::make_pair(Scudo, Address | HWAddress | Leak | Thread | Memory |
- KernelAddress | Efficiency)};
for (auto G : IncompatibleGroups) {
SanitizerMask Group = G.first;
if (Kinds & Group) {
@@ -399,8 +453,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// default in ASan?
// Parse -f(no-)?sanitize-recover flags.
- SanitizerMask RecoverableKinds = RecoverableByDefault;
+ SanitizerMask RecoverableKinds = RecoverableByDefault | AlwaysRecoverable;
SanitizerMask DiagnosedUnrecoverableKinds = 0;
+ SanitizerMask DiagnosedAlwaysRecoverableKinds = 0;
for (const auto *Arg : Args) {
const char *DeprecatedReplacement = nullptr;
if (Arg->getOption().matches(options::OPT_fsanitize_recover)) {
@@ -428,7 +483,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
RecoverableKinds |= expandSanitizerGroups(Add);
Arg->claim();
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) {
- RecoverableKinds &= ~expandSanitizerGroups(parseArgValues(D, Arg, true));
+ SanitizerMask Remove = parseArgValues(D, Arg, true);
+ // Report error if user explicitly tries to disable recovery from
+ // always recoverable sanitizer.
+ if (SanitizerMask KindsToDiagnose =
+ Remove & AlwaysRecoverable & ~DiagnosedAlwaysRecoverableKinds) {
+ SanitizerSet SetToDiagnose;
+ SetToDiagnose.Mask |= KindsToDiagnose;
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << toString(SetToDiagnose);
+ DiagnosedAlwaysRecoverableKinds |= KindsToDiagnose;
+ }
+ RecoverableKinds &= ~expandSanitizerGroups(Remove);
Arg->claim();
}
if (DeprecatedReplacement) {
@@ -444,11 +510,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Setup blacklist files.
// Add default blacklist from resource directory.
- {
- std::string BLPath;
- if (getDefaultBlacklist(D, Kinds, BLPath) && llvm::sys::fs::exists(BLPath))
- BlacklistFiles.push_back(BLPath);
- }
+ addDefaultBlacklists(D, Kinds, BlacklistFiles);
// Parse -f(no-)sanitize-blacklist options.
for (const auto *Arg : Args) {
if (Arg->getOption().matches(options::OPT_fsanitize_blacklist)) {
@@ -457,9 +519,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (llvm::sys::fs::exists(BLPath)) {
BlacklistFiles.push_back(BLPath);
ExtraDeps.push_back(BLPath);
- } else
+ } else {
D.Diag(clang::diag::err_drv_no_such_file) << BLPath;
-
+ }
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_blacklist)) {
Arg->claim();
BlacklistFiles.clear();
@@ -517,8 +579,6 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
if (AllAddedKinds & CFI) {
- CfiCrossDso = Args.hasFlag(options::OPT_fsanitize_cfi_cross_dso,
- options::OPT_fno_sanitize_cfi_cross_dso, false);
// Without PIE, external function address may resolve to a PLT record, which
// can not be verified by the target module.
NeedPIE |= CfiCrossDso;
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index f96a1182e3ca..d62ba1253348 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -1,4 +1,4 @@
-//===--- ToolChain.cpp - Collections of tools for one platform ------------===//
+//===- ToolChain.cpp - Collections of tools for one platform --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,33 +8,45 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/ToolChain.h"
-#include "ToolChains/CommonArgs.h"
+#include "InputInfo.h"
#include "ToolChains/Arch/ARM.h"
#include "ToolChains/Clang.h"
#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/XRayArgs.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/VersionTuple.h"
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <string>
-using namespace clang::driver;
-using namespace clang::driver::tools;
using namespace clang;
+using namespace driver;
+using namespace tools;
using namespace llvm;
using namespace llvm::opt;
@@ -49,33 +61,24 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
// Explicit rtti/no-rtti args
if (CachedRTTIArg) {
if (CachedRTTIArg->getOption().matches(options::OPT_frtti))
- return ToolChain::RM_EnabledExplicitly;
+ return ToolChain::RM_Enabled;
else
- return ToolChain::RM_DisabledExplicitly;
+ return ToolChain::RM_Disabled;
}
// -frtti is default, except for the PS4 CPU.
- if (!Triple.isPS4CPU())
- return ToolChain::RM_EnabledImplicitly;
-
- // On the PS4, turning on c++ exceptions turns on rtti.
- // We're assuming that, if we see -fexceptions, rtti gets turned on.
- Arg *Exceptions = Args.getLastArgNoClaim(
- options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions,
- options::OPT_fexceptions, options::OPT_fno_exceptions);
- if (Exceptions &&
- (Exceptions->getOption().matches(options::OPT_fexceptions) ||
- Exceptions->getOption().matches(options::OPT_fcxx_exceptions)))
- return ToolChain::RM_EnabledImplicitly;
-
- return ToolChain::RM_DisabledImplicitly;
+ return (Triple.isPS4CPU()) ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
}
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
- CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)),
- EffectiveTriple() {
+ CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, D.getTargetTriple(), "lib");
+ if (getVFS().exists(P))
+ getFilePaths().push_back(P.str());
+
std::string CandidateLibPath = getArchSpecificLibPath();
if (getVFS().exists(CandidateLibPath))
getFilePaths().push_back(CandidateLibPath);
@@ -87,8 +90,7 @@ void ToolChain::setTripleEnvironment(llvm::Triple::EnvironmentType Env) {
EffectiveTriple.setEnvironment(Env);
}
-ToolChain::~ToolChain() {
-}
+ToolChain::~ToolChain() = default;
vfs::FileSystem &ToolChain::getVFS() const { return getDriver().getVFS(); }
@@ -115,12 +117,15 @@ const XRayArgs& ToolChain::getXRayArgs() const {
}
namespace {
+
struct DriverSuffix {
const char *Suffix;
const char *ModeFlag;
};
-const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
+} // namespace
+
+static const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
// A list of known driver suffixes. Suffixes are compared against the
// program name in order. If there is a match, the frontend type is updated as
// necessary by applying the ModeFlag.
@@ -151,16 +156,16 @@ const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
/// Normalize the program name from argv[0] by stripping the file extension if
/// present and lower-casing the string on Windows.
-std::string normalizeProgramName(llvm::StringRef Argv0) {
+static std::string normalizeProgramName(llvm::StringRef Argv0) {
std::string ProgName = llvm::sys::path::stem(Argv0);
-#ifdef LLVM_ON_WIN32
+#ifdef _WIN32
// Transform to lowercase for case insensitive file systems.
std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(), ::tolower);
#endif
return ProgName;
}
-const DriverSuffix *parseDriverSuffix(StringRef ProgName, size_t &Pos) {
+static const DriverSuffix *parseDriverSuffix(StringRef ProgName, size_t &Pos) {
// Try to infer frontend type and default target from the program name by
// comparing it against DriverSuffixes in order.
@@ -185,7 +190,6 @@ const DriverSuffix *parseDriverSuffix(StringRef ProgName, size_t &Pos) {
}
return DS;
}
-} // anonymous namespace
ParsedClangName
ToolChain::getTargetAndModeFromProgramName(StringRef PN) {
@@ -193,7 +197,7 @@ ToolChain::getTargetAndModeFromProgramName(StringRef PN) {
size_t SuffixPos;
const DriverSuffix *DS = parseDriverSuffix(ProgName, SuffixPos);
if (!DS)
- return ParsedClangName();
+ return {};
size_t SuffixEnd = SuffixPos + strlen(DS->Suffix);
size_t LastComponent = ProgName.rfind('-', SuffixPos);
@@ -323,13 +327,27 @@ static StringRef getArchNameForCompilerRTLib(const ToolChain &TC,
return llvm::Triple::getArchTypeName(TC.getArch());
}
+StringRef ToolChain::getOSLibName() const {
+ switch (Triple.getOS()) {
+ case llvm::Triple::FreeBSD:
+ return "freebsd";
+ case llvm::Triple::NetBSD:
+ return "netbsd";
+ case llvm::Triple::OpenBSD:
+ return "openbsd";
+ case llvm::Triple::Solaris:
+ return "sunos";
+ default:
+ return getOS();
+ }
+}
+
std::string ToolChain::getCompilerRTPath() const {
SmallString<128> Path(getDriver().ResourceDir);
if (Triple.isOSUnknown()) {
llvm::sys::path::append(Path, "lib");
} else {
- StringRef OSLibName = Triple.isOSFreeBSD() ? "freebsd" : getOS();
- llvm::sys::path::append(Path, "lib", OSLibName);
+ llvm::sys::path::append(Path, "lib", getOSLibName());
}
return Path.str();
}
@@ -337,15 +355,23 @@ std::string ToolChain::getCompilerRTPath() const {
std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
bool Shared) const {
const llvm::Triple &TT = getTriple();
- const char *Env = TT.isAndroid() ? "-android" : "";
bool IsITANMSVCWindows =
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
- StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
const char *Prefix = IsITANMSVCWindows ? "" : "lib";
const char *Suffix = Shared ? (Triple.isOSWindows() ? ".dll" : ".so")
: (IsITANMSVCWindows ? ".lib" : ".a");
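+ // Prefer the per-target runtime directory <ResourceDir>/<triple>/lib with
+ // the unsuffixed library name; otherwise fall back to the per-arch name
+ // under getCompilerRTPath().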
+ const Driver &D = getDriver();
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, D.getTargetTriple(), "lib");
+ if (getVFS().exists(P)) {
+ llvm::sys::path::append(P, Prefix + Twine("clang_rt.") + Component + Suffix);
+ return P.str();
+ }
+
+ StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
+ const char *Env = TT.isAndroid() ? "-android" : "";
SmallString<128> Path(getCompilerRTPath());
llvm::sys::path::append(Path, Prefix + Twine("clang_rt.") + Component + "-" +
Arch + Env + Suffix);
@@ -360,8 +386,7 @@ const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
std::string ToolChain::getArchSpecificLibPath() const {
SmallString<128> Path(getDriver().ResourceDir);
- StringRef OSLibName = getTriple().isOSFreeBSD() ? "freebsd" : getOS();
- llvm::sys::path::append(Path, "lib", OSLibName,
+ llvm::sys::path::append(Path, "lib", getOSLibName(),
llvm::Triple::getArchTypeName(getArch()));
return Path.str();
}
@@ -403,7 +428,7 @@ std::string ToolChain::GetLinkerPath() const {
if (llvm::sys::path::is_absolute(UseLinker)) {
// If we're passed what looks like an absolute path, don't attempt to
// second-guess that.
- if (llvm::sys::fs::exists(UseLinker))
+ if (llvm::sys::fs::can_execute(UseLinker))
return UseLinker;
} else if (UseLinker.empty() || UseLinker == "ld") {
// If we're passed -fuse-ld= with no argument, or with the argument ld,
@@ -418,7 +443,7 @@ std::string ToolChain::GetLinkerPath() const {
LinkerName.append(UseLinker);
std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
- if (llvm::sys::fs::exists(LinkerPath))
+ if (llvm::sys::fs::can_execute(LinkerPath))
return LinkerPath;
}
@@ -459,8 +484,6 @@ ObjCRuntime ToolChain::getDefaultObjCRuntime(bool isNonFragile) const {
llvm::ExceptionHandling
ToolChain::GetExceptionModel(const llvm::opt::ArgList &Args) const {
- if (Triple.isOSWindows() && Triple.getArch() != llvm::Triple::x86)
- return llvm::ExceptionHandling::WinEH;
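+  // The generic toolchain defaults to no exception model; Windows-specific
+  // toolchains are expected to supply their own GetExceptionModel override.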
return llvm::ExceptionHandling::None;
}
@@ -576,7 +599,7 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
// CollectArgsForIntegratedAssembler but we can't change the ArchName at
// that point. There is no assembler equivalent of -mno-thumb, -marm, or
// -mno-arm.
- for (const Arg *A :
+ for (const auto *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
for (StringRef Value : A->getValues()) {
if (Value == "-mthumb")
@@ -660,7 +683,7 @@ ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
return GetDefaultCXXStdlibType();
}
-/// \brief Utility function to add a system include directory to CC1 arguments.
+/// Utility function to add a system include directory to CC1 arguments.
/*static*/ void ToolChain::addSystemInclude(const ArgList &DriverArgs,
ArgStringList &CC1Args,
const Twine &Path) {
@@ -668,7 +691,7 @@ ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
CC1Args.push_back(DriverArgs.MakeArgString(Path));
}
-/// \brief Utility function to add a system include directory with extern "C"
+/// Utility function to add a system include directory with extern "C"
/// semantics to CC1 arguments.
///
/// Note that this should be used rarely, and only for directories that
@@ -690,11 +713,11 @@ void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, Path);
}
-/// \brief Utility function to add a list of system include directories to CC1.
+/// Utility function to add a list of system include directories to CC1.
/*static*/ void ToolChain::addSystemIncludes(const ArgList &DriverArgs,
ArgStringList &CC1Args,
ArrayRef<StringRef> Paths) {
- for (StringRef Path : Paths) {
+ for (const auto Path : Paths) {
CC1Args.push_back("-internal-isystem");
CC1Args.push_back(DriverArgs.MakeArgString(Path));
}
@@ -776,7 +799,9 @@ bool ToolChain::AddFastMathRuntimeIfAvailable(const ArgList &Args,
SanitizerMask ToolChain::getSupportedSanitizers() const {
// Return sanitizers which don't require runtime support and are not
// platform dependent.
+
using namespace SanitizerKind;
+
SanitizerMask Res = (Undefined & ~Vptr & ~Function) | (CFI & ~CFIICall) |
CFICastStrict | UnsignedIntegerOverflow | Nullability |
LocalBounds;
@@ -787,6 +812,9 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::wasm32 ||
getTriple().getArch() == llvm::Triple::wasm64)
Res |= CFIICall;
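+  // ShadowCallStack is currently implemented only for x86_64 and aarch64.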
+ if (getTriple().getArch() == llvm::Triple::x86_64 ||
+ getTriple().getArch() == llvm::Triple::aarch64)
+ Res |= ShadowCallStack;
return Res;
}
@@ -858,7 +886,7 @@ llvm::opt::DerivedArgList *ToolChain::TranslateOpenMPTargetArgs(
bool Modified = false;
// Handle -Xopenmp-target flags
- for (Arg *A : Args) {
+ for (auto *A : Args) {
// Exclude flags which may only apply to the host toolchain.
// Do not exclude flags when the host triple (AuxTriple)
// matches the current toolchain triple. If it is not present
diff --git a/lib/Driver/ToolChains/AMDGPU.cpp b/lib/Driver/ToolChains/AMDGPU.cpp
index a313bc5c35de..6b673feeadfc 100644
--- a/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/lib/Driver/ToolChains/AMDGPU.cpp
@@ -43,7 +43,6 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
StringRef value = dAbi->getValue();
if (value == "1.0") {
Features.push_back("+amdgpu-debugger-insert-nops");
- Features.push_back("+amdgpu-debugger-reserve-regs");
Features.push_back("+amdgpu-debugger-emit-prologue");
} else {
D.Diag(diag::err_drv_clang_unsupported) << dAbi->getAsString(Args);
diff --git a/lib/Driver/ToolChains/Ananas.cpp b/lib/Driver/ToolChains/Ananas.cpp
index ee072cc03e7c..006fdc029ef8 100644
--- a/lib/Driver/ToolChains/Ananas.cpp
+++ b/lib/Driver/ToolChains/Ananas.cpp
@@ -10,7 +10,6 @@
#include "Ananas.h"
#include "InputInfo.h"
#include "CommonArgs.h"
-#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
@@ -64,8 +63,19 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
- // Ananas only supports static linkage for now.
- CmdArgs.push_back("-Bstatic");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ Args.AddAllArgs(CmdArgs, options::OPT_pie);
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/lib/ld-ananas.so");
+ }
+ }
if (Output.isFilename()) {
CmdArgs.push_back("-o");
@@ -75,9 +85,15 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ }
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbeginS.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
+ }
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
@@ -86,8 +102,11 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
{options::OPT_T_Group, options::OPT_e, options::OPT_s,
options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
- if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -97,7 +116,10 @@ void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
diff --git a/lib/Driver/ToolChains/Arch/AArch64.cpp b/lib/Driver/ToolChains/Arch/AArch64.cpp
index ad04aedd098e..5114279b4b45 100644
--- a/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -69,6 +69,9 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
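+  // Resolve -mcpu=native to the name of the host CPU.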
+ if (CPU == "native")
+ CPU = llvm::sys::getHostCPUName();
+
if (CPU == "generic") {
Features.push_back("+neon");
} else {
@@ -198,6 +201,9 @@ void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
if (Args.hasArg(options::OPT_ffixed_x18))
Features.push_back("+reserve-x18");
+ if (Args.hasArg(options::OPT_ffixed_x20))
+ Features.push_back("+reserve-x20");
+
if (Args.hasArg(options::OPT_mno_neg_immediates))
Features.push_back("+no-neg-immediates");
}
diff --git a/lib/Driver/ToolChains/Arch/ARM.cpp b/lib/Driver/ToolChains/Arch/ARM.cpp
index 44c8871d0e1f..886d947c586b 100644
--- a/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -232,7 +232,7 @@ arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
break;
case llvm::Triple::OpenBSD:
- ABI = FloatABI::Soft;
+ ABI = FloatABI::SoftFP;
break;
default:
@@ -391,12 +391,22 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
} else if (HDivArg)
getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
- // Setting -msoft-float effectively disables NEON because of the GCC
- // implementation, although the same isn't true of VFP or VFP3.
+ // Setting -msoft-float/-mfloat-abi=soft effectively disables the FPU (GCC
+ // ignores the -mfpu options in this case).
+ // Note that the ABI can also be set implicitly by the target selected.
if (ABI == arm::FloatABI::Soft) {
- Features.push_back("-neon");
- // Also need to explicitly disable features which imply NEON.
- Features.push_back("-crypto");
+ llvm::ARM::getFPUFeatures(llvm::ARM::FK_NONE, Features);
+
+ // Disable hardware FP features which have been enabled.
+ // FIXME: Disabling vfp2 and neon should be enough as all the other
+ // features are dependent on these 2 features in LLVM. However
+ // there is currently no easy way to test this in clang, so for
+ // now just be explicit and disable all known dependent features
+ // as well.
+ for (std::string Feature : {"vfp2", "vfp3", "vfp4", "fp-armv8", "fullfp16",
+ "neon", "crypto", "dotprod"})
+      if (std::find(std::begin(Features), std::end(Features),
+                    "+" + Feature) != std::end(Features))
+ Features.push_back(Args.MakeArgString("-" + Feature));
}
// En/disable crc code generation.
@@ -438,7 +448,7 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
if (B->getOption().matches(options::OPT_mlong_calls))
D.Diag(diag::err_opt_not_valid_with_opt) << A->getAsString(Args) << B->getAsString(Args);
}
- Features.push_back("+execute-only");
+ Features.push_back("+execute-only");
}
}
}
diff --git a/lib/Driver/ToolChains/Arch/Mips.cpp b/lib/Driver/ToolChains/Arch/Mips.cpp
index 61481a92d0b7..6d814631d05f 100644
--- a/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -20,11 +20,6 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-bool tools::isMipsArch(llvm::Triple::ArchType Arch) {
- return Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel ||
- Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el;
-}
-
// Get CPU and ABI names. They are not independent
// so we have to calculate them together.
void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
@@ -50,6 +45,13 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
if (Triple.getOS() == llvm::Triple::OpenBSD)
DefMips64CPU = "mips3";
+ // MIPS2 is the default for mips(el)?-unknown-freebsd.
+ // MIPS3 is the default for mips64(el)?-unknown-freebsd.
+ if (Triple.getOS() == llvm::Triple::FreeBSD) {
+ DefMips32CPU = "mips2";
+ DefMips64CPU = "mips3";
+ }
+
if (Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ,
options::OPT_mcpu_EQ))
CPUName = A->getValue();
@@ -106,11 +108,7 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
if (ABIName.empty()) {
// Deduce ABI name from the target triple.
- if (Triple.getArch() == llvm::Triple::mips ||
- Triple.getArch() == llvm::Triple::mipsel)
- ABIName = "o32";
- else
- ABIName = "n64";
+ ABIName = Triple.isMIPS32() ? "o32" : "n64";
}
if (CPUName.empty()) {
@@ -214,6 +212,7 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// For case (a) we need to add +noabicalls for N64.
bool IsN64 = ABIName == "64";
+ bool IsPIC = false;
bool NonPIC = false;
Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
@@ -225,6 +224,9 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
NonPIC =
(O.matches(options::OPT_fno_PIC) || O.matches(options::OPT_fno_pic) ||
O.matches(options::OPT_fno_PIE) || O.matches(options::OPT_fno_pie));
+ IsPIC =
+ (O.matches(options::OPT_fPIC) || O.matches(options::OPT_fpic) ||
+ O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie));
}
bool UseAbiCalls = false;
@@ -234,9 +236,14 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
UseAbiCalls =
!ABICallsArg || ABICallsArg->getOption().matches(options::OPT_mabicalls);
- if (UseAbiCalls && IsN64 && NonPIC) {
- D.Diag(diag::warn_drv_unsupported_abicalls);
- UseAbiCalls = false;
+ if (IsN64 && NonPIC && (!ABICallsArg || UseAbiCalls)) {
+ D.Diag(diag::warn_drv_unsupported_pic_with_mabicalls)
+ << LastPICArg->getAsString(Args) << (!ABICallsArg ? 0 : 1);
+ NonPIC = false;
+ }
+
+ if (ABICallsArg && !UseAbiCalls && IsPIC) {
+ D.Diag(diag::err_drv_unsupported_noabicalls_pic);
}
if (!UseAbiCalls)
@@ -343,6 +350,34 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
AddTargetFeature(Args, Features, options::OPT_mno_madd4, options::OPT_mmadd4,
"nomadd4");
AddTargetFeature(Args, Features, options::OPT_mmt, options::OPT_mno_mt, "mt");
+ AddTargetFeature(Args, Features, options::OPT_mcrc, options::OPT_mno_crc,
+ "crc");
+ AddTargetFeature(Args, Features, options::OPT_mvirt, options::OPT_mno_virt,
+ "virt");
+ AddTargetFeature(Args, Features, options::OPT_mginv, options::OPT_mno_ginv,
+ "ginv");
+
+ if (Arg *A = Args.getLastArg(options::OPT_mindirect_jump_EQ)) {
+ StringRef Val = StringRef(A->getValue());
+ if (Val == "hazard") {
+ Arg *B =
+ Args.getLastArg(options::OPT_mmicromips, options::OPT_mno_micromips);
+ Arg *C = Args.getLastArg(options::OPT_mips16, options::OPT_mno_mips16);
+
+ if (B && B->getOption().matches(options::OPT_mmicromips))
+ D.Diag(diag::err_drv_unsupported_indirect_jump_opt)
+ << "hazard" << "micromips";
+ else if (C && C->getOption().matches(options::OPT_mips16))
+ D.Diag(diag::err_drv_unsupported_indirect_jump_opt)
+ << "hazard" << "mips16";
+ else if (mips::supportsIndirectJumpHazardBarrier(CPUName))
+ Features.push_back("+use-indirect-jump-hazard");
+ else
+ D.Diag(diag::err_drv_unsupported_indirect_jump_opt)
+ << "hazard" << CPUName;
+ } else
+ D.Diag(diag::err_drv_unknown_indirect_jump_opt) << Val;
+ }
}
mips::IEEE754Standard mips::getIEEE754Standard(StringRef &CPU) {
@@ -447,3 +482,20 @@ bool mips::shouldUseFPXX(const ArgList &Args, const llvm::Triple &Triple,
return UseFPXX;
}
+
+bool mips::supportsIndirectJumpHazardBarrier(StringRef &CPU) {
+ // Supporting the hazard barrier method of dealing with indirect
+ // jumps requires MIPSR2 support.
+ return llvm::StringSwitch<bool>(CPU)
+ .Case("mips32r2", true)
+ .Case("mips32r3", true)
+ .Case("mips32r5", true)
+ .Case("mips32r6", true)
+ .Case("mips64r2", true)
+ .Case("mips64r3", true)
+ .Case("mips64r5", true)
+ .Case("mips64r6", true)
+ .Case("octeon", true)
+ .Case("p5600", true)
+ .Default(false);
+}
diff --git a/lib/Driver/ToolChains/Arch/Mips.h b/lib/Driver/ToolChains/Arch/Mips.h
index 89eea9a1514c..a232ddbc8f3d 100644
--- a/lib/Driver/ToolChains/Arch/Mips.h
+++ b/lib/Driver/ToolChains/Arch/Mips.h
@@ -21,8 +21,6 @@ namespace clang {
namespace driver {
namespace tools {
-bool isMipsArch(llvm::Triple::ArchType Arch);
-
namespace mips {
typedef enum { Legacy = 1, Std2008 = 2 } IEEE754Standard;
@@ -53,6 +51,7 @@ bool isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
bool shouldUseFPXX(const llvm::opt::ArgList &Args, const llvm::Triple &Triple,
StringRef CPUName, StringRef ABIName,
mips::FloatABI FloatABI);
+bool supportsIndirectJumpHazardBarrier(StringRef &CPU);
} // end namespace mips
} // end namespace tools
diff --git a/lib/Driver/ToolChains/Arch/PPC.cpp b/lib/Driver/ToolChains/Arch/PPC.cpp
index 7c7e1c70e550..f6a95962ace3 100644
--- a/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -106,6 +106,16 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
if (FloatABI == ppc::FloatABI::Soft)
Features.push_back("-hard-float");
+
+ ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Args);
+ if (ReadGOT == ppc::ReadGOTPtrMode::SecurePlt)
+ Features.push_back("+secure-plt");
+}
+
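+// Decide how the PPC backend should read the GOT pointer: -msecure-plt
+// selects the SecurePlt model, otherwise the traditional Bss model is used.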
+ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D,
+                                              const ArgList &Args) {
+ if (Args.getLastArg(options::OPT_msecure_plt))
+ return ppc::ReadGOTPtrMode::SecurePlt;
+ return ppc::ReadGOTPtrMode::Bss;
}
ppc::FloatABI ppc::getPPCFloatABI(const Driver &D, const ArgList &Args) {
diff --git a/lib/Driver/ToolChains/Arch/PPC.h b/lib/Driver/ToolChains/Arch/PPC.h
index 7d7c68101b7b..3acee91a2ac3 100644
--- a/lib/Driver/ToolChains/Arch/PPC.h
+++ b/lib/Driver/ToolChains/Arch/PPC.h
@@ -29,10 +29,17 @@ enum class FloatABI {
Hard,
};
+enum class ReadGOTPtrMode {
+ Bss,
+ SecurePlt,
+};
+
FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
std::string getPPCTargetCPU(const llvm::opt::ArgList &Args);
const char *getPPCAsmModeForCPU(StringRef Name);
+ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D,
+ const llvm::opt::ArgList &Args);
void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
diff --git a/lib/Driver/ToolChains/Arch/RISCV.cpp b/lib/Driver/ToolChains/Arch/RISCV.cpp
new file mode 100644
index 000000000000..11ce8a1fd769
--- /dev/null
+++ b/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -0,0 +1,378 @@
+//===--- RISCV.cpp - RISCV Helpers for Tools --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include "ToolChains/CommonArgs.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+static StringRef getExtensionTypeDesc(StringRef Ext) {
+ if (Ext.startswith("sx"))
+ return "non-standard supervisor-level extension";
+ if (Ext.startswith("s"))
+ return "standard supervisor-level extension";
+ if (Ext.startswith("x"))
+ return "non-standard user-level extension";
+ return StringRef();
+}
+
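+// Extract the extension class prefix, e.g. "xabc" -> "x", "sfoo" -> "s",
+// "sxbar" -> "sx".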
+static StringRef getExtensionType(StringRef Ext) {
+ if (Ext.startswith("sx"))
+ return "sx";
+ if (Ext.startswith("s"))
+ return "s";
+ if (Ext.startswith("x"))
+ return "x";
+ return StringRef();
+}
+
+static bool isSupportedExtension(StringRef Ext) {
+ // LLVM does not support "sx", "s" nor "x" extensions.
+ return false;
+}
+
+// Extensions may have a version number, and may be separated by
+// an underscore '_', e.g. rv32i2_m2.
+// Version number is divided into major and minor version numbers,
+// separated by a 'p'. If the minor version is 0 then 'p0' can be
+// omitted from the version string. E.g., rv32i2p0, rv32i2, rv32i2p1.
+static bool getExtensionVersion(const Driver &D, StringRef MArch,
+ StringRef Ext, StringRef In,
+ std::string &Major, std::string &Minor) {
+ auto I = In.begin();
+ auto E = In.end();
+
+ while (I != E && isDigit(*I))
+ Major.append(1, *I++);
+
+ if (Major.empty())
+ return true;
+
+ if (I != E && *I == 'p') {
+ ++I;
+
+ while (I != E && isDigit(*I))
+ Minor.append(1, *I++);
+
+ // Expected 'p' to be followed by minor version number.
+ if (Minor.empty()) {
+ std::string Error =
+ "minor version number missing after 'p' for extension";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return false;
+ }
+ }
+
+ // TODO: Handle extensions with version number.
+ std::string Error = "unsupported version number " + Major;
+ if (!Minor.empty())
+ Error += "." + Minor;
+ Error += " for extension";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name) << MArch << Error << Ext;
+
+ return false;
+}
+
+// Handle extension types other than the standard general-purpose and
+// standard user-level extensions.
+// Parse the ISA string containing non-standard user-level
+// extensions, standard supervisor-level extensions and
+// non-standard supervisor-level extensions.
+// These extensions start with 'x', 's', 'sx' prefixes, follow a
+// canonical order, might have a version number (major, minor)
+// and are separated by a single underscore '_'.
+// Set the hardware features for the extensions that are supported.
+static void getExtensionFeatures(const Driver &D,
+ const ArgList &Args,
+ std::vector<StringRef> &Features,
+ StringRef &MArch, StringRef &Exts) {
+ if (Exts.empty())
+ return;
+
+  // Multi-letter extensions are separated by a single underscore
+ // as described in RISC-V User-Level ISA V2.2.
+ SmallVector<StringRef, 8> Split;
+ Exts.split(Split, StringRef("_"));
+
+ SmallVector<StringRef, 3> Prefix;
+ Prefix.push_back("x");
+ Prefix.push_back("s");
+ Prefix.push_back("sx");
+ auto I = Prefix.begin();
+ auto E = Prefix.end();
+
+ SmallVector<StringRef, 8> AllExts;
+
+ for (StringRef Ext : Split) {
+ if (Ext.empty()) {
+ D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
+ << "extension name missing after separator '_'";
+ return;
+ }
+
+ StringRef Type = getExtensionType(Ext);
+ StringRef Name(Ext.substr(Type.size()));
+ StringRef Desc = getExtensionTypeDesc(Ext);
+
+ if (Type.empty()) {
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << "invalid extension prefix" << Ext;
+ return;
+ }
+
+ // Check ISA extensions are specified in the canonical order.
+ while (I != E && *I != Type)
+ ++I;
+
+ if (I == E) {
+ std::string Error = Desc;
+ Error += " not given in canonical order";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return;
+ }
+
+ // The order is OK, do not advance I to the next prefix
+ // to allow repeated extension type, e.g.: rv32ixabc_xdef.
+
+ if (Name.empty()) {
+ std::string Error = Desc;
+ Error += " name missing after";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return;
+ }
+
+ std::string Major, Minor;
+ auto Pos = Name.find_if(isDigit);
+ if (Pos != StringRef::npos) {
+ auto Next = Name.substr(Pos);
+ Name = Name.substr(0, Pos);
+ if (!getExtensionVersion(D, MArch, Ext, Next, Major, Minor))
+ return;
+ }
+
+    // Check whether this extension was already specified.
+ if (std::find(AllExts.begin(), AllExts.end(), Ext) != AllExts.end()) {
+ std::string Error = "duplicated ";
+ Error += Desc;
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return;
+ }
+
+ // Extension format is correct, keep parsing the extensions.
+ // TODO: Save Type, Name, Major, Minor to avoid parsing them later.
+ AllExts.push_back(Ext);
+ }
+
+ // Set target features.
+ // TODO: Hardware features to be handled in Support/TargetParser.cpp.
+ // TODO: Use version number when setting target features.
+ for (auto Ext : AllExts) {
+ if (!isSupportedExtension(Ext)) {
+ StringRef Desc = getExtensionTypeDesc(getExtensionType(Ext));
+ std::string Error = "unsupported ";
+ Error += Desc;
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << Ext;
+ return;
+ }
+ Features.push_back(Args.MakeArgString("+" + Ext));
+ }
+}
+
+void riscv::getRISCVTargetFeatures(const Driver &D, const ArgList &Args,
+ std::vector<StringRef> &Features) {
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ StringRef MArch = A->getValue();
+
+ // RISC-V ISA strings must be lowercase.
+ if (std::any_of(std::begin(MArch), std::end(MArch),
+ [](char c) { return isupper(c); })) {
+ D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
+ << "string must be lowercase";
+ return;
+ }
+
+ // ISA string must begin with rv32 or rv64.
+ if (!(MArch.startswith("rv32") || MArch.startswith("rv64")) ||
+ (MArch.size() < 5)) {
+ D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
+ << "string must begin with rv32{i,e,g} or rv64{i,g}";
+ return;
+ }
+
+    bool HasRV64 = MArch.startswith("rv64");
+
+ // The canonical order specified in ISA manual.
+ // Ref: Table 22.1 in RISC-V User-Level ISA V2.2
+ StringRef StdExts = "mafdqlcbjtpvn";
+ bool HasF = false, HasD = false;
+ char Baseline = MArch[4];
+
+ // First letter should be 'e', 'i' or 'g'.
+ switch (Baseline) {
+ default:
+ D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
+ << "first letter should be 'e', 'i' or 'g'";
+ return;
+ case 'e': {
+ StringRef Error;
+ // Currently LLVM does not support 'e'.
+ // Extension 'e' is not allowed in rv64.
+ if (HasRV64)
+ Error = "standard user-level extension 'e' requires 'rv32'";
+ else
+ Error = "unsupported standard user-level extension 'e'";
+ D.Diag(diag::err_drv_invalid_riscv_arch_name)
+ << MArch << Error;
+ return;
+ }
+ case 'i':
+ break;
+ case 'g':
+ // g = imafd
+ StdExts = StdExts.drop_front(4);
+ Features.push_back("+m");
+ Features.push_back("+a");
+ Features.push_back("+f");
+ Features.push_back("+d");
+ HasF = true;
+ HasD = true;
+ break;
+ }
+
+ // Skip rvxxx
+ StringRef Exts = MArch.substr(5);
+
+ // Remove non-standard extensions and supervisor-level extensions.
+ // They have 'x', 's', 'sx' prefixes. Parse them at the end.
+ // Find the very first occurrence of 's' or 'x'.
+ StringRef OtherExts;
+ size_t Pos = Exts.find_first_of("sx");
+ if (Pos != StringRef::npos) {
+ OtherExts = Exts.substr(Pos);
+ Exts = Exts.substr(0, Pos);
+ }
+
+ std::string Major, Minor;
+ if (!getExtensionVersion(D, MArch, std::string(1, Baseline),
+ Exts, Major, Minor))
+ return;
+
+ // TODO: Use version number when setting target features
+ // and consume the underscore '_' that might follow.
+
+ auto StdExtsItr = StdExts.begin();
+ auto StdExtsEnd = StdExts.end();
+
+ for (auto I = Exts.begin(), E = Exts.end(); I != E; ++I) {
+ char c = *I;
+
+ // Check ISA extensions are specified in the canonical order.
+ while (StdExtsItr != StdExtsEnd && *StdExtsItr != c)
+ ++StdExtsItr;
+
+ if (StdExtsItr == StdExtsEnd) {
+        // Either c is a valid extension that was not given in canonical
+        // order, or it is not a valid extension at all.
+ StringRef Error;
+ if (StdExts.contains(c))
+ Error = "standard user-level extension not given in canonical order";
+ else
+ Error = "invalid standard user-level extension";
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << Error << std::string(1, c);
+ return;
+ }
+
+ // Move to next char to prevent repeated letter.
+ ++StdExtsItr;
+
+ if (std::next(I) != E) {
+ // Skip c.
+ std::string Next = std::string(std::next(I), E);
+ std::string Major, Minor;
+ if (!getExtensionVersion(D, MArch, std::string(1, c),
+ Next, Major, Minor))
+ return;
+
+ // TODO: Use version number when setting target features
+ // and consume the underscore '_' that might follow.
+ }
+
+ // The order is OK, then push it into features.
+ switch (c) {
+ default:
+ // Currently LLVM supports only "mafdc".
+ D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
+ << MArch << "unsupported standard user-level extension"
+ << std::string(1, c);
+ return;
+ case 'm':
+ Features.push_back("+m");
+ break;
+ case 'a':
+ Features.push_back("+a");
+ break;
+ case 'f':
+ Features.push_back("+f");
+ HasF = true;
+ break;
+ case 'd':
+ Features.push_back("+d");
+ HasD = true;
+ break;
+ case 'c':
+ Features.push_back("+c");
+ break;
+ }
+ }
+
+ // Dependency check.
+  // It's illegal to specify the 'd' (double-precision floating-point)
+  // extension without also specifying the 'f' (single-precision
+  // floating-point) extension.
+ if (HasD && !HasF)
+ D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
+ << "d requires f extension to also be specified";
+
+ // Additional dependency checks.
+ // TODO: The 'q' extension requires rv64.
+ // TODO: It is illegal to specify 'e' extensions with 'f' and 'd'.
+
+ // Handle all other types of extensions.
+ getExtensionFeatures(D, Args, Features, MArch, OtherExts);
+ }
+
+ // Now add any that the user explicitly requested on the command line,
+ // which may override the defaults.
+  handleTargetFeaturesGroup(Args, Features,
+                            options::OPT_m_riscv_Features_Group);
+}
+
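+// Select the ABI: honour an explicit -mabi=, otherwise default to the
+// soft-float ABI matching the triple's register width.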
+StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+ return A->getValue();
+
+ return Triple.getArch() == llvm::Triple::riscv32 ? "ilp32" : "lp64";
+}
diff --git a/lib/Driver/ToolChains/Arch/RISCV.h b/lib/Driver/ToolChains/Arch/RISCV.h
new file mode 100644
index 000000000000..beda14979fab
--- /dev/null
+++ b/lib/Driver/ToolChains/Arch/RISCV.h
@@ -0,0 +1,32 @@
+//===--- RISCV.h - RISCV-specific Tool Helpers ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_RISCV_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_RISCV_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace riscv {
+void getRISCVTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features);
+StringRef getRISCVABI(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+} // end namespace riscv
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_RISCV_H
diff --git a/lib/Driver/ToolChains/Arch/Sparc.cpp b/lib/Driver/ToolChains/Arch/Sparc.cpp
index 594ec9986d8e..c177031b9f75 100644
--- a/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -45,14 +45,29 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
.Case("niagara2", "-Av8plusb")
.Case("niagara3", "-Av8plusd")
.Case("niagara4", "-Av8plusd")
+ .Case("ma2100", "-Aleon")
+ .Case("ma2150", "-Aleon")
+ .Case("ma2155", "-Aleon")
+ .Case("ma2450", "-Aleon")
+ .Case("ma2455", "-Aleon")
+ .Case("ma2x5x", "-Aleon")
+ .Case("ma2080", "-Aleon")
+ .Case("ma2085", "-Aleon")
+ .Case("ma2480", "-Aleon")
+ .Case("ma2485", "-Aleon")
+ .Case("ma2x8x", "-Aleon")
+ .Case("myriad2", "-Aleon")
+ .Case("myriad2.1", "-Aleon")
+ .Case("myriad2.2", "-Aleon")
+ .Case("myriad2.3", "-Aleon")
.Case("leon2", "-Av8")
.Case("at697e", "-Av8")
.Case("at697f", "-Av8")
- .Case("leon3", "-Av8")
+ .Case("leon3", "-Aleon")
.Case("ut699", "-Av8")
- .Case("gr712rc", "-Av8")
- .Case("leon4", "-Av8")
- .Case("gr740", "-Av8")
+ .Case("gr712rc", "-Aleon")
+ .Case("leon4", "-Aleon")
+ .Case("gr740", "-Aleon")
.Default("-Av8");
}
}
diff --git a/lib/Driver/ToolChains/Arch/X86.cpp b/lib/Driver/ToolChains/Arch/X86.cpp
index a18b2aa35b03..7a4f836d2e1a 100644
--- a/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/lib/Driver/ToolChains/Arch/X86.cpp
@@ -40,26 +40,29 @@ const char *x86::getX86TargetCPU(const ArgList &Args,
return Args.MakeArgString(CPU);
}
- if (const Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) {
- // Mapping built by referring to X86TargetInfo::getDefaultFeatures().
+ if (const Arg *A = Args.getLastArgNoClaim(options::OPT__SLASH_arch)) {
+ // Mapping built by looking at lib/Basic's X86TargetInfo::initFeatureMap().
StringRef Arch = A->getValue();
- const char *CPU;
- if (Triple.getArch() == llvm::Triple::x86) {
+ const char *CPU = nullptr;
+ if (Triple.getArch() == llvm::Triple::x86) { // 32-bit-only /arch: flags.
CPU = llvm::StringSwitch<const char *>(Arch)
.Case("IA32", "i386")
.Case("SSE", "pentium3")
.Case("SSE2", "pentium4")
- .Case("AVX", "sandybridge")
- .Case("AVX2", "haswell")
.Default(nullptr);
- } else {
+ }
+ if (CPU == nullptr) { // 32-bit and 64-bit /arch: flags.
CPU = llvm::StringSwitch<const char *>(Arch)
.Case("AVX", "sandybridge")
.Case("AVX2", "haswell")
+ .Case("AVX512F", "knl")
+ .Case("AVX512", "skylake-avx512")
.Default(nullptr);
}
- if (CPU)
+ if (CPU) {
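+      // Claim the /arch: flag only when it actually maps to a CPU.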
+ A->claim();
return CPU;
+ }
}
// Select the default CPU if none was given (or detection failed).
@@ -141,30 +144,6 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("+ssse3");
}
- // Set features according to the -arch flag on MSVC.
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) {
- StringRef Arch = A->getValue();
- bool ArchUsed = false;
- // First, look for flags that are shared in x86 and x86-64.
- if (ArchType == llvm::Triple::x86_64 || ArchType == llvm::Triple::x86) {
- if (Arch == "AVX" || Arch == "AVX2") {
- ArchUsed = true;
- Features.push_back(Args.MakeArgString("+" + Arch.lower()));
- }
- }
- // Then, look for x86-specific flags.
- if (ArchType == llvm::Triple::x86) {
- if (Arch == "IA32") {
- ArchUsed = true;
- } else if (Arch == "SSE" || Arch == "SSE2") {
- ArchUsed = true;
- Features.push_back(Args.MakeArgString("+" + Arch.lower()));
- }
- }
- if (!ArchUsed)
- D.Diag(clang::diag::warn_drv_unused_argument) << A->getAsString(Args);
- }
-
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);
diff --git a/lib/Driver/ToolChains/BareMetal.cpp b/lib/Driver/ToolChains/BareMetal.cpp
index 57a668650e6b..c302d647b973 100644
--- a/lib/Driver/ToolChains/BareMetal.cpp
+++ b/lib/Driver/ToolChains/BareMetal.cpp
@@ -95,16 +95,23 @@ void BareMetal::addClangTargetOptions(const ArgList &DriverArgs,
CC1Args.push_back("-nostdsysteminc");
}
-std::string BareMetal::findLibCxxIncludePath(CXXStdlibType LibType) const {
+void BareMetal::AddClangCXXStdlibIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
StringRef SysRoot = getDriver().SysRoot;
if (SysRoot.empty())
- return "";
+ return;
- switch (LibType) {
+ switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
SmallString<128> Dir(SysRoot);
llvm::sys::path::append(Dir, "include", "c++", "v1");
- return Dir.str();
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ break;
}
case ToolChain::CST_Libstdcxx: {
SmallString<128> Dir(SysRoot);
@@ -124,24 +131,12 @@ std::string BareMetal::findLibCxxIncludePath(CXXStdlibType LibType) const {
Version = CandidateVersion;
}
if (Version.Major == -1)
- return "";
+ return;
llvm::sys::path::append(Dir, Version.Text);
- return Dir.str();
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ break;
}
}
- llvm_unreachable("unhandled LibType");
-}
-
-void BareMetal::AddClangCXXStdlibIncludeArgs(
- const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdinc) ||
- DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- std::string Path = findLibCxxIncludePath(GetCXXStdlibType(DriverArgs));
- if (!Path.empty())
- addSystemInclude(DriverArgs, CC1Args, Path);
}
void BareMetal::AddCXXStdlibLibArgs(const ArgList &Args,
diff --git a/lib/Driver/ToolChains/BareMetal.h b/lib/Driver/ToolChains/BareMetal.h
index 0bed63332cad..43a6a8b4bec3 100644
--- a/lib/Driver/ToolChains/BareMetal.h
+++ b/lib/Driver/ToolChains/BareMetal.h
@@ -53,7 +53,6 @@ public:
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
- std::string findLibCxxIncludePath(ToolChain::CXXStdlibType LibType) const;
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 8b895c4514c4..eaff940a1c2b 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -12,6 +12,7 @@
#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
+#include "Arch/RISCV.h"
#include "Arch/Sparc.h"
#include "Arch/SystemZ.h"
#include "Arch/X86.h"
@@ -24,12 +25,12 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
-#include "clang/Config/config.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/XRayArgs.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compression.h"
@@ -130,6 +131,10 @@ forAllAssociatedToolChains(Compilation &C, const JobAction &JA,
Work(*C.getSingleOffloadToolChain<Action::OFK_Cuda>());
else if (JA.isDeviceOffloading(Action::OFK_Cuda))
Work(*C.getSingleOffloadToolChain<Action::OFK_Host>());
+ else if (JA.isHostOffloading(Action::OFK_HIP))
+ Work(*C.getSingleOffloadToolChain<Action::OFK_HIP>());
+ else if (JA.isDeviceOffloading(Action::OFK_HIP))
+ Work(*C.getSingleOffloadToolChain<Action::OFK_Host>());
if (JA.isHostOffloading(Action::OFK_OpenMP)) {
auto TCs = C.getOffloadToolChains<Action::OFK_OpenMP>();
@@ -327,6 +332,10 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
case llvm::Triple::ppc64le:
ppc::getPPCTargetFeatures(D, Triple, Args, Features);
break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ riscv::getRISCVTargetFeatures(D, Args, Features);
+ break;
case llvm::Triple::systemz:
systemz::getSystemZTargetFeatures(Args, Features);
break;
@@ -403,7 +412,6 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
const ToolChain &TC, bool KernelOrKext,
const ObjCRuntime &objcRuntime,
ArgStringList &CmdArgs) {
- const Driver &D = TC.getDriver();
const llvm::Triple &Triple = TC.getTriple();
if (KernelOrKext) {
@@ -445,21 +453,6 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
ExceptionArg->getOption().matches(options::OPT_fexceptions);
if (CXXExceptionsEnabled) {
- if (Triple.isPS4CPU()) {
- ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
- assert(ExceptionArg &&
- "On the PS4 exceptions should only be enabled if passing "
- "an argument");
- if (RTTIMode == ToolChain::RM_DisabledExplicitly) {
- const Arg *RTTIArg = TC.getRTTIArg();
- assert(RTTIArg && "RTTI disabled explicitly but no RTTIArg!");
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << RTTIArg->getAsString(Args) << ExceptionArg->getAsString(Args);
- } else if (RTTIMode == ToolChain::RM_EnabledImplicitly)
- D.Diag(diag::warn_drv_enabling_rtti_with_exceptions);
- } else
- assert(TC.getRTTIMode() != ToolChain::RM_DisabledImplicitly);
-
CmdArgs.push_back("-fcxx-exceptions");
EH = true;
@@ -524,10 +517,17 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
// XCore never wants frame pointers, regardless of OS.
// WebAssembly never wants frame pointers.
return false;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ return !areOptimizationsEnabled(Args);
default:
break;
}
+  if (Triple.getOS() == llvm::Triple::NetBSD)
+    return !areOptimizationsEnabled(Args);
+
if (Triple.isOSLinux() || Triple.getOS() == llvm::Triple::CloudABI) {
switch (Triple.getArch()) {
// Don't use a frame pointer on linux if optimizing for certain targets.
@@ -604,7 +604,19 @@ static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs) {
}
}
-/// \brief Vectorize at all optimization levels greater than 1 except for -Oz.
+/// Add a CC1 and CC1AS option to specify the debug file path prefix map.
+static void addDebugPrefixMapArg(const Driver &D, const ArgList &Args,
+                                 ArgStringList &CmdArgs) {
+ for (const Arg *A : Args.filtered(options::OPT_fdebug_prefix_map_EQ)) {
+ StringRef Map = A->getValue();
+ if (Map.find('=') == StringRef::npos)
+ D.Diag(diag::err_drv_invalid_argument_to_fdebug_prefix_map) << Map;
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fdebug-prefix-map=" + Map));
+ A->claim();
+ }
+}
+
+/// Vectorize at all optimization levels greater than 1 except for -Oz.
/// For -Oz the loop vectorizer is disabled, while the SLP vectorizer is enabled.
static bool shouldEnableVectorizerAtOLevel(const ArgList &Args, bool isSlpVec) {
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
@@ -826,7 +838,7 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
}
}
-/// \brief Check whether the given input tree contains any compilation actions.
+/// Check whether the given input tree contains any compilation actions.
static bool ContainsCompileAction(const Action *A) {
if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A))
return true;
@@ -838,7 +850,7 @@ static bool ContainsCompileAction(const Action *A) {
return false;
}
-/// \brief Check if -relax-all should be passed to the internal assembler.
+/// Check if -relax-all should be passed to the internal assembler.
/// This is done by default when compiling non-assembler source with -O0.
static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
bool RelaxDefault = true;
@@ -1064,73 +1076,28 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// wonky, but we include looking for .gch so we can support seamless
// replacement into a build system already set up to be generating
// .gch files.
- int YcIndex = -1, YuIndex = -1;
- {
- int AI = -1;
+
+ if (getToolChain().getDriver().IsCLMode()) {
const Arg *YcArg = Args.getLastArg(options::OPT__SLASH_Yc);
const Arg *YuArg = Args.getLastArg(options::OPT__SLASH_Yu);
- for (const Arg *A : Args.filtered(options::OPT_clang_i_Group)) {
- // Walk the whole i_Group and skip non "-include" flags so that the index
- // here matches the index in the next loop below.
- ++AI;
- if (!A->getOption().matches(options::OPT_include))
- continue;
- if (YcArg && strcmp(A->getValue(), YcArg->getValue()) == 0)
- YcIndex = AI;
- if (YuArg && strcmp(A->getValue(), YuArg->getValue()) == 0)
- YuIndex = AI;
+ if (YcArg && JA.getKind() >= Action::PrecompileJobClass &&
+ JA.getKind() <= Action::AssembleJobClass) {
+ CmdArgs.push_back(Args.MakeArgString("-building-pch-with-obj"));
+ }
+ if (YcArg || YuArg) {
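+      // /Yc and /Yu name a "through header": everything up to and including
+      // that header is provided by the PCH.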
+ StringRef ThroughHeader = YcArg ? YcArg->getValue() : YuArg->getValue();
+ if (!isa<PrecompileJobAction>(JA)) {
+ CmdArgs.push_back("-include-pch");
+ CmdArgs.push_back(Args.MakeArgString(D.GetClPchPath(C, ThroughHeader)));
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-pch-through-header=") + ThroughHeader));
}
- }
- if (isa<PrecompileJobAction>(JA) && YcIndex != -1) {
- Driver::InputList Inputs;
- D.BuildInputs(getToolChain(), C.getArgs(), Inputs);
- assert(Inputs.size() == 1 && "Need one input when building pch");
- CmdArgs.push_back(Args.MakeArgString(Twine("-find-pch-source=") +
- Inputs[0].second->getValue()));
}
bool RenderedImplicitInclude = false;
- int AI = -1;
for (const Arg *A : Args.filtered(options::OPT_clang_i_Group)) {
- ++AI;
-
- if (getToolChain().getDriver().IsCLMode() &&
- A->getOption().matches(options::OPT_include)) {
- // In clang-cl mode, /Ycfoo.h means that all code up to a foo.h
- // include is compiled into foo.h, and everything after goes into
- // the .obj file. /Yufoo.h means that all includes prior to and including
- // foo.h are completely skipped and replaced with a use of the pch file
- // for foo.h. (Each flag can have at most one value, multiple /Yc flags
- // just mean that the last one wins.) If /Yc and /Yu are both present
- // and refer to the same file, /Yc wins.
- // Note that OPT__SLASH_FI gets mapped to OPT_include.
- // FIXME: The code here assumes that /Yc and /Yu refer to the same file.
- // cl.exe seems to support both flags with different values, but that
- // seems strange (which flag does /Fp now refer to?), so don't implement
- // that until someone needs it.
- int PchIndex = YcIndex != -1 ? YcIndex : YuIndex;
- if (PchIndex != -1) {
- if (isa<PrecompileJobAction>(JA)) {
- // When building the pch, skip all includes after the pch.
- assert(YcIndex != -1 && PchIndex == YcIndex);
- if (AI >= YcIndex)
- continue;
- } else {
- // When using the pch, skip all includes prior to the pch.
- if (AI < PchIndex) {
- A->claim();
- continue;
- }
- if (AI == PchIndex) {
- A->claim();
- CmdArgs.push_back("-include-pch");
- CmdArgs.push_back(
- Args.MakeArgString(D.GetClPchPath(C, A->getValue())));
- continue;
- }
- }
- }
- } else if (A->getOption().matches(options::OPT_include)) {
+ if (A->getOption().matches(options::OPT_include)) {
// Handling of gcc-style gch precompiled headers.
bool IsFirstImplicitInclude = !RenderedImplicitInclude;
RenderedImplicitInclude = true;
@@ -1282,6 +1249,8 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
case llvm::Triple::hexagon:
case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
case llvm::Triple::systemz:
case llvm::Triple::xcore:
return false;
@@ -1291,6 +1260,8 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
static bool isNoCommonDefault(const llvm::Triple &Triple) {
switch (Triple.getArch()) {
default:
+ if (Triple.isOSFuchsia())
+ return true;
return false;
case llvm::Triple::xcore:
@@ -1338,7 +1309,7 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
// Forward the -mglobal-merge option for explicit control over the pass.
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
options::OPT_mno_global_merge)) {
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
if (A->getOption().matches(options::OPT_mno_global_merge))
CmdArgs.push_back("-arm-global-merge=false");
else
@@ -1391,6 +1362,11 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
AddPPCTargetArgs(Args, CmdArgs);
break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ AddRISCVTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
@@ -1447,21 +1423,21 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
options::OPT_mno_fix_cortex_a53_835769)) {
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
else
CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0");
} else if (Triple.isAndroid()) {
// Enabled A53 errata (835769) workaround by default on android
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
}
// Forward the -mglobal-merge option for explicit control over the pass.
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
options::OPT_mno_global_merge)) {
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
if (A->getOption().matches(options::OPT_mno_global_merge))
CmdArgs.push_back("-aarch64-enable-global-merge=false");
else
@@ -1668,6 +1644,25 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
}
+void Clang::AddRISCVTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // FIXME: currently defaults to the soft-float ABIs. Will need to be
+ // expanded to select ilp32f, ilp32d, lp64f, lp64d when appropriate.
+ const char *ABIName = nullptr;
+ const llvm::Triple &Triple = getToolChain().getTriple();
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+ ABIName = A->getValue();
+ else if (Triple.getArch() == llvm::Triple::riscv32)
+ ABIName = "ilp32";
+ else if (Triple.getArch() == llvm::Triple::riscv64)
+ ABIName = "lp64";
+ else
+ llvm_unreachable("Unexpected triple!");
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+}
+
void Clang::AddSparcTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
sparc::FloatABI FloatABI =
@@ -1722,6 +1717,9 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
}
+ } else if (getToolChain().getDriver().IsCLMode()) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-x86-asm-syntax=intel");
}
// Set flags to support MCU ABI.
@@ -2043,7 +2041,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// Handle various floating point optimization flags, mapping them to the
// appropriate LLVM code generation flags. This is complicated by several
// "umbrella" flags, so we do this by stepping through the flags incrementally
- // adjusting what we think is enabled/disabled, then at the end settting the
+ // adjusting what we think is enabled/disabled, then at the end setting the
// LLVM flags based on the final state.
bool HonorINFs = true;
bool HonorNaNs = true;
@@ -2202,6 +2200,11 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-mfpmath");
CmdArgs.push_back(A->getValue());
}
+
+ // Disable a codegen optimization for floating-point casts.
+ if (Args.hasFlag(options::OPT_fno_strict_float_cast_overflow,
+ options::OPT_fstrict_float_cast_overflow, false))
+ CmdArgs.push_back("-fno-strict-float-cast-overflow");
}
static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
@@ -2337,6 +2340,7 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
options::OPT_cl_no_signed_zeros,
options::OPT_cl_denorms_are_zero,
options::OPT_cl_fp32_correctly_rounded_divide_sqrt,
+ options::OPT_cl_uniform_work_group_size
};
if (Arg *A = Args.getLastArg(options::OPT_cl_std_EQ)) {
@@ -2458,6 +2462,13 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
CmdArgs.push_back("-fno-math-builtin");
}
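+
+/// Compute the default module cache path:
+/// <system-temp-dir>/org.llvm.clang.<user>/ModuleCache.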
+void Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
+ llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Result);
+ llvm::sys::path::append(Result, "org.llvm.clang.");
+ appendUserToPath(Result);
+ llvm::sys::path::append(Result, "ModuleCache");
+}
+
static void RenderModulesOptions(Compilation &C, const Driver &D,
const ArgList &Args, const InputInfo &Input,
const InputInfo &Output,
@@ -2499,11 +2510,13 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
CmdArgs.push_back("-fmodules-strict-decluse");
// -fno-implicit-modules turns off implicitly compiling modules on demand.
+ bool ImplicitModules = false;
if (!Args.hasFlag(options::OPT_fimplicit_modules,
options::OPT_fno_implicit_modules, HaveClangModules)) {
if (HaveModules)
CmdArgs.push_back("-fno-implicit-modules");
} else if (HaveModules) {
+ ImplicitModules = true;
// -fmodule-cache-path specifies where our implicitly-built module files
// should be written.
SmallString<128> Path;
@@ -2518,10 +2531,7 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
llvm::sys::path::append(Path, "modules");
} else if (Path.empty()) {
// No module path was provided: use the default.
- llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, Path);
- llvm::sys::path::append(Path, "org.llvm.clang.");
- appendUserToPath(Path);
- llvm::sys::path::append(Path, "ModuleCache");
+ Driver::getDefaultModuleCachePath(Path);
}
const char Arg[] = "-fmodules-cache-path=";
@@ -2613,7 +2623,11 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
options::OPT_fmodules_validate_once_per_build_session);
}
- Args.AddLastArg(CmdArgs, options::OPT_fmodules_validate_system_headers);
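+  // Implicitly-built modules now validate system headers by default;
+  // -fno-modules-validate-system-headers opts out.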
+ if (Args.hasFlag(options::OPT_fmodules_validate_system_headers,
+ options::OPT_fno_modules_validate_system_headers,
+ ImplicitModules))
+ CmdArgs.push_back("-fmodules-validate-system-headers");
+
Args.AddLastArg(CmdArgs, options::OPT_fmodules_disable_diagnostic_validation);
}
@@ -2632,6 +2646,9 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
CmdArgs.push_back("-fno-signed-char");
}
+ if (Args.hasFlag(options::OPT_fchar8__t, options::OPT_fno_char8__t, false))
+ CmdArgs.push_back("-fchar8_t");
+
if (const Arg *A = Args.getLastArg(options::OPT_fshort_wchar,
options::OPT_fno_short_wchar)) {
if (A->getOption().matches(options::OPT_fshort_wchar)) {
@@ -2919,7 +2936,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// Forward -gcodeview. EmitCodeView might have been set by CL-compatibility
// argument parsing.
- if (Args.hasArg(options::OPT_gcodeview) || EmitCodeView) {
+ if (EmitCodeView) {
// DWARFVersion remains at 0 if no explicit choice was made.
CmdArgs.push_back("-gcodeview");
} else if (DWARFVersion == 0 &&
@@ -2937,7 +2954,7 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// debuggers don't handle missing end columns well, so it's better not to
// include any column info.
if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
- /*Default=*/!(IsWindowsMSVC && EmitCodeView) &&
+ /*Default=*/!EmitCodeView &&
DebuggerTuning != llvm::DebuggerKind::SCE))
CmdArgs.push_back("-dwarf-column-info");
@@ -2975,6 +2992,18 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (DebugInfoKind == codegenoptions::LimitedDebugInfo && NeedFullDebug)
DebugInfoKind = codegenoptions::FullDebugInfo;
+  if (Args.hasFlag(options::OPT_gembed_source, options::OPT_gno_embed_source,
+                   false)) {
+ // Source embedding is a vendor extension to DWARF v5. By now we have
+ // checked if a DWARF version was stated explicitly, and have otherwise
+ // fallen back to the target default, so if this is still not at least 5 we
+ // emit an error.
+ if (DWARFVersion < 5)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << Args.getLastArg(options::OPT_gembed_source)->getAsString(Args)
+ << "-gdwarf-5";
+ CmdArgs.push_back("-gembed-source");
+ }
+
RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DWARFVersion,
DebuggerTuning);
@@ -2984,7 +3013,8 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-debug-info-macro");
// -ggnu-pubnames turns on gnu style pubnames in the backend.
- if (Args.hasArg(options::OPT_ggnu_pubnames))
+ if (Args.hasFlag(options::OPT_ggnu_pubnames, options::OPT_gno_gnu_pubnames,
+ false))
CmdArgs.push_back("-ggnu-pubnames");
// -gdwarf-aranges turns on the emission of the aranges section in the
@@ -2992,13 +3022,18 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// Always enabled for SCE tuning.
if (Args.hasArg(options::OPT_gdwarf_aranges) ||
DebuggerTuning == llvm::DebuggerKind::SCE) {
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-generate-arange-section");
}
if (Args.hasFlag(options::OPT_fdebug_types_section,
options::OPT_fno_debug_types_section, false)) {
- CmdArgs.push_back("-backend-option");
+ if (!T.isOSBinFormatELF())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Args.getLastArg(options::OPT_fdebug_types_section)
+ ->getAsString(Args)
+ << T.getTriple();
+ CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-generate-type-units");
}
@@ -3029,13 +3064,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Check number of inputs for sanity. We need at least one input.
assert(Inputs.size() >= 1 && "Must have at least one input.");
const InputInfo &Input = Inputs[0];
- // CUDA compilation may have multiple inputs (source file + results of
+ // CUDA/HIP compilation may have multiple inputs (source file + results of
// device-side compilations). OpenMP device jobs also take the host IR as a
// second input. All other jobs are expected to have exactly one
// input.
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
+ bool IsHIP = JA.isOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
- assert((IsCuda || (IsOpenMPDevice && Inputs.size() == 2) ||
+ assert((IsCuda || IsHIP || (IsOpenMPDevice && Inputs.size() == 2) ||
Inputs.size() == 1) &&
"Unable to handle multiple inputs.");
@@ -3047,10 +3083,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool IsWindowsMSVC = RawTriple.isWindowsMSVCEnvironment();
bool IsIAMCU = RawTriple.isOSIAMCU();
- // Adjust IsWindowsXYZ for CUDA compilations. Even when compiling in device
- // mode (i.e., getToolchain().getTriple() is NVPTX, not Windows), we need to
- // pass Windows-specific flags to cc1.
- if (IsCuda) {
+ // Adjust IsWindowsXYZ for CUDA/HIP compilations. Even when compiling in
+ // device mode (i.e., getToolchain().getTriple() is NVPTX/AMDGCN, not
+ // Windows), we need to pass Windows-specific flags to cc1.
+ if (IsCuda || IsHIP) {
IsWindowsMSVC |= AuxTriple && AuxTriple->isWindowsMSVCEnvironment();
IsWindowsGNU |= AuxTriple && AuxTriple->isWindowsGNUEnvironment();
IsWindowsCygnus |= AuxTriple && AuxTriple->isWindowsCygwinEnvironment();
@@ -3074,18 +3110,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_MJ);
}
- if (IsCuda) {
- // We have to pass the triple of the host if compiling for a CUDA device and
- // vice-versa.
+ if (IsCuda || IsHIP) {
+ // We have to pass the triple of the host if compiling for a CUDA/HIP device
+ // and vice-versa.
std::string NormalizedTriple;
- if (JA.isDeviceOffloading(Action::OFK_Cuda))
+ if (JA.isDeviceOffloading(Action::OFK_Cuda) ||
+ JA.isDeviceOffloading(Action::OFK_HIP))
NormalizedTriple = C.getSingleOffloadToolChain<Action::OFK_Host>()
->getTriple()
.normalize();
else
- NormalizedTriple = C.getSingleOffloadToolChain<Action::OFK_Cuda>()
- ->getTriple()
- .normalize();
+ NormalizedTriple =
+ (IsCuda ? C.getSingleOffloadToolChain<Action::OFK_Cuda>()
+ : C.getSingleOffloadToolChain<Action::OFK_HIP>())
+ ->getTriple()
+ .normalize();
CmdArgs.push_back("-aux-triple");
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
@@ -3188,7 +3227,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_LLVM_BC)
CmdArgs.push_back("-emit-llvm-uselists");
- if (D.isUsingLTO()) {
+ // Device-side jobs do not support LTO.
+ bool isDeviceOffloadAction = !(JA.isDeviceOffloading(Action::OFK_None) ||
+ JA.isDeviceOffloading(Action::OFK_Host));
+
+ if (D.isUsingLTO() && !isDeviceOffloadAction) {
Args.AddLastArg(CmdArgs, options::OPT_flto, options::OPT_flto_EQ);
// The Darwin and PS4 linkers currently use the legacy LTO API, which
@@ -3207,6 +3250,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fthinlto_index_EQ);
}
+ if (Args.getLastArg(options::OPT_save_temps_EQ))
+ Args.AddLastArg(CmdArgs, options::OPT_save_temps_EQ);
+
// Embed-bitcode option.
if (C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO() &&
(isa<BackendJobAction>(JA) || isa<AssembleJobAction>(JA))) {
@@ -3224,13 +3270,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!C.isForDiagnostics())
CmdArgs.push_back("-disable-free");
-// Disable the verification pass in -asserts builds.
#ifdef NDEBUG
- CmdArgs.push_back("-disable-llvm-verifier");
- // Discard LLVM value names in -asserts builds.
- CmdArgs.push_back("-discard-value-names");
+ const bool IsAssertBuild = false;
+#else
+ const bool IsAssertBuild = true;
#endif
+ // Disable the verification pass in -asserts builds.
+ if (!IsAssertBuild)
+ CmdArgs.push_back("-disable-llvm-verifier");
+
+ // Discard value names in -asserts builds unless otherwise specified.
+ if (Args.hasFlag(options::OPT_fdiscard_value_names,
+ options::OPT_fno_discard_value_names, !IsAssertBuild))
+ CmdArgs.push_back("-discard-value-names");
+
// Set the main file name, so that debug info works even with
// -save-temps.
CmdArgs.push_back("-main-file-name");
@@ -3246,6 +3300,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CheckCodeGenerationOptions(D, Args);
+ unsigned FunctionAlignment = ParseFunctionAlignment(getToolChain(), Args);
+ assert(FunctionAlignment <= 31 && "function alignment will be truncated!");
+ if (FunctionAlignment) {
+ CmdArgs.push_back("-function-alignment");
+ CmdArgs.push_back(Args.MakeArgString(std::to_string(FunctionAlignment)));
+ }
+
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
@@ -3288,9 +3349,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fveclib);
- if (!Args.hasFlag(options::OPT_fmerge_all_constants,
- options::OPT_fno_merge_all_constants))
- CmdArgs.push_back("-fno-merge-all-constants");
+ if (Args.hasFlag(options::OPT_fmerge_all_constants,
+ options::OPT_fno_merge_all_constants, false))
+ CmdArgs.push_back("-fmerge-all-constants");
+
+ if (Args.hasFlag(options::OPT_fno_delete_null_pointer_checks,
+ options::OPT_fdelete_null_pointer_checks, false))
+ CmdArgs.push_back("-fno-delete-null-pointer-checks");
// LLVM Code Generator Options.
@@ -3382,9 +3447,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_strict_vtable_pointers,
false))
CmdArgs.push_back("-fstrict-vtable-pointers");
+ if (Args.hasFlag(options::OPT_fforce_emit_vtables,
+ options::OPT_fno_force_emit_vtables,
+ false))
+ CmdArgs.push_back("-fforce-emit-vtables");
if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
options::OPT_fno_optimize_sibling_calls))
CmdArgs.push_back("-mdisable-tail-calls");
+ if (Args.hasFlag(options::OPT_fno_escaping_block_tail_calls,
+ options::OPT_fescaping_block_tail_calls, false))
+ CmdArgs.push_back("-fno-escaping-block-tail-calls");
Args.AddLastArg(CmdArgs, options::OPT_ffine_grained_bitfield_accesses,
options::OPT_fno_fine_grained_bitfield_accesses);
@@ -3404,8 +3476,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.hasArg(options::OPT_dA))
CmdArgs.push_back("-masm-verbose");
- if (!Args.hasFlag(options::OPT_fintegrated_as, options::OPT_fno_integrated_as,
- IsIntegratedAssemblerDefault))
+ if (!getToolChain().useIntegratedAs())
CmdArgs.push_back("-no-integrated-as");
if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
@@ -3498,6 +3569,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
types::ID InputType = Input.getType();
if (D.IsCLMode())
AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
+ else
+ EmitCodeView = Args.hasArg(options::OPT_gcodeview);
const Arg *SplitDWARFArg = nullptr;
RenderDebugOptions(getToolChain(), D, RawTriple, Args, EmitCodeView,
@@ -3583,14 +3656,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_finstrument_function_entry_bare))
A->render(Args, CmdArgs);
- addPGOAndCoverageFlags(C, D, Output, Args, CmdArgs);
+ // NVPTX doesn't support PGO or coverage. There's no runtime support for
+ // sampling, the overhead of call arc collection is far too high, and there
+ // is no way to collect the output.
+ if (!Triple.isNVPTX())
+ addPGOAndCoverageFlags(C, D, Output, Args, CmdArgs);
if (auto *ABICompatArg = Args.getLastArg(options::OPT_fclang_abi_compat_EQ))
ABICompatArg->render(Args, CmdArgs);
- // Add runtime flag for PS4 when PGO or Coverage are enabled.
- if (RawTriple.isPS4CPU())
+ // Add runtime flag for PS4 when PGO, coverage, or sanitizers are enabled.
+ if (RawTriple.isPS4CPU()) {
PS4cpu::addProfileRTArgs(getToolChain(), Args, CmdArgs);
+ PS4cpu::addSanitizerArgs(getToolChain(), CmdArgs);
+ }
// Pass options for controlling the default header search paths.
if (Args.hasArg(options::OPT_nostdinc)) {
@@ -3657,6 +3736,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_pedantic_errors);
Args.AddLastArg(CmdArgs, options::OPT_w);
+ // Fixed-point flags.
+ if (Args.hasFlag(options::OPT_ffixed_point, options::OPT_fno_fixed_point,
+ /*Default=*/false))
+ Args.AddLastArg(CmdArgs, options::OPT_ffixed_point);
+
// Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
// (-ansi is equivalent to -std=c89 or -std=c++98).
//
@@ -3741,14 +3825,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add in -fdebug-compilation-dir if necessary.
addDebugCompDirArg(Args, CmdArgs);
- for (const Arg *A : Args.filtered(options::OPT_fdebug_prefix_map_EQ)) {
- StringRef Map = A->getValue();
- if (Map.find('=') == StringRef::npos)
- D.Diag(diag::err_drv_invalid_argument_to_fdebug_prefix_map) << Map;
- else
- CmdArgs.push_back(Args.MakeArgString("-fdebug-prefix-map=" + Map));
- A->claim();
- }
+ addDebugPrefixMapArg(D, Args, CmdArgs);
if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
options::OPT_ftemplate_depth_EQ)) {
@@ -3798,6 +3875,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
+ if (Args.hasFlag(options::OPT_fstack_size_section,
+ options::OPT_fno_stack_size_section, RawTriple.isPS4()))
+ CmdArgs.push_back("-fstack-size-section");
+
CmdArgs.push_back("-ferror-limit");
if (Arg *A = Args.getLastArg(options::OPT_ferror_limit_EQ))
CmdArgs.push_back(A->getValue());
@@ -3857,14 +3938,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -f (flag) options which we can pass directly.
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_fdigraphs, options::OPT_fno_digraphs);
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
- // Emulated TLS is enabled by default on Android and OpenBSD, and can be enabled
- // manually with -femulated-tls.
- bool EmulatedTLSDefault = Triple.isAndroid() || Triple.isOSOpenBSD() ||
- Triple.isWindowsCygwinEnvironment();
- if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
- EmulatedTLSDefault))
- CmdArgs.push_back("-femulated-tls");
+ Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
+ options::OPT_fno_emulated_tls);
+
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
Args.AddLastArg(CmdArgs, options::OPT_fzvector);
@@ -3890,7 +3968,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(options::OPT_fopenmp_use_tls,
options::OPT_fnoopenmp_use_tls, /*Default=*/true))
CmdArgs.push_back("-fnoopenmp-use-tls");
+ Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
+ options::OPT_fno_openmp_simd);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
+
+ // When in OpenMP offloading mode with an NVPTX target, forward the
+ // cuda-mode flag.
+ Args.AddLastArg(CmdArgs, options::OPT_fopenmp_cuda_mode,
+ options::OPT_fno_openmp_cuda_mode);
break;
default:
// By default, if Clang doesn't know how to generate useful OpenMP code
@@ -3901,6 +3986,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// semantic analysis, etc.
break;
}
+ } else {
+ Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
+ options::OPT_fno_openmp_simd);
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
}
const SanitizerArgs &Sanitize = getToolChain().getSanitizerArgs();
@@ -3980,26 +4069,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mstack-probe-size=0");
}
+ if (!Args.hasFlag(options::OPT_mstack_arg_probe,
+ options::OPT_mno_stack_arg_probe, true))
+ CmdArgs.push_back(Args.MakeArgString("-mno-stack-arg-probe"));
+
if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it,
options::OPT_mno_restrict_it)) {
if (A->getOption().matches(options::OPT_mrestrict_it)) {
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-arm-restrict-it");
} else {
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-arm-no-restrict-it");
}
} else if (Triple.isOSWindows() &&
(Triple.getArch() == llvm::Triple::arm ||
Triple.getArch() == llvm::Triple::thumb)) {
// Windows on ARM expects restricted IT blocks
- CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-arm-restrict-it");
}
// Forward -cl options to -cc1
RenderOpenCLOptions(Args, CmdArgs);
+ if (Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fcf-protection=") + A->getValue()));
+ }
+
// Forward -f options with positive and negative forms; we translate
// these by hand.
if (Arg *A = getLastProfileSampleUseArg(Args)) {
@@ -4058,8 +4156,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain::RTTIMode RTTIMode = getToolChain().getRTTIMode();
if (KernelOrKext || (types::isCXX(InputType) &&
- (RTTIMode == ToolChain::RM_DisabledExplicitly ||
- RTTIMode == ToolChain::RM_DisabledImplicitly)))
+ (RTTIMode == ToolChain::RM_Disabled)))
CmdArgs.push_back("-fno-rtti");
// -fshort-enums=0 is default for all architectures except Hexagon.
@@ -4081,6 +4178,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
KernelOrKext)
CmdArgs.push_back("-fno-use-cxa-atexit");
+ if (Args.hasFlag(options::OPT_fregister_global_dtors_with_atexit,
+ options::OPT_fno_register_global_dtors_with_atexit,
+ RawTriple.isOSDarwin() && !KernelOrKext))
+ CmdArgs.push_back("-fregister-global-dtors-with-atexit");
+
// -fms-extensions=0 is default.
if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC))
@@ -4147,7 +4249,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!IsWindowsMSVC || IsMSVC2015Compatible))
CmdArgs.push_back("-fno-threadsafe-statics");
- // -fno-delayed-template-parsing is default, except when targetting MSVC.
+ // -fno-delayed-template-parsing is default, except when targeting MSVC.
// Many old Windows SDK versions require this to parse.
// FIXME: MSVC introduced /Zc:twoPhase- to disable this behavior in their
// compiler. We should be able to disable this by default at some point.
@@ -4292,6 +4394,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (!Args.hasFlag(options::OPT_Qy, options::OPT_Qn, true))
+ CmdArgs.push_back("-Qn");
+
// -fcommon is the default unless compiling kernel code or the target says so
bool NoCommonDefault = KernelOrKext || isNoCommonDefault(RawTriple);
if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common,
@@ -4473,31 +4578,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Setup statistics file output.
- if (const Arg *A = Args.getLastArg(options::OPT_save_stats_EQ)) {
- StringRef SaveStats = A->getValue();
-
- SmallString<128> StatsFile;
- bool DoSaveStats = false;
- if (SaveStats == "obj") {
- if (Output.isFilename()) {
- StatsFile.assign(Output.getFilename());
- llvm::sys::path::remove_filename(StatsFile);
- }
- DoSaveStats = true;
- } else if (SaveStats == "cwd") {
- DoSaveStats = true;
- } else {
- D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
- }
-
- if (DoSaveStats) {
- StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
- llvm::sys::path::append(StatsFile, BaseName);
- llvm::sys::path::replace_extension(StatsFile, "stats");
- CmdArgs.push_back(Args.MakeArgString(Twine("-stats-file=") +
- StatsFile));
- }
- }
+ SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
+ if (!StatsFile.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("-stats-file=") + StatsFile));
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
@@ -4584,14 +4667,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Flags));
}
- // Host-side cuda compilation receives device-side outputs as Inputs[1...].
- // Include them with -fcuda-include-gpubinary.
- if (IsCuda && Inputs.size() > 1)
- for (auto I = std::next(Inputs.begin()), E = Inputs.end(); I != E; ++I) {
+ if (IsCuda) {
+ // Host-side CUDA compilation receives all device-side outputs in a single
+ // fatbin as Inputs[1]. Include the binary with -fcuda-include-gpubinary.
+ if (Inputs.size() > 1) {
+ assert(Inputs.size() == 2 && "More than one GPU binary!");
CmdArgs.push_back("-fcuda-include-gpubinary");
- CmdArgs.push_back(I->getFilename());
+ CmdArgs.push_back(Inputs[1].getFilename());
}
+ if (Args.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc, false))
+ CmdArgs.push_back("-fcuda-rdc");
+ if (Args.hasFlag(options::OPT_fcuda_short_ptr,
+ options::OPT_fno_cuda_short_ptr, false))
+ CmdArgs.push_back("-fcuda-short-ptr");
+ }
+
// OpenMP offloading device jobs take the argument -fopenmp-host-ir-file-path
// to specify the result of the compile phase on the host, so the meaningful
// device declarations can be identified. Also, -fopenmp-is-device is passed
@@ -4607,7 +4698,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// For all the host OpenMP offloading compile jobs we need to pass the targets
// information using -fopenmp-targets= option.
- if (isa<CompileJobAction>(JA) && JA.isHostOffloading(Action::OFK_OpenMP)) {
+ if (JA.isHostOffloading(Action::OFK_OpenMP)) {
SmallString<128> TargetInfo("-fopenmp-targets=");
Arg *Tgts = Args.getLastArg(options::OPT_fopenmp_targets_EQ);
@@ -4635,6 +4726,71 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fwhole-program-vtables");
}
+ if (Arg *A = Args.getLastArg(options::OPT_fexperimental_isel,
+ options::OPT_fno_experimental_isel)) {
+ CmdArgs.push_back("-mllvm");
+ if (A->getOption().matches(options::OPT_fexperimental_isel)) {
+ CmdArgs.push_back("-global-isel=1");
+
+ // GISel is on by default on AArch64 at -O0, so don't bother adding
+ // the fallback remarks for it. Other combinations will add a warning of
+ // some kind.
+ bool IsArchSupported = Triple.getArch() == llvm::Triple::aarch64;
+ bool IsOptLevelSupported = false;
+
+ Arg *A = Args.getLastArg(options::OPT_O_Group);
+ if (Triple.getArch() == llvm::Triple::aarch64) {
+ if (!A || A->getOption().matches(options::OPT_O0))
+ IsOptLevelSupported = true;
+ }
+ if (!IsArchSupported || !IsOptLevelSupported) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-global-isel-abort=2");
+
+ if (!IsArchSupported)
+ D.Diag(diag::warn_drv_experimental_isel_incomplete)
+ << Triple.getArchName();
+ else
+ D.Diag(diag::warn_drv_experimental_isel_incomplete_opt);
+ }
+ } else {
+ CmdArgs.push_back("-global-isel=0");
+ }
+ }
+
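
A condensed standalone sketch of the fallback-warning policy above (an illustration under stated assumptions, not part of the patch; needsFallbackWarning is a hypothetical helper): the abort-with-remark option and the driver warning are added unless we are on AArch64 at -O0, the one configuration where GlobalISel is on by default.

#include <cassert>
#include <string>

// Hypothetical model of the -fexperimental-isel fallback decision.
static bool needsFallbackWarning(const std::string &Arch, int OptLevel) {
  bool ArchSupported = (Arch == "aarch64");
  bool OptLevelSupported = ArchSupported && OptLevel == 0;
  return !ArchSupported || !OptLevelSupported;
}

int main() {
  assert(!needsFallbackWarning("aarch64", 0)); // GISel default; no warning
  assert(needsFallbackWarning("aarch64", 2));  // warn: opt level unsupported
  assert(needsFallbackWarning("x86_64", 0));   // warn: arch unsupported
  return 0;
}
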
+ if (Arg *A = Args.getLastArg(options::OPT_fforce_enable_int128,
+ options::OPT_fno_force_enable_int128)) {
+ if (A->getOption().matches(options::OPT_fforce_enable_int128))
+ CmdArgs.push_back("-fforce-enable-int128");
+ }
+
+ if (Args.hasFlag(options::OPT_fcomplete_member_pointers,
+ options::OPT_fno_complete_member_pointers, false))
+ CmdArgs.push_back("-fcomplete-member-pointers");
+
+ if (Arg *A = Args.getLastArg(options::OPT_moutline,
+ options::OPT_mno_outline)) {
+ if (A->getOption().matches(options::OPT_moutline)) {
+ // We only support -moutline on AArch64 right now. If we're not compiling
+ // for AArch64, emit a warning and ignore the flag. Otherwise, add the
+ // proper -mllvm flags.
+ if (Triple.getArch() != llvm::Triple::aarch64) {
+ D.Diag(diag::warn_drv_moutline_unsupported_opt)
+ << Triple.getArchName();
+ } else {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-machine-outliner");
+ }
+ } else {
+ // Disable all outlining behavior.
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-machine-outliner=never");
+ }
+ }
+
+ if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig,
+ getToolChain().getTriple().isOSBinFormatELF() &&
+ getToolChain().useIntegratedAs()))
+ CmdArgs.push_back("-faddrsig");
+
// Finally add the compile command to the compilation.
if (Args.hasArg(options::OPT__SLASH_fallback) &&
Output.getType() == types::TY_Object &&
@@ -4653,12 +4809,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
- // Handle the debug info splitting at object creation time if we're
- // creating an object.
- // TODO: Currently only works on linux with newer objcopy.
- if (SplitDWARF && Output.getType() == types::TY_Object)
- SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, SplitDWARFOut);
-
if (Arg *A = Args.getLastArg(options::OPT_pg))
if (Args.hasArg(options::OPT_fomit_frame_pointer))
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
@@ -4709,6 +4859,13 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
getToolChain().getDriver().Diag(diag::err_drv_unknown_objc_runtime)
<< value;
}
+ if ((runtime.getKind() == ObjCRuntime::GNUstep) &&
+ (runtime.getVersion() >= VersionTuple(2, 0)))
+ if (!getToolChain().getTriple().isOSBinFormatELF()) {
+ getToolChain().getDriver().Diag(
+ diag::err_drv_gnustep_objc_runtime_incompatible_binary)
+ << runtime.getVersion().getMajor();
+ }
runtimeArg->render(args, cmdArgs);
return runtime;
@@ -4802,7 +4959,7 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
// Legacy behaviour is to target the gnustep runtime if we are in
// non-fragile mode or the GCC runtime in fragile mode.
if (isNonFragile)
- runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple(1, 6));
+ runtime = ObjCRuntime(ObjCRuntime::GNUstep, VersionTuple(2, 0));
else
runtime = ObjCRuntime(ObjCRuntime::GCC, VersionTuple());
}
@@ -4930,13 +5087,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("--dependent-lib=oldnames");
}
- // Both /showIncludes and /E (and /EP) write to stdout. Allowing both
- // would produce interleaved output, so ignore /showIncludes in such cases.
- if ((!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_EP)) ||
- (Args.hasArg(options::OPT__SLASH_P) &&
- Args.hasArg(options::OPT__SLASH_EP) && !Args.hasArg(options::OPT_E)))
- if (Arg *A = Args.getLastArg(options::OPT_show_includes))
- A->render(Args, CmdArgs);
+ if (Arg *A = Args.getLastArg(options::OPT_show_includes))
+ A->render(Args, CmdArgs);
// This controls whether or not we emit RTTI data for polymorphic types.
if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
@@ -5066,6 +5218,10 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
else
CmdArgs.push_back("msvc");
}
+
+ if (Args.hasArg(options::OPT__SLASH_Guard) &&
+ Args.getLastArgValue(options::OPT__SLASH_Guard).equals_lower("cf"))
+ CmdArgs.push_back("-cfguard");
}
visualstudio::Compiler *Clang::getCLFallback() const {
@@ -5220,6 +5376,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// Add the -fdebug-compilation-dir flag if needed.
addDebugCompDirArg(Args, CmdArgs);
+ addDebugPrefixMapArg(getToolChain().getDriver(), Args, CmdArgs);
+
// Set the AT_producer to the clang version when using the integrated
// assembler on assembly source files.
CmdArgs.push_back("-dwarf-debug-producer");
@@ -5316,19 +5474,17 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
+ if (Args.hasArg(options::OPT_gsplit_dwarf) &&
+ getToolChain().getTriple().isOSLinux()) {
+ CmdArgs.push_back("-split-dwarf-file");
+ CmdArgs.push_back(SplitDebugName(Args, Input));
+ }
+
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
const char *Exec = getToolChain().getDriver().getClangProgramPath();
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
-
- // Handle the debug info splitting at object creation time if we're
- // creating an object.
- // TODO: Currently only works on linux with newer objcopy.
- if (Args.hasArg(options::OPT_gsplit_dwarf) &&
- getToolChain().getTriple().isOSLinux())
- SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Input));
}
// Begin OffloadBundler
@@ -5379,6 +5535,10 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
Triples += Action::GetOffloadKindName(CurKind);
Triples += '-';
Triples += CurTC->getTriple().normalize();
+ if (CurKind == Action::OFK_HIP && CurDep->getOffloadingArch()) {
+ Triples += '-';
+ Triples += CurDep->getOffloadingArch();
+ }
}
CmdArgs.push_back(TCArgs.MakeArgString(Triples));
@@ -5448,6 +5608,11 @@ void OffloadBundler::ConstructJobMultipleOutputs(
Triples += Action::GetOffloadKindName(Dep.DependentOffloadKind);
Triples += '-';
Triples += Dep.DependentToolChain->getTriple().normalize();
+ if (Dep.DependentOffloadKind == Action::OFK_HIP &&
+ !Dep.DependentBoundArch.empty()) {
+ Triples += '-';
+ Triples += Dep.DependentBoundArch;
+ }
}
CmdArgs.push_back(TCArgs.MakeArgString(Triples));
diff --git a/lib/Driver/ToolChains/Clang.h b/lib/Driver/ToolChains/Clang.h
index e23822b9c678..df67fb2cb331 100644
--- a/lib/Driver/ToolChains/Clang.h
+++ b/lib/Driver/ToolChains/Clang.h
@@ -25,7 +25,7 @@ namespace driver {
namespace tools {
-/// \brief Clang compiler tool.
+/// Clang compiler tool.
class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
public:
static const char *getBaseInputName(const llvm::opt::ArgList &Args,
@@ -60,6 +60,8 @@ private:
llvm::opt::ArgStringList &CmdArgs) const;
void AddR600TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddRISCVTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
void AddSparcTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddSystemZTargetArgs(const llvm::opt::ArgList &Args,
@@ -109,7 +111,7 @@ public:
const char *LinkingOutput) const override;
};
-/// \brief Clang integrated assembler tool.
+/// Clang integrated assembler tool.
class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
public:
ClangAs(const ToolChain &TC)
diff --git a/lib/Driver/ToolChains/CloudABI.cpp b/lib/Driver/ToolChains/CloudABI.cpp
index cdf807f7f91f..80f9fc493fd8 100644
--- a/lib/Driver/ToolChains/CloudABI.cpp
+++ b/lib/Driver/ToolChains/CloudABI.cpp
@@ -10,7 +10,6 @@
#include "CloudABI.h"
#include "InputInfo.h"
#include "CommonArgs.h"
-#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
@@ -75,8 +74,11 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
{options::OPT_T_Group, options::OPT_e, options::OPT_s,
options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
- if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
@@ -104,10 +106,11 @@ CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(P.str());
}
-std::string CloudABI::findLibCxxIncludePath() const {
+void CloudABI::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
SmallString<128> P(getDriver().Dir);
llvm::sys::path::append(P, "..", getTriple().str(), "include/c++/v1");
- return P.str();
+ addSystemInclude(DriverArgs, CC1Args, P.str());
}
void CloudABI::AddCXXStdlibLibArgs(const ArgList &Args,
diff --git a/lib/Driver/ToolChains/CloudABI.h b/lib/Driver/ToolChains/CloudABI.h
index a284eb3dc0a4..7464c5954555 100644
--- a/lib/Driver/ToolChains/CloudABI.h
+++ b/lib/Driver/ToolChains/CloudABI.h
@@ -50,7 +50,9 @@ public:
GetCXXStdlibType(const llvm::opt::ArgList &Args) const override {
return ToolChain::CST_Libcxx;
}
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/lib/Driver/ToolChains/CommonArgs.cpp b/lib/Driver/ToolChains/CommonArgs.cpp
index f26880123d8c..1e093b25b909 100644
--- a/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/lib/Driver/ToolChains/CommonArgs.cpp
@@ -8,14 +8,14 @@
//===----------------------------------------------------------------------===//
#include "CommonArgs.h"
-#include "InputInfo.h"
-#include "Hexagon.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/SystemZ.h"
#include "Arch/X86.h"
+#include "Hexagon.h"
+#include "InputInfo.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
@@ -31,6 +31,7 @@
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Util.h"
+#include "clang/Driver/XRayArgs.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -41,6 +42,7 @@
#include "llvm/Option/Option.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compression.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
@@ -144,12 +146,14 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input);
for (const auto &II : Inputs) {
- // If the current tool chain refers to an OpenMP offloading host, we should
- // ignore inputs that refer to OpenMP offloading devices - they will be
- // embedded according to a proper linker script.
+ // If the current tool chain refers to an OpenMP or HIP offloading host, we
+ // should ignore inputs that refer to OpenMP or HIP offloading devices -
+ // they will be embedded according to a proper linker script.
if (auto *IA = II.getAction())
- if (JA.isHostOffloading(Action::OFK_OpenMP) &&
- IA->isDeviceOffloading(Action::OFK_OpenMP))
+ if ((JA.isHostOffloading(Action::OFK_OpenMP) &&
+ IA->isDeviceOffloading(Action::OFK_OpenMP)) ||
+ (JA.isHostOffloading(Action::OFK_HIP) &&
+ IA->isDeviceOffloading(Action::OFK_HIP)))
continue;
if (!TC.HasNativeLLVMSupport() && types::isLLVMIR(II.getType()))
@@ -363,23 +367,20 @@ unsigned tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
return Parallelism;
}
-// CloudABI and WebAssembly use -ffunction-sections and -fdata-sections by
-// default.
+// CloudABI uses -ffunction-sections and -fdata-sections by default.
bool tools::isUseSeparateSections(const llvm::Triple &Triple) {
- return Triple.getOS() == llvm::Triple::CloudABI ||
- Triple.getArch() == llvm::Triple::wasm32 ||
- Triple.getArch() == llvm::Triple::wasm64;
+ return Triple.getOS() == llvm::Triple::CloudABI;
}
void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
- ArgStringList &CmdArgs, bool IsThinLTO,
- const Driver &D) {
+ ArgStringList &CmdArgs, const InputInfo &Output,
+ const InputInfo &Input, bool IsThinLTO) {
// Tell the linker to load the plugin. This has to come before AddLinkerInputs
// as gold requires -plugin to come before any -plugin-opt that -Wl might
// forward.
CmdArgs.push_back("-plugin");
-#if defined(LLVM_ON_WIN32)
+#if defined(_WIN32)
const char *Suffix = ".dll";
#elif defined(__APPLE__)
const char *Suffix = ".dylib";
@@ -415,10 +416,16 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=O") + OOpt));
}
+ if (Args.hasArg(options::OPT_gsplit_dwarf)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=dwo_dir=") +
+ Output.getFilename() + "_dwo"));
+ }
+
if (IsThinLTO)
CmdArgs.push_back("-plugin-opt=thinlto");
- if (unsigned Parallelism = getLTOParallelism(Args, D))
+ if (unsigned Parallelism = getLTOParallelism(Args, ToolChain.getDriver()))
CmdArgs.push_back(
Args.MakeArgString("-plugin-opt=jobs=" + Twine(Parallelism)));
@@ -449,7 +456,7 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
if (Arg *A = getLastProfileSampleUseArg(Args)) {
StringRef FName = A->getValue();
if (!llvm::sys::fs::exists(FName))
- D.Diag(diag::err_drv_no_such_file) << FName;
+ ToolChain.getDriver().Diag(diag::err_drv_no_such_file) << FName;
else
CmdArgs.push_back(
Args.MakeArgString(Twine("-plugin-opt=sample-profile=") + FName));
@@ -458,14 +465,24 @@ void tools::AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
// Need this flag to turn on new pass manager via Gold plugin.
if (Args.hasFlag(options::OPT_fexperimental_new_pass_manager,
options::OPT_fno_experimental_new_pass_manager,
- /* Default */ false)) {
+ /* Default */ ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER)) {
CmdArgs.push_back("-plugin-opt=new-pass-manager");
}
+ // Setup statistics file output.
+ SmallString<128> StatsFile =
+ getStatsFileName(Args, Output, Input, ToolChain.getDriver());
+ if (!StatsFile.empty())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=stats-file=") + StatsFile));
}
void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
+ if (!Args.hasFlag(options::OPT_frtlib_add_rpath,
+ options::OPT_fno_rtlib_add_rpath, false))
+ return;
+
std::string CandidateRPath = TC.getArchSpecificLibPath();
if (TC.getVFS().exists(CandidateRPath)) {
CmdArgs.push_back("-rpath");
@@ -511,9 +528,9 @@ static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
bool IsShared, bool IsWhole) {
// Wrap any static runtimes that must be forced into executable in
// whole-archive.
- if (IsWhole) CmdArgs.push_back("-whole-archive");
+ if (IsWhole) CmdArgs.push_back("--whole-archive");
CmdArgs.push_back(TC.getCompilerRTArgString(Args, Sanitizer, IsShared));
- if (IsWhole) CmdArgs.push_back("-no-whole-archive");
+ if (IsWhole) CmdArgs.push_back("--no-whole-archive");
if (IsShared) {
addArchSpecificRPath(TC, Args, CmdArgs);
@@ -525,6 +542,15 @@ static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs,
StringRef Sanitizer) {
+ // Solaris ld defaults to --export-dynamic behavior but doesn't support
+ // the option, so don't try to pass it.
+ if (TC.getTriple().getOS() == llvm::Triple::Solaris)
+ return true;
+ // Myriad is static linking only. Furthermore, some versions of its
+ // linker have a bug where --export-dynamic overrides -static, so
+ // don't use --export-dynamic on that platform.
+ if (TC.getTriple().getVendor() == llvm::Triple::Myriad)
+ return true;
SmallString<128> SanRT(TC.getCompilerRT(Args, Sanitizer));
if (llvm::sys::fs::exists(SanRT + ".syms")) {
CmdArgs.push_back(Args.MakeArgString("--dynamic-list=" + SanRT + ".syms"));
@@ -538,22 +564,23 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
CmdArgs.push_back("--no-as-needed");
- // There's no libpthread or librt on RTEMS.
- if (TC.getTriple().getOS() != llvm::Triple::RTEMS) {
+ // There's no libpthread or librt on RTEMS or Android.
+ if (TC.getTriple().getOS() != llvm::Triple::RTEMS &&
+ !TC.getTriple().isAndroid()) {
CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-lrt");
+ if (TC.getTriple().getOS() != llvm::Triple::OpenBSD)
+ CmdArgs.push_back("-lrt");
}
CmdArgs.push_back("-lm");
// There's no libdl on all OSes.
if (TC.getTriple().getOS() != llvm::Triple::FreeBSD &&
TC.getTriple().getOS() != llvm::Triple::NetBSD &&
+ TC.getTriple().getOS() != llvm::Triple::OpenBSD &&
TC.getTriple().getOS() != llvm::Triple::RTEMS)
CmdArgs.push_back("-ldl");
- // Required for forkpty on some OSes
- if (TC.getTriple().getOS() == llvm::Triple::NetBSD)
- CmdArgs.push_back("-lutil");
// Required for backtrace on some OSes
- if (TC.getTriple().getOS() == llvm::Triple::NetBSD)
+ if (TC.getTriple().getOS() == llvm::Triple::NetBSD ||
+ TC.getTriple().getOS() == llvm::Triple::FreeBSD)
CmdArgs.push_back("-lexecinfo");
}
@@ -573,14 +600,17 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
HelperStaticRuntimes.push_back("asan-preinit");
}
if (SanArgs.needsUbsanRt()) {
- if (SanArgs.requiresMinimalRuntime()) {
+ if (SanArgs.requiresMinimalRuntime())
SharedRuntimes.push_back("ubsan_minimal");
- } else {
+ else
SharedRuntimes.push_back("ubsan_standalone");
- }
}
- if (SanArgs.needsScudoRt())
- SharedRuntimes.push_back("scudo");
+ if (SanArgs.needsScudoRt()) {
+ if (SanArgs.requiresMinimalRuntime())
+ SharedRuntimes.push_back("scudo_minimal");
+ else
+ SharedRuntimes.push_back("scudo");
+ }
if (SanArgs.needsHwasanRt())
SharedRuntimes.push_back("hwasan");
}
@@ -646,9 +676,15 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.needsEsanRt())
StaticRuntimes.push_back("esan");
if (SanArgs.needsScudoRt()) {
- StaticRuntimes.push_back("scudo");
- if (SanArgs.linkCXXRuntimes())
- StaticRuntimes.push_back("scudo_cxx");
+ if (SanArgs.requiresMinimalRuntime()) {
+ StaticRuntimes.push_back("scudo_minimal");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("scudo_cxx_minimal");
+ } else {
+ StaticRuntimes.push_back("scudo");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("scudo_cxx");
+ }
}
}
@@ -691,7 +727,7 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
// If there is a static runtime with no dynamic list, force all the symbols
// to be dynamic to be sure we export sanitizer interface functions.
if (AddExportDynamic)
- CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--export-dynamic");
const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
if (SanArgs.hasCrossDsoCfi() && !AddExportDynamic)
@@ -700,6 +736,35 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
return !StaticRuntimes.empty() || !NonWholeStaticRuntimes.empty();
}
+bool tools::addXRayRuntime(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (Args.hasArg(options::OPT_shared))
+ return false;
+
+ if (TC.getXRayArgs().needsXRayRt()) {
+ CmdArgs.push_back("-whole-archive");
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, "xray", false));
+ for (const auto &Mode : TC.getXRayArgs().modeList())
+ CmdArgs.push_back(TC.getCompilerRTArgString(Args, Mode, false));
+ CmdArgs.push_back("-no-whole-archive");
+ return true;
+ }
+
+ return false;
+}
+
+void tools::linkXRayRuntimeDeps(const ToolChain &TC, ArgStringList &CmdArgs) {
+ CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back("-lpthread");
+ if (TC.getTriple().getOS() != llvm::Triple::OpenBSD)
+ CmdArgs.push_back("-lrt");
+ CmdArgs.push_back("-lm");
+
+ if (TC.getTriple().getOS() != llvm::Triple::FreeBSD &&
+ TC.getTriple().getOS() != llvm::Triple::NetBSD &&
+ TC.getTriple().getOS() != llvm::Triple::OpenBSD)
+ CmdArgs.push_back("-ldl");
+}
+
bool tools::areOptimizationsEnabled(const ArgList &Args) {
// Find the last -O arg and see if it is non-zero.
if (Arg *A = Args.getLastArg(options::OPT_O_Group))
@@ -859,6 +924,10 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
}
}
+ // AMDGPU-specific defaults for PIC.
+ if (Triple.getArch() == llvm::Triple::amdgcn)
+ PIC = true;
+
// The last argument relating to either PIC or PIE wins, and no
// other argument is used. If the last argument is any flavor of the
// '-fno-...' arguments, both PIC and PIE are disabled. Any PIE
@@ -964,16 +1033,26 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
RWPI = true;
}
- // ROPI and RWPI are not comaptible with PIC or PIE.
+ // ROPI and RWPI are not compatible with PIC or PIE.
if ((ROPI || RWPI) && (PIC || PIE))
ToolChain.getDriver().Diag(diag::err_drv_ropi_rwpi_incompatible_with_pic);
- // When targettng MIPS64 with N64, the default is PIC, unless -mno-abicalls is
- // used.
- if ((Triple.getArch() == llvm::Triple::mips64 ||
- Triple.getArch() == llvm::Triple::mips64el) &&
- Args.hasArg(options::OPT_mno_abicalls))
- return std::make_tuple(llvm::Reloc::Static, 0U, false);
+ if (Triple.isMIPS()) {
+ StringRef CPUName;
+ StringRef ABIName;
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+ // When targeting the N64 ABI, PIC is the default, except when the
+ // -mno-abicalls option is used. In that case we exit at the next check
+ // regardless of PIC being set below.
+ if (ABIName == "n64")
+ PIC = true;
+ // When targeting MIPS with -mno-abicalls, it's always static.
+ if (Args.hasArg(options::OPT_mno_abicalls))
+ return std::make_tuple(llvm::Reloc::Static, 0U, false);
+ // Unlike other architectures, MIPS, even with -fPIC/-mxgot/multigot,
+ // does not use PIC level 2 for historical reasons.
+ IsPICLevelTwo = false;
+ }
if (PIC)
return std::make_tuple(llvm::Reloc::PIC_, IsPICLevelTwo ? 2U : 1U, PIE);
@@ -989,6 +1068,40 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
return std::make_tuple(RelocM, 0U, false);
}
+// `-falign-functions` conventionally indicates that functions should be
+// aligned to a 16-byte boundary.
+//
+// `-falign-functions=1` is the same as `-fno-align-functions`.
+//
+// The scalar `n` in `-falign-functions=n` must be an integral value between
+// [0, 65536]. If the value is not a power of two, it will be rounded up to
+// the nearest power of two.
+//
+// If we return `0`, the frontend will default to the backend's preferred
+// alignment.
+//
+// NOTE: icc only allows values between [0, 4096]. icc uses
+// `-falign-functions` to mean `-falign-functions=16`. GCC defaults to the
+// backend's preferred alignment. When `-falign-functions` is given without a
+// value, we also default to the backend's preferred alignment.
+unsigned tools::ParseFunctionAlignment(const ToolChain &TC,
+ const ArgList &Args) {
+ const Arg *A = Args.getLastArg(options::OPT_falign_functions,
+ options::OPT_falign_functions_EQ,
+ options::OPT_fno_align_functions);
+ if (!A || A->getOption().matches(options::OPT_fno_align_functions))
+ return 0;
+
+ if (A->getOption().matches(options::OPT_falign_functions))
+ return 0;
+
+ unsigned Value = 0;
+ if (StringRef(A->getValue()).getAsInteger(10, Value) || Value > 65536)
+ TC.getDriver().Diag(diag::err_drv_invalid_int_value)
+ << A->getAsString(Args) << A->getValue();
+ return Value ? llvm::Log2_32_Ceil(std::min(Value, 65536u)) : Value;
+}
+
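
A worked sketch of the rounding rule above (not part of the patch; log2Ceil is a plain stand-in for llvm::Log2_32_Ceil): the requested alignment is clamped to 65536, rounded up to the nearest power of two, and its log2 is what the driver forwards via -function-alignment.

#include <cassert>
#include <cstdint>

// Stand-in for llvm::Log2_32_Ceil: smallest L with (1 << L) >= V.
static unsigned log2Ceil(uint32_t V) {
  unsigned L = 0;
  while ((1u << L) < V)
    ++L;
  return L;
}

int main() {
  assert(log2Ceil(1) == 0);  // -falign-functions=1: backend default
  assert(log2Ceil(16) == 4); // -falign-functions=16 -> -function-alignment 4
  assert(log2Ceil(48) == 6); // 48 rounds up to 64 -> -function-alignment 6
  return 0;
}
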
void tools::AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs) {
llvm::Reloc::Model RelocationModel;
@@ -1000,7 +1113,7 @@ void tools::AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back("-KPIC");
}
-/// \brief Determine whether Objective-C automated reference counting is
+/// Determine whether Objective-C automated reference counting is
/// enabled.
bool tools::isObjCAutoRefCount(const ArgList &Args) {
return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
@@ -1189,3 +1302,146 @@ void tools::AddOpenMPLinkerScript(const ToolChain &TC, Compilation &C,
Lksf << LksBuffer;
}
+
+/// Add HIP linker script arguments at the end of the argument list so that
+/// the fat binary is built by embedding the device images into the host
+/// binary. The linker script also defines a symbol required by code generation
+/// so that the image can be retrieved at runtime. This should be used only in
+/// tool chains that support linker scripts.
+void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
+ const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ ArgStringList &CmdArgs, const JobAction &JA,
+ const Tool &T) {
+
+ // If this is not a HIP host toolchain, we don't need to do anything.
+ if (!JA.isHostOffloading(Action::OFK_HIP))
+ return;
+
+ // Create temporary linker script. Keep it if save-temps is enabled.
+ const char *LKS;
+ SmallString<256> Name = llvm::sys::path::filename(Output.getFilename());
+ if (C.getDriver().isSaveTempsEnabled()) {
+ llvm::sys::path::replace_extension(Name, "lk");
+ LKS = C.getArgs().MakeArgString(Name.c_str());
+ } else {
+ llvm::sys::path::replace_extension(Name, "");
+ Name = C.getDriver().GetTemporaryPath(Name, "lk");
+ LKS = C.addTempFile(C.getArgs().MakeArgString(Name.c_str()));
+ }
+
+ // Add linker script option to the command.
+ CmdArgs.push_back("-T");
+ CmdArgs.push_back(LKS);
+
+ // Create a buffer to write the contents of the linker script.
+ std::string LksBuffer;
+ llvm::raw_string_ostream LksStream(LksBuffer);
+
+ // Get the HIP offload tool chain.
+ auto *HIPTC = static_cast<const toolchains::CudaToolChain *>(
+ C.getSingleOffloadToolChain<Action::OFK_HIP>());
+ assert(HIPTC->getTriple().getArch() == llvm::Triple::amdgcn &&
+ "Wrong platform");
+ (void)HIPTC;
+
+ // Construct a clang-offload-bundler command to bundle object files for
+ // different GPU archs.
+ ArgStringList BundlerArgs;
+ BundlerArgs.push_back(Args.MakeArgString("-type=o"));
+
+ // TODO: Remove the dummy host binary entry which is required by
+ // clang-offload-bundler.
+ std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
+ std::string BundlerInputArg = "-inputs=/dev/null";
+
+ for (const auto &II : Inputs) {
+ const Action *A = II.getAction();
+ // Is this a device linking action?
+ if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
+ BundlerTargetArg = BundlerTargetArg + ",hip-amdgcn-amd-amdhsa-" +
+ StringRef(A->getOffloadingArch()).str();
+ BundlerInputArg = BundlerInputArg + "," + II.getFilename();
+ }
+ }
+ BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+
+ std::string BundleFileName = C.getDriver().GetTemporaryPath("BUNDLE", "o");
+ const char *BundleFile =
+ C.addTempFile(C.getArgs().MakeArgString(BundleFileName.c_str()));
+ auto BundlerOutputArg =
+ Args.MakeArgString(std::string("-outputs=").append(BundleFile));
+ BundlerArgs.push_back(BundlerOutputArg);
+
+ SmallString<128> BundlerPath(C.getDriver().Dir);
+ llvm::sys::path::append(BundlerPath, "clang-offload-bundler");
+ const char *Bundler = Args.MakeArgString(BundlerPath);
+ C.addCommand(llvm::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+
+ // Add commands to embed target binaries. We ensure that each section and
+ // image is 16-byte aligned. This is not mandatory, but it increases the
+ // likelihood that the data is aligned with a cache block on common host
+ // machines.
+ LksStream << "/*\n";
+ LksStream << " HIP Offload Linker Script\n";
+ LksStream << " *** Automatically generated by Clang ***\n";
+ LksStream << "*/\n";
+ LksStream << "TARGET(binary)\n";
+ LksStream << "INPUT(" << BundleFileName << ")\n";
+ LksStream << "SECTIONS\n";
+ LksStream << "{\n";
+ LksStream << " .hip_fatbin :\n";
+ LksStream << " ALIGN(0x10)\n";
+ LksStream << " {\n";
+ LksStream << " PROVIDE_HIDDEN(__hip_fatbin = .);\n";
+ LksStream << " " << BundleFileName << "\n";
+ LksStream << " }\n";
+ LksStream << "}\n";
+ LksStream << "INSERT BEFORE .data\n";
+ LksStream.flush();
+
+ // Dump the contents of the linker script if the user requested that. We
+ // support this option to enable testing of behavior with -###.
+ if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
+ llvm::errs() << LksBuffer;
+
+ // If this is a dry run, do not create the linker script file.
+ if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH))
+ return;
+
+ // Open script file and write the contents.
+ std::error_code EC;
+ llvm::raw_fd_ostream Lksf(LKS, EC, llvm::sys::fs::F_None);
+
+ if (EC) {
+ C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return;
+ }
+
+ Lksf << LksBuffer;
+}
+
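
For reference, a standalone sketch (with assumed file and arch names, not actual driver output) of the -targets/-inputs strings the device-input loop in AddHIPLinkerScript above builds when two device link results are bundled:

#include <cassert>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::string Targets = "-targets=host-x86_64-unknown-linux"; // dummy host
  std::string Inputs = "-inputs=/dev/null";                   // dummy host
  // Hypothetical device link outputs for two GPU archs.
  std::vector<std::pair<std::string, std::string>> Devices = {
      {"gfx803", "a.out-gfx803"}, {"gfx900", "a.out-gfx900"}};
  for (const auto &D : Devices) {
    Targets += ",hip-amdgcn-amd-amdhsa-" + D.first;
    Inputs += "," + D.second;
  }
  assert(Targets == "-targets=host-x86_64-unknown-linux,"
                    "hip-amdgcn-amd-amdhsa-gfx803,hip-amdgcn-amd-amdhsa-gfx900");
  assert(Inputs == "-inputs=/dev/null,a.out-gfx803,a.out-gfx900");
  return 0;
}
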
+SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
+ const InputInfo &Output,
+ const InputInfo &Input,
+ const Driver &D) {
+ const Arg *A = Args.getLastArg(options::OPT_save_stats_EQ);
+ if (!A)
+ return {};
+
+ StringRef SaveStats = A->getValue();
+ SmallString<128> StatsFile;
+ if (SaveStats == "obj" && Output.isFilename()) {
+ StatsFile.assign(Output.getFilename());
+ llvm::sys::path::remove_filename(StatsFile);
+ } else if (SaveStats != "cwd") {
+ D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
+ return {};
+ }
+
+ StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
+ llvm::sys::path::append(StatsFile, BaseName);
+ llvm::sys::path::replace_extension(StatsFile, "stats");
+ return StatsFile;
+}
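
A rough standalone model of the naming rule above (an illustration using std::filesystem, not the driver code; statsFileFor and its paths are assumptions): with -save-stats=obj the file lands next to the output object, with -save-stats=cwd it lands in the current directory, and in both cases it takes the input's base name with a .stats extension.

#include <cassert>
#include <filesystem>
#include <string>

namespace fs = std::filesystem;

static fs::path statsFileFor(const std::string &Mode, const fs::path &Output,
                             const fs::path &Input) {
  fs::path StatsFile;
  if (Mode == "obj")
    StatsFile = Output.parent_path(); // directory of the output file
  // Mode == "cwd" leaves StatsFile empty, i.e. relative to the CWD.
  StatsFile /= Input.filename();
  StatsFile.replace_extension(".stats");
  return StatsFile;
}

int main() {
  assert(statsFileFor("obj", "build/foo.o", "src/foo.c") ==
         fs::path("build/foo.stats"));
  assert(statsFileFor("cwd", "build/foo.o", "src/foo.c") ==
         fs::path("foo.stats"));
  return 0;
}
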
diff --git a/lib/Driver/ToolChains/CommonArgs.h b/lib/Driver/ToolChains/CommonArgs.h
index 012f5b9f87ae..e8ebe2225e1c 100644
--- a/lib/Driver/ToolChains/CommonArgs.h
+++ b/lib/Driver/ToolChains/CommonArgs.h
@@ -35,6 +35,12 @@ bool addSanitizerRuntimes(const ToolChain &TC, const llvm::opt::ArgList &Args,
void linkSanitizerRuntimeDeps(const ToolChain &TC,
llvm::opt::ArgStringList &CmdArgs);
+bool addXRayRuntime(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
+void linkXRayRuntimeDeps(const ToolChain &TC,
+ llvm::opt::ArgStringList &CmdArgs);
+
void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
llvm::opt::ArgStringList &CmdArgs,
const llvm::opt::ArgList &Args);
@@ -46,6 +52,12 @@ void AddOpenMPLinkerScript(const ToolChain &TC, Compilation &C,
llvm::opt::ArgStringList &CmdArgs,
const JobAction &JA);
+void AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, const JobAction &JA,
+ const Tool &T);
+
const char *SplitDebugName(const llvm::opt::ArgList &Args,
const InputInfo &Input);
@@ -54,12 +66,15 @@ void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
const InputInfo &Output, const char *OutFile);
void AddGoldPlugin(const ToolChain &ToolChain, const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs, bool IsThinLTO,
- const Driver &D);
+ llvm::opt::ArgStringList &CmdArgs, const InputInfo &Output,
+ const InputInfo &Input, bool IsThinLTO);
std::tuple<llvm::Reloc::Model, unsigned, bool>
ParsePICArgs(const ToolChain &ToolChain, const llvm::opt::ArgList &Args);
+unsigned ParseFunctionAlignment(const ToolChain &TC,
+ const llvm::opt::ArgList &Args);
+
void AddAssemblerKPIC(const ToolChain &ToolChain,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
@@ -98,6 +113,11 @@ void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features,
llvm::opt::OptSpecifier Group);
+/// Handles the -save-stats option and returns the filename to save statistics
+/// to.
+SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
+ const InputInfo &Output,
+ const InputInfo &Input, const Driver &D);
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/lib/Driver/ToolChains/Contiki.h b/lib/Driver/ToolChains/Contiki.h
index f6e15073887b..86d59ac92b16 100644
--- a/lib/Driver/ToolChains/Contiki.h
+++ b/lib/Driver/ToolChains/Contiki.h
@@ -23,7 +23,9 @@ public:
const llvm::opt::ArgList &Args);
// No support for finding a C++ standard library yet.
- std::string findLibCxxIncludePath() const override { return ""; }
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override {}
diff --git a/lib/Driver/ToolChains/CrossWindows.cpp b/lib/Driver/ToolChains/CrossWindows.cpp
index 5049033c4137..6ca04a8a3abb 100644
--- a/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/lib/Driver/ToolChains/CrossWindows.cpp
@@ -127,7 +127,8 @@ void tools::CrossWindows::Linker::ConstructJob(
}
CmdArgs.push_back("-shared");
- CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back(Args.hasArg(options::OPT_static) ? "-Bstatic"
+ : "-Bdynamic");
CmdArgs.push_back("--enable-auto-image-base");
diff --git a/lib/Driver/ToolChains/Cuda.cpp b/lib/Driver/ToolChains/Cuda.cpp
index bc4820797b2f..d17c4c39532a 100644
--- a/lib/Driver/ToolChains/Cuda.cpp
+++ b/lib/Driver/ToolChains/Cuda.cpp
@@ -8,18 +8,21 @@
//===----------------------------------------------------------------------===//
#include "Cuda.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
+#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
-#include "clang/Config/config.h"
#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Driver/Distro.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
#include <system_error>
using namespace clang::driver;
@@ -52,6 +55,10 @@ static CudaVersion ParseCudaVersionFile(llvm::StringRef V) {
return CudaVersion::CUDA_80;
if (Major == 9 && Minor == 0)
return CudaVersion::CUDA_90;
+ if (Major == 9 && Minor == 1)
+ return CudaVersion::CUDA_91;
+ if (Major == 9 && Minor == 2)
+ return CudaVersion::CUDA_92;
return CudaVersion::UNKNOWN;
}
@@ -59,42 +66,75 @@ CudaInstallationDetector::CudaInstallationDetector(
const Driver &D, const llvm::Triple &HostTriple,
const llvm::opt::ArgList &Args)
: D(D) {
- SmallVector<std::string, 4> CudaPathCandidates;
+ struct Candidate {
+ std::string Path;
+ bool StrictChecking;
+
+ Candidate(std::string Path, bool StrictChecking = false)
+ : Path(Path), StrictChecking(StrictChecking) {}
+ };
+ SmallVector<Candidate, 4> Candidates;
// In decreasing order so we prefer newer versions to older versions.
std::initializer_list<const char *> Versions = {"8.0", "7.5", "7.0"};
if (Args.hasArg(clang::driver::options::OPT_cuda_path_EQ)) {
- CudaPathCandidates.push_back(
- Args.getLastArgValue(clang::driver::options::OPT_cuda_path_EQ));
+ Candidates.emplace_back(
+ Args.getLastArgValue(clang::driver::options::OPT_cuda_path_EQ).str());
} else if (HostTriple.isOSWindows()) {
for (const char *Ver : Versions)
- CudaPathCandidates.push_back(
+ Candidates.emplace_back(
D.SysRoot + "/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v" +
Ver);
} else {
- CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda");
+ if (!Args.hasArg(clang::driver::options::OPT_cuda_path_ignore_env)) {
+ // Try to find the ptxas binary. If the executable is located in a
+ // directory called 'bin/', its parent directory might be a good guess
+ // for a valid CUDA installation.
+ // However, some distributions might install 'ptxas' to /usr/bin. In
+ // that case the candidate would be '/usr', which passes the following
+ // checks because '/usr/include' exists as well. To avoid this case, we
+ // always check for the directory potentially containing files for
+ // libdevice, even if the user passes -nocudalib.
+ if (llvm::ErrorOr<std::string> ptxas =
+ llvm::sys::findProgramByName("ptxas")) {
+ SmallString<256> ptxasAbsolutePath;
+ llvm::sys::fs::real_path(*ptxas, ptxasAbsolutePath);
+
+ StringRef ptxasDir = llvm::sys::path::parent_path(ptxasAbsolutePath);
+ if (llvm::sys::path::filename(ptxasDir) == "bin")
+ Candidates.emplace_back(llvm::sys::path::parent_path(ptxasDir),
+ /*StrictChecking=*/true);
+ }
+ }
+
+ Candidates.emplace_back(D.SysRoot + "/usr/local/cuda");
for (const char *Ver : Versions)
- CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-" + Ver);
+ Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);
if (Distro(D.getVFS()).IsDebian())
// Special case for Debian to have nvidia-cuda-toolkit work
// out of the box. More info on http://bugs.debian.org/882505
- CudaPathCandidates.push_back(D.SysRoot + "/usr/lib/cuda");
+ Candidates.emplace_back(D.SysRoot + "/usr/lib/cuda");
}
- for (const auto &CudaPath : CudaPathCandidates) {
- if (CudaPath.empty() || !D.getVFS().exists(CudaPath))
+ bool NoCudaLib = Args.hasArg(options::OPT_nocudalib);
+
+ for (const auto &Candidate : Candidates) {
+ InstallPath = Candidate.Path;
+ if (InstallPath.empty() || !D.getVFS().exists(InstallPath))
continue;
- InstallPath = CudaPath;
- BinPath = CudaPath + "/bin";
+ BinPath = InstallPath + "/bin";
IncludePath = InstallPath + "/include";
LibDevicePath = InstallPath + "/nvvm/libdevice";
auto &FS = D.getVFS();
if (!(FS.exists(IncludePath) && FS.exists(BinPath)))
continue;
+ bool CheckLibDevice = (!NoCudaLib || Candidate.StrictChecking);
+ if (CheckLibDevice && !FS.exists(LibDevicePath))
+ continue;
// On Linux, we have both lib and lib64 directories, and we need to choose
// based on our triple. On MacOS, we have only a lib directory.
@@ -119,14 +159,18 @@ CudaInstallationDetector::CudaInstallationDetector(
Version = ParseCudaVersionFile((*VersionFile)->getBuffer());
}
- if (Version == CudaVersion::CUDA_90) {
- // CUDA-9 uses single libdevice file for all GPU variants.
+ if (Version >= CudaVersion::CUDA_90) {
+ // CUDA-9+ uses a single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
if (FS.exists(FilePath)) {
- for (const char *GpuArch :
- {"sm_20", "sm_30", "sm_32", "sm_35", "sm_50", "sm_52", "sm_53",
- "sm_60", "sm_61", "sm_62", "sm_70"})
- LibDeviceMap[GpuArch] = FilePath;
+ for (const char *GpuArchName :
+ {"sm_30", "sm_32", "sm_35", "sm_37", "sm_50", "sm_52", "sm_53",
+ "sm_60", "sm_61", "sm_62", "sm_70", "sm_72"}) {
+ const CudaArch GpuArch = StringToCudaArch(GpuArchName);
+ if (Version >= MinVersionForCudaArch(GpuArch) &&
+ Version <= MaxVersionForCudaArch(GpuArch))
+ LibDeviceMap[GpuArchName] = FilePath;
+ }
}
} else {
std::error_code EC;
@@ -142,7 +186,7 @@ CudaInstallationDetector::CudaInstallationDetector(
StringRef GpuArch = FileName.slice(
LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
LibDeviceMap[GpuArch] = FilePath.str();
- // Insert map entries for specifc devices with this compute
+ // Insert map entries for specific devices with this compute
// capability. NVCC's choice of the libdevice library version is
// rather peculiar and depends on the CUDA version.
if (GpuArch == "compute_20") {
@@ -174,7 +218,7 @@ CudaInstallationDetector::CudaInstallationDetector(
// Check that we have found at least one libdevice that we can link in if
// -nocudalib hasn't been specified.
- if (LibDeviceMap.empty() && !Args.hasArg(options::OPT_nocudalib))
+ if (LibDeviceMap.empty() && !NoCudaLib)
continue;
IsValid = true;
@@ -231,6 +275,35 @@ void CudaInstallationDetector::print(raw_ostream &OS) const {
<< CudaVersionToString(Version) << "\n";
}
+namespace {
+/// Debug info kind.
+enum DebugInfoKind {
+ NoDebug, ///< No debug info.
+ LineTableOnly, ///< Line tables only.
+ FullDebug ///< Full debug info.
+};
+} // anonymous namespace
+
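+// Illustrative inputs and the DebugInfoKind chosen below (standard driver
+// flag spellings):
+//   -O0 -g             -> FullDebug (device debug defaults on at -O0)
+//   -O2 -g             -> NoDebug unless --cuda-noopt-device-debug is given
+//   -gline-tables-only -> LineTableOnly
+//   -g0 / -ggdb0       -> NoDebug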
+static DebugInfoKind mustEmitDebugInfo(const ArgList &Args) {
+ Arg *A = Args.getLastArg(options::OPT_O_Group);
+ if (Args.hasFlag(options::OPT_cuda_noopt_device_debug,
+ options::OPT_no_cuda_noopt_device_debug,
+ !A || A->getOption().matches(options::OPT_O0))) {
+ if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ const Option &Opt = A->getOption();
+ if (Opt.matches(options::OPT_gN_Group)) {
+ if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
+ return NoDebug;
+ if (Opt.matches(options::OPT_gline_tables_only) ||
+ Opt.matches(options::OPT_ggdb1))
+ return LineTableOnly;
+ }
+ return FullDebug;
+ }
+ }
+ return NoDebug;
+}
+
void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -262,8 +335,8 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-m64" : "-m32");
- if (Args.hasFlag(options::OPT_cuda_noopt_device_debug,
- options::OPT_no_cuda_noopt_device_debug, false)) {
+ DebugInfoKind DIKind = mustEmitDebugInfo(Args);
+ if (DIKind == FullDebug) {
// ptxas does not accept -g option if optimization is enabled, so
// we ignore the compiler's -O* options if we want debug info.
CmdArgs.push_back("-g");
@@ -299,6 +372,8 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// to no optimizations, but ptxas's default is -O3.
CmdArgs.push_back("-O0");
}
+ if (DIKind == LineTableOnly)
+ CmdArgs.push_back("-lineinfo");
// Pass -v to ptxas if it was passed to the driver.
if (Args.hasArg(options::OPT_v))
@@ -314,11 +389,17 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto& A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
CmdArgs.push_back(Args.MakeArgString(A));
- // In OpenMP we need to generate relocatable code.
- if (JA.isOffloading(Action::OFK_OpenMP) &&
- Args.hasFlag(options::OPT_fopenmp_relocatable_target,
- options::OPT_fnoopenmp_relocatable_target,
- /*Default=*/ true))
+ bool Relocatable = false;
+ if (JA.isOffloading(Action::OFK_OpenMP))
+ // In OpenMP we need to generate relocatable code.
+ Relocatable = Args.hasFlag(options::OPT_fopenmp_relocatable_target,
+ options::OPT_fnoopenmp_relocatable_target,
+ /*Default=*/true);
+ else if (JA.isOffloading(Action::OFK_Cuda))
+ Relocatable = Args.hasFlag(options::OPT_fcuda_rdc,
+ options::OPT_fno_cuda_rdc, /*Default=*/false);
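+    // -fcuda-rdc requests relocatable device code, which ptxas only
+    // produces when given -c.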
+
+ if (Relocatable)
CmdArgs.push_back("-c");
const char *Exec;
@@ -329,6 +410,22 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
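+
+// Illustrative flag combinations handled by shouldIncludePTX below (the
+// last matching value wins for a given architecture):
+//   --cuda-include-ptx=all --no-cuda-include-ptx=sm_70
+//     -> embed PTX for every arch except sm_70
+//   --no-cuda-include-ptx=all --cuda-include-ptx=sm_35
+//     -> embed PTX only for sm_35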
+static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
+ bool includePTX = true;
+ for (Arg *A : Args) {
+ if (!(A->getOption().matches(options::OPT_cuda_include_ptx_EQ) ||
+ A->getOption().matches(options::OPT_no_cuda_include_ptx_EQ)))
+ continue;
+ A->claim();
+ const StringRef ArchStr = A->getValue();
+ if (ArchStr == "all" || ArchStr == gpu_arch) {
+ includePTX = A->getOption().matches(options::OPT_cuda_include_ptx_EQ);
+ continue;
+ }
+ }
+ return includePTX;
+}
+
// All inputs to this linker must be from CudaDeviceActions, as we need to look
// at the Inputs' Actions in order to figure out which GPU architecture they
// correspond to.
@@ -346,6 +443,8 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
CmdArgs.push_back(Args.MakeArgString("--create"));
CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
+ if (mustEmitDebugInfo(Args) == FullDebug)
+ CmdArgs.push_back("-g");
for (const auto& II : Inputs) {
auto *A = II.getAction();
@@ -356,6 +455,9 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
"Device action expected to have associated a GPU architecture!");
CudaArch gpu_arch = StringToCudaArch(gpu_arch_str);
+ if (II.getType() == types::TY_PP_Asm &&
+ !shouldIncludePTX(Args, gpu_arch_str))
+ continue;
// We need to pass an Arch of the form "sm_XX" for cubin files and
// "compute_XX" for ptx.
const char *Arch =
@@ -394,7 +496,7 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Output.getFilename());
} else
assert(Output.isNothing() && "Invalid output.");
- if (Args.hasArg(options::OPT_g_Flag))
+ if (mustEmitDebugInfo(Args) == FullDebug)
CmdArgs.push_back("-g");
if (Args.hasArg(options::OPT_v))
@@ -499,6 +601,10 @@ void CudaToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
+
+ if (DriverArgs.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc,
+ false))
+ CC1Args.push_back("-fcuda-rdc");
}
if (DriverArgs.hasArg(options::OPT_nocudalib))
@@ -518,16 +624,58 @@ void CudaToolChain::addClangTargetOptions(
CC1Args.push_back("-mlink-cuda-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
- if (CudaInstallation.version() >= CudaVersion::CUDA_90) {
- // CUDA-9 uses new instructions that are only available in PTX6.0
- CC1Args.push_back("-target-feature");
- CC1Args.push_back("+ptx60");
- } else {
- // Libdevice in CUDA-7.0 requires PTX version that's more recent
- // than LLVM defaults to. Use PTX4.2 which is the PTX version that
- // came with CUDA-7.0.
- CC1Args.push_back("-target-feature");
- CC1Args.push_back("+ptx42");
+ // Libdevice in CUDA-7.0 requires PTX version that's more recent than LLVM
+ // defaults to. Use PTX4.2 by default, which is the PTX version that came with
+ // CUDA-7.0.
+ const char *PtxFeature = "+ptx42";
+ if (CudaInstallation.version() >= CudaVersion::CUDA_91) {
+ // CUDA-9.1 uses new instructions that are only available in PTX6.1+
+ PtxFeature = "+ptx61";
+ } else if (CudaInstallation.version() >= CudaVersion::CUDA_90) {
+ // CUDA-9.0 uses new instructions that are only available in PTX6.0+
+ PtxFeature = "+ptx60";
+ }
+ CC1Args.append({"-target-feature", PtxFeature});
+ if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
+ options::OPT_fno_cuda_short_ptr, false))
+ CC1Args.append({"-mllvm", "--nvptx-short-ptr"});
+
+ if (DeviceOffloadingKind == Action::OFK_OpenMP) {
+ SmallVector<StringRef, 8> LibraryPaths;
+ // Add path to lib and/or lib64 folders.
+ SmallString<256> DefaultLibPath =
+ llvm::sys::path::parent_path(getDriver().Dir);
+ llvm::sys::path::append(DefaultLibPath,
+ Twine("lib") + CLANG_LIBDIR_SUFFIX);
+ LibraryPaths.emplace_back(DefaultLibPath.c_str());
+
+ // Add user defined library paths from LIBRARY_PATH.
+ llvm::Optional<std::string> LibPath =
+ llvm::sys::Process::GetEnv("LIBRARY_PATH");
+ if (LibPath) {
+ SmallVector<StringRef, 8> Frags;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ llvm::SplitString(*LibPath, Frags, EnvPathSeparatorStr);
+ for (StringRef Path : Frags)
+ LibraryPaths.emplace_back(Path.trim());
+ }
+
+ std::string LibOmpTargetName =
+ "libomptarget-nvptx-" + GpuArch.str() + ".bc";
+ bool FoundBCLibrary = false;
+ for (StringRef LibraryPath : LibraryPaths) {
+ SmallString<128> LibOmpTargetFile(LibraryPath);
+ llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
+ if (llvm::sys::fs::exists(LibOmpTargetFile)) {
+ CC1Args.push_back("-mlink-cuda-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
+ FoundBCLibrary = true;
+ break;
+ }
+ }
+ if (!FoundBCLibrary)
+ getDriver().Diag(diag::warn_drv_omp_offload_target_missingbcruntime)
+ << LibOmpTargetName;
}
}
diff --git a/lib/Driver/ToolChains/Cuda.h b/lib/Driver/ToolChains/Cuda.h
index 3d08cec1643e..99d5a4a628ce 100644
--- a/lib/Driver/ToolChains/Cuda.h
+++ b/lib/Driver/ToolChains/Cuda.h
@@ -11,14 +11,14 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CUDA_H
#include "clang/Basic/Cuda.h"
-#include "clang/Basic/VersionTuple.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Multilib.h"
-#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/VersionTuple.h"
#include <set>
#include <vector>
@@ -49,30 +49,30 @@ public:
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const;
- /// \brief Emit an error if Version does not support the given Arch.
+ /// Emit an error if Version does not support the given Arch.
///
/// If either Version or Arch is unknown, does not emit an error. Emits at
/// most one error per Arch.
void CheckCudaVersionSupportsArch(CudaArch Arch) const;
- /// \brief Check whether we detected a valid Cuda install.
+ /// Check whether we detected a valid Cuda install.
bool isValid() const { return IsValid; }
- /// \brief Print information about the detected CUDA installation.
+ /// Print information about the detected CUDA installation.
void print(raw_ostream &OS) const;
- /// \brief Get the detected Cuda install's version.
+ /// Get the detected Cuda install's version.
CudaVersion version() const { return Version; }
- /// \brief Get the detected Cuda installation path.
+ /// Get the detected Cuda installation path.
StringRef getInstallPath() const { return InstallPath; }
- /// \brief Get the detected path to Cuda's bin directory.
+ /// Get the detected path to Cuda's bin directory.
StringRef getBinPath() const { return BinPath; }
- /// \brief Get the detected Cuda Include path.
+ /// Get the detected Cuda Include path.
StringRef getIncludePath() const { return IncludePath; }
- /// \brief Get the detected Cuda library path.
+ /// Get the detected Cuda library path.
StringRef getLibPath() const { return LibPath; }
- /// \brief Get the detected Cuda device library path.
+ /// Get the detected Cuda device library path.
StringRef getLibDevicePath() const { return LibDevicePath; }
- /// \brief Get libdevice file for given architecture
+ /// Get libdevice file for given architecture
std::string getLibDeviceFile(StringRef Gpu) const {
return LibDeviceMap.lookup(Gpu);
}
@@ -115,7 +115,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
public:
OpenMPLinker(const ToolChain &TC)
- : Tool("NVPTX::OpenMPLinker", "fatbinary", TC, RF_Full, llvm::sys::WEM_UTF8,
+ : Tool("NVPTX::OpenMPLinker", "nvlink", TC, RF_Full, llvm::sys::WEM_UTF8,
"--options-file") {}
bool hasIntegratedCPP() const override { return false; }
@@ -180,6 +180,8 @@ public:
computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const override;
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+
const ToolChain &HostTC;
CudaInstallationDetector CudaInstallation;
diff --git a/lib/Driver/ToolChains/Darwin.cpp b/lib/Driver/ToolChains/Darwin.cpp
index 2250e82d9dbf..95ec8d64c2c7 100644
--- a/lib/Driver/ToolChains/Darwin.cpp
+++ b/lib/Driver/ToolChains/Darwin.cpp
@@ -175,7 +175,7 @@ bool darwin::Linker::NeedsTempPath(const InputInfoList &Inputs) const {
return false;
}
-/// \brief Pass -no_deduplicate to ld64 under certain conditions:
+/// Pass -no_deduplicate to ld64 under certain conditions:
///
/// - Either -O0 or -O1 is explicitly specified
/// - No -O option is specified *and* this is a compile+link (implicit -O0)
@@ -409,7 +409,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddLastArg(CmdArgs, options::OPT_Mach);
}
-/// \brief Determine whether we are linking the ObjC runtime.
+/// Determine whether we are linking the ObjC runtime.
static bool isObjCRuntimeLinked(const ArgList &Args) {
if (isObjCAutoRefCount(Args)) {
Args.ClaimAllArgs(options::OPT_fobjc_link_runtime);
@@ -452,7 +452,8 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// we follow suit for ease of comparison.
AddLinkArgs(C, Args, CmdArgs, Inputs);
- // For LTO, pass the name of the optimization record file.
+ // For LTO, pass the name of the optimization record file and other
+ // opt-remarks flags.
if (Args.hasFlag(options::OPT_fsave_optimization_record,
options::OPT_fno_save_optimization_record, false)) {
CmdArgs.push_back("-mllvm");
@@ -467,6 +468,26 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (getLastProfileUseArg(Args)) {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-lto-pass-remarks-with-hotness");
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
+ CmdArgs.push_back("-mllvm");
+ std::string Opt =
+ std::string("-lto-pass-remarks-hotness-threshold=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Opt));
+ }
+ }
+ }
+
+ // Propagate the -moutline flag to the linker in LTO.
+ if (Args.hasFlag(options::OPT_moutline, options::OPT_mno_outline, false)) {
+ if (getMachOToolChain().getMachOArchName(Args) == "arm64") {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-machine-outliner");
+
+ // Outline from linkonceodr functions by default in LTO.
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-linkonceodr-outlining");
}
}
@@ -895,13 +916,26 @@ unsigned DarwinClang::GetDefaultDwarfVersion() const {
return 4;
}
+SmallString<128> MachO::runtimeLibDir(bool IsEmbedded) const {
+ SmallString<128> Dir(getDriver().ResourceDir);
+ llvm::sys::path::append(
+ Dir, "lib", IsEmbedded ? "macho_embedded" : "darwin");
+ return Dir;
+}
+
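+// e.g. ("asan", /*Shared=*/true) on macOS ("osx" suffix) yields
+// "libclang_rt.asan_osx_dynamic.dylib"; Shared=false yields
+// "libclang_rt.asan_osx.a".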
+std::string Darwin::getFileNameForSanitizerLib(StringRef SanitizerName,
+ bool Shared) const {
+ return (Twine("libclang_rt.") + SanitizerName + "_" +
+ getOSLibraryNameSuffix() +
+ (Shared ? "_dynamic.dylib" : ".a")).str();
+}
+
void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
StringRef DarwinLibName,
RuntimeLinkOptions Opts) const {
- SmallString<128> Dir(getDriver().ResourceDir);
- llvm::sys::path::append(
- Dir, "lib", (Opts & RLO_IsEmbedded) ? "macho_embedded" : "darwin");
+ SmallString<128> Dir = runtimeLibDir(Opts & RLO_IsEmbedded);
SmallString<128> P(Dir);
llvm::sys::path::append(P, DarwinLibName);
@@ -979,6 +1013,8 @@ StringRef Darwin::getOSLibraryNameSuffix() const {
/// Check if the link command contains a symbol export directive.
static bool hasExportSymbolDirective(const ArgList &Args) {
for (Arg *A : Args) {
+ if (A->getOption().matches(options::OPT_exported__symbols__list))
+ return true;
if (!A->getOption().matches(options::OPT_Wl_COMMA) &&
!A->getOption().matches(options::OPT_Xlinker))
continue;
@@ -1008,7 +1044,6 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
// runtime, automatically export symbols necessary to implement some of the
// runtime's functionality.
if (hasExportSymbolDirective(Args)) {
- addExportedSymbol(CmdArgs, "_VPMergeHook");
addExportedSymbol(CmdArgs, "___llvm_profile_filename");
addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
addExportedSymbol(CmdArgs, "_lprofCurFilename");
@@ -1020,12 +1055,9 @@ void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args,
StringRef Sanitizer,
bool Shared) const {
auto RLO = RuntimeLinkOptions(RLO_AlwaysLink | (Shared ? RLO_AddRPath : 0U));
- AddLinkRuntimeLib(Args, CmdArgs,
- (Twine("libclang_rt.") + Sanitizer + "_" +
- getOSLibraryNameSuffix() +
- (Shared ? "_dynamic.dylib" : ".a"))
- .str(),
- RLO);
+ std::string SanitizerRelFilename =
+ getFileNameForSanitizerLib(Sanitizer, Shared);
+ AddLinkRuntimeLib(Args, CmdArgs, SanitizerRelFilename, RLO);
}
ToolChain::RuntimeLibType DarwinClang::GetRuntimeLibType(
@@ -1186,15 +1218,30 @@ struct DarwinPlatform {
DarwinEnvironmentKind getEnvironment() const { return Environment; }
+ void setEnvironment(DarwinEnvironmentKind Kind) {
+ Environment = Kind;
+ InferSimulatorFromArch = false;
+ }
+
StringRef getOSVersion() const {
if (Kind == OSVersionArg)
return Argument->getValue();
return OSVersion;
}
+ void setOSVersion(StringRef S) {
+ assert(Kind == TargetArg && "Unexpected kind!");
+ OSVersion = S;
+ }
+
+ bool hasOSVersion() const { return HasOSVersion; }
+
/// Returns true if the target OS was explicitly specified.
bool isExplicitlySpecified() const { return Kind <= DeploymentTargetEnv; }
+ /// Returns true if the simulator environment can be inferred from the arch.
+ bool canInferSimulatorFromArch() const { return InferSimulatorFromArch; }
+
/// Adds the -m<os>-version-min argument to the compiler invocation.
void addOSVersionMinArgument(DerivedArgList &Args, const OptTable &Opts) {
if (Argument)
@@ -1235,17 +1282,21 @@ struct DarwinPlatform {
llvm_unreachable("Unsupported Darwin Source Kind");
}
- static DarwinPlatform createFromTarget(llvm::Triple::OSType OS,
- StringRef OSVersion, Arg *A,
- llvm::Triple::EnvironmentType Env) {
- DarwinPlatform Result(TargetArg, getPlatformFromOS(OS), OSVersion, A);
- switch (Env) {
+ static DarwinPlatform createFromTarget(const llvm::Triple &TT,
+ StringRef OSVersion, Arg *A) {
+ DarwinPlatform Result(TargetArg, getPlatformFromOS(TT.getOS()), OSVersion,
+ A);
+ switch (TT.getEnvironment()) {
case llvm::Triple::Simulator:
Result.Environment = DarwinEnvironmentKind::Simulator;
break;
default:
break;
}
+ unsigned Major, Minor, Micro;
+ TT.getOSVersion(Major, Minor, Micro);
+ if (Major == 0)
+ Result.HasOSVersion = false;
return Result;
}
static DarwinPlatform createOSVersionArg(DarwinPlatformKind Platform,
@@ -1260,8 +1311,13 @@ struct DarwinPlatform {
return Result;
}
static DarwinPlatform createFromSDK(DarwinPlatformKind Platform,
- StringRef Value) {
- return DarwinPlatform(InferredFromSDK, Platform, Value);
+ StringRef Value,
+ bool IsSimulator = false) {
+ DarwinPlatform Result(InferredFromSDK, Platform, Value);
+ if (IsSimulator)
+ Result.Environment = DarwinEnvironmentKind::Simulator;
+ Result.InferSimulatorFromArch = false;
+ return Result;
}
static DarwinPlatform createFromArch(llvm::Triple::OSType OS,
StringRef Value) {
@@ -1295,6 +1351,7 @@ private:
DarwinPlatformKind Platform;
DarwinEnvironmentKind Environment = DarwinEnvironmentKind::NativeEnvironment;
std::string OSVersion;
+ bool HasOSVersion = true, InferSimulatorFromArch = true;
Arg *Argument;
StringRef EnvVarName;
};
@@ -1416,14 +1473,20 @@ Optional<DarwinPlatform> inferDeploymentTargetFromSDK(DerivedArgList &Args) {
if (StartVer != StringRef::npos && EndVer > StartVer) {
StringRef Version = SDK.slice(StartVer, EndVer + 1);
if (SDK.startswith("iPhoneOS") || SDK.startswith("iPhoneSimulator"))
- return DarwinPlatform::createFromSDK(Darwin::IPhoneOS, Version);
+ return DarwinPlatform::createFromSDK(
+ Darwin::IPhoneOS, Version,
+ /*IsSimulator=*/SDK.startswith("iPhoneSimulator"));
else if (SDK.startswith("MacOSX"))
return DarwinPlatform::createFromSDK(Darwin::MacOS,
getSystemOrSDKMacOSVersion(Version));
else if (SDK.startswith("WatchOS") || SDK.startswith("WatchSimulator"))
- return DarwinPlatform::createFromSDK(Darwin::WatchOS, Version);
+ return DarwinPlatform::createFromSDK(
+ Darwin::WatchOS, Version,
+ /*IsSimulator=*/SDK.startswith("WatchSimulator"));
else if (SDK.startswith("AppleTVOS") || SDK.startswith("AppleTVSimulator"))
- return DarwinPlatform::createFromSDK(Darwin::TvOS, Version);
+ return DarwinPlatform::createFromSDK(
+ Darwin::TvOS, Version,
+ /*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
}
return None;
}
@@ -1431,10 +1494,16 @@ Optional<DarwinPlatform> inferDeploymentTargetFromSDK(DerivedArgList &Args) {
std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
const Driver &TheDriver) {
unsigned Major, Minor, Micro;
+ llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
switch (OS) {
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
- if (!Triple.getMacOSXVersion(Major, Minor, Micro))
+    // If the triple does not specify an OS version and both host and
+    // target are macOS, use the host triple to infer the OS version.
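+    // e.g. "-target x86_64-apple-macos" built on a 10.13 host behaves
+    // like "-target x86_64-apple-macos10.13".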
+ if (Triple.isMacOSX() && SystemTriple.isMacOSX() &&
+ !Triple.getOSMajorVersion())
+ SystemTriple.getMacOSXVersion(Major, Minor, Micro);
+ else if (!Triple.getMacOSXVersion(Major, Minor, Micro))
TheDriver.Diag(diag::err_drv_invalid_darwin_version)
<< Triple.getOSName();
break;
@@ -1489,9 +1558,8 @@ Optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
Triple.getOS() == llvm::Triple::UnknownOS)
return None;
std::string OSVersion = getOSVersion(Triple.getOS(), Triple, TheDriver);
- return DarwinPlatform::createFromTarget(Triple.getOS(), OSVersion,
- Args.getLastArg(options::OPT_target),
- Triple.getEnvironment());
+ return DarwinPlatform::createFromTarget(Triple, OSVersion,
+ Args.getLastArg(options::OPT_target));
}
} // namespace
@@ -1537,12 +1605,20 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
(VersionTuple(TargetMajor, TargetMinor, TargetMicro) !=
VersionTuple(ArgMajor, ArgMinor, ArgMicro) ||
TargetExtra != ArgExtra))) {
- // Warn about -m<os>-version-min that doesn't match the OS version
- // that's specified in the target.
- std::string OSVersionArg = OSVersionArgTarget->getAsString(Args, Opts);
- std::string TargetArg = OSTarget->getAsString(Args, Opts);
- getDriver().Diag(clang::diag::warn_drv_overriding_flag_option)
- << OSVersionArg << TargetArg;
+ // Select the OS version from the -m<os>-version-min argument when
+ // the -target does not include an OS version.
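+      // e.g. "-target x86_64-apple-macos -mmacosx-version-min=10.12" now
+      // targets macOS 10.12 instead of emitting a mismatch warning.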
+ if (OSTarget->getPlatform() == OSVersionArgTarget->getPlatform() &&
+ !OSTarget->hasOSVersion()) {
+ OSTarget->setOSVersion(OSVersionArgTarget->getOSVersion());
+ } else {
+ // Warn about -m<os>-version-min that doesn't match the OS version
+ // that's specified in the target.
+ std::string OSVersionArg =
+ OSVersionArgTarget->getAsString(Args, Opts);
+ std::string TargetArg = OSTarget->getAsString(Args, Opts);
+ getDriver().Diag(clang::diag::warn_drv_overriding_flag_option)
+ << OSVersionArg << TargetArg;
+ }
}
}
} else {
@@ -1550,9 +1626,16 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
OSTarget = getDeploymentTargetFromOSVersionArg(Args, getDriver());
// If no deployment target was specified on the command line, check for
// environment defines.
- if (!OSTarget)
+ if (!OSTarget) {
OSTarget =
getDeploymentTargetFromEnvironmentVariables(getDriver(), getTriple());
+ if (OSTarget) {
+ // Don't infer simulator from the arch when the SDK is also specified.
+ Optional<DarwinPlatform> SDKTarget = inferDeploymentTargetFromSDK(Args);
+ if (SDKTarget)
+ OSTarget->setEnvironment(SDKTarget->getEnvironment());
+ }
+ }
// If there is no command-line argument to specify the Target version and
// no environment variable defined, see if we can set the default based
// on -isysroot.
@@ -1617,6 +1700,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
DarwinEnvironmentKind Environment = OSTarget->getEnvironment();
// Recognize iOS targets with an x86 architecture as the iOS simulator.
if (Environment == NativeEnvironment && Platform != MacOS &&
+ OSTarget->canInferSimulatorFromArch() &&
(getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64))
Environment = Simulator;
@@ -2211,24 +2295,43 @@ void Darwin::CheckObjCARC() const {
SanitizerMask Darwin::getSupportedSanitizers() const {
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
- Res |= SanitizerKind::Address;
- Res |= SanitizerKind::Leak;
- Res |= SanitizerKind::Fuzzer;
- Res |= SanitizerKind::FuzzerNoLink;
+
+ {
+ using namespace SanitizerKind;
+ assert(!(Res & (Address | Leak | Fuzzer | FuzzerNoLink | Thread)) &&
+ "Sanitizer is already registered as supported");
+ }
+
+ if (sanitizerRuntimeExists("asan"))
+ Res |= SanitizerKind::Address;
+ if (sanitizerRuntimeExists("lsan"))
+ Res |= SanitizerKind::Leak;
+ if (sanitizerRuntimeExists("fuzzer", /*Shared=*/false)) {
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
+ }
Res |= SanitizerKind::Function;
- if (isTargetMacOS()) {
- if (!isMacosxVersionLT(10, 9))
- Res |= SanitizerKind::Vptr;
+ if (isTargetMacOS() && !isMacosxVersionLT(10, 9))
+ Res |= SanitizerKind::Vptr;
+ if (isTargetMacOS())
Res |= SanitizerKind::SafeStack;
- if (IsX86_64)
- Res |= SanitizerKind::Thread;
- } else if (isTargetIOSSimulator() || isTargetTvOSSimulator()) {
- if (IsX86_64)
- Res |= SanitizerKind::Thread;
- }
+
+ if (sanitizerRuntimeExists("tsan") && IsX86_64 &&
+ (isTargetMacOS() || isTargetIOSSimulator() || isTargetTvOSSimulator()))
+ Res |= SanitizerKind::Thread;
+
return Res;
}
void Darwin::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
}
+
+bool Darwin::sanitizerRuntimeExists(StringRef SanitizerName,
+ bool Shared) const {
+ std::string RelName = getFileNameForSanitizerLib(SanitizerName, Shared);
+ SmallString<128> Dir = runtimeLibDir();
+ SmallString<128> AbsName(Dir);
+ llvm::sys::path::append(AbsName, RelName);
+ return getVFS().exists(AbsName);
+}
diff --git a/lib/Driver/ToolChains/Darwin.h b/lib/Driver/ToolChains/Darwin.h
index 87d553bd7e0b..eee6e966718b 100644
--- a/lib/Driver/ToolChains/Darwin.h
+++ b/lib/Driver/ToolChains/Darwin.h
@@ -130,6 +130,9 @@ protected:
Tool *buildLinker() const override;
Tool *getTool(Action::ActionClass AC) const override;
+ /// \return Directory to find the runtime library in.
+ SmallString<128> runtimeLibDir(bool IsEmbedded=false) const;
+
private:
mutable std::unique_ptr<tools::darwin::Lipo> Lipo;
mutable std::unique_ptr<tools::darwin::Dsymutil> Dsymutil;
@@ -251,7 +254,6 @@ public:
GetExceptionModel(const llvm::opt::ArgList &Args) const override {
return llvm::ExceptionHandling::None;
}
-
/// }
};
@@ -420,6 +422,11 @@ protected:
StringRef getPlatformFamily() const;
StringRef getOSLibraryNameSuffix() const;
+ /// \return Relative path to the filename for the library
+ /// containing the sanitizer \p SanitizerName.
+ std::string getFileNameForSanitizerLib(StringRef SanitizerName,
+ bool Shared = true) const;
+
public:
static StringRef getSDKName(StringRef isysroot);
@@ -473,6 +480,12 @@ public:
SanitizerMask getSupportedSanitizers() const override;
void printVerboseInfo(raw_ostream &OS) const override;
+
+private:
+ /// \return Whether the runtime corresponding to the given
+ /// sanitizer exists in the toolchain.
+ bool sanitizerRuntimeExists(StringRef SanitizerName,
+ bool Shared = true) const;
};
/// DarwinClang - The Darwin toolchain used by Clang.
diff --git a/lib/Driver/ToolChains/FreeBSD.cpp b/lib/Driver/ToolChains/FreeBSD.cpp
index dd0334b9c28b..c16eabf06961 100644
--- a/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/lib/Driver/ToolChains/FreeBSD.cpp
@@ -57,11 +57,10 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mabi");
CmdArgs.push_back(mips::getGnuCompatibleMipsABIName(ABIName).data());
- if (getToolChain().getArch() == llvm::Triple::mips ||
- getToolChain().getArch() == llvm::Triple::mips64)
- CmdArgs.push_back("-EB");
- else
+ if (getToolChain().getTriple().isLittleEndian())
CmdArgs.push_back("-EL");
+ else
+ CmdArgs.push_back("-EB");
if (Arg *A = Args.getLastArg(options::OPT_G)) {
StringRef v = A->getValue();
@@ -166,23 +165,45 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--enable-new-dtags");
}
- // When building 32-bit code on FreeBSD/amd64, we have to explicitly
- // instruct ld in the base system to link 32-bit code.
- if (Arch == llvm::Triple::x86) {
+ // Explicitly set the linker emulation for platforms that might not
+ // be the default emulation for the linker.
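+  // e.g. 32-bit x86 objects linked by the base-system ld on FreeBSD/amd64
+  // need "-m elf_i386_fbsd".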
+ switch (Arch) {
+ case llvm::Triple::x86:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386_fbsd");
- }
-
- if (Arch == llvm::Triple::ppc) {
+ break;
+ case llvm::Triple::ppc:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf32ppc_fbsd");
+ break;
+ case llvm::Triple::mips:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32btsmip_fbsd");
+ break;
+ case llvm::Triple::mipsel:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32ltsmip_fbsd");
+ break;
+ case llvm::Triple::mips64:
+ CmdArgs.push_back("-m");
+ if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ CmdArgs.push_back("elf32btsmipn32_fbsd");
+ else
+ CmdArgs.push_back("elf64btsmip_fbsd");
+ break;
+ case llvm::Triple::mips64el:
+ CmdArgs.push_back("-m");
+ if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ CmdArgs.push_back("elf32ltsmipn32_fbsd");
+ else
+ CmdArgs.push_back("elf64ltsmip_fbsd");
+ break;
+ default:
+ break;
}
if (Arg *A = Args.getLastArg(options::OPT_G)) {
- if (ToolChain.getArch() == llvm::Triple::mips ||
- ToolChain.getArch() == llvm::Triple::mipsel ||
- ToolChain.getArch() == llvm::Triple::mips64 ||
- ToolChain.getArch() == llvm::Triple::mips64el) {
+ if (ToolChain.getTriple().isMIPS()) {
StringRef v = A->getValue();
CmdArgs.push_back(Args.MakeArgString("-G" + v));
A->claim();
@@ -231,10 +252,14 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_r);
- if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -249,6 +274,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (NeedsSanitizerDeps)
linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ if (NeedsXRayDeps)
+ linkXRayRuntimeDeps(ToolChain, CmdArgs);
// FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
// the default system libraries. Just mimic this for now.
if (Args.hasArg(options::OPT_pg))
@@ -316,7 +343,7 @@ FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
// When targeting 32-bit platforms, look for '/usr/lib32/crt1.o' and fall
// back to '/usr/lib' if it doesn't exist.
- if ((Triple.getArch() == llvm::Triple::x86 ||
+ if ((Triple.getArch() == llvm::Triple::x86 || Triple.isMIPS32() ||
Triple.getArch() == llvm::Triple::ppc) &&
D.getVFS().exists(getDriver().SysRoot + "/usr/lib32/crt1.o"))
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib32");
@@ -381,8 +408,7 @@ bool FreeBSD::isPIEDefault() const { return getSanitizerArgs().requiresPIE(); }
SanitizerMask FreeBSD::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
- const bool IsMIPS64 = getTriple().getArch() == llvm::Triple::mips64 ||
- getTriple().getArch() == llvm::Triple::mips64el;
+ const bool IsMIPS64 = getTriple().isMIPS64();
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::Vptr;
@@ -391,7 +417,12 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::Thread;
}
if (IsX86 || IsX86_64) {
+ Res |= SanitizerKind::Function;
Res |= SanitizerKind::SafeStack;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
}
+ if (IsX86_64)
+ Res |= SanitizerKind::Memory;
return Res;
}
diff --git a/lib/Driver/ToolChains/Fuchsia.cpp b/lib/Driver/ToolChains/Fuchsia.cpp
index 269d34d18f1e..54c34ff159b1 100644
--- a/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/lib/Driver/ToolChains/Fuchsia.cpp
@@ -100,17 +100,31 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
+
addSanitizerRuntimes(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-Bdynamic");
if (D.CCCIsCXX()) {
- if (ToolChain.ShouldLinkCXXStdlib(Args))
+ if (ToolChain.ShouldLinkCXXStdlib(Args)) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ }
CmdArgs.push_back("-lm");
}
@@ -131,21 +145,6 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
/// Fuchsia - Fuchsia tool chain which can call as(1) and ld(1) directly.
-static std::string normalizeTriple(llvm::Triple Triple) {
- SmallString<64> T;
- T += Triple.getArchName();
- T += "-";
- T += Triple.getOSName();
- return T.str();
-}
-
-static std::string getTargetDir(const Driver &D,
- llvm::Triple Triple) {
- SmallString<128> P(llvm::sys::path::parent_path(D.Dir));
- llvm::sys::path::append(P, "lib", normalizeTriple(Triple));
- return P.str();
-}
-
Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args) {
@@ -153,10 +152,6 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
if (getDriver().getInstalledDir() != D.Dir)
getProgramPaths().push_back(D.Dir);
- SmallString<128> P(getTargetDir(D, getTriple()));
- llvm::sys::path::append(P, "lib");
- getFilePaths().push_back(P.str());
-
if (!D.SysRoot.empty()) {
SmallString<128> P(D.SysRoot);
llvm::sys::path::append(P, "lib");
@@ -167,8 +162,7 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
std::string Fuchsia::ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const {
llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
- Triple.setTriple(normalizeTriple(Triple));
- return Triple.getTriple();
+ return (Triple.getArchName() + "-" + Triple.getOSName()).str();
}
Tool *Fuchsia::buildLinker() const {
@@ -251,7 +245,7 @@ void Fuchsia::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
- SmallString<128> P(getTargetDir(getDriver(), getTriple()));
+ SmallString<128> P(getDriver().ResourceDir);
llvm::sys::path::append(P, "include", "c++", "v1");
addSystemInclude(DriverArgs, CC1Args, P.str());
break;
@@ -267,8 +261,6 @@ void Fuchsia::AddCXXStdlibLibArgs(const ArgList &Args,
switch (GetCXXStdlibType(Args)) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
- CmdArgs.push_back("-lc++abi");
- CmdArgs.push_back("-lunwind");
break;
case ToolChain::CST_Libstdcxx:
@@ -278,8 +270,14 @@ void Fuchsia::AddCXXStdlibLibArgs(const ArgList &Args,
SanitizerMask Fuchsia::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
- Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
return Res;
}
+
+SanitizerMask Fuchsia::getDefaultSanitizers() const {
+ return SanitizerKind::SafeStack;
+}
diff --git a/lib/Driver/ToolChains/Fuchsia.h b/lib/Driver/ToolChains/Fuchsia.h
index 6f438deca7ff..e61eddc2aad1 100644
--- a/lib/Driver/ToolChains/Fuchsia.h
+++ b/lib/Driver/ToolChains/Fuchsia.h
@@ -60,10 +60,15 @@ public:
return llvm::DebuggerKind::GDB;
}
+ unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
+ return 2; // SSPStrong
+ }
+
std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
types::ID InputType) const override;
SanitizerMask getSupportedSanitizers() const override;
+ SanitizerMask getDefaultSanitizers() const override;
RuntimeLibType
GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp
index 7845781f12c4..2c83598f3d77 100644
--- a/lib/Driver/ToolChains/Gnu.cpp
+++ b/lib/Driver/ToolChains/Gnu.cpp
@@ -8,13 +8,14 @@
//===----------------------------------------------------------------------===//
#include "Gnu.h"
-#include "Linux.h"
#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
+#include "Arch/RISCV.h"
#include "Arch/Sparc.h"
#include "Arch/SystemZ.h"
#include "CommonArgs.h"
+#include "Linux.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#include "clang/Driver/Compilation.h"
@@ -84,6 +85,13 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
A->getOption().matches(options::OPT_W_Group))
continue;
+ // Don't forward -mno-unaligned-access since GCC doesn't understand
+ // it and because it doesn't affect the assembly or link steps.
+ if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) &&
+ (A->getOption().matches(options::OPT_munaligned_access) ||
+ A->getOption().matches(options::OPT_mno_unaligned_access)))
+ continue;
+
A->render(Args, CmdArgs);
}
}
@@ -220,35 +228,6 @@ void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
-static bool addXRayRuntime(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- // Do not add the XRay runtime to shared libraries.
- if (Args.hasArg(options::OPT_shared))
- return false;
-
- if (Args.hasFlag(options::OPT_fxray_instrument,
- options::OPT_fnoxray_instrument, false)) {
- CmdArgs.push_back("-whole-archive");
- CmdArgs.push_back(TC.getCompilerRTArgString(Args, "xray", false));
- CmdArgs.push_back("-no-whole-archive");
- return true;
- }
-
- return false;
-}
-
-static void linkXRayRuntimeDeps(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- CmdArgs.push_back("--no-as-needed");
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-lrt");
- CmdArgs.push_back("-lm");
-
- if (TC.getTriple().getOS() != llvm::Triple::FreeBSD &&
- TC.getTriple().getOS() != llvm::Triple::NetBSD)
- CmdArgs.push_back("-ldl");
-}
-
static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
switch (T.getArch()) {
case llvm::Triple::x86:
@@ -271,6 +250,10 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
return "elf64ppc";
case llvm::Triple::ppc64le:
return "elf64lppc";
+ case llvm::Triple::riscv32:
+ return "elf32lriscv";
+ case llvm::Triple::riscv64:
+ return "elf64lriscv";
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
return "elf32_sparc";
@@ -300,7 +283,8 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
}
static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static))
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_r))
return false;
Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
@@ -373,9 +357,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto &Opt : ToolChain.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
- if (!Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("--eh-frame-hdr");
- }
+ CmdArgs.push_back("--eh-frame-hdr");
if (const char *LDMOption = getLDMOption(ToolChain.getTriple(), Args)) {
CmdArgs.push_back("-m");
@@ -453,8 +435,11 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
@@ -490,7 +475,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
if (NeedsXRayDeps)
- linkXRayRuntimeDeps(ToolChain, Args, CmdArgs);
+ linkXRayRuntimeDeps(ToolChain, CmdArgs);
bool WantPthread = Args.hasArg(options::OPT_pthread) ||
Args.hasArg(options::OPT_pthreads);
@@ -550,6 +535,10 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Add OpenMP offloading linker script args if required.
AddOpenMPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA);
+ // Add HIP offloading linker script args if required.
+ AddHIPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA,
+ *this);
+
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
@@ -624,6 +613,18 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
break;
}
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64: {
+ StringRef ABIName = riscv::getRISCVABI(Args, getToolChain().getTriple());
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(ABIName.data());
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ StringRef MArch = A->getValue();
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(MArch.data());
+ }
+ break;
+ }
case llvm::Triple::sparc:
case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
@@ -706,11 +707,10 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
if (ABIName != "64" && !Args.hasArg(options::OPT_mno_abicalls))
CmdArgs.push_back("-call_nonpic");
- if (getToolChain().getArch() == llvm::Triple::mips ||
- getToolChain().getArch() == llvm::Triple::mips64)
- CmdArgs.push_back("-EB");
- else
+ if (getToolChain().getTriple().isLittleEndian())
CmdArgs.push_back("-EL");
+ else
+ CmdArgs.push_back("-EB");
if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) {
if (StringRef(A->getValue()) == "2008")
@@ -834,14 +834,6 @@ static bool isArmOrThumbArch(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb;
}
-static bool isMips32(llvm::Triple::ArchType Arch) {
- return Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel;
-}
-
-static bool isMips64(llvm::Triple::ArchType Arch) {
- return Arch == llvm::Triple::mips64 || Arch == llvm::Triple::mips64el;
-}
-
static bool isMipsEL(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::mipsel || Arch == llvm::Triple::mips64el;
}
@@ -856,6 +848,10 @@ static bool isMicroMips(const ArgList &Args) {
return A && A->getOption().matches(options::OPT_mmicromips);
}
+static bool isRISCV(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64;
+}
+
static Multilib makeMultilib(StringRef commonSuffix) {
return Multilib(commonSuffix, commonSuffix, commonSuffix);
}
@@ -1300,8 +1296,8 @@ bool clang::driver::findMIPSMultilibs(const Driver &D,
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
Multilib::flags_list Flags;
- addMultilibFlag(isMips32(TargetArch), "m32", Flags);
- addMultilibFlag(isMips64(TargetArch), "m64", Flags);
+ addMultilibFlag(TargetTriple.isMIPS32(), "m32", Flags);
+ addMultilibFlag(TargetTriple.isMIPS64(), "m64", Flags);
addMultilibFlag(isMips16(Args), "mips16", Flags);
addMultilibFlag(CPUName == "mips32", "march=mips32", Flags);
addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" ||
@@ -1401,11 +1397,48 @@ static void findAndroidArmMultilibs(const Driver &D,
Result.Multilibs = AndroidArmMultilibs;
}
+static void findRISCVMultilibs(const Driver &D,
+ const llvm::Triple &TargetTriple, StringRef Path,
+ const ArgList &Args, DetectedMultilibs &Result) {
+
+ FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
+ Multilib Ilp32 = makeMultilib("lib32/ilp32").flag("+m32").flag("+mabi=ilp32");
+ Multilib Ilp32f =
+ makeMultilib("lib32/ilp32f").flag("+m32").flag("+mabi=ilp32f");
+ Multilib Ilp32d =
+ makeMultilib("lib32/ilp32d").flag("+m32").flag("+mabi=ilp32d");
+ Multilib Lp64 = makeMultilib("lib64/lp64").flag("+m64").flag("+mabi=lp64");
+ Multilib Lp64f = makeMultilib("lib64/lp64f").flag("+m64").flag("+mabi=lp64f");
+ Multilib Lp64d = makeMultilib("lib64/lp64d").flag("+m64").flag("+mabi=lp64d");
+ MultilibSet RISCVMultilibs =
+ MultilibSet()
+ .Either({Ilp32, Ilp32f, Ilp32d, Lp64, Lp64f, Lp64d})
+ .FilterOut(NonExistent);
+
+ Multilib::flags_list Flags;
+ bool IsRV64 = TargetTriple.getArch() == llvm::Triple::riscv64;
+ StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
+
+ addMultilibFlag(!IsRV64, "m32", Flags);
+ addMultilibFlag(IsRV64, "m64", Flags);
+ addMultilibFlag(ABIName == "ilp32", "mabi=ilp32", Flags);
+ addMultilibFlag(ABIName == "ilp32f", "mabi=ilp32f", Flags);
+ addMultilibFlag(ABIName == "ilp32d", "mabi=ilp32d", Flags);
+ addMultilibFlag(ABIName == "lp64", "mabi=lp64", Flags);
+ addMultilibFlag(ABIName == "lp64f", "mabi=lp64f", Flags);
+ addMultilibFlag(ABIName == "lp64d", "mabi=lp64d", Flags);
+
+ if (RISCVMultilibs.select(Flags, Result.SelectedMultilib))
+ Result.Multilibs = RISCVMultilibs;
+}
+
static bool findBiarchMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef Path, const ArgList &Args,
bool NeedsBiarchSuffix,
DetectedMultilibs &Result) {
+ Multilib Default;
+
// Some versions of SUSE and Fedora on ppc64 put 32-bit libs
// in what would normally be GCCInstallPath and put the 64-bit
// libs in a subdirectory named 64. The simple logic we follow is that
@@ -1413,10 +1446,26 @@ static bool findBiarchMultilibs(const Driver &D,
// we use that. If not, and if not a biarch triple alias, we look for
// crtbegin.o without the subdirectory.
- Multilib Default;
+ StringRef Suff64 = "/64";
+ // Solaris uses platform-specific suffixes instead of /64.
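+  // e.g. 64-bit x86 libs live in an "amd64" subdirectory of the GCC
+  // install rather than "64".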
+ if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ Suff64 = "/amd64";
+ break;
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ Suff64 = "/sparcv9";
+ break;
+ default:
+ break;
+ }
+ }
+
Multilib Alt64 = Multilib()
- .gccSuffix("/64")
- .includeSuffix("/64")
+ .gccSuffix(Suff64)
+ .includeSuffix(Suff64)
.flag("-m32")
.flag("+m64")
.flag("-mx32");
@@ -1491,7 +1540,7 @@ static bool findBiarchMultilibs(const Driver &D,
/// all subcommands; this relies on gcc translating the majority of
/// command line options.
-/// \brief Less-than for GCCVersion, implementing a Strict Weak Ordering.
+/// Less-than for GCCVersion, implementing a Strict Weak Ordering.
bool Generic_GCC::GCCVersion::isOlderThan(int RHSMajor, int RHSMinor,
int RHSPatch,
StringRef RHSPatchSuffix) const {
@@ -1525,7 +1574,7 @@ bool Generic_GCC::GCCVersion::isOlderThan(int RHSMajor, int RHSMinor,
return false;
}
-/// \brief Parse a GCCVersion object out of a string of text.
+/// Parse a GCCVersion object out of a string of text.
///
/// This is the primary means of forming GCCVersion objects.
/*static*/
@@ -1540,21 +1589,29 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
GoodVersion.MajorStr = First.first.str();
if (First.second.empty())
return GoodVersion;
- if (Second.first.getAsInteger(10, GoodVersion.Minor) || GoodVersion.Minor < 0)
+ StringRef MinorStr = Second.first;
+ if (Second.second.empty()) {
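+    // With only two components the minor number may carry the patch
+    // suffix directly, e.g. "4.4-patched" -> Minor 4, suffix "-patched".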
+ if (size_t EndNumber = MinorStr.find_first_not_of("0123456789")) {
+ GoodVersion.PatchSuffix = MinorStr.substr(EndNumber);
+ MinorStr = MinorStr.slice(0, EndNumber);
+ }
+ }
+ if (MinorStr.getAsInteger(10, GoodVersion.Minor) || GoodVersion.Minor < 0)
return BadVersion;
- GoodVersion.MinorStr = Second.first.str();
+ GoodVersion.MinorStr = MinorStr.str();
// First look for a number prefix and parse that if present. Otherwise just
// stash the entire patch string in the suffix, and leave the number
// unspecified. This covers versions strings such as:
// 5 (handled above)
// 4.4
+ // 4.4-patched
// 4.4.0
// 4.4.x
// 4.4.2-rc4
// 4.4.x-patched
// And retains any patch number it finds.
- StringRef PatchText = GoodVersion.PatchSuffix = Second.second.str();
+ StringRef PatchText = Second.second;
if (!PatchText.empty()) {
if (size_t EndNumber = PatchText.find_first_not_of("0123456789")) {
// Try to parse the number and any suffix.
@@ -1575,7 +1632,7 @@ static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
return GCC_INSTALL_PREFIX;
}
-/// \brief Initialize a GCCInstallationDetector from the driver.
+/// Initialize a GCCInstallationDetector from the driver.
///
/// This performs all of the autodetection and sets up the various paths.
/// Once constructed, a GCCInstallationDetector is essentially immutable.
@@ -1613,21 +1670,17 @@ void Generic_GCC::GCCInstallationDetector::init(
// If we have a SysRoot, try that first.
if (!D.SysRoot.empty()) {
Prefixes.push_back(D.SysRoot);
- Prefixes.push_back(D.SysRoot + "/usr");
+ AddDefaultGCCPrefixes(TargetTriple, Prefixes, D.SysRoot);
}
// Then look for gcc installed alongside clang.
Prefixes.push_back(D.InstalledDir + "/..");
- // Then look for distribution supplied gcc installations.
+ // Next, look for prefix(es) that correspond to distribution-supplied gcc
+ // installations.
if (D.SysRoot.empty()) {
- // Look for RHEL devtoolsets.
- Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-4/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-3/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
- // And finally in /usr.
- Prefixes.push_back("/usr");
+ // Typically /usr.
+ AddDefaultGCCPrefixes(TargetTriple, Prefixes, D.SysRoot);
}
}
@@ -1636,18 +1689,21 @@ void Generic_GCC::GCCInstallationDetector::init(
// in /usr. This avoids accidentally enforcing the system GCC version
// when using a custom toolchain.
if (GCCToolchainDir == "" || GCCToolchainDir == D.SysRoot + "/usr") {
- for (StringRef CandidateTriple : ExtraTripleAliases) {
- if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
- return;
- }
- for (StringRef CandidateTriple : CandidateTripleAliases) {
- if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
- return;
- }
- for (StringRef CandidateTriple : CandidateBiarchTripleAliases) {
- if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple, true))
- return;
- }
+ SmallVector<StringRef, 16> GentooTestTriples;
+ // Try to match an exact triple as target triple first.
+ // e.g. crossdev -S x86_64-gentoo-linux-gnu will install gcc libs for
+ // x86_64-gentoo-linux-gnu. But "clang -target x86_64-gentoo-linux-gnu"
+  // may pick the libraries for x86_64-pc-linux-gnu even when the exact
+  // matching triple x86_64-gentoo-linux-gnu is present.
+ GentooTestTriples.push_back(TargetTriple.str());
+ // Check rest of triples.
+ GentooTestTriples.append(ExtraTripleAliases.begin(),
+ ExtraTripleAliases.end());
+ GentooTestTriples.append(CandidateTripleAliases.begin(),
+ CandidateTripleAliases.end());
+ if (ScanGentooConfigs(TargetTriple, Args, GentooTestTriples,
+ CandidateBiarchTripleAliases))
+ return;
}
// Loop over the various components which exist and select the best GCC
@@ -1660,6 +1716,9 @@ void Generic_GCC::GCCInstallationDetector::init(
const std::string LibDir = Prefix + Suffix.str();
if (!D.getVFS().exists(LibDir))
continue;
+ // Try to match the exact target triple first.
+ ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, TargetTriple.str());
+ // Try rest of possible triples.
for (StringRef Candidate : ExtraTripleAliases) // Try these first.
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
for (StringRef Candidate : CandidateTripleAliases)
@@ -1698,6 +1757,49 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
return false;
}
+void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
+ const llvm::Triple &TargetTriple, SmallVectorImpl<std::string> &Prefixes,
+ StringRef SysRoot) {
+ if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ // Solaris is a special case.
+ // The GCC installation is under
+ // /usr/gcc/<major>.<minor>/lib/gcc/<triple>/<major>.<minor>.<patch>/
+ // so we need to find those /usr/gcc/*/lib/gcc libdirs and go with
+ // /usr/gcc/<version> as a prefix.
+
+ std::string PrefixDir = SysRoot.str() + "/usr/gcc";
+ std::error_code EC;
+ for (vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
+
+ // Filter out obviously bad entries.
+ if (CandidateVersion.Major == -1 || CandidateVersion.isOlderThan(4, 1, 1))
+ continue;
+
+ std::string CandidatePrefix = PrefixDir + "/" + VersionText.str();
+ std::string CandidateLibPath = CandidatePrefix + "/lib/gcc";
+ if (!D.getVFS().exists(CandidateLibPath))
+ continue;
+
+ Prefixes.push_back(CandidatePrefix);
+ }
+ return;
+ }
+
+ // Non-Solaris is much simpler - most systems just go with "/usr".
+ if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
+ // Yet, still look for RHEL devtoolsets.
+ Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
+ Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
+ Prefixes.push_back("/opt/rh/devtoolset-4/root/usr");
+ Prefixes.push_back("/opt/rh/devtoolset-3/root/usr");
+ Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
+ }
+ Prefixes.push_back(SysRoot.str() + "/usr");
+}
+
/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
const llvm::Triple &TargetTriple, const llvm::Triple &BiarchTriple,
SmallVectorImpl<StringRef> &LibDirs,
@@ -1709,22 +1811,20 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
// lifetime or initialization issues.
static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
static const char *const AArch64Triples[] = {
- "aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-linux-android",
- "aarch64-redhat-linux", "aarch64-suse-linux"};
+ "aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
+ "aarch64-suse-linux"};
static const char *const AArch64beLibDirs[] = {"/lib"};
static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
"aarch64_be-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
- static const char *const ARMTriples[] = {"arm-linux-gnueabi",
- "arm-linux-androideabi"};
+ static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
"armv7hl-redhat-linux-gnueabi",
"armv6hl-suse-linux-gnueabi",
"armv7hl-suse-linux-gnueabi"};
static const char *const ARMebLibDirs[] = {"/lib"};
- static const char *const ARMebTriples[] = {"armeb-linux-gnueabi",
- "armeb-linux-androideabi"};
+ static const char *const ARMebTriples[] = {"armeb-linux-gnueabi"};
static const char *const ARMebHFTriples[] = {
"armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
@@ -1734,16 +1834,15 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
"x86_64-redhat-linux", "x86_64-suse-linux",
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
- "x86_64-slackware-linux", "x86_64-linux-android",
- "x86_64-unknown-linux"};
+ "x86_64-slackware-linux", "x86_64-unknown-linux",
+ "x86_64-amazon-linux"};
static const char *const X32LibDirs[] = {"/libx32"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
"i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu",
"i386-linux-gnu", "i386-redhat-linux6E", "i686-redhat-linux",
"i586-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
- "i486-slackware-linux", "i686-montavista-linux", "i686-linux-android",
- "i586-linux-gnu"};
+ "i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu"};
static const char *const MIPSLibDirs[] = {"/lib"};
static const char *const MIPSTriples[] = {"mips-linux-gnu", "mips-mti-linux",
@@ -1762,13 +1861,6 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"mips64el-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
"mips64el-linux-gnuabi64"};
- static const char *const MIPSELAndroidLibDirs[] = {"/lib", "/libr2",
- "/libr6"};
- static const char *const MIPSELAndroidTriples[] = {"mipsel-linux-android"};
- static const char *const MIPS64ELAndroidLibDirs[] = {"/lib64", "/lib",
- "/libr2", "/libr6"};
- static const char *const MIPS64ELAndroidTriples[] = {
- "mips64el-linux-android"};
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
@@ -1783,6 +1875,10 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
"powerpc64le-suse-linux", "ppc64le-redhat-linux"};
+ static const char *const RISCV32LibDirs[] = {"/lib", "/lib32"};
+ static const char *const RISCVTriples[] = {"riscv32-unknown-linux-gnu",
+ "riscv64-unknown-linux-gnu"};
+
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
static const char *const SPARCv8Triples[] = {"sparc-linux-gnu",
"sparcv8-linux-gnu"};
@@ -1795,17 +1891,109 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
"s390x-suse-linux", "s390x-redhat-linux"};
- // Solaris.
- static const char *const SolarisSPARCLibDirs[] = {"/gcc"};
- static const char *const SolarisSPARCTriples[] = {"sparc-sun-solaris2.11",
- "i386-pc-solaris2.11"};
using std::begin;
using std::end;
if (TargetTriple.getOS() == llvm::Triple::Solaris) {
- LibDirs.append(begin(SolarisSPARCLibDirs), end(SolarisSPARCLibDirs));
- TripleAliases.append(begin(SolarisSPARCTriples), end(SolarisSPARCTriples));
+ static const char *const SolarisLibDirs[] = {"/lib"};
+ static const char *const SolarisSparcV8Triples[] = {
+ "sparc-sun-solaris2.11", "sparc-sun-solaris2.12"};
+ static const char *const SolarisSparcV9Triples[] = {
+ "sparcv9-sun-solaris2.11", "sparcv9-sun-solaris2.12"};
+ static const char *const SolarisX86Triples[] = {"i386-pc-solaris2.11",
+ "i386-pc-solaris2.12"};
+ static const char *const SolarisX86_64Triples[] = {"x86_64-pc-solaris2.11",
+ "x86_64-pc-solaris2.12"};
+ LibDirs.append(begin(SolarisLibDirs), end(SolarisLibDirs));
+ BiarchLibDirs.append(begin(SolarisLibDirs), end(SolarisLibDirs));
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::x86:
+ TripleAliases.append(begin(SolarisX86Triples), end(SolarisX86Triples));
+ BiarchTripleAliases.append(begin(SolarisX86_64Triples),
+ end(SolarisX86_64Triples));
+ break;
+ case llvm::Triple::x86_64:
+ TripleAliases.append(begin(SolarisX86_64Triples),
+ end(SolarisX86_64Triples));
+ BiarchTripleAliases.append(begin(SolarisX86Triples),
+ end(SolarisX86Triples));
+ break;
+ case llvm::Triple::sparc:
+ TripleAliases.append(begin(SolarisSparcV8Triples),
+ end(SolarisSparcV8Triples));
+ BiarchTripleAliases.append(begin(SolarisSparcV9Triples),
+ end(SolarisSparcV9Triples));
+ break;
+ case llvm::Triple::sparcv9:
+ TripleAliases.append(begin(SolarisSparcV9Triples),
+ end(SolarisSparcV9Triples));
+ BiarchTripleAliases.append(begin(SolarisSparcV8Triples),
+ end(SolarisSparcV8Triples));
+ break;
+ default:
+ break;
+ }
+ return;
+ }
+
+ // Android targets should not use GNU/Linux tools or libraries.
+ if (TargetTriple.isAndroid()) {
+ static const char *const AArch64AndroidTriples[] = {
+ "aarch64-linux-android"};
+ static const char *const ARMAndroidTriples[] = {"arm-linux-androideabi"};
+ static const char *const MIPSELAndroidTriples[] = {"mipsel-linux-android"};
+ static const char *const MIPS64ELAndroidTriples[] = {
+ "mips64el-linux-android"};
+ static const char *const X86AndroidTriples[] = {"i686-linux-android"};
+ static const char *const X86_64AndroidTriples[] = {"x86_64-linux-android"};
+
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::aarch64:
+ LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
+ TripleAliases.append(begin(AArch64AndroidTriples),
+ end(AArch64AndroidTriples));
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
+ TripleAliases.append(begin(ARMAndroidTriples), end(ARMAndroidTriples));
+ break;
+ case llvm::Triple::mipsel:
+ LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ TripleAliases.append(begin(MIPSELAndroidTriples),
+ end(MIPSELAndroidTriples));
+ BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPS64ELAndroidTriples),
+ end(MIPS64ELAndroidTriples));
+ break;
+ case llvm::Triple::mips64el:
+ LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ TripleAliases.append(begin(MIPS64ELAndroidTriples),
+ end(MIPS64ELAndroidTriples));
+ BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSELAndroidTriples),
+ end(MIPSELAndroidTriples));
+ break;
+ case llvm::Triple::x86_64:
+ LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ TripleAliases.append(begin(X86_64AndroidTriples),
+ end(X86_64AndroidTriples));
+ BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ BiarchTripleAliases.append(begin(X86AndroidTriples),
+ end(X86AndroidTriples));
+ break;
+ case llvm::Triple::x86:
+ LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ TripleAliases.append(begin(X86AndroidTriples), end(X86AndroidTriples));
+ BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ BiarchTripleAliases.append(begin(X86_64AndroidTriples),
+ end(X86_64AndroidTriples));
+ break;
+ default:
+ break;
+ }
+
return;
}
@@ -1870,22 +2058,11 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
break;
case llvm::Triple::mipsel:
- if (TargetTriple.isAndroid()) {
- LibDirs.append(begin(MIPSELAndroidLibDirs), end(MIPSELAndroidLibDirs));
- TripleAliases.append(begin(MIPSELAndroidTriples),
- end(MIPSELAndroidTriples));
- BiarchLibDirs.append(begin(MIPS64ELAndroidLibDirs),
- end(MIPS64ELAndroidLibDirs));
- BiarchTripleAliases.append(begin(MIPS64ELAndroidTriples),
- end(MIPS64ELAndroidTriples));
-
- } else {
- LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
- TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
- BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
- }
+ LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
break;
case llvm::Triple::mips64:
LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
@@ -1894,23 +2071,11 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::mips64el:
- if (TargetTriple.isAndroid()) {
- LibDirs.append(begin(MIPS64ELAndroidLibDirs),
- end(MIPS64ELAndroidLibDirs));
- TripleAliases.append(begin(MIPS64ELAndroidTriples),
- end(MIPS64ELAndroidTriples));
- BiarchLibDirs.append(begin(MIPSELAndroidLibDirs),
- end(MIPSELAndroidLibDirs));
- BiarchTripleAliases.append(begin(MIPSELAndroidTriples),
- end(MIPSELAndroidTriples));
-
- } else {
- LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
- BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
- BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
- }
+ LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::ppc:
LibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
@@ -1928,6 +2093,12 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
LibDirs.append(begin(PPC64LELibDirs), end(PPC64LELibDirs));
TripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
break;
+ case llvm::Triple::riscv32:
+ LibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
+ BiarchLibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
+ TripleAliases.append(begin(RISCVTriples), end(RISCVTriples));
+ BiarchTripleAliases.append(begin(RISCVTriples), end(RISCVTriples));
+ break;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
LibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
@@ -1960,56 +2131,6 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
BiarchTripleAliases.push_back(BiarchTriple.str());
}
-void Generic_GCC::GCCInstallationDetector::scanLibDirForGCCTripleSolaris(
- const llvm::Triple &TargetArch, const llvm::opt::ArgList &Args,
- const std::string &LibDir, StringRef CandidateTriple,
- bool NeedsBiarchSuffix) {
- // Solaris is a special case. The GCC installation is under
- // /usr/gcc/<major>.<minor>/lib/gcc/<triple>/<major>.<minor>.<patch>/, so we
- // need to iterate twice.
- std::error_code EC;
- for (vfs::directory_iterator LI = D.getVFS().dir_begin(LibDir, EC), LE;
- !EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->getName());
- GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
-
- if (CandidateVersion.Major != -1) // Filter obviously bad entries.
- if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
- continue; // Saw this path before; no need to look at it again.
- if (CandidateVersion.isOlderThan(4, 1, 1))
- continue;
- if (CandidateVersion <= Version)
- continue;
-
- GCCInstallPath =
- LibDir + "/" + VersionText.str() + "/lib/gcc/" + CandidateTriple.str();
- if (!D.getVFS().exists(GCCInstallPath))
- continue;
-
- // If we make it here there has to be at least one GCC version, let's just
- // use the latest one.
- std::error_code EEC;
- for (vfs::directory_iterator
- LLI = D.getVFS().dir_begin(GCCInstallPath, EEC),
- LLE;
- !EEC && LLI != LLE; LLI = LLI.increment(EEC)) {
-
- StringRef SubVersionText = llvm::sys::path::filename(LLI->getName());
- GCCVersion CandidateSubVersion = GCCVersion::Parse(SubVersionText);
-
- if (CandidateSubVersion > Version)
- Version = CandidateSubVersion;
- }
-
- GCCTriple.setTriple(CandidateTriple);
-
- GCCInstallPath += "/" + Version.Text;
- GCCParentLibPath = GCCInstallPath + "/../../../../";
-
- IsValid = true;
- }
-}
-
bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef Path, bool NeedsBiarchSuffix) {
@@ -2022,9 +2143,11 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
if (isArmOrThumbArch(TargetArch) && TargetTriple.isAndroid()) {
// It should also work without multilibs in a simplified toolchain.
findAndroidArmMultilibs(D, TargetTriple, Path, Args, Detected);
- } else if (tools::isMipsArch(TargetArch)) {
+ } else if (TargetTriple.isMIPS()) {
if (!findMIPSMultilibs(D, TargetTriple, Path, Args, Detected))
return false;
+ } else if (isRISCV(TargetArch)) {
+ findRISCVMultilibs(D, TargetTriple, Path, Args, Detected);
} else if (!findBiarchMultilibs(D, TargetTriple, Path, Args,
NeedsBiarchSuffix, Detected)) {
return false;
@@ -2041,12 +2164,6 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
bool NeedsBiarchSuffix) {
- if (TargetTriple.getOS() == llvm::Triple::Solaris) {
- scanLibDirForGCCTripleSolaris(TargetTriple, Args, LibDir, CandidateTriple,
- NeedsBiarchSuffix);
- return;
- }
-
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
// Locations relative to the system lib directory where GCC's triple-specific
// directories might reside.
@@ -2059,31 +2176,33 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// Whether this library suffix is relevant for the triple.
bool Active;
} Suffixes[] = {
- // This is the normal place.
- {"gcc/" + CandidateTriple.str(), "../..", true},
-
- // Debian puts cross-compilers in gcc-cross.
- {"gcc-cross/" + CandidateTriple.str(), "../..", true},
-
- // The Freescale PPC SDK has the gcc libraries in
- // <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well. Only do
- // this on Freescale triples, though, since some systems put a *lot* of
- // files in that location, not just GCC installation data.
- {CandidateTriple.str(), "..",
- TargetTriple.getVendor() == llvm::Triple::Freescale},
-
- // Natively multiarch systems sometimes put the GCC triple-specific
- // directory within their multiarch lib directory, resulting in the
- // triple appearing twice.
- {CandidateTriple.str() + "/gcc/" + CandidateTriple.str(), "../../..", true},
-
- // Deal with cases (on Ubuntu) where the system architecture could be i386
- // but the GCC target architecture could be (say) i686.
- // FIXME: It may be worthwhile to generalize this and look for a second
- // triple.
- {"i386-linux-gnu/gcc/" + CandidateTriple.str(), "../../..",
- TargetArch == llvm::Triple::x86}
- };
+ // This is the normal place.
+ {"gcc/" + CandidateTriple.str(), "../..", true},
+
+ // Debian puts cross-compilers in gcc-cross.
+ {"gcc-cross/" + CandidateTriple.str(), "../..",
+ TargetTriple.getOS() != llvm::Triple::Solaris},
+
+ // The Freescale PPC SDK has the gcc libraries in
+ // <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well. Only do
+ // this on Freescale triples, though, since some systems put a *lot* of
+ // files in that location, not just GCC installation data.
+ {CandidateTriple.str(), "..",
+ TargetTriple.getVendor() == llvm::Triple::Freescale},
+
+ // Natively multiarch systems sometimes put the GCC triple-specific
+ // directory within their multiarch lib directory, resulting in the
+ // triple appearing twice.
+ {CandidateTriple.str() + "/gcc/" + CandidateTriple.str(), "../../..",
+ TargetTriple.getOS() != llvm::Triple::Solaris},
+
+ // Deal with cases (on Ubuntu) where the system architecture could be i386
+ // but the GCC target architecture could be (say) i686.
+ // FIXME: It may be worthwhile to generalize this and look for a second
+ // triple.
+ {"i386-linux-gnu/gcc/" + CandidateTriple.str(), "../../..",
+ (TargetArch == llvm::Triple::x86 &&
+ TargetTriple.getOS() != llvm::Triple::Solaris)}};
for (auto &Suffix : Suffixes) {
if (!Suffix.Active)
@@ -2121,6 +2240,22 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
}
}
+bool Generic_GCC::GCCInstallationDetector::ScanGentooConfigs(
+ const llvm::Triple &TargetTriple, const ArgList &Args,
+ const SmallVectorImpl<StringRef> &CandidateTriples,
+ const SmallVectorImpl<StringRef> &CandidateBiarchTriples) {
+ for (StringRef CandidateTriple : CandidateTriples) {
+ if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
+ return true;
+ }
+
+ for (StringRef CandidateTriple : CandidateBiarchTriples) {
+ if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple, true))
+ return true;
+ }
+ return false;
+}
+
bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef CandidateTriple, bool NeedsBiarchSuffix) {
@@ -2133,23 +2268,53 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
for (StringRef Line : Lines) {
Line = Line.trim();
// CURRENT=triple-version
- if (Line.consume_front("CURRENT=")) {
- const std::pair<StringRef, StringRef> ActiveVersion =
- Line.rsplit('-');
- // Note: Strictly speaking, we should be reading
- // /etc/env.d/gcc/${CURRENT} now. However, the file doesn't
- // contain anything new or especially useful to us.
- const std::string GentooPath = D.SysRoot + "/usr/lib/gcc/" +
- ActiveVersion.first.str() + "/" +
- ActiveVersion.second.str();
+ if (!Line.consume_front("CURRENT="))
+ continue;
+ // Process the config file pointed to by CURRENT.
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ConfigFile =
+ D.getVFS().getBufferForFile(D.SysRoot + "/etc/env.d/gcc/" +
+ Line.str());
+ std::pair<StringRef, StringRef> ActiveVersion = Line.rsplit('-');
+ // List of paths to scan for libraries.
+ SmallVector<StringRef, 4> GentooScanPaths;
+ // Scan the config file to find the installed GCC library paths.
+ // Typical content of the GCC config file:
+ // LDPATH="/usr/lib/gcc/x86_64-pc-linux-gnu/4.9.x:/usr/lib/gcc/
+ // (continued from previous line) x86_64-pc-linux-gnu/4.9.x/32"
+ // MANPATH="/usr/share/gcc-data/x86_64-pc-linux-gnu/4.9.x/man"
+ // INFOPATH="/usr/share/gcc-data/x86_64-pc-linux-gnu/4.9.x/info"
+ // STDCXX_INCDIR="/usr/lib/gcc/x86_64-pc-linux-gnu/4.9.x/include/g++-v4"
+ // We are looking for the paths listed in LDPATH=... .
+ if (ConfigFile) {
+ SmallVector<StringRef, 2> ConfigLines;
+ ConfigFile.get()->getBuffer().split(ConfigLines, "\n");
+ for (StringRef ConfLine : ConfigLines) {
+ ConfLine = ConfLine.trim();
+ if (ConfLine.consume_front("LDPATH=")) {
+ // Drop '"' from front and back if present.
+ ConfLine.consume_back("\"");
+ ConfLine.consume_front("\"");
+ // Get all paths separated by ':'.
+ ConfLine.split(GentooScanPaths, ':', -1, /*AllowEmpty*/ false);
+ }
+ }
+ }
+ // Test the path based on the version in /etc/env.d/gcc/config-{tuple}.
+ std::string basePath = "/usr/lib/gcc/" + ActiveVersion.first.str() + "/"
+ + ActiveVersion.second.str();
+ GentooScanPaths.push_back(StringRef(basePath));
+
+ // Scan all paths for GCC libraries.
+ for (const auto &GentooScanPath : GentooScanPaths) {
+ std::string GentooPath = D.SysRoot + std::string(GentooScanPath);
if (D.getVFS().exists(GentooPath + "/crtbegin.o")) {
if (!ScanGCCForMultilibs(TargetTriple, Args, GentooPath,
NeedsBiarchSuffix))
- return false;
+ continue;
Version = GCCVersion::Parse(ActiveVersion.second);
GCCInstallPath = GentooPath;
- GCCParentLibPath = GentooPath + "/../../..";
+ GCCParentLibPath = GentooPath + std::string("/../../..");
GCCTriple.setTriple(ActiveVersion.first);
IsValid = true;
return true;
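
The LDPATH handling above reduces to stripping the surrounding quotes and
splitting on ':'. A minimal standalone sketch of that parsing, written with
plain std::string for illustration rather than the llvm::StringRef API the
patch uses:

    #include <string>
    #include <vector>

    // Parse a Gentoo gcc config line such as
    //   LDPATH="/usr/lib/gcc/x86_64-pc-linux-gnu/4.9.x:/usr/lib/gcc/..."
    // into its component paths; returns an empty vector for other lines.
    static std::vector<std::string> parseLDPath(std::string Line) {
      const std::string Key = "LDPATH=";
      if (Line.compare(0, Key.size(), Key) != 0)
        return {};
      Line.erase(0, Key.size());
      // Drop '"' from front and back if present.
      if (!Line.empty() && Line.front() == '"')
        Line.erase(0, 1);
      if (!Line.empty() && Line.back() == '"')
        Line.pop_back();
      std::vector<std::string> Paths;
      for (size_t Start = 0; Start <= Line.size();) {
        size_t Colon = Line.find(':', Start);
        if (Colon == std::string::npos)
          Colon = Line.size();
        if (Colon > Start) // AllowEmpty=false: skip empty components
          Paths.push_back(Line.substr(Start, Colon - Start));
        Start = Colon + 1;
      }
      return Paths;
    }

Applied to the sample LDPATH line in the comment above, this yields the two
4.9.x directories that are then probed for crtbegin.o.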
@@ -2240,17 +2405,21 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
case llvm::Triple::systemz:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
return true;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- // Enabled for Debian and Android mips64/mipsel, as they can precisely
- // identify the ABI in use (Debian) or only use N64 for MIPS64 (Android).
- // Other targets are unable to distinguish N32 from N64.
+ // Enabled for Debian, Android, FreeBSD and OpenBSD mips64/mipsel, as they
+ // can precisely identify the ABI in use (Debian) or only use N64 for MIPS64
+ // (Android). Other targets are unable to distinguish N32 from N64.
if (getTriple().getEnvironment() == llvm::Triple::GNUABI64 ||
- getTriple().isAndroid())
+ getTriple().isAndroid() ||
+ getTriple().isOSFreeBSD() ||
+ getTriple().isOSOpenBSD())
return true;
return false;
default:
@@ -2265,12 +2434,9 @@ void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
return;
switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx: {
- std::string Path = findLibCxxIncludePath();
- if (!Path.empty())
- addSystemInclude(DriverArgs, CC1Args, Path);
+ case ToolChain::CST_Libcxx:
+ addLibCxxIncludePaths(DriverArgs, CC1Args);
break;
- }
case ToolChain::CST_Libstdcxx:
addLibStdCxxIncludePaths(DriverArgs, CC1Args);
@@ -2278,9 +2444,12 @@ void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
}
-std::string Generic_GCC::findLibCxxIncludePath() const {
+void
+Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
// FIXME: The Linux behavior would probably be a better approach here.
- return getDriver().SysRoot + "/usr/include/c++/v1";
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
}
void
@@ -2290,7 +2459,7 @@ Generic_GCC::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
// FIXME: If we have a valid GCCInstallation, use it.
}
-/// \brief Helper to add the variant paths of a libstdc++ installation.
+/// Helper to add the variant paths of a libstdc++ installation.
bool Generic_GCC::addLibStdCXXIncludePaths(
Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
StringRef TargetMultiarchTriple, Twine IncludeSuffix,
@@ -2375,6 +2544,8 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
bool UseInitArrayDefault =
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
+ (getTriple().getOS() == llvm::Triple::FreeBSD &&
+ getTriple().getOSMajorVersion() >= 12) ||
(getTriple().getOS() == llvm::Triple::Linux &&
((!GCCInstallation.isValid() || !V.isOlderThan(4, 7, 0)) ||
getTriple().isAndroid())) ||
diff --git a/lib/Driver/ToolChains/Gnu.h b/lib/Driver/ToolChains/Gnu.h
index f29342b95a07..e8e74e4d80fd 100644
--- a/lib/Driver/ToolChains/Gnu.h
+++ b/lib/Driver/ToolChains/Gnu.h
@@ -36,7 +36,7 @@ bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
namespace tools {
-/// \brief Base class for all GNU tools that provide the same behavior when
+/// Base class for all GNU tools that provide the same behavior when
/// it comes to response file support
class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
virtual void anchor();
@@ -139,7 +139,7 @@ namespace toolchains {
/// command line options.
class LLVM_LIBRARY_VISIBILITY Generic_GCC : public ToolChain {
public:
- /// \brief Struct to store and manipulate GCC versions.
+ /// Struct to store and manipulate GCC versions.
///
/// We rely on assumptions about the form and structure of GCC version
/// numbers: they consist of at most three '.'-separated components, and each
@@ -155,16 +155,16 @@ public:
/// in the way that (for example) Debian's version format does. If that ever
/// becomes necessary, it can be added.
struct GCCVersion {
- /// \brief The unparsed text of the version.
+ /// The unparsed text of the version.
std::string Text;
- /// \brief The parsed major, minor, and patch numbers.
+ /// The parsed major, minor, and patch numbers.
int Major, Minor, Patch;
- /// \brief The text of the parsed major, and major+minor versions.
+ /// The text of the parsed major, and major+minor versions.
std::string MajorStr, MinorStr;
- /// \brief Any textual suffix on the patch number.
+ /// Any textual suffix on the patch number.
std::string PatchSuffix;
static GCCVersion Parse(StringRef VersionText);
@@ -178,7 +178,7 @@ public:
bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
};
- /// \brief This is a class to find a viable GCC installation for Clang to
+ /// This is a class to find a viable GCC installation for Clang to
/// use.
///
/// This class tries to find a GCC installation on the system, and report
@@ -213,32 +213,32 @@ public:
void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args,
ArrayRef<std::string> ExtraTripleAliases = None);
- /// \brief Check whether we detected a valid GCC install.
+ /// Check whether we detected a valid GCC install.
bool isValid() const { return IsValid; }
- /// \brief Get the GCC triple for the detected install.
+ /// Get the GCC triple for the detected install.
const llvm::Triple &getTriple() const { return GCCTriple; }
- /// \brief Get the detected GCC installation path.
+ /// Get the detected GCC installation path.
StringRef getInstallPath() const { return GCCInstallPath; }
- /// \brief Get the detected GCC parent lib path.
+ /// Get the detected GCC parent lib path.
StringRef getParentLibPath() const { return GCCParentLibPath; }
- /// \brief Get the detected Multilib
+ /// Get the detected Multilib
const Multilib &getMultilib() const { return SelectedMultilib; }
- /// \brief Get the whole MultilibSet
+ /// Get the whole MultilibSet
const MultilibSet &getMultilibs() const { return Multilibs; }
/// Get the biarch sibling multilib (if it exists).
/// \return true iff such a sibling exists
bool getBiarchSibling(Multilib &M) const;
- /// \brief Get the detected GCC version string.
+ /// Get the detected GCC version string.
const GCCVersion &getVersion() const { return Version; }
- /// \brief Print information about the detected GCC installation.
+ /// Print information about the detected GCC installation.
void print(raw_ostream &OS) const;
private:
@@ -250,6 +250,10 @@ public:
SmallVectorImpl<StringRef> &BiarchLibDirs,
SmallVectorImpl<StringRef> &BiarchTripleAliases);
+ void AddDefaultGCCPrefixes(const llvm::Triple &TargetTriple,
+ SmallVectorImpl<std::string> &Prefixes,
+ StringRef SysRoot);
+
bool ScanGCCForMultilibs(const llvm::Triple &TargetTriple,
const llvm::opt::ArgList &Args,
StringRef Path,
@@ -261,11 +265,10 @@ public:
StringRef CandidateTriple,
bool NeedsBiarchSuffix = false);
- void scanLibDirForGCCTripleSolaris(const llvm::Triple &TargetArch,
- const llvm::opt::ArgList &Args,
- const std::string &LibDir,
- StringRef CandidateTriple,
- bool NeedsBiarchSuffix = false);
+ bool ScanGentooConfigs(const llvm::Triple &TargetTriple,
+ const llvm::opt::ArgList &Args,
+ const SmallVectorImpl<StringRef> &CandidateTriples,
+ const SmallVectorImpl<StringRef> &BiarchTriples);
bool ScanGentooGccConfig(const llvm::Triple &TargetTriple,
const llvm::opt::ArgList &Args,
@@ -301,19 +304,21 @@ protected:
/// \name ToolChain Implementation Helper Functions
/// @{
- /// \brief Check whether the target triple's architecture is 64-bits.
+ /// Check whether the target triple's architecture is 64-bit.
bool isTarget64Bit() const { return getTriple().isArch64Bit(); }
- /// \brief Check whether the target triple's architecture is 32-bits.
+ /// Check whether the target triple's architecture is 32-bits.
bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
- // FIXME: This should be final, but the Solaris tool chain does weird
- // things we can't easily represent.
+ // FIXME: This should be final, but the CrossWindows toolchain does weird
+ // things that can't be easily generalized.
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- virtual std::string findLibCxxIncludePath() const;
+ virtual void
+ addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
virtual void
addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const;
diff --git a/lib/Driver/ToolChains/HIP.cpp b/lib/Driver/ToolChains/HIP.cpp
new file mode 100644
index 000000000000..03acf45a9b31
--- /dev/null
+++ b/lib/Driver/ToolChains/HIP.cpp
@@ -0,0 +1,350 @@
+//===--- HIP.cpp - HIP Tool and ToolChain Implementations -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HIP.h"
+#include "CommonArgs.h"
+#include "InputInfo.h"
+#include "clang/Basic/Cuda.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+namespace {
+
+static void addBCLib(Compilation &C, const ArgList &Args,
+ ArgStringList &CmdArgs, ArgStringList LibraryPaths,
+ StringRef BCName) {
+ StringRef FullName;
+ for (std::string LibraryPath : LibraryPaths) {
+ SmallString<128> Path(LibraryPath);
+ llvm::sys::path::append(Path, BCName);
+ FullName = Path;
+ if (llvm::sys::fs::exists(FullName)) {
+ CmdArgs.push_back(Args.MakeArgString(FullName));
+ return;
+ }
+ }
+ C.getDriver().Diag(diag::err_drv_no_such_file) << BCName;
+}
+
+} // namespace
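
addBCLib above is a first-match search over candidate directories, with a
driver diagnostic when nothing matches. A standalone sketch of the same
lookup, written against std::filesystem purely for illustration (the patch
itself goes through llvm::sys::fs):

    #include <filesystem>
    #include <optional>
    #include <string>
    #include <system_error>
    #include <vector>

    namespace fs = std::filesystem;

    // Return Dir/FileName for the first directory that contains FileName,
    // or std::nullopt if no candidate directory does (the patch emits
    // err_drv_no_such_file in that case).
    static std::optional<fs::path>
    findFirstExisting(const std::vector<fs::path> &Dirs,
                      const std::string &FileName) {
      for (const fs::path &Dir : Dirs) {
        fs::path Candidate = Dir / FileName;
        std::error_code EC;
        if (fs::exists(Candidate, EC) && !EC)
          return Candidate;
      }
      return std::nullopt;
    }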
+
+const char *AMDGCN::Linker::constructLLVMLinkCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const ArgList &Args, StringRef SubArchName,
+ StringRef OutputFilePrefix) const {
+ ArgStringList CmdArgs;
+ // Add the input bc's created by compile step.
+ for (const auto &II : Inputs)
+ CmdArgs.push_back(II.getFilename());
+
+ ArgStringList LibraryPaths;
+
+ // Find in --hip-device-lib-path and HIP_DEVICE_LIB_PATH.
+ for (auto Path : Args.getAllArgValues(options::OPT_hip_device_lib_path_EQ))
+ LibraryPaths.push_back(Args.MakeArgString(Path));
+
+ addDirectoryList(Args, LibraryPaths, "-L", "HIP_DEVICE_LIB_PATH");
+
+ llvm::SmallVector<std::string, 10> BCLibs;
+
+ // Add the bitcode libraries specified by --hip-device-lib.
+ for (auto Lib : Args.getAllArgValues(options::OPT_hip_device_lib_EQ)) {
+ BCLibs.push_back(Args.MakeArgString(Lib));
+ }
+
+ // If --hip-device-lib is not set, add the default bitcode libraries.
+ if (BCLibs.empty()) {
+ // Get the bc lib file name for ISA version. For example,
+ // gfx803 => oclc_isa_version_803.amdgcn.bc.
+ std::string ISAVerBC =
+ "oclc_isa_version_" + SubArchName.drop_front(3).str() + ".amdgcn.bc";
+
+ llvm::StringRef FlushDenormalControlBC;
+ if (Args.hasArg(options::OPT_fcuda_flush_denormals_to_zero))
+ FlushDenormalControlBC = "oclc_daz_opt_on.amdgcn.bc";
+ else
+ FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
+
+ BCLibs.append({"opencl.amdgcn.bc",
+ "ocml.amdgcn.bc", "ockl.amdgcn.bc", "irif.amdgcn.bc",
+ "oclc_finite_only_off.amdgcn.bc",
+ FlushDenormalControlBC,
+ "oclc_correctly_rounded_sqrt_on.amdgcn.bc",
+ "oclc_unsafe_math_off.amdgcn.bc", ISAVerBC});
+ }
+ for (auto Lib : BCLibs)
+ addBCLib(C, Args, CmdArgs, LibraryPaths, Lib);
+
+ // Add an intermediate output file.
+ CmdArgs.push_back("-o");
+ std::string TmpName =
+ C.getDriver().GetTemporaryPath(OutputFilePrefix.str() + "-linked", "bc");
+ const char *OutputFileName =
+ C.addTempFile(C.getArgs().MakeArgString(TmpName));
+ CmdArgs.push_back(OutputFileName);
+ SmallString<128> ExecPath(C.getDriver().Dir);
+ llvm::sys::path::append(ExecPath, "llvm-link");
+ const char *Exec = Args.MakeArgString(ExecPath);
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+ return OutputFileName;
+}
+
+const char *AMDGCN::Linker::constructOptCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
+ // Construct opt command.
+ ArgStringList OptArgs;
+ // The input to opt is the output from llvm-link.
+ OptArgs.push_back(InputFileName);
+ // Pass optimization arg to opt.
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ StringRef OOpt = "3";
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ OOpt = "3";
+ else if (A->getOption().matches(options::OPT_O0))
+ OOpt = "0";
+ else if (A->getOption().matches(options::OPT_O)) {
+ // -Os, -Oz, and -O(anything else) map to -O2
+ OOpt = llvm::StringSwitch<const char *>(A->getValue())
+ .Case("1", "1")
+ .Case("2", "2")
+ .Case("3", "3")
+ .Case("s", "2")
+ .Case("z", "2")
+ .Default("2");
+ }
+ OptArgs.push_back(Args.MakeArgString("-O" + OOpt));
+ }
+ OptArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
+ OptArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
+ OptArgs.push_back("-o");
+ std::string TmpFileName = C.getDriver().GetTemporaryPath(
+ OutputFilePrefix.str() + "-optimized", "bc");
+ const char *OutputFileName =
+ C.addTempFile(C.getArgs().MakeArgString(TmpFileName));
+ OptArgs.push_back(OutputFileName);
+ SmallString<128> OptPath(C.getDriver().Dir);
+ llvm::sys::path::append(OptPath, "opt");
+ const char *OptExec = Args.MakeArgString(OptPath);
+ C.addCommand(llvm::make_unique<Command>(JA, *this, OptExec, OptArgs, Inputs));
+ return OutputFileName;
+}
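
The -O handling above normalizes the driver's optimization spellings to what
opt accepts: -O4 and -Ofast become 3, -O0 stays 0, and a plain -O<value>
passes 1/2/3 through while -Os, -Oz, and anything else collapse to 2. A
hypothetical helper restating just that last mapping:

    #include <string>

    // Map the value of a driver -O<value> option to the level handed to opt.
    // -O4, -Ofast, and -O0 are matched as separate options in the hunk above.
    static std::string mapToOptLevel(const std::string &Val) {
      if (Val == "1" || Val == "2" || Val == "3")
        return Val; // passed through unchanged
      return "2";   // -Os, -Oz, and anything else map to -O2
    }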
+
+const char *AMDGCN::Linker::constructLlcCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
+ // Construct llc command.
+ ArgStringList LlcArgs{InputFileName, "-mtriple=amdgcn-amd-amdhsa",
+ "-filetype=obj",
+ Args.MakeArgString("-mcpu=" + SubArchName), "-o"};
+ std::string LlcOutputFileName =
+ C.getDriver().GetTemporaryPath(OutputFilePrefix, "o");
+ const char *LlcOutputFile =
+ C.addTempFile(C.getArgs().MakeArgString(LlcOutputFileName));
+ LlcArgs.push_back(LlcOutputFile);
+ SmallString<128> LlcPath(C.getDriver().Dir);
+ llvm::sys::path::append(LlcPath, "llc");
+ const char *Llc = Args.MakeArgString(LlcPath);
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Llc, LlcArgs, Inputs));
+ return LlcOutputFile;
+}
+
+void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args,
+ const char *InputFileName) const {
+ // Construct lld command.
+ // The output from ld.lld is an HSA code object file.
+ ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined",
+ "-shared", "-o", Output.getFilename(),
+ InputFileName};
+ SmallString<128> LldPath(C.getDriver().Dir);
+ llvm::sys::path::append(LldPath, "lld");
+ const char *Lld = Args.MakeArgString(LldPath);
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Lld, LldArgs, Inputs));
+}
+
+// For amdgcn, the inputs of the linker job are device bitcode and the output
+// is an object file. It runs the llvm-link, opt, llc, and lld steps in turn.
+void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ assert(getToolChain().getTriple().getArch() == llvm::Triple::amdgcn &&
+ "Unsupported target");
+
+ std::string SubArchName = JA.getOffloadingArch();
+ assert(StringRef(SubArchName).startswith("gfx") && "Unsupported sub arch");
+
+ // Prefix for temporary file name.
+ std::string Prefix =
+ llvm::sys::path::stem(Inputs[0].getFilename()).str() + "-" + SubArchName;
+
+ // Each command outputs different files.
+ const char *LLVMLinkCommand =
+ constructLLVMLinkCommand(C, JA, Inputs, Args, SubArchName, Prefix);
+ const char *OptCommand = constructOptCommand(C, JA, Inputs, Args, SubArchName,
+ Prefix, LLVMLinkCommand);
+ const char *LlcCommand =
+ constructLlcCommand(C, JA, Inputs, Args, SubArchName, Prefix, OptCommand);
+ constructLldCommand(C, JA, Inputs, Output, Args, LlcCommand);
+}
+
+HIPToolChain::HIPToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const ArgList &Args)
+ : ToolChain(D, Triple, Args), HostTC(HostTC) {
+ // Look up binaries in the driver directory; this is used to
+ // discover the clang-offload-bundler executable.
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+void HIPToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
+
+ StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
+ assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
+ (void) GpuArch;
+ assert(DeviceOffloadingKind == Action::OFK_HIP &&
+ "Only HIP offloading kinds are supported for GPUs.");
+
+ CC1Args.push_back("-target-cpu");
+ CC1Args.push_back(DriverArgs.MakeArgStringRef(GpuArch));
+ CC1Args.push_back("-fcuda-is-device");
+
+ if (DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
+ options::OPT_fno_cuda_flush_denormals_to_zero, false))
+ CC1Args.push_back("-fcuda-flush-denormals-to-zero");
+
+ if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
+ options::OPT_fno_cuda_approx_transcendentals, false))
+ CC1Args.push_back("-fcuda-approx-transcendentals");
+
+ if (DriverArgs.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc,
+ false))
+ CC1Args.push_back("-fcuda-rdc");
+}
+
+llvm::opt::DerivedArgList *
+HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL =
+ HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
+ if (!DAL)
+ DAL = new DerivedArgList(Args.getBaseArgs());
+
+ const OptTable &Opts = getDriver().getOpts();
+
+ for (Arg *A : Args) {
+ if (A->getOption().matches(options::OPT_Xarch__)) {
+ // Skip this argument unless the architecture matches BoundArch.
+ if (BoundArch.empty() || A->getValue(0) != BoundArch)
+ continue;
+
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
+ unsigned Prev = Index;
+ std::unique_ptr<Arg> XarchArg(Opts.ParseOneArg(Args, Index));
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
+ << A->getAsString(Args);
+ continue;
+ } else if (XarchArg->getOption().hasFlag(options::DriverOption)) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
+ << A->getAsString(Args);
+ continue;
+ }
+ XarchArg->setBaseArg(A);
+ A = XarchArg.release();
+ DAL->AddSynthesizedArg(A);
+ }
+ DAL->append(A);
+ }
+
+ if (!BoundArch.empty()) {
+ DAL->eraseArg(options::OPT_march_EQ);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), BoundArch);
+ }
+
+ return DAL;
+}
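
TranslateArgs above gives -Xarch_ per-GPU semantics: an -Xarch_<arch> option
is unwrapped and forwarded only when <arch> matches BoundArch, is silently
dropped for every other bound arch, and is diagnosed and ignored if its value
consumes extra arguments or names a driver-level option. As an illustrative
fragment (arch and flag chosen only for the example),

    -Xarch_gfx803 -O2

reaches the device compilation bound to gfx803 as a plain -O2 and no other.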
+
+Tool *HIPToolChain::buildLinker() const {
+ assert(getTriple().getArch() == llvm::Triple::amdgcn);
+ return new tools::AMDGCN::Linker(*this);
+}
+
+void HIPToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ HostTC.addClangWarningOptions(CC1Args);
+}
+
+ToolChain::CXXStdlibType
+HIPToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ return HostTC.GetCXXStdlibType(Args);
+}
+
+void HIPToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
+
+void HIPToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
+}
+
+void HIPToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
+}
+
+SanitizerMask HIPToolChain::getSupportedSanitizers() const {
+ // The HIPToolChain only supports sanitizers in the sense that it allows
+ // sanitizer arguments on the command line if they are supported by the host
+ // toolchain. The HIPToolChain will actually ignore any command line
+ // arguments for any of these "supported" sanitizers. That means that no
+ // sanitization of device code is actually supported at this time.
+ //
+ // This behavior is necessary because the host and device toolchains
+ // invocations often share the command line, so the device toolchain must
+ // tolerate flags meant only for the host toolchain.
+ return HostTC.getSupportedSanitizers();
+}
+
+VersionTuple HIPToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
+ return HostTC.computeMSVCVersion(D, Args);
+}
diff --git a/lib/Driver/ToolChains/HIP.h b/lib/Driver/ToolChains/HIP.h
new file mode 100644
index 000000000000..40c9128e2f59
--- /dev/null
+++ b/lib/Driver/ToolChains/HIP.h
@@ -0,0 +1,123 @@
+//===--- HIP.h - HIP ToolChain Implementations ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
+
+#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Tool.h"
+
+namespace clang {
+namespace driver {
+
+namespace tools {
+
+namespace AMDGCN {
+// Runs llvm-link/opt/llc/lld to link multiple LLVM bitcode files together
+// with the device library, then compiles the result to ISA in a shared object.
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("AMDGCN::Linker", "amdgcn-link", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+private:
+ /// \return llvm-link output file name.
+ const char *constructLLVMLinkCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix) const;
+
+ /// \return opt output file name.
+ const char *constructOptCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix,
+ const char *InputFileName) const;
+
+ /// \return llc output file name.
+ const char *constructLlcCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix,
+ const char *InputFileName) const;
+
+ void constructLldCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs, const InputInfo &Output,
+ const llvm::opt::ArgList &Args,
+ const char *InputFileName) const;
+};
+
+} // end namespace AMDGCN
+} // end namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY HIPToolChain : public ToolChain {
+public:
+ HIPToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
+
+ const llvm::Triple *getAuxTriple() const override {
+ return &HostTC.getTriple();
+ }
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+
+ bool useIntegratedAs() const override { return true; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault() const override { return false; }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+ bool IsMathErrnoDefault() const override { return false; }
+
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ VersionTuple
+ computeMSVCVersion(const Driver *D,
+ const llvm::opt::ArgList &Args) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+
+ const ToolChain &HostTC;
+
+protected:
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
diff --git a/lib/Driver/ToolChains/Haiku.cpp b/lib/Driver/ToolChains/Haiku.cpp
index 284d269a0c1b..12461ec9c4bd 100644
--- a/lib/Driver/ToolChains/Haiku.cpp
+++ b/lib/Driver/ToolChains/Haiku.cpp
@@ -22,8 +22,10 @@ Haiku::Haiku(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
}
-std::string Haiku::findLibCxxIncludePath() const {
- return getDriver().SysRoot + "/system/develop/headers/c++/v1";
+void Haiku::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/system/develop/headers/c++/v1");
}
void Haiku::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
diff --git a/lib/Driver/ToolChains/Haiku.h b/lib/Driver/ToolChains/Haiku.h
index 8b5b48e59023..a12a48e00976 100644
--- a/lib/Driver/ToolChains/Haiku.h
+++ b/lib/Driver/ToolChains/Haiku.h
@@ -27,7 +27,9 @@ public:
return getTriple().getArch() == llvm::Triple::x86_64;
}
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/lib/Driver/ToolChains/Hexagon.cpp b/lib/Driver/ToolChains/Hexagon.cpp
index 2debf0e2de54..c2b27b6d9ac6 100644
--- a/lib/Driver/ToolChains/Hexagon.cpp
+++ b/lib/Driver/ToolChains/Hexagon.cpp
@@ -11,7 +11,6 @@
#include "InputInfo.h"
#include "CommonArgs.h"
#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -37,16 +36,10 @@ static StringRef getDefaultHvxLength(StringRef Cpu) {
}
static void handleHVXWarnings(const Driver &D, const ArgList &Args) {
- // Handle deprecated HVX double warnings.
- if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_double))
- D.Diag(diag::warn_drv_deprecated_arg)
- << A->getAsString(Args) << "-mhvx-length=128B";
- if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx_double))
- D.Diag(diag::warn_drv_deprecated_arg) << A->getAsString(Args) << "-mno-hvx";
// Handle the unsupported values passed to -mhvx-length.
if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
StringRef Val = A->getValue();
- if (Val != "64B" && Val != "128B")
+ if (!Val.equals_lower("64b") && !Val.equals_lower("128b"))
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Val;
}
@@ -63,14 +56,13 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
StringRef HVXFeature, HVXLength;
StringRef Cpu(toolchains::HexagonToolChain::GetTargetCPUVersion(Args));
- // Handle -mhvx, -mhvx=, -mno-hvx, -mno-hvx-double.
- if (Arg *A = Args.getLastArg(
- options::OPT_mno_hexagon_hvx, options::OPT_mno_hexagon_hvx_double,
- options::OPT_mhexagon_hvx, options::OPT_mhexagon_hvx_EQ)) {
- if (A->getOption().matches(options::OPT_mno_hexagon_hvx) ||
- A->getOption().matches(options::OPT_mno_hexagon_hvx_double)) {
+ // Handle -mhvx, -mhvx=, -mno-hvx.
+ if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx,
+ options::OPT_mhexagon_hvx,
+ options::OPT_mhexagon_hvx_EQ)) {
+ if (A->getOption().matches(options::OPT_mno_hexagon_hvx))
return;
- } else if (A->getOption().matches(options::OPT_mhexagon_hvx_EQ)) {
+ if (A->getOption().matches(options::OPT_mhexagon_hvx_EQ)) {
HasHVX = true;
HVXFeature = Cpu = A->getValue();
HVXFeature = Args.MakeArgString(llvm::Twine("+hvx") + HVXFeature.lower());
@@ -81,16 +73,13 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back(HVXFeature);
}
- // Handle -mhvx-length=, -mhvx-double.
- if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ,
- options::OPT_mhexagon_hvx_double)) {
+ // Handle -mhvx-length=.
+ if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
// These flags are valid only if HVX is enabled.
if (!HasHVX)
D.Diag(diag::err_drv_invalid_hvx_length);
else if (A->getOption().matches(options::OPT_mhexagon_hvx_length_EQ))
HVXLength = A->getValue();
- else if (A->getOption().matches(options::OPT_mhexagon_hvx_double))
- HVXLength = "128b";
}
// Default hvx-length based on Cpu.
else if (HasHVX)
@@ -118,8 +107,11 @@ void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back(UseLongCalls ? "+long-calls" : "-long-calls");
- bool HasHVX(false);
+ bool HasHVX = false;
handleHVXTargetFeatures(D, Args, Features, HasHVX);
+
+ if (HexagonToolChain::isAutoHVXEnabled(Args) && !HasHVX)
+ D.Diag(diag::warn_drv_vectorize_needs_hvx);
}
// Hexagon tools start.
@@ -521,11 +513,19 @@ unsigned HexagonToolChain::getOptimizationLevel(
void HexagonToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
- if (DriverArgs.hasArg(options::OPT_ffp_contract))
- return;
- unsigned OptLevel = getOptimizationLevel(DriverArgs);
- if (OptLevel >= 3)
- CC1Args.push_back("-ffp-contract=fast");
+ if (!DriverArgs.hasArg(options::OPT_ffp_contract)) {
+ unsigned OptLevel = getOptimizationLevel(DriverArgs);
+ if (OptLevel >= 3)
+ CC1Args.push_back("-ffp-contract=fast");
+ }
+ if (DriverArgs.hasArg(options::OPT_ffixed_r19)) {
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+reserved-r19");
+ }
+ if (isAutoHVXEnabled(DriverArgs)) {
+ CC1Args.push_back("-mllvm");
+ CC1Args.push_back("-hexagon-autohvx");
+ }
}
void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -563,6 +563,13 @@ HexagonToolChain::GetCXXStdlibType(const ArgList &Args) const {
return ToolChain::CST_Libstdcxx;
}
+bool HexagonToolChain::isAutoHVXEnabled(const llvm::opt::ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_fvectorize,
+ options::OPT_fno_vectorize))
+ return A->getOption().matches(options::OPT_fvectorize);
+ return false;
+}
+
//
// Returns the default CPU for Hexagon. This is the default compilation target
// if no Hexagon processor is selected at the command-line.
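
Net effect of the Hexagon hunks above: when -fvectorize is the last of
-fvectorize/-fno-vectorize, the driver opts into the auto-HVX vectorizer by
appending -mllvm -hexagon-autohvx to the cc1 invocation, and requesting
vectorization without HVX draws the new warn_drv_vectorize_needs_hvx
diagnostic. An illustrative invocation (triple and CPU chosen only for the
example):

    clang --target=hexagon-unknown-elf -mv62 -mhvx -fvectorize -c f.c

Dropping -mhvx from that line draws the warning, since the auto-HVX
vectorizer needs HVX itself to be enabled.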
diff --git a/lib/Driver/ToolChains/Hexagon.h b/lib/Driver/ToolChains/Hexagon.h
index 229a08c76dfb..e43b8a5b8800 100644
--- a/lib/Driver/ToolChains/Hexagon.h
+++ b/lib/Driver/ToolChains/Hexagon.h
@@ -94,6 +94,7 @@ public:
void getHexagonLibraryPaths(const llvm::opt::ArgList &Args,
ToolChain::path_list &LibPaths) const;
+ static bool isAutoHVXEnabled(const llvm::opt::ArgList &Args);
static const StringRef GetDefaultCPU();
static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args);
diff --git a/lib/Driver/ToolChains/Lanai.h b/lib/Driver/ToolChains/Lanai.h
index 4ce658dc7775..bb92bfaea7e2 100644
--- a/lib/Driver/ToolChains/Lanai.h
+++ b/lib/Driver/ToolChains/Lanai.h
@@ -24,7 +24,9 @@ public:
: Generic_ELF(D, Triple, Args) {}
// No support for finding a C++ standard library yet.
- std::string findLibCxxIncludePath() const override { return ""; }
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override {}
diff --git a/lib/Driver/ToolChains/Linux.cpp b/lib/Driver/ToolChains/Linux.cpp
index 1301cdf114ae..d27f994d32ab 100644
--- a/lib/Driver/ToolChains/Linux.cpp
+++ b/lib/Driver/ToolChains/Linux.cpp
@@ -11,6 +11,7 @@
#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
+#include "Arch/RISCV.h"
#include "CommonArgs.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
@@ -21,6 +22,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/ScopedPrinter.h"
#include <system_error>
using namespace clang::driver;
@@ -30,7 +32,7 @@ using namespace llvm::opt;
using tools::addPathIfExists;
-/// \brief Get our best guess at the multiarch triple for a target.
+/// Get our best guess at the multiarch triple for a target.
///
/// Debian-based systems are starting to use a multiarch setup where they use
/// a target-triple directory in the library and header search paths.
@@ -41,6 +43,7 @@ static std::string getMultiarchTriple(const Driver &D,
StringRef SysRoot) {
llvm::Triple::EnvironmentType TargetEnvironment =
TargetTriple.getEnvironment();
+ bool IsAndroid = TargetTriple.isAndroid();
// For most architectures, just use whatever we have rather than trying to be
// clever.
@@ -54,7 +57,9 @@ static std::string getMultiarchTriple(const Driver &D,
// regardless of what the actual target triple is.
case llvm::Triple::arm:
case llvm::Triple::thumb:
- if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
+ if (IsAndroid) {
+ return "arm-linux-androideabi";
+ } else if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabihf"))
return "arm-linux-gnueabihf";
} else {
@@ -73,16 +78,22 @@ static std::string getMultiarchTriple(const Driver &D,
}
break;
case llvm::Triple::x86:
+ if (IsAndroid)
+ return "i686-linux-android";
if (D.getVFS().exists(SysRoot + "/lib/i386-linux-gnu"))
return "i386-linux-gnu";
break;
case llvm::Triple::x86_64:
+ if (IsAndroid)
+ return "x86_64-linux-android";
// We don't want this for x32, otherwise it will match x86_64 libs
if (TargetEnvironment != llvm::Triple::GNUX32 &&
D.getVFS().exists(SysRoot + "/lib/x86_64-linux-gnu"))
return "x86_64-linux-gnu";
break;
case llvm::Triple::aarch64:
+ if (IsAndroid)
+ return "aarch64-linux-android";
if (D.getVFS().exists(SysRoot + "/lib/aarch64-linux-gnu"))
return "aarch64-linux-gnu";
break;
@@ -95,6 +106,8 @@ static std::string getMultiarchTriple(const Driver &D,
return "mips-linux-gnu";
break;
case llvm::Triple::mipsel:
+ if (IsAndroid)
+ return "mipsel-linux-android";
if (D.getVFS().exists(SysRoot + "/lib/mipsel-linux-gnu"))
return "mipsel-linux-gnu";
break;
@@ -105,6 +118,8 @@ static std::string getMultiarchTriple(const Driver &D,
return "mips64-linux-gnuabi64";
break;
case llvm::Triple::mips64el:
+ if (IsAndroid)
+ return "mips64el-linux-android";
if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
return "mips64el-linux-gnu";
if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnuabi64"))
@@ -141,7 +156,7 @@ static std::string getMultiarchTriple(const Driver &D,
}
static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
- if (tools::isMipsArch(Triple.getArch())) {
+ if (Triple.isMIPS()) {
if (Triple.isAndroid()) {
StringRef CPUName;
StringRef ABIName;
@@ -176,6 +191,9 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
Triple.getEnvironment() == llvm::Triple::GNUX32)
return "libx32";
+ if (Triple.getArch() == llvm::Triple::riscv32)
+ return "lib32";
+
return Triple.isArch32Bit() ? "lib" : "lib64";
}
@@ -220,12 +238,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
+ if (GCCInstallation.getParentLibPath().find("opt/rh/devtoolset") !=
+ StringRef::npos)
+ // With devtoolset on RHEL, we want to add a bin directory that is relative
+ // to the detected gcc install, because if we are using devtoolset gcc then
+ // we want to use other tools from devtoolset (e.g. ld) instead of the
+ // standard system tools.
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() +
+ "/../bin").str());
+
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
ExtraOpts.push_back("-X");
const bool IsAndroid = Triple.isAndroid();
- const bool IsMips = tools::isMipsArch(Arch);
+ const bool IsMips = Triple.isMIPS();
const bool IsHexagon = Arch == llvm::Triple::hexagon;
+ const bool IsRISCV =
+ Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64;
if (IsMips && !SysRoot.empty())
ExtraOpts.push_back("--sysroot=" + SysRoot);
@@ -331,8 +360,28 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+
+ if (IsAndroid) {
+ // Android sysroots contain a library directory for each supported OS
+ // version as well as some unversioned libraries in the usual multiarch
+ // directory.
+ unsigned Major;
+ unsigned Minor;
+ unsigned Micro;
+ Triple.getEnvironmentVersion(Major, Minor, Micro);
+ addPathIfExists(D,
+ SysRoot + "/usr/lib/" + MultiarchTriple + "/" +
+ llvm::to_string(Major),
+ Paths);
+ }
+
addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+ if (IsRISCV) {
+ StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
+ addPathIfExists(D, SysRoot + "/" + OSLibDir + "/" + ABIName, Paths);
+ addPathIfExists(D, SysRoot + "/usr/" + OSLibDir + "/" + ABIName, Paths);
+ }
// Try walking via the GCC triple path in case of biarch or multiarch GCC
// installations with strange symlinks.
@@ -389,7 +438,16 @@ std::string Linux::computeSysRoot() const {
if (!getDriver().SysRoot.empty())
return getDriver().SysRoot;
- if (!GCCInstallation.isValid() || !tools::isMipsArch(getTriple().getArch()))
+ if (getTriple().isAndroid()) {
+ // Android toolchains typically include a sysroot at ../sysroot relative to
+ // the clang binary.
+ const StringRef ClangDir = getDriver().getInstalledDir();
+ std::string AndroidSysRootPath = (ClangDir + "/../sysroot").str();
+ if (getVFS().exists(AndroidSysRootPath))
+ return AndroidSysRootPath;
+ }
+
+ if (!GCCInstallation.isValid() || !getTriple().isMIPS())
return std::string();
// Standalone MIPS toolchains use different names for sysroot folder
@@ -481,8 +539,6 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el: {
- bool LE = (Triple.getArch() == llvm::Triple::mipsel) ||
- (Triple.getArch() == llvm::Triple::mips64el);
bool IsNaN2008 = tools::mips::isNaN2008(Args, Triple);
LibDir = "lib" + tools::mips::getMipsABILibSuffix(Args, Triple);
@@ -491,7 +547,8 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader = IsNaN2008 ? "ld-uClibc-mipsn8.so.0" : "ld-uClibc.so.0";
else if (!Triple.hasEnvironment() &&
Triple.getVendor() == llvm::Triple::VendorType::MipsTechnologies)
- Loader = LE ? "ld-musl-mipsel.so.1" : "ld-musl-mips.so.1";
+ Loader =
+ Triple.isLittleEndian() ? "ld-musl-mipsel.so.1" : "ld-musl-mips.so.1";
else
Loader = IsNaN2008 ? "ld-linux-mipsn8.so.1" : "ld.so.1";
@@ -511,6 +568,18 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader =
(tools::ppc::hasPPCAbiArg(Args, "elfv1")) ? "ld64.so.1" : "ld64.so.2";
break;
+ case llvm::Triple::riscv32: {
+ StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
+ LibDir = "lib";
+ Loader = ("ld-linux-riscv32-" + ABIName + ".so.1").str();
+ break;
+ }
+ case llvm::Triple::riscv64: {
+ StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
+ LibDir = "lib";
+ Loader = ("ld-linux-riscv64-" + ABIName + ".so.1").str();
+ break;
+ }
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
LibDir = "lib";
@@ -694,6 +763,14 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
default:
break;
}
+
+ const std::string AndroidMultiarchIncludeDir =
+ std::string("/usr/include/") +
+ getMultiarchTriple(D, getTriple(), SysRoot);
+ const StringRef AndroidMultiarchIncludeDirs[] = {AndroidMultiarchIncludeDir};
+ if (getTriple().isAndroid())
+ MultiarchIncludeDirs = AndroidMultiarchIncludeDirs;
+
for (StringRef Dir : MultiarchIncludeDirs) {
if (D.getVFS().exists(SysRoot + Dir)) {
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + Dir);
@@ -731,21 +808,24 @@ static std::string DetectLibcxxIncludePath(StringRef base) {
return MaxVersion ? (base + "/" + MaxVersionString).str() : "";
}
-std::string Linux::findLibCxxIncludePath() const {
+void Linux::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const std::string& SysRoot = computeSysRoot();
const std::string LibCXXIncludePathCandidates[] = {
+ DetectLibcxxIncludePath(getDriver().ResourceDir + "/include/c++"),
DetectLibcxxIncludePath(getDriver().Dir + "/../include/c++"),
// If this is a development (non-installed) clang, libcxx will
// not be found at ../include/c++, but it is likely to be found at
// one of the following two locations:
- DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/local/include/c++"),
- DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/include/c++") };
+ DetectLibcxxIncludePath(SysRoot + "/usr/local/include/c++"),
+ DetectLibcxxIncludePath(SysRoot + "/usr/include/c++") };
for (const auto &IncludePath : LibCXXIncludePathCandidates) {
if (IncludePath.empty() || !getVFS().exists(IncludePath))
continue;
// Use the first candidate that exists.
- return IncludePath;
+ addSystemInclude(DriverArgs, CC1Args, IncludePath);
+ return;
}
- return "";
}
void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
@@ -823,8 +903,8 @@ bool Linux::isPIEDefault() const {
SanitizerMask Linux::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
- const bool IsMIPS64 = getTriple().getArch() == llvm::Triple::mips64 ||
- getTriple().getArch() == llvm::Triple::mips64el;
+ const bool IsMIPS = getTriple().isMIPS32();
+ const bool IsMIPS64 = getTriple().isMIPS64();
const bool IsPowerPC64 = getTriple().getArch() == llvm::Triple::ppc64 ||
getTriple().getArch() == llvm::Triple::ppc64le;
const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64 ||
@@ -838,6 +918,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::KernelAddress;
+ Res |= SanitizerKind::Memory;
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::SafeStack;
if (IsX86_64 || IsMIPS64 || IsAArch64)
@@ -846,16 +927,17 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
Res |= SanitizerKind::Thread;
- if (IsX86_64 || IsMIPS64 || IsPowerPC64 || IsAArch64)
- Res |= SanitizerKind::Memory;
if (IsX86_64 || IsMIPS64)
Res |= SanitizerKind::Efficiency;
if (IsX86 || IsX86_64)
Res |= SanitizerKind::Function;
- if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch ||
+ IsPowerPC64)
Res |= SanitizerKind::Scudo;
- if (IsAArch64)
+ if (IsX86_64 || IsAArch64) {
Res |= SanitizerKind::HWAddress;
+ Res |= SanitizerKind::KernelHWAddress;
+ }
return Res;
}
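
The RISC-V loader cases above compose the dynamic linker name from the
register width and the ABI chosen via -mabi=. A minimal standalone sketch of
the naming scheme (the ABI strings are illustrative; the driver takes them
from tools::riscv::getRISCVABI):

    #include <iostream>
    #include <string>

    // Sketch of the "ld-linux-riscv<xlen>-<abi>.so.1" scheme added above.
    static std::string riscvLoader(unsigned XLen, const std::string &ABIName) {
      return "ld-linux-riscv" + std::to_string(XLen) + "-" + ABIName + ".so.1";
    }

    int main() {
      std::cout << riscvLoader(64, "lp64d") << "\n"; // ld-linux-riscv64-lp64d.so.1
      std::cout << riscvLoader(32, "ilp32") << "\n"; // ld-linux-riscv32-ilp32.so.1
    }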
diff --git a/lib/Driver/ToolChains/Linux.h b/lib/Driver/ToolChains/Linux.h
index 9778c1832ccc..22dbbecf6b96 100644
--- a/lib/Driver/ToolChains/Linux.h
+++ b/lib/Driver/ToolChains/Linux.h
@@ -27,7 +27,9 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
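
The signature change above is the pattern repeated for every toolchain in
this patch: the old hook could return only a single path, while the new one
appends include arguments directly (addSystemInclude expands to an
-internal-isystem pair), so a toolchain may contribute zero, one, or several
directories. A reduced model of the before/after shape (simplified types,
not the clang API; the path is illustrative):

    #include <string>
    #include <vector>

    struct ToolChainModel {
      // Old shape: report one path, or "" when nothing was found.
      std::string findLibCxxIncludePath() const {
        return "/usr/include/c++/v1";
      }

      // New shape: push cc1 arguments for every directory that exists;
      // an empty result simply adds nothing.
      void addLibCxxIncludePaths(std::vector<std::string> &CC1Args) const {
        CC1Args.push_back("-internal-isystem");
        CC1Args.push_back("/usr/include/c++/v1");
      }
    };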
diff --git a/lib/Driver/ToolChains/MSVC.cpp b/lib/Driver/ToolChains/MSVC.cpp
index ae41ee9e22cf..d062c6abc955 100644
--- a/lib/Driver/ToolChains/MSVC.cpp
+++ b/lib/Driver/ToolChains/MSVC.cpp
@@ -19,7 +19,6 @@
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Config/llvm-config.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/ConvertUTF.h"
@@ -31,13 +30,7 @@
#include "llvm/Support/Process.h"
#include <cstdio>
-// Include the necessary headers to interface with the Windows registry and
-// environment.
-#if defined(LLVM_ON_WIN32)
-#define USE_WIN32
-#endif
-
-#ifdef USE_WIN32
+#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#ifndef NOMINMAX
@@ -476,7 +469,10 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// their own link.exe which may come first.
linkPath = FindVisualStudioExecutable(TC, "link.exe");
-#ifdef USE_WIN32
+ if (!TC.FoundMSVCInstall() && !llvm::sys::fs::can_execute(linkPath))
+ C.getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
+
+#ifdef _WIN32
// When cross-compiling with VS2017 or newer, link.exe expects to have
// its containing bin directory at the top of PATH, followed by the
// native target bin directory.
@@ -691,8 +687,6 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
}
Tool *MSVCToolChain::buildLinker() const {
- if (VCToolChainPath.empty())
- getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
return new tools::visualstudio::Linker(*this);
}
@@ -752,6 +746,8 @@ static const char *llvmArchToWindowsSDKArch(llvm::Triple::ArchType Arch) {
return "x64";
case ArchType::arm:
return "arm";
+ case ArchType::aarch64:
+ return "arm64";
default:
return "";
}
@@ -769,6 +765,8 @@ static const char *llvmArchToLegacyVCArch(llvm::Triple::ArchType Arch) {
return "amd64";
case ArchType::arm:
return "arm";
+ case ArchType::aarch64:
+ return "arm64";
default:
return "";
}
@@ -784,6 +782,8 @@ static const char *llvmArchToDevDivInternalArch(llvm::Triple::ArchType Arch) {
return "amd64";
case ArchType::arm:
return "arm";
+ case ArchType::aarch64:
+ return "arm64";
default:
return "";
}
@@ -835,7 +835,7 @@ MSVCToolChain::getSubDirectoryPath(SubDirectoryType Type,
return Path.str();
}
-#ifdef USE_WIN32
+#ifdef _WIN32
static bool readFullStringValue(HKEY hkey, const char *valueName,
std::string &value) {
std::wstring WideValueName;
@@ -869,7 +869,7 @@ static bool readFullStringValue(HKEY hkey, const char *valueName,
}
#endif
-/// \brief Read registry string.
+/// Read registry string.
/// This also supports a means to look for high-versioned keys by use
/// of a $VERSION placeholder in the key path.
/// $VERSION in the key path is a placeholder for the version number,
@@ -879,7 +879,7 @@ static bool readFullStringValue(HKEY hkey, const char *valueName,
/// characters are compared. This function only searches HKLM.
static bool getSystemRegistryString(const char *keyPath, const char *valueName,
std::string &value, std::string *phValue) {
-#ifndef USE_WIN32
+#ifndef _WIN32
return false;
#else
HKEY hRootKey = HKEY_LOCAL_MACHINE;
@@ -961,7 +961,7 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
}
}
return returnValue;
-#endif // USE_WIN32
+#endif // _WIN32
}
// Find the most recent version of Universal CRT or Windows 10 SDK.
@@ -992,7 +992,7 @@ static bool getWindows10SDKVersionFromPath(const std::string &SDKPath,
return !SDKVersion.empty();
}
-/// \brief Get Windows SDK installation directory.
+/// Get Windows SDK installation directory.
static bool getWindowsSDKDir(std::string &Path, int &Major,
std::string &WindowsSDKIncludeVersion,
std::string &WindowsSDKLibVersion) {
@@ -1122,7 +1122,7 @@ static VersionTuple getMSVCVersionFromTriple(const llvm::Triple &Triple) {
static VersionTuple getMSVCVersionFromExe(const std::string &BinDir) {
VersionTuple Version;
-#ifdef USE_WIN32
+#ifdef _WIN32
SmallString<128> ClExe(BinDir);
llvm::sys::path::append(ClExe, "cl.exe");
@@ -1236,7 +1236,7 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
}
-#if defined(LLVM_ON_WIN32)
+#if defined(_WIN32)
// As a fallback, select default install paths.
// FIXME: Don't guess drives and paths like this on Windows.
const StringRef Paths[] = {
@@ -1298,6 +1298,7 @@ MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
SanitizerMask MSVCToolChain::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res &= ~SanitizerKind::CFIMFCall;
return Res;
}
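
All three arch-name tables in MSVC.cpp gain the same arm64 row. A condensed,
runnable model of the Windows SDK variant (the x86 row comes from the
unchanged part of the function; the arm64 row is the addition):

    #include <cstdio>

    enum class ArchType { x86, x86_64, arm, aarch64, other };

    // Condensed llvmArchToWindowsSDKArch with the new arm64 mapping.
    static const char *windowsSDKArch(ArchType Arch) {
      switch (Arch) {
      case ArchType::x86:     return "x86";
      case ArchType::x86_64:  return "x64";
      case ArchType::arm:     return "arm";
      case ArchType::aarch64: return "arm64"; // added by this patch
      default:                return "";
      }
    }

    int main() { std::printf("%s\n", windowsSDKArch(ArchType::aarch64)); }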
diff --git a/lib/Driver/ToolChains/MSVC.h b/lib/Driver/ToolChains/MSVC.h
index 854f88a36fd2..1db589ec9706 100644
--- a/lib/Driver/ToolChains/MSVC.h
+++ b/lib/Driver/ToolChains/MSVC.h
@@ -110,7 +110,7 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
bool getWindowsSDKLibraryPath(std::string &path) const;
- /// \brief Check if Universal CRT should be used if available
+ /// Check if Universal CRT should be used if available
bool getUniversalCRTLibraryPath(std::string &path) const;
bool useUniversalCRT() const;
VersionTuple
@@ -123,6 +123,8 @@ public:
void printVerboseInfo(raw_ostream &OS) const override;
+ bool FoundMSVCInstall() const { return !VCToolChainPath.empty(); }
+
protected:
void AddSystemIncludeWithSubfolder(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
diff --git a/lib/Driver/ToolChains/MinGW.cpp b/lib/Driver/ToolChains/MinGW.cpp
index 572ea803f2dc..a88e00f0c8e8 100644
--- a/lib/Driver/ToolChains/MinGW.cpp
+++ b/lib/Driver/ToolChains/MinGW.cpp
@@ -83,7 +83,7 @@ void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
CmdArgs.push_back("-lmoldname");
CmdArgs.push_back("-lmingwex");
for (auto Lib : Args.getAllArgValues(options::OPT_l))
- if (StringRef(Lib).startswith("msvcr") || Lib == "ucrtbase")
+ if (StringRef(Lib).startswith("msvcr") || StringRef(Lib).startswith("ucrt"))
return;
CmdArgs.push_back("-lmsvcrt");
}
@@ -141,22 +141,21 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("console");
}
+ if (Args.hasArg(options::OPT_mdll))
+ CmdArgs.push_back("--dll");
+ else if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("--shared");
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-Bstatic");
- else {
- if (Args.hasArg(options::OPT_mdll))
- CmdArgs.push_back("--dll");
- else if (Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("--shared");
+ else
CmdArgs.push_back("-Bdynamic");
- if (Args.hasArg(options::OPT_mdll) || Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-e");
- if (TC.getArch() == llvm::Triple::x86)
- CmdArgs.push_back("_DllMainCRTStartup@12");
- else
- CmdArgs.push_back("DllMainCRTStartup");
- CmdArgs.push_back("--enable-auto-image-base");
- }
+ if (Args.hasArg(options::OPT_mdll) || Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-e");
+ if (TC.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("_DllMainCRTStartup@12");
+ else
+ CmdArgs.push_back("DllMainCRTStartup");
+ CmdArgs.push_back("--enable-auto-image-base");
}
CmdArgs.push_back("-o");
@@ -202,6 +201,14 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-Bdynamic");
}
+ bool HasWindowsApp = false;
+ for (auto Lib : Args.getAllArgValues(options::OPT_l)) {
+ if (Lib == "windowsapp") {
+ HasWindowsApp = true;
+ break;
+ }
+ }
+
if (!Args.hasArg(options::OPT_nostdlib)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
if (Args.hasArg(options::OPT_static))
@@ -224,15 +231,19 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
- // add system libraries
- if (Args.hasArg(options::OPT_mwindows)) {
- CmdArgs.push_back("-lgdi32");
- CmdArgs.push_back("-lcomdlg32");
+ if (!HasWindowsApp) {
+ // Add system libraries. If linking to libwindowsapp.a, that import
+ // library replaces all these and we shouldn't accidentally try to
+ // link to the normal desktop mode dlls.
+ if (Args.hasArg(options::OPT_mwindows)) {
+ CmdArgs.push_back("-lgdi32");
+ CmdArgs.push_back("-lcomdlg32");
+ }
+ CmdArgs.push_back("-ladvapi32");
+ CmdArgs.push_back("-lshell32");
+ CmdArgs.push_back("-luser32");
+ CmdArgs.push_back("-lkernel32");
}
- CmdArgs.push_back("-ladvapi32");
- CmdArgs.push_back("-lshell32");
- CmdArgs.push_back("-luser32");
- CmdArgs.push_back("-lkernel32");
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("--end-group");
@@ -276,7 +287,8 @@ void toolchains::MinGW::findGccLibDir() {
Archs.emplace_back(getTriple().getArchName());
Archs[0] += "-w64-mingw32";
Archs.emplace_back("mingw32");
- Arch = Archs[0].str();
+ if (Arch.empty())
+ Arch = Archs[0].str();
// lib: Arch Linux, Ubuntu, Windows
// lib64: openSUSE Linux
for (StringRef CandidateLib : {"lib", "lib64"}) {
@@ -303,6 +315,23 @@ llvm::ErrorOr<std::string> toolchains::MinGW::findGcc() {
return make_error_code(std::errc::no_such_file_or_directory);
}
+llvm::ErrorOr<std::string> toolchains::MinGW::findClangRelativeSysroot() {
+ llvm::SmallVector<llvm::SmallString<32>, 2> Subdirs;
+ Subdirs.emplace_back(getTriple().str());
+ Subdirs.emplace_back(getTriple().getArchName());
+ Subdirs[1] += "-w64-mingw32";
+ StringRef ClangRoot =
+ llvm::sys::path::parent_path(getDriver().getInstalledDir());
+ StringRef Sep = llvm::sys::path::get_separator();
+ for (StringRef CandidateSubdir : Subdirs) {
+ if (llvm::sys::fs::is_directory(ClangRoot + Sep + CandidateSubdir)) {
+ Arch = CandidateSubdir;
+ return (ClangRoot + Sep + CandidateSubdir).str();
+ }
+ }
+ return make_error_code(std::errc::no_such_file_or_directory);
+}
+
toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args) {
@@ -310,6 +339,10 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
if (getDriver().SysRoot.size())
Base = getDriver().SysRoot;
+ // Look for <clang-bin>/../<triplet>; if found, use <clang-bin>/.. as the
+ // base as it could still be a base for a gcc setup with libgcc.
+ else if (llvm::ErrorOr<std::string> TargetSubdir = findClangRelativeSysroot())
+ Base = llvm::sys::path::parent_path(TargetSubdir.get());
else if (llvm::ErrorOr<std::string> GPPName = findGcc())
Base = llvm::sys::path::parent_path(
llvm::sys::path::parent_path(GPPName.get()));
@@ -454,11 +487,14 @@ void toolchains::MinGW::AddClangCXXStdlibIncludeArgs(
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
+ StringRef Slash = llvm::sys::path::get_separator();
+
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args, Base + Arch + Slash + "include" +
+ Slash + "c++" + Slash + "v1");
addSystemInclude(DriverArgs, CC1Args,
- Base + "include" + llvm::sys::path::get_separator() +
- "c++" + llvm::sys::path::get_separator() + "v1");
+ Base + "include" + Slash + "c++" + Slash + "v1");
break;
case ToolChain::CST_Libstdcxx:
@@ -473,7 +509,7 @@ void toolchains::MinGW::AddClangCXXStdlibIncludeArgs(
llvm::sys::path::append(CppIncludeBases[3], "include", "c++");
for (auto &CppIncludeBase : CppIncludeBases) {
addSystemInclude(DriverArgs, CC1Args, CppIncludeBase);
- CppIncludeBase += llvm::sys::path::get_separator();
+ CppIncludeBase += Slash;
addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + Arch);
addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + "backward");
}
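
findClangRelativeSysroot above probes two candidate directory names next to
the clang installation and records which one matched in Arch. A standalone
sketch of the same probe using std::filesystem (the directory layout is an
assumption about how such toolchains are packaged, not something the patch
mandates):

    #include <filesystem>
    #include <optional>
    #include <string>

    namespace fs = std::filesystem;

    // Probe <clang-root>/<triple>, then <clang-root>/<arch>-w64-mingw32,
    // mirroring the lookup order of findClangRelativeSysroot.
    std::optional<fs::path> findRelativeSysroot(const fs::path &InstalledBinDir,
                                                const std::string &Triple,
                                                const std::string &Arch) {
      const fs::path Root = InstalledBinDir.parent_path();
      for (const std::string &Sub : {Triple, Arch + "-w64-mingw32"}) {
        if (fs::is_directory(Root / Sub))
          return Root / Sub;
      }
      return std::nullopt;
    }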
diff --git a/lib/Driver/ToolChains/MinGW.h b/lib/Driver/ToolChains/MinGW.h
index f8dbcae62756..0c3919d29f77 100644
--- a/lib/Driver/ToolChains/MinGW.h
+++ b/lib/Driver/ToolChains/MinGW.h
@@ -96,6 +96,7 @@ private:
mutable std::unique_ptr<tools::gcc::Compiler> Compiler;
void findGccLibDir();
llvm::ErrorOr<std::string> findGcc();
+ llvm::ErrorOr<std::string> findClangRelativeSysroot();
};
} // end namespace toolchains
diff --git a/lib/Driver/ToolChains/MipsLinux.cpp b/lib/Driver/ToolChains/MipsLinux.cpp
index b394208336ed..9f23996b764a 100644
--- a/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/lib/Driver/ToolChains/MipsLinux.cpp
@@ -10,7 +10,6 @@
#include "MipsLinux.h"
#include "Arch/Mips.h"
#include "CommonArgs.h"
-#include "clang/Config/config.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
@@ -94,16 +93,18 @@ MipsLLVMToolChain::GetCXXStdlibType(const ArgList &Args) const {
return ToolChain::CST_Libcxx;
}
-std::string MipsLLVMToolChain::findLibCxxIncludePath() const {
+void MipsLLVMToolChain::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
if (const auto &Callback = Multilibs.includeDirsCallback()) {
for (std::string Path : Callback(SelectedMultilib)) {
Path = getDriver().getInstalledDir() + Path + "/c++/v1";
if (llvm::sys::fs::exists(Path)) {
- return Path;
+ addSystemInclude(DriverArgs, CC1Args, Path);
+ return;
}
}
}
- return "";
}
void MipsLLVMToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
diff --git a/lib/Driver/ToolChains/MipsLinux.h b/lib/Driver/ToolChains/MipsLinux.h
index fa82efbbfc8f..d4b476d883e6 100644
--- a/lib/Driver/ToolChains/MipsLinux.h
+++ b/lib/Driver/ToolChains/MipsLinux.h
@@ -31,7 +31,9 @@ public:
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/lib/Driver/ToolChains/Myriad.cpp b/lib/Driver/ToolChains/Myriad.cpp
index 06079b109dd1..2b4c1d165576 100644
--- a/lib/Driver/ToolChains/Myriad.cpp
+++ b/lib/Driver/ToolChains/Myriad.cpp
@@ -107,7 +107,6 @@ void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(std::string("-i:") + A->getValue(0)));
}
- CmdArgs.push_back("-elf"); // Output format.
CmdArgs.push_back(II.getFilename());
CmdArgs.push_back(
Args.MakeArgString(std::string("-o:") + Output.getFilename()));
@@ -243,9 +242,11 @@ void MyriadToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
}
-std::string MyriadToolChain::findLibCxxIncludePath() const {
+void MyriadToolChain::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
std::string Path(getDriver().getInstalledDir());
- return Path + "/../include/c++/v1";
+ addSystemInclude(DriverArgs, CC1Args, Path + "/../include/c++/v1");
}
void MyriadToolChain::addLibStdCxxIncludePaths(
diff --git a/lib/Driver/ToolChains/Myriad.h b/lib/Driver/ToolChains/Myriad.h
index 4c213c726219..33307c3f871a 100644
--- a/lib/Driver/ToolChains/Myriad.h
+++ b/lib/Driver/ToolChains/Myriad.h
@@ -76,7 +76,9 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/lib/Driver/ToolChains/NaCl.cpp b/lib/Driver/ToolChains/NaCl.cpp
index 128478d63871..89a18944c319 100644
--- a/lib/Driver/ToolChains/NaCl.cpp
+++ b/lib/Driver/ToolChains/NaCl.cpp
@@ -309,25 +309,31 @@ void NaClToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back("-lc++");
}
-std::string NaClToolChain::findLibCxxIncludePath() const {
+void NaClToolChain::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
SmallString<128> P(D.Dir + "/../");
switch (getTriple().getArch()) {
+ default:
+ break;
case llvm::Triple::arm:
llvm::sys::path::append(P, "arm-nacl/include/c++/v1");
- return P.str();
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
case llvm::Triple::x86:
llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1");
- return P.str();
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
case llvm::Triple::x86_64:
llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1");
- return P.str();
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
case llvm::Triple::mipsel:
llvm::sys::path::append(P, "mipsel-nacl/include/c++/v1");
- return P.str();
- default:
- return "";
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ break;
}
}
diff --git a/lib/Driver/ToolChains/NaCl.h b/lib/Driver/ToolChains/NaCl.h
index 31af3a53ad3c..e0885b526d70 100644
--- a/lib/Driver/ToolChains/NaCl.h
+++ b/lib/Driver/ToolChains/NaCl.h
@@ -53,7 +53,9 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
diff --git a/lib/Driver/ToolChains/NetBSD.cpp b/lib/Driver/ToolChains/NetBSD.cpp
index 0db6578f7407..02caafda1657 100644
--- a/lib/Driver/ToolChains/NetBSD.cpp
+++ b/lib/Driver/ToolChains/NetBSD.cpp
@@ -64,11 +64,10 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mabi");
CmdArgs.push_back(mips::getGnuCompatibleMipsABIName(ABIName).data());
- if (getToolChain().getArch() == llvm::Triple::mips ||
- getToolChain().getArch() == llvm::Triple::mips64)
- CmdArgs.push_back("-EB");
- else
+ if (getToolChain().getTriple().isLittleEndian())
CmdArgs.push_back("-EL");
+ else
+ CmdArgs.push_back("-EB");
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
@@ -112,7 +111,9 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const Driver &D = getToolChain().getDriver();
+ const toolchains::NetBSD &ToolChain =
+ static_cast<const toolchains::NetBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
ArgStringList CmdArgs;
if (!D.SysRoot.empty())
@@ -121,6 +122,10 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--eh-frame-hdr");
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-Bstatic");
+ if (Args.hasArg(options::OPT_pie)) {
+ Args.AddAllArgs(CmdArgs, options::OPT_pie);
+ CmdArgs.push_back("--no-dynamic-linker");
+ }
} else {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
@@ -135,7 +140,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Many NetBSD architectures support more than one ABI.
// Determine the correct emulation for ld.
- switch (getToolChain().getArch()) {
+ switch (ToolChain.getArch()) {
case llvm::Triple::x86:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386");
@@ -143,7 +148,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::arm:
case llvm::Triple::thumb:
CmdArgs.push_back("-m");
- switch (getToolChain().getTriple().getEnvironment()) {
+ switch (ToolChain.getTriple().getEnvironment()) {
case llvm::Triple::EABI:
case llvm::Triple::GNUEABI:
CmdArgs.push_back("armelf_nbsd_eabi");
@@ -159,9 +164,9 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- arm::appendEBLinkFlags(Args, CmdArgs, getToolChain().getEffectiveTriple());
+ arm::appendEBLinkFlags(Args, CmdArgs, ToolChain.getEffectiveTriple());
CmdArgs.push_back("-m");
- switch (getToolChain().getTriple().getEnvironment()) {
+ switch (ToolChain.getTriple().getEnvironment()) {
case llvm::Triple::EABI:
case llvm::Triple::GNUEABI:
CmdArgs.push_back("armelfb_nbsd_eabi");
@@ -179,13 +184,13 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::mips64el:
if (mips::hasMipsAbiArg(Args, "32")) {
CmdArgs.push_back("-m");
- if (getToolChain().getArch() == llvm::Triple::mips64)
+ if (ToolChain.getArch() == llvm::Triple::mips64)
CmdArgs.push_back("elf32btsmip");
else
CmdArgs.push_back("elf32ltsmip");
} else if (mips::hasMipsAbiArg(Args, "64")) {
CmdArgs.push_back("-m");
- if (getToolChain().getArch() == llvm::Triple::mips64)
+ if (ToolChain.getArch() == llvm::Triple::mips64)
CmdArgs.push_back("elf64btsmip");
else
CmdArgs.push_back("elf64ltsmip");
@@ -226,16 +231,16 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
+ Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
}
CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie)) {
CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ Args.MakeArgString(ToolChain.GetFilePath("crtbeginS.o")));
} else {
CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
}
}
@@ -248,13 +253,14 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_r);
bool NeedsSanitizerDeps = addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
unsigned Major, Minor, Micro;
- getToolChain().getTriple().getOSVersion(Major, Minor, Micro);
+ ToolChain.getTriple().getOSVersion(Major, Minor, Micro);
bool useLibgcc = true;
if (Major >= 7 || Major == 0) {
- switch (getToolChain().getArch()) {
+ switch (ToolChain.getArch()) {
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
case llvm::Triple::arm:
@@ -278,12 +284,14 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
addOpenMPRuntime(CmdArgs, getToolChain(), Args);
if (D.CCCIsCXX()) {
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
if (NeedsSanitizerDeps)
linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
+ if (NeedsXRayDeps)
+ linkXRayRuntimeDeps(ToolChain, CmdArgs);
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
CmdArgs.push_back("-lc");
@@ -308,16 +316,16 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
else
CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
- getToolChain().addProfileRTLibs(Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
- const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
@@ -406,8 +414,10 @@ ToolChain::CXXStdlibType NetBSD::GetDefaultCXXStdlibType() const {
return ToolChain::CST_Libstdcxx;
}
-std::string NetBSD::findLibCxxIncludePath() const {
- return getDriver().SysRoot + "/usr/include/c++/";
+void NetBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/");
}
void NetBSD::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
diff --git a/lib/Driver/ToolChains/NetBSD.h b/lib/Driver/ToolChains/NetBSD.h
index e98df72ce65c..49e3a58d02c3 100644
--- a/lib/Driver/ToolChains/NetBSD.h
+++ b/lib/Driver/ToolChains/NetBSD.h
@@ -60,7 +60,9 @@ public:
CXXStdlibType GetDefaultCXXStdlibType() const override;
- std::string findLibCxxIncludePath() const override;
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/lib/Driver/ToolChains/OpenBSD.cpp b/lib/Driver/ToolChains/OpenBSD.cpp
index fbb84a62ca89..7b98cd62bbfc 100644
--- a/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/lib/Driver/ToolChains/OpenBSD.cpp
@@ -13,6 +13,7 @@
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
using namespace clang::driver;
@@ -67,10 +68,10 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mabi");
CmdArgs.push_back(mips::getGnuCompatibleMipsABIName(ABIName).data());
- if (getToolChain().getArch() == llvm::Triple::mips64)
- CmdArgs.push_back("-EB");
- else
+ if (getToolChain().getTriple().isLittleEndian())
CmdArgs.push_back("-EL");
+ else
+ CmdArgs.push_back("-EB");
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
@@ -97,6 +98,8 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ const toolchains::OpenBSD &ToolChain =
+ static_cast<const toolchains::OpenBSD &>(getToolChain());
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
@@ -170,11 +173,14 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Triple.replace(0, 6, "amd64");
CmdArgs.push_back(
Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1"));
+ CmdArgs.push_back(Args.MakeArgString("-L/usr/lib"));
Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
options::OPT_e, options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -186,7 +192,14 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-lm");
}
-
+ if (NeedsSanitizerDeps) {
+ CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins", false));
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ }
+ if (NeedsXRayDeps) {
+ CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins", false));
+ linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ }
// FIXME: For some reason GCC passes -lgcc before adding
// the default system libraries. Just mimic this for now.
CmdArgs.push_back("-lgcc");
@@ -217,10 +230,28 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
}
- const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ const char *Exec = Args.MakeArgString(
+ !NeedsSanitizerDeps ? getToolChain().GetLinkerPath()
+ : getToolChain().GetProgramPath("ld.lld"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
+SanitizerMask OpenBSD::getSupportedSanitizers() const {
+ const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
+ const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
+
+ // For future use; only UBSan is supported at the moment.
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+
+ if (IsX86 || IsX86_64) {
+ Res |= SanitizerKind::Vptr;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
+ }
+
+ return Res;
+}
+
/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
@@ -230,6 +261,14 @@ OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back("/usr/lib");
}
+void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ bool Profiling = Args.hasArg(options::OPT_pg);
+
+ CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
+ CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
+}
+
Tool *OpenBSD::buildAssembler() const {
return new tools::openbsd::Assembler(*this);
}
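
The new OpenBSD::AddCXXStdlibLibArgs selects the profiled _p variants of
both libraries when -pg is in effect. As a small sketch:

    #include <string>
    #include <vector>

    // Mirror of OpenBSD::AddCXXStdlibLibArgs: profiled builds (-pg) link
    // the _p variants of libc++ and libc++abi.
    void addCXXStdlibArgs(bool Profiling, std::vector<std::string> &CmdArgs) {
      CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
      CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
    }

    int main() {
      std::vector<std::string> Args;
      addCXXStdlibArgs(/*Profiling=*/true, Args); // -lc++_p -lc++abi_p
    }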
diff --git a/lib/Driver/ToolChains/OpenBSD.h b/lib/Driver/ToolChains/OpenBSD.h
index 1cc0ca71984a..bf8dfa4653cb 100644
--- a/lib/Driver/ToolChains/OpenBSD.h
+++ b/lib/Driver/ToolChains/OpenBSD.h
@@ -58,12 +58,16 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
bool isPIEDefault() const override { return true; }
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
return 2;
}
unsigned GetDefaultDwarfVersion() const override { return 2; }
+ SanitizerMask getSupportedSanitizers() const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/lib/Driver/ToolChains/PS4CPU.cpp b/lib/Driver/ToolChains/PS4CPU.cpp
index b37fe7d1f9b9..a4b74d492331 100644
--- a/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/lib/Driver/ToolChains/PS4CPU.cpp
@@ -76,6 +76,15 @@ static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
}
}
+void tools::PS4cpu::addSanitizerArgs(const ToolChain &TC,
+ ArgStringList &CmdArgs) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ if (SanArgs.needsUbsanRt())
+ CmdArgs.push_back("--dependent-lib=libSceDbgUBSanitizer_stub_weak.a");
+ if (SanArgs.needsAsanRt())
+ CmdArgs.push_back("--dependent-lib=libSceDbgAddressSanitizer_stub_weak.a");
+}
+
static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
const JobAction &JA, const InputInfo &Output,
const InputInfoList &Inputs,
@@ -303,7 +312,7 @@ static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
}
const char *Exec =
-#ifdef LLVM_ON_WIN32
+#ifdef _WIN32
Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld.gold"));
#else
Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
diff --git a/lib/Driver/ToolChains/PS4CPU.h b/lib/Driver/ToolChains/PS4CPU.h
index e507edbad4d5..bd0a44352f4d 100644
--- a/lib/Driver/ToolChains/PS4CPU.h
+++ b/lib/Driver/ToolChains/PS4CPU.h
@@ -23,6 +23,8 @@ namespace PS4cpu {
void addProfileRTArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
+void addSanitizerArgs(const ToolChain &TC, llvm::opt::ArgStringList &CmdArgs);
+
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
Assemble(const ToolChain &TC)
@@ -61,7 +63,9 @@ public:
const llvm::opt::ArgList &Args);
// No support for finding a C++ standard library yet.
- std::string findLibCxxIncludePath() const override { return ""; }
+ void addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override {}
diff --git a/lib/Driver/ToolChains/Solaris.cpp b/lib/Driver/ToolChains/Solaris.cpp
index 9fe6e9d520d0..b48edbb08ee6 100644
--- a/lib/Driver/ToolChains/Solaris.cpp
+++ b/lib/Driver/ToolChains/Solaris.cpp
@@ -71,6 +71,11 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("ld.so.1")));
}
+
+ // libpthread has been folded into libc since Solaris 10, so there is no
+ // need to do anything for pthreads. Claim the arguments to avoid a warning.
+ Args.ClaimAllArgs(options::OPT_pthread);
+ Args.ClaimAllArgs(options::OPT_pthreads);
}
if (Output.isFilename()) {
@@ -92,24 +97,48 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
}
+ // Provide __start___sancov_guards. Solaris ld doesn't automatically create
+ // __start_SECNAME labels.
+ CmdArgs.push_back("--whole-archive");
+ CmdArgs.push_back(
+ getToolChain().getCompilerRTArgString(Args, "sancov_begin", false));
+ CmdArgs.push_back("--no-whole-archive");
+
getToolChain().AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
options::OPT_e, options::OPT_r});
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (getToolChain().ShouldLinkCXXStdlib(Args))
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_fstack_protector) ||
+ Args.hasArg(options::OPT_fstack_protector_strong) ||
+ Args.hasArg(options::OPT_fstack_protector_all)) {
+ // Explicitly link ssp libraries, not folded into Solaris libc.
+ CmdArgs.push_back("-lssp_nonshared");
+ CmdArgs.push_back("-lssp");
+ }
CmdArgs.push_back("-lgcc_s");
CmdArgs.push_back("-lc");
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-lgcc");
CmdArgs.push_back("-lm");
}
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
}
+ // Provide __stop___sancov_guards. Solaris ld doesn't automatically create
+ // __stop_SECNAME labels.
+ CmdArgs.push_back("--whole-archive");
+ CmdArgs.push_back(
+ getToolChain().getCompilerRTArgString(Args, "sancov_end", false));
+ CmdArgs.push_back("--no-whole-archive");
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
CmdArgs.push_back(
Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
@@ -122,6 +151,21 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
+static StringRef getSolarisLibSuffix(const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::sparc:
+ break;
+ case llvm::Triple::x86_64:
+ return "/amd64";
+ case llvm::Triple::sparcv9:
+ return "/sparcv9";
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+ return "";
+}
+
/// Solaris - Solaris tool chain which can call as(1) and ld(1) directly.
Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
@@ -130,32 +174,35 @@ Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
GCCInstallation.init(Triple, Args);
+ StringRef LibSuffix = getSolarisLibSuffix(Triple);
path_list &Paths = getFilePaths();
- if (GCCInstallation.isValid())
- addPathIfExists(D, GCCInstallation.getInstallPath(), Paths);
+ if (GCCInstallation.isValid()) {
+ // On Solaris gcc uses both an architecture-specific path with triple in it
+ // as well as a more generic lib path (+arch suffix).
+ addPathIfExists(D,
+ GCCInstallation.getInstallPath() +
+ GCCInstallation.getMultilib().gccSuffix(),
+ Paths);
+ addPathIfExists(D, GCCInstallation.getParentLibPath() + LibSuffix, Paths);
+ }
- addPathIfExists(D, getDriver().getInstalledDir(), Paths);
- if (getDriver().getInstalledDir() != getDriver().Dir)
- addPathIfExists(D, getDriver().Dir, Paths);
+ // If we are currently running Clang inside of the requested system root,
+ // add its parent library path to those searched.
+ if (StringRef(D.Dir).startswith(D.SysRoot))
+ addPathIfExists(D, D.Dir + "/../lib", Paths);
- addPathIfExists(D, getDriver().SysRoot + getDriver().Dir + "/../lib", Paths);
+ addPathIfExists(D, D.SysRoot + "/usr/lib" + LibSuffix, Paths);
+}
- std::string LibPath = "/usr/lib/";
- switch (Triple.getArch()) {
- case llvm::Triple::x86:
- case llvm::Triple::sparc:
- break;
- case llvm::Triple::x86_64:
- LibPath += "amd64/";
- break;
- case llvm::Triple::sparcv9:
- LibPath += "sparcv9/";
- break;
- default:
- llvm_unreachable("Unsupported architecture");
+SanitizerMask Solaris::getSupportedSanitizers() const {
+ const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ // FIXME: Omit X86_64 until 64-bit support is figured out.
+ if (IsX86) {
+ Res |= SanitizerKind::Address;
}
-
- addPathIfExists(D, getDriver().SysRoot + LibPath, Paths);
+ Res |= SanitizerKind::Vptr;
+ return Res;
}
Tool *Solaris::buildAssembler() const {
@@ -164,30 +211,72 @@ Tool *Solaris::buildAssembler() const {
Tool *Solaris::buildLinker() const { return new tools::solaris::Linker(*this); }
-void Solaris::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
+void Solaris::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
+ addSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/local/include");
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
- // Include the support directory for things like xlocale and fudged system
- // headers.
- // FIXME: This is a weird mix of libc++ and libstdc++. We should also be
- // checking the value of -stdlib= here and adding the includes for libc++
- // rather than libstdc++ if it's requested.
- addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/v1/support/solaris");
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+ // Add include directories specific to the selected multilib set and multilib.
if (GCCInstallation.isValid()) {
- GCCVersion Version = GCCInstallation.getVersion();
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/gcc/" +
- Version.MajorStr + "." +
- Version.MinorStr +
- "/include/c++/" + Version.Text);
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/gcc/" + Version.MajorStr +
- "." + Version.MinorStr + "/include/c++/" +
- Version.Text + "/" +
- GCCInstallation.getTriple().str());
+ const MultilibSet::IncludeDirsFunc &Callback =
+ Multilibs.includeDirsCallback();
+ if (Callback) {
+ for (const auto &Path : Callback(GCCInstallation.getMultilib()))
+ addExternCSystemIncludeIfExists(
+ DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
+ }
}
+
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/include");
+}
+
+void Solaris::addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // We need a detected GCC installation on Solaris (similar to Linux)
+ // to provide libstdc++'s headers.
+ if (!GCCInstallation.isValid())
+ return;
+
+ // By default, look for the C++ headers in an include directory adjacent to
+ // the lib directory of the GCC installation.
+ // On Solaris this usually looks like /usr/gcc/X.Y/include/c++/X.Y.Z
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ const GCCVersion &Version = GCCInstallation.getVersion();
+
+ // The primary search for libstdc++ supports multiarch variants.
+ addLibStdCXXIncludePaths(LibDir.str() + "/../include", "/c++/" + Version.Text,
+ TripleStr,
+ /*GCCMultiarchTriple*/ "",
+ /*TargetMultiarchTriple*/ "",
+ Multilib.includeSuffix(), DriverArgs, CC1Args);
}
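
getSolarisLibSuffix folds the switch that the old constructor carried inline
into one helper; note the suffix now starts with a slash and has no trailing
one, so it composes directly with "/usr/lib" and the GCC parent lib path.
A runnable check of the mapping:

    #include <cassert>
    #include <string>

    enum class SolArch { x86, sparc, x86_64, sparcv9 };

    // Condensed getSolarisLibSuffix: 32-bit archs use the bare lib
    // directory, 64-bit archs get an arch-specific subdirectory.
    static std::string solarisLibSuffix(SolArch Arch) {
      switch (Arch) {
      case SolArch::x86:
      case SolArch::sparc:   return "";
      case SolArch::x86_64:  return "/amd64";
      case SolArch::sparcv9: return "/sparcv9";
      }
      return "";
    }

    int main() {
      assert("/usr/lib" + solarisLibSuffix(SolArch::x86_64) == "/usr/lib/amd64");
    }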
diff --git a/lib/Driver/ToolChains/Solaris.h b/lib/Driver/ToolChains/Solaris.h
index 787917afab6e..9e14269b393e 100644
--- a/lib/Driver/ToolChains/Solaris.h
+++ b/lib/Driver/ToolChains/Solaris.h
@@ -57,10 +57,15 @@ public:
bool IsIntegratedAssemblerDefault() const override { return true; }
- void AddClangCXXStdlibIncludeArgs(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
unsigned GetDefaultDwarfVersion() const override { return 2; }
protected:
diff --git a/lib/Driver/ToolChains/WebAssembly.cpp b/lib/Driver/ToolChains/WebAssembly.cpp
index 8ae1b6c2f55d..94f7279bbdba 100644
--- a/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/lib/Driver/ToolChains/WebAssembly.cpp
@@ -11,6 +11,7 @@
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
@@ -62,8 +63,6 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-allow-undefined-file");
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("wasm.syms")));
CmdArgs.push_back("-lc");
AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args);
}
@@ -119,6 +118,12 @@ ToolChain::RuntimeLibType WebAssembly::GetDefaultRuntimeLibType() const {
}
ToolChain::CXXStdlibType WebAssembly::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "libc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
return ToolChain::CST_Libcxx;
}
@@ -136,6 +141,19 @@ void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
getDriver().SysRoot + "/include/c++/v1");
}
+void WebAssembly::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ llvm_unreachable("invalid stdlib name");
+ }
+}
+
std::string WebAssembly::getThreadModel() const {
// The WebAssembly MVP does not yet support threads; for now, use the
// "single" threading model, which lowers atomics to non-atomic operations.
diff --git a/lib/Driver/ToolChains/WebAssembly.h b/lib/Driver/ToolChains/WebAssembly.h
index 8784e12dfb0e..cdbb34ff919f 100644
--- a/lib/Driver/ToolChains/WebAssembly.h
+++ b/lib/Driver/ToolChains/WebAssembly.h
@@ -62,6 +62,8 @@ private:
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
std::string getThreadModel() const override;
const char *getDefaultLinker() const override {
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index ab63f0e81b12..45bb699cfb88 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -102,6 +102,9 @@ bool types::isAcceptedByClang(ID Id) {
case TY_CL:
case TY_CUDA: case TY_PP_CUDA:
case TY_CUDA_DEVICE:
+ case TY_HIP:
+ case TY_PP_HIP:
+ case TY_HIP_DEVICE:
case TY_ObjC: case TY_PP_ObjC: case TY_PP_ObjC_Alias:
case TY_CXX: case TY_PP_CXX:
case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
@@ -141,6 +144,9 @@ bool types::isCXX(ID Id) {
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_CXXModule: case TY_PP_CXXModule:
case TY_CUDA: case TY_PP_CUDA: case TY_CUDA_DEVICE:
+ case TY_HIP:
+ case TY_PP_HIP:
+ case TY_HIP_DEVICE:
return true;
}
}
@@ -170,6 +176,18 @@ bool types::isCuda(ID Id) {
}
}
+bool types::isHIP(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_HIP:
+ case TY_PP_HIP:
+ case TY_HIP_DEVICE:
+ return true;
+ }
+}
+
bool types::isSrcFile(ID Id) {
return Id != TY_Object && getPreprocessedType(Id) != TY_INVALID;
}
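
HIP inputs get type IDs parallel to the CUDA ones, and the extension table
below maps .hip files to TY_HIP. A toy model of the new lookup row (only the
rows relevant here; the real table is the StringSwitch in the next hunk):

    #include <cassert>
    #include <map>
    #include <string>

    enum ID { TY_INVALID, TY_HIP, TY_CXX };

    // Toy extension table: "hip" is the row this patch adds.
    ID lookupTypeForExtension(const std::string &Ext) {
      static const std::map<std::string, ID> Table = {{"hip", TY_HIP},
                                                      {"cpp", TY_CXX}};
      auto It = Table.find(Ext);
      return It == Table.end() ? TY_INVALID : It->second;
    }

    int main() { assert(lookupTypeForExtension("hip") == TY_HIP); }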
@@ -221,6 +239,7 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("fpp", TY_Fortran)
.Case("FPP", TY_Fortran)
.Case("gch", TY_PCH)
+ .Case("hip", TY_HIP)
.Case("hpp", TY_CXXHeader)
.Case("iim", TY_PP_CXXModule)
.Case("lib", TY_Object)
diff --git a/lib/Driver/XRayArgs.cpp b/lib/Driver/XRayArgs.cpp
index 232bacd5f095..30b0e72760c9 100644
--- a/lib/Driver/XRayArgs.cpp
+++ b/lib/Driver/XRayArgs.cpp
@@ -27,6 +27,7 @@ namespace {
constexpr char XRayInstrumentOption[] = "-fxray-instrument";
constexpr char XRayInstructionThresholdOption[] =
"-fxray-instruction-threshold=";
+constexpr const char *const XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
} // namespace
XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
@@ -34,7 +35,7 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
const llvm::Triple &Triple = TC.getTriple();
if (Args.hasFlag(options::OPT_fxray_instrument,
options::OPT_fnoxray_instrument, false)) {
- if (Triple.getOS() == llvm::Triple::Linux)
+ if (Triple.getOS() == llvm::Triple::Linux) {
switch (Triple.getArch()) {
case llvm::Triple::x86_64:
case llvm::Triple::arm:
@@ -49,9 +50,17 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
- else
+ } else if (Triple.getOS() == llvm::Triple::FreeBSD ||
+ Triple.getOS() == llvm::Triple::OpenBSD ||
+ Triple.getOS() == llvm::Triple::NetBSD) {
+ if (Triple.getArch() != llvm::Triple::x86_64) {
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
+ } else {
D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on non-Linux target OS");
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
XRayInstrument = true;
if (const Arg *A =
Args.getLastArg(options::OPT_fxray_instruction_threshold_,
@@ -69,6 +78,44 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
options::OPT_fnoxray_always_emit_customevents, false))
XRayAlwaysEmitCustomEvents = true;
+ if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
+ options::OPT_fnoxray_always_emit_typedevents, false))
+ XRayAlwaysEmitTypedEvents = true;
+
+ if (!Args.hasFlag(options::OPT_fxray_link_deps,
+ options::OPT_fnoxray_link_deps, true))
+ XRayRT = false;
+
+ auto Bundles =
+ Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
+ if (Bundles.empty())
+ InstrumentationBundle.Mask = XRayInstrKind::All;
+ else
+ for (const auto &B : Bundles) {
+ llvm::SmallVector<StringRef, 2> BundleParts;
+ llvm::SplitString(B, BundleParts, ",");
+ for (const auto &P : BundleParts) {
+ // TODO: Automate the generation of the string case table.
+ auto Valid = llvm::StringSwitch<bool>(P)
+ .Cases("none", "all", "function", "custom", true)
+ .Default(false);
+
+ if (!Valid) {
+ D.Diag(clang::diag::err_drv_invalid_value)
+ << "-fxray-instrumentation-bundle=" << P;
+ continue;
+ }
+
+ auto Mask = parseXRayInstrValue(P);
+ if (Mask == XRayInstrKind::None) {
+ InstrumentationBundle.clear();
+ break;
+ }
+
+ InstrumentationBundle.Mask |= Mask;
+ }
+ }
+
// Validate the always/never attribute files. We also make sure that they
// are treated as actual dependencies.
for (const auto &Filename :
@@ -88,6 +135,37 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
} else
D.Diag(clang::diag::err_drv_no_such_file) << Filename;
}
+
+ for (const auto &Filename :
+ Args.getAllArgValues(options::OPT_fxray_attr_list)) {
+ if (llvm::sys::fs::exists(Filename)) {
+ AttrListFiles.push_back(Filename);
+ ExtraDeps.push_back(Filename);
+ } else
+ D.Diag(clang::diag::err_drv_no_such_file) << Filename;
+ }
+
+ // Get the list of modes we want to support.
+ auto SpecifiedModes = Args.getAllArgValues(options::OPT_fxray_modes);
+ if (SpecifiedModes.empty())
+ llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
+ else
+ for (const auto &Arg : SpecifiedModes) {
+ // Parse CSV values for -fxray-modes=...
+ llvm::SmallVector<StringRef, 2> ModeParts;
+ llvm::SplitString(Arg, ModeParts, ",");
+ for (const auto &M : ModeParts)
+ if (M == "none")
+ Modes.clear();
+ else if (M == "all")
+ llvm::copy(XRaySupportedModes, std::back_inserter(Modes));
+ else
+ Modes.push_back(M);
+ }
+
+ // Then we want to sort and unique the modes we've collected.
+ llvm::sort(Modes.begin(), Modes.end());
+ Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
}
}
@@ -101,6 +179,9 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
if (XRayAlwaysEmitCustomEvents)
CmdArgs.push_back("-fxray-always-emit-customevents");
+ if (XRayAlwaysEmitTypedEvents)
+ CmdArgs.push_back("-fxray-always-emit-typedevents");
+
CmdArgs.push_back(Args.MakeArgString(Twine(XRayInstructionThresholdOption) +
Twine(InstructionThreshold)));
@@ -116,9 +197,21 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(NeverInstrumentOpt));
}
+ for (const auto &AttrFile : AttrListFiles) {
+ SmallString<64> AttrListFileOpt("-fxray-attr-list=");
+ AttrListFileOpt += AttrFile;
+ CmdArgs.push_back(Args.MakeArgString(AttrListFileOpt));
+ }
+
for (const auto &Dep : ExtraDeps) {
SmallString<64> ExtraDepOpt("-fdepfile-entry=");
ExtraDepOpt += Dep;
CmdArgs.push_back(Args.MakeArgString(ExtraDepOpt));
}
+
+ for (const auto &Mode : Modes) {
+ SmallString<64> ModeOpt("-fxray-modes=");
+ ModeOpt += Mode;
+ CmdArgs.push_back(Args.MakeArgString(ModeOpt));
+ }
}
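
The -fxray-modes= values are parsed as comma-separated lists where none
clears the accumulated set, all re-adds every supported mode, and the final
list is sorted and deduplicated before being forwarded to cc1. A standalone
sketch of that accumulation:

    #include <algorithm>
    #include <sstream>
    #include <string>
    #include <vector>

    static const char *const SupportedModes[] = {"xray-fdr", "xray-basic"};

    // Sketch of the -fxray-modes= accumulation logic above.
    std::vector<std::string> parseModes(const std::vector<std::string> &Specified) {
      std::vector<std::string> Modes;
      if (Specified.empty())
        Modes.assign(std::begin(SupportedModes), std::end(SupportedModes));
      for (const auto &Arg : Specified) {
        std::stringstream SS(Arg);
        std::string M;
        while (std::getline(SS, M, ',')) {
          if (M == "none")
            Modes.clear();
          else if (M == "all")
            Modes.insert(Modes.end(), std::begin(SupportedModes),
                         std::end(SupportedModes));
          else
            Modes.push_back(M);
        }
      }
      std::sort(Modes.begin(), Modes.end());
      Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
      return Modes;
    }

    int main() {
      auto M = parseModes({"xray-basic,none,all"}); // {"xray-basic", "xray-fdr"}
      (void)M;
    }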
diff --git a/lib/Edit/Commit.cpp b/lib/Edit/Commit.cpp
index cb7a784a41af..afc1a131eb25 100644
--- a/lib/Edit/Commit.cpp
+++ b/lib/Edit/Commit.cpp
@@ -1,4 +1,4 @@
-//===----- Commit.cpp - A unit of edits -----------------------------------===//
+//===- Commit.cpp - A unit of edits ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,10 +8,16 @@
//===----------------------------------------------------------------------===//
#include "clang/Edit/Commit.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/FileOffset.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/PPConditionalDirectiveRecord.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <utility>
using namespace clang;
using namespace edit;
@@ -36,9 +42,9 @@ CharSourceRange Commit::Edit::getInsertFromRange(SourceManager &SM) const {
}
Commit::Commit(EditedSource &Editor)
- : SourceMgr(Editor.getSourceManager()), LangOpts(Editor.getLangOpts()),
- PPRec(Editor.getPPCondDirectiveRecord()),
- Editor(&Editor), IsCommitable(true) { }
+ : SourceMgr(Editor.getSourceManager()), LangOpts(Editor.getLangOpts()),
+ PPRec(Editor.getPPCondDirectiveRecord()),
+ Editor(&Editor) {}
bool Commit::insert(SourceLocation loc, StringRef text,
bool afterToken, bool beforePreviousInsertions) {
@@ -225,8 +231,7 @@ bool Commit::canInsert(SourceLocation loc, FileOffset &offs) {
isAtStartOfMacroExpansion(loc, &loc);
const SourceManager &SM = SourceMgr;
- while (SM.isMacroArgExpansion(loc))
- loc = SM.getImmediateSpellingLoc(loc);
+ loc = SM.getTopMacroCallerLoc(loc);
if (loc.isMacroID())
if (!isAtStartOfMacroExpansion(loc, &loc))
@@ -256,8 +261,7 @@ bool Commit::canInsertAfterToken(SourceLocation loc, FileOffset &offs,
isAtEndOfMacroExpansion(loc, &loc);
const SourceManager &SM = SourceMgr;
- while (SM.isMacroArgExpansion(loc))
- loc = SM.getImmediateSpellingLoc(loc);
+ loc = SM.getTopMacroCallerLoc(loc);
if (loc.isMacroID())
if (!isAtEndOfMacroExpansion(loc, &loc))
@@ -278,14 +282,12 @@ bool Commit::canInsertAfterToken(SourceLocation loc, FileOffset &offs,
}
bool Commit::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
- for (unsigned i = 0, e = CachedEdits.size(); i != e; ++i) {
- Edit &act = CachedEdits[i];
+ for (const auto &act : CachedEdits)
if (act.Kind == Act_Remove) {
if (act.Offset.getFID() == Offs.getFID() &&
Offs > act.Offset && Offs < act.Offset.getWithOffset(act.Length))
return false; // position has been removed.
}
- }
if (!Editor)
return true;
@@ -340,6 +342,7 @@ bool Commit::isAtStartOfMacroExpansion(SourceLocation loc,
SourceLocation *MacroBegin) const {
return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts, MacroBegin);
}
+
bool Commit::isAtEndOfMacroExpansion(SourceLocation loc,
SourceLocation *MacroEnd) const {
return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd);
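
Both canInsert hunks above replace a hand-rolled loop with SourceManager::getTopMacroCallerLoc(), which folds the same macro-argument unwinding into a single call. A hedged sketch of the equivalence, assuming the clang Basic headers are on the include path (the helper name is invented):

#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"

// The loop the diff removes; getTopMacroCallerLoc() performs this
// unwinding internally.
static clang::SourceLocation
unwindMacroArgs(const clang::SourceManager &SM, clang::SourceLocation Loc) {
  while (SM.isMacroArgExpansion(Loc))
    Loc = SM.getImmediateSpellingLoc(Loc);
  return Loc;
}
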
diff --git a/lib/Edit/EditedSource.cpp b/lib/Edit/EditedSource.cpp
index 444d0393cccd..b38f8fd0d9cb 100644
--- a/lib/Edit/EditedSource.cpp
+++ b/lib/Edit/EditedSource.cpp
@@ -1,4 +1,4 @@
-//===----- EditedSource.cpp - Collection of source edits ------------------===//
+//===- EditedSource.cpp - Collection of source edits ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,12 +9,21 @@
#include "clang/Edit/EditedSource.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditsReceiver.h"
+#include "clang/Edit/FileOffset.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include <algorithm>
+#include <cassert>
+#include <tuple>
+#include <utility>
using namespace clang;
using namespace edit;
@@ -27,12 +36,14 @@ void EditedSource::deconstructMacroArgLoc(SourceLocation Loc,
SourceLocation &ExpansionLoc,
MacroArgUse &ArgUse) {
assert(SourceMgr.isMacroArgExpansion(Loc));
- SourceLocation DefArgLoc = SourceMgr.getImmediateExpansionRange(Loc).first;
+ SourceLocation DefArgLoc =
+ SourceMgr.getImmediateExpansionRange(Loc).getBegin();
SourceLocation ImmediateExpansionLoc =
- SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ SourceMgr.getImmediateExpansionRange(DefArgLoc).getBegin();
ExpansionLoc = ImmediateExpansionLoc;
while (SourceMgr.isMacroBodyExpansion(ExpansionLoc))
- ExpansionLoc = SourceMgr.getImmediateExpansionRange(ExpansionLoc).first;
+ ExpansionLoc =
+ SourceMgr.getImmediateExpansionRange(ExpansionLoc).getBegin();
SmallString<20> Buf;
StringRef ArgName = Lexer::getSpelling(SourceMgr.getSpellingLoc(DefArgLoc),
Buf, SourceMgr, LangOpts);
@@ -269,9 +280,11 @@ bool EditedSource::commit(const Commit &commit) {
struct CommitRAII {
EditedSource &Editor;
+
CommitRAII(EditedSource &Editor) : Editor(Editor) {
Editor.startingCommit();
}
+
~CommitRAII() {
Editor.finishedCommit();
}
@@ -298,7 +311,7 @@ bool EditedSource::commit(const Commit &commit) {
return true;
}
-// \brief Returns true if it is ok to make the two given characters adjacent.
+// Returns true if it is ok to make the two given characters adjacent.
static bool canBeJoined(char left, char right, const LangOptions &LangOpts) {
// FIXME: Should use TokenConcatenation to make sure we don't allow stuff like
// making two '<' adjacent.
@@ -306,7 +319,7 @@ static bool canBeJoined(char left, char right, const LangOptions &LangOpts) {
Lexer::isIdentifierBodyChar(right, LangOpts));
}
-/// \brief Returns true if it is ok to eliminate the trailing whitespace between
+/// Returns true if it is ok to eliminate the trailing whitespace between
/// the given characters.
static bool canRemoveWhitespace(char left, char beforeWSpace, char right,
const LangOptions &LangOpts) {
@@ -319,7 +332,7 @@ static bool canRemoveWhitespace(char left, char beforeWSpace, char right,
return true;
}
-/// \brief Check the range that we are going to remove and:
+/// Check the range that we are going to remove and:
/// -Remove any trailing whitespace if possible.
/// -Insert a space if removing the range is going to mess up the source tokens.
static void adjustRemoval(const SourceManager &SM, const LangOptions &LangOpts,
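
The deconstructMacroArgLoc hunk above is an API migration rather than a behavior change: getImmediateExpansionRange() now returns a CharSourceRange instead of a std::pair of SourceLocations, so `.first` becomes `.getBegin()`. A small sketch (the helper name is illustrative):

#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"

static clang::SourceLocation
expansionBegin(const clang::SourceManager &SM, clang::SourceLocation Loc) {
  return SM.getImmediateExpansionRange(Loc).getBegin(); // was: ... .first
}
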
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index dc501b564eea..f89526a71da2 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -95,7 +95,7 @@ bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
// rewriteToObjCSubscriptSyntax.
//===----------------------------------------------------------------------===//
-/// \brief Check for classes that accept 'objectForKey:' (or the other selectors
+/// Check for classes that accept 'objectForKey:' (or the other selectors
/// that the migrator handles) but return their instances as 'id', resulting
/// in the compiler resolving 'objectForKey:' as the method from NSDictionary.
///
@@ -355,7 +355,7 @@ bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
return false;
}
-/// \brief Returns true if the immediate message arguments of \c Msg should not
+/// Returns true if the immediate message arguments of \c Msg should not
/// be rewritten because it will interfere with the rewrite of the parent
/// message expression. e.g.
/// \code
@@ -372,7 +372,7 @@ static bool shouldNotRewriteImmediateMessageArgs(const ObjCMessageExpr *Msg,
// rewriteToArrayLiteral.
//===----------------------------------------------------------------------===//
-/// \brief Adds an explicit cast to 'id' if the type is not objc object.
+/// Adds an explicit cast to 'id' if the type is not objc object.
static void objectifyExpr(const Expr *E, Commit &commit);
static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
@@ -434,7 +434,7 @@ static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
// rewriteToDictionaryLiteral.
//===----------------------------------------------------------------------===//
-/// \brief If \c Msg is an NSArray creation message or literal, this gets the
+/// If \c Msg is an NSArray creation message or literal, this gets the
/// objects that were used to create it.
/// \returns true if it is an NSArray and we got objects, or false otherwise.
static bool getNSArrayObjects(const Expr *E, const NSAPI &NS,
diff --git a/lib/Format/AffectedRangeManager.cpp b/lib/Format/AffectedRangeManager.cpp
index 5d4df1941209..b14316a14cd9 100644
--- a/lib/Format/AffectedRangeManager.cpp
+++ b/lib/Format/AffectedRangeManager.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements AffectRangeManager class.
+/// This file implements the AffectedRangeManager class.
///
//===----------------------------------------------------------------------===//
@@ -21,8 +21,9 @@ namespace clang {
namespace format {
bool AffectedRangeManager::computeAffectedLines(
- SmallVectorImpl<AnnotatedLine *>::iterator I,
- SmallVectorImpl<AnnotatedLine *>::iterator E) {
+ SmallVectorImpl<AnnotatedLine *> &Lines) {
+ SmallVectorImpl<AnnotatedLine *>::iterator I = Lines.begin();
+ SmallVectorImpl<AnnotatedLine *>::iterator E = Lines.end();
bool SomeLineAffected = false;
const AnnotatedLine *PreviousLine = nullptr;
while (I != E) {
@@ -48,7 +49,7 @@ bool AffectedRangeManager::computeAffectedLines(
continue;
}
- if (nonPPLineAffected(Line, PreviousLine))
+ if (nonPPLineAffected(Line, PreviousLine, Lines))
SomeLineAffected = true;
PreviousLine = Line;
@@ -99,10 +100,10 @@ void AffectedRangeManager::markAllAsAffected(
}
bool AffectedRangeManager::nonPPLineAffected(
- AnnotatedLine *Line, const AnnotatedLine *PreviousLine) {
+ AnnotatedLine *Line, const AnnotatedLine *PreviousLine,
+ SmallVectorImpl<AnnotatedLine *> &Lines) {
bool SomeLineAffected = false;
- Line->ChildrenAffected =
- computeAffectedLines(Line->Children.begin(), Line->Children.end());
+ Line->ChildrenAffected = computeAffectedLines(Line->Children);
if (Line->ChildrenAffected)
SomeLineAffected = true;
@@ -138,8 +139,13 @@ bool AffectedRangeManager::nonPPLineAffected(
Line->First->NewlinesBefore < 2 && PreviousLine &&
PreviousLine->Affected && PreviousLine->Last->is(tok::comment);
+ bool IsAffectedClosingBrace =
+ Line->First->is(tok::r_brace) &&
+ Line->MatchingOpeningBlockLineIndex != UnwrappedLine::kInvalidIndex &&
+ Lines[Line->MatchingOpeningBlockLineIndex]->Affected;
+
if (SomeTokenAffected || SomeFirstChildAffected || LineMoved ||
- IsContinuedComment) {
+ IsContinuedComment || IsAffectedClosingBrace) {
Line->Affected = true;
SomeLineAffected = true;
}
diff --git a/lib/Format/AffectedRangeManager.h b/lib/Format/AffectedRangeManager.h
index d8d5ee55acd8..b0c9dd259fb8 100644
--- a/lib/Format/AffectedRangeManager.h
+++ b/lib/Format/AffectedRangeManager.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief AffectedRangeManager class manages affected ranges in the code.
+/// AffectedRangeManager class manages affected ranges in the code.
///
//===----------------------------------------------------------------------===//
@@ -30,10 +30,9 @@ public:
: SourceMgr(SourceMgr), Ranges(Ranges.begin(), Ranges.end()) {}
// Determines which lines are affected by the SourceRanges given as input.
- // Returns \c true if at least one line between I and E or one of their
+ // Returns \c true if at least one line in \p Lines or one of their
// children is affected.
- bool computeAffectedLines(SmallVectorImpl<AnnotatedLine *>::iterator I,
- SmallVectorImpl<AnnotatedLine *>::iterator E);
+ bool computeAffectedLines(SmallVectorImpl<AnnotatedLine *> &Lines);
// Returns true if 'Range' intersects with one of the input ranges.
bool affectsCharSourceRange(const CharSourceRange &Range);
@@ -54,8 +53,8 @@ private:
// Determines whether 'Line' is affected by the SourceRanges given as input.
  // Returns \c true if the line or one of its children is affected.
- bool nonPPLineAffected(AnnotatedLine *Line,
- const AnnotatedLine *PreviousLine);
+ bool nonPPLineAffected(AnnotatedLine *Line, const AnnotatedLine *PreviousLine,
+ SmallVectorImpl<AnnotatedLine *> &Lines);
const SourceManager &SourceMgr;
const SmallVector<CharSourceRange, 8> Ranges;
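
A toy model of the new IsAffectedClosingBrace rule, and of why computeAffectedLines() now takes the whole Lines vector: the check indexes Lines by MatchingOpeningBlockLineIndex, which an iterator pair over a sub-range cannot support. Line below is a stand-in struct, not clang-format's AnnotatedLine:

#include <cstddef>
#include <iostream>
#include <vector>

struct Line {
  bool Affected = false;
  std::size_t MatchingOpeningBlockLineIndex = static_cast<std::size_t>(-1);
  bool IsClosingBrace = false;
};

int main() {
  std::vector<Line> Lines(3);
  Lines[0].Affected = true;                    // "void f() {" was edited
  Lines[2].IsClosingBrace = true;              // the lone "}" two lines later
  Lines[2].MatchingOpeningBlockLineIndex = 0;

  const Line &L = Lines[2];
  bool IsAffectedClosingBrace =
      L.IsClosingBrace &&
      L.MatchingOpeningBlockLineIndex != static_cast<std::size_t>(-1) &&
      Lines[L.MatchingOpeningBlockLineIndex].Affected;
  std::cout << IsAffectedClosingBrace << "\n"; // prints 1
}
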
diff --git a/lib/Format/BreakableToken.cpp b/lib/Format/BreakableToken.cpp
index 4735ab3564f0..cc68f70100e3 100644
--- a/lib/Format/BreakableToken.cpp
+++ b/lib/Format/BreakableToken.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Contains implementation of BreakableToken class and classes derived
+/// Contains implementation of BreakableToken class and classes derived
/// from it.
///
//===----------------------------------------------------------------------===//
@@ -44,7 +44,8 @@ static StringRef getLineCommentIndentPrefix(StringRef Comment,
const FormatStyle &Style) {
static const char *const KnownCStylePrefixes[] = {"///<", "//!<", "///", "//",
"//!"};
- static const char *const KnownTextProtoPrefixes[] = {"//", "#"};
+ static const char *const KnownTextProtoPrefixes[] = {"//", "#", "##", "###",
+ "####"};
ArrayRef<const char *> KnownPrefixes(KnownCStylePrefixes);
if (Style.Language == FormatStyle::LK_TextProto)
KnownPrefixes = KnownTextProtoPrefixes;
@@ -67,8 +68,9 @@ static BreakableToken::Split getCommentSplit(StringRef Text,
unsigned ColumnLimit,
unsigned TabWidth,
encoding::Encoding Encoding) {
- DEBUG(llvm::dbgs() << "Comment split: \"" << Text << ", " << ColumnLimit
- << "\", Content start: " << ContentStartColumn << "\n");
+ LLVM_DEBUG(llvm::dbgs() << "Comment split: \"" << Text << ", " << ColumnLimit
+ << "\", Content start: " << ContentStartColumn
+ << "\n");
if (ColumnLimit <= ContentStartColumn + 1)
return BreakableToken::Split(StringRef::npos, 0);
@@ -89,9 +91,9 @@ static BreakableToken::Split getCommentSplit(StringRef Text,
// Do not split before a number followed by a dot: this would be interpreted
// as a numbered list, which would prevent re-flowing in subsequent passes.
- static llvm::Regex kNumberedListRegexp = llvm::Regex("^[1-9][0-9]?\\.");
+ static auto *const kNumberedListRegexp = new llvm::Regex("^[1-9][0-9]?\\.");
if (SpaceOffset != StringRef::npos &&
- kNumberedListRegexp.match(Text.substr(SpaceOffset).ltrim(Blanks)))
+ kNumberedListRegexp->match(Text.substr(SpaceOffset).ltrim(Blanks)))
SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
if (SpaceOffset == StringRef::npos ||
@@ -214,11 +216,11 @@ unsigned BreakableStringLiteral::getContentStartColumn(unsigned LineIndex,
BreakableStringLiteral::BreakableStringLiteral(
const FormatToken &Tok, unsigned StartColumn, StringRef Prefix,
- StringRef Postfix, bool InPPDirective, encoding::Encoding Encoding,
- const FormatStyle &Style)
+ StringRef Postfix, unsigned UnbreakableTailLength, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style)
: BreakableToken(Tok, InPPDirective, Encoding, Style),
StartColumn(StartColumn), Prefix(Prefix), Postfix(Postfix),
- UnbreakableTailLength(Tok.UnbreakableTailLength) {
+ UnbreakableTailLength(UnbreakableTailLength) {
assert(Tok.TokenText.startswith(Prefix) && Tok.TokenText.endswith(Postfix));
Line = Tok.TokenText.substr(
Prefix.size(), Tok.TokenText.size() - Prefix.size() - Postfix.size());
@@ -284,10 +286,9 @@ static bool mayReflowContent(StringRef Content) {
Content = Content.trim(Blanks);
// Lines starting with '@' commonly have special meaning.
// Lines starting with '-', '-#', '+' or '*' are bulleted/numbered lists.
- static const SmallVector<StringRef, 8> kSpecialMeaningPrefixes = {
- "@", "TODO", "FIXME", "XXX", "-# ", "- ", "+ ", "* "};
bool hasSpecialMeaningPrefix = false;
- for (StringRef Prefix : kSpecialMeaningPrefixes) {
+ for (StringRef Prefix :
+ {"@", "TODO", "FIXME", "XXX", "-# ", "- ", "+ ", "* "}) {
if (Content.startswith(Prefix)) {
hasSpecialMeaningPrefix = true;
break;
@@ -297,9 +298,9 @@ static bool mayReflowContent(StringRef Content) {
// Numbered lists may also start with a number followed by '.'
// To avoid issues if a line starts with a number which is actually the end
// of a previous line, we only consider numbers with up to 2 digits.
- static llvm::Regex kNumberedListRegexp = llvm::Regex("^[1-9][0-9]?\\. ");
+ static auto *const kNumberedListRegexp = new llvm::Regex("^[1-9][0-9]?\\. ");
hasSpecialMeaningPrefix =
- hasSpecialMeaningPrefix || kNumberedListRegexp.match(Content);
+ hasSpecialMeaningPrefix || kNumberedListRegexp->match(Content);
// Simple heuristic for what to reflow: content should contain at least two
// characters and either the first or second character must be
@@ -425,7 +426,7 @@ BreakableBlockComment::BreakableBlockComment(
}
}
- DEBUG({
+ LLVM_DEBUG({
llvm::dbgs() << "IndentAtLineBreak " << IndentAtLineBreak << "\n";
llvm::dbgs() << "DelimitersOnNewline " << DelimitersOnNewline << "\n";
for (size_t i = 0; i < Lines.size(); ++i) {
@@ -788,16 +789,47 @@ BreakableComment::Split BreakableLineCommentSection::getReflowSplit(
void BreakableLineCommentSection::reflow(unsigned LineIndex,
WhitespaceManager &Whitespaces) const {
- // Reflow happens between tokens. Replace the whitespace between the
- // tokens by the empty string.
- Whitespaces.replaceWhitespace(
- *Tokens[LineIndex], /*Newlines=*/0, /*Spaces=*/0,
- /*StartOfTokenColumn=*/StartColumn, /*InPPDirective=*/false);
+ if (LineIndex > 0 && Tokens[LineIndex] != Tokens[LineIndex - 1]) {
+ // Reflow happens between tokens. Replace the whitespace between the
+ // tokens by the empty string.
+ Whitespaces.replaceWhitespace(
+ *Tokens[LineIndex], /*Newlines=*/0, /*Spaces=*/0,
+ /*StartOfTokenColumn=*/StartColumn, /*InPPDirective=*/false);
+ } else if (LineIndex > 0) {
+ // In case we're reflowing after the '\' in:
+ //
+ // // line comment \
+ // // line 2
+ //
+ // the reflow happens inside the single comment token (it is a single line
+ // comment with an unescaped newline).
+ // Replace the whitespace between the '\' and '//' with the empty string.
+ //
+ // Offset points to after the '\' relative to start of the token.
+ unsigned Offset = Lines[LineIndex - 1].data() +
+ Lines[LineIndex - 1].size() -
+ tokenAt(LineIndex - 1).TokenText.data();
+ // WhitespaceLength is the number of chars between the '\' and the '//' on
+ // the next line.
+ unsigned WhitespaceLength =
+ Lines[LineIndex].data() - tokenAt(LineIndex).TokenText.data() - Offset;
+ Whitespaces.replaceWhitespaceInToken(*Tokens[LineIndex],
+ Offset,
+ /*ReplaceChars=*/WhitespaceLength,
+ /*PreviousPostfix=*/"",
+ /*CurrentPrefix=*/"",
+ /*InPPDirective=*/false,
+ /*Newlines=*/0,
+ /*Spaces=*/0);
+ }
// Replace the indent and prefix of the token with the reflow prefix.
+ unsigned Offset =
+ Lines[LineIndex].data() - tokenAt(LineIndex).TokenText.data();
unsigned WhitespaceLength =
- Content[LineIndex].data() - tokenAt(LineIndex).TokenText.data();
+ Content[LineIndex].data() - Lines[LineIndex].data();
Whitespaces.replaceWhitespaceInToken(*Tokens[LineIndex],
- /*Offset=*/0,
+ Offset,
/*ReplaceChars=*/WhitespaceLength,
/*PreviousPostfix=*/"",
/*CurrentPrefix=*/ReflowPrefix,
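
The new escaped-newline branch in reflow() works out replacement ranges by pointer arithmetic between the token text and the per-line StringRefs. A standalone model of that arithmetic using std::string_view (the literal and offsets are illustrative):

#include <cassert>
#include <string_view>

int main() {
  // A single line-comment token containing an escaped newline.
  std::string_view TokenText = "// line comment \\\n   // line 2";
  std::string_view PrevLine = TokenText.substr(0, 17); // up to and incl. '\'
  std::string_view CurLine = TokenText.substr(21);     // "// line 2"

  // Offset: first character after the '\', relative to the token start.
  unsigned Offset = PrevLine.data() + PrevLine.size() - TokenText.data();
  // WhitespaceLength: characters between the '\' and the next line's "//".
  unsigned WhitespaceLength = CurLine.data() - TokenText.data() - Offset;

  assert(Offset == 17 && WhitespaceLength == 4);
  return 0;
}
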
diff --git a/lib/Format/BreakableToken.h b/lib/Format/BreakableToken.h
index 8ef26ef464da..0fac8f08c026 100644
--- a/lib/Format/BreakableToken.h
+++ b/lib/Format/BreakableToken.h
@@ -1,4 +1,4 @@
-//===--- BreakableToken.h - Format C++ code -------------------------------===//
+//===--- BreakableToken.h - Format C++ code ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Declares BreakableToken, BreakableStringLiteral, BreakableComment,
+/// Declares BreakableToken, BreakableStringLiteral, BreakableComment,
/// BreakableBlockComment and BreakableLineCommentSection classes, that contain
/// token type-specific logic to break long lines in tokens and reflow content
/// between tokens.
@@ -27,13 +27,13 @@
namespace clang {
namespace format {
-/// \brief Checks if \p Token switches formatting, like /* clang-format off */.
+/// Checks if \p Token switches formatting, like /* clang-format off */.
/// \p Token must be a comment.
bool switchesFormatting(const FormatToken &Token);
struct FormatStyle;
-/// \brief Base class for tokens / ranges of tokens that can allow breaking
+/// Base class for tokens / ranges of tokens that can allow breaking
/// within the tokens - for example, to avoid whitespace beyond the column
/// limit, or to reflow text.
///
@@ -88,15 +88,15 @@ struct FormatStyle;
///
class BreakableToken {
public:
- /// \brief Contains starting character index and length of split.
+ /// Contains starting character index and length of split.
typedef std::pair<StringRef::size_type, unsigned> Split;
virtual ~BreakableToken() {}
- /// \brief Returns the number of lines in this token in the original code.
+ /// Returns the number of lines in this token in the original code.
virtual unsigned getLineCount() const = 0;
- /// \brief Returns the number of columns required to format the text in the
+ /// Returns the number of columns required to format the text in the
/// byte range [\p Offset, \p Offset \c + \p Length).
///
/// \p Offset is the byte offset from the start of the content of the line
@@ -108,7 +108,7 @@ public:
StringRef::size_type Length,
unsigned StartColumn) const = 0;
- /// \brief Returns the number of columns required to format the text following
+ /// Returns the number of columns required to format the text following
/// the byte \p Offset in the line \p LineIndex, including potentially
/// unbreakable sequences of tokens following after the end of the token.
///
@@ -125,7 +125,7 @@ public:
return getRangeLength(LineIndex, Offset, StringRef::npos, StartColumn);
}
- /// \brief Returns the column at which content in line \p LineIndex starts,
+ /// Returns the column at which content in line \p LineIndex starts,
/// assuming no reflow.
///
/// If \p Break is true, returns the column at which the line should start
@@ -135,7 +135,7 @@ public:
virtual unsigned getContentStartColumn(unsigned LineIndex,
bool Break) const = 0;
- /// \brief Returns a range (offset, length) at which to break the line at
+ /// Returns a range (offset, length) at which to break the line at
/// \p LineIndex, if previously broken at \p TailOffset. If possible, do not
/// violate \p ColumnLimit, assuming the text starting at \p TailOffset in
/// the token is formatted starting at ContentStartColumn in the reformatted
@@ -144,27 +144,27 @@ public:
unsigned ColumnLimit, unsigned ContentStartColumn,
llvm::Regex &CommentPragmasRegex) const = 0;
- /// \brief Emits the previously retrieved \p Split via \p Whitespaces.
+ /// Emits the previously retrieved \p Split via \p Whitespaces.
virtual void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
WhitespaceManager &Whitespaces) const = 0;
- /// \brief Returns the number of columns needed to format
+ /// Returns the number of columns needed to format
/// \p RemainingTokenColumns, assuming that Split is within the range measured
/// by \p RemainingTokenColumns, and that the whitespace in Split is reduced
/// to a single space.
unsigned getLengthAfterCompression(unsigned RemainingTokenColumns,
Split Split) const;
- /// \brief Replaces the whitespace range described by \p Split with a single
+ /// Replaces the whitespace range described by \p Split with a single
/// space.
virtual void compressWhitespace(unsigned LineIndex, unsigned TailOffset,
Split Split,
WhitespaceManager &Whitespaces) const = 0;
- /// \brief Returns whether the token supports reflowing text.
+ /// Returns whether the token supports reflowing text.
virtual bool supportsReflow() const { return false; }
- /// \brief Returns a whitespace range (offset, length) of the content at \p
+ /// Returns a whitespace range (offset, length) of the content at \p
/// LineIndex such that the content of that line is reflown to the end of the
/// previous one.
///
@@ -180,21 +180,21 @@ public:
return Split(StringRef::npos, 0);
}
- /// \brief Reflows the current line into the end of the previous one.
+ /// Reflows the current line into the end of the previous one.
virtual void reflow(unsigned LineIndex,
WhitespaceManager &Whitespaces) const {}
- /// \brief Returns whether there will be a line break at the start of the
+ /// Returns whether there will be a line break at the start of the
/// token.
virtual bool introducesBreakBeforeToken() const {
return false;
}
- /// \brief Replaces the whitespace between \p LineIndex-1 and \p LineIndex.
+ /// Replaces the whitespace between \p LineIndex-1 and \p LineIndex.
virtual void adaptStartOfLine(unsigned LineIndex,
WhitespaceManager &Whitespaces) const {}
- /// \brief Returns a whitespace range (offset, length) of the content at
+ /// Returns a whitespace range (offset, length) of the content at
/// the last line that needs to be reformatted after the last line has been
/// reformatted.
///
@@ -204,7 +204,7 @@ public:
return Split(StringRef::npos, 0);
}
- /// \brief Replaces the whitespace from \p SplitAfterLastLine on the last line
+ /// Replaces the whitespace from \p SplitAfterLastLine on the last line
/// after the last line has been formatted by performing a reformatting.
void replaceWhitespaceAfterLastLine(unsigned TailOffset,
Split SplitAfterLastLine,
@@ -213,7 +213,7 @@ public:
Whitespaces);
}
- /// \brief Updates the next token of \p State to the next token after this
+ /// Updates the next token of \p State to the next token after this
/// one. This can be used when this token manages a set of underlying tokens
  /// as a unit and is responsible for the formatting of them.
virtual void updateNextToken(LineState &State) const {}
@@ -232,17 +232,17 @@ protected:
class BreakableStringLiteral : public BreakableToken {
public:
- /// \brief Creates a breakable token for a single line string literal.
+ /// Creates a breakable token for a single line string literal.
///
/// \p StartColumn specifies the column in which the token will start
/// after formatting.
BreakableStringLiteral(const FormatToken &Tok, unsigned StartColumn,
StringRef Prefix, StringRef Postfix,
- bool InPPDirective, encoding::Encoding Encoding,
- const FormatStyle &Style);
+ unsigned UnbreakableTailLength, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style);
Split getSplit(unsigned LineIndex, unsigned TailOffset, unsigned ColumnLimit,
- unsigned ReflowColumn,
+ unsigned ContentStartColumn,
llvm::Regex &CommentPragmasRegex) const override;
void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
WhitespaceManager &Whitespaces) const override;
@@ -272,7 +272,7 @@ protected:
class BreakableComment : public BreakableToken {
protected:
- /// \brief Creates a breakable token for a comment.
+ /// Creates a breakable token for a comment.
///
/// \p StartColumn specifies the column in which the comment will start after
/// formatting.
@@ -284,7 +284,7 @@ public:
bool supportsReflow() const override { return true; }
unsigned getLineCount() const override;
Split getSplit(unsigned LineIndex, unsigned TailOffset, unsigned ColumnLimit,
- unsigned ReflowColumn,
+ unsigned ContentStartColumn,
llvm::Regex &CommentPragmasRegex) const override;
void compressWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
WhitespaceManager &Whitespaces) const override;
@@ -453,7 +453,7 @@ private:
SmallVector<unsigned, 16> OriginalContentColumn;
- /// \brief The token to which the last line of this breakable token belongs
+  /// The token to which the last line of this breakable token belongs;
  /// nullptr if that token is the initial token.
///
/// The distinction is because if the token of the last line of this breakable
diff --git a/lib/Format/CMakeLists.txt b/lib/Format/CMakeLists.txt
index 42e6d53d9fe6..0019d045cd06 100644
--- a/lib/Format/CMakeLists.txt
+++ b/lib/Format/CMakeLists.txt
@@ -20,4 +20,5 @@ add_clang_library(clangFormat
clangBasic
clangLex
clangToolingCore
+ clangToolingInclusions
)
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index a3d38b244c5c..90d2a9997111 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements the continuation indenter.
+/// This file implements the continuation indenter.
///
//===----------------------------------------------------------------------===//
@@ -26,14 +26,81 @@
namespace clang {
namespace format {
+// Returns true if a TT_SelectorName should be indented when wrapped,
+// false otherwise.
+static bool shouldIndentWrappedSelectorName(const FormatStyle &Style,
+ LineType LineType) {
+ return Style.IndentWrappedFunctionNames || LineType == LT_ObjCMethodDecl;
+}
+
// Returns the length of everything up to the first possible line break after
// the ), ], } or > matching \c Tok.
-static unsigned getLengthToMatchingParen(const FormatToken &Tok) {
+static unsigned getLengthToMatchingParen(const FormatToken &Tok,
+ const std::vector<ParenState> &Stack) {
+ // Normally whether or not a break before T is possible is calculated and
+ // stored in T.CanBreakBefore. Braces, array initializers and text proto
+ // messages like `key: < ... >` are an exception: a break is possible
+ // before a closing brace R if a break was inserted after the corresponding
+ // opening brace. The information about whether or not a break is needed
+ // before a closing brace R is stored in the ParenState field
+ // S.BreakBeforeClosingBrace where S is the state that R closes.
+ //
+ // In order to decide whether there can be a break before encountered right
+ // braces, this implementation iterates over the sequence of tokens and over
+ // the paren stack in lockstep, keeping track of the stack level which visited
+ // right braces correspond to in MatchingStackIndex.
+ //
+ // For example, consider:
+ // L. <- line number
+ // 1. {
+ // 2. {1},
+ // 3. {2},
+ // 4. {{3}}}
+ // ^ where we call this method with this token.
+ // The paren stack at this point contains 3 brace levels:
+ // 0. { at line 1, BreakBeforeClosingBrace: true
+ // 1. first { at line 4, BreakBeforeClosingBrace: false
+ // 2. second { at line 4, BreakBeforeClosingBrace: false,
+ // where there might be fake parens levels in-between these levels.
+ // The algorithm will start at the first } on line 4, which is the matching
+ // brace of the initial left brace and at level 2 of the stack. Then,
+ // examining BreakBeforeClosingBrace: false at level 2, it will continue to
+ // the second } on line 4, and will traverse the stack downwards until it
+ // finds the matching { on level 1. Then, examining BreakBeforeClosingBrace:
+ // false at level 1, it will continue to the third } on line 4 and will
+ // traverse the stack downwards until it finds the matching { on level 0.
+ // Then, examining BreakBeforeClosingBrace: true at level 0, the algorithm
+ // will stop and will use the second } on line 4 to determine the length to
+ // return, as in this example the range will include the tokens: {3}}
+ //
+ // The algorithm will only traverse the stack if it encounters braces, array
+ // initializer squares or text proto angle brackets.
if (!Tok.MatchingParen)
return 0;
FormatToken *End = Tok.MatchingParen;
- while (End->Next && !End->Next->CanBreakBefore) {
- End = End->Next;
+ // Maintains a stack level corresponding to the current End token.
+ int MatchingStackIndex = Stack.size() - 1;
+ // Traverses the stack downwards, looking for the level to which LBrace
+ // corresponds. Returns either a pointer to the matching level or nullptr if
+  // LBrace is not found in the initial portion of the stack up to
+ // MatchingStackIndex.
+ auto FindParenState = [&](const FormatToken *LBrace) -> const ParenState * {
+ while (MatchingStackIndex >= 0 && Stack[MatchingStackIndex].Tok != LBrace)
+ --MatchingStackIndex;
+ return MatchingStackIndex >= 0 ? &Stack[MatchingStackIndex] : nullptr;
+ };
+ for (; End->Next; End = End->Next) {
+ if (End->Next->CanBreakBefore)
+ break;
+ if (!End->Next->closesScope())
+ continue;
+ if (End->Next->MatchingParen &&
+ End->Next->MatchingParen->isOneOf(
+ tok::l_brace, TT_ArrayInitializerLSquare, tok::less)) {
+ const ParenState *State = FindParenState(End->Next->MatchingParen);
+ if (State && State->BreakBeforeClosingBrace)
+ break;
+ }
}
return End->TotalLength - Tok.TotalLength + 1;
}
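
A compact toy of the lockstep walk described in the comment above, shrinking the token stream and paren stack to plain vectors; each closing token carries the stack level of its opener in place of a FormatToken pointer:

#include <cstddef>
#include <iostream>
#include <vector>

struct Tok {
  bool ClosesScope = false;
  int OpenerStackLevel = -1; // stack level of the matching opener
  bool CanBreakBefore = false;
};

int main() {
  // Flags for "{ {1}, {2}, {{3}}}" at the first '}' of the example's line 4.
  std::vector<bool> BreakBeforeClosingBrace = {true, false, false};
  // Remaining tokens: '}' closing level 1, then '}' closing level 0.
  std::vector<Tok> Rest = {{true, 1, false}, {true, 0, false}};

  int MatchingStackIndex = static_cast<int>(BreakBeforeClosingBrace.size()) - 1;
  std::size_t End = 0;
  for (; End < Rest.size(); ++End) {
    if (Rest[End].CanBreakBefore)
      break;
    if (!Rest[End].ClosesScope)
      continue;
    // Walk the stack downwards to the opener's level.
    while (MatchingStackIndex >= 0 &&
           MatchingStackIndex != Rest[End].OpenerStackLevel)
      --MatchingStackIndex;
    if (MatchingStackIndex >= 0 && BreakBeforeClosingBrace[MatchingStackIndex])
      break; // a break is possible before this '}': stop extending the range
  }
  std::cout << "unbreakable tokens past the start: " << End << "\n"; // 1
}
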
@@ -64,7 +131,7 @@ static bool startsNextParameter(const FormatToken &Current,
Style.BreakConstructorInitializers !=
FormatStyle::BCIS_BeforeComma) &&
(Previous.isNot(TT_InheritanceComma) ||
- !Style.BreakBeforeInheritanceComma));
+ Style.BreakInheritanceList != FormatStyle::BILS_BeforeComma));
}
static bool opensProtoMessageField(const FormatToken &LessTok,
@@ -102,28 +169,59 @@ static llvm::Optional<StringRef> getRawStringDelimiter(StringRef TokenText) {
return Delimiter;
}
+// Returns the canonical delimiter for \p Language, or the empty string if no
+// canonical delimiter is specified.
+static StringRef
+getCanonicalRawStringDelimiter(const FormatStyle &Style,
+ FormatStyle::LanguageKind Language) {
+ for (const auto &Format : Style.RawStringFormats) {
+ if (Format.Language == Language)
+ return StringRef(Format.CanonicalDelimiter);
+ }
+ return "";
+}
+
RawStringFormatStyleManager::RawStringFormatStyleManager(
const FormatStyle &CodeStyle) {
for (const auto &RawStringFormat : CodeStyle.RawStringFormats) {
- FormatStyle Style;
- if (!getPredefinedStyle(RawStringFormat.BasedOnStyle,
- RawStringFormat.Language, &Style)) {
- Style = getLLVMStyle();
- Style.Language = RawStringFormat.Language;
+ llvm::Optional<FormatStyle> LanguageStyle =
+ CodeStyle.GetLanguageStyle(RawStringFormat.Language);
+ if (!LanguageStyle) {
+ FormatStyle PredefinedStyle;
+ if (!getPredefinedStyle(RawStringFormat.BasedOnStyle,
+ RawStringFormat.Language, &PredefinedStyle)) {
+ PredefinedStyle = getLLVMStyle();
+ PredefinedStyle.Language = RawStringFormat.Language;
+ }
+ LanguageStyle = PredefinedStyle;
+ }
+ LanguageStyle->ColumnLimit = CodeStyle.ColumnLimit;
+ for (StringRef Delimiter : RawStringFormat.Delimiters) {
+ DelimiterStyle.insert({Delimiter, *LanguageStyle});
+ }
+ for (StringRef EnclosingFunction : RawStringFormat.EnclosingFunctions) {
+ EnclosingFunctionStyle.insert({EnclosingFunction, *LanguageStyle});
}
- Style.ColumnLimit = CodeStyle.ColumnLimit;
- DelimiterStyle.insert({RawStringFormat.Delimiter, Style});
}
}
llvm::Optional<FormatStyle>
-RawStringFormatStyleManager::get(StringRef Delimiter) const {
+RawStringFormatStyleManager::getDelimiterStyle(StringRef Delimiter) const {
auto It = DelimiterStyle.find(Delimiter);
if (It == DelimiterStyle.end())
return None;
return It->second;
}
+llvm::Optional<FormatStyle>
+RawStringFormatStyleManager::getEnclosingFunctionStyle(
+ StringRef EnclosingFunction) const {
+ auto It = EnclosingFunctionStyle.find(EnclosingFunction);
+ if (It == EnclosingFunctionStyle.end())
+ return None;
+ return It->second;
+}
+
ContinuationIndenter::ContinuationIndenter(const FormatStyle &Style,
const AdditionalKeywords &Keywords,
const SourceManager &SourceMgr,
@@ -154,7 +252,7 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
State.Column = 0;
State.Line = Line;
State.NextToken = Line->First;
- State.Stack.push_back(ParenState(FirstIndent, FirstIndent,
+ State.Stack.push_back(ParenState(/*Tok=*/nullptr, FirstIndent, FirstIndent,
/*AvoidBinPacking=*/false,
/*NoLineBreak=*/false));
State.LineContainsContinuedForLoopSection = false;
@@ -169,6 +267,7 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
// global scope.
State.Stack.back().AvoidBinPacking = true;
State.Stack.back().BreakBeforeParameter = true;
+ State.Stack.back().AlignColons = false;
}
// The first token has already been indented and thus consumed.
@@ -222,6 +321,9 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
State.Stack.back().NoLineBreakInOperand)
return false;
+ if (Previous.is(tok::l_square) && Previous.is(TT_ObjCMethodExpr))
+ return false;
+
return !State.Stack.back().NoLineBreak;
}
@@ -235,6 +337,11 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
return true;
+ if (Style.Language == FormatStyle::LK_ObjC &&
+ Current.ObjCSelectorNameParts > 1 &&
+ Current.startsSequence(TT_SelectorName, tok::colon, tok::caret)) {
+ return true;
+ }
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
Style.isCpp() &&
@@ -255,7 +362,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
Previous.ParameterCount > 1) ||
opensProtoMessageField(Previous, Style)) &&
Style.ColumnLimit > 0 &&
- getLengthToMatchingParen(Previous) + State.Column - 1 >
+ getLengthToMatchingParen(Previous, State.Stack) + State.Column - 1 >
getColumnLimit(State))
return true;
@@ -275,7 +382,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (Current.is(TT_ObjCMethodExpr) && !Previous.is(TT_SelectorName) &&
State.Line->startsWith(TT_ObjCMethodSpecifier))
return true;
- if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound &&
+ if (Current.is(TT_SelectorName) && !Previous.is(tok::at) &&
+ State.Stack.back().ObjCSelectorNameFound &&
State.Stack.back().BreakBeforeParameter)
return true;
@@ -298,6 +406,12 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
Style.Language == FormatStyle::LK_JavaScript))
return true;
+  // If the template declaration spans multiple lines, force wrap before the
+  // function/class declaration.
+ if (Previous.ClosesTemplateDeclaration &&
+ State.Stack.back().BreakBeforeParameter && Current.CanBreakBefore)
+ return true;
+
if (State.Column <= NewLineColumn)
return false;
@@ -349,7 +463,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// for cases where the entire line does not fit on a single line as a
// different LineFormatter would be used otherwise.
if (Previous.ClosesTemplateDeclaration)
- return true;
+ return Style.AlwaysBreakTemplateDeclarations != FormatStyle::BTDS_No;
if (Previous.is(TT_FunctionAnnotationRParen))
return true;
if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
@@ -466,7 +580,11 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// If "BreakBeforeInheritanceComma" mode, don't break within the inheritance
// declaration unless there is multiple inheritance.
- if (Style.BreakBeforeInheritanceComma && Current.is(TT_InheritanceColon))
+ if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
+ Current.is(TT_InheritanceColon))
+ State.Stack.back().NoLineBreak = true;
+ if (Style.BreakInheritanceList == FormatStyle::BILS_AfterColon &&
+ Previous.is(TT_InheritanceColon))
State.Stack.back().NoLineBreak = true;
if (Current.is(TT_SelectorName) &&
@@ -661,11 +779,12 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Stack.back().AlignColons = false;
} else {
State.Stack.back().ColonPos =
- (Style.IndentWrappedFunctionNames
+ (shouldIndentWrappedSelectorName(Style, State.Line->Type)
? std::max(State.Stack.back().Indent,
State.FirstIndent + Style.ContinuationIndentWidth)
: State.Stack.back().Indent) +
- NextNonComment->LongestObjCSelectorName;
+ std::max(NextNonComment->LongestObjCSelectorName,
+ NextNonComment->ColumnWidth);
}
} else if (State.Stack.back().AlignColons &&
State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) {
@@ -693,7 +812,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
!State.Stack.back().AvoidBinPacking) ||
Previous.is(TT_BinaryOperator))
State.Stack.back().BreakBeforeParameter = false;
- if (Previous.isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
+ if (PreviousNonComment &&
+ PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
Current.NestingLevel == 0)
State.Stack.back().BreakBeforeParameter = false;
if (NextNonComment->is(tok::question) ||
@@ -826,7 +946,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
(Current.Next->is(TT_DictLiteral) ||
((Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) &&
- Current.Next->isOneOf(TT_TemplateOpener, tok::l_brace))))
+ Current.Next->isOneOf(tok::less, tok::l_brace))))
return State.Stack.back().Indent;
if (NextNonComment->is(TT_ObjCStringLiteral) &&
State.StartOfStringLiteral != 0)
@@ -851,20 +971,29 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if ((PreviousNonComment &&
(PreviousNonComment->ClosesTemplateDeclaration ||
PreviousNonComment->isOneOf(
- TT_AttributeParen, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
- TT_LeadingJavaAnnotation))) ||
+ TT_AttributeParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
+ TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName)))
return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
if (NextNonComment->is(TT_SelectorName)) {
if (!State.Stack.back().ObjCSelectorNameFound) {
- if (NextNonComment->LongestObjCSelectorName == 0)
- return State.Stack.back().Indent;
- return (Style.IndentWrappedFunctionNames
- ? std::max(State.Stack.back().Indent,
- State.FirstIndent + Style.ContinuationIndentWidth)
- : State.Stack.back().Indent) +
- NextNonComment->LongestObjCSelectorName -
+ unsigned MinIndent = State.Stack.back().Indent;
+ if (shouldIndentWrappedSelectorName(Style, State.Line->Type))
+ MinIndent = std::max(MinIndent,
+ State.FirstIndent + Style.ContinuationIndentWidth);
+ // If LongestObjCSelectorName is 0, we are indenting the first
+ // part of an ObjC selector (or a selector component which is
+ // not colon-aligned due to block formatting).
+ //
+ // Otherwise, we are indenting a subsequent part of an ObjC
+ // selector which should be colon-aligned to the longest
+ // component of the ObjC selector.
+ //
+ // In either case, we want to respect Style.IndentWrappedFunctionNames.
+ return MinIndent +
+ std::max(NextNonComment->LongestObjCSelectorName,
+ NextNonComment->ColumnWidth) -
NextNonComment->ColumnWidth;
}
if (!State.Stack.back().AlignColons)
@@ -898,12 +1027,17 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
if (PreviousNonComment && PreviousNonComment->is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon)
return State.Stack.back().Indent;
+ if (PreviousNonComment && PreviousNonComment->is(TT_InheritanceColon) &&
+ Style.BreakInheritanceList == FormatStyle::BILS_AfterColon)
+ return State.Stack.back().Indent;
if (NextNonComment->isOneOf(TT_CtorInitializerColon, TT_InheritanceColon,
TT_InheritanceComma))
return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
!Current.isOneOf(tok::colon, tok::comment))
return ContinuationIndent;
+ if (Current.is(TT_ProtoExtensionLSquare))
+ return State.Stack.back().Indent;
if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
PreviousNonComment->isNot(tok::r_brace))
// Ensure that we fall back to the continuation indent width instead of
@@ -951,13 +1085,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.isMemberAccess())
State.Stack.back().StartOfFunctionCall =
!Current.NextOperator ? 0 : State.Column;
- if (Current.is(TT_SelectorName)) {
+ if (Current.is(TT_SelectorName))
State.Stack.back().ObjCSelectorNameFound = true;
- if (Style.IndentWrappedFunctionNames) {
- State.Stack.back().Indent =
- State.FirstIndent + Style.ContinuationIndentWidth;
- }
- }
if (Current.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers != FormatStyle::BCIS_AfterColon) {
// Indent 2 from the column, so:
@@ -985,7 +1114,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
}
if (Current.is(TT_InheritanceColon))
State.Stack.back().Indent =
- State.FirstIndent + Style.ContinuationIndentWidth;
+ State.FirstIndent + Style.ConstructorInitializerIndentWidth;
if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline)
State.Stack.back().NestedBlockIndent =
State.Column + Current.ColumnWidth + 1;
@@ -1071,6 +1200,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
E = Current.FakeLParens.rend();
I != E; ++I) {
ParenState NewParenState = State.Stack.back();
+ NewParenState.Tok = nullptr;
NewParenState.ContainsLineBreak = false;
NewParenState.LastOperatorWrapped = true;
NewParenState.NoLineBreak =
@@ -1180,7 +1310,6 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
// void SomeFunction(vector< // break
// int> v);
// FIXME: We likely want to do this for more combinations of brackets.
- // Verify that it is wanted for ObjC, too.
if (Current.is(tok::less) && Current.ParentBracket == tok::l_paren) {
NewIndent = std::max(NewIndent, State.Stack.back().Indent);
LastSpace = std::max(LastSpace, State.Stack.back().Indent);
@@ -1191,9 +1320,20 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
Current.MatchingParen->getPreviousNonComment() &&
Current.MatchingParen->getPreviousNonComment()->is(tok::comma);
+ // If ObjCBinPackProtocolList is unspecified, fall back to BinPackParameters
+ // for backwards compatibility.
+ bool ObjCBinPackProtocolList =
+ (Style.ObjCBinPackProtocolList == FormatStyle::BPS_Auto &&
+ Style.BinPackParameters) ||
+ Style.ObjCBinPackProtocolList == FormatStyle::BPS_Always;
+
+ bool BinPackDeclaration =
+ (State.Line->Type != LT_ObjCDecl && Style.BinPackParameters) ||
+ (State.Line->Type == LT_ObjCDecl && ObjCBinPackProtocolList);
+
AvoidBinPacking =
(Style.Language == FormatStyle::LK_JavaScript && EndsInComma) ||
- (State.Line->MustBeDeclaration && !Style.BinPackParameters) ||
+ (State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
(Style.ExperimentalAutoDetectBinPacking &&
(Current.PackingKind == PPK_OnePerLine ||
@@ -1204,7 +1344,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (Style.ColumnLimit) {
// If this '[' opens an ObjC call, determine whether all parameters fit
// into one line and put one per line if they don't.
- if (getLengthToMatchingParen(Current) + State.Column >
+ if (getLengthToMatchingParen(Current, State.Stack) + State.Column >
getColumnLimit(State))
BreakBeforeParameter = true;
} else {
@@ -1235,10 +1375,13 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(Current.is(TT_TemplateOpener) &&
State.Stack.back().ContainsUnwrappedBuilder));
State.Stack.push_back(
- ParenState(NewIndent, LastSpace, AvoidBinPacking, NoLineBreak));
+ ParenState(&Current, NewIndent, LastSpace, AvoidBinPacking, NoLineBreak));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
State.Stack.back().HasMultipleNestedBlocks = Current.BlockParameterCount > 1;
+ State.Stack.back().IsInsideObjCArrayLiteral =
+ Current.is(TT_ArrayInitializerLSquare) && Current.Previous &&
+ Current.Previous->is(tok::at);
}
void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
@@ -1251,9 +1394,34 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
if (State.Stack.size() > 1 &&
(Current.isOneOf(tok::r_paren, tok::r_square, TT_TemplateString) ||
(Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
- State.NextToken->is(TT_TemplateCloser)))
+ State.NextToken->is(TT_TemplateCloser) ||
+ (Current.is(tok::greater) && Current.is(TT_DictLiteral))))
State.Stack.pop_back();
+ // Reevaluate whether ObjC message arguments fit into one line.
+ // If a receiver spans multiple lines, e.g.:
+ // [[object block:^{
+ // return 42;
+ // }] a:42 b:42];
+ // BreakBeforeParameter is calculated based on an incorrect assumption
+ // (it is checked whether the whole expression fits into one line without
+ // considering a line break inside a message receiver).
+  // We check whether arguments fit after the receiver's scope closer (into
+  // the same line).
+ if (State.Stack.back().BreakBeforeParameter && Current.MatchingParen &&
+ Current.MatchingParen->Previous) {
+ const FormatToken &CurrentScopeOpener = *Current.MatchingParen->Previous;
+ if (CurrentScopeOpener.is(TT_ObjCMethodExpr) &&
+ CurrentScopeOpener.MatchingParen) {
+ int NecessarySpaceInLine =
+ getLengthToMatchingParen(CurrentScopeOpener, State.Stack) +
+ CurrentScopeOpener.TotalLength - Current.TotalLength - 1;
+ if (State.Column + Current.ColumnWidth + NecessarySpaceInLine <=
+ Style.ColumnLimit)
+ State.Stack.back().BreakBeforeParameter = false;
+ }
+ }
+
if (Current.is(tok::r_square)) {
// If this ends the array subscript expr, reset the corresponding value.
const FormatToken *NextNonComment = Current.getNextNonComment();
@@ -1269,7 +1437,8 @@ void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
NestedBlockIndent + (State.NextToken->is(TT_ObjCBlockLBrace)
? Style.ObjCBlockIndentWidth
: Style.IndentWidth);
- State.Stack.push_back(ParenState(NewIndent, State.Stack.back().LastSpace,
+ State.Stack.push_back(ParenState(State.NextToken, NewIndent,
+ State.Stack.back().LastSpace,
/*AvoidBinPacking=*/true,
/*NoLineBreak=*/false));
State.Stack.back().NestedBlockIndent = NestedBlockIndent;
@@ -1293,14 +1462,32 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
const FormatToken &Current, LineState &State,
const FormatStyle &RawStringStyle, bool DryRun) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
- auto Delimiter = *getRawStringDelimiter(Current.TokenText);
+ StringRef OldDelimiter = *getRawStringDelimiter(Current.TokenText);
+ StringRef NewDelimiter =
+ getCanonicalRawStringDelimiter(Style, RawStringStyle.Language);
+ if (NewDelimiter.empty() || OldDelimiter.empty())
+ NewDelimiter = OldDelimiter;
// The text of a raw string is between the leading 'R"delimiter(' and the
// trailing 'delimiter)"'.
- unsigned PrefixSize = 3 + Delimiter.size();
- unsigned SuffixSize = 2 + Delimiter.size();
+ unsigned OldPrefixSize = 3 + OldDelimiter.size();
+ unsigned OldSuffixSize = 2 + OldDelimiter.size();
+ // We create a virtual text environment which expects a null-terminated
+ // string, so we cannot use StringRef.
+ std::string RawText =
+ Current.TokenText.substr(OldPrefixSize).drop_back(OldSuffixSize);
+ if (NewDelimiter != OldDelimiter) {
+ // Don't update to the canonical delimiter 'deli' if ')deli"' occurs in the
+ // raw string.
+ std::string CanonicalDelimiterSuffix = (")" + NewDelimiter + "\"").str();
+ if (StringRef(RawText).contains(CanonicalDelimiterSuffix))
+ NewDelimiter = OldDelimiter;
+ }
+
+ unsigned NewPrefixSize = 3 + NewDelimiter.size();
+ unsigned NewSuffixSize = 2 + NewDelimiter.size();
- // The first start column is the column the raw text starts.
- unsigned FirstStartColumn = StartColumn + PrefixSize;
+  // The first start column is the column at which the raw text starts after
+  // formatting.
+ unsigned FirstStartColumn = StartColumn + NewPrefixSize;
  // The next start column is the intended indentation for a line break inside
  // the raw string at level 0. It is determined by the following rules:
@@ -1311,10 +1498,11 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
  // These rules have the advantage that the formatted content does not
  // violate the rectangle rule and still flows visually within the
  // surrounding source.
- bool ContentStartsOnNewline = Current.TokenText[PrefixSize] == '\n';
- unsigned NextStartColumn = ContentStartsOnNewline
- ? State.Stack.back().Indent + Style.IndentWidth
- : FirstStartColumn;
+ bool ContentStartsOnNewline = Current.TokenText[OldPrefixSize] == '\n';
+ unsigned NextStartColumn =
+ ContentStartsOnNewline
+ ? State.Stack.back().NestedBlockIndent + Style.IndentWidth
+ : FirstStartColumn;
  // The last start column is the column at which the raw string suffix starts
  // if it is put on a newline.
@@ -1325,11 +1513,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// - if the raw string prefix does not start on a newline, it is the current
// indent.
unsigned LastStartColumn = Current.NewlinesBefore
- ? FirstStartColumn - PrefixSize
- : State.Stack.back().Indent;
-
- std::string RawText =
- Current.TokenText.substr(PrefixSize).drop_back(SuffixSize);
+ ? FirstStartColumn - NewPrefixSize
+ : State.Stack.back().NestedBlockIndent;
std::pair<tooling::Replacements, unsigned> Fixes = internal::reformat(
RawStringStyle, RawText, {tooling::Range(0, RawText.size())},
@@ -1343,8 +1528,33 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
return 0;
}
if (!DryRun) {
+ if (NewDelimiter != OldDelimiter) {
+ // In 'R"delimiter(...', the delimiter starts 2 characters after the start
+ // of the token.
+ SourceLocation PrefixDelimiterStart =
+ Current.Tok.getLocation().getLocWithOffset(2);
+ auto PrefixErr = Whitespaces.addReplacement(tooling::Replacement(
+ SourceMgr, PrefixDelimiterStart, OldDelimiter.size(), NewDelimiter));
+ if (PrefixErr) {
+ llvm::errs()
+ << "Failed to update the prefix delimiter of a raw string: "
+ << llvm::toString(std::move(PrefixErr)) << "\n";
+ }
+ // In 'R"delimiter(...)delimiter"', the suffix delimiter starts at
+ // position length - 1 - |delimiter|.
+ SourceLocation SuffixDelimiterStart =
+ Current.Tok.getLocation().getLocWithOffset(Current.TokenText.size() -
+ 1 - OldDelimiter.size());
+ auto SuffixErr = Whitespaces.addReplacement(tooling::Replacement(
+ SourceMgr, SuffixDelimiterStart, OldDelimiter.size(), NewDelimiter));
+ if (SuffixErr) {
+ llvm::errs()
+ << "Failed to update the suffix delimiter of a raw string: "
+ << llvm::toString(std::move(SuffixErr)) << "\n";
+ }
+ }
SourceLocation OriginLoc =
- Current.Tok.getLocation().getLocWithOffset(PrefixSize);
+ Current.Tok.getLocation().getLocWithOffset(OldPrefixSize);
for (const tooling::Replacement &Fix : Fixes.first) {
auto Err = Whitespaces.addReplacement(tooling::Replacement(
SourceMgr, OriginLoc.getLocWithOffset(Fix.getOffset()),
@@ -1357,8 +1567,14 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
}
unsigned RawLastLineEndColumn = getLastLineEndColumn(
*NewCode, FirstStartColumn, Style.TabWidth, Encoding);
- State.Column = RawLastLineEndColumn + SuffixSize;
- return Fixes.second;
+ State.Column = RawLastLineEndColumn + NewSuffixSize;
+ // Since we're updating the column to after the raw string literal here, we
+ // have to manually add the penalty for the prefix R"delim( over the column
+ // limit.
+ unsigned PrefixExcessCharacters =
+ StartColumn + NewPrefixSize > Style.ColumnLimit ?
+ StartColumn + NewPrefixSize - Style.ColumnLimit : 0;
+ return Fixes.second + PrefixExcessCharacters * Style.PenaltyExcessCharacter;
}
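
A standalone sketch of the delimiter canonicalization above, with std::string standing in for StringRef/Twine; 'pb' as the canonical delimiter is only an example:

#include <cassert>
#include <string>

// Swap R"old(...)old" to the canonical delimiter unless the body already
// contains ")canon"", which would terminate the rewritten literal early.
static std::string canonicalize(const std::string &Text, // full token text
                                const std::string &Old, std::string New) {
  if (New.empty() || Old.empty())
    New = Old;
  // Strip the leading 'R"old(' and the trailing ')old"'.
  std::string Body = Text.substr(
      3 + Old.size(), Text.size() - (3 + Old.size()) - (2 + Old.size()));
  if (Body.find(")" + New + "\"") != std::string::npos)
    New = Old; // keep the old delimiter: the canonical one would collide
  return "R\"" + New + "(" + Body + ")" + New + "\"";
}

int main() {
  assert(canonicalize("R\"x(hello)x\"", "x", "pb") == "R\"pb(hello)pb\"");
  // A body containing ')pb"' must keep its original delimiter.
  assert(canonicalize("R\"x(a)pb\"b)x\"", "x", "pb") == "R\"x(a)pb\"b)x\"");
  return 0;
}
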
unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
@@ -1384,7 +1600,7 @@ unsigned ContinuationIndenter::handleEndOfLine(const FormatToken &Current,
// Compute the raw string style to use in case this is a raw string literal
// that can be reformatted.
auto RawStringStyle = getRawStringStyle(Current, State);
- if (RawStringStyle) {
+ if (RawStringStyle && !Current.Finalized) {
Penalty = reformatRawStringLiteral(Current, State, *RawStringStyle, DryRun);
} else if (Current.IsMultiline && Current.isNot(TT_BlockComment)) {
// Don't break multi-line tokens other than block comments and raw string
@@ -1430,6 +1646,26 @@ unsigned ContinuationIndenter::handleEndOfLine(const FormatToken &Current,
return Penalty;
}
+// Returns the enclosing function name of a token, or the empty string if not
+// found.
+static StringRef getEnclosingFunctionName(const FormatToken &Current) {
+ // Look for: 'function(' or 'function<templates>(' before Current.
+ auto Tok = Current.getPreviousNonComment();
+ if (!Tok || !Tok->is(tok::l_paren))
+ return "";
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok)
+ return "";
+ if (Tok->is(TT_TemplateCloser)) {
+ Tok = Tok->MatchingParen;
+ if (Tok)
+ Tok = Tok->getPreviousNonComment();
+ }
+ if (!Tok || !Tok->is(tok::identifier))
+ return "";
+ return Tok->TokenText;
+}
+
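
A toy of the backwards scan in getEnclosingFunctionName(), run over reversed string tokens rather than the FormatToken list; the function names are invented, and the real code also insists the final token is an identifier:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Toks holds the tokens before the raw string literal, last token first.
static std::string enclosingFunction(const std::vector<std::string> &Toks) {
  std::size_t I = 0;
  if (I >= Toks.size() || Toks[I] != "(")
    return "";
  ++I;
  if (I < Toks.size() && Toks[I] == ">") { // skip a '<...>' template list
    while (I < Toks.size() && Toks[I] != "<")
      ++I;
    if (I < Toks.size())
      ++I;
  }
  return I < Toks.size() ? Toks[I] : "";
}

int main() {
  // ParseTextProto(R"(...)")  ->  reversed prefix: "(", "ParseTextProto"
  assert(enclosingFunction({"(", "ParseTextProto"}) == "ParseTextProto");
  // Wrap<pb::Msg>(R"(...)")  ->  "(", ">", "Msg", "::", "pb", "<", "Wrap"
  assert(enclosingFunction({"(", ">", "Msg", "::", "pb", "<", "Wrap"}) ==
         "Wrap");
  return 0;
}
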
llvm::Optional<FormatStyle>
ContinuationIndenter::getRawStringStyle(const FormatToken &Current,
const LineState &State) {
@@ -1438,7 +1674,10 @@ ContinuationIndenter::getRawStringStyle(const FormatToken &Current,
auto Delimiter = getRawStringDelimiter(Current.TokenText);
if (!Delimiter)
return None;
- auto RawStringStyle = RawStringFormats.get(*Delimiter);
+ auto RawStringStyle = RawStringFormats.getDelimiterStyle(*Delimiter);
+ if (!RawStringStyle && Delimiter->empty())
+ RawStringStyle = RawStringFormats.getEnclosingFunctionStyle(
+ getEnclosingFunctionName(Current));
if (!RawStringStyle)
return None;
RawStringStyle->ColumnLimit = getColumnLimit(State);
@@ -1468,6 +1707,11 @@ std::unique_ptr<BreakableToken> ContinuationIndenter::createBreakableToken(
// likely want to terminate the string before any line breaking is done.
if (Current.IsUnterminatedLiteral)
return nullptr;
+ // Don't break string literals inside Objective-C array literals (doing so
+ // raises the warning -Wobjc-string-concatenation).
+ if (State.Stack.back().IsInsideObjCArrayLiteral) {
+ return nullptr;
+ }
StringRef Text = Current.TokenText;
StringRef Prefix;
@@ -1482,9 +1726,16 @@ std::unique_ptr<BreakableToken> ContinuationIndenter::createBreakableToken(
Text.startswith(Prefix = "u8\"") ||
Text.startswith(Prefix = "L\""))) ||
(Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")"))) {
+ // We need this to address the case where there is an unbreakable tail
+ // only if certain other formatting decisions have been taken. The
+ // UnbreakableTailLength of Current is an overapproximation in that case,
+ // and we need to be correct here.
+ unsigned UnbreakableTailLength = (State.NextToken && canBreak(State))
+ ? 0
+ : Current.UnbreakableTailLength;
return llvm::make_unique<BreakableStringLiteral>(
- Current, StartColumn, Prefix, Postfix, State.Line->InPPDirective,
- Encoding, Style);
+ Current, StartColumn, Prefix, Postfix, UnbreakableTailLength,
+ State.Line->InPPDirective, Encoding, Style);
}
} else if (Current.is(TT_BlockComment)) {
if (!Style.ReflowComments ||
@@ -1559,12 +1810,12 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Token->adaptStartOfLine(0, Whitespaces);
unsigned Penalty = 0;
- DEBUG(llvm::dbgs() << "Breaking protruding token at column " << StartColumn
- << ".\n");
+ LLVM_DEBUG(llvm::dbgs() << "Breaking protruding token at column "
+ << StartColumn << ".\n");
for (unsigned LineIndex = 0, EndIndex = Token->getLineCount();
LineIndex != EndIndex; ++LineIndex) {
- DEBUG(llvm::dbgs() << " Line: " << LineIndex << " (Reflow: " << Reflow
- << ")\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << " Line: " << LineIndex << " (Reflow: " << Reflow << ")\n");
NewBreakBefore = false;
// If we did reflow the previous line, we'll try reflowing again. Otherwise
// we'll start reflowing if the current line is broken or whitespace is
@@ -1572,11 +1823,11 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
bool TryReflow = Reflow;
// Break the current token until we can fit the rest of the line.
while (ContentStartColumn + RemainingTokenColumns > ColumnLimit) {
- DEBUG(llvm::dbgs() << " Over limit, need: "
- << (ContentStartColumn + RemainingTokenColumns)
- << ", space: " << ColumnLimit
- << ", reflown prefix: " << ContentStartColumn
- << ", offset in line: " << TailOffset << "\n");
+ LLVM_DEBUG(llvm::dbgs() << " Over limit, need: "
+ << (ContentStartColumn + RemainingTokenColumns)
+ << ", space: " << ColumnLimit
+ << ", reflown prefix: " << ContentStartColumn
+ << ", offset in line: " << TailOffset << "\n");
// If the current token doesn't fit, find the latest possible split in the
// current line so that breaking at it will be under the column limit.
// FIXME: Use the earliest possible split while reflowing to correctly
@@ -1591,7 +1842,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// The last line's penalty is handled in addNextStateToQueue().
Penalty += Style.PenaltyExcessCharacter *
(ContentStartColumn + RemainingTokenColumns - ColumnLimit);
- DEBUG(llvm::dbgs() << " No break opportunity.\n");
+ LLVM_DEBUG(llvm::dbgs() << " No break opportunity.\n");
break;
}
assert(Split.first != 0);
@@ -1618,7 +1869,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// ^--------------- to next split columns
unsigned ToSplitColumns = Token->getRangeLength(
LineIndex, TailOffset, Split.first, ContentStartColumn);
- DEBUG(llvm::dbgs() << " ToSplit: " << ToSplitColumns << "\n");
+ LLVM_DEBUG(llvm::dbgs() << " ToSplit: " << ToSplitColumns << "\n");
BreakableToken::Split NextSplit = Token->getSplit(
LineIndex, TailOffset + Split.first + Split.second, ColumnLimit,
@@ -1638,9 +1889,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// unbreakable sequence.
ToNextSplitColumns =
Token->getLengthAfterCompression(ToNextSplitColumns, Split);
- DEBUG(llvm::dbgs() << " ContentStartColumn: " << ContentStartColumn
- << "\n");
- DEBUG(llvm::dbgs() << " ToNextSplit: " << ToNextSplitColumns << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << " ContentStartColumn: " << ContentStartColumn << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << " ToNextSplit: " << ToNextSplitColumns << "\n");
// If the whitespace compression makes us fit, continue on the current
// line.
bool ContinueOnLine =
@@ -1652,16 +1904,16 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
ExcessCharactersPenalty =
(ContentStartColumn + ToNextSplitColumns - ColumnLimit) *
Style.PenaltyExcessCharacter;
- DEBUG(llvm::dbgs()
- << " Penalty excess: " << ExcessCharactersPenalty
- << "\n break : " << NewBreakPenalty << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << " Penalty excess: " << ExcessCharactersPenalty
+ << "\n break : " << NewBreakPenalty << "\n");
if (ExcessCharactersPenalty < NewBreakPenalty) {
Exceeded = true;
ContinueOnLine = true;
}
}
if (ContinueOnLine) {
- DEBUG(llvm::dbgs() << " Continuing on line...\n");
+ LLVM_DEBUG(llvm::dbgs() << " Continuing on line...\n");
// The current line fits after compressing the whitespace - reflow
// the next line into it if possible.
TryReflow = true;
@@ -1677,7 +1929,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
continue;
}
}
- DEBUG(llvm::dbgs() << " Breaking...\n");
+ LLVM_DEBUG(llvm::dbgs() << " Breaking...\n");
ContentStartColumn =
Token->getContentStartColumn(LineIndex, /*Break=*/true);
unsigned NewRemainingTokenColumns = Token->getRemainingLength(
@@ -1693,8 +1945,8 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
}
assert(NewRemainingTokenColumns < RemainingTokenColumns);
- DEBUG(llvm::dbgs() << " Breaking at: " << TailOffset + Split.first
- << ", " << Split.second << "\n");
+ LLVM_DEBUG(llvm::dbgs() << " Breaking at: " << TailOffset + Split.first
+ << ", " << Split.second << "\n");
if (!DryRun)
Token->insertBreak(LineIndex, TailOffset, Split, Whitespaces);
@@ -1732,11 +1984,12 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// the next logical line.
BreakableToken::Split SplitBeforeNext =
Token->getReflowSplit(NextLineIndex, CommentPragmasRegex);
- DEBUG(llvm::dbgs() << " Size of reflown text: " << ContentStartColumn
- << "\n Potential reflow split: ");
+ LLVM_DEBUG(llvm::dbgs()
+ << " Size of reflown text: " << ContentStartColumn
+ << "\n Potential reflow split: ");
if (SplitBeforeNext.first != StringRef::npos) {
- DEBUG(llvm::dbgs() << SplitBeforeNext.first << ", "
- << SplitBeforeNext.second << "\n");
+ LLVM_DEBUG(llvm::dbgs() << SplitBeforeNext.first << ", "
+ << SplitBeforeNext.second << "\n");
TailOffset = SplitBeforeNext.first + SplitBeforeNext.second;
// If the rest of the next line fits into the current line below the
// column limit, we can safely reflow.
@@ -1744,11 +1997,12 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
NextLineIndex, TailOffset, ContentStartColumn);
Reflow = true;
if (ContentStartColumn + RemainingTokenColumns > ColumnLimit) {
- DEBUG(llvm::dbgs() << " Over limit after reflow, need: "
- << (ContentStartColumn + RemainingTokenColumns)
- << ", space: " << ColumnLimit
- << ", reflown prefix: " << ContentStartColumn
- << ", offset in line: " << TailOffset << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << " Over limit after reflow, need: "
+ << (ContentStartColumn + RemainingTokenColumns)
+ << ", space: " << ColumnLimit
+ << ", reflown prefix: " << ContentStartColumn
+ << ", offset in line: " << TailOffset << "\n");
// If the whole next line does not fit, try to find a point in
// the next line at which we can break so that attaching the part
// of the next line to that break point onto the current line is
@@ -1757,7 +2011,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Token->getSplit(NextLineIndex, TailOffset, ColumnLimit,
ContentStartColumn, CommentPragmasRegex);
if (Split.first == StringRef::npos) {
- DEBUG(llvm::dbgs() << " Did not find later break\n");
+ LLVM_DEBUG(llvm::dbgs() << " Did not find later break\n");
Reflow = false;
} else {
// Check whether the first split point gets us below the column
@@ -1766,9 +2020,9 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
unsigned ToSplitColumns = Token->getRangeLength(
NextLineIndex, TailOffset, Split.first, ContentStartColumn);
if (ContentStartColumn + ToSplitColumns > ColumnLimit) {
- DEBUG(llvm::dbgs() << " Next split protrudes, need: "
- << (ContentStartColumn + ToSplitColumns)
- << ", space: " << ColumnLimit);
+ LLVM_DEBUG(llvm::dbgs() << " Next split protrudes, need: "
+ << (ContentStartColumn + ToSplitColumns)
+ << ", space: " << ColumnLimit);
unsigned ExcessCharactersPenalty =
(ContentStartColumn + ToSplitColumns - ColumnLimit) *
Style.PenaltyExcessCharacter;
@@ -1779,7 +2033,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
}
}
} else {
- DEBUG(llvm::dbgs() << "not found.\n");
+ LLVM_DEBUG(llvm::dbgs() << "not found.\n");
}
}
if (!Reflow) {
@@ -1821,7 +2075,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
BreakableToken::Split SplitAfterLastLine =
Token->getSplitAfterLastLine(TailOffset);
if (SplitAfterLastLine.first != StringRef::npos) {
- DEBUG(llvm::dbgs() << "Replacing whitespace after last line.\n");
+ LLVM_DEBUG(llvm::dbgs() << "Replacing whitespace after last line.\n");
if (!DryRun)
Token->replaceWhitespaceAfterLastLine(TailOffset, SplitAfterLastLine,
Whitespaces);
@@ -1875,7 +2129,7 @@ bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
if (Current.getNextNonComment() &&
Current.getNextNonComment()->isStringLiteral())
return true; // Implicit concatenation.
- if (Style.ColumnLimit != 0 &&
+ if (Style.ColumnLimit != 0 && Style.BreakStringLiterals &&
State.Column + Current.ColumnWidth + Current.UnbreakableTailLength >
Style.ColumnLimit)
return true; // String will be split.
diff --git a/lib/Format/ContinuationIndenter.h b/lib/Format/ContinuationIndenter.h
index ded7bfab4267..4ff05ba99f1a 100644
--- a/lib/Format/ContinuationIndenter.h
+++ b/lib/Format/ContinuationIndenter.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements an indenter that manages the indentation of
+/// This file implements an indenter that manages the indentation of
/// continuations.
///
//===----------------------------------------------------------------------===//
@@ -38,15 +38,19 @@ class WhitespaceManager;
struct RawStringFormatStyleManager {
llvm::StringMap<FormatStyle> DelimiterStyle;
+ llvm::StringMap<FormatStyle> EnclosingFunctionStyle;
RawStringFormatStyleManager(const FormatStyle &CodeStyle);
- llvm::Optional<FormatStyle> get(StringRef Delimiter) const;
+ llvm::Optional<FormatStyle> getDelimiterStyle(StringRef Delimiter) const;
+
+ llvm::Optional<FormatStyle>
+ getEnclosingFunctionStyle(StringRef EnclosingFunction) const;
};
class ContinuationIndenter {
public:
- /// \brief Constructs a \c ContinuationIndenter to format \p Line starting in
+ /// Constructs a \c ContinuationIndenter to format \p Line starting in
/// column \p FirstIndent.
ContinuationIndenter(const FormatStyle &Style,
const AdditionalKeywords &Keywords,
@@ -55,7 +59,7 @@ public:
encoding::Encoding Encoding,
bool BinPackInconclusiveFunctions);
- /// \brief Get the initial state, i.e. the state after placing \p Line's
+ /// Get the initial state, i.e. the state after placing \p Line's
/// first token at \p FirstIndent. When reformatting a fragment of code, as in
/// the case of formatting inside raw string literals, \p FirstStartColumn is
/// the column at which the state of the parent formatter is.
@@ -64,13 +68,13 @@ public:
// FIXME: canBreak and mustBreak aren't strictly indentation-related. Find a
// better home.
- /// \brief Returns \c true, if a line break after \p State is allowed.
+ /// Returns \c true, if a line break after \p State is allowed.
bool canBreak(const LineState &State);
- /// \brief Returns \c true, if a line break after \p State is mandatory.
+ /// Returns \c true, if a line break after \p State is mandatory.
bool mustBreak(const LineState &State);
- /// \brief Appends the next token to \p State and updates information
+ /// Appends the next token to \p State and updates information
/// necessary for indentation.
///
/// Puts the token on the current line if \p Newline is \c false and adds a
@@ -81,28 +85,28 @@ public:
unsigned addTokenToState(LineState &State, bool Newline, bool DryRun,
unsigned ExtraSpaces = 0);
- /// \brief Get the column limit for this line. This is the style's column
+ /// Get the column limit for this line. This is the style's column
/// limit, potentially reduced for preprocessor definitions.
unsigned getColumnLimit(const LineState &State) const;
private:
- /// \brief Mark the next token as consumed in \p State and modify its stacks
+ /// Mark the next token as consumed in \p State and modify its stacks
/// accordingly.
unsigned moveStateToNextToken(LineState &State, bool DryRun, bool Newline);
- /// \brief Update 'State' according to the next token's fake left parentheses.
+ /// Update 'State' according to the next token's fake left parentheses.
void moveStatePastFakeLParens(LineState &State, bool Newline);
- /// \brief Update 'State' according to the next token's fake r_parens.
+ /// Update 'State' according to the next token's fake r_parens.
void moveStatePastFakeRParens(LineState &State);
- /// \brief Update 'State' according to the next token being one of "(<{[".
+ /// Update 'State' according to the next token being one of "(<{[".
void moveStatePastScopeOpener(LineState &State, bool Newline);
- /// \brief Update 'State' according to the next token being one of ")>}]".
+ /// Update 'State' according to the next token being one of ")>}]".
void moveStatePastScopeCloser(LineState &State);
- /// \brief Update 'State' with the next token opening a nested block.
+ /// Update 'State' with the next token opening a nested block.
void moveStateToNewBlock(LineState &State);
- /// \brief Reformats a raw string literal.
+ /// Reformats a raw string literal.
///
/// \returns An extra penalty induced by reformatting the token.
unsigned reformatRawStringLiteral(const FormatToken &Current,
@@ -110,17 +114,17 @@ private:
const FormatStyle &RawStringStyle,
bool DryRun);
- /// \brief If the current token is at the end of the current line, handle
+ /// If the current token is at the end of the current line, handle
/// the transition to the next line.
unsigned handleEndOfLine(const FormatToken &Current, LineState &State,
bool DryRun, bool AllowBreak);
- /// \brief If \p Current is a raw string that is configured to be reformatted,
+ /// If \p Current is a raw string that is configured to be reformatted,
/// return the style to be used.
llvm::Optional<FormatStyle> getRawStringStyle(const FormatToken &Current,
const LineState &State);
- /// \brief If the current token sticks out over the end of the line, break
+ /// If the current token sticks out over the end of the line, break
/// it if possible.
///
/// \returns A pair (penalty, exceeded), where penalty is the extra penalty
@@ -143,13 +147,13 @@ private:
bool AllowBreak, bool DryRun,
bool Strict);
- /// \brief Returns the \c BreakableToken starting at \p Current, or nullptr
+ /// Returns the \c BreakableToken starting at \p Current, or nullptr
/// if the current token cannot be broken.
std::unique_ptr<BreakableToken>
createBreakableToken(const FormatToken &Current, LineState &State,
bool AllowBreak);
- /// \brief Appends the next token to \p State and updates information
+ /// Appends the next token to \p State and updates information
/// necessary for indentation.
///
/// Puts the token on the current line.
@@ -159,7 +163,7 @@ private:
void addTokenOnCurrentLine(LineState &State, bool DryRun,
unsigned ExtraSpaces);
- /// \brief Appends the next token to \p State and updates information
+ /// Appends the next token to \p State and updates information
/// necessary for indentation.
///
/// Adds a line break and necessary indentation.
@@ -168,17 +172,17 @@ private:
/// \c Replacement.
unsigned addTokenOnNewLine(LineState &State, bool DryRun);
- /// \brief Calculate the new column for a line wrap before the next token.
+ /// Calculate the new column for a line wrap before the next token.
unsigned getNewLineColumn(const LineState &State);
- /// \brief Adds a multiline token to the \p State.
+ /// Adds a multiline token to the \p State.
///
/// \returns Extra penalty for the first line of the literal: last line is
/// handled in \c addNextStateToQueue, and the penalty for other lines doesn't
/// matter, as we don't change them.
unsigned addMultilineToken(const FormatToken &Current, LineState &State);
- /// \brief Returns \c true if the next token starts a multiline string
+ /// Returns \c true if the next token starts a multiline string
/// literal.
///
/// This includes implicitly concatenated strings, strings that will be broken
@@ -196,124 +200,136 @@ private:
};
struct ParenState {
- ParenState(unsigned Indent, unsigned LastSpace, bool AvoidBinPacking,
- bool NoLineBreak)
- : Indent(Indent), LastSpace(LastSpace), NestedBlockIndent(Indent),
- BreakBeforeClosingBrace(false), AvoidBinPacking(AvoidBinPacking),
- BreakBeforeParameter(false), NoLineBreak(NoLineBreak),
- NoLineBreakInOperand(false), LastOperatorWrapped(true),
- ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
- AlignColons(true), ObjCSelectorNameFound(false),
- HasMultipleNestedBlocks(false), NestedBlockInlined(false) {}
-
- /// \brief The position to which a specific parenthesis level needs to be
+ ParenState(const FormatToken *Tok, unsigned Indent, unsigned LastSpace,
+ bool AvoidBinPacking, bool NoLineBreak)
+ : Tok(Tok), Indent(Indent), LastSpace(LastSpace),
+ NestedBlockIndent(Indent), BreakBeforeClosingBrace(false),
+ AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
+ NoLineBreak(NoLineBreak), NoLineBreakInOperand(false),
+ LastOperatorWrapped(true), ContainsLineBreak(false),
+ ContainsUnwrappedBuilder(false), AlignColons(true),
+ ObjCSelectorNameFound(false), HasMultipleNestedBlocks(false),
+ NestedBlockInlined(false), IsInsideObjCArrayLiteral(false) {}
+
+ /// The token opening this parenthesis level, or nullptr if this level
+ /// is opened by a fake parenthesis.
+ ///
+ /// Not considered for memoization as it will always have the same value at
+ /// the same token.
+ const FormatToken *Tok;
+
+ /// The position to which a specific parenthesis level needs to be
/// indented.
unsigned Indent;
- /// \brief The position of the last space on each level.
+ /// The position of the last space on each level.
///
/// Used e.g. to break like:
/// functionCall(Parameter, otherCall(
/// OtherParameter));
unsigned LastSpace;
- /// \brief If a block relative to this parenthesis level gets wrapped, indent
+ /// If a block relative to this parenthesis level gets wrapped, indent
/// it this much.
unsigned NestedBlockIndent;
- /// \brief The position the first "<<" operator encountered on each level.
+ /// The position the first "<<" operator encountered on each level.
///
/// Used to align "<<" operators. 0 if no such operator has been encountered
/// on a level.
unsigned FirstLessLess = 0;
- /// \brief The column of a \c ? in a conditional expression;
+ /// The column of a \c ? in a conditional expression.
unsigned QuestionColumn = 0;
- /// \brief The position of the colon in an ObjC method declaration/call.
+ /// The position of the colon in an ObjC method declaration/call.
unsigned ColonPos = 0;
- /// \brief The start of the most recent function in a builder-type call.
+ /// The start of the most recent function in a builder-type call.
unsigned StartOfFunctionCall = 0;
- /// \brief Contains the start of array subscript expressions, so that they
+ /// Contains the start of array subscript expressions, so that they
/// can be aligned.
unsigned StartOfArraySubscripts = 0;
- /// \brief If a nested name specifier was broken over multiple lines, this
+ /// If a nested name specifier was broken over multiple lines, this
/// contains the start column of the second line. Otherwise 0.
unsigned NestedNameSpecifierContinuation = 0;
- /// \brief If a call expression was broken over multiple lines, this
+ /// If a call expression was broken over multiple lines, this
/// contains the start column of the second line. Otherwise 0.
unsigned CallContinuation = 0;
- /// \brief The column of the first variable name in a variable declaration.
+ /// The column of the first variable name in a variable declaration.
///
/// Used to align further variables if necessary.
unsigned VariablePos = 0;
- /// \brief Whether a newline needs to be inserted before the block's closing
+ /// Whether a newline needs to be inserted before the block's closing
/// brace.
///
/// We only want to insert a newline before the closing brace if there also
/// was a newline after the beginning left brace.
bool BreakBeforeClosingBrace : 1;
- /// \brief Avoid bin packing, i.e. multiple parameters/elements on multiple
+ /// Avoid bin packing, i.e. multiple parameters/elements on multiple
/// lines, in this context.
bool AvoidBinPacking : 1;
- /// \brief Break after the next comma (or all the commas in this context if
+ /// Break after the next comma (or all the commas in this context if
/// \c AvoidBinPacking is \c true).
bool BreakBeforeParameter : 1;
- /// \brief Line breaking in this context would break a formatting rule.
+ /// Line breaking in this context would break a formatting rule.
bool NoLineBreak : 1;
- /// \brief Same as \c NoLineBreak, but is restricted until the end of the
+ /// Same as \c NoLineBreak, but is restricted until the end of the
/// operand (including the next ",").
bool NoLineBreakInOperand : 1;
- /// \brief True if the last binary operator on this level was wrapped to the
+ /// True if the last binary operator on this level was wrapped to the
/// next line.
bool LastOperatorWrapped : 1;
- /// \brief \c true if this \c ParenState already contains a line-break.
+ /// \c true if this \c ParenState already contains a line-break.
///
/// The first line break in a certain \c ParenState causes extra penalty so
/// that clang-format prefers similar breaks, i.e. breaks in the same
/// parenthesis.
bool ContainsLineBreak : 1;
- /// \brief \c true if this \c ParenState contains multiple segments of a
+ /// \c true if this \c ParenState contains multiple segments of a
/// builder-type call on one line.
bool ContainsUnwrappedBuilder : 1;
- /// \brief \c true if the colons of the curren ObjC method expression should
+ /// \c true if the colons of the current ObjC method expression should
/// be aligned.
///
/// Not considered for memoization as it will always have the same value at
/// the same token.
bool AlignColons : 1;
- /// \brief \c true if at least one selector name was found in the current
+ /// \c true if at least one selector name was found in the current
/// ObjC method expression.
///
/// Not considered for memoization as it will always have the same value at
/// the same token.
bool ObjCSelectorNameFound : 1;
- /// \brief \c true if there are multiple nested blocks inside these parens.
+ /// \c true if there are multiple nested blocks inside these parens.
///
/// Not considered for memoization as it will always have the same value at
/// the same token.
bool HasMultipleNestedBlocks : 1;
- // \brief The start of a nested block (e.g. lambda introducer in C++ or
- // "function" in JavaScript) is not wrapped to a new line.
+ /// The start of a nested block (e.g. lambda introducer in C++ or
+ /// "function" in JavaScript) is not wrapped to a new line.
bool NestedBlockInlined : 1;
+ /// \c true if the current \c ParenState represents an Objective-C
+ /// array literal.
+ bool IsInsideObjCArrayLiteral : 1;
+
bool operator<(const ParenState &Other) const {
if (Indent != Other.Indent)
return Indent < Other.Indent;
@@ -355,37 +371,37 @@ struct ParenState {
}
};
-/// \brief The current state when indenting a unwrapped line.
+/// The current state when indenting an unwrapped line.
///
/// As the indenting tries different combinations this is copied by value.
struct LineState {
- /// \brief The number of used columns in the current line.
+ /// The number of used columns in the current line.
unsigned Column;
- /// \brief The token that needs to be next formatted.
+ /// The token that needs to be next formatted.
FormatToken *NextToken;
- /// \brief \c true if this line contains a continued for-loop section.
+ /// \c true if this line contains a continued for-loop section.
bool LineContainsContinuedForLoopSection;
- /// \brief \c true if \p NextToken should not continue this line.
+ /// \c true if \p NextToken should not continue this line.
bool NoContinuation;
- /// \brief The \c NestingLevel at the start of this line.
+ /// The \c NestingLevel at the start of this line.
unsigned StartOfLineLevel;
- /// \brief The lowest \c NestingLevel on the current line.
+ /// The lowest \c NestingLevel on the current line.
unsigned LowestLevelOnLine;
- /// \brief The start column of the string literal, if we're in a string
+ /// The start column of the string literal, if we're in a string
/// literal sequence, 0 otherwise.
unsigned StartOfStringLiteral;
- /// \brief A stack keeping track of properties applying to parenthesis
+ /// A stack keeping track of properties applying to parenthesis
/// levels.
std::vector<ParenState> Stack;
- /// \brief Ignore the stack of \c ParenStates for state comparison.
+ /// Ignore the stack of \c ParenStates for state comparison.
///
/// In long and deeply nested unwrapped lines, the current algorithm can
/// be insufficient for finding the best formatting with a reasonable amount
@@ -400,15 +416,15 @@ struct LineState {
/// FIXME: Come up with a better algorithm instead.
bool IgnoreStackForComparison;
- /// \brief The indent of the first token.
+ /// The indent of the first token.
unsigned FirstIndent;
- /// \brief The line that is being formatted.
+ /// The line that is being formatted.
///
/// Does not need to be considered for memoization because it doesn't change.
const AnnotatedLine *Line;
- /// \brief Comparison operator to be able to used \c LineState in \c map.
+ /// Comparison operator to be able to use \c LineState in \c map.
bool operator<(const LineState &Other) const {
if (NextToken != Other.NextToken)
return NextToken < Other.NextToken;
diff --git a/lib/Format/Encoding.h b/lib/Format/Encoding.h
index 3339597b4edd..4c877e7e49d5 100644
--- a/lib/Format/Encoding.h
+++ b/lib/Format/Encoding.h
@@ -1,4 +1,4 @@
-//===--- Encoding.h - Format C++ code -------------------------------------===//
+//===--- Encoding.h - Format C++ code ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Contains functions for text encoding manipulation. Supports UTF-8,
+/// Contains functions for text encoding manipulation. Supports UTF-8,
/// 8-bit encodings and escape sequences in C++ string literals.
///
//===----------------------------------------------------------------------===//
@@ -30,7 +30,7 @@ enum Encoding {
Encoding_Unknown // We treat all other encodings as 8-bit encodings.
};
-/// \brief Detects encoding of the Text. If the Text can be decoded using UTF-8,
+/// Detects encoding of the Text. If the Text can be decoded using UTF-8,
/// it is considered UTF8, otherwise we treat it as some 8-bit encoding.
inline Encoding detectEncoding(StringRef Text) {
const llvm::UTF8 *Ptr = reinterpret_cast<const llvm::UTF8 *>(Text.begin());
@@ -40,7 +40,7 @@ inline Encoding detectEncoding(StringRef Text) {
return Encoding_Unknown;
}
-/// \brief Returns the number of columns required to display the \p Text on a
+/// Returns the number of columns required to display the \p Text on a
/// generic Unicode-capable terminal. Text is assumed to use the specified
/// \p Encoding.
inline unsigned columnWidth(StringRef Text, Encoding Encoding) {
@@ -56,7 +56,7 @@ inline unsigned columnWidth(StringRef Text, Encoding Encoding) {
return Text.size();
}
-/// \brief Returns the number of columns required to display the \p Text,
+/// Returns the number of columns required to display the \p Text,
/// starting from the \p StartColumn on a terminal with the \p TabWidth. The
/// text is assumed to use the specified \p Encoding.
inline unsigned columnWidthWithTabs(StringRef Text, unsigned StartColumn,
@@ -73,7 +73,7 @@ inline unsigned columnWidthWithTabs(StringRef Text, unsigned StartColumn,
}
}
-/// \brief Gets the number of bytes in a sequence representing a single
+/// Gets the number of bytes in a sequence representing a single
/// codepoint and starting with FirstChar in the specified Encoding.
inline unsigned getCodePointNumBytes(char FirstChar, Encoding Encoding) {
switch (Encoding) {
@@ -91,7 +91,7 @@ inline bool isHexDigit(char c) {
('A' <= c && c <= 'F');
}
-/// \brief Gets the length of an escape sequence inside a C++ string literal.
+/// Gets the length of an escape sequence inside a C++ string literal.
/// Text should span from the beginning of the escape sequence (starting with a
/// backslash) to the end of the string literal.
inline unsigned getEscapeSequenceLength(StringRef Text) {
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 217c6729ee39..59d34308c0a9 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements functions declared in Format.h. This will be
+/// This file implements functions declared in Format.h. This will be
/// split into separate files as we go.
///
//===----------------------------------------------------------------------===//
@@ -31,7 +31,9 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Path.h"
@@ -39,13 +41,14 @@
#include "llvm/Support/YAMLTraits.h"
#include <algorithm>
#include <memory>
+#include <mutex>
#include <string>
+#include <unordered_map>
#define DEBUG_TYPE "format-formatter"
using clang::format::FormatStyle;
-LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::IncludeCategory)
LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::RawStringFormat)
namespace llvm {
@@ -104,6 +107,14 @@ template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::BinPackStyle> {
+ static void enumeration(IO &IO, FormatStyle::BinPackStyle &Value) {
+ IO.enumCase(Value, "Auto", FormatStyle::BPS_Auto);
+ IO.enumCase(Value, "Always", FormatStyle::BPS_Always);
+ IO.enumCase(Value, "Never", FormatStyle::BPS_Never);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
IO.enumCase(Value, "All", FormatStyle::BOS_All);
@@ -138,6 +149,16 @@ struct ScalarEnumerationTraits<FormatStyle::BreakConstructorInitializersStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::BreakInheritanceListStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::BreakInheritanceListStyle &Value) {
+ IO.enumCase(Value, "BeforeColon", FormatStyle::BILS_BeforeColon);
+ IO.enumCase(Value, "BeforeComma", FormatStyle::BILS_BeforeComma);
+ IO.enumCase(Value, "AfterColon", FormatStyle::BILS_AfterColon);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
static void enumeration(IO &IO, FormatStyle::PPDirectiveIndentStyle &Value) {
IO.enumCase(Value, "None", FormatStyle::PPDIS_None);
@@ -158,6 +179,19 @@ struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::BreakTemplateDeclarationsStyle &Value) {
+ IO.enumCase(Value, "No", FormatStyle::BTDS_No);
+ IO.enumCase(Value, "MultiLine", FormatStyle::BTDS_MultiLine);
+ IO.enumCase(Value, "Yes", FormatStyle::BTDS_Yes);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BTDS_MultiLine);
+ IO.enumCase(Value, "true", FormatStyle::BTDS_Yes);
+ }
+};
+
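A minimal sketch of what the two compatibility cases buy (not part of the patch; parseConfiguration is defined further down in this file): an old boolean value still parses into the new enum.

    FormatStyle Style = getLLVMStyle();
    std::error_code EC =
        parseConfiguration("AlwaysBreakTemplateDeclarations: true", &Style);
    assert(!EC &&
           Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes);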
+template <>
struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
static void
enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
@@ -326,8 +360,19 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("BreakBeforeBinaryOperators",
Style.BreakBeforeBinaryOperators);
IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
+
+ bool BreakBeforeInheritanceComma = false;
IO.mapOptional("BreakBeforeInheritanceComma",
- Style.BreakBeforeInheritanceComma);
+ BreakBeforeInheritanceComma);
+ IO.mapOptional("BreakInheritanceList",
+ Style.BreakInheritanceList);
+  // If BreakBeforeInheritanceComma was specified but BreakInheritanceList
+  // was not, initialize the latter from the former for backwards
+  // compatibility.
+ if (BreakBeforeInheritanceComma &&
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeColon)
+ Style.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
+
IO.mapOptional("BreakBeforeTernaryOperators",
Style.BreakBeforeTernaryOperators);
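The same compatibility idea, sketched for the renamed inheritance option (illustrative, not from the patch):

    FormatStyle Style = getLLVMStyle();
    parseConfiguration("BreakBeforeInheritanceComma: true", &Style);
    // The deprecated key is translated on the fly:
    assert(Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma);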
@@ -361,9 +406,9 @@ template <> struct MappingTraits<FormatStyle> {
Style.ExperimentalAutoDetectBinPacking);
IO.mapOptional("FixNamespaceComments", Style.FixNamespaceComments);
IO.mapOptional("ForEachMacros", Style.ForEachMacros);
- IO.mapOptional("IncludeBlocks", Style.IncludeBlocks);
- IO.mapOptional("IncludeCategories", Style.IncludeCategories);
- IO.mapOptional("IncludeIsMainRegex", Style.IncludeIsMainRegex);
+ IO.mapOptional("IncludeBlocks", Style.IncludeStyle.IncludeBlocks);
+ IO.mapOptional("IncludeCategories", Style.IncludeStyle.IncludeCategories);
+ IO.mapOptional("IncludeIsMainRegex", Style.IncludeStyle.IncludeIsMainRegex);
IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
IO.mapOptional("IndentWidth", Style.IndentWidth);
@@ -377,6 +422,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
+ IO.mapOptional("ObjCBinPackProtocolList", Style.ObjCBinPackProtocolList);
IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
IO.mapOptional("ObjCSpaceBeforeProtocolList",
@@ -388,6 +434,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PenaltyBreakFirstLessLess",
Style.PenaltyBreakFirstLessLess);
IO.mapOptional("PenaltyBreakString", Style.PenaltyBreakString);
+ IO.mapOptional("PenaltyBreakTemplateDeclaration",
+ Style.PenaltyBreakTemplateDeclaration);
IO.mapOptional("PenaltyExcessCharacter", Style.PenaltyExcessCharacter);
IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
Style.PenaltyReturnTypeOnItsOwnLine);
@@ -401,7 +449,15 @@ template <> struct MappingTraits<FormatStyle> {
Style.SpaceAfterTemplateKeyword);
IO.mapOptional("SpaceBeforeAssignmentOperators",
Style.SpaceBeforeAssignmentOperators);
+ IO.mapOptional("SpaceBeforeCpp11BracedList",
+ Style.SpaceBeforeCpp11BracedList);
+ IO.mapOptional("SpaceBeforeCtorInitializerColon",
+ Style.SpaceBeforeCtorInitializerColon);
+ IO.mapOptional("SpaceBeforeInheritanceColon",
+ Style.SpaceBeforeInheritanceColon);
IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
+ IO.mapOptional("SpaceBeforeRangeBasedForLoopColon",
+ Style.SpaceBeforeRangeBasedForLoopColon);
IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
IO.mapOptional("SpacesBeforeTrailingComments",
Style.SpacesBeforeTrailingComments);
@@ -438,25 +494,12 @@ template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
}
};
-template <> struct MappingTraits<FormatStyle::IncludeCategory> {
- static void mapping(IO &IO, FormatStyle::IncludeCategory &Category) {
- IO.mapOptional("Regex", Category.Regex);
- IO.mapOptional("Priority", Category.Priority);
- }
-};
-
-template <> struct ScalarEnumerationTraits<FormatStyle::IncludeBlocksStyle> {
- static void enumeration(IO &IO, FormatStyle::IncludeBlocksStyle &Value) {
- IO.enumCase(Value, "Preserve", FormatStyle::IBS_Preserve);
- IO.enumCase(Value, "Merge", FormatStyle::IBS_Merge);
- IO.enumCase(Value, "Regroup", FormatStyle::IBS_Regroup);
- }
-};
-
template <> struct MappingTraits<FormatStyle::RawStringFormat> {
static void mapping(IO &IO, FormatStyle::RawStringFormat &Format) {
- IO.mapOptional("Delimiter", Format.Delimiter);
IO.mapOptional("Language", Format.Language);
+ IO.mapOptional("Delimiters", Format.Delimiters);
+ IO.mapOptional("EnclosingFunctions", Format.EnclosingFunctions);
+ IO.mapOptional("CanonicalDelimiter", Format.CanonicalDelimiter);
IO.mapOptional("BasedOnStyle", Format.BasedOnStyle);
}
};
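Taken together, the new mapping accepts configuration entries shaped like the following (a sketch; the enclosing function name is invented):

    FormatStyle Style = getLLVMStyle();
    parseConfiguration("RawStringFormats:\n"
                       "  - Language: TextProto\n"
                       "    Delimiters: ['pb', 'proto']\n"
                       "    EnclosingFunctions: ['ParseTextProtoOrDie']\n"
                       "    CanonicalDelimiter: 'pb'\n"
                       "    BasedOnStyle: google\n",
                       &Style);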
@@ -493,7 +536,7 @@ namespace clang {
namespace format {
const std::error_category &getParseCategory() {
- static ParseErrorCategory C;
+ static const ParseErrorCategory C{};
return C;
}
std::error_code make_error_code(ParseError e) {
@@ -593,7 +636,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_None;
LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
- LLVMStyle.AlwaysBreakTemplateDeclarations = false;
+ LLVMStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
@@ -604,7 +647,7 @@ FormatStyle getLLVMStyle() {
false, false, true, true, true};
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
LLVMStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
- LLVMStyle.BreakBeforeInheritanceComma = false;
+ LLVMStyle.BreakInheritanceList = FormatStyle::BILS_BeforeColon;
LLVMStyle.BreakStringLiterals = true;
LLVMStyle.ColumnLimit = 80;
LLVMStyle.CommentPragmas = "^ IWYU pragma:";
@@ -619,11 +662,12 @@ FormatStyle getLLVMStyle() {
LLVMStyle.ForEachMacros.push_back("foreach");
LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
- LLVMStyle.IncludeCategories = {{"^\"(llvm|llvm-c|clang|clang-c)/", 2},
- {"^(<|\"(gtest|gmock|isl|json)/)", 3},
- {".*", 1}};
- LLVMStyle.IncludeIsMainRegex = "(Test)?$";
- LLVMStyle.IncludeBlocks = FormatStyle::IBS_Preserve;
+ LLVMStyle.IncludeStyle.IncludeCategories = {
+ {"^\"(llvm|llvm-c|clang|clang-c)/", 2},
+ {"^(<|\"(gtest|gmock|isl|json)/)", 3},
+ {".*", 1}};
+ LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
+ LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
LLVMStyle.IndentWrappedFunctionNames = false;
@@ -634,6 +678,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.MaxEmptyLinesToKeep = 1;
LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
+ LLVMStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Auto;
LLVMStyle.ObjCBlockIndentWidth = 2;
LLVMStyle.ObjCSpaceAfterProperty = false;
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
@@ -641,7 +686,6 @@ FormatStyle getLLVMStyle() {
LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.Standard = FormatStyle::LS_Cpp11;
LLVMStyle.UseTab = FormatStyle::UT_Never;
- LLVMStyle.RawStringFormats = {{"pb", FormatStyle::LK_TextProto, "google"}};
LLVMStyle.ReflowComments = true;
LLVMStyle.SpacesInParentheses = false;
LLVMStyle.SpacesInSquareBrackets = false;
@@ -650,8 +694,12 @@ FormatStyle getLLVMStyle() {
LLVMStyle.SpacesInCStyleCastParentheses = false;
LLVMStyle.SpaceAfterCStyleCast = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
+ LLVMStyle.SpaceBeforeCtorInitializerColon = true;
+ LLVMStyle.SpaceBeforeInheritanceColon = true;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
+ LLVMStyle.SpaceBeforeRangeBasedForLoopColon = true;
LLVMStyle.SpaceBeforeAssignmentOperators = true;
+ LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpacesInAngles = false;
LLVMStyle.PenaltyBreakAssignment = prec::Assignment;
@@ -661,6 +709,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.PenaltyExcessCharacter = 1000000;
LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
+ LLVMStyle.PenaltyBreakTemplateDeclaration = prec::Relational;
LLVMStyle.DisableFormat = false;
LLVMStyle.SortIncludes = true;
@@ -673,6 +722,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
if (Language == FormatStyle::LK_TextProto) {
FormatStyle GoogleStyle = getGoogleStyle(FormatStyle::LK_Proto);
GoogleStyle.Language = FormatStyle::LK_TextProto;
+
return GoogleStyle;
}
@@ -684,17 +734,57 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AllowShortIfStatementsOnASingleLine = true;
GoogleStyle.AllowShortLoopsOnASingleLine = true;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
- GoogleStyle.AlwaysBreakTemplateDeclarations = true;
+ GoogleStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
GoogleStyle.DerivePointerAlignment = true;
- GoogleStyle.IncludeCategories = {
+ GoogleStyle.IncludeStyle.IncludeCategories = {
{"^<ext/.*\\.h>", 2}, {"^<.*\\.h>", 1}, {"^<.*", 2}, {".*", 3}};
- GoogleStyle.IncludeIsMainRegex = "([-_](test|unittest))?$";
+ GoogleStyle.IncludeStyle.IncludeIsMainRegex = "([-_](test|unittest))?$";
GoogleStyle.IndentCaseLabels = true;
GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
+ GoogleStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Never;
GoogleStyle.ObjCSpaceAfterProperty = false;
- GoogleStyle.ObjCSpaceBeforeProtocolList = false;
+ GoogleStyle.ObjCSpaceBeforeProtocolList = true;
GoogleStyle.PointerAlignment = FormatStyle::PAS_Left;
+ GoogleStyle.RawStringFormats = {
+ {
+ FormatStyle::LK_Cpp,
+ /*Delimiters=*/
+ {
+ "cc",
+ "CC",
+ "cpp",
+ "Cpp",
+ "CPP",
+ "c++",
+ "C++",
+ },
+ /*EnclosingFunctions=*/
+ {},
+ /*CanonicalDelimiter=*/"",
+ /*BasedOnStyle=*/"google",
+ },
+ {
+ FormatStyle::LK_TextProto,
+ /*Delimiters=*/
+ {
+ "pb",
+ "PB",
+ "proto",
+ "PROTO",
+ },
+ /*EnclosingFunctions=*/
+ {
+ "EqualsProto",
+ "EquivToProto",
+ "PARSE_TEST_PROTO",
+ "PARSE_TEXT_PROTO",
+ "ParseTextOrDie",
+ },
+ /*CanonicalDelimiter=*/"",
+ /*BasedOnStyle=*/"google",
+ },
+ };
GoogleStyle.SpacesBeforeTrailingComments = 2;
GoogleStyle.Standard = FormatStyle::LS_Auto;
@@ -729,8 +819,17 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.JavaScriptWrapImports = false;
} else if (Language == FormatStyle::LK_Proto) {
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.SpacesInContainerLiterals = false;
+ GoogleStyle.Cpp11BracedListStyle = false;
+ // This affects protocol buffer option specifications and text protos.
+ // Text protos are currently mostly formatted inside C++ raw string
+ // literals, and the current string-literal breaking behavior is often not
+ // beneficial there. Investigate turning this on once proper string reflow
+ // has been implemented.
+ GoogleStyle.BreakStringLiterals = false;
} else if (Language == FormatStyle::LK_ObjC) {
+ GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.ColumnLimit = 100;
}
@@ -767,12 +866,12 @@ FormatStyle getMozillaStyle() {
MozillaStyle.AlwaysBreakAfterReturnType = FormatStyle::RTBS_TopLevel;
MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
FormatStyle::DRTBS_TopLevel;
- MozillaStyle.AlwaysBreakTemplateDeclarations = true;
+ MozillaStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
MozillaStyle.BinPackParameters = false;
MozillaStyle.BinPackArguments = false;
MozillaStyle.BreakBeforeBraces = FormatStyle::BS_Mozilla;
MozillaStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
- MozillaStyle.BreakBeforeInheritanceComma = true;
+ MozillaStyle.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
MozillaStyle.ContinuationIndentWidth = 2;
MozillaStyle.Cpp11BracedListStyle = false;
@@ -803,6 +902,7 @@ FormatStyle getWebKitStyle() {
Style.ObjCBlockIndentWidth = 4;
Style.ObjCSpaceAfterProperty = true;
Style.PointerAlignment = FormatStyle::PAS_Left;
+ Style.SpaceBeforeCpp11BracedList = true;
return Style;
}
@@ -859,7 +959,7 @@ std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
assert(Language != FormatStyle::LK_None);
if (Text.trim().empty())
return make_error_code(ParseError::Error);
-
+ Style->StyleSet.Clear();
std::vector<FormatStyle> Styles;
llvm::yaml::Input Input(Text);
// DocumentListTraits<vector<FormatStyle>> uses the context to get default
@@ -878,9 +978,9 @@ std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
// Ensure that each language is configured at most once.
for (unsigned j = 0; j < i; ++j) {
if (Styles[i].Language == Styles[j].Language) {
- DEBUG(llvm::dbgs()
- << "Duplicate languages in the config file on positions " << j
- << " and " << i << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << "Duplicate languages in the config file on positions "
+ << j << " and " << i << "\n");
return make_error_code(ParseError::Error);
}
}
@@ -888,15 +988,23 @@ std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
// Look for a suitable configuration starting from the end, so we can
// find the configuration for the specific language first, and the default
// configuration (which can only be at slot 0) after it.
+ FormatStyle::FormatStyleSet StyleSet;
+ bool LanguageFound = false;
for (int i = Styles.size() - 1; i >= 0; --i) {
- if (Styles[i].Language == Language ||
- Styles[i].Language == FormatStyle::LK_None) {
- *Style = Styles[i];
- Style->Language = Language;
- return make_error_code(ParseError::Success);
- }
- }
- return make_error_code(ParseError::Unsuitable);
+ if (Styles[i].Language != FormatStyle::LK_None)
+ StyleSet.Add(Styles[i]);
+ if (Styles[i].Language == Language)
+ LanguageFound = true;
+ }
+ if (!LanguageFound) {
+ if (Styles.empty() || Styles[0].Language != FormatStyle::LK_None)
+ return make_error_code(ParseError::Unsuitable);
+ FormatStyle DefaultStyle = Styles[0];
+ DefaultStyle.Language = Language;
+ StyleSet.Add(std::move(DefaultStyle));
+ }
+ *Style = *StyleSet.Get(Language);
+ return make_error_code(ParseError::Success);
}
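A sketch of the observable effect (the configuration text is made up): every Language section parsed here remains reachable through the returned style.

    FormatStyle Style = getLLVMStyle(); // Language == LK_Cpp
    parseConfiguration("---\n"
                       "BasedOnStyle: LLVM\n"
                       "---\n"
                       "Language: JavaScript\n"
                       "ColumnLimit: 100\n"
                       "...\n",
                       &Style);
    // Style itself is the C++ configuration; the JavaScript section stays
    // available through the attached StyleSet:
    llvm::Optional<FormatStyle> JSStyle =
        Style.GetLanguageStyle(FormatStyle::LK_JavaScript);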
std::string configurationAsText(const FormatStyle &Style) {
@@ -910,6 +1018,38 @@ std::string configurationAsText(const FormatStyle &Style) {
return Stream.str();
}
+llvm::Optional<FormatStyle>
+FormatStyle::FormatStyleSet::Get(FormatStyle::LanguageKind Language) const {
+ if (!Styles)
+ return None;
+ auto It = Styles->find(Language);
+ if (It == Styles->end())
+ return None;
+ FormatStyle Style = It->second;
+ Style.StyleSet = *this;
+ return Style;
+}
+
+void FormatStyle::FormatStyleSet::Add(FormatStyle Style) {
+ assert(Style.Language != LK_None &&
+ "Cannot add a style for LK_None to a StyleSet");
+ assert(
+ !Style.StyleSet.Styles &&
+ "Cannot add a style associated with an existing StyleSet to a StyleSet");
+ if (!Styles)
+ Styles = std::make_shared<MapType>();
+ (*Styles)[Style.Language] = std::move(Style);
+}
+
+void FormatStyle::FormatStyleSet::Clear() {
+ Styles.reset();
+}
+
+llvm::Optional<FormatStyle>
+FormatStyle::GetLanguageStyle(FormatStyle::LanguageKind Language) const {
+ return StyleSet.Get(Language);
+}
+
namespace {
class JavaScriptRequoter : public TokenAnalyzer {
@@ -921,8 +1061,7 @@ public:
analyze(TokenAnnotator &Annotator,
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) override {
- AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
- AnnotatedLines.end());
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Result;
requoteJSStringLiteral(AnnotatedLines, Result);
return {Result, 0};
@@ -1012,8 +1151,7 @@ public:
FormatTokenLexer &Tokens) override {
tooling::Replacements Result;
deriveLocalStyle(AnnotatedLines);
- AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
- AnnotatedLines.end());
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
}
@@ -1137,8 +1275,7 @@ public:
// To determine if some redundant code is actually introduced by
// replacements(e.g. deletions), we need to come up with a more
// sophisticated way of computing affected ranges.
- AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
- AnnotatedLines.end());
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
checkEmptyNamespace(AnnotatedLines);
@@ -1341,6 +1478,128 @@ private:
std::set<FormatToken *, FormatTokenLess> DeletedTokens;
};
+class ObjCHeaderStyleGuesser : public TokenAnalyzer {
+public:
+ ObjCHeaderStyleGuesser(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style), IsObjC(false) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ assert(Style.Language == FormatStyle::LK_Cpp);
+ IsObjC = guessIsObjC(AnnotatedLines, Tokens.getKeywords());
+ tooling::Replacements Result;
+ return {Result, 0};
+ }
+
+ bool isObjC() { return IsObjC; }
+
+private:
+ static bool guessIsObjC(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ const AdditionalKeywords &Keywords) {
+ // Keep this array sorted, since we are binary searching over it.
+ static constexpr llvm::StringLiteral FoundationIdentifiers[] = {
+ "CGFloat",
+ "CGPoint",
+ "CGPointMake",
+ "CGPointZero",
+ "CGRect",
+ "CGRectEdge",
+ "CGRectInfinite",
+ "CGRectMake",
+ "CGRectNull",
+ "CGRectZero",
+ "CGSize",
+ "CGSizeMake",
+ "CGVector",
+ "CGVectorMake",
+ "NSAffineTransform",
+ "NSArray",
+ "NSAttributedString",
+ "NSBlockOperation",
+ "NSBundle",
+ "NSCache",
+ "NSCalendar",
+ "NSCharacterSet",
+ "NSCountedSet",
+ "NSData",
+ "NSDataDetector",
+ "NSDecimal",
+ "NSDecimalNumber",
+ "NSDictionary",
+ "NSEdgeInsets",
+ "NSHashTable",
+ "NSIndexPath",
+ "NSIndexSet",
+ "NSInteger",
+ "NSInvocationOperation",
+ "NSLocale",
+ "NSMapTable",
+ "NSMutableArray",
+ "NSMutableAttributedString",
+ "NSMutableCharacterSet",
+ "NSMutableData",
+ "NSMutableDictionary",
+ "NSMutableIndexSet",
+ "NSMutableOrderedSet",
+ "NSMutableSet",
+ "NSMutableString",
+ "NSNumber",
+ "NSNumberFormatter",
+ "NSObject",
+ "NSOperation",
+ "NSOperationQueue",
+ "NSOperationQueuePriority",
+ "NSOrderedSet",
+ "NSPoint",
+ "NSPointerArray",
+ "NSQualityOfService",
+ "NSRange",
+ "NSRect",
+ "NSRegularExpression",
+ "NSSet",
+ "NSSize",
+ "NSString",
+ "NSTimeZone",
+ "NSUInteger",
+ "NSURL",
+ "NSURLComponents",
+ "NSURLQueryItem",
+ "NSUUID",
+ "NSValue",
+ "UIImage",
+ "UIView",
+ };
+
+ for (auto Line : AnnotatedLines) {
+ for (const FormatToken *FormatTok = Line->First; FormatTok;
+ FormatTok = FormatTok->Next) {
+ if ((FormatTok->Previous && FormatTok->Previous->is(tok::at) &&
+ (FormatTok->Tok.getObjCKeywordID() != tok::objc_not_keyword ||
+ FormatTok->isOneOf(tok::numeric_constant, tok::l_square,
+ tok::l_brace))) ||
+ (FormatTok->Tok.isAnyIdentifier() &&
+ std::binary_search(std::begin(FoundationIdentifiers),
+ std::end(FoundationIdentifiers),
+ FormatTok->TokenText)) ||
+ FormatTok->is(TT_ObjCStringLiteral) ||
+ FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS,
+ TT_ObjCBlockLBrace, TT_ObjCBlockLParen,
+ TT_ObjCDecl, TT_ObjCForIn, TT_ObjCMethodExpr,
+ TT_ObjCMethodSpecifier, TT_ObjCProperty)) {
+ return true;
+ }
+ if (guessIsObjC(Line->Children, Keywords))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool IsObjC;
+};
+
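The "keep this array sorted" comment above is load-bearing: std::binary_search silently misbehaves on unsorted input. A debug-build guard one could add (a sketch, not in the patch):

    assert(std::is_sorted(std::begin(FoundationIdentifiers),
                          std::end(FoundationIdentifiers)) &&
           "FoundationIdentifiers must stay sorted for std::binary_search");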
struct IncludeDirective {
StringRef Filename;
StringRef Text;
@@ -1436,14 +1695,15 @@ static void sortCppIncludes(const FormatStyle &Style,
// the entire block. Otherwise, no replacement is generated.
if (Indices.size() == Includes.size() &&
std::is_sorted(Indices.begin(), Indices.end()) &&
- Style.IncludeBlocks == FormatStyle::IBS_Preserve)
+ Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve)
return;
std::string result;
for (unsigned Index : Indices) {
if (!result.empty()) {
result += "\n";
- if (Style.IncludeBlocks == FormatStyle::IBS_Regroup &&
+ if (Style.IncludeStyle.IncludeBlocks ==
+ tooling::IncludeStyle::IBS_Regroup &&
CurrentCategory != Includes[Index].Category)
result += "\n";
}
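With IncludeBlocks: Regroup and the LLVM categories configured in getLLVMStyle() above, the extra newline per category change produces output shaped like this for a hypothetical Widget.cpp (file names invented):

    #include "Widget.h"             // priority 0: main header

    #include "llvm/ADT/StringRef.h" // priority 2

    #include <gtest/gtest.h>        // priority 3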
@@ -1465,60 +1725,6 @@ static void sortCppIncludes(const FormatStyle &Style,
namespace {
-// This class manages priorities of #include categories and calculates
-// priorities for headers.
-class IncludeCategoryManager {
-public:
- IncludeCategoryManager(const FormatStyle &Style, StringRef FileName)
- : Style(Style), FileName(FileName) {
- FileStem = llvm::sys::path::stem(FileName);
- for (const auto &Category : Style.IncludeCategories)
- CategoryRegexs.emplace_back(Category.Regex, llvm::Regex::IgnoreCase);
- IsMainFile = FileName.endswith(".c") || FileName.endswith(".cc") ||
- FileName.endswith(".cpp") || FileName.endswith(".c++") ||
- FileName.endswith(".cxx") || FileName.endswith(".m") ||
- FileName.endswith(".mm");
- }
-
- // Returns the priority of the category which \p IncludeName belongs to.
- // If \p CheckMainHeader is true and \p IncludeName is a main header, returns
- // 0. Otherwise, returns the priority of the matching category or INT_MAX.
- int getIncludePriority(StringRef IncludeName, bool CheckMainHeader) {
- int Ret = INT_MAX;
- for (unsigned i = 0, e = CategoryRegexs.size(); i != e; ++i)
- if (CategoryRegexs[i].match(IncludeName)) {
- Ret = Style.IncludeCategories[i].Priority;
- break;
- }
- if (CheckMainHeader && IsMainFile && Ret > 0 && isMainHeader(IncludeName))
- Ret = 0;
- return Ret;
- }
-
-private:
- bool isMainHeader(StringRef IncludeName) const {
- if (!IncludeName.startswith("\""))
- return false;
- StringRef HeaderStem =
- llvm::sys::path::stem(IncludeName.drop_front(1).drop_back(1));
- if (FileStem.startswith(HeaderStem) ||
- FileStem.startswith_lower(HeaderStem)) {
- llvm::Regex MainIncludeRegex(
- (HeaderStem + Style.IncludeIsMainRegex).str(),
- llvm::Regex::IgnoreCase);
- if (MainIncludeRegex.match(FileStem))
- return true;
- }
- return false;
- }
-
- const FormatStyle &Style;
- bool IsMainFile;
- StringRef FileName;
- StringRef FileStem;
- SmallVector<llvm::Regex, 4> CategoryRegexs;
-};
-
const char IncludeRegexPattern[] =
R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))";
@@ -1542,7 +1748,7 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
//
// FIXME: Do some sanity checking, e.g. edit distance of the base name, to fix
// cases where the first #include is unlikely to be the main header.
- IncludeCategoryManager Categories(Style, FileName);
+ tooling::IncludeCategoryManager Categories(Style.IncludeStyle, FileName);
bool FirstIncludeBlock = true;
bool MainIncludeFound = false;
bool FormattingOff = false;
@@ -1559,8 +1765,10 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
FormattingOff = false;
const bool EmptyLineSkipped =
- Trimmed.empty() && (Style.IncludeBlocks == FormatStyle::IBS_Merge ||
- Style.IncludeBlocks == FormatStyle::IBS_Regroup);
+ Trimmed.empty() &&
+ (Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Merge ||
+ Style.IncludeStyle.IncludeBlocks ==
+ tooling::IncludeStyle::IBS_Regroup);
if (!FormattingOff && !Line.endswith("\\")) {
if (IncludeRegex.match(Line, &Matches)) {
@@ -1670,122 +1878,6 @@ inline bool isHeaderDeletion(const tooling::Replacement &Replace) {
return Replace.getOffset() == UINT_MAX && Replace.getLength() == 1;
}
-// Returns the offset after skipping a sequence of tokens, matched by \p
-// GetOffsetAfterSequence, from the start of the code.
-// \p GetOffsetAfterSequence should be a function that matches a sequence of
-// tokens and returns an offset after the sequence.
-unsigned getOffsetAfterTokenSequence(
- StringRef FileName, StringRef Code, const FormatStyle &Style,
- llvm::function_ref<unsigned(const SourceManager &, Lexer &, Token &)>
- GetOffsetAfterSequence) {
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, /*Ranges=*/{});
- const SourceManager &SourceMgr = Env->getSourceManager();
- Lexer Lex(Env->getFileID(), SourceMgr.getBuffer(Env->getFileID()), SourceMgr,
- getFormattingLangOpts(Style));
- Token Tok;
- // Get the first token.
- Lex.LexFromRawLexer(Tok);
- return GetOffsetAfterSequence(SourceMgr, Lex, Tok);
-}
-
-// Check if a sequence of tokens is like "#<Name> <raw_identifier>". If it is,
-// \p Tok will be the token after this directive; otherwise, it can be any token
-// after the given \p Tok (including \p Tok).
-bool checkAndConsumeDirectiveWithName(Lexer &Lex, StringRef Name, Token &Tok) {
- bool Matched = Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
- Tok.is(tok::raw_identifier) &&
- Tok.getRawIdentifier() == Name && !Lex.LexFromRawLexer(Tok) &&
- Tok.is(tok::raw_identifier);
- if (Matched)
- Lex.LexFromRawLexer(Tok);
- return Matched;
-}
-
-void skipComments(Lexer &Lex, Token &Tok) {
- while (Tok.is(tok::comment))
- if (Lex.LexFromRawLexer(Tok))
- return;
-}
-
-// Returns the offset after header guard directives and any comments
-// before/after header guards. If no header guard is present in the code, this
-// returns the offset after skipping all comments from the start of the
-// code.
-unsigned getOffsetAfterHeaderGuardsAndComments(StringRef FileName,
- StringRef Code,
- const FormatStyle &Style) {
- return getOffsetAfterTokenSequence(
- FileName, Code, Style,
- [](const SourceManager &SM, Lexer &Lex, Token Tok) {
- skipComments(Lex, Tok);
- unsigned InitialOffset = SM.getFileOffset(Tok.getLocation());
- if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
- skipComments(Lex, Tok);
- if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
- return SM.getFileOffset(Tok.getLocation());
- }
- return InitialOffset;
- });
-}
-
-// Check if a sequence of tokens is like
-// "#include ("header.h" | <header.h>)".
-// If it is, \p Tok will be the token after this directive; otherwise, it can be
-// any token after the given \p Tok (including \p Tok).
-bool checkAndConsumeInclusiveDirective(Lexer &Lex, Token &Tok) {
- auto Matched = [&]() {
- Lex.LexFromRawLexer(Tok);
- return true;
- };
- if (Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
- Tok.is(tok::raw_identifier) && Tok.getRawIdentifier() == "include") {
- if (Lex.LexFromRawLexer(Tok))
- return false;
- if (Tok.is(tok::string_literal))
- return Matched();
- if (Tok.is(tok::less)) {
- while (!Lex.LexFromRawLexer(Tok) && Tok.isNot(tok::greater)) {
- }
- if (Tok.is(tok::greater))
- return Matched();
- }
- }
- return false;
-}
-
-// Returns the offset of the last #include directive after which a new
-// #include can be inserted. This ignores #include's after the #include block(s)
-// in the beginning of a file to avoid inserting headers into code sections
-// where new #include's should not be added by default.
-// These code sections include:
-// - raw string literals (containing #include).
-// - #if blocks.
-// - Special #include's among declarations (e.g. functions).
-//
-// If there is no #include after which a new #include can be inserted, this
-// returns the offset after skipping all comments from the start of the code.
-// Inserting after an #include is not allowed if it comes after code that is not
-// #include (e.g. pre-processing directive that is not #include, declarations).
-unsigned getMaxHeaderInsertionOffset(StringRef FileName, StringRef Code,
- const FormatStyle &Style) {
- return getOffsetAfterTokenSequence(
- FileName, Code, Style,
- [](const SourceManager &SM, Lexer &Lex, Token Tok) {
- skipComments(Lex, Tok);
- unsigned MaxOffset = SM.getFileOffset(Tok.getLocation());
- while (checkAndConsumeInclusiveDirective(Lex, Tok))
- MaxOffset = SM.getFileOffset(Tok.getLocation());
- return MaxOffset;
- });
-}
-
-bool isDeletedHeader(llvm::StringRef HeaderName,
- const std::set<llvm::StringRef> &HeadersToDelete) {
- return HeadersToDelete.count(HeaderName) ||
- HeadersToDelete.count(HeaderName.trim("\"<>"));
-}
-
// FIXME: insert empty lines between newly created blocks.
tooling::Replacements
fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
@@ -1814,85 +1906,26 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (HeaderInsertions.empty() && HeadersToDelete.empty())
return Replaces;
- llvm::Regex IncludeRegex(IncludeRegexPattern);
- llvm::Regex DefineRegex(R"(^[\t\ ]*#[\t\ ]*define[\t\ ]*[^\\]*$)");
- SmallVector<StringRef, 4> Matches;
StringRef FileName = Replaces.begin()->getFilePath();
- IncludeCategoryManager Categories(Style, FileName);
-
- // Record the offset of the end of the last include in each category.
- std::map<int, int> CategoryEndOffsets;
- // All possible priorities.
- // Add 0 for main header and INT_MAX for headers that are not in any category.
- std::set<int> Priorities = {0, INT_MAX};
- for (const auto &Category : Style.IncludeCategories)
- Priorities.insert(Category.Priority);
- int FirstIncludeOffset = -1;
- // All new headers should be inserted after this offset.
- unsigned MinInsertOffset =
- getOffsetAfterHeaderGuardsAndComments(FileName, Code, Style);
- StringRef TrimmedCode = Code.drop_front(MinInsertOffset);
- // Max insertion offset in the original code.
- unsigned MaxInsertOffset =
- MinInsertOffset +
- getMaxHeaderInsertionOffset(FileName, TrimmedCode, Style);
- SmallVector<StringRef, 32> Lines;
- TrimmedCode.split(Lines, '\n');
- unsigned Offset = MinInsertOffset;
- unsigned NextLineOffset;
- std::set<StringRef> ExistingIncludes;
- for (auto Line : Lines) {
- NextLineOffset = std::min(Code.size(), Offset + Line.size() + 1);
- if (IncludeRegex.match(Line, &Matches)) {
- // The header name with quotes or angle brackets.
- StringRef IncludeName = Matches[2];
- ExistingIncludes.insert(IncludeName);
- // Only record the offset of current #include if we can insert after it.
- if (Offset <= MaxInsertOffset) {
- int Category = Categories.getIncludePriority(
- IncludeName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
- CategoryEndOffsets[Category] = NextLineOffset;
- if (FirstIncludeOffset < 0)
- FirstIncludeOffset = Offset;
- }
- if (isDeletedHeader(IncludeName, HeadersToDelete)) {
- // If this is the last line without trailing newline, we need to make
- // sure we don't delete across the file boundary.
- unsigned Length = std::min(Line.size() + 1, Code.size() - Offset);
- llvm::Error Err =
- Result.add(tooling::Replacement(FileName, Offset, Length, ""));
- if (Err) {
- // Ignore the deletion on conflict.
- llvm::errs() << "Failed to add header deletion replacement for "
- << IncludeName << ": " << llvm::toString(std::move(Err))
- << "\n";
- }
+ tooling::HeaderIncludes Includes(FileName, Code, Style.IncludeStyle);
+
+ for (const auto &Header : HeadersToDelete) {
+ tooling::Replacements Replaces =
+ Includes.remove(Header.trim("\"<>"), Header.startswith("<"));
+ for (const auto &R : Replaces) {
+ auto Err = Result.add(R);
+ if (Err) {
+ // Ignore the deletion on conflict.
+ llvm::errs() << "Failed to add header deletion replacement for "
+ << Header << ": " << llvm::toString(std::move(Err))
+ << "\n";
}
}
- Offset = NextLineOffset;
- }
-
- // Populate CategoryEndOffsets:
- // - Ensure that CategoryEndOffset[Highest] is always populated.
- // - If CategoryEndOffset[Priority] isn't set, use the next higher value that
- // is set, up to CategoryEndOffset[Highest].
- auto Highest = Priorities.begin();
- if (CategoryEndOffsets.find(*Highest) == CategoryEndOffsets.end()) {
- if (FirstIncludeOffset >= 0)
- CategoryEndOffsets[*Highest] = FirstIncludeOffset;
- else
- CategoryEndOffsets[*Highest] = MinInsertOffset;
- }
- // By this point, CategoryEndOffset[Highest] is always set appropriately:
- // - to an appropriate location before/after existing #includes, or
- // - to right after the header guard, or
- // - to the beginning of the file.
- for (auto I = ++Priorities.begin(), E = Priorities.end(); I != E; ++I)
- if (CategoryEndOffsets.find(*I) == CategoryEndOffsets.end())
- CategoryEndOffsets[*I] = CategoryEndOffsets[*std::prev(I)];
-
- bool NeedNewLineAtEnd = !Code.empty() && Code.back() != '\n';
+ }
+
+ llvm::Regex IncludeRegex = llvm::Regex(IncludeRegexPattern);
+ llvm::SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
bool Matched = IncludeRegex.match(IncludeDirective, &Matches);
@@ -1900,30 +1933,17 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
"'#include ...'");
(void)Matched;
auto IncludeName = Matches[2];
- if (ExistingIncludes.find(IncludeName) != ExistingIncludes.end()) {
- DEBUG(llvm::dbgs() << "Skip adding existing include : " << IncludeName
- << "\n");
- continue;
- }
- int Category =
- Categories.getIncludePriority(IncludeName, /*CheckMainHeader=*/true);
- Offset = CategoryEndOffsets[Category];
- std::string NewInclude = !IncludeDirective.endswith("\n")
- ? (IncludeDirective + "\n").str()
- : IncludeDirective.str();
- // When inserting headers at end of the code, also append '\n' to the code
- // if it does not end with '\n'.
- if (NeedNewLineAtEnd && Offset == Code.size()) {
- NewInclude = "\n" + NewInclude;
- NeedNewLineAtEnd = false;
- }
- auto NewReplace = tooling::Replacement(FileName, Offset, 0, NewInclude);
- auto Err = Result.add(NewReplace);
- if (Err) {
- llvm::consumeError(std::move(Err));
- unsigned NewOffset = Result.getShiftedCodePosition(Offset);
- NewReplace = tooling::Replacement(FileName, NewOffset, 0, NewInclude);
- Result = Result.merge(tooling::Replacements(NewReplace));
+ auto Replace =
+ Includes.insert(IncludeName.trim("\"<>"), IncludeName.startswith("<"));
+ if (Replace) {
+ auto Err = Result.add(*Replace);
+ if (Err) {
+ llvm::consumeError(std::move(Err));
+ unsigned NewOffset = Result.getShiftedCodePosition(Replace->getOffset());
+ auto Shifted = tooling::Replacement(FileName, NewOffset, 0,
+ Replace->getReplacementText());
+ Result = Result.merge(tooling::Replacements(Shifted));
+ }
}
}
return Result;
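// A minimal standalone sketch (not from this patch) of the
// tooling::HeaderIncludes API that fixCppIncludeInsertions now delegates to.
// The file name "a.cc" and the header names below are hypothetical.
#include "clang/Tooling/Core/HeaderIncludes.h"
#include "llvm/Support/raw_ostream.h"

void headerIncludesExample(llvm::StringRef Code,
                           const clang::tooling::IncludeStyle &Style) {
  clang::tooling::HeaderIncludes Includes("a.cc", Code, Style);
  // insert() returns llvm::None if the header is already present; otherwise
  // it yields one insertion Replacement at the category-determined offset.
  if (auto R = Includes.insert("vector", /*IsAngled=*/true))
    llvm::outs() << "insert at offset " << R->getOffset() << "\n";
  // remove() yields one deletion Replacement per matching #include.
  clang::tooling::Replacements Removals =
      Includes.remove("old.h", /*IsAngled=*/false);
  llvm::outs() << Removals.size() << " deletion(s)\n";
}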
@@ -1988,9 +2008,9 @@ reformat(const FormatStyle &Style, StringRef Code,
return Formatter(Env, Expanded, Status).process();
});
- std::unique_ptr<Environment> Env = Environment::CreateVirtualEnvironment(
- Code, FileName, Ranges, FirstStartColumn, NextStartColumn,
- LastStartColumn);
+ auto Env =
+ llvm::make_unique<Environment>(Code, FileName, Ranges, FirstStartColumn,
+ NextStartColumn, LastStartColumn);
llvm::Optional<std::string> CurrentCode = None;
tooling::Replacements Fixes;
unsigned Penalty = 0;
@@ -2003,7 +2023,7 @@ reformat(const FormatStyle &Style, StringRef Code,
Penalty += PassFixes.second;
if (I + 1 < E) {
CurrentCode = std::move(*NewCode);
- Env = Environment::CreateVirtualEnvironment(
+ Env = llvm::make_unique<Environment>(
*CurrentCode, FileName,
tooling::calculateRangesAfterReplacements(Fixes, Ranges),
FirstStartColumn, NextStartColumn, LastStartColumn);
@@ -2032,10 +2052,7 @@ tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code,
// cleanups only apply to C++ (they mostly concern ctor commas etc.)
if (Style.Language != FormatStyle::LK_Cpp)
return tooling::Replacements();
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, Ranges);
- Cleaner Clean(*Env, Style);
- return Clean.process().first;
+ return Cleaner(Environment(Code, FileName, Ranges), Style).process().first;
}
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
@@ -2052,20 +2069,18 @@ tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, Ranges);
- NamespaceEndCommentsFixer Fix(*Env, Style);
- return Fix.process().first;
+ return NamespaceEndCommentsFixer(Environment(Code, FileName, Ranges), Style)
+ .process()
+ .first;
}
tooling::Replacements sortUsingDeclarations(const FormatStyle &Style,
StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, Ranges);
- UsingDeclarationsSorter Sorter(*Env, Style);
- return Sorter.process().first;
+ return UsingDeclarationsSorter(Environment(Code, FileName, Ranges), Style)
+ .process()
+ .first;
}
LangOptions getFormattingLangOpts(const FormatStyle &Style) {
@@ -2117,6 +2132,28 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
return FormatStyle::LK_Cpp;
}
+FormatStyle::LanguageKind guessLanguage(StringRef FileName, StringRef Code) {
+ const auto GuessedLanguage = getLanguageByFileName(FileName);
+ if (GuessedLanguage == FormatStyle::LK_Cpp) {
+ auto Extension = llvm::sys::path::extension(FileName);
+ // If there's no file extension (or it's .h), we need to check the contents
+ // of the code to see if it contains Objective-C.
+ if (Extension.empty() || Extension == ".h") {
+ auto NonEmptyFileName = FileName.empty() ? "guess.h" : FileName;
+ Environment Env(Code, NonEmptyFileName, /*Ranges=*/{});
+ ObjCHeaderStyleGuesser Guesser(Env, getLLVMStyle());
+ Guesser.process();
+ if (Guesser.isObjC())
+ return FormatStyle::LK_ObjC;
+ }
+ }
+ return GuessedLanguage;
+}
+
+const char *DefaultFormatStyle = "file";
+
+const char *DefaultFallbackStyle = "LLVM";
+
llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
StringRef Code, vfs::FileSystem *FS) {
@@ -2124,16 +2161,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
FS = vfs::getRealFileSystem().get();
}
FormatStyle Style = getLLVMStyle();
- Style.Language = getLanguageByFileName(FileName);
-
- // This is a very crude detection of whether a header contains ObjC code that
- // should be improved over time and probably be done on tokens, not on the
- // bare content of the file.
- if (Style.Language == FormatStyle::LK_Cpp && FileName.endswith(".h") &&
- (Code.contains("\n- (") || Code.contains("\n+ (") ||
- Code.contains("\n@end\n") || Code.contains("\n@end ") ||
- Code.endswith("@end")))
- Style.Language = FormatStyle::LK_ObjC;
+ Style.Language = guessLanguage(FileName, Code);
FormatStyle FallbackStyle = getNoStyle();
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
@@ -2170,7 +2198,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
SmallString<128> ConfigFile(Directory);
llvm::sys::path::append(ConfigFile, ".clang-format");
- DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
+ LLVM_DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
Status = FS->status(ConfigFile.str());
bool FoundConfigFile =
@@ -2179,7 +2207,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
// Try _clang-format too, since dotfiles are not commonly used on Windows.
ConfigFile = Directory;
llvm::sys::path::append(ConfigFile, "_clang-format");
- DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
+ LLVM_DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
Status = FS->status(ConfigFile.str());
FoundConfigFile = Status && (Status->getType() ==
llvm::sys::fs::file_type::regular_file);
@@ -2201,7 +2229,8 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
return make_string_error("Error reading " + ConfigFile + ": " +
ec.message());
}
- DEBUG(llvm::dbgs() << "Using configuration file " << ConfigFile << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << "Using configuration file " << ConfigFile << "\n");
return Style;
}
}
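// A minimal sketch (not from this patch) of the new guessLanguage() entry
// point and the DefaultFormatStyle/DefaultFallbackStyle constants introduced
// above; the file name "test.h" is hypothetical.
#include "clang/Format/Format.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

void styleLookupExample(llvm::StringRef Code) {
  // For .h files and extensionless buffers, guessLanguage() runs the
  // token-based ObjC guesser instead of the old substring heuristic.
  clang::format::FormatStyle::LanguageKind Lang =
      clang::format::guessLanguage("test.h", Code);
  llvm::Expected<clang::format::FormatStyle> Style =
      clang::format::getStyle(clang::format::DefaultFormatStyle, "test.h",
                              clang::format::DefaultFallbackStyle, Code);
  if (!Style) {
    llvm::errs() << llvm::toString(Style.takeError()) << "\n";
    return;
  }
  llvm::outs() << "detected language kind " << static_cast<int>(Lang) << "\n";
}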
diff --git a/lib/Format/FormatInternal.h b/lib/Format/FormatInternal.h
index 3984158467b3..5c59e7656eee 100644
--- a/lib/Format/FormatInternal.h
+++ b/lib/Format/FormatInternal.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file declares Format APIs to be used internally by the
+/// This file declares Format APIs to be used internally by the
/// formatting library implementation.
///
//===----------------------------------------------------------------------===//
@@ -24,7 +24,7 @@ namespace clang {
namespace format {
namespace internal {
-/// \brief Reformats the given \p Ranges in the code fragment \p Code.
+/// Reformats the given \p Ranges in the code fragment \p Code.
///
/// A fragment of code could conceptually be surrounded by other code that might
/// constrain how that fragment is laid out.
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index 10ac392abbf2..62b08c576e05 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements specific functions of \c FormatTokens and their
+/// This file implements specific functions of \c FormatTokens and their
/// roles.
///
//===----------------------------------------------------------------------===//
@@ -57,6 +57,7 @@ bool FormatToken::isSimpleTypeSpecifier() const {
case tok::kw_bool:
case tok::kw___underlying_type:
case tok::annot_typename:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_typeof:
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 3dc0ab0e7cca..9094e7689e1d 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file contains the declaration of the FormatToken, a wrapper
+/// This file contains the declaration of the FormatToken, a wrapper
/// around Token with additional information related to formatting.
///
//===----------------------------------------------------------------------===//
@@ -29,7 +29,9 @@ namespace format {
#define LIST_TOKEN_TYPES \
TYPE(ArrayInitializerLSquare) \
TYPE(ArraySubscriptLSquare) \
+ TYPE(AttributeColon) \
TYPE(AttributeParen) \
+ TYPE(AttributeSquare) \
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
@@ -88,6 +90,7 @@ namespace format {
TYPE(TemplateCloser) \
TYPE(TemplateOpener) \
TYPE(TemplateString) \
+ TYPE(ProtoExtensionLSquare) \
TYPE(TrailingAnnotation) \
TYPE(TrailingReturnArrow) \
TYPE(TrailingUnaryOperator) \
@@ -101,7 +104,7 @@ enum TokenType {
NUM_TOKEN_TYPES
};
-/// \brief Determines the name of a token type.
+/// Determines the name of a token type.
const char *getTokenTypeName(TokenType Type);
// Represents what type of block a set of braces open.
@@ -115,181 +118,191 @@ enum FormatDecision { FD_Unformatted, FD_Continue, FD_Break };
class TokenRole;
class AnnotatedLine;
-/// \brief A wrapper around a \c Token storing information about the
+/// A wrapper around a \c Token storing information about the
/// whitespace characters preceding it.
struct FormatToken {
FormatToken() {}
- /// \brief The \c Token.
+ /// The \c Token.
Token Tok;
- /// \brief The number of newlines immediately before the \c Token.
+ /// The number of newlines immediately before the \c Token.
///
/// This can be used to determine what the user wrote in the original code
/// and thereby e.g. leave an empty line between two function definitions.
unsigned NewlinesBefore = 0;
- /// \brief Whether there is at least one unescaped newline before the \c
+ /// Whether there is at least one unescaped newline before the \c
/// Token.
bool HasUnescapedNewline = false;
- /// \brief The range of the whitespace immediately preceding the \c Token.
+ /// The range of the whitespace immediately preceding the \c Token.
SourceRange WhitespaceRange;
- /// \brief The offset just past the last '\n' in this token's leading
+ /// The offset just past the last '\n' in this token's leading
/// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'.
unsigned LastNewlineOffset = 0;
- /// \brief The width of the non-whitespace parts of the token (or its first
+ /// The width of the non-whitespace parts of the token (or its first
/// line for multi-line tokens) in columns.
/// We need this to correctly measure number of columns a token spans.
unsigned ColumnWidth = 0;
- /// \brief Contains the width in columns of the last line of a multi-line
+ /// Contains the width in columns of the last line of a multi-line
/// token.
unsigned LastLineColumnWidth = 0;
- /// \brief Whether the token text contains newlines (escaped or not).
+ /// Whether the token text contains newlines (escaped or not).
bool IsMultiline = false;
- /// \brief Indicates that this is the first token of the file.
+ /// Indicates that this is the first token of the file.
bool IsFirst = false;
- /// \brief Whether there must be a line break before this token.
+ /// Whether there must be a line break before this token.
///
/// This happens for example when a preprocessor directive ended directly
/// before the token.
bool MustBreakBefore = false;
- /// \brief The raw text of the token.
+ /// The raw text of the token.
///
/// Contains the raw token text without leading whitespace and without leading
/// escaped newlines.
StringRef TokenText;
- /// \brief Set to \c true if this token is an unterminated literal.
+ /// Set to \c true if this token is an unterminated literal.
bool IsUnterminatedLiteral = 0;
- /// \brief Contains the kind of block if this token is a brace.
+ /// Contains the kind of block if this token is a brace.
BraceBlockKind BlockKind = BK_Unknown;
TokenType Type = TT_Unknown;
- /// \brief The number of spaces that should be inserted before this token.
+ /// The number of spaces that should be inserted before this token.
unsigned SpacesRequiredBefore = 0;
- /// \brief \c true if it is allowed to break before this token.
+ /// \c true if it is allowed to break before this token.
bool CanBreakBefore = false;
- /// \brief \c true if this is the ">" of "template<..>".
+ /// \c true if this is the ">" of "template<..>".
bool ClosesTemplateDeclaration = false;
- /// \brief Number of parameters, if this is "(", "[" or "<".
+ /// Number of parameters, if this is "(", "[" or "<".
///
/// This is initialized to 1 as we don't need to distinguish functions with
/// 0 parameters from functions with 1 parameter. Thus, we can simply count
/// the number of commas.
unsigned ParameterCount = 0;
- /// \brief Number of parameters that are nested blocks,
+ /// Number of parameters that are nested blocks,
/// if this is "(", "[" or "<".
unsigned BlockParameterCount = 0;
- /// \brief If this is a bracket ("<", "(", "[" or "{"), contains the kind of
+ /// If this is a bracket ("<", "(", "[" or "{"), contains the kind of
/// the surrounding bracket.
tok::TokenKind ParentBracket = tok::unknown;
- /// \brief A token can have a special role that can carry extra information
+ /// A token can have a special role that can carry extra information
/// about the token's formatting.
std::unique_ptr<TokenRole> Role;
- /// \brief If this is an opening parenthesis, how are the parameters packed?
+ /// If this is an opening parenthesis, how are the parameters packed?
ParameterPackingKind PackingKind = PPK_Inconclusive;
- /// \brief The total length of the unwrapped line up to and including this
+ /// The total length of the unwrapped line up to and including this
/// token.
unsigned TotalLength = 0;
- /// \brief The original 0-based column of this token, including expanded tabs.
+ /// The original 0-based column of this token, including expanded tabs.
/// The configured TabWidth is used as tab width.
unsigned OriginalColumn = 0;
- /// \brief The length of following tokens until the next natural split point,
+ /// The length of following tokens until the next natural split point,
/// or the next token that can be broken.
unsigned UnbreakableTailLength = 0;
// FIXME: Come up with a 'cleaner' concept.
- /// \brief The binding strength of a token. This is a combined value of
+ /// The binding strength of a token. This is a combined value of
/// operator precedence, parenthesis nesting, etc.
unsigned BindingStrength = 0;
- /// \brief The nesting level of this token, i.e. the number of surrounding (),
+ /// The nesting level of this token, i.e. the number of surrounding (),
/// [], {} or <>.
unsigned NestingLevel = 0;
- /// \brief The indent level of this token. Copied from the surrounding line.
+ /// The indent level of this token. Copied from the surrounding line.
unsigned IndentLevel = 0;
- /// \brief Penalty for inserting a line break before this token.
+ /// Penalty for inserting a line break before this token.
unsigned SplitPenalty = 0;
- /// \brief If this is the first ObjC selector name in an ObjC method
+ /// If this is the first ObjC selector name in an ObjC method
/// definition or call, this contains the length of the longest name.
///
/// This being set to 0 means that the selectors should not be colon-aligned,
/// e.g. because several of them are block-type.
unsigned LongestObjCSelectorName = 0;
- /// \brief Stores the number of required fake parentheses and the
+ /// If this is the first ObjC selector name in an ObjC method
+ /// definition or call, this contains the number of parts that the whole
+ /// selector consists of.
+ unsigned ObjCSelectorNameParts = 0;
+
+ /// The 0-based index of the parameter/argument. For ObjC it is set
+ /// for the selector name token.
+ /// For now calculated only for ObjC.
+ unsigned ParameterIndex = 0;
+
+ /// Stores the number of required fake parentheses and the
/// corresponding operator precedence.
///
/// If multiple fake parentheses start at a token, this vector stores them in
/// reverse order, i.e. inner fake parenthesis first.
SmallVector<prec::Level, 4> FakeLParens;
- /// \brief Insert this many fake ) after this token for correct indentation.
+ /// Insert this many fake ) after this token for correct indentation.
unsigned FakeRParens = 0;
- /// \brief \c true if this token starts a binary expression, i.e. has at least
+ /// \c true if this token starts a binary expression, i.e. has at least
/// one fake l_paren with a precedence greater than prec::Unknown.
bool StartsBinaryExpression = false;
- /// \brief \c true if this token ends a binary expression.
+ /// \c true if this token ends a binary expression.
bool EndsBinaryExpression = false;
- /// \brief Is this is an operator (or "."/"->") in a sequence of operators
+ /// If this is an operator (or "."/"->") in a sequence of operators
/// with the same precedence, contains the 0-based operator index.
unsigned OperatorIndex = 0;
- /// \brief If this is an operator (or "."/"->") in a sequence of operators
+ /// If this is an operator (or "."/"->") in a sequence of operators
/// with the same precedence, points to the next operator.
FormatToken *NextOperator = nullptr;
- /// \brief Is this token part of a \c DeclStmt defining multiple variables?
+ /// Is this token part of a \c DeclStmt defining multiple variables?
///
/// Only set if \c Type == \c TT_StartOfName.
bool PartOfMultiVariableDeclStmt = false;
- /// \brief Does this line comment continue a line comment section?
+ /// Does this line comment continue a line comment section?
///
/// Only set to true if \c Type == \c TT_LineComment.
bool ContinuesLineCommentSection = false;
- /// \brief If this is a bracket, this points to the matching one.
+ /// If this is a bracket, this points to the matching one.
FormatToken *MatchingParen = nullptr;
- /// \brief The previous token in the unwrapped line.
+ /// The previous token in the unwrapped line.
FormatToken *Previous = nullptr;
- /// \brief The next token in the unwrapped line.
+ /// The next token in the unwrapped line.
FormatToken *Next = nullptr;
- /// \brief If this token starts a block, this contains all the unwrapped lines
+ /// If this token starts a block, this contains all the unwrapped lines
/// in it.
SmallVector<AnnotatedLine *, 1> Children;
- /// \brief Stores the formatting decision for the token once it was made.
+ /// Stores the formatting decision for the token once it was made.
FormatDecision Decision = FD_Unformatted;
- /// \brief If \c true, this token has been fully formatted (indented and
+ /// If \c true, this token has been fully formatted (indented and
/// potentially re-formatted inside), and we do not allow further formatting
/// changes.
bool Finalized = false;
@@ -337,7 +350,7 @@ struct FormatToken {
(!ColonRequired || (Next && Next->is(tok::colon)));
}
- /// \brief Determine whether the token is a simple-type-specifier.
+ /// Determine whether the token is a simple-type-specifier.
bool isSimpleTypeSpecifier() const;
bool isObjCAccessSpecifier() const {
@@ -348,22 +361,28 @@ struct FormatToken {
Next->isObjCAtKeyword(tok::objc_private));
}
- /// \brief Returns whether \p Tok is ([{ or a template opening <.
+ /// Returns whether \p Tok is ([{ or an opening < of a template or in
+ /// protos.
bool opensScope() const {
if (is(TT_TemplateString) && TokenText.endswith("${"))
return true;
+ if (is(TT_DictLiteral) && is(tok::less))
+ return true;
return isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
TT_TemplateOpener);
}
- /// \brief Returns whether \p Tok is )]} or a template closing >.
+ /// Returns whether \p Tok is )]} or a closing > of a template or in
+ /// protos.
bool closesScope() const {
if (is(TT_TemplateString) && TokenText.startswith("}"))
return true;
+ if (is(TT_DictLiteral) && is(tok::greater))
+ return true;
return isOneOf(tok::r_paren, tok::r_brace, tok::r_square,
TT_TemplateCloser);
}
- /// \brief Returns \c true if this is a "." or "->" accessing a member.
+ /// Returns \c true if this is a "." or "->" accessing a member.
bool isMemberAccess() const {
return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
!isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
@@ -396,7 +415,7 @@ struct FormatToken {
(is(TT_LineComment) || !Next || Next->NewlinesBefore > 0);
}
- /// \brief Returns \c true if this is a keyword that can be used
+ /// Returns \c true if this is a keyword that can be used
/// like a function call (e.g. sizeof, typeid, ...).
bool isFunctionLikeKeyword() const {
switch (Tok.getKind()) {
@@ -416,7 +435,7 @@ struct FormatToken {
}
}
- /// \brief Returns \c true if this is a string literal that's like a label,
+ /// Returns \c true if this is a string literal that's like a label,
/// e.g. ends with "=" or ":".
bool isLabelString() const {
if (!is(tok::string_literal))
@@ -431,7 +450,7 @@ struct FormatToken {
(Content.back() == ':' || Content.back() == '=');
}
- /// \brief Returns actual token start location without leading escaped
+ /// Returns actual token start location without leading escaped
/// newlines and whitespace.
///
/// This can be different to Tok.getLocation(), which includes leading escaped
@@ -441,10 +460,11 @@ struct FormatToken {
}
prec::Level getPrecedence() const {
- return getBinOpPrecedence(Tok.getKind(), true, true);
+ return getBinOpPrecedence(Tok.getKind(), /*GreaterThanIsOperator=*/true,
+ /*CPlusPlus11=*/true);
}
- /// \brief Returns the previous token ignoring comments.
+ /// Returns the previous token ignoring comments.
FormatToken *getPreviousNonComment() const {
FormatToken *Tok = Previous;
while (Tok && Tok->is(tok::comment))
@@ -452,7 +472,7 @@ struct FormatToken {
return Tok;
}
- /// \brief Returns the next token ignoring comments.
+ /// Returns the next token ignoring comments.
const FormatToken *getNextNonComment() const {
const FormatToken *Tok = Next;
while (Tok && Tok->is(tok::comment))
@@ -460,12 +480,13 @@ struct FormatToken {
return Tok;
}
- /// \brief Returns \c true if this tokens starts a block-type list, i.e. a
+ /// Returns \c true if this token starts a block-type list, i.e. a
/// list that should be indented with a block indent.
bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
if (is(TT_TemplateString) && opensScope())
return true;
return is(TT_ArrayInitializerLSquare) ||
+ is(TT_ProtoExtensionLSquare) ||
(is(tok::l_brace) &&
(BlockKind == BK_Block || is(TT_DictLiteral) ||
(!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
@@ -473,7 +494,7 @@ struct FormatToken {
Style.Language == FormatStyle::LK_TextProto));
}
- /// \brief Returns whether the token is the left square bracket of a C++
+ /// Returns whether the token is the left square bracket of a C++
/// structured binding declaration.
bool isCppStructuredBinding(const FormatStyle &Style) const {
if (!Style.isCpp() || isNot(tok::l_square))
@@ -486,14 +507,14 @@ struct FormatToken {
return T && T->is(tok::kw_auto);
}
- /// \brief Same as opensBlockOrBlockTypeList, but for the closing token.
+ /// Same as opensBlockOrBlockTypeList, but for the closing token.
bool closesBlockOrBlockTypeList(const FormatStyle &Style) const {
if (is(TT_TemplateString) && closesScope())
return true;
return MatchingParen && MatchingParen->opensBlockOrBlockTypeList(Style);
}
- /// \brief Return the actual namespace token, if this token starts a namespace
+ /// Return the actual namespace token, if this token starts a namespace
/// block.
const FormatToken *getNamespaceToken() const {
const FormatToken *NamespaceTok = this;
@@ -546,11 +567,11 @@ public:
TokenRole(const FormatStyle &Style) : Style(Style) {}
virtual ~TokenRole();
- /// \brief After the \c TokenAnnotator has finished annotating all the tokens,
+ /// After the \c TokenAnnotator has finished annotating all the tokens,
/// this function precomputes required information for formatting.
virtual void precomputeFormattingInfos(const FormatToken *Token);
- /// \brief Apply the special formatting that the given role demands.
+ /// Apply the special formatting that the given role demands.
///
/// Assumes that the token having this role is already formatted.
///
@@ -562,7 +583,7 @@ public:
return 0;
}
- /// \brief Same as \c formatFromToken, but assumes that the first token has
+ /// Same as \c formatFromToken, but assumes that the first token has
/// already been set thereby deciding on the first line break.
virtual unsigned formatAfterToken(LineState &State,
ContinuationIndenter *Indenter,
@@ -570,7 +591,7 @@ public:
return 0;
}
- /// \brief Notifies the \c Role that a comma was found.
+ /// Notifies the \c Role that a comma was found.
virtual void CommaFound(const FormatToken *Token) {}
protected:
@@ -590,46 +611,46 @@ public:
unsigned formatFromToken(LineState &State, ContinuationIndenter *Indenter,
bool DryRun) override;
- /// \brief Adds \p Token as the next comma to the \c CommaSeparated list.
+ /// Adds \p Token as the next comma to the \c CommaSeparated list.
void CommaFound(const FormatToken *Token) override {
Commas.push_back(Token);
}
private:
- /// \brief A struct that holds information on how to format a given list with
+ /// A struct that holds information on how to format a given list with
/// a specific number of columns.
struct ColumnFormat {
- /// \brief The number of columns to use.
+ /// The number of columns to use.
unsigned Columns;
- /// \brief The total width in characters.
+ /// The total width in characters.
unsigned TotalWidth;
- /// \brief The number of lines required for this format.
+ /// The number of lines required for this format.
unsigned LineCount;
- /// \brief The size of each column in characters.
+ /// The size of each column in characters.
SmallVector<unsigned, 8> ColumnSizes;
};
- /// \brief Calculate which \c ColumnFormat fits best into
+ /// Calculate which \c ColumnFormat fits best into
/// \p RemainingCharacters.
const ColumnFormat *getColumnFormat(unsigned RemainingCharacters) const;
- /// \brief The ordered \c FormatTokens making up the commas of this list.
+ /// The ordered \c FormatTokens making up the commas of this list.
SmallVector<const FormatToken *, 8> Commas;
- /// \brief The length of each of the list's items in characters including the
+ /// The length of each of the list's items in characters including the
/// trailing comma.
SmallVector<unsigned, 8> ItemLengths;
- /// \brief Precomputed formats that can be used for this list.
+ /// Precomputed formats that can be used for this list.
SmallVector<ColumnFormat, 4> Formats;
bool HasNestedBracedList;
};
-/// \brief Encapsulates keywords that are context sensitive or for languages not
+/// Encapsulates keywords that are context sensitive or for languages not
/// properly supported by Clang's lexer.
struct AdditionalKeywords {
AdditionalKeywords(IdentifierTable &IdentTable) {
@@ -761,7 +782,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_slots;
IdentifierInfo *kw_qslots;
- /// \brief Returns \c true if \p Tok is a true JavaScript identifier, returns
+ /// Returns \c true if \p Tok is a true JavaScript identifier, returns
/// \c false if it is a keyword or a pseudo keyword.
bool IsJavaScriptIdentifier(const FormatToken &Tok) const {
return Tok.is(tok::identifier) &&
@@ -770,7 +791,7 @@ struct AdditionalKeywords {
}
private:
- /// \brief The JavaScript keywords beyond the C++ keyword set.
+ /// The JavaScript keywords beyond the C++ keyword set.
std::unordered_set<IdentifierInfo *> JsExtraKeywords;
};
diff --git a/lib/Format/FormatTokenLexer.cpp b/lib/Format/FormatTokenLexer.cpp
index 199d2974c5c7..c7f720a443d3 100644
--- a/lib/Format/FormatTokenLexer.cpp
+++ b/lib/Format/FormatTokenLexer.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements FormatTokenLexer, which tokenizes a source file
+/// This file implements FormatTokenLexer, which tokenizes a source file
/// into a FormatToken stream suitable for ClangFormat.
///
//===----------------------------------------------------------------------===//
@@ -38,7 +38,7 @@ FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
for (const std::string &ForEachMacro : Style.ForEachMacros)
ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
- std::sort(ForEachMacros.begin(), ForEachMacros.end());
+ llvm::sort(ForEachMacros.begin(), ForEachMacros.end());
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -334,7 +334,7 @@ void FormatTokenLexer::handleTemplateStrings() {
void FormatTokenLexer::tryParsePythonComment() {
FormatToken *HashToken = Tokens.back();
- if (HashToken->isNot(tok::hash))
+ if (!HashToken->isOneOf(tok::hash, tok::hashhash))
return;
// Turn the remainder of this line into a comment.
const char *CommentBegin =
@@ -691,7 +691,9 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
}
}
- if (Style.Language == FormatStyle::LK_JavaScript &&
+ if ((Style.Language == FormatStyle::LK_JavaScript ||
+ Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) &&
Tok.is(tok::char_constant)) {
Tok.Tok.setKind(tok::string_literal);
}
diff --git a/lib/Format/FormatTokenLexer.h b/lib/Format/FormatTokenLexer.h
index 59dc2a752f1f..3b79d27480e3 100644
--- a/lib/Format/FormatTokenLexer.h
+++ b/lib/Format/FormatTokenLexer.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file contains FormatTokenLexer, which tokenizes a source file
+/// This file contains FormatTokenLexer, which tokenizes a source file
/// into a token stream suitable for ClangFormat.
///
//===----------------------------------------------------------------------===//
diff --git a/lib/Format/NamespaceEndCommentsFixer.cpp b/lib/Format/NamespaceEndCommentsFixer.cpp
index df99bb2e1381..995b3219a1f4 100644
--- a/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements NamespaceEndCommentsFixer, a TokenAnalyzer that
+/// This file implements NamespaceEndCommentsFixer, a TokenAnalyzer that
/// fixes namespace end comments.
///
//===----------------------------------------------------------------------===//
@@ -27,13 +27,6 @@ namespace {
// Short namespaces don't need an end comment.
static const int kShortNamespaceMaxLines = 1;
-// Matches a valid namespace end comment.
-// Valid namespace end comments don't need to be edited.
-static llvm::Regex kNamespaceCommentPattern =
- llvm::Regex("^/[/*] *(end (of )?)? *(anonymous|unnamed)? *"
- "namespace( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$",
- llvm::Regex::IgnoreCase);
-
// Computes the name of a namespace given the namespace token.
// Returns "" for anonymous namespace.
std::string computeName(const FormatToken *NamespaceTok) {
@@ -67,8 +60,15 @@ bool hasEndComment(const FormatToken *RBraceTok) {
bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName) {
assert(hasEndComment(RBraceTok));
const FormatToken *Comment = RBraceTok->Next;
+
+ // Matches a valid namespace end comment.
+ // Valid namespace end comments don't need to be edited.
+ static llvm::Regex *const NamespaceCommentPattern =
+ new llvm::Regex("^/[/*] *(end (of )?)? *(anonymous|unnamed)? *"
+ "namespace( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$",
+ llvm::Regex::IgnoreCase);
SmallVector<StringRef, 7> Groups;
- if (kNamespaceCommentPattern.match(Comment->TokenText, &Groups)) {
+ if (NamespaceCommentPattern->match(Comment->TokenText, &Groups)) {
StringRef NamespaceNameInComment = Groups.size() > 5 ? Groups[5] : "";
// Anonymous namespace comments must not mention a namespace name.
if (NamespaceName.empty() && !NamespaceNameInComment.empty())
@@ -107,13 +107,14 @@ void updateEndComment(const FormatToken *RBraceTok, StringRef EndCommentText,
<< llvm::toString(std::move(Err)) << "\n";
}
}
+} // namespace
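// For illustration (hypothetical comment texts): the pattern above accepts,
// among others, the following end comments as already valid:
//   "// namespace foo::bar"
//   "// end anonymous namespace"
//   "/* end of namespace foo */"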
const FormatToken *
-getNamespaceToken(const AnnotatedLine *line,
+getNamespaceToken(const AnnotatedLine *Line,
const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
- if (!line->Affected || line->InPPDirective || !line->startsWith(tok::r_brace))
+ if (!Line->Affected || Line->InPPDirective || !Line->startsWith(tok::r_brace))
return nullptr;
- size_t StartLineIndex = line->MatchingOpeningBlockLineIndex;
+ size_t StartLineIndex = Line->MatchingOpeningBlockLineIndex;
if (StartLineIndex == UnwrappedLine::kInvalidIndex)
return nullptr;
assert(StartLineIndex < AnnotatedLines.size());
@@ -131,7 +132,6 @@ getNamespaceToken(const AnnotatedLine *line,
return nullptr;
return NamespaceTok;
}
-} // namespace
NamespaceEndCommentsFixer::NamespaceEndCommentsFixer(const Environment &Env,
const FormatStyle &Style)
@@ -141,8 +141,7 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
TokenAnnotator &Annotator, SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) {
const SourceManager &SourceMgr = Env.getSourceManager();
- AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
- AnnotatedLines.end());
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Fixes;
std::string AllNamespaceNames = "";
size_t StartLineIndex = SIZE_MAX;
diff --git a/lib/Format/NamespaceEndCommentsFixer.h b/lib/Format/NamespaceEndCommentsFixer.h
index 4779f0d27c92..07a1c7bb0c35 100644
--- a/lib/Format/NamespaceEndCommentsFixer.h
+++ b/lib/Format/NamespaceEndCommentsFixer.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file declares NamespaceEndCommentsFixer, a TokenAnalyzer that
+/// This file declares NamespaceEndCommentsFixer, a TokenAnalyzer that
/// fixes namespace end comments.
///
//===----------------------------------------------------------------------===//
@@ -21,6 +21,16 @@
namespace clang {
namespace format {
+// Finds the namespace token corresponding to a closing namespace `}`, if that
+// is to be formatted.
+// If \p Line contains the closing `}` of a namespace, is affected and is not in
+// a preprocessor directive, the result will be the matching namespace token.
+// Otherwise returns null.
+// \p AnnotatedLines is the sequence of lines of which \p Line is a member.
+const FormatToken *
+getNamespaceToken(const AnnotatedLine *Line,
+ const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines);
+
class NamespaceEndCommentsFixer : public TokenAnalyzer {
public:
NamespaceEndCommentsFixer(const Environment &Env, const FormatStyle &Style);
diff --git a/lib/Format/SortJavaScriptImports.cpp b/lib/Format/SortJavaScriptImports.cpp
index d0b979e100d5..2ec577382ffb 100644
--- a/lib/Format/SortJavaScriptImports.cpp
+++ b/lib/Format/SortJavaScriptImports.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements a sort operation for JavaScript ES6 imports.
+/// This file implements a sort operation for JavaScript ES6 imports.
///
//===----------------------------------------------------------------------===//
@@ -128,8 +128,7 @@ public:
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) override {
tooling::Replacements Result;
- AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
- AnnotatedLines.end());
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
const AdditionalKeywords &Keywords = Tokens.getKeywords();
SmallVector<JsModuleReference, 16> References;
@@ -189,9 +188,9 @@ public:
if (FirstNonImportLine && FirstNonImportLine->First->NewlinesBefore < 2)
ReferencesText += "\n";
- DEBUG(llvm::dbgs() << "Replacing imports:\n"
- << getSourceText(InsertionPoint) << "\nwith:\n"
- << ReferencesText << "\n");
+ LLVM_DEBUG(llvm::dbgs() << "Replacing imports:\n"
+ << getSourceText(InsertionPoint) << "\nwith:\n"
+ << ReferencesText << "\n");
auto Err = Result.add(tooling::Replacement(
Env.getSourceManager(), CharSourceRange::getCharRange(InsertionPoint),
ReferencesText));
@@ -308,7 +307,7 @@ private:
FirstNonImportLine = nullptr;
AnyImportAffected = AnyImportAffected || Line->Affected;
Reference.Range.setEnd(LineEnd->Tok.getEndLoc());
- DEBUG({
+ LLVM_DEBUG({
llvm::dbgs() << "JsModuleReference: {"
<< "is_export: " << Reference.IsExport
<< ", cat: " << Reference.Category
@@ -446,10 +445,9 @@ tooling::Replacements sortJavaScriptImports(const FormatStyle &Style,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
// FIXME: Cursor support.
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, Ranges);
- JavaScriptImportSorter Sorter(*Env, Style);
- return Sorter.process().first;
+ return JavaScriptImportSorter(Environment(Code, FileName, Ranges), Style)
+ .process()
+ .first;
}
} // end namespace format
diff --git a/lib/Format/SortJavaScriptImports.h b/lib/Format/SortJavaScriptImports.h
index f22a051008f0..ecab0ae54cb3 100644
--- a/lib/Format/SortJavaScriptImports.h
+++ b/lib/Format/SortJavaScriptImports.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements a sorter for JavaScript ES6 imports.
+/// This file implements a sorter for JavaScript ES6 imports.
///
//===----------------------------------------------------------------------===//
diff --git a/lib/Format/TokenAnalyzer.cpp b/lib/Format/TokenAnalyzer.cpp
index d1dfb1fea32b..99fc61ef1c32 100644
--- a/lib/Format/TokenAnalyzer.cpp
+++ b/lib/Format/TokenAnalyzer.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements an abstract TokenAnalyzer and associated helper
+/// This file implements an abstract TokenAnalyzer and associated helper
/// classes. TokenAnalyzer can be extended to generate replacements based on
/// an annotated and pre-processed token stream.
///
@@ -34,48 +34,19 @@
namespace clang {
namespace format {
-// This sets up a virtual file system with file \p FileName containing \p
-// Code.
-std::unique_ptr<Environment>
-Environment::CreateVirtualEnvironment(StringRef Code, StringRef FileName,
- ArrayRef<tooling::Range> Ranges,
- unsigned FirstStartColumn,
- unsigned NextStartColumn,
- unsigned LastStartColumn) {
- // This is referenced by `FileMgr` and will be released by `FileMgr` when it
- // is deleted.
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
- // This is passed to `SM` as reference, so the pointer has to be referenced
- // in `Environment` so that `FileMgr` can out-live this function scope.
- std::unique_ptr<FileManager> FileMgr(
- new FileManager(FileSystemOptions(), InMemoryFileSystem));
- // This is passed to `SM` as reference, so the pointer has to be referenced
- // by `Environment` due to the same reason above.
- std::unique_ptr<DiagnosticsEngine> Diagnostics(new DiagnosticsEngine(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
- new DiagnosticOptions));
- // This will be stored as reference, so the pointer has to be stored in
- // due to the same reason above.
- std::unique_ptr<SourceManager> VirtualSM(
- new SourceManager(*Diagnostics, *FileMgr));
- InMemoryFileSystem->addFile(
- FileName, 0,
- llvm::MemoryBuffer::getMemBuffer(Code, FileName,
- /*RequiresNullTerminator=*/false));
- FileID ID = VirtualSM->createFileID(FileMgr->getFile(FileName),
- SourceLocation(), clang::SrcMgr::C_User);
- assert(ID.isValid());
- SourceLocation StartOfFile = VirtualSM->getLocForStartOfFile(ID);
- std::vector<CharSourceRange> CharRanges;
+Environment::Environment(StringRef Code, StringRef FileName,
+ ArrayRef<tooling::Range> Ranges,
+ unsigned FirstStartColumn, unsigned NextStartColumn,
+ unsigned LastStartColumn)
+ : VirtualSM(new SourceManagerForFile(FileName, Code)), SM(VirtualSM->get()),
+ ID(VirtualSM->get().getMainFileID()), FirstStartColumn(FirstStartColumn),
+ NextStartColumn(NextStartColumn), LastStartColumn(LastStartColumn) {
+ SourceLocation StartOfFile = SM.getLocForStartOfFile(ID);
for (const tooling::Range &Range : Ranges) {
SourceLocation Start = StartOfFile.getLocWithOffset(Range.getOffset());
SourceLocation End = Start.getLocWithOffset(Range.getLength());
CharRanges.push_back(CharSourceRange::getCharRange(Start, End));
}
- return llvm::make_unique<Environment>(
- ID, std::move(FileMgr), std::move(VirtualSM), std::move(Diagnostics),
- CharRanges, FirstStartColumn, NextStartColumn, LastStartColumn);
}
TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
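// A minimal sketch (not from this patch) of the SourceManagerForFile helper
// the new Environment constructor builds on; "buffer.cc" is a made-up name.
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"

void virtualSourceManagerExample(llvm::StringRef Code) {
  // SourceManagerForFile owns the in-memory file system, FileManager,
  // DiagnosticsEngine and SourceManager that CreateVirtualEnvironment()
  // previously wired up by hand, and tears them down in the right order.
  clang::SourceManagerForFile VirtualSM("buffer.cc", Code);
  clang::SourceManager &SM = VirtualSM.get();
  llvm::outs() << SM.getBufferData(SM.getMainFileID()).size() << " bytes\n";
}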
@@ -84,12 +55,12 @@ TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
UnwrappedLines(1),
Encoding(encoding::detectEncoding(
Env.getSourceManager().getBufferData(Env.getFileID()))) {
- DEBUG(
+ LLVM_DEBUG(
llvm::dbgs() << "File encoding: "
<< (Encoding == encoding::Encoding_UTF8 ? "UTF8" : "unknown")
<< "\n");
- DEBUG(llvm::dbgs() << "Language: " << getLanguageName(Style.Language)
- << "\n");
+ LLVM_DEBUG(llvm::dbgs() << "Language: " << getLanguageName(Style.Language)
+ << "\n");
}
std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
@@ -103,7 +74,7 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
assert(UnwrappedLines.rbegin()->empty());
unsigned Penalty = 0;
for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE; ++Run) {
- DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
+ LLVM_DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
SmallVector<AnnotatedLine *, 16> AnnotatedLines;
TokenAnnotator Annotator(Style, Tokens.getKeywords());
@@ -115,7 +86,7 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
std::pair<tooling::Replacements, unsigned> RunResult =
analyze(Annotator, AnnotatedLines, Tokens);
- DEBUG({
+ LLVM_DEBUG({
llvm::dbgs() << "Replacements for run " << Run << ":\n";
for (tooling::Replacements::const_iterator I = RunResult.first.begin(),
E = RunResult.first.end();
diff --git a/lib/Format/TokenAnalyzer.h b/lib/Format/TokenAnalyzer.h
index 96ea00b25ba1..e43a860e46cf 100644
--- a/lib/Format/TokenAnalyzer.h
+++ b/lib/Format/TokenAnalyzer.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file declares an abstract TokenAnalyzer, and associated helper
+/// This file declares an abstract TokenAnalyzer, and associated helper
/// classes. TokenAnalyzer can be extended to generate replacements based on
/// an annotated and pre-processed token stream.
///
@@ -37,44 +37,24 @@ namespace format {
class Environment {
public:
Environment(SourceManager &SM, FileID ID, ArrayRef<CharSourceRange> Ranges)
- : ID(ID), CharRanges(Ranges.begin(), Ranges.end()), SM(SM),
- FirstStartColumn(0),
- NextStartColumn(0),
- LastStartColumn(0) {}
-
- Environment(FileID ID, std::unique_ptr<FileManager> FileMgr,
- std::unique_ptr<SourceManager> VirtualSM,
- std::unique_ptr<DiagnosticsEngine> Diagnostics,
- const std::vector<CharSourceRange> &CharRanges,
- unsigned FirstStartColumn,
- unsigned NextStartColumn,
- unsigned LastStartColumn)
- : ID(ID), CharRanges(CharRanges.begin(), CharRanges.end()),
- SM(*VirtualSM),
- FirstStartColumn(FirstStartColumn),
- NextStartColumn(NextStartColumn),
- LastStartColumn(LastStartColumn),
- FileMgr(std::move(FileMgr)),
- VirtualSM(std::move(VirtualSM)), Diagnostics(std::move(Diagnostics)) {}
+ : SM(SM), ID(ID), CharRanges(Ranges.begin(), Ranges.end()),
+ FirstStartColumn(0), NextStartColumn(0), LastStartColumn(0) {}
// This sets up a virtual file system with file \p FileName containing the
// fragment \p Code. Assumes that \p Code starts at \p FirstStartColumn,
// that the next lines of \p Code should start at \p NextStartColumn, and
// that \p Code should end at \p LastStartColumn if it ends in newline.
// See also the documentation of clang::format::internal::reformat.
- static std::unique_ptr<Environment>
- CreateVirtualEnvironment(StringRef Code, StringRef FileName,
- ArrayRef<tooling::Range> Ranges,
- unsigned FirstStartColumn = 0,
- unsigned NextStartColumn = 0,
- unsigned LastStartColumn = 0);
+ Environment(StringRef Code, StringRef FileName,
+ ArrayRef<tooling::Range> Ranges, unsigned FirstStartColumn = 0,
+ unsigned NextStartColumn = 0, unsigned LastStartColumn = 0);
FileID getFileID() const { return ID; }
- ArrayRef<CharSourceRange> getCharRanges() const { return CharRanges; }
-
const SourceManager &getSourceManager() const { return SM; }
+ ArrayRef<CharSourceRange> getCharRanges() const { return CharRanges; }
+
// Returns the column at which the fragment of code managed by this
// environment starts.
unsigned getFirstStartColumn() const { return FirstStartColumn; }
@@ -88,19 +68,18 @@ public:
unsigned getLastStartColumn() const { return LastStartColumn; }
private:
+ // This is only set if constructed from a string.
+ std::unique_ptr<SourceManagerForFile> VirtualSM;
+
+ // This refers to either a SourceManager provided by users or VirtualSM
+ // created for a single file.
+ SourceManager &SM;
FileID ID;
+
SmallVector<CharSourceRange, 8> CharRanges;
- SourceManager &SM;
unsigned FirstStartColumn;
unsigned NextStartColumn;
unsigned LastStartColumn;
-
- // The order of these fields are important - they should be in the same order
- // as they are created in `CreateVirtualEnvironment` so that they can be
- // deleted in the reverse order as they are created.
- std::unique_ptr<FileManager> FileMgr;
- std::unique_ptr<SourceManager> VirtualSM;
- std::unique_ptr<DiagnosticsEngine> Diagnostics;
};
class TokenAnalyzer : public UnwrappedLineConsumer {
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 298c72b002f8..3a19215e1803 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements a token annotator, i.e. creates
+/// This file implements a token annotator, i.e. creates
/// \c AnnotatedTokens out of \c FormatTokens with required extra information.
///
//===----------------------------------------------------------------------===//
@@ -25,7 +25,22 @@ namespace format {
namespace {
-/// \brief A parser that gathers additional information about tokens.
+/// Returns \c true if the token can be used as an identifier in
+/// an Objective-C \c @selector, \c false otherwise.
+///
+/// Because getFormattingLangOpts() always lexes source code as
+/// Objective-C++, C++ keywords like \c new and \c delete are
+/// lexed as tok::kw_*, not tok::identifier, even for Objective-C.
+///
+/// For Objective-C and Objective-C++, both identifiers and keywords
+/// are valid inside @selector(...) (or a macro which
+/// invokes @selector(...)). So, we treat any identifier or
+/// keyword as a potential Objective-C selector component.
+static bool canBeObjCSelectorComponent(const FormatToken &Tok) {
+ return Tok.Tok.getIdentifierInfo() != nullptr;
+}
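// Illustrative (hypothetical) inputs: since the lexer always runs in
// Objective-C++ mode, the keyword "new" still carries an IdentifierInfo, so
// both selectors below are treated as valid:
//   [obj performSelector:@selector(new)];
//   [obj performSelector:@selector(foo:bar:)];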
+
+/// A parser that gathers additional information about tokens.
///
/// The \c TokenAnnotator tries to match parenthesis and square brakets and
/// store a parenthesis levels. It also tries to resolve matching "<" and ">"
@@ -79,7 +94,17 @@ private:
if (CurrentToken->is(tok::greater)) {
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
- CurrentToken->Type = TT_TemplateCloser;
+ // In LK_Proto, we must distinguish between:
+ // map<key, value>
+ // msg < item: data >
+ // msg: < item: data >
+ // In LK_TextProto, map<key, value> does not occur.
+ if (Style.Language == FormatStyle::LK_TextProto ||
+ (Style.Language == FormatStyle::LK_Proto && Left->Previous &&
+ Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral)))
+ CurrentToken->Type = TT_DictLiteral;
+ else
+ CurrentToken->Type = TT_TemplateCloser;
next();
return true;
}
@@ -131,10 +156,7 @@ private:
Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;
bool StartsObjCMethodExpr = false;
- if (CurrentToken->is(tok::caret)) {
- // (^ can start a block type.
- Left->Type = TT_ObjCBlockLParen;
- } else if (FormatToken *MaybeSel = Left->Previous) {
+ if (FormatToken *MaybeSel = Left->Previous) {
// @selector( starts a selector.
if (MaybeSel->isObjCAtKeyword(tok::objc_selector) && MaybeSel->Previous &&
MaybeSel->Previous->is(tok::at)) {
@@ -200,12 +222,21 @@ private:
Left->Type = TT_ObjCMethodExpr;
}
+ // MightBeFunctionType and ProbablyFunctionType are used for
+ // function pointer and reference types as well as Objective-C
+ // block types:
+ //
+ // void (*FunctionPointer)(void);
+ // void (&FunctionReference)(void);
+ // void (^ObjCBlock)(void);
bool MightBeFunctionType = !Contexts[Contexts.size() - 2].IsExpression;
- bool ProbablyFunctionType = CurrentToken->isOneOf(tok::star, tok::amp);
+ bool ProbablyFunctionType =
+ CurrentToken->isOneOf(tok::star, tok::amp, tok::caret);
bool HasMultipleLines = false;
bool HasMultipleParametersOnALine = false;
bool MightBeObjCForRangeLoop =
Left->Previous && Left->Previous->is(tok::kw_for);
+ FormatToken *PossibleObjCForInToken = nullptr;
while (CurrentToken) {
// LookForDecls is set when "if (" has been seen. Check for
// 'identifier' '*' 'identifier' followed by not '=' -- this
@@ -237,7 +268,8 @@ private:
if (MightBeFunctionType && ProbablyFunctionType && CurrentToken->Next &&
(CurrentToken->Next->is(tok::l_paren) ||
(CurrentToken->Next->is(tok::l_square) && Line.MustBeDeclaration)))
- Left->Type = TT_FunctionTypeLParen;
+ Left->Type = Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
+ : TT_FunctionTypeLParen;
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
@@ -291,10 +323,17 @@ private:
CurrentToken->Previous->isSimpleTypeSpecifier()) &&
!CurrentToken->is(tok::l_brace))
Contexts.back().IsExpression = false;
- if (CurrentToken->isOneOf(tok::semi, tok::colon))
+ if (CurrentToken->isOneOf(tok::semi, tok::colon)) {
MightBeObjCForRangeLoop = false;
- if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in))
- CurrentToken->Type = TT_ObjCForIn;
+ if (PossibleObjCForInToken) {
+ PossibleObjCForInToken->Type = TT_Unknown;
+ PossibleObjCForInToken = nullptr;
+ }
+ }
+ if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in)) {
+ PossibleObjCForInToken = CurrentToken;
+ PossibleObjCForInToken->Type = TT_ObjCForIn;
+ }
// When we discover a 'new', we set CanBeExpression to 'false' in order to
// parse the type correctly. Reset that after a comma.
if (CurrentToken->is(tok::comma))
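
A hedged pair of inputs showing why the for..in tag above is provisional until the paren closes (everything is lexed as Objective-C++, where 'in' is only a contextual keyword):

for (id obj in collection) {}      // 'in' keeps TT_ObjCForIn
for (int in = 0; in < n; ++in) {}  // the first ';' resets 'in' to TT_Unknown
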
@@ -310,13 +349,40 @@ private:
return false;
}
+ bool isCpp11AttributeSpecifier(const FormatToken &Tok) {
+ if (!Style.isCpp() || !Tok.startsSequence(tok::l_square, tok::l_square))
+ return false;
+ const FormatToken *AttrTok = Tok.Next->Next;
+ if (!AttrTok)
+ return false;
+ // C++17 '[[using ns: foo, bar(baz, blech)]]'
+ // We assume nobody will name an ObjC variable 'using'.
+ if (AttrTok->startsSequence(tok::kw_using, tok::identifier, tok::colon))
+ return true;
+ if (AttrTok->isNot(tok::identifier))
+ return false;
+ while (AttrTok && !AttrTok->startsSequence(tok::r_square, tok::r_square)) {
+ // ObjC message send. We assume nobody will use : in a C++11 attribute
+ // specifier parameter, although this is technically valid:
+ // [[foo(:)]]
+ if (AttrTok->is(tok::colon) ||
+ AttrTok->startsSequence(tok::identifier, tok::identifier))
+ return false;
+ if (AttrTok->is(tok::ellipsis))
+ return true;
+ AttrTok = AttrTok->Next;
+ }
+ return AttrTok && AttrTok->startsSequence(tok::r_square, tok::r_square);
+ }
+
bool parseSquare() {
if (!CurrentToken)
return false;
// A '[' could be an index subscript (after an identifier or after
// ')' or ']'), it could be the start of an Objective-C method
- // expression, or it could the start of an Objective-C array literal.
+ // expression, it could be the start of an Objective-C array literal,
+ // or it could be a C++ attribute specifier [[foo::bar]].
FormatToken *Left = CurrentToken->Previous;
Left->ParentBracket = Contexts.back().ContextKind;
FormatToken *Parent = Left->getPreviousNonComment();
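
A hedged set of inputs that the isCpp11AttributeSpecifier() scan above has to separate; the first two are C++ attribute specifiers, the last is an Objective-C message send that also begins with '[[':

[[nodiscard]] int parse();                  // attribute -> TT_AttributeSquare
[[using gnu: hot, always_inline]] void f(); // C++17 attribute-using form
auto n = [[obj firstChild] retainCount];    // identifier identifier inside
                                            // '[[' -> message send, not attr
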
@@ -329,14 +395,18 @@ private:
(Contexts.back().CanBeExpression || Contexts.back().IsExpression ||
Contexts.back().InTemplateArgument);
+ bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) ||
+ Contexts.back().InCpp11AttributeSpecifier;
+
bool StartsObjCMethodExpr =
- !CppArrayTemplates && Style.isCpp() &&
+ !CppArrayTemplates && Style.isCpp() && !IsCpp11AttributeSpecifier &&
Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
- CurrentToken->isNot(tok::l_brace) &&
+ !CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
tok::kw_return, tok::kw_throw) ||
Parent->isUnaryOperator() ||
+ // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
Parent->isOneOf(TT_ObjCForIn, TT_CastRParen) ||
getBinOpPrecedence(Parent->Tok.getKind(), true, true) > prec::Unknown);
bool ColonFound = false;
@@ -347,6 +417,8 @@ private:
} else if (Left->is(TT_Unknown)) {
if (StartsObjCMethodExpr) {
Left->Type = TT_ObjCMethodExpr;
+ } else if (IsCpp11AttributeSpecifier) {
+ Left->Type = TT_AttributeSquare;
} else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
@@ -358,12 +430,48 @@ private:
Parent->is(TT_TemplateCloser)) {
Left->Type = TT_ArraySubscriptLSquare;
} else if (Style.Language == FormatStyle::LK_Proto ||
- (!CppArrayTemplates && Parent &&
- Parent->isOneOf(TT_BinaryOperator, TT_TemplateCloser, tok::at,
- tok::comma, tok::l_paren, tok::l_square,
- tok::question, tok::colon, tok::kw_return,
- // Should only be relevant to JavaScript:
- tok::kw_default))) {
+ Style.Language == FormatStyle::LK_TextProto) {
+ // Square brackets in LK_Proto can either be message field attributes:
+ //
+ // optional Aaa aaa = 1 [
+ // (aaa) = aaa
+ // ];
+ //
+ // extensions 123 [
+ // (aaa) = aaa
+ // ];
+ //
+ // or text proto extensions (in options):
+ //
+ // option (Aaa.options) = {
+ // [type.type/type] {
+ // key: value
+ // }
+ // }
+ //
+ // or repeated fields (in options):
+ //
+ // option (Aaa.options) = {
+ // keys: [ 1, 2, 3 ]
+ // }
+ //
+ // In the first and third cases we want to spread the contents inside
+ // the square brackets; in the second we want to keep them inline.
+ Left->Type = TT_ArrayInitializerLSquare;
+ if (!Left->endsSequence(tok::l_square, tok::numeric_constant,
+ tok::equal) &&
+ !Left->endsSequence(tok::l_square, tok::numeric_constant,
+ tok::identifier) &&
+ !Left->endsSequence(tok::l_square, tok::colon, TT_SelectorName)) {
+ Left->Type = TT_ProtoExtensionLSquare;
+ BindingIncrease = 10;
+ }
+ } else if (!CppArrayTemplates && Parent &&
+ Parent->isOneOf(TT_BinaryOperator, TT_TemplateCloser, tok::at,
+ tok::comma, tok::l_paren, tok::l_square,
+ tok::question, tok::colon, tok::kw_return,
+ // Should only be relevant to JavaScript:
+ tok::kw_default)) {
Left->Type = TT_ArrayInitializerLSquare;
} else {
BindingIncrease = 10;
@@ -378,11 +486,14 @@ private:
Contexts.back().IsExpression = false;
Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
+ Contexts.back().InCpp11AttributeSpecifier = IsCpp11AttributeSpecifier;
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
- if (CurrentToken->Next && CurrentToken->Next->is(tok::l_paren) &&
- Left->is(TT_ObjCMethodExpr)) {
+ if (IsCpp11AttributeSpecifier)
+ CurrentToken->Type = TT_AttributeSquare;
+ else if (CurrentToken->Next && CurrentToken->Next->is(tok::l_paren) &&
+ Left->is(TT_ObjCMethodExpr)) {
// An ObjC method call is rarely followed by an open parenthesis.
// FIXME: Do we incorrectly label ":" with this?
StartsObjCMethodExpr = false;
@@ -390,6 +501,12 @@ private:
}
if (StartsObjCMethodExpr && CurrentToken->Previous != Left) {
CurrentToken->Type = TT_ObjCMethodExpr;
+ // If we haven't seen a colon yet, make sure the last identifier
+ // before the r_square is tagged as a selector name component.
+ if (!ColonFound && CurrentToken->Previous &&
+ CurrentToken->Previous->is(TT_Unknown) &&
+ canBeObjCSelectorComponent(*CurrentToken->Previous))
+ CurrentToken->Previous->Type = TT_SelectorName;
// determineStarAmpUsage() thinks that '*' '[' is allocating an
// array of pointers, but if '[' starts a selector then '*' is a
// binary operator.
@@ -398,6 +515,20 @@ private:
}
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
+ // FirstObjCSelectorName is set when a colon is found. This does
+ // not work, however, when the method has no parameters.
+ // Here, we set FirstObjCSelectorName when the end of the method call is
+ // reached, in case it was not set already.
+ if (!Contexts.back().FirstObjCSelectorName) {
+ FormatToken *Previous = CurrentToken->getPreviousNonComment();
+ if (Previous && Previous->is(TT_SelectorName)) {
+ Previous->ObjCSelectorNameParts = 1;
+ Contexts.back().FirstObjCSelectorName = Previous;
+ }
+ } else {
+ Left->ParameterCount =
+ Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
+ }
if (Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
Contexts.back().LongestObjCSelectorName;
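
A hedged illustration of the two r_square paths above, using simple sends (lexed as Objective-C++):

[pool drain];                 // no ':' seen, so 'drain' becomes the
                              // FirstObjCSelectorName at ']', with
                              // ObjCSelectorNameParts = 1
[dict setObject:o forKey:k];  // 'setObject' was set at the first ':';
                              // the ']' copies its part count into
                              // Left->ParameterCount
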
@@ -410,12 +541,19 @@ private:
if (CurrentToken->isOneOf(tok::r_paren, tok::r_brace))
return false;
if (CurrentToken->is(tok::colon)) {
- if (Left->isOneOf(TT_ArraySubscriptLSquare,
- TT_DesignatedInitializerLSquare)) {
+ if (IsCpp11AttributeSpecifier &&
+ CurrentToken->endsSequence(tok::colon, tok::identifier,
+ tok::kw_using)) {
+ // Remember that this is a [[using ns: foo]] C++ attribute, so we
+ // don't add a space before the colon (unlike other colons).
+ CurrentToken->Type = TT_AttributeColon;
+ } else if (Left->isOneOf(TT_ArraySubscriptLSquare,
+ TT_DesignatedInitializerLSquare)) {
Left->Type = TT_ObjCMethodExpr;
StartsObjCMethodExpr = true;
Contexts.back().ColonIsObjCMethodExpr = true;
if (Parent && Parent->is(tok::r_paren))
+ // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
Parent->Type = TT_CastRParen;
}
ColonFound = true;
@@ -462,13 +600,15 @@ private:
FormatToken *Previous = CurrentToken->getPreviousNonComment();
if (Previous->is(TT_JsTypeOptionalQuestion))
Previous = Previous->getPreviousNonComment();
- if (((CurrentToken->is(tok::colon) &&
- (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) &&
- (Previous->Tok.getIdentifierInfo() ||
- Previous->is(tok::string_literal)))
- Previous->Type = TT_SelectorName;
+ if ((CurrentToken->is(tok::colon) &&
+ (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
+ Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) {
+ Left->Type = TT_DictLiteral;
+ if (Previous->Tok.getIdentifierInfo() ||
+ Previous->is(tok::string_literal))
+ Previous->Type = TT_SelectorName;
+ }
if (CurrentToken->is(tok::colon) ||
Style.Language == FormatStyle::LK_JavaScript)
Left->Type = TT_DictLiteral;
@@ -484,6 +624,9 @@ private:
}
void updateParameterCount(FormatToken *Left, FormatToken *Current) {
+ // For ObjC methods, the number of parameters is calculated differently as
+ // method declarations have a different structure (the parameters are not
+ // inside a bracket scope).
if (Current->is(tok::l_brace) && Current->BlockKind == BK_Block)
++Left->BlockParameterCount;
if (Current->is(tok::comma)) {
@@ -562,19 +705,29 @@ private:
Line.startsWith(TT_ObjCMethodSpecifier)) {
Tok->Type = TT_ObjCMethodExpr;
const FormatToken *BeforePrevious = Tok->Previous->Previous;
+ // Ensure we tag all identifiers in method declarations as
+ // TT_SelectorName.
+ bool UnknownIdentifierInMethodDeclaration =
+ Line.startsWith(TT_ObjCMethodSpecifier) &&
+ Tok->Previous->is(tok::identifier) && Tok->Previous->is(TT_Unknown);
if (!BeforePrevious ||
+ // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
!(BeforePrevious->is(TT_CastRParen) ||
(BeforePrevious->is(TT_ObjCMethodExpr) &&
BeforePrevious->is(tok::colon))) ||
BeforePrevious->is(tok::r_square) ||
- Contexts.back().LongestObjCSelectorName == 0) {
+ Contexts.back().LongestObjCSelectorName == 0 ||
+ UnknownIdentifierInMethodDeclaration) {
Tok->Previous->Type = TT_SelectorName;
- if (Tok->Previous->ColumnWidth >
- Contexts.back().LongestObjCSelectorName)
- Contexts.back().LongestObjCSelectorName =
- Tok->Previous->ColumnWidth;
if (!Contexts.back().FirstObjCSelectorName)
Contexts.back().FirstObjCSelectorName = Tok->Previous;
+ else if (Tok->Previous->ColumnWidth >
+ Contexts.back().LongestObjCSelectorName)
+ Contexts.back().LongestObjCSelectorName =
+ Tok->Previous->ColumnWidth;
+ Tok->Previous->ParameterIndex =
+ Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
+ ++Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
}
} else if (Contexts.back().ColonIsForRangeExpr) {
Tok->Type = TT_RangeBasedForLoopColon;
@@ -587,8 +740,10 @@ private:
Tok->Type = TT_CtorInitializerColon;
else
Tok->Type = TT_InheritanceColon;
- } else if (Tok->Previous->is(tok::identifier) && Tok->Next &&
- Tok->Next->isOneOf(tok::r_paren, tok::comma)) {
+ } else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next &&
+ (Tok->Next->isOneOf(tok::r_paren, tok::comma) ||
+ (canBeObjCSelectorComponent(*Tok->Next) && Tok->Next->Next &&
+ Tok->Next->Next->is(tok::colon)))) {
// This handles a special macro in ObjC code where selectors including
// the colon are passed as macro arguments.
Tok->Type = TT_ObjCMethodExpr;
@@ -668,7 +823,15 @@ private:
case tok::less:
if (parseAngle()) {
Tok->Type = TT_TemplateOpener;
- if (Style.Language == FormatStyle::LK_TextProto) {
+ // In TT_Proto, we must distinguish between:
+ // map<key, value>
+ // msg < item: data >
+ // msg: < item: data >
+ // In TT_TextProto, map<key, value> does not occur.
+ if (Style.Language == FormatStyle::LK_TextProto ||
+ (Style.Language == FormatStyle::LK_Proto && Tok->Previous &&
+ Tok->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
+ Tok->Type = TT_DictLiteral;
FormatToken *Previous = Tok->getPreviousNonComment();
if (Previous && Previous->Type != TT_DictLiteral)
Previous->Type = TT_SelectorName;
@@ -689,9 +852,13 @@ private:
return false;
break;
case tok::greater:
- Tok->Type = TT_BinaryOperator;
+ if (Style.Language != FormatStyle::LK_TextProto)
+ Tok->Type = TT_BinaryOperator;
break;
case tok::kw_operator:
+ if (Style.Language == FormatStyle::LK_TextProto ||
+ Style.Language == FormatStyle::LK_Proto)
+ break;
while (CurrentToken &&
!CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
@@ -987,7 +1154,7 @@ private:
resetTokenMetadata(CurrentToken);
}
- /// \brief A struct to hold information valid in a specific context, e.g.
+ /// A struct to hold information valid in a specific context, e.g.
/// a pair of parenthesis.
struct Context {
Context(tok::TokenKind ContextKind, unsigned BindingStrength,
@@ -1010,9 +1177,10 @@ private:
bool InInheritanceList = false;
bool CaretFound = false;
bool IsForEachMacro = false;
+ bool InCpp11AttributeSpecifier = false;
};
- /// \brief Puts a new \c Context onto the stack \c Contexts for the lifetime
+ /// Puts a new \c Context onto the stack \c Contexts for the lifetime
/// of each instance.
struct ScopedContextCreator {
AnnotatingParser &P;
@@ -1155,7 +1323,9 @@ private:
Current.Type = TT_ConditionalExpr;
}
} else if (Current.isBinaryOperator() &&
- (!Current.Previous || Current.Previous->isNot(tok::l_square))) {
+ (!Current.Previous || Current.Previous->isNot(tok::l_square)) &&
+ (!Current.is(tok::greater) &&
+ Style.Language != FormatStyle::LK_TextProto)) {
Current.Type = TT_BinaryOperator;
} else if (Current.is(tok::comment)) {
if (Current.TokenText.startswith("/*")) {
@@ -1214,6 +1384,17 @@ private:
TT_LeadingJavaAnnotation)) {
Current.Type = Current.Previous->Type;
}
+ } else if (canBeObjCSelectorComponent(Current) &&
+ // FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
+ Current.Previous && Current.Previous->is(TT_CastRParen) &&
+ Current.Previous->MatchingParen &&
+ Current.Previous->MatchingParen->Previous &&
+ Current.Previous->MatchingParen->Previous->is(
+ TT_ObjCMethodSpecifier)) {
+ // This is the first part of an Objective-C selector name. (If there's no
+ // colon after this, this is the only place which annotates the identifier
+ // as a selector.)
+ Current.Type = TT_SelectorName;
} else if (Current.isOneOf(tok::identifier, tok::kw_const) &&
Current.Previous &&
!Current.Previous->isOneOf(tok::equal, tok::at) &&
@@ -1240,7 +1421,7 @@ private:
}
}
- /// \brief Take a guess at whether \p Tok starts a name of a function or
+ /// Take a guess at whether \p Tok starts a name of a function or
/// variable declaration.
///
/// This is a heuristic based on whether \p Tok is an identifier following
@@ -1285,7 +1466,7 @@ private:
PreviousNotConst->isSimpleTypeSpecifier();
}
- /// \brief Determine whether ')' is ending a cast.
+ /// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
// C-style casts are only used in C++ and Java.
if (!Style.isCpp() && Style.Language != FormatStyle::LK_Java)
@@ -1382,7 +1563,7 @@ private:
return true;
}
- /// \brief Return the type of the given token assuming it is * or &.
+ /// Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
bool InTemplateArgument) {
if (Style.Language == FormatStyle::LK_JavaScript)
@@ -1459,10 +1640,8 @@ private:
if (!PrevToken)
return TT_UnaryOperator;
- if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator) &&
- !PrevToken->is(tok::exclaim))
- // There aren't any trailing unary operators except for TypeScript's
- // non-null operator (!). Thus, this must be squence of leading operators.
+ if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator))
+ // This must be a sequence of leading unary operators.
return TT_UnaryOperator;
// Use heuristics to recognize unary operators.
@@ -1479,7 +1658,7 @@ private:
return TT_BinaryOperator;
}
- /// \brief Determine whether ++/-- are pre- or post-increments/-decrements.
+ /// Determine whether ++/-- are pre- or post-increments/-decrements.
TokenType determineIncrementUsage(const FormatToken &Tok) {
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken || PrevToken->is(TT_CastRParen))
@@ -1508,7 +1687,7 @@ private:
static const int PrecedenceUnaryOperator = prec::PointerToMember + 1;
static const int PrecedenceArrowAndPeriod = prec::PointerToMember + 2;
-/// \brief Parses binary expressions by inserting fake parenthesis based on
+/// Parses binary expressions by inserting fake parenthesis based on
/// operator precedence.
class ExpressionParser {
public:
@@ -1516,7 +1695,7 @@ public:
AnnotatedLine &Line)
: Style(Style), Keywords(Keywords), Current(Line.First) {}
- /// \brief Parse expressions with the given operatore precedence.
+ /// Parse expressions with the given operator precedence.
void parse(int Precedence = 0) {
// Skip 'return' and ObjC selector colons as they are not part of a binary
// expression.
@@ -1603,7 +1782,7 @@ public:
}
private:
- /// \brief Gets the precedence (+1) of the given token for binary operators
+ /// Gets the precedence (+1) of the given token for binary operators
/// and other tokens that we treat like binary operators.
int getCurrentPrecedence() {
if (Current) {
@@ -1662,7 +1841,7 @@ private:
}
}
- /// \brief Parse unary operator expressions and surround them with fake
+ /// Parse unary operator expressions and surround them with fake
/// parentheses if appropriate.
void parseUnaryOperator() {
llvm::SmallVector<FormatToken *, 2> Tokens;
@@ -1723,15 +1902,18 @@ void TokenAnnotator::setCommentLineLevels(
}
}
- if (NextNonCommentLine && CommentLine) {
- // If the comment is currently aligned with the line immediately following
- // it, that's probably intentional and we should keep it.
- bool AlignedWithNextLine =
- NextNonCommentLine->First->NewlinesBefore <= 1 &&
- NextNonCommentLine->First->OriginalColumn ==
- (*I)->First->OriginalColumn;
- if (AlignedWithNextLine)
- (*I)->Level = NextNonCommentLine->Level;
+ // If the comment is currently aligned with the line immediately following
+ // it, that's probably intentional and we should keep it.
+ if (NextNonCommentLine && CommentLine &&
+ NextNonCommentLine->First->NewlinesBefore <= 1 &&
+ NextNonCommentLine->First->OriginalColumn ==
+ (*I)->First->OriginalColumn) {
+ // Align comments for preprocessor lines with the # in column 0.
+ // Otherwise, align with the next line.
+ (*I)->Level = (NextNonCommentLine->Type == LT_PreprocessorDirective ||
+ NextNonCommentLine->Type == LT_ImportStatement)
+ ? 0
+ : NextNonCommentLine->Level;
} else {
NextNonCommentLine = (*I)->First->isNot(tok::r_brace) ? (*I) : nullptr;
}
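
A hedged example of the preprocessor case above (a minimal sketch, assuming default style):

void f() {
  int x = 0;
// aligned with the '#' below, so this comment's level is forced to 0
#define FLAG 1
  int y = FLAG;  // ordinary comments still adopt the next line's level
}
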
@@ -1962,8 +2144,20 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
// FIXME: Only calculate this if CanBreakBefore is true once static
// initializers etc. are sorted out.
// FIXME: Move magic numbers to a better place.
- Current->SplitPenalty = 20 * Current->BindingStrength +
- splitPenalty(Line, *Current, InFunctionDecl);
+
+ // Reduce the penalty for aligning ObjC method arguments using colon
+ // alignment, as this is the canonical way (though we still prefer fitting
+ // everything onto one line if possible). Trying to fit a whole expression
+ // onto one line should not force other line breaks (e.g. when an ObjC
+ // method expression is part of a larger expression).
+ Current->SplitPenalty = splitPenalty(Line, *Current, InFunctionDecl);
+ if (Style.Language == FormatStyle::LK_ObjC &&
+ Current->is(TT_SelectorName) && Current->ParameterIndex > 0) {
+ if (Current->ParameterIndex == 1)
+ Current->SplitPenalty += 5 * Current->BindingStrength;
+ } else {
+ Current->SplitPenalty += 20 * Current->BindingStrength;
+ }
Current = Current->Next;
}
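
The same arithmetic restated as a hedged standalone function (the names are ours; only the 5x vs. 20x weights and the ParameterIndex == 1 case come from the change above):

// Sketch only: how the base split penalty and binding strength now combine.
static unsigned combinedSplitPenalty(unsigned Base, unsigned BindingStrength,
                                     bool ObjCSelectorPart,
                                     unsigned ParameterIndex) {
  unsigned Penalty = Base;  // splitPenalty(Line, Tok, InFunctionDecl)
  if (ObjCSelectorPart && ParameterIndex > 0) {
    if (ParameterIndex == 1)
      Penalty += 5 * BindingStrength;  // cheap: colon alignment is canonical
  } else {
    Penalty += 20 * BindingStrength;   // everything else keeps the old weight
  }
  return Penalty;
}
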
@@ -1983,7 +2177,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
++IndentLevel;
}
- DEBUG({ printDebugInfo(Line); });
+ LLVM_DEBUG({ printDebugInfo(Line); });
}
void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
@@ -2043,7 +2237,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 35;
if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
- TT_DesignatedInitializerLSquare))
+ TT_DesignatedInitializerLSquare, TT_AttributeSquare))
return 500;
}
@@ -2128,6 +2322,13 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
return Line.MightBeFunctionDecl ? 50 : 500;
+ // In Objective-C type declarations, avoid breaking after the category's
+ // open paren (we'll prefer breaking after the protocol list's opening
+ // angle bracket, if present).
+ if (Line.Type == LT_ObjCDecl && Left.is(tok::l_paren) && Left.Previous &&
+ Left.Previous->isOneOf(tok::identifier, tok::greater))
+ return 500;
+
if (Left.is(tok::l_paren) && InFunctionDecl &&
Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
return 100;
@@ -2144,6 +2345,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.opensScope()) {
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign)
return 0;
+ if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
+ return 19;
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
: 19;
}
@@ -2169,6 +2372,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 2;
return 1;
}
+ if (Left.ClosesTemplateDeclaration)
+ return Style.PenaltyBreakTemplateDeclaration;
if (Left.is(TT_ConditionalExpr))
return prec::Conditional;
prec::Level Level = Left.getPrecedence();
@@ -2205,9 +2410,12 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
: Style.SpacesInParentheses;
if (Right.isOneOf(tok::semi, tok::comma))
return false;
- if (Right.is(tok::less) && Line.Type == LT_ObjCDecl &&
- Style.ObjCSpaceBeforeProtocolList)
- return true;
+ if (Right.is(tok::less) && Line.Type == LT_ObjCDecl) {
+ bool IsLightweightGeneric =
+ Right.MatchingParen && Right.MatchingParen->Next &&
+ Right.MatchingParen->Next->is(tok::colon);
+ return !IsLightweightGeneric && Style.ObjCSpaceBeforeProtocolList;
+ }
if (Right.is(tok::less) && Left.is(tok::kw_template))
return Style.SpaceAfterTemplateKeyword;
if (Left.isOneOf(tok::exclaim, tok::tilde))
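
A hedged pair of interface declarations showing what the MatchingParen->Next probe above separates (lexed as Objective-C++):

@interface Foo <NSCopying>    // protocol list: matching '>' not followed by
@end                          // ':', so ObjCSpaceBeforeProtocolList applies
@interface Bar<T> : NSObject  // lightweight generic: matching '>' is followed
@end                          // by ':', so no space is added before '<'
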
@@ -2221,8 +2429,17 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return !Left.is(TT_ObjCMethodExpr);
if (Left.is(tok::coloncolon))
return false;
- if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less))
+ if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less)) {
+ if (Style.Language == FormatStyle::LK_TextProto ||
+ (Style.Language == FormatStyle::LK_Proto &&
+ (Left.is(TT_DictLiteral) || Right.is(TT_DictLiteral)))) {
+ // Format empty list as `<>`.
+ if (Left.is(tok::less) && Right.is(tok::greater))
+ return false;
+ return !Style.Cpp11BracedListStyle;
+ }
return false;
+ }
if (Right.is(tok::ellipsis))
return Left.Tok.isLiteral() || (Left.is(tok::identifier) && Left.Previous &&
Left.Previous->is(tok::kw_case));
@@ -2263,23 +2480,34 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
!Left.Previous->isOneOf(tok::l_paren, tok::coloncolon));
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
+ const auto SpaceRequiredForArrayInitializerLSquare =
+ [](const FormatToken &LSquareTok, const FormatStyle &Style) {
+ return Style.SpacesInContainerLiterals ||
+ ((Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) &&
+ !Style.Cpp11BracedListStyle &&
+ LSquareTok.endsSequence(tok::l_square, tok::colon,
+ TT_SelectorName));
+ };
if (Left.is(tok::l_square))
- return (Left.is(TT_ArrayInitializerLSquare) &&
- Style.SpacesInContainerLiterals && Right.isNot(tok::r_square)) ||
+ return (Left.is(TT_ArrayInitializerLSquare) && Right.isNot(tok::r_square) &&
+ SpaceRequiredForArrayInitializerLSquare(Left, Style)) ||
(Left.isOneOf(TT_ArraySubscriptLSquare,
TT_StructuredBindingLSquare) &&
Style.SpacesInSquareBrackets && Right.isNot(tok::r_square));
if (Right.is(tok::r_square))
return Right.MatchingParen &&
- ((Style.SpacesInContainerLiterals &&
- Right.MatchingParen->is(TT_ArrayInitializerLSquare)) ||
+ ((Right.MatchingParen->is(TT_ArrayInitializerLSquare) &&
+ SpaceRequiredForArrayInitializerLSquare(*Right.MatchingParen,
+ Style)) ||
(Style.SpacesInSquareBrackets &&
Right.MatchingParen->isOneOf(TT_ArraySubscriptLSquare,
- TT_StructuredBindingLSquare)));
+ TT_StructuredBindingLSquare)) ||
+ Right.MatchingParen->is(TT_AttributeParen));
if (Right.is(tok::l_square) &&
!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
- TT_StructuredBindingLSquare) &&
+ TT_StructuredBindingLSquare, TT_AttributeSquare) &&
!Left.isOneOf(tok::numeric_constant, TT_DictLiteral))
return false;
if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
@@ -2291,7 +2519,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(TT_BlockComment))
return !Left.TokenText.endswith("=*/");
if (Right.is(tok::l_paren)) {
- if (Left.is(tok::r_paren) && Left.is(TT_AttributeParen))
+ if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) ||
+ (Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
return true;
return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
(Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
@@ -2329,6 +2558,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
return false;
+ if (Left.is(tok::l_brace) && Left.endsSequence(TT_DictLiteral, tok::at))
+ // Objective-C dictionary literal -> no space after opening brace.
+ return false;
+ if (Right.is(tok::r_brace) && Right.MatchingParen &&
+ Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at))
+ // Objective-C dictionary literal -> no space before closing brace.
+ return false;
return true;
}
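
A hedged one-liner for the two dictionary-literal checks above:

NSDictionary *d = @{@"one" : @1, @"two" : @2};
// With the checks above, no space is inserted after the opening '@{' or
// before the closing '}' of an Objective-C dictionary literal.
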
@@ -2340,6 +2576,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Style.isCpp()) {
if (Left.is(tok::kw_operator))
return Right.is(tok::coloncolon);
+ if (Right.is(tok::l_brace) && Right.BlockKind == BK_BracedInit &&
+ !Left.opensScope() && Style.SpaceBeforeCpp11BracedList)
+ return true;
} else if (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
if (Right.is(tok::period) &&
@@ -2351,6 +2590,19 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
if (Right.isOneOf(tok::l_brace, tok::less) && Left.is(TT_SelectorName))
return true;
+ // Slashes occur in text protocol extension syntax: [type/type] { ... }.
+ if (Left.is(tok::slash) || Right.is(tok::slash))
+ return false;
+ if (Left.MatchingParen && Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
+ Right.isOneOf(tok::l_brace, tok::less))
+ return !Style.Cpp11BracedListStyle;
+ // A percent is probably part of a formatting specification, such as %lld.
+ if (Left.is(tok::percent))
+ return false;
+ // Preserve the existence of a space before a percent for cases like 0x%04x
+ // and "%d %d".
+ if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
+ return Right.WhitespaceRange.getEnd() != Right.WhitespaceRange.getBegin();
} else if (Style.Language == FormatStyle::LK_JavaScript) {
if (Left.is(TT_JsFatArrow))
return true;
@@ -2402,7 +2654,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// (e.g. as "const x of y" in a for loop), or after a destructuring
// operation (const [x, y] of z, const {a, b} of c).
(Left.is(Keywords.kw_of) && Left.Previous &&
- (Left.Previous->Tok.getIdentifierInfo() ||
+ (Left.Previous->Tok.is(tok::identifier) ||
Left.Previous->isOneOf(tok::r_square, tok::r_brace)))) &&
(!Left.Previous || !Left.Previous->is(tok::period)))
return true;
@@ -2455,8 +2707,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Line.Type == LT_ObjCMethodDecl) {
if (Left.is(TT_ObjCMethodSpecifier))
return true;
- if (Left.is(tok::r_paren) && Right.is(tok::identifier))
- // Don't space between ')' and <id>
+ if (Left.is(tok::r_paren) && canBeObjCSelectorComponent(Right))
+ // Don't space between ')' and <id> or ')' and 'new'. 'new' is not a
+ // keyword in Objective-C, and '+ (instancetype)new;' is a standard class
+ // method declaration.
return false;
}
if (Line.Type == LT_ObjCProperty &&
@@ -2472,8 +2726,15 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
if (Right.is(tok::comma))
return false;
- if (Right.isOneOf(TT_CtorInitializerColon, TT_ObjCBlockLParen))
+ if (Right.is(TT_ObjCBlockLParen))
return true;
+ if (Right.is(TT_CtorInitializerColon))
+ return Style.SpaceBeforeCtorInitializerColon;
+ if (Right.is(TT_InheritanceColon) && !Style.SpaceBeforeInheritanceColon)
+ return false;
+ if (Right.is(TT_RangeBasedForLoopColon) &&
+ !Style.SpaceBeforeRangeBasedForLoopColon)
+ return false;
if (Right.is(tok::colon)) {
if (Line.First->isOneOf(tok::kw_case, tok::kw_default) ||
!Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
@@ -2486,6 +2747,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
if (Right.is(TT_DictLiteral))
return Style.SpacesInContainerLiterals;
+ if (Right.is(TT_AttributeColon))
+ return false;
return true;
}
if (Left.is(TT_UnaryOperator))
@@ -2497,9 +2760,13 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Style.SpaceAfterCStyleCast ||
Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
- if (Left.is(tok::greater) && Right.is(tok::greater))
+ if (Left.is(tok::greater) && Right.is(tok::greater)) {
+ if (Style.Language == FormatStyle::LK_TextProto ||
+ (Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral)))
+ return !Style.Cpp11BracedListStyle;
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
(Style.Standard != FormatStyle::LS_Cpp11 || Style.SpacesInAngles);
+ }
if (Right.isOneOf(tok::arrow, tok::arrowstar, tok::periodstar) ||
Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
(Right.is(tok::period) && Right.isNot(TT_DesignatedInitializerPeriod)))
@@ -2517,7 +2784,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Style.Standard == FormatStyle::LS_Cpp03) ||
!(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
tok::kw___super, TT_TemplateCloser,
- TT_TemplateOpener));
+ TT_TemplateOpener)) ||
+ (Left.is(tok::l_paren) && Style.SpacesInParentheses);
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return Style.SpacesInAngles;
// Space before TT_StructuredBindingLSquare.
@@ -2597,7 +2865,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
} else if (Style.Language == FormatStyle::LK_Cpp ||
Style.Language == FormatStyle::LK_ObjC ||
- Style.Language == FormatStyle::LK_Proto) {
+ Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) {
if (Left.isStringLiteral() && Right.isStringLiteral())
return true;
}
@@ -2639,7 +2908,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.Previous->ClosesTemplateDeclaration &&
Right.Previous->MatchingParen &&
Right.Previous->MatchingParen->NestingLevel == 0 &&
- Style.AlwaysBreakTemplateDeclarations)
+ Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes)
return true;
if (Right.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma &&
@@ -2650,13 +2919,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
!Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
return true;
// Break only if we have multiple inheritance.
- if (Style.BreakBeforeInheritanceComma && Right.is(TT_InheritanceComma))
+ if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
+ Right.is(TT_InheritanceComma))
return true;
if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
- // Raw string literals are special wrt. line breaks. The author has made a
- // deliberate choice and might have aligned the contents of the string
- // literal accordingly. Thus, we try keep existing line breaks.
- return Right.NewlinesBefore > 0;
+ // Multiline raw string literals are special wrt. line breaks. The author
+ // has made a deliberate choice and might have aligned the contents of the
+ // string literal accordingly. Thus, we try to keep existing line breaks.
+ return Right.IsMultiline && Right.NewlinesBefore > 0;
if ((Right.Previous->is(tok::l_brace) ||
(Right.Previous->is(tok::less) && Right.Previous->Previous &&
Right.Previous->Previous->is(tok::equal))) &&
@@ -2683,6 +2953,94 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
(Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations))
return true;
+ if (Right.is(TT_ProtoExtensionLSquare))
+ return true;
+
+ // In text proto instances if a submessage contains at least 2 entries and at
+ // least one of them is a submessage, like A { ... B { ... } ... },
+ // put all of the entries of A on separate lines by forcing the selector of
+ // the submessage B to be put on a newline.
+ //
+ // Example: these can stay on one line:
+ // a { scalar_1: 1 scalar_2: 2 }
+ // a { b { key: value } }
+ //
+ // and these entries need to be on a new line even if putting them all in one
+ // line is under the column limit:
+ // a {
+ // scalar: 1
+ // b { key: value }
+ // }
+ //
+ // We enforce this by breaking before a submessage field that has previous
+ // siblings, *and* breaking before a field that follows a submessage field.
+ //
+ // Be careful to exclude the case [proto.ext] { ... } since the `]` is
+ // the TT_SelectorName there, but we don't want to break inside the brackets.
+ //
+ // Another edge case is @submessage { key: value }, which is a common
+ // substitution placeholder. In this case we want to keep `@` and `submessage`
+ // together.
+ //
+ // We ensure elsewhere that extensions are always on their own line.
+ if ((Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) &&
+ Right.is(TT_SelectorName) && !Right.is(tok::r_square) && Right.Next) {
+ // Keep `@submessage` together in:
+ // @submessage { key: value }
+ if (Right.Previous && Right.Previous->is(tok::at))
+ return false;
+ // Look for the scope opener after selector in cases like:
+ // selector { ...
+ // selector: { ...
+ // selector: @base { ...
+ FormatToken *LBrace = Right.Next;
+ if (LBrace && LBrace->is(tok::colon)) {
+ LBrace = LBrace->Next;
+ if (LBrace && LBrace->is(tok::at)) {
+ LBrace = LBrace->Next;
+ if (LBrace)
+ LBrace = LBrace->Next;
+ }
+ }
+ if (LBrace &&
+ // The scope opener is one of {, [, <:
+ // selector { ... }
+ // selector [ ... ]
+ // selector < ... >
+ //
+ // In case of selector { ... }, the l_brace is TT_DictLiteral.
+ // In case of an empty selector {}, the l_brace is not TT_DictLiteral,
+ // so we check for immediately following r_brace.
+ ((LBrace->is(tok::l_brace) &&
+ (LBrace->is(TT_DictLiteral) ||
+ (LBrace->Next && LBrace->Next->is(tok::r_brace)))) ||
+ LBrace->is(TT_ArrayInitializerLSquare) || LBrace->is(tok::less))) {
+ // If Left.ParameterCount is 0, then this submessage entry is not the
+ // first in its parent submessage, and we want to break before this entry.
+ // If Left.ParameterCount is greater than 0, then its parent submessage
+ // might contain 1 or more entries and we want to break before this entry
+ // if it contains at least 2 entries. We deal with this case later by
+ // detecting and breaking before the next entry in the parent submessage.
+ if (Left.ParameterCount == 0)
+ return true;
+ // However, if this submessage is the first entry in its parent
+ // submessage, Left.ParameterCount might be 1 in some cases.
+ // We deal with this case later by detecting an entry
+ // following a closing paren of this submessage.
+ }
+
+ // If this is an entry immediately following a submessage, it will be
+ // preceded by a closing paren of that submessage, like in:
+ // left---. .---right
+ // v v
+ // sub: { ... } key: value
+ // If there was a comment between `}` and `key` above, then `key` would be
+ // put on a new line anyway.
+ if (Left.isOneOf(tok::r_brace, tok::greater, tok::r_square))
+ return true;
+ }
+
return false;
}
@@ -2708,14 +3066,19 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Keywords.kw_readonly, Keywords.kw_abstract, Keywords.kw_get,
Keywords.kw_set, Keywords.kw_async, Keywords.kw_await))
return false; // Otherwise automatic semicolon insertion would trigger.
- if (Left.Tok.getIdentifierInfo() &&
- Right.startsSequence(tok::l_square, tok::r_square))
- return false; // breaking in "foo[]" creates illegal TS type syntax.
+ if (Right.NestingLevel == 0 &&
+ (Left.Tok.getIdentifierInfo() ||
+ Left.isOneOf(tok::r_square, tok::r_paren)) &&
+ Right.isOneOf(tok::l_square, tok::l_paren))
+ return false; // Otherwise automatic semicolon insertion would trigger.
if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace))
return false;
if (Left.is(TT_JsTypeColon))
return true;
- if (Right.NestingLevel == 0 && Right.is(Keywords.kw_is))
+ // Don't wrap between ":" and "!" of a strict prop init ("field!: type;").
+ if (Left.is(tok::exclaim) && Right.is(tok::colon))
+ return false;
+ if (Right.is(Keywords.kw_is))
return false;
if (Left.is(Keywords.kw_in))
return Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None;
@@ -2774,16 +3137,56 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Style.BreakBeforeTernaryOperators;
if (Left.is(TT_ConditionalExpr) || Left.is(tok::question))
return !Style.BreakBeforeTernaryOperators;
+ if (Left.is(TT_InheritanceColon))
+ return Style.BreakInheritanceList == FormatStyle::BILS_AfterColon;
if (Right.is(TT_InheritanceColon))
- return true;
+ return Style.BreakInheritanceList != FormatStyle::BILS_AfterColon;
if (Right.is(TT_ObjCMethodExpr) && !Right.is(tok::r_square) &&
Left.isNot(TT_SelectorName))
return true;
+
if (Right.is(tok::colon) &&
!Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
return false;
- if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr))
+ if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
+ if (Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) {
+ if (!Style.AlwaysBreakBeforeMultilineStrings && Right.isStringLiteral())
+ return false;
+ // Prevent cases like:
+ //
+ // submessage:
+ // { key: valueeeeeeeeeeee }
+ //
+ // when the snippet does not fit into one line.
+ // Prefer:
+ //
+ // submessage: {
+ // key: valueeeeeeeeeeee
+ // }
+ //
+ // instead, even if it is longer by one line.
+ //
+ // Note that this allows the "{" to go over the column limit
+ // when the column limit is just between ":" and "{", but that does
+ // not happen too often and alternative formattings in this case are
+ // not much better.
+ //
+ // The code covers the cases:
+ //
+ // submessage: { ... }
+ // submessage: < ... >
+ // repeated: [ ... ]
+ if (((Right.is(tok::l_brace) || Right.is(tok::less)) &&
+ Right.is(TT_DictLiteral)) ||
+ Right.is(TT_ArrayInitializerLSquare))
+ return false;
+ }
return true;
+ }
+ if (Right.is(tok::r_square) && Right.MatchingParen &&
+ Right.MatchingParen->is(TT_ProtoExtensionLSquare))
+ return false;
if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
Right.Next->is(TT_ObjCMethodExpr)))
return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
@@ -2806,6 +3209,9 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0)
return false;
+ if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
+ !Style.Cpp11BracedListStyle)
+ return false;
if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen))
return false;
if (Left.is(tok::l_paren) && Left.Previous &&
@@ -2831,7 +3237,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
tok::less, tok::coloncolon);
- if (Right.is(tok::kw___attribute))
+ if (Right.is(tok::kw___attribute) ||
+ (Right.is(tok::l_square) && Right.is(TT_AttributeSquare)))
return true;
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
@@ -2850,9 +3257,11 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
return true;
- if (Left.is(TT_InheritanceComma) && Style.BreakBeforeInheritanceComma)
+ if (Left.is(TT_InheritanceComma) &&
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
return false;
- if (Right.is(TT_InheritanceComma) && Style.BreakBeforeInheritanceComma)
+ if (Right.is(TT_InheritanceComma) &&
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
return true;
if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
(Left.is(tok::less) && Right.is(tok::less)))
@@ -2872,6 +3281,9 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
(Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
Left.getPrecedence() == prec::Assignment))
return true;
+ if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
+ (Left.is(tok::r_square) && Right.is(TT_AttributeSquare)))
+ return false;
return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
tok::kw_class, tok::kw_struct, tok::comment) ||
Right.isMemberAccess() ||
@@ -2898,6 +3310,7 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
llvm::errs() << Tok->FakeLParens[i] << "/";
llvm::errs() << " FakeRParens=" << Tok->FakeRParens;
+ llvm::errs() << " II=" << Tok->Tok.getIdentifierInfo();
llvm::errs() << " Text='" << Tok->TokenText << "'\n";
if (!Tok->Next)
assert(Tok == Line.Last);
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index 04a18d45b82e..a3124fcb3d65 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements a token annotator, i.e. creates
+/// This file implements a token annotator, i.e. creates
/// \c AnnotatedTokens out of \c FormatTokens with required extra information.
///
//===----------------------------------------------------------------------===//
@@ -40,6 +40,7 @@ public:
AnnotatedLine(const UnwrappedLine &Line)
: First(Line.Tokens.front().Tok), Level(Line.Level),
MatchingOpeningBlockLineIndex(Line.MatchingOpeningBlockLineIndex),
+ MatchingClosingBlockLineIndex(Line.MatchingClosingBlockLineIndex),
InPPDirective(Line.InPPDirective),
MustBeDeclaration(Line.MustBeDeclaration), MightBeFunctionDecl(false),
IsMultiVariableDeclStmt(false), Affected(false),
@@ -112,6 +113,7 @@ public:
LineType Type;
unsigned Level;
size_t MatchingOpeningBlockLineIndex;
+ size_t MatchingClosingBlockLineIndex;
bool InPPDirective;
bool MustBeDeclaration;
bool MightBeFunctionDecl;
@@ -136,14 +138,14 @@ private:
void operator=(const AnnotatedLine &) = delete;
};
-/// \brief Determines extra information about the tokens comprising an
+/// Determines extra information about the tokens comprising an
/// \c UnwrappedLine.
class TokenAnnotator {
public:
TokenAnnotator(const FormatStyle &Style, const AdditionalKeywords &Keywords)
: Style(Style), Keywords(Keywords) {}
- /// \brief Adapts the indent levels of comment lines to the indent of the
+ /// Adapts the indent levels of comment lines to the indent of the
/// subsequent line.
// FIXME: Can/should this be done in the UnwrappedLineParser?
void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines);
@@ -152,14 +154,14 @@ public:
void calculateFormattingInformation(AnnotatedLine &Line);
private:
- /// \brief Calculate the penalty for splitting before \c Tok.
+ /// Calculate the penalty for splitting before \c Tok.
unsigned splitPenalty(const AnnotatedLine &Line, const FormatToken &Tok,
bool InFunctionDecl);
bool spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left,
const FormatToken &Right);
- bool spaceRequiredBefore(const AnnotatedLine &Line, const FormatToken &Tok);
+ bool spaceRequiredBefore(const AnnotatedLine &Line, const FormatToken &Right);
bool mustBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
index 60dc1a7169d1..906dae40cbee 100644
--- a/lib/Format/UnwrappedLineFormatter.cpp
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "NamespaceEndCommentsFixer.h"
#include "UnwrappedLineFormatter.h"
#include "WhitespaceManager.h"
#include "llvm/Support/Debug.h"
@@ -26,7 +27,7 @@ bool startsExternCBlock(const AnnotatedLine &Line) {
NextNext && NextNext->is(tok::l_brace);
}
-/// \brief Tracks the indent level of \c AnnotatedLines across levels.
+/// Tracks the indent level of \c AnnotatedLines across levels.
///
/// \c nextLine must be called for each \c AnnotatedLine, after which \c
/// getIndent() will return the indent for the last line \c nextLine was called
@@ -45,10 +46,10 @@ public:
IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent);
}
- /// \brief Returns the indent for the current line.
+ /// Returns the indent for the current line.
unsigned getIndent() const { return Indent; }
- /// \brief Update the indent state given that \p Line is going to be formatted
+ /// Update the indent state given that \p Line is going to be formatted
/// next.
void nextLine(const AnnotatedLine &Line) {
Offset = getIndentOffset(*Line.First);
@@ -66,14 +67,14 @@ public:
Indent += Offset;
}
- /// \brief Update the indent state given that \p Line indent should be
+ /// Update the indent state given that \p Line indent should be
/// skipped.
void skipLine(const AnnotatedLine &Line) {
while (IndentForLevel.size() <= Line.Level)
IndentForLevel.push_back(Indent);
}
- /// \brief Update the level indent to adapt to the given \p Line.
+ /// Update the level indent to adapt to the given \p Line.
///
/// When a line is not formatted, we move the subsequent lines on the same
/// level to the same indent.
@@ -88,7 +89,7 @@ public:
}
private:
- /// \brief Get the offset of the line relatively to the level.
+ /// Get the offset of the line relatively to the level.
///
/// For example, 'public:' labels in classes are offset by 1 or 2
/// characters to the left from their level.
@@ -104,7 +105,7 @@ private:
return 0;
}
- /// \brief Get the indent of \p Level from \p IndentForLevel.
+ /// Get the indent of \p Level from \p IndentForLevel.
///
/// \p IndentForLevel must contain the indent for the level \c l
/// at \p IndentForLevel[l], or a value < 0 if the indent for
@@ -121,16 +122,16 @@ private:
const AdditionalKeywords &Keywords;
const unsigned AdditionalIndent;
- /// \brief The indent in characters for each level.
+ /// The indent in characters for each level.
std::vector<int> IndentForLevel;
- /// \brief Offset of the current line relative to the indent level.
+ /// Offset of the current line relative to the indent level.
///
/// For example, the 'public' keywords is often indented with a negative
/// offset.
int Offset = 0;
- /// \brief The current line's indent.
+ /// The current line's indent.
unsigned Indent = 0;
};
@@ -157,7 +158,7 @@ public:
: Style(Style), Keywords(Keywords), End(Lines.end()), Next(Lines.begin()),
AnnotatedLines(Lines) {}
- /// \brief Returns the next line, merging multiple lines into one if possible.
+ /// Returns the next line, merging multiple lines into one if possible.
const AnnotatedLine *getNextMergedLine(bool DryRun,
LevelIndentTracker &IndentTracker) {
if (Next == End)
@@ -179,7 +180,7 @@ public:
}
private:
- /// \brief Calculates how many lines can be merged into 1 starting at \p I.
+ /// Calculates how many lines can be merged into 1 starting at \p I.
unsigned
tryFitMultipleLinesInOne(LevelIndentTracker &IndentTracker,
SmallVectorImpl<AnnotatedLine *>::const_iterator I,
@@ -251,9 +252,9 @@ private:
if (Style.CompactNamespaces) {
if (isNamespaceDeclaration(TheLine)) {
int i = 0;
- unsigned closingLine = TheLine->MatchingOpeningBlockLineIndex - 1;
+ unsigned closingLine = TheLine->MatchingClosingBlockLineIndex - 1;
for (; I + 1 + i != E && isNamespaceDeclaration(I[i + 1]) &&
- closingLine == I[i + 1]->MatchingOpeningBlockLineIndex &&
+ closingLine == I[i + 1]->MatchingClosingBlockLineIndex &&
I[i + 1]->Last->TotalLength < Limit;
i++, closingLine--) {
// No extra indent for compacted namespaces
@@ -304,9 +305,23 @@ private:
if (TheLine->First->is(tok::l_brace) && TheLine->First == TheLine->Last &&
I != AnnotatedLines.begin() &&
I[-1]->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for)) {
- return Style.AllowShortBlocksOnASingleLine
- ? tryMergeSimpleBlock(I - 1, E, Limit)
- : 0;
+ unsigned MergedLines = 0;
+ if (Style.AllowShortBlocksOnASingleLine) {
+ MergedLines = tryMergeSimpleBlock(I - 1, E, Limit);
+ // If we managed to merge the block, discard the first merged line
+ // since we are merging starting from I.
+ if (MergedLines > 0)
+ --MergedLines;
+ }
+ return MergedLines;
+ }
+ // Don't merge a block with left brace wrapped after ObjC special blocks.
+ if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
+ I[-1]->First->is(tok::at) && I[-1]->First->Next) {
+ tok::ObjCKeywordKind kwId = I[-1]->First->Next->Tok.getObjCKeywordID();
+ if (kwId == clang::tok::objc_autoreleasepool ||
+ kwId == clang::tok::objc_synchronized)
+ return 0;
}
// Try to merge a block with left brace wrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
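
A hedged example of the merge exception above: with the left brace wrapped after an ObjC special block, the lines are kept separate even when AllowShortBlocksOnASingleLine would otherwise permit merging:

@autoreleasepool
{               // '{' stays on its own line; the merge returns 0 here
  work();
}
@synchronized(self)
{               // same for @synchronized
  work();
}
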
@@ -644,14 +659,14 @@ static void markFinalized(FormatToken *Tok) {
static void printLineState(const LineState &State) {
llvm::dbgs() << "State: ";
for (const ParenState &P : State.Stack) {
- llvm::dbgs() << P.Indent << "|" << P.LastSpace << "|" << P.NestedBlockIndent
- << " ";
+ llvm::dbgs() << (P.Tok ? P.Tok->TokenText : "F") << "|" << P.Indent << "|"
+ << P.LastSpace << "|" << P.NestedBlockIndent << " ";
}
llvm::dbgs() << State.NextToken->TokenText << "\n";
}
#endif
-/// \brief Base class for classes that format one \c AnnotatedLine.
+/// Base class for classes that format one \c AnnotatedLine.
class LineFormatter {
public:
LineFormatter(ContinuationIndenter *Indenter, WhitespaceManager *Whitespaces,
@@ -661,7 +676,7 @@ public:
BlockFormatter(BlockFormatter) {}
virtual ~LineFormatter() {}
- /// \brief Formats an \c AnnotatedLine and returns the penalty.
+ /// Formats an \c AnnotatedLine and returns the penalty.
///
/// If \p DryRun is \c false, directly applies the changes.
virtual unsigned formatLine(const AnnotatedLine &Line,
@@ -670,7 +685,7 @@ public:
bool DryRun) = 0;
protected:
- /// \brief If the \p State's next token is an r_brace closing a nested block,
+ /// If the \p State's next token is an r_brace closing a nested block,
/// format the nested block before it.
///
/// Returns \c true if all children could be placed successfully and adapts
@@ -752,7 +767,7 @@ private:
UnwrappedLineFormatter *BlockFormatter;
};
-/// \brief Formatter that keeps the existing line breaks.
+/// Formatter that keeps the existing line breaks.
class NoColumnLimitLineFormatter : public LineFormatter {
public:
NoColumnLimitLineFormatter(ContinuationIndenter *Indenter,
@@ -761,7 +776,7 @@ public:
UnwrappedLineFormatter *BlockFormatter)
: LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
- /// \brief Formats the line, simply keeping all of the input's line breaking
+ /// Formats the line, simply keeping all of the input's line breaking
/// decisions.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
unsigned FirstStartColumn, bool DryRun) override {
@@ -780,7 +795,7 @@ public:
}
};
-/// \brief Formatter that puts all tokens into a single line without breaks.
+/// Formatter that puts all tokens into a single line without breaks.
class NoLineBreakFormatter : public LineFormatter {
public:
NoLineBreakFormatter(ContinuationIndenter *Indenter,
@@ -788,7 +803,7 @@ public:
UnwrappedLineFormatter *BlockFormatter)
: LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
- /// \brief Puts all tokens into a single line.
+ /// Puts all tokens into a single line.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
unsigned FirstStartColumn, bool DryRun) override {
unsigned Penalty = 0;
@@ -803,7 +818,7 @@ public:
}
};
-/// \brief Finds the best way to break lines.
+/// Finds the best way to break lines.
class OptimizingLineFormatter : public LineFormatter {
public:
OptimizingLineFormatter(ContinuationIndenter *Indenter,
@@ -812,7 +827,7 @@ public:
UnwrappedLineFormatter *BlockFormatter)
: LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
- /// \brief Formats the line by finding the best line breaks with line lengths
+ /// Formats the line by finding the best line breaks with line lengths
/// below the column limit.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
unsigned FirstStartColumn, bool DryRun) override {
@@ -835,14 +850,14 @@ private:
}
};
- /// \brief A pair of <penalty, count> that is used to prioritize the BFS on.
+ /// A pair of <penalty, count> that is used to prioritize the BFS on.
///
/// In case of equal penalties, we want to prefer states that were inserted
/// first. During state generation we make sure that we insert states first
/// that break the line as late as possible.
typedef std::pair<unsigned, unsigned> OrderedPenalty;
- /// \brief An edge in the solution space from \c Previous->State to \c State,
+ /// An edge in the solution space from \c Previous->State to \c State,
/// inserting a newline dependent on the \c NewLine.
struct StateNode {
StateNode(const LineState &State, bool NewLine, StateNode *Previous)
@@ -852,16 +867,16 @@ private:
StateNode *Previous;
};
- /// \brief An item in the prioritized BFS search queue. The \c StateNode's
+ /// An item in the prioritized BFS search queue. The \c StateNode's
/// \c State has the given \c OrderedPenalty.
typedef std::pair<OrderedPenalty, StateNode *> QueueItem;
- /// \brief The BFS queue type.
+ /// The BFS queue type.
typedef std::priority_queue<QueueItem, std::vector<QueueItem>,
std::greater<QueueItem>>
QueueType;
- /// \brief Analyze the entire solution space starting from \p InitialState.
+ /// Analyze the entire solution space starting from \p InitialState.
///
/// This implements a variant of Dijkstra's algorithm on the graph that spans
/// the solution space (\c LineStates are the nodes). The algorithm tries to
@@ -890,7 +905,8 @@ private:
Penalty = Queue.top().first.first;
StateNode *Node = Queue.top().second;
if (!Node->State.NextToken) {
- DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << "\n---\nPenalty for line: " << Penalty << "\n");
break;
}
Queue.pop();
@@ -914,7 +930,7 @@ private:
if (Queue.empty()) {
// We were unable to find a solution; do nothing.
// FIXME: Add diagnostic?
- DEBUG(llvm::dbgs() << "Could not find a solution.\n");
+ LLVM_DEBUG(llvm::dbgs() << "Could not find a solution.\n");
return 0;
}
@@ -922,13 +938,14 @@ private:
if (!DryRun)
reconstructPath(InitialState, Queue.top().second);
- DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n");
- DEBUG(llvm::dbgs() << "---\n");
+ LLVM_DEBUG(llvm::dbgs()
+ << "Total number of analyzed states: " << Count << "\n");
+ LLVM_DEBUG(llvm::dbgs() << "---\n");
return Penalty;
}
- /// \brief Add the following state to the analysis queue \c Queue.
+ /// Add the state following \p PreviousNode to the analysis queue \c Queue.
///
/// Assume the current state is \p PreviousNode and has been reached with a
/// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
@@ -950,7 +967,7 @@ private:
++(*Count);
}
- /// \brief Applies the best formatting by reconstructing the path in the
+ /// Applies the best formatting by reconstructing the path in the
/// solution space that leads to \c Best.
void reconstructPath(LineState &State, StateNode *Best) {
std::deque<StateNode *> Path;
@@ -965,7 +982,7 @@ private:
formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
- DEBUG({
+ LLVM_DEBUG({
printLineState((*I)->Previous->State);
if ((*I)->NewLine) {
llvm::dbgs() << "Penalty for placing "
@@ -1018,9 +1035,12 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
// scope was added. However, we need to carefully stop doing this when we
// exit the scope of affected lines to prevent indenting the entire
// remaining file if it is currently missing a closing brace.
+ bool PreviousRBrace =
+ PreviousLine && PreviousLine->startsWith(tok::r_brace);
bool ContinueFormatting =
TheLine.Level > RangeMinLevel ||
- (TheLine.Level == RangeMinLevel && !TheLine.startsWith(tok::r_brace));
+ (TheLine.Level == RangeMinLevel && !PreviousRBrace &&
+ !TheLine.startsWith(tok::r_brace));
bool FixIndentation = (FixBadIndentation || ContinueFormatting) &&
Indent != TheLine.First->OriginalColumn;
@@ -1036,8 +1056,7 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
if (ShouldFormat && TheLine.Type != LT_Invalid) {
if (!DryRun) {
bool LastLine = Line->First->is(tok::eof);
- formatFirstToken(TheLine, PreviousLine,
- Indent,
+ formatFirstToken(TheLine, PreviousLine, Lines, Indent,
LastLine ? LastStartColumn : NextStartColumn + Indent);
}
@@ -1081,7 +1100,7 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
TheLine.LeadingEmptyLinesAffected);
// Format the first token.
if (ReformatLeadingWhitespace)
- formatFirstToken(TheLine, PreviousLine,
+ formatFirstToken(TheLine, PreviousLine, Lines,
TheLine.First->OriginalColumn,
TheLine.First->OriginalColumn);
else
@@ -1103,10 +1122,10 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
return Penalty;
}
-void UnwrappedLineFormatter::formatFirstToken(const AnnotatedLine &Line,
- const AnnotatedLine *PreviousLine,
- unsigned Indent,
- unsigned NewlineIndent) {
+void UnwrappedLineFormatter::formatFirstToken(
+ const AnnotatedLine &Line, const AnnotatedLine *PreviousLine,
+ const SmallVectorImpl<AnnotatedLine *> &Lines, unsigned Indent,
+ unsigned NewlineIndent) {
FormatToken &RootToken = *Line.First;
if (RootToken.is(tok::eof)) {
unsigned Newlines = std::min(RootToken.NewlinesBefore, 1u);
@@ -1120,7 +1139,9 @@ void UnwrappedLineFormatter::formatFirstToken(const AnnotatedLine &Line,
// Remove empty lines before "}" where applicable.
if (RootToken.is(tok::r_brace) &&
(!RootToken.Next ||
- (RootToken.Next->is(tok::semi) && !RootToken.Next->Next)))
+ (RootToken.Next->is(tok::semi) && !RootToken.Next->Next)) &&
+ // Do not remove empty lines before namespace closing "}".
+ !getNamespaceToken(&Line, Lines))
Newlines = std::min(Newlines, 1u);
// Remove empty lines at the start of nested blocks (lambdas/arrow functions)
if (PreviousLine == nullptr && Line.Level > 0)
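
An illustrative before/after (sample input, not from the patch's tests) of
the getNamespaceToken check above, assuming the style allows kept blank lines:

    namespace bar {
    int f();


    }  // both blank lines now survive before a namespace's closing brace

    int g() {
      return 0;


    }  // before any other lone closing brace they are still removed
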
diff --git a/lib/Format/UnwrappedLineFormatter.h b/lib/Format/UnwrappedLineFormatter.h
index 6432ca83a4c9..dac210ea62b1 100644
--- a/lib/Format/UnwrappedLineFormatter.h
+++ b/lib/Format/UnwrappedLineFormatter.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Implements a combinartorial exploration of all the different
+/// Implements a combinatorial exploration of all the different
/// line breaks with which unwrapped lines can be formatted.
///
//===----------------------------------------------------------------------===//
@@ -37,7 +37,7 @@ public:
: Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
Keywords(Keywords), SourceMgr(SourceMgr), Status(Status) {}
- /// \brief Format the current block and return the penalty.
+ /// Format the current block and return the penalty.
unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines,
bool DryRun = false, int AdditionalIndent = 0,
bool FixBadIndentation = false,
@@ -46,13 +46,14 @@ public:
unsigned LastStartColumn = 0);
private:
- /// \brief Add a new line and the required indent before the first Token
+ /// Add a new line and the required indent before the first Token
/// of the \c UnwrappedLine if there was no structural parsing error.
void formatFirstToken(const AnnotatedLine &Line,
- const AnnotatedLine *PreviousLine, unsigned Indent,
- unsigned NewlineIndent);
+ const AnnotatedLine *PreviousLine,
+ const SmallVectorImpl<AnnotatedLine *> &Lines,
+ unsigned Indent, unsigned NewlineIndent);
- /// \brief Returns the column limit for a line, taking into account whether we
+ /// Returns the column limit for a line, taking into account whether we
/// need an escaped newline due to a continued preprocessor directive.
unsigned getColumnLimit(bool InPPDirective,
const AnnotatedLine *NextLine) const;
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index b8608dcac9c7..e5afa1264abb 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file contains the implementation of the UnwrappedLineParser,
+/// This file contains the implementation of the UnwrappedLineParser,
/// which turns a stream of tokens into UnwrappedLines.
///
//===----------------------------------------------------------------------===//
@@ -83,6 +83,8 @@ public:
: Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
Token(nullptr), PreviousToken(nullptr) {
+ FakeEOF.Tok.startToken();
+ FakeEOF.Tok.setKind(tok::eof);
TokenSource = this;
Line.Level = 0;
Line.InPPDirective = true;
@@ -102,7 +104,7 @@ public:
PreviousToken = Token;
Token = PreviousTokenSource->getNextToken();
if (eof())
- return getFakeEOF();
+ return &FakeEOF;
return Token;
}
@@ -121,17 +123,7 @@ private:
/*MinColumnToken=*/PreviousToken);
}
- FormatToken *getFakeEOF() {
- static bool EOFInitialized = false;
- static FormatToken FormatTok;
- if (!EOFInitialized) {
- FormatTok.Tok.startToken();
- FormatTok.Tok.setKind(tok::eof);
- EOFInitialized = true;
- }
- return &FormatTok;
- }
-
+ FormatToken FakeEOF;
UnwrappedLine &Line;
FormatTokenSource *&TokenSource;
FormatToken *&ResetToken;
@@ -234,14 +226,17 @@ UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
CurrentLines(&Lines), Style(Style), Keywords(Keywords),
CommentPragmasRegex(Style.CommentPragmas), Tokens(nullptr),
Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1),
- IfNdefCondition(nullptr), FoundIncludeGuardStart(false),
- IncludeGuardRejected(false), FirstStartColumn(FirstStartColumn) {}
+ IncludeGuard(Style.IndentPPDirectives == FormatStyle::PPDIS_None
+ ? IG_Rejected
+ : IG_Inited),
+ IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn) {}
void UnwrappedLineParser::reset() {
PPBranchLevel = -1;
- IfNdefCondition = nullptr;
- FoundIncludeGuardStart = false;
- IncludeGuardRejected = false;
+ IncludeGuard = Style.IndentPPDirectives == FormatStyle::PPDIS_None
+ ? IG_Rejected
+ : IG_Inited;
+ IncludeGuardToken = nullptr;
Line.reset(new UnwrappedLine);
CommentsBeforeNextToken.clear();
FormatTok = nullptr;
@@ -257,13 +252,21 @@ void UnwrappedLineParser::parse() {
IndexedTokenSource TokenSource(AllTokens);
Line->FirstStartColumn = FirstStartColumn;
do {
- DEBUG(llvm::dbgs() << "----\n");
+ LLVM_DEBUG(llvm::dbgs() << "----\n");
reset();
Tokens = &TokenSource;
TokenSource.reset();
readToken();
parseFile();
+
+ // If we found an include guard then all preprocessor directives (other than
+ // the guard) are over-indented by one.
+ if (IncludeGuard == IG_Found)
+ for (auto &Line : Lines)
+ if (Line.InPPDirective && Line.Level > 0)
+ --Line.Level;
+
// Create line with eof token.
pushToken(FormatTok);
addUnwrappedLine();
@@ -300,6 +303,18 @@ void UnwrappedLineParser::parseFile() {
else
parseLevel(/*HasOpeningBrace=*/false);
// Make sure to format the remaining tokens.
+ //
+ // LK_TextProto is special since its top-level is parsed as the body of a
+ // braced list, which does not necessarily have natural line separators such
+ // as a semicolon. Comments after the last entry that have been determined to
+ // not belong to that line, as in:
+ // key: value
+ // // endfile comment
+ // do not have a chance to be put on a line of their own until this point.
+ // Here we add this newline before end-of-file comments.
+ if (Style.Language == FormatStyle::LK_TextProto &&
+ !CommentsBeforeNextToken.empty())
+ addUnwrappedLine();
flushComments(true);
addUnwrappedLine();
}
@@ -333,7 +348,19 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
nextToken();
addUnwrappedLine();
break;
- case tok::kw_default:
+ case tok::kw_default: {
+ unsigned StoredPosition = Tokens->getPosition();
+ FormatToken *Next = Tokens->getNextToken();
+ FormatTok = Tokens->setPosition(StoredPosition);
+ if (Next && Next->isNot(tok::colon)) {
+ // default not followed by ':' is not a case label; treat it like
+ // an identifier.
+ parseStructuralElement();
+ break;
+ }
+ // Else, if it is 'default:', fall through to the case handling.
+ LLVM_FALLTHROUGH;
+ }
case tok::kw_case:
if (Style.Language == FormatStyle::LK_JavaScript &&
Line->MustBeDeclaration) {
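
Illustrative inputs (not from the patch's tests) for the new lookahead:

    switch (x) {
    default:        // 'default' followed by ':' is still a case label
      break;
    }

    // In Java source the same keyword can begin a method declaration; it is
    // now parsed as a structural element rather than a label:
    //   interface I { default void f() {} }
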
@@ -426,12 +453,19 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
(Style.isCpp() && NextTok->is(tok::l_paren)) ||
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
tok::r_paren, tok::r_square, tok::l_brace,
- tok::l_square, tok::ellipsis) ||
+ tok::ellipsis) ||
(NextTok->is(tok::identifier) &&
!PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)) ||
(NextTok->is(tok::semi) &&
(!ExpectClassBody || LBraceStack.size() != 1)) ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
+ if (NextTok->is(tok::l_square)) {
+ // We can have an array subscript after a braced init
+ // list, but C++11 attributes are expected after blocks.
+ NextTok = Tokens->getNextToken();
+ ++ReadTokens;
+ ProbablyBracedList = NextTok->isNot(tok::l_square);
+ }
}
if (ProbablyBracedList) {
Tok->BlockKind = BK_BracedInit;
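
Sample inputs (illustrative only) for the extra l_square lookahead:

    int first = std::vector<int>{1, 2, 3}[0]; // '}' followed by '[' but not
                                              // '[[': a braced init list
    void f() {}
    [[nodiscard]] int g(); // '}' followed by '[[': the braces close a block
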
@@ -540,7 +574,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
Line->MatchingOpeningBlockLineIndex = OpeningLineIndex;
if (OpeningLineIndex != UnwrappedLine::kInvalidIndex) {
// Update the opening line to add the forward reference as well
- (*CurrentLines)[OpeningLineIndex].MatchingOpeningBlockLineIndex =
+ (*CurrentLines)[OpeningLineIndex].MatchingClosingBlockLineIndex =
CurrentLines->size() - 1;
}
}
@@ -712,26 +746,27 @@ void UnwrappedLineParser::parsePPIf(bool IfDef) {
// If there's a #ifndef on the first line, and the only lines before it are
// comments, it could be an include guard.
bool MaybeIncludeGuard = IfNDef;
- if (!IncludeGuardRejected && !FoundIncludeGuardStart && MaybeIncludeGuard) {
+ if (IncludeGuard == IG_Inited && MaybeIncludeGuard)
for (auto &Line : Lines) {
if (!Line.Tokens.front().Tok->is(tok::comment)) {
MaybeIncludeGuard = false;
- IncludeGuardRejected = true;
+ IncludeGuard = IG_Rejected;
break;
}
}
- }
--PPBranchLevel;
parsePPUnknown();
++PPBranchLevel;
- if (!IncludeGuardRejected && !FoundIncludeGuardStart && MaybeIncludeGuard)
- IfNdefCondition = IfCondition;
+ if (IncludeGuard == IG_Inited && MaybeIncludeGuard) {
+ IncludeGuard = IG_IfNdefed;
+ IncludeGuardToken = IfCondition;
+ }
}
void UnwrappedLineParser::parsePPElse() {
// If a potential include guard has an #else, it's not an include guard.
- if (FoundIncludeGuardStart && PPBranchLevel == 0)
- FoundIncludeGuardStart = false;
+ if (IncludeGuard == IG_Defined && PPBranchLevel == 0)
+ IncludeGuard = IG_Rejected;
conditionalCompilationAlternative();
if (PPBranchLevel > -1)
--PPBranchLevel;
@@ -745,34 +780,37 @@ void UnwrappedLineParser::parsePPEndIf() {
conditionalCompilationEnd();
parsePPUnknown();
// If the #endif of a potential include guard is the last thing in the file,
- // then we count it as a real include guard and subtract one from every
- // preprocessor indent.
+ // then we have found an include guard.
unsigned TokenPosition = Tokens->getPosition();
FormatToken *PeekNext = AllTokens[TokenPosition];
- if (FoundIncludeGuardStart && PPBranchLevel == -1 && PeekNext->is(tok::eof) &&
+ if (IncludeGuard == IG_Defined && PPBranchLevel == -1 &&
+ PeekNext->is(tok::eof) &&
Style.IndentPPDirectives != FormatStyle::PPDIS_None)
- for (auto &Line : Lines)
- if (Line.InPPDirective && Line.Level > 0)
- --Line.Level;
+ IncludeGuard = IG_Found;
}
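
An illustrative header (assuming IndentPPDirectives is not PPDIS_None),
annotated with the IG_* states the new search walks through:

    #ifndef HEADER_H   // IG_IfNdefed: candidate guard condition recorded
    #define HEADER_H   // IG_Defined: matching #define found
    #if SOME_OPTION    // over-indented by one level until the fixup in
    #  define FLAG 1   //   parse(), because the guard opens a PP branch
    #endif
    #endif             // last token before EOF: IG_Found, levels re-adjusted
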
void UnwrappedLineParser::parsePPDefine() {
nextToken();
if (FormatTok->Tok.getKind() != tok::identifier) {
+ IncludeGuard = IG_Rejected;
+ IncludeGuardToken = nullptr;
parsePPUnknown();
return;
}
- if (IfNdefCondition && IfNdefCondition->TokenText == FormatTok->TokenText) {
- FoundIncludeGuardStart = true;
+
+ if (IncludeGuard == IG_IfNdefed &&
+ IncludeGuardToken->TokenText == FormatTok->TokenText) {
+ IncludeGuard = IG_Defined;
+ IncludeGuardToken = nullptr;
for (auto &Line : Lines) {
if (!Line.Tokens.front().Tok->isOneOf(tok::comment, tok::hash)) {
- FoundIncludeGuardStart = false;
+ IncludeGuard = IG_Rejected;
break;
}
}
}
- IfNdefCondition = nullptr;
+
nextToken();
if (FormatTok->Tok.getKind() == tok::l_paren &&
FormatTok->WhitespaceRange.getBegin() ==
@@ -799,7 +837,6 @@ void UnwrappedLineParser::parsePPUnknown() {
if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash)
Line->Level += PPBranchLevel + 1;
addUnwrappedLine();
- IfNdefCondition = nullptr;
}
// Here we blacklist certain tokens that are not usually the first token in an
@@ -932,49 +969,6 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
switch (FormatTok->Tok.getKind()) {
- case tok::at:
- nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
- nextToken();
- parseBracedList();
- break;
- }
- switch (FormatTok->Tok.getObjCKeywordID()) {
- case tok::objc_public:
- case tok::objc_protected:
- case tok::objc_package:
- case tok::objc_private:
- return parseAccessSpecifier();
- case tok::objc_interface:
- case tok::objc_implementation:
- return parseObjCInterfaceOrImplementation();
- case tok::objc_protocol:
- return parseObjCProtocol();
- case tok::objc_end:
- return; // Handled by the caller.
- case tok::objc_optional:
- case tok::objc_required:
- nextToken();
- addUnwrappedLine();
- return;
- case tok::objc_autoreleasepool:
- nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterObjCDeclaration)
- addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/false);
- }
- addUnwrappedLine();
- return;
- case tok::objc_try:
- // This branch isn't strictly necessary (the kw_try case below would
- // do this too after the tok::at is parsed above). But be explicit.
- parseTryCatch();
- return;
- default:
- break;
- }
- break;
case tok::kw_asm:
nextToken();
if (FormatTok->is(tok::l_brace)) {
@@ -1032,8 +1026,12 @@ void UnwrappedLineParser::parseStructuralElement() {
// 'default: string' field declaration.
break;
nextToken();
- parseLabel();
- return;
+ if (FormatTok->is(tok::colon)) {
+ parseLabel();
+ return;
+ }
+ // e.g. "default void f() {}" in a Java interface.
+ break;
case tok::kw_case:
if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
// 'case: string' field declaration.
@@ -1117,6 +1115,56 @@ void UnwrappedLineParser::parseStructuralElement() {
if (FormatTok->Tok.is(tok::l_brace)) {
nextToken();
parseBracedList();
+ break;
+ }
+ switch (FormatTok->Tok.getObjCKeywordID()) {
+ case tok::objc_public:
+ case tok::objc_protected:
+ case tok::objc_package:
+ case tok::objc_private:
+ return parseAccessSpecifier();
+ case tok::objc_interface:
+ case tok::objc_implementation:
+ return parseObjCInterfaceOrImplementation();
+ case tok::objc_protocol:
+ if (parseObjCProtocol())
+ return;
+ break;
+ case tok::objc_end:
+ return; // Handled by the caller.
+ case tok::objc_optional:
+ case tok::objc_required:
+ nextToken();
+ addUnwrappedLine();
+ return;
+ case tok::objc_autoreleasepool:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterControlStatement)
+ addUnwrappedLine();
+ parseBlock(/*MustBeDeclaration=*/false);
+ }
+ addUnwrappedLine();
+ return;
+ case tok::objc_synchronized:
+ nextToken();
+ if (FormatTok->Tok.is(tok::l_paren))
+ // Skip synchronization object
+ parseParens();
+ if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterControlStatement)
+ addUnwrappedLine();
+ parseBlock(/*MustBeDeclaration=*/false);
+ }
+ addUnwrappedLine();
+ return;
+ case tok::objc_try:
+ // This branch isn't strictly necessary (the kw_try case below would
+ // do this too after the tok::at is parsed above). But be explicit.
+ parseTryCatch();
+ return;
+ default:
+ break;
}
break;
case tok::kw_enum:
@@ -1369,13 +1417,16 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
const FormatToken *Previous = FormatTok->Previous;
if (Previous &&
(Previous->isOneOf(tok::identifier, tok::kw_operator, tok::kw_new,
- tok::kw_delete) ||
+ tok::kw_delete, tok::l_square) ||
FormatTok->isCppStructuredBinding(Style) || Previous->closesScope() ||
Previous->isSimpleTypeSpecifier())) {
nextToken();
return false;
}
nextToken();
+ if (FormatTok->is(tok::l_square)) {
+ return false;
+ }
parseSquare(/*LambdaIntroducer=*/true);
return true;
}
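
Illustrative inputs (sample code, not the patch's tests) for the two new
early exits above:

    arr[i] = 0;               // previous token is an identifier: a subscript
    [[deprecated]] void f();  // '[' directly followed by '[': an attribute
    auto l = [&](int x) { return x; }; // a genuine lambda introducer
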
@@ -2083,11 +2134,33 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// "} n, m;" will end up in one unwrapped line.
}
+void UnwrappedLineParser::parseObjCMethod() {
+ assert(FormatTok->Tok.isOneOf(tok::l_paren, tok::identifier) &&
+ "'(' or identifier expected.");
+ do {
+ if (FormatTok->Tok.is(tok::semi)) {
+ nextToken();
+ addUnwrappedLine();
+ return;
+ } else if (FormatTok->Tok.is(tok::l_brace)) {
+ parseBlock(/*MustBeDeclaration=*/false);
+ addUnwrappedLine();
+ return;
+ } else {
+ nextToken();
+ }
+ } while (!eof());
+}
+
void UnwrappedLineParser::parseObjCProtocolList() {
assert(FormatTok->Tok.is(tok::less) && "'<' expected.");
- do
+ do {
nextToken();
- while (!eof() && FormatTok->Tok.isNot(tok::greater));
+ // Early exit in case someone forgot a close angle.
+ if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
+ FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ return;
+ } while (!eof() && FormatTok->Tok.isNot(tok::greater));
nextToken(); // Skip '>'.
}
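
Sample Objective-C input (illustrative) showing the two stopping points of
parseObjCMethod above:

    - (void)reset;                  // declaration: stops at ';'
    + (instancetype)sharedInstance  // definition: stops at '{' and parses
    {                               // the body as a block
      return nil;
    }
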
@@ -2106,6 +2179,9 @@ void UnwrappedLineParser::parseObjCUntilAtEnd() {
// Ignore stray "}". parseStructuralElement doesn't consume them.
nextToken();
addUnwrappedLine();
+ } else if (FormatTok->isOneOf(tok::minus, tok::plus)) {
+ nextToken();
+ parseObjCMethod();
} else {
parseStructuralElement();
}
@@ -2113,10 +2189,37 @@ void UnwrappedLineParser::parseObjCUntilAtEnd() {
}
void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
+ assert(FormatTok->Tok.getObjCKeywordID() == tok::objc_interface ||
+ FormatTok->Tok.getObjCKeywordID() == tok::objc_implementation);
nextToken();
nextToken(); // interface name
- // @interface can be followed by either a base class, or a category.
+ // @interface can be followed by a lightweight generic
+ // specialization list, then either a base class or a category.
+ if (FormatTok->Tok.is(tok::less)) {
+ // Unlike protocol lists, generic parameterizations support
+ // nested angles:
+ //
+ // @interface Foo<ValueType : id <NSCopying, NSSecureCoding>> :
+ // NSObject <NSCopying, NSSecureCoding>
+ //
+ // so we need to count how many open angles we have left.
+ unsigned NumOpenAngles = 1;
+ do {
+ nextToken();
+ // Early exit in case someone forgot a close angle.
+ if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
+ FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ break;
+ if (FormatTok->Tok.is(tok::less))
+ ++NumOpenAngles;
+ else if (FormatTok->Tok.is(tok::greater)) {
+ assert(NumOpenAngles > 0 && "'>' makes NumOpenAngles negative");
+ --NumOpenAngles;
+ }
+ } while (!eof() && NumOpenAngles != 0);
+ nextToken(); // Skip '>'.
+ }
if (FormatTok->Tok.is(tok::colon)) {
nextToken();
nextToken(); // base class name
@@ -2140,8 +2243,21 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
parseObjCUntilAtEnd();
}
-void UnwrappedLineParser::parseObjCProtocol() {
+// Returns true for the declaration/definition form of @protocol,
+// false for the expression form.
+bool UnwrappedLineParser::parseObjCProtocol() {
+ assert(FormatTok->Tok.getObjCKeywordID() == tok::objc_protocol);
nextToken();
+
+ if (FormatTok->is(tok::l_paren))
+ // The expression form of @protocol, e.g. "Protocol* p = @protocol(foo);".
+ return false;
+
+ // The definition/declaration form,
+ // @protocol Foo
+ // - (int)someMethod;
+ // @end
+
nextToken(); // protocol name
if (FormatTok->Tok.is(tok::less))
@@ -2150,11 +2266,13 @@ void UnwrappedLineParser::parseObjCProtocol() {
// Check for protocol declaration.
if (FormatTok->Tok.is(tok::semi)) {
nextToken();
- return addUnwrappedLine();
+ addUnwrappedLine();
+ return true;
}
addUnwrappedLine();
parseObjCUntilAtEnd();
+ return true;
}
void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
@@ -2231,7 +2349,7 @@ LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
void UnwrappedLineParser::addUnwrappedLine() {
if (Line->Tokens.empty())
return;
- DEBUG({
+ LLVM_DEBUG({
if (CurrentLines == &Lines)
printDebugInfo(*Line);
});
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index 1d8ccabbd0f8..87254832c635 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file contains the declaration of the UnwrappedLineParser,
+/// This file contains the declaration of the UnwrappedLineParser,
/// which turns a stream of tokens into UnwrappedLines.
///
//===----------------------------------------------------------------------===//
@@ -28,7 +28,7 @@ namespace format {
struct UnwrappedLineNode;
-/// \brief An unwrapped line is a sequence of \c Token, that we would like to
+/// An unwrapped line is a sequence of \c Tokens that we would like to
/// put on a single line if there were no column limit.
///
/// This is used as a main interface between the \c UnwrappedLineParser and the
@@ -38,22 +38,26 @@ struct UnwrappedLine {
UnwrappedLine();
// FIXME: Don't use std::list here.
- /// \brief The \c Tokens comprising this \c UnwrappedLine.
+ /// The \c Tokens comprising this \c UnwrappedLine.
std::list<UnwrappedLineNode> Tokens;
- /// \brief The indent level of the \c UnwrappedLine.
+ /// The indent level of the \c UnwrappedLine.
unsigned Level;
- /// \brief Whether this \c UnwrappedLine is part of a preprocessor directive.
+ /// Whether this \c UnwrappedLine is part of a preprocessor directive.
bool InPPDirective;
bool MustBeDeclaration;
- /// \brief If this \c UnwrappedLine closes a block in a sequence of lines,
+ /// If this \c UnwrappedLine closes a block in a sequence of lines,
/// \c MatchingOpeningBlockLineIndex stores the index of the corresponding
/// opening line. Otherwise, \c MatchingOpeningBlockLineIndex must be
/// \c kInvalidIndex.
- size_t MatchingOpeningBlockLineIndex;
+ size_t MatchingOpeningBlockLineIndex = kInvalidIndex;
+
+ /// If this \c UnwrappedLine opens a block, stores the index of the
+ /// line with the corresponding closing brace.
+ size_t MatchingClosingBlockLineIndex = kInvalidIndex;
static const size_t kInvalidIndex = -1;
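
Illustratively, for a three-line block the two indices now cross-reference
each other (indices are positions in the line list, not source lines):

    void f() { // index 0: MatchingClosingBlockLineIndex == 2
      int x;   // index 1
    }          // index 2: MatchingOpeningBlockLineIndex == 0
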
@@ -116,10 +120,11 @@ private:
// parses the record as a child block, i.e. if the class declaration is an
// expression.
void parseRecord(bool ParseAsExpr = false);
+ void parseObjCMethod();
void parseObjCProtocolList();
void parseObjCUntilAtEnd();
void parseObjCInterfaceOrImplementation();
- void parseObjCProtocol();
+ bool parseObjCProtocol();
void parseJavaScriptEs6ImportExport();
bool tryToParseLambda();
bool tryToParseLambdaIntroducer();
@@ -141,7 +146,7 @@ private:
// token.
//
// NextTok specifies the next token. A null pointer NextTok is supported, and
- // signifies either the absense of a next token, or that the next token
+ // signifies either the absence of a next token, or that the next token
// shouldn't be taken into account for the analysis.
void distributeComments(const SmallVectorImpl<FormatToken *> &Comments,
const FormatToken *NextTok);
@@ -248,10 +253,23 @@ private:
// sequence.
std::stack<int> PPChainBranchIndex;
- // Contains the #ifndef condition for a potential include guard.
- FormatToken *IfNdefCondition;
- bool FoundIncludeGuardStart;
- bool IncludeGuardRejected;
+ // Include guard search state. Used to fix up preprocessor indent levels
+ // so that include guards do not participate in indentation.
+ enum IncludeGuardState {
+ IG_Inited, // Search started, looking for #ifndef.
+ IG_IfNdefed, // #ifndef found, IncludeGuardToken points to condition.
+ IG_Defined, // Matching #define found, checking other requirements.
+ IG_Found, // All requirements met, need to fix indents.
+ IG_Rejected, // Search failed or never started.
+ };
+
+ // Current state of include guard search.
+ IncludeGuardState IncludeGuard;
+
+ // Points to the #ifndef condition for a potential include guard. Null unless
+ // IncludeGuardState == IG_IfNdefed.
+ FormatToken *IncludeGuardToken;
+
// Contains the first start column where the source begins. This is zero for
// normal source code and may be nonzero when formatting a code fragment that
// does not start at the beginning of the file.
diff --git a/lib/Format/UsingDeclarationsSorter.cpp b/lib/Format/UsingDeclarationsSorter.cpp
index ef0c7a7d5a45..9e49e7913033 100644
--- a/lib/Format/UsingDeclarationsSorter.cpp
+++ b/lib/Format/UsingDeclarationsSorter.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements UsingDeclarationsSorter, a TokenAnalyzer that
+/// This file implements UsingDeclarationsSorter, a TokenAnalyzer that
/// sorts consecutive using declarations.
///
//===----------------------------------------------------------------------===//
@@ -161,7 +161,7 @@ void endUsingDeclarationBlock(
StringRef Text(SourceMgr.getCharacterData(SortedBegin),
SourceMgr.getCharacterData(SortedEnd) -
SourceMgr.getCharacterData(SortedBegin));
- DEBUG({
+ LLVM_DEBUG({
StringRef OldText(SourceMgr.getCharacterData(Begin),
SourceMgr.getCharacterData(End) -
SourceMgr.getCharacterData(Begin));
@@ -187,8 +187,7 @@ std::pair<tooling::Replacements, unsigned> UsingDeclarationsSorter::analyze(
TokenAnnotator &Annotator, SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) {
const SourceManager &SourceMgr = Env.getSourceManager();
- AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
- AnnotatedLines.end());
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Fixes;
SmallVector<UsingDeclaration, 4> UsingDeclarations;
for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
diff --git a/lib/Format/UsingDeclarationsSorter.h b/lib/Format/UsingDeclarationsSorter.h
index 6f137712d841..7e5cf7610d67 100644
--- a/lib/Format/UsingDeclarationsSorter.h
+++ b/lib/Format/UsingDeclarationsSorter.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file declares UsingDeclarationsSorter, a TokenAnalyzer that
+/// This file declares UsingDeclarationsSorter, a TokenAnalyzer that
/// sorts consecutive using declarations.
///
//===----------------------------------------------------------------------===//
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index a5477a996327..7070ce03c864 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements WhitespaceManager class.
+/// This file implements the WhitespaceManager class.
///
//===----------------------------------------------------------------------===//
@@ -90,7 +90,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
if (Changes.empty())
return Replaces;
- std::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr));
+ llvm::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
alignConsecutiveDeclarations();
alignConsecutiveAssignments();
diff --git a/lib/Format/WhitespaceManager.h b/lib/Format/WhitespaceManager.h
index af20dc5616a7..db90343f7294 100644
--- a/lib/Format/WhitespaceManager.h
+++ b/lib/Format/WhitespaceManager.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief WhitespaceManager class manages whitespace around tokens and their
+/// The WhitespaceManager class manages whitespace around tokens and their
/// replacements.
///
//===----------------------------------------------------------------------===//
@@ -24,7 +24,7 @@
namespace clang {
namespace format {
-/// \brief Manages the whitespaces around tokens and their replacements.
+/// Manages the whitespace around tokens and their replacements.
///
/// This includes special handling for certain constructs, e.g. the alignment of
/// trailing line comments.
@@ -41,7 +41,7 @@ public:
bool UseCRLF)
: SourceMgr(SourceMgr), Style(Style), UseCRLF(UseCRLF) {}
- /// \brief Replaces the whitespace in front of \p Tok. Only call once for
+ /// Replaces the whitespace in front of \p Tok. Only call once for
/// each \c AnnotatedToken.
///
/// \p StartOfTokenColumn is the column at which the token will start after
@@ -51,7 +51,7 @@ public:
unsigned StartOfTokenColumn,
bool InPPDirective = false);
- /// \brief Adds information about an unchangeable token's whitespace.
+ /// Adds information about an unchangeable token's whitespace.
///
/// Needs to be called for every token for which \c replaceWhitespace
/// was not called.
@@ -59,7 +59,7 @@ public:
llvm::Error addReplacement(const tooling::Replacement &Replacement);
- /// \brief Inserts or replaces whitespace in the middle of a token.
+ /// Inserts or replaces whitespace in the middle of a token.
///
/// Inserts \p PreviousPostfix, \p Newlines, \p Spaces and \p CurrentPrefix
/// (in this order) at \p Offset inside \p Tok, replacing \p ReplaceChars
@@ -79,13 +79,13 @@ public:
StringRef CurrentPrefix, bool InPPDirective,
unsigned Newlines, int Spaces);
- /// \brief Returns all the \c Replacements created during formatting.
+ /// Returns all the \c Replacements created during formatting.
const tooling::Replacements &generateReplacements();
- /// \brief Represents a change before a token, a break inside a token,
+ /// Represents a change before a token, a break inside a token,
/// or the layout of an unchanged token (or whitespace within).
struct Change {
- /// \brief Functor to sort changes in original source order.
+ /// Functor to sort changes in original source order.
class IsBeforeInFile {
public:
IsBeforeInFile(const SourceManager &SourceMgr) : SourceMgr(SourceMgr) {}
@@ -95,7 +95,7 @@ public:
const SourceManager &SourceMgr;
};
- /// \brief Creates a \c Change.
+ /// Creates a \c Change.
///
/// The generated \c Change will replace the characters at
/// \p OriginalWhitespaceRange with a concatenation of
@@ -165,35 +165,35 @@ public:
};
private:
- /// \brief Calculate \c IsTrailingComment, \c TokenLength for the last tokens
+ /// Calculate \c IsTrailingComment, \c TokenLength for the last tokens
/// or token parts in a line and \c PreviousEndOfTokenColumn and
/// \c EscapedNewlineColumn for the first tokens or token parts in a line.
void calculateLineBreakInformation();
- /// \brief Align consecutive assignments over all \c Changes.
+ /// Align consecutive assignments over all \c Changes.
void alignConsecutiveAssignments();
- /// \brief Align consecutive declarations over all \c Changes.
+ /// Align consecutive declarations over all \c Changes.
void alignConsecutiveDeclarations();
- /// \brief Align trailing comments over all \c Changes.
+ /// Align trailing comments over all \c Changes.
void alignTrailingComments();
- /// \brief Align trailing comments from change \p Start to change \p End at
+ /// Align trailing comments from change \p Start to change \p End at
/// the specified \p Column.
void alignTrailingComments(unsigned Start, unsigned End, unsigned Column);
- /// \brief Align escaped newlines over all \c Changes.
+ /// Align escaped newlines over all \c Changes.
void alignEscapedNewlines();
- /// \brief Align escaped newlines from change \p Start to change \p End at
+ /// Align escaped newlines from change \p Start to change \p End at
/// the specified \p Column.
void alignEscapedNewlines(unsigned Start, unsigned End, unsigned Column);
- /// \brief Fill \c Replaces with the replacements for all effective changes.
+ /// Fill \c Replaces with the replacements for all effective changes.
void generateChanges();
- /// \brief Stores \p Text as the replacement for the whitespace in \p Range.
+ /// Stores \p Text as the replacement for the whitespace in \p Range.
void storeReplacement(SourceRange Range, StringRef Text);
void appendNewlineText(std::string &Text, unsigned Newlines);
void appendEscapedNewlineText(std::string &Text, unsigned Newlines,
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index 7dc475e26f76..b67c019baed8 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -87,9 +87,10 @@ namespace {
<< DC->getPrimaryContext() << "\n";
} else
Out << "Not a DeclContext\n";
- } else if (OutputKind == Print)
- D->print(Out, /*Indentation=*/0, /*PrintInstantiation=*/true);
- else if (OutputKind != None)
+ } else if (OutputKind == Print) {
+ PrintingPolicy Policy(D->getASTContext().getLangOpts());
+ D->print(Out, Policy, /*Indentation=*/0, /*PrintInstantiation=*/true);
+ } else if (OutputKind != None)
D->dump(Out, OutputKind == DumpFull);
}
@@ -138,12 +139,14 @@ clang::CreateASTPrinter(std::unique_ptr<raw_ostream> Out,
FilterString);
}
-std::unique_ptr<ASTConsumer> clang::CreateASTDumper(StringRef FilterString,
- bool DumpDecls,
- bool Deserialize,
- bool DumpLookups) {
+std::unique_ptr<ASTConsumer>
+clang::CreateASTDumper(std::unique_ptr<raw_ostream> Out,
+ StringRef FilterString,
+ bool DumpDecls,
+ bool Deserialize,
+ bool DumpLookups) {
assert((DumpDecls || Deserialize || DumpLookups) && "nothing to dump");
- return llvm::make_unique<ASTPrinter>(nullptr,
+ return llvm::make_unique<ASTPrinter>(std::move(Out),
Deserialize ? ASTPrinter::DumpFull :
DumpDecls ? ASTPrinter::Dump :
ASTPrinter::None,
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index 354527db7bad..6ec0e2a98c1b 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -1,4 +1,4 @@
-//===-- ASTMerge.cpp - AST Merging Frontent Action --------------*- C++ -*-===//
+//===-- ASTMerge.cpp - AST Merging Frontend Action --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 1160df15a920..e4c313fed30f 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -1,4 +1,4 @@
-//===--- ASTUnit.cpp - ASTUnit utility --------------------------*- C++ -*-===//
+//===- ASTUnit.cpp - ASTUnit utility --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,45 +14,99 @@
#include "clang/Frontend/ASTUnit.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclVisitor.h"
-#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MemoryBufferCache.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Lex/Token.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/CodeCompleteOptions.h"
#include "clang/Sema/Sema.h"
+#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ContinuousRangeMap.h"
+#include "clang/Serialization/Module.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CrashRecoveryContext.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/DJB.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
-#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#include <atomic>
+#include <cassert>
+#include <cstdint>
#include <cstdio>
#include <cstdlib>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
using namespace clang;
using llvm::TimeRecord;
namespace {
+
class SimpleTimer {
bool WantTiming;
TimeRecord Start;
@@ -64,11 +118,6 @@ namespace {
Start = TimeRecord::getCurrentTime();
}
- void setOutput(const Twine &Output) {
- if (WantTiming)
- this->Output = Output.str();
- }
-
~SimpleTimer() {
if (WantTiming) {
TimeRecord Elapsed = TimeRecord::getCurrentTime();
@@ -78,29 +127,37 @@ namespace {
llvm::errs() << '\n';
}
}
+
+ void setOutput(const Twine &Output) {
+ if (WantTiming)
+ this->Output = Output.str();
+ }
};
- template <class T>
- std::unique_ptr<T> valueOrNull(llvm::ErrorOr<std::unique_ptr<T>> Val) {
- if (!Val)
- return nullptr;
- return std::move(*Val);
- }
+} // namespace
- template <class T>
- bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
- if (!Val)
- return false;
- Output = std::move(*Val);
- return true;
- }
+template <class T>
+static std::unique_ptr<T> valueOrNull(llvm::ErrorOr<std::unique_ptr<T>> Val) {
+ if (!Val)
+ return nullptr;
+ return std::move(*Val);
+}
+
+template <class T>
+static bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
+ if (!Val)
+ return false;
+ Output = std::move(*Val);
+ return true;
+}
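
A hedged usage sketch of valueOrNull above (the file name is illustrative):
it collapses llvm::ErrorOr into a pointer that is null on failure, dropping
the error code.

    auto BufOrErr = llvm::MemoryBuffer::getFile("input.txt");
    std::unique_ptr<llvm::MemoryBuffer> Buf = valueOrNull(std::move(BufOrErr));
    if (!Buf) {
      // Loading failed; the specific error code was intentionally discarded.
    }
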
-/// \brief Get a source buffer for \p MainFilePath, handling all file-to-file
+/// Get a source buffer for \p MainFilePath, handling all file-to-file
/// and file-to-buffer remappings inside \p Invocation.
static std::unique_ptr<llvm::MemoryBuffer>
getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation,
vfs::FileSystem *VFS,
- StringRef FilePath) {
+ StringRef FilePath,
+ bool isVolatile) {
const auto &PreprocessorOpts = Invocation.getPreprocessorOpts();
// Try to determine if the main file has been remapped, either from the
@@ -120,7 +177,7 @@ getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation,
llvm::sys::fs::UniqueID MID = MPathStatus->getUniqueID();
if (MainFileID == MID) {
// We found a remapping. Try to load the resulting, remapped source.
- BufferOwner = valueOrNull(VFS->getBufferForFile(RF.second));
+ BufferOwner = valueOrNull(VFS->getBufferForFile(RF.second, -1, true, isVolatile));
if (!BufferOwner)
return nullptr;
}
@@ -145,7 +202,7 @@ getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation,
// If the main source file was not remapped, load it now.
if (!Buffer && !BufferOwner) {
- BufferOwner = valueOrNull(VFS->getBufferForFile(FilePath));
+ BufferOwner = valueOrNull(VFS->getBufferForFile(FilePath, -1, true, isVolatile));
if (!BufferOwner)
return nullptr;
}
@@ -156,7 +213,6 @@ getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation,
return nullptr;
return llvm::MemoryBuffer::getMemBufferCopy(Buffer->getBuffer(), FilePath);
}
-}
struct ASTUnit::ASTWriterData {
SmallString<128> Buffer;
@@ -171,32 +227,22 @@ void ASTUnit::clearFileLevelDecls() {
llvm::DeleteContainerSeconds(FileDecls);
}
-/// \brief After failing to build a precompiled preamble (due to
+/// After failing to build a precompiled preamble (due to
/// errors in the source that occur in the preamble), the number of
/// reparses during which we'll skip even trying to precompile the
/// preamble.
const unsigned DefaultPreambleRebuildInterval = 5;
-/// \brief Tracks the number of ASTUnit objects that are currently active.
+/// Tracks the number of ASTUnit objects that are currently active.
///
/// Used for debugging purposes only.
static std::atomic<unsigned> ActiveASTUnitObjects;
ASTUnit::ASTUnit(bool _MainFileIsAST)
- : Reader(nullptr), HadModuleLoaderFatalFailure(false),
- OnlyLocalDecls(false), CaptureDiagnostics(false),
- MainFileIsAST(_MainFileIsAST),
- TUKind(TU_Complete), WantTiming(getenv("LIBCLANG_TIMING")),
- OwnsRemappedFileBuffers(true),
- NumStoredDiagnosticsFromDriver(0),
- PreambleRebuildCounter(0),
- NumWarningsInPreamble(0),
- ShouldCacheCodeCompletionResults(false),
- IncludeBriefCommentsInCodeCompletion(false), UserFilesAreVolatile(false),
- CompletionCacheTopLevelHashValue(0),
- PreambleTopLevelHashValue(0),
- CurrentTopLevelHashValue(0),
- UnsafeToFree(false) {
+ : MainFileIsAST(_MainFileIsAST), WantTiming(getenv("LIBCLANG_TIMING")),
+ ShouldCacheCodeCompletionResults(false),
+ IncludeBriefCommentsInCodeCompletion(false), UserFilesAreVolatile(false),
+ UnsafeToFree(false) {
if (getenv("LIBCLANG_OBJTRACKING"))
fprintf(stderr, "+++ %u translation units\n", ++ActiveASTUnitObjects);
}
@@ -219,8 +265,8 @@ ASTUnit::~ASTUnit() {
delete RB.second;
}
- ClearCachedCompletionResults();
-
+ ClearCachedCompletionResults();
+
if (getenv("LIBCLANG_OBJTRACKING"))
fprintf(stderr, "--- %u translation units\n", --ActiveASTUnitObjects);
}
@@ -229,20 +275,26 @@ void ASTUnit::setPreprocessor(std::shared_ptr<Preprocessor> PP) {
this->PP = std::move(PP);
}
-/// \brief Determine the set of code-completion contexts in which this
+void ASTUnit::enableSourceFileDiagnostics() {
+ assert(getDiagnostics().getClient() && Ctx &&
+ "Bad context for source file");
+ getDiagnostics().getClient()->BeginSourceFile(Ctx->getLangOpts(), PP.get());
+}
+
+/// Determine the set of code-completion contexts in which this
/// declaration should be shown.
static unsigned getDeclShowContexts(const NamedDecl *ND,
const LangOptions &LangOpts,
bool &IsNestedNameSpecifier) {
IsNestedNameSpecifier = false;
-
+
if (isa<UsingShadowDecl>(ND))
- ND = dyn_cast<NamedDecl>(ND->getUnderlyingDecl());
+ ND = ND->getUnderlyingDecl();
if (!ND)
return 0;
-
+
uint64_t Contexts = 0;
- if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND) ||
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND) ||
isa<ClassTemplateDecl>(ND) || isa<TemplateTemplateParmDecl>(ND) ||
isa<TypeAliasTemplateDecl>(ND)) {
// Types can appear in these contexts.
@@ -257,12 +309,12 @@ static unsigned getDeclShowContexts(const NamedDecl *ND,
// In C++, types can appear in expressions contexts (for functional casts).
if (LangOpts.CPlusPlus)
Contexts |= (1LL << CodeCompletionContext::CCC_Expression);
-
+
// In Objective-C, message sends can send interfaces. In Objective-C++,
// all types are available due to functional casts.
if (LangOpts.CPlusPlus || isa<ObjCInterfaceDecl>(ND))
Contexts |= (1LL << CodeCompletionContext::CCC_ObjCMessageReceiver);
-
+
// In Objective-C, you can only be a subclass of another Objective-C class
if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND)) {
// Objective-C interfaces can be used in a class property expression.
@@ -274,16 +326,16 @@ static unsigned getDeclShowContexts(const NamedDecl *ND,
// Deal with tag names.
if (isa<EnumDecl>(ND)) {
Contexts |= (1LL << CodeCompletionContext::CCC_EnumTag);
-
+
// Part of the nested-name-specifier in C++0x.
if (LangOpts.CPlusPlus11)
IsNestedNameSpecifier = true;
- } else if (const RecordDecl *Record = dyn_cast<RecordDecl>(ND)) {
+ } else if (const auto *Record = dyn_cast<RecordDecl>(ND)) {
if (Record->isUnion())
Contexts |= (1LL << CodeCompletionContext::CCC_UnionTag);
else
Contexts |= (1LL << CodeCompletionContext::CCC_ClassOrStructTag);
-
+
if (LangOpts.CPlusPlus)
IsNestedNameSpecifier = true;
} else if (isa<ClassTemplateDecl>(ND))
@@ -300,37 +352,37 @@ static unsigned getDeclShowContexts(const NamedDecl *ND,
Contexts = (1LL << CodeCompletionContext::CCC_ObjCCategoryName);
} else if (isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) {
Contexts = (1LL << CodeCompletionContext::CCC_Namespace);
-
+
// Part of the nested-name-specifier.
IsNestedNameSpecifier = true;
}
-
+
return Contexts;
}
void ASTUnit::CacheCodeCompletionResults() {
if (!TheSema)
return;
-
+
SimpleTimer Timer(WantTiming);
Timer.setOutput("Cache global code completions for " + getMainFileName());
// Clear out the previous results.
ClearCachedCompletionResults();
-
+
// Gather the set of global code completions.
- typedef CodeCompletionResult Result;
+ using Result = CodeCompletionResult;
SmallVector<Result, 8> Results;
CachedCompletionAllocator = std::make_shared<GlobalCodeCompletionAllocator>();
CodeCompletionTUInfo CCTUInfo(CachedCompletionAllocator);
TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator,
CCTUInfo, Results);
-
+
// Translate global code completions into cached completions.
llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
CodeCompletionContext CCContext(CodeCompletionContext::CCC_TopLevel);
- for (Result &R : Results) {
+ for (auto &R : Results) {
switch (R.Kind) {
case Result::RK_Declaration: {
bool IsNestedNameSpecifier = false;
@@ -344,7 +396,7 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedResult.Kind = R.CursorKind;
CachedResult.Availability = R.Availability;
- // Keep track of the type of this completion in an ASTContext-agnostic
+ // Keep track of the type of this completion in an ASTContext-agnostic
// way.
QualType UsageType = getDeclUsageType(*Ctx, R.Declaration);
if (UsageType.isNull()) {
@@ -356,7 +408,7 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedResult.TypeClass = getSimplifiedTypeClass(CanUsageType);
// Determine whether we have already seen this type. If so, we save
- // ourselves the work of formatting the type string by using the
+ // ourselves the work of formatting the type string by using the
// temporary, CanQualType-based hash table to find the associated value.
unsigned &TypeValue = CompletionTypes[CanUsageType];
if (TypeValue == 0) {
@@ -364,12 +416,12 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedCompletionTypes[QualType(CanUsageType).getAsString()]
= TypeValue;
}
-
+
CachedResult.Type = TypeValue;
}
-
+
CachedCompletionResults.push_back(CachedResult);
-
+
/// Handle nested-name-specifiers in C++.
if (TheSema->Context.getLangOpts().CPlusPlus && IsNestedNameSpecifier &&
!R.StartsNestedNameSpecifier) {
@@ -392,10 +444,10 @@ void ASTUnit::CacheCodeCompletionResults() {
isa<NamespaceAliasDecl>(R.Declaration))
NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace);
- if (unsigned RemainingContexts
+ if (unsigned RemainingContexts
= NNSContexts & ~CachedResult.ShowInContexts) {
- // If there any contexts where this completion can be a
- // nested-name-specifier but isn't already an option, create a
+ // If there are any contexts where this completion can be a
+ // nested-name-specifier but isn't already an option, create a
// nested-name-specifier completion.
R.StartsNestedNameSpecifier = true;
CachedResult.Completion = R.CreateCodeCompletionString(
@@ -410,13 +462,13 @@ void ASTUnit::CacheCodeCompletionResults() {
}
break;
}
-
+
case Result::RK_Keyword:
case Result::RK_Pattern:
// Ignore keywords and patterns; we don't care, since they are so
// easily regenerated.
break;
-
+
case Result::RK_Macro: {
CachedCodeCompletionResult CachedResult;
CachedResult.Completion = R.CreateCodeCompletionString(
@@ -446,7 +498,7 @@ void ASTUnit::CacheCodeCompletionResults() {
}
}
}
-
+
// Save the current top-level hash value.
CompletionCacheTopLevelHashValue = CurrentTopLevelHashValue;
}
@@ -459,7 +511,7 @@ void ASTUnit::ClearCachedCompletionResults() {
namespace {
-/// \brief Gathers information from ASTReader that will be used to initialize
+/// Gathers information from ASTReader that will be used to initialize
/// a Preprocessor.
class ASTInfoCollector : public ASTReaderListener {
Preprocessor &PP;
@@ -470,8 +522,8 @@ class ASTInfoCollector : public ASTReaderListener {
std::shared_ptr<TargetOptions> &TargetOpts;
IntrusiveRefCntPtr<TargetInfo> &Target;
unsigned &Counter;
+ bool InitializedLanguage = false;
- bool InitializedLanguage;
public:
ASTInfoCollector(Preprocessor &PP, ASTContext *Context,
HeaderSearchOptions &HSOpts, PreprocessorOptions &PPOpts,
@@ -480,30 +532,29 @@ public:
IntrusiveRefCntPtr<TargetInfo> &Target, unsigned &Counter)
: PP(PP), Context(Context), HSOpts(HSOpts), PPOpts(PPOpts),
LangOpt(LangOpt), TargetOpts(TargetOpts), Target(Target),
- Counter(Counter), InitializedLanguage(false) {}
+ Counter(Counter) {}
bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
bool AllowCompatibleDifferences) override {
if (InitializedLanguage)
return false;
-
+
LangOpt = LangOpts;
InitializedLanguage = true;
-
+
updated();
return false;
}
- virtual bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
- StringRef SpecificModuleCachePath,
- bool Complain) override {
+ bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
+ StringRef SpecificModuleCachePath,
+ bool Complain) override {
this->HSOpts = HSOpts;
return false;
}
- virtual bool
- ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, bool Complain,
- std::string &SuggestedPredefines) override {
+ bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, bool Complain,
+ std::string &SuggestedPredefines) override {
this->PPOpts = PPOpts;
return false;
}
@@ -557,19 +608,18 @@ private:
}
};
- /// \brief Diagnostic consumer that saves each diagnostic it is given.
+/// Diagnostic consumer that saves each diagnostic it is given.
class StoredDiagnosticConsumer : public DiagnosticConsumer {
SmallVectorImpl<StoredDiagnostic> *StoredDiags;
SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags;
- const LangOptions *LangOpts;
- SourceManager *SourceMgr;
+ const LangOptions *LangOpts = nullptr;
+ SourceManager *SourceMgr = nullptr;
public:
StoredDiagnosticConsumer(
SmallVectorImpl<StoredDiagnostic> *StoredDiags,
SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags)
- : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags),
- LangOpts(nullptr), SourceMgr(nullptr) {
+ : StoredDiags(StoredDiags), StandaloneDiags(StandaloneDiags) {
assert((StoredDiags || StandaloneDiags) &&
"No output collections were passed to StoredDiagnosticConsumer.");
}
@@ -585,20 +635,20 @@ public:
const Diagnostic &Info) override;
};
-/// \brief RAII object that optionally captures diagnostics, if
+/// RAII object that optionally captures diagnostics, if
/// there is no diagnostic client to capture them already.
class CaptureDroppedDiagnostics {
DiagnosticsEngine &Diags;
StoredDiagnosticConsumer Client;
- DiagnosticConsumer *PreviousClient;
+ DiagnosticConsumer *PreviousClient = nullptr;
std::unique_ptr<DiagnosticConsumer> OwningPreviousClient;
public:
- CaptureDroppedDiagnostics(bool RequestCapture, DiagnosticsEngine &Diags,
- SmallVectorImpl<StoredDiagnostic> *StoredDiags,
- SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags)
- : Diags(Diags), Client(StoredDiags, StandaloneDiags), PreviousClient(nullptr)
- {
+ CaptureDroppedDiagnostics(
+ bool RequestCapture, DiagnosticsEngine &Diags,
+ SmallVectorImpl<StoredDiagnostic> *StoredDiags,
+ SmallVectorImpl<ASTUnit::StandaloneDiagnostic> *StandaloneDiags)
+ : Diags(Diags), Client(StoredDiags, StandaloneDiags) {
if (RequestCapture || Diags.getClient() == nullptr) {
OwningPreviousClient = Diags.takeClient();
PreviousClient = Diags.getClient();
@@ -612,7 +662,7 @@ public:
}
};
-} // anonymous namespace
+} // namespace
static ASTUnit::StandaloneDiagnostic
makeStandaloneDiagnostic(const LangOptions &LangOpts,
@@ -634,7 +684,7 @@ void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
}
if (StandaloneDiags) {
- llvm::Optional<StoredDiagnostic> StoredDiag = llvm::None;
+ llvm::Optional<StoredDiagnostic> StoredDiag = None;
if (!ResultDiag) {
StoredDiag.emplace(Level, Info);
ResultDiag = StoredDiag.getPointer();
@@ -664,7 +714,7 @@ ASTDeserializationListener *ASTUnit::getDeserializationListener() {
std::unique_ptr<llvm::MemoryBuffer>
ASTUnit::getBufferForFile(StringRef Filename, std::string *ErrorStr) {
assert(FileMgr);
- auto Buffer = FileMgr->getBufferForFile(Filename);
+ auto Buffer = FileMgr->getBufferForFile(Filename, UserFilesAreVolatile);
if (Buffer)
return std::move(*Buffer);
if (ErrorStr)
@@ -672,7 +722,7 @@ ASTUnit::getBufferForFile(StringRef Filename, std::string *ErrorStr) {
return nullptr;
}
-/// \brief Configure the diagnostics object for use with ASTUnit.
+/// Configure the diagnostics object for use with ASTUnit.
void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
ASTUnit &AST, bool CaptureDiagnostics) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
@@ -693,7 +743,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
- llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine>>
DiagCleanup(Diags.get());
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
@@ -741,7 +791,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
bool disableValid = false;
if (::getenv("LIBCLANG_DISABLE_PCH_VALIDATION"))
disableValid = true;
- AST->Reader = new ASTReader(PP, AST->Ctx.get(), PCHContainerRdr, { },
+ AST->Reader = new ASTReader(PP, AST->Ctx.get(), PCHContainerRdr, {},
/*isysroot=*/"",
/*DisableValidation=*/disableValid,
AllowPCHWithCompilerErrors);
@@ -794,20 +844,20 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
return AST;
}
-namespace {
-
-/// \brief Add the given macro to the hash of all top-level entities.
-void AddDefinedMacroToHash(const Token &MacroNameTok, unsigned &Hash) {
- Hash = llvm::HashString(MacroNameTok.getIdentifierInfo()->getName(), Hash);
+/// Add the given macro to the hash of all top-level entities.
+static void AddDefinedMacroToHash(const Token &MacroNameTok, unsigned &Hash) {
+ Hash = llvm::djbHash(MacroNameTok.getIdentifierInfo()->getName(), Hash);
}
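
For reference, the chained use of llvm::djbHash above folds a sequence of names into a single value by seeding each call with the previous result. A minimal sketch, with an illustrative helper name that is not part of the patch:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DJB.h"

// Fold a list of top-level names into one hash, the way the ASTUnit
// callbacks accumulate it across macros, decls, and imported modules.
static unsigned hashTopLevelNames(llvm::ArrayRef<llvm::StringRef> Names) {
  unsigned Hash = 0; // the tracking consumer starts the accumulator at zero
  for (llvm::StringRef N : Names)
    Hash = llvm::djbHash(N, Hash); // previous hash seeds the next round
  return Hash;
}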
-/// \brief Preprocessor callback class that updates a hash value with the names
+namespace {
+
+/// Preprocessor callback class that updates a hash value with the names
/// of all macros that have been defined by the translation unit.
class MacroDefinitionTrackerPPCallbacks : public PPCallbacks {
unsigned &Hash;
-
+
public:
- explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) { }
+ explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) {}
void MacroDefined(const Token &MacroNameTok,
const MacroDirective *MD) override {
@@ -815,55 +865,59 @@ public:
}
};
-/// \brief Add the given declaration to the hash of all top-level entities.
-void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
+} // namespace
+
+/// Add the given declaration to the hash of all top-level entities.
+static void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
if (!D)
return;
-
+
DeclContext *DC = D->getDeclContext();
if (!DC)
return;
-
+
if (!(DC->isTranslationUnit() || DC->getLookupParent()->isTranslationUnit()))
return;
- if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
- if (EnumDecl *EnumD = dyn_cast<EnumDecl>(D)) {
+ if (const auto *ND = dyn_cast<NamedDecl>(D)) {
+ if (const auto *EnumD = dyn_cast<EnumDecl>(D)) {
// For an unscoped enum include the enumerators in the hash since they
// enter the top-level namespace.
if (!EnumD->isScoped()) {
for (const auto *EI : EnumD->enumerators()) {
if (EI->getIdentifier())
- Hash = llvm::HashString(EI->getIdentifier()->getName(), Hash);
+ Hash = llvm::djbHash(EI->getIdentifier()->getName(), Hash);
}
}
}
if (ND->getIdentifier())
- Hash = llvm::HashString(ND->getIdentifier()->getName(), Hash);
+ Hash = llvm::djbHash(ND->getIdentifier()->getName(), Hash);
else if (DeclarationName Name = ND->getDeclName()) {
std::string NameStr = Name.getAsString();
- Hash = llvm::HashString(NameStr, Hash);
+ Hash = llvm::djbHash(NameStr, Hash);
}
return;
}
- if (ImportDecl *ImportD = dyn_cast<ImportDecl>(D)) {
- if (Module *Mod = ImportD->getImportedModule()) {
+ if (const auto *ImportD = dyn_cast<ImportDecl>(D)) {
+ if (const Module *Mod = ImportD->getImportedModule()) {
std::string ModName = Mod->getFullModuleName();
- Hash = llvm::HashString(ModName, Hash);
+ Hash = llvm::djbHash(ModName, Hash);
}
return;
}
}
+namespace {
+
class TopLevelDeclTrackerConsumer : public ASTConsumer {
ASTUnit &Unit;
unsigned &Hash;
-
+
public:
TopLevelDeclTrackerConsumer(ASTUnit &_Unit, unsigned &Hash)
- : Unit(_Unit), Hash(Hash) {
+ : Unit(_Unit), Hash(Hash) {
Hash = 0;
}
@@ -886,14 +940,14 @@ public:
void handleFileLevelDecl(Decl *D) {
Unit.addFileLevelDecl(D);
- if (NamespaceDecl *NSD = dyn_cast<NamespaceDecl>(D)) {
+ if (auto *NSD = dyn_cast<NamespaceDecl>(D)) {
for (auto *I : NSD->decls())
handleFileLevelDecl(I);
}
}
bool HandleTopLevelDecl(DeclGroupRef D) override {
- for (Decl *TopLevelDecl : D)
+ for (auto *TopLevelDecl : D)
handleTopLevelDecl(TopLevelDecl);
return true;
}
@@ -902,7 +956,7 @@ public:
void HandleInterestingDecl(DeclGroupRef) override {}
void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override {
- for (Decl *TopLevelDecl : D)
+ for (auto *TopLevelDecl : D)
handleTopLevelDecl(TopLevelDecl);
}
@@ -932,8 +986,9 @@ public:
TopLevelDeclTrackerAction(ASTUnit &_Unit) : Unit(_Unit) {}
bool hasCodeCompletionSupport() const override { return false; }
+
TranslationUnitKind getTranslationUnitKind() override {
- return Unit.getTranslationUnitKind();
+ return Unit.getTranslationUnitKind();
}
};
@@ -949,7 +1004,7 @@ public:
void AfterPCHEmitted(ASTWriter &Writer) override {
TopLevelDeclIDs.reserve(TopLevelDecls.size());
- for (Decl *D : TopLevelDecls) {
+ for (const auto *D : TopLevelDecls) {
// Invalid top-level decls may not have been serialized.
if (D->isInvalidDecl())
continue;
@@ -958,7 +1013,7 @@ public:
}
void HandleTopLevelDecl(DeclGroupRef DG) override {
- for (Decl *D : DG) {
+ for (auto *D : DG) {
// FIXME: Currently ObjC method declarations are incorrectly being
// reported as top-level declarations, even though their DeclContext
// is the containing ObjC @interface/@implementation. This is a
@@ -981,7 +1036,7 @@ private:
llvm::SmallVector<ASTUnit::StandaloneDiagnostic, 4> PreambleDiags;
};
-} // anonymous namespace
+} // namespace
static bool isNonDriverDiag(const StoredDiagnostic &StoredDiag) {
return StoredDiag.getLocation().isValid();
@@ -1004,7 +1059,7 @@ static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
// been careful to make sure that the source manager's state
// before and after are identical, so that we can reuse the source
// location itself.
- for (StoredDiagnostic &SD : StoredDiagnostics) {
+ for (auto &SD : StoredDiagnostics) {
if (SD.getLocation().isValid()) {
FullSourceLoc Loc(SD.getLocation(), SM);
SD.setLocation(Loc);
@@ -1052,11 +1107,11 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
Clang->setInvocation(CCInvocation);
OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
-
+
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang->setDiagnostics(&getDiagnostics());
-
+
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
@@ -1068,7 +1123,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
-
+
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
@@ -1097,10 +1152,10 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(&getFileManager());
-
+
// Create the source manager.
Clang->setSourceManager(&getSourceManager());
-
+
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
if (OverrideMainBuffer) {
@@ -1135,7 +1190,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
goto error;
transferASTDataFromCompilerInstance(*Clang);
-
+
Act->EndSourceFile();
FailedParseDiagnostics.clear();
@@ -1192,22 +1247,22 @@ makeStandaloneDiagnostic(const LangOptions &LangOpts,
if (OutDiag.Filename.empty())
return OutDiag;
OutDiag.LocOffset = SM.getFileOffset(FileLoc);
- for (const CharSourceRange &Range : InDiag.getRanges())
+ for (const auto &Range : InDiag.getRanges())
OutDiag.Ranges.push_back(makeStandaloneRange(Range, SM, LangOpts));
- for (const FixItHint &FixIt : InDiag.getFixIts())
+ for (const auto &FixIt : InDiag.getFixIts())
OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, FixIt));
return OutDiag;
}
-/// \brief Attempt to build or re-use a precompiled preamble when (re-)parsing
+/// Attempt to build or re-use a precompiled preamble when (re-)parsing
/// the source file.
///
/// This routine will compute the preamble of the main source file. If a
-/// non-trivial preamble is found, it will precompile that preamble into a
+/// non-trivial preamble is found, it will precompile that preamble into a
/// precompiled header so that the precompiled preamble can be used to reduce
/// reparsing time. If a precompiled preamble has already been constructed,
-/// this routine will determine if it is still valid and, if so, avoid
+/// this routine will determine if it is still valid and, if so, avoid
/// rebuilding the precompiled preamble.
///
/// \param AllowRebuild When true (the default), this routine is
@@ -1223,15 +1278,14 @@ makeStandaloneDiagnostic(const LangOptions &LangOpts,
std::unique_ptr<llvm::MemoryBuffer>
ASTUnit::getMainBufferWithPrecompiledPreamble(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- const CompilerInvocation &PreambleInvocationIn,
+ CompilerInvocation &PreambleInvocationIn,
IntrusiveRefCntPtr<vfs::FileSystem> VFS, bool AllowRebuild,
unsigned MaxLines) {
-
auto MainFilePath =
PreambleInvocationIn.getFrontendOpts().Inputs[0].getFile();
std::unique_ptr<llvm::MemoryBuffer> MainFileBuffer =
getBufferForFileHandlingRemapping(PreambleInvocationIn, VFS.get(),
- MainFilePath);
+ MainFilePath, UserFilesAreVolatile);
if (!MainFileBuffer)
return nullptr;
@@ -1259,6 +1313,7 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
Preamble.reset();
PreambleDiagnostics.clear();
TopLevelDeclsInPreamble.clear();
+ PreambleSrcLocCache.clear();
PreambleRebuildCounter = 1;
}
}
@@ -1290,9 +1345,18 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
SimpleTimer PreambleTimer(WantTiming);
PreambleTimer.setOutput("Precompiling preamble");
+ const bool PreviousSkipFunctionBodies =
+ PreambleInvocationIn.getFrontendOpts().SkipFunctionBodies;
+ if (SkipFunctionBodies == SkipFunctionBodiesScope::Preamble)
+ PreambleInvocationIn.getFrontendOpts().SkipFunctionBodies = true;
+
llvm::ErrorOr<PrecompiledPreamble> NewPreamble = PrecompiledPreamble::Build(
PreambleInvocationIn, MainFileBuffer.get(), Bounds, *Diagnostics, VFS,
PCHContainerOps, /*StoreInMemory=*/false, Callbacks);
+
+ PreambleInvocationIn.getFrontendOpts().SkipFunctionBodies =
+ PreviousSkipFunctionBodies;
+
if (NewPreamble) {
Preamble = std::move(*NewPreamble);
PreambleRebuildCounter = 1;
@@ -1306,7 +1370,6 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
case BuildPreambleError::CouldntCreateTargetInfo:
case BuildPreambleError::BeginSourceFileFailed:
case BuildPreambleError::CouldntEmitPCH:
- case BuildPreambleError::CouldntCreateVFSOverlay:
// These errors are more likely to repeat; retry after some period.
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
return nullptr;
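
On the error paths above the rebuild counter is parked at DefaultPreambleRebuildInterval rather than 1, which throttles how soon another preamble build is attempted. A sketch of the gating this implies, inferred from the assignments visible here rather than copied from ASTUnit:

// Hypothetical restatement of the counter's role: a value above one means
// "skip this many reparses before trying to precompile the preamble again".
static bool shouldAttemptPreambleRebuild(unsigned &RebuildCounter) {
  if (RebuildCounter > 1) {
    --RebuildCounter; // back off after a likely-persistent failure
    return false;
  }
  return true; // counter at 1: build (or rebuild) the preamble now
}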
@@ -1344,7 +1407,7 @@ void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
std::vector<Decl *> Resolved;
Resolved.reserve(TopLevelDeclsInPreamble.size());
ExternalASTSource &Source = *getASTContext().getExternalSource();
- for (serialization::DeclID TopLevelDecl : TopLevelDeclsInPreamble) {
+ for (const auto TopLevelDecl : TopLevelDeclsInPreamble) {
// Resolve the declaration ID to an actual declaration, possibly
// deserializing the declaration in the process.
if (Decl *D = Source.GetExternalDecl(TopLevelDecl))
@@ -1388,12 +1451,12 @@ StringRef ASTUnit::getMainFileName() const {
return FE->getName();
}
- return StringRef();
+ return {};
}
StringRef ASTUnit::getASTFileName() const {
if (!isMainFileAST())
- return StringRef();
+ return {};
serialization::ModuleFile &
Mod = Reader->getModuleManager().getPrimaryModule();
@@ -1408,8 +1471,6 @@ ASTUnit::create(std::shared_ptr<CompilerInvocation> CI,
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
IntrusiveRefCntPtr<vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
- if (!VFS)
- return nullptr;
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
AST->Invocation = std::move(CI);
@@ -1442,7 +1503,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
if (!AST)
return nullptr;
}
-
+
if (!ResourceFilesPath.empty()) {
// Override the resources path.
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
@@ -1460,7 +1521,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(OwnAST.get());
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
- llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine>>
DiagCleanup(Diags.get());
// We'll manage file buffers ourselves.
@@ -1478,11 +1539,11 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
Clang->setInvocation(std::move(CI));
AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
-
+
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
Clang->setDiagnostics(&AST->getDiagnostics());
-
+
// Create the target instance.
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
@@ -1494,7 +1555,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
-
+
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
@@ -1512,7 +1573,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(&AST->getFileManager());
-
+
// Create the source manager.
Clang->setSourceManager(&AST->getSourceManager());
@@ -1558,7 +1619,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
// Steal the created target, context, and preprocessor.
AST->transferASTDataFromCompilerInstance(*Clang);
-
+
Act->EndSourceFile();
if (OwnAST)
@@ -1623,12 +1684,12 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
AST->FileSystemOpts = FileMgr->getFileSystemOpts();
AST->FileMgr = FileMgr;
AST->UserFilesAreVolatile = UserFilesAreVolatile;
-
+
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
- llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine>>
DiagCleanup(Diags.get());
if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps),
@@ -1646,7 +1707,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
- bool AllowPCHWithCompilerErrors, bool SkipFunctionBodies,
+ bool AllowPCHWithCompilerErrors, SkipFunctionBodiesScope SkipFunctionBodies,
bool SingleFileParse, bool UserFilesAreVolatile, bool ForSerialization,
llvm::Optional<StringRef> ModuleFormat, std::unique_ptr<ASTUnit> *ErrAST,
IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
@@ -1657,11 +1718,10 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
std::shared_ptr<CompilerInvocation> CI;
{
-
CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
&StoredDiagnostics, nullptr);
- CI = clang::createInvocationFromCommandLine(
+ CI = createInvocationFromCommandLine(
llvm::makeArrayRef(ArgBegin, ArgEnd), Diags, VFS);
if (!CI)
return nullptr;
@@ -1676,11 +1736,12 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName;
PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors;
PPOpts.SingleFileParseMode = SingleFileParse;
-
+
// Override the resources path.
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
- CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
+ CI->getFrontendOpts().SkipFunctionBodies =
+ SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile;
if (ModuleFormat)
CI->getHeaderSearchOpts().ModuleFormat = ModuleFormat.getValue();
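
SkipFunctionBodies is now a three-way scope instead of a bool. The enum itself is declared in ASTUnit.h, outside this diff; a plausible shape, stated here only as an assumption:

// Assumed definition, for illustration (see clang/Frontend/ASTUnit.h).
enum class SkipFunctionBodiesScope { None, Preamble, PreambleAndMainFile };
// Only PreambleAndMainFile maps onto the frontend-wide flag here; the
// Preamble case is applied temporarily while the preamble PCH is built,
// as getMainBufferWithPrecompiledPreamble does above.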
@@ -1688,14 +1749,14 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
+ AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
+ AST->StoredDiagnostics.swap(StoredDiagnostics);
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
if (!VFS)
VFS = vfs::getRealFileSystem();
VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS);
- if (!VFS)
- return nullptr;
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
AST->PCMCache = new MemoryBufferCache;
AST->OnlyLocalDecls = OnlyLocalDecls;
@@ -1705,9 +1766,8 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
AST->IncludeBriefCommentsInCodeCompletion
= IncludeBriefCommentsInCodeCompletion;
AST->UserFilesAreVolatile = UserFilesAreVolatile;
- AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
- AST->StoredDiagnostics.swap(StoredDiagnostics);
AST->Invocation = CI;
+ AST->SkipFunctionBodies = SkipFunctionBodies;
if (ForSerialization)
AST->WriterData.reset(new ASTWriterData(*AST->PCMCache));
// Zero out now to ease cleanup during crash recovery.
@@ -1745,7 +1805,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
}
clearFileLevelDecls();
-
+
SimpleTimer ParsingTimer(WantTiming);
ParsingTimer.setOutput("Reparsing " + getMainFileName());
@@ -1767,7 +1827,6 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
OverrideMainBuffer =
getMainBufferWithPrecompiledPreamble(PCHContainerOps, *Invocation, VFS);
-
// Clear out the diagnostics state.
FileMgr.reset();
getDiagnostics().Reset();
@@ -1779,7 +1838,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
bool Result =
Parse(std::move(PCHContainerOps), std::move(OverrideMainBuffer), VFS);
- // If we're caching global code-completion results, and the top-level
+ // If we're caching global code-completion results, and the top-level
// declarations have changed, clear out the code-completion cache.
if (!Result && ShouldCacheCodeCompletionResults &&
CurrentTopLevelHashValue != CompletionCacheTopLevelHashValue)
@@ -1788,7 +1847,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// We now need to clear out the completion info related to this translation
// unit; it'll be recreated if necessary.
CCTUInfo.reset();
-
+
return Result;
}
@@ -1810,23 +1869,23 @@ void ASTUnit::ResetForParse() {
//----------------------------------------------------------------------------//
namespace {
- /// \brief Code completion consumer that combines the cached code-completion
+
+ /// Code completion consumer that combines the cached code-completion
/// results from an ASTUnit with the code-completion results provided to it,
- /// then passes the result on to
+ /// then passes the result on to the next consumer.
class AugmentedCodeCompleteConsumer : public CodeCompleteConsumer {
uint64_t NormalContexts;
ASTUnit &AST;
CodeCompleteConsumer &Next;
-
+
public:
AugmentedCodeCompleteConsumer(ASTUnit &AST, CodeCompleteConsumer &Next,
const CodeCompleteOptions &CodeCompleteOpts)
- : CodeCompleteConsumer(CodeCompleteOpts, Next.isOutputBinary()),
- AST(AST), Next(Next)
- {
+ : CodeCompleteConsumer(CodeCompleteOpts, Next.isOutputBinary()),
+ AST(AST), Next(Next) {
// Compute the set of contexts in which we will look when we don't have
// any information about the specific context.
- NormalContexts
+ NormalContexts
= (1LL << CodeCompletionContext::CCC_TopLevel)
| (1LL << CodeCompletionContext::CCC_ObjCInterface)
| (1LL << CodeCompletionContext::CCC_ObjCImplementation)
@@ -1865,9 +1924,10 @@ namespace {
return Next.getCodeCompletionTUInfo();
}
};
-} // anonymous namespace
-/// \brief Helper function that computes which global names are hidden by the
+} // namespace
+
+/// Helper function that computes which global names are hidden by the
/// local code-completion results.
static void CalculateHiddenNames(const CodeCompletionContext &Context,
CodeCompletionResult *Results,
@@ -1895,13 +1955,13 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_ParenthesizedExpression:
case CodeCompletionContext::CCC_ObjCInterfaceName:
break;
-
+
case CodeCompletionContext::CCC_EnumTag:
case CodeCompletionContext::CCC_UnionTag:
case CodeCompletionContext::CCC_ClassOrStructTag:
OnlyTagNames = true;
break;
-
+
case CodeCompletionContext::CCC_ObjCProtocolName:
case CodeCompletionContext::CCC_MacroName:
case CodeCompletionContext::CCC_MacroNameUse:
@@ -1919,12 +1979,12 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
// be hidden.
return;
}
-
- typedef CodeCompletionResult Result;
+
+ using Result = CodeCompletionResult;
for (unsigned I = 0; I != NumResults; ++I) {
if (Results[I].Kind != Result::RK_Declaration)
continue;
-
+
unsigned IDNS
= Results[I].Declaration->getUnderlyingDecl()->getIdentifierNamespace();
@@ -1932,17 +1992,17 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
if (OnlyTagNames)
Hiding = (IDNS & Decl::IDNS_Tag);
else {
- unsigned HiddenIDNS = (Decl::IDNS_Type | Decl::IDNS_Member |
+ unsigned HiddenIDNS = (Decl::IDNS_Type | Decl::IDNS_Member |
Decl::IDNS_Namespace | Decl::IDNS_Ordinary |
Decl::IDNS_NonMemberOperator);
if (Ctx.getLangOpts().CPlusPlus)
HiddenIDNS |= Decl::IDNS_Tag;
Hiding = (IDNS & HiddenIDNS);
}
-
+
if (!Hiding)
continue;
-
+
DeclarationName Name = Results[I].Declaration->getDeclName();
if (IdentifierInfo *Identifier = Name.getAsIdentifierInfo())
HiddenNames.insert(Identifier->getName());
@@ -1954,7 +2014,7 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
CodeCompletionContext Context,
CodeCompletionResult *Results,
- unsigned NumResults) {
+ unsigned NumResults) {
// Merge the results we were given with the results we cached.
bool AddedResult = false;
uint64_t InContexts =
@@ -1962,31 +2022,31 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
? NormalContexts : (1LL << Context.getKind());
// Contains the set of names that are hidden by "local" completion results.
llvm::StringSet<llvm::BumpPtrAllocator> HiddenNames;
- typedef CodeCompletionResult Result;
+ using Result = CodeCompletionResult;
SmallVector<Result, 8> AllResults;
- for (ASTUnit::cached_completion_iterator
+ for (ASTUnit::cached_completion_iterator
C = AST.cached_completion_begin(),
CEnd = AST.cached_completion_end();
C != CEnd; ++C) {
- // If the context we are in matches any of the contexts we are
+ // If the context we are in matches any of the contexts we are
// interested in, we'll add this result.
if ((C->ShowInContexts & InContexts) == 0)
continue;
-
+
// If we haven't added any results previously, do so now.
if (!AddedResult) {
- CalculateHiddenNames(Context, Results, NumResults, S.Context,
+ CalculateHiddenNames(Context, Results, NumResults, S.Context,
HiddenNames);
AllResults.insert(AllResults.end(), Results, Results + NumResults);
AddedResult = true;
}
-
+
// Determine whether this global completion result is hidden by a local
// completion result. If so, skip it.
if (C->Kind != CXCursor_MacroDefinition &&
HiddenNames.count(C->Completion->getTypedText()))
continue;
-
+
// Adjust priority based on similar type classes.
unsigned Priority = C->Priority;
CodeCompletionString *Completion = C->Completion;
@@ -1994,7 +2054,7 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
if (C->Kind == CXCursor_MacroDefinition) {
Priority = getMacroUsagePriority(C->Completion->getTypedText(),
S.getLangOpts(),
- Context.getPreferredType()->isAnyPointerType());
+ Context.getPreferredType()->isAnyPointerType());
} else if (C->Type) {
CanQualType Expected
= S.Context.getCanonicalType(
@@ -2013,7 +2073,7 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
}
}
}
-
+
// Adjust the completion string, if required.
if (C->Kind == CXCursor_MacroDefinition &&
Context.getKind() == CodeCompletionContext::CCC_MacroNameUse) {
@@ -2025,18 +2085,18 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
Priority = CCP_CodePattern;
Completion = Builder.TakeString();
}
-
+
AllResults.push_back(Result(Completion, Priority, C->Kind,
C->Availability));
}
-
+
// If we did not add any cached completion results, just forward the
// results we were given to the next consumer.
if (!AddedResult) {
Next.ProcessCodeCompleteResults(S, Context, Results, NumResults);
return;
}
-
+
Next.ProcessCodeCompleteResults(S, Context, AllResults.data(),
AllResults.size());
}
@@ -2068,6 +2128,8 @@ void ASTUnit::CodeComplete(
CodeCompleteOpts.IncludeCodePatterns = IncludeCodePatterns;
CodeCompleteOpts.IncludeGlobals = CachedCompletionResults.empty();
CodeCompleteOpts.IncludeBriefComments = IncludeBriefComments;
+ CodeCompleteOpts.LoadExternal = Consumer.loadExternal();
+ CodeCompleteOpts.IncludeFixIts = Consumer.includeFixIts();
assert(IncludeBriefComments == this->IncludeBriefCommentsInCodeCompletion);
@@ -2092,11 +2154,11 @@ void ASTUnit::CodeComplete(
auto &Inv = *CCInvocation;
Clang->setInvocation(std::move(CCInvocation));
OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].getFile();
-
+
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
- CaptureDroppedDiagnostics Capture(true,
- Clang->getDiagnostics(),
+ CaptureDroppedDiagnostics Capture(true,
+ Clang->getDiagnostics(),
&StoredDiagnostics, nullptr);
ProcessWarningOptions(Diag, Inv.getDiagnosticOpts());
@@ -2107,13 +2169,13 @@ void ASTUnit::CodeComplete(
Clang->setInvocation(nullptr);
return;
}
-
+
// Inform the target of the language options.
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
Clang->getTarget().adjust(Clang->getLangOpts());
-
+
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
@@ -2122,7 +2184,7 @@ void ASTUnit::CodeComplete(
assert(Clang->getFrontendOpts().Inputs[0].getKind().getLanguage() !=
InputKind::LLVM_IR &&
"IR inputs not support here!");
-
+
// Use the source and file managers that we were given.
Clang->setFileManager(&FileMgr);
Clang->setSourceManager(&SourceMgr);
@@ -2209,7 +2271,7 @@ bool ASTUnit::Save(StringRef File) {
if (llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath))
return true;
- // FIXME: Can we somehow regenerate the stat cache here, or do we need to
+ // FIXME: Can we somehow regenerate the stat cache here, or do we need to
// unconditionally create a stat cache when we parse the file?
llvm::raw_fd_ostream Out(fd, /*shouldClose=*/true);
@@ -2257,7 +2319,7 @@ bool ASTUnit::serialize(raw_ostream &OS) {
return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
}
-typedef ContinuousRangeMap<unsigned, int, 2> SLocRemap;
+using SLocRemap = ContinuousRangeMap<unsigned, int, 2>;
void ASTUnit::TranslateStoredDiagnostics(
FileManager &FileMgr,
@@ -2271,7 +2333,7 @@ void ASTUnit::TranslateStoredDiagnostics(
SmallVector<StoredDiagnostic, 4> Result;
Result.reserve(Diags.size());
- for (const StandaloneDiagnostic &SD : Diags) {
+ for (const auto &SD : Diags) {
// Rebuild the StoredDiagnostic.
if (SD.Filename.empty())
continue;
@@ -2303,7 +2365,7 @@ void ASTUnit::TranslateStoredDiagnostics(
SmallVector<FixItHint, 2> FixIts;
FixIts.reserve(SD.FixIts.size());
- for (const StandaloneFixIt &FixIt : SD.FixIts) {
+ for (const auto &FixIt : SD.FixIts) {
FixIts.push_back(FixItHint());
FixItHint &FH = FixIts.back();
FH.CodeToInsert = FixIt.CodeToInsert;
@@ -2312,7 +2374,7 @@ void ASTUnit::TranslateStoredDiagnostics(
FH.RemoveRange = CharSourceRange::getCharRange(BL, EL);
}
- Result.push_back(StoredDiagnostic(SD.Level, SD.ID,
+ Result.push_back(StoredDiagnostic(SD.Level, SD.ID,
SD.Message, Loc, Ranges, FixIts));
}
Result.swap(Out);
@@ -2320,7 +2382,7 @@ void ASTUnit::TranslateStoredDiagnostics(
void ASTUnit::addFileLevelDecl(Decl *D) {
assert(D);
-
+
// We only care about local declarations.
if (D->isFromASTFile())
return;
@@ -2397,7 +2459,7 @@ void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
std::make_pair(Offset + Length, (Decl *)nullptr), llvm::less_first());
if (EndIt != LocDecls.end())
++EndIt;
-
+
for (LocDeclsTy::iterator DIt = BeginIt; DIt != EndIt; ++DIt)
Decls.push_back(DIt->second);
}
@@ -2416,7 +2478,7 @@ SourceLocation ASTUnit::getLocation(const FileEntry *File,
return SM.getMacroArgExpandedLocation(FileLoc.getLocWithOffset(Offset));
}
-/// \brief If \arg Loc is a loaded location from the preamble, returns
+/// If \arg Loc is a loaded location from the preamble, returns
/// the corresponding local location of the main file, otherwise it returns
/// \arg Loc.
SourceLocation ASTUnit::mapLocationFromPreamble(SourceLocation Loc) const {
@@ -2437,7 +2499,7 @@ SourceLocation ASTUnit::mapLocationFromPreamble(SourceLocation Loc) const {
return Loc;
}
-/// \brief If \arg Loc is a local location of the main file but inside the
+/// If \arg Loc is a local location of the main file but inside the
/// preamble chunk, returns the corresponding loaded location from the
/// preamble, otherwise it returns \arg Loc.
SourceLocation ASTUnit::mapLocationToPreamble(SourceLocation Loc) const {
@@ -2462,10 +2524,10 @@ bool ASTUnit::isInPreambleFileID(SourceLocation Loc) const {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getPreambleFileID();
-
+
if (Loc.isInvalid() || FID.isInvalid())
return false;
-
+
return SourceMgr->isInFileID(Loc, FID);
}
@@ -2473,10 +2535,10 @@ bool ASTUnit::isInMainFileID(SourceLocation Loc) const {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getMainFileID();
-
+
if (Loc.isInvalid() || FID.isInvalid())
return false;
-
+
return SourceMgr->isInFileID(Loc, FID);
}
@@ -2484,9 +2546,9 @@ SourceLocation ASTUnit::getEndOfPreambleFileID() const {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getPreambleFileID();
-
+
if (FID.isInvalid())
- return SourceLocation();
+ return {};
return SourceMgr->getLocForEndOfFile(FID);
}
@@ -2495,10 +2557,10 @@ SourceLocation ASTUnit::getStartOfMainFileID() const {
FileID FID;
if (SourceMgr)
FID = SourceMgr->getMainFileID();
-
+
if (FID.isInvalid())
- return SourceLocation();
-
+ return {};
+
return SourceMgr->getLocForStartOfFile(FID);
}
@@ -2521,7 +2583,7 @@ bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
if (isMainFileAST()) {
serialization::ModuleFile &
Mod = Reader->getModuleManager().getPrimaryModule();
- for (const Decl *D : Reader->getModuleFileLevelDecls(Mod)) {
+ for (const auto *D : Reader->getModuleFileLevelDecls(Mod)) {
if (!Fn(context, D))
return false;
}
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index ba3bd7d28c70..6161b46a9dc5 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -29,6 +29,7 @@ add_clang_library(clangFrontend
FrontendAction.cpp
FrontendActions.cpp
FrontendOptions.cpp
+ FrontendTiming.cpp
HeaderIncludeGen.cpp
InitHeaderSearch.cpp
InitPreprocessor.cpp
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 72e8f68dc051..c4504a14456d 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -21,8 +21,8 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/DJB.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -88,7 +88,7 @@ public:
void EmitData(raw_ostream& Out) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
switch (Kind) {
case IsFE: {
// Emit stat information.
@@ -128,14 +128,14 @@ public:
typedef unsigned offset_type;
static hash_value_type ComputeHash(PTHEntryKeyVariant V) {
- return llvm::HashString(V.getString());
+ return llvm::djbHash(V.getString());
}
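
The reader of this on-disk hash table must compute bucket hashes identically, so the lookup-side trait has to make the matching HashString-to-djbHash switch. A sketch of the expected reader-side shape, not code from this patch:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DJB.h"

// If writer and reader hash differently, every bucket probe misses.
static unsigned ComputeLookupHash(llvm::StringRef Key) {
  return llvm::djbHash(Key); // default seed 5381, matching the writer above
}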
static std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
const PTHEntry& E) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
unsigned n = V.getString().size() + 1 + 1;
LE.write<uint16_t>(n);
@@ -149,7 +149,7 @@ public:
static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
using namespace llvm::support;
// Emit the entry kind.
- endian::Writer<little>(Out).write<uint8_t>((unsigned)V.getKind());
+ Out << char(V.getKind());
// Emit the string.
Out.write(V.getString().data(), n - 1);
}
@@ -157,7 +157,7 @@ public:
static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
const PTHEntry& E, unsigned) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
// For file entries emit the offsets into the PTH file for token data
// and the preprocessor blocks table.
@@ -205,18 +205,17 @@ class PTHWriter {
void EmitToken(const Token& T);
void Emit8(uint32_t V) {
- using namespace llvm::support;
- endian::Writer<little>(Out).write<uint8_t>(V);
+ Out << char(V);
}
void Emit16(uint32_t V) {
using namespace llvm::support;
- endian::Writer<little>(Out).write<uint16_t>(V);
+ endian::write<uint16_t>(Out, V, little);
}
void Emit32(uint32_t V) {
using namespace llvm::support;
- endian::Writer<little>(Out).write<uint32_t>(V);
+ endian::write<uint32_t>(Out, V, little);
}
void EmitBuf(const char *Ptr, unsigned NumBytes) {
@@ -225,7 +224,7 @@ class PTHWriter {
void EmitString(StringRef V) {
using namespace llvm::support;
- endian::Writer<little>(Out).write<uint16_t>(V.size());
+ endian::write<uint16_t>(Out, V.size(), little);
EmitBuf(V.data(), V.size());
}
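
endian::Writer changes here from a class template over the endianness to a plain class taking it as a constructor argument, and a free endian::write function covers the one-shot cases. A small sketch of the new API as used above:

#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// One-shot emission; equivalent to constructing a Writer(Out, little)
// and calling write<uint16_t>(V) on it.
static void emitU16LE(llvm::raw_ostream &Out, uint16_t V) {
  using namespace llvm::support;
  endian::write<uint16_t>(Out, V, little);
}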
@@ -299,7 +298,7 @@ PTHEntry PTHWriter::LexTokens(Lexer& L) {
// Pad 0's so that we emit tokens to a 4-byte alignment.
// This speed up reading them back in.
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
uint32_t TokenOff = Out.tell();
for (uint64_t N = llvm::OffsetToAlignment(TokenOff, 4); N; --N, ++TokenOff)
LE.write<uint8_t>(0);
@@ -625,14 +624,14 @@ public:
typedef unsigned offset_type;
static hash_value_type ComputeHash(PTHIdKey* key) {
- return llvm::HashString(key->II->getName());
+ return llvm::djbHash(key->II->getName());
}
static std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
using namespace llvm::support;
unsigned n = key->II->getLength() + 1;
- endian::Writer<little>(Out).write<uint16_t>(n);
+ endian::write<uint16_t>(Out, n, little);
return std::make_pair(n, sizeof(uint32_t));
}
@@ -646,7 +645,7 @@ public:
static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
unsigned) {
using namespace llvm::support;
- endian::Writer<little>(Out).write<uint32_t>(pID);
+ endian::write<uint32_t>(Out, pID, little);
}
};
} // end anonymous namespace
@@ -662,7 +661,8 @@ std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
// (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
// Note that we use 'calloc', so all the bytes are 0.
- PTHIdKey *IIDMap = (PTHIdKey*)calloc(idcount, sizeof(PTHIdKey));
+ PTHIdKey *IIDMap = static_cast<PTHIdKey*>(
+ llvm::safe_calloc(idcount, sizeof(PTHIdKey)));
// Create the hashtable.
llvm::OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
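
llvm::safe_calloc (llvm/Support/MemAlloc.h) differs from raw calloc in that allocation failure, including an overflowing count-times-size product, is reported as a fatal error instead of yielding a null or undersized buffer. A sketch of the contract, under that assumption:

#include "llvm/Support/MemAlloc.h"
#include <cstddef>

// Never returns nullptr; an unallocatable idcount * sizeof(PTHIdKey)
// request aborts instead of letting the writer scribble past the end.
static void *allocZeroed(size_t Count, size_t EltSize) {
  return llvm::safe_calloc(Count, EltSize); // still released with free()
}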
diff --git a/lib/Frontend/CodeGenOptions.cpp b/lib/Frontend/CodeGenOptions.cpp
index 50bb9f951be4..84a39f2d570d 100644
--- a/lib/Frontend/CodeGenOptions.cpp
+++ b/lib/Frontend/CodeGenOptions.cpp
@@ -17,7 +17,7 @@ CodeGenOptions::CodeGenOptions() {
#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
#include "clang/Frontend/CodeGenOptions.def"
- RelocationModel = "pic";
+ RelocationModel = llvm::Reloc::PIC_;
memcpy(CoverageVersion, "402*", 4);
}
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 7208177aa012..155ead4ac8e8 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/MemoryBufferCache.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
@@ -302,11 +303,9 @@ CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
FileManager *CompilerInstance::createFileManager() {
if (!hasVirtualFileSystem()) {
- if (IntrusiveRefCntPtr<vfs::FileSystem> VFS =
- createVFSFromCompilerInvocation(getInvocation(), getDiagnostics()))
- setVirtualFileSystem(VFS);
- else
- return nullptr;
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ createVFSFromCompilerInvocation(getInvocation(), getDiagnostics());
+ setVirtualFileSystem(VFS);
}
FileMgr = new FileManager(getFileSystemOpts(), VirtualFileSystem);
return FileMgr.get();
@@ -464,7 +463,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
/*ShowDepth=*/false);
}
- if (DepOpts.PrintShowIncludes) {
+ if (DepOpts.ShowIncludesDest != ShowIncludesDestination::None) {
AttachHeaderIncludeGen(*PP, DepOpts,
/*ShowAllHeaders=*/true, /*OutputPath=*/"",
/*ShowDepth=*/true, /*MSStyle=*/true);
@@ -854,36 +853,7 @@ bool CompilerInstance::InitializeSourceManager(
// Figure out where to get and map in the main file.
if (InputFile != "-") {
- const FileEntry *File;
- if (Opts.FindPchSource.empty()) {
- File = FileMgr.getFile(InputFile, /*OpenFile=*/true);
- } else {
- // When building a pch file in clang-cl mode, the .h file is built as if
- // it was included by a cc file. Since the driver doesn't know about
- // all include search directories, the frontend must search the input
- // file through HeaderSearch here, as if it had been included by the
- // cc file at Opts.FindPchSource.
- const FileEntry *FindFile = FileMgr.getFile(Opts.FindPchSource);
- if (!FindFile) {
- Diags.Report(diag::err_fe_error_reading) << Opts.FindPchSource;
- return false;
- }
- const DirectoryLookup *UnusedCurDir;
- SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 16>
- Includers;
- Includers.push_back(std::make_pair(FindFile, FindFile->getDir()));
- File = HS->LookupFile(InputFile, SourceLocation(), /*isAngled=*/false,
- /*FromDir=*/nullptr,
- /*CurDir=*/UnusedCurDir, Includers,
- /*SearchPath=*/nullptr,
- /*RelativePath=*/nullptr,
- /*RequestingModule=*/nullptr,
- /*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr,
- /*SkipCache=*/true);
- // Also add the header to /showIncludes output.
- if (File)
- DepOpts.ShowIncludesPretendHeader = File->getName();
- }
+ const FileEntry *File = FileMgr.getFile(InputFile, /*OpenFile=*/true);
if (!File) {
Diags.Report(diag::err_fe_error_reading) << InputFile;
return false;
@@ -1044,7 +1014,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
return !getDiagnostics().getClient()->getNumErrors();
}
-/// \brief Determine the appropriate source input kind based on language
+/// Determine the appropriate source input kind based on language
/// options.
static InputKind::Language getLanguageFromOptions(const LangOptions &LangOpts) {
if (LangOpts.OpenCL)
@@ -1056,7 +1026,7 @@ static InputKind::Language getLanguageFromOptions(const LangOptions &LangOpts) {
return LangOpts.CPlusPlus ? InputKind::CXX : InputKind::C;
}
-/// \brief Compile a module file for the given module, using the options
+/// Compile a module file for the given module, using the options
/// provided by the importing compiler instance. Returns true if the module
/// was built without errors.
static bool
@@ -1090,6 +1060,10 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
}),
PPOpts.Macros.end());
+ // If the original compiler invocation had -fmodule-name, pass it through.
+ Invocation->getLangOpts()->ModuleName =
+ ImportingInstance.getInvocation().getLangOpts()->ModuleName;
+
// Note the name of the module we're building.
Invocation->getLangOpts()->CurrentModule = ModuleName;
@@ -1162,14 +1136,13 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// Execute the action to actually build the module in-place. Use a separate
// thread so that we get a stack large enough.
- const unsigned ThreadStackSize = 8 << 20;
llvm::CrashRecoveryContext CRC;
CRC.RunSafelyOnThread(
[&]() {
GenerateModuleFromModuleMapAction Action;
Instance.ExecuteAction(Action);
},
- ThreadStackSize);
+ DesiredStackSize);
PostBuildStep(Instance);
@@ -1186,7 +1159,20 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
return !Instance.getDiagnostics().hasErrorOccurred();
}
-/// \brief Compile a module file for the given module, using the options
+static const FileEntry *getPublicModuleMap(const FileEntry *File,
+ FileManager &FileMgr) {
+ StringRef Filename = llvm::sys::path::filename(File->getName());
+ SmallString<128> PublicFilename(File->getDir()->getName());
+ if (Filename == "module_private.map")
+ llvm::sys::path::append(PublicFilename, "module.map");
+ else if (Filename == "module.private.modulemap")
+ llvm::sys::path::append(PublicFilename, "module.modulemap");
+ else
+ return nullptr;
+ return FileMgr.getFile(PublicFilename);
+}
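
getPublicModuleMap is purely a filename rewrite next to the private map. A standalone restatement of the mapping, with an invented helper name:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"

// <dir>/module_private.map       -> <dir>/module.map
// <dir>/module.private.modulemap -> <dir>/module.modulemap
// anything else                  -> empty (no public counterpart).
static llvm::SmallString<128> publicMapPath(llvm::StringRef PrivatePath) {
  llvm::SmallString<128> Public(llvm::sys::path::parent_path(PrivatePath));
  llvm::StringRef File = llvm::sys::path::filename(PrivatePath);
  if (File == "module_private.map")
    llvm::sys::path::append(Public, "module.map");
  else if (File == "module.private.modulemap")
    llvm::sys::path::append(Public, "module.modulemap");
  else
    Public.clear();
  return Public;
}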
+
+/// Compile a module file for the given module, using the options
/// provided by the importing compiler instance. Returns true if the module
/// was built without errors.
static bool compileModuleImpl(CompilerInstance &ImportingInstance,
@@ -1202,6 +1188,13 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
bool Result;
if (const FileEntry *ModuleMapFile =
ModMap.getContainingModuleMapFile(Module)) {
+ // Canonicalize compilation to start with the public module map. This is
+ // vital for submodule declarations in the private module maps to be
+ // correctly parsed when depending on a top-level module in the public one.
+ if (const FileEntry *PublicMMFile = getPublicModuleMap(
+ ModuleMapFile, ImportingInstance.getFileManager()))
+ ModuleMapFile = PublicMMFile;
+
// Use the module map where this module resides.
Result = compileModuleImpl(
ImportingInstance, ImportLoc, Module->getTopLevelModuleName(),
@@ -1298,7 +1291,7 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
// case of timeout, build it ourselves.
Diags.Report(ModuleNameLoc, diag::remark_module_lock_timeout)
<< Module->Name;
- // Clear the lock file so that future invokations can make progress.
+ // Clear the lock file so that future invocations can make progress.
Locked.unsafeRemoveLockFile();
continue;
}
@@ -1328,7 +1321,7 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
}
}
-/// \brief Diagnose differences between the current definition of the given
+/// Diagnose differences between the current definition of the given
/// configuration macro and the definition provided on the command line.
static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro,
Module *Mod, SourceLocation ImportLoc) {
@@ -1386,13 +1379,13 @@ static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro,
}
}
-/// \brief Write a new timestamp file with the given path.
+/// Write a new timestamp file with the given path.
static void writeTimestampFile(StringRef TimestampFile) {
std::error_code EC;
llvm::raw_fd_ostream Out(TimestampFile.str(), EC, llvm::sys::fs::F_None);
}
-/// \brief Prune the module cache of modules that haven't been accessed in
+/// Prune the module cache of modules that haven't been accessed in
/// a long time.
static void pruneModuleCache(const HeaderSearchOptions &HSOpts) {
struct stat StatBuf;
@@ -1580,15 +1573,22 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
if (!ModuleManager)
createModuleManager();
+ // If -Wmodule-file-config-mismatch is mapped as an error or worse, allow the
+ // ASTReader to diagnose it, since it can produce better errors than we can.
+ bool ConfigMismatchIsRecoverable =
+ getDiagnostics().getDiagnosticLevel(diag::warn_module_config_mismatch,
+ SourceLocation())
+ <= DiagnosticsEngine::Warning;
+
auto Listener = llvm::make_unique<ReadModuleNames>(*this);
auto &ListenerRef = *Listener;
ASTReader::ListenerScope ReadModuleNamesListener(*ModuleManager,
std::move(Listener));
// Try to load the module file.
- switch (ModuleManager->ReadAST(FileName, serialization::MK_ExplicitModule,
- SourceLocation(),
- ASTReader::ARR_ConfigurationMismatch)) {
+ switch (ModuleManager->ReadAST(
+ FileName, serialization::MK_ExplicitModule, SourceLocation(),
+ ConfigMismatchIsRecoverable ? ASTReader::ARR_ConfigurationMismatch : 0)) {
case ASTReader::Success:
// We successfully loaded the module file; remember the set of provided
// modules so that we don't try to load implicit modules for them.
@@ -1653,8 +1653,10 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Retrieve the cached top-level module.
Module = Known->second;
} else if (ModuleName == getLangOpts().CurrentModule) {
- // This is the module we're building.
- Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ // This is the module we're building.
+ Module = PP->getHeaderSearchInfo().lookupModule(
+ ModuleName, /*AllowSearch*/ true,
+ /*AllowExtraModuleMapSearch*/ !IsInclusionDirective);
/// FIXME: perhaps we should (a) look for a module using the module name
// to file map (PrebuiltModuleFiles) and (b) diagnose if still not found?
//if (Module == nullptr) {
@@ -1666,7 +1668,8 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
} else {
// Search for a module with the given name.
- Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName, true,
+ !IsInclusionDirective);
HeaderSearchOptions &HSOpts =
PP->getHeaderSearchInfo().getHeaderSearchOpts();
@@ -1743,7 +1746,8 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
ImportLoc, ARRFlags)) {
case ASTReader::Success: {
if (Source != ModuleCache && !Module) {
- Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName, true,
+ !IsInclusionDirective);
if (!Module || !Module->getASTFile() ||
FileMgr->getFile(ModuleFileName) != Module->getASTFile()) {
// Error out if Module does not refer to the file in the prebuilt
@@ -1859,6 +1863,40 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
for (unsigned I = 1, N = Path.size(); I != N; ++I) {
StringRef Name = Path[I].first->getName();
clang::Module *Sub = Module->findSubmodule(Name);
+
+ // If the user is requesting Foo.Private and it doesn't exist, try to
+ // match Foo_Private and emit a warning asking for the user to write
+ // @import Foo_Private instead. FIXME: remove this when existing clients
+ // migrate off of Foo.Private syntax.
+ if (!Sub && PP->getLangOpts().ImplicitModules && Name == "Private" &&
+ Module == Module->getTopLevelModule()) {
+ SmallString<128> PrivateModule(Module->Name);
+ PrivateModule.append("_Private");
+
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> PrivPath;
+ auto &II = PP->getIdentifierTable().get(
+ PrivateModule, PP->getIdentifierInfo(Module->Name)->getTokenID());
+ PrivPath.push_back(std::make_pair(&II, Path[0].second));
+
+ if (PP->getHeaderSearchInfo().lookupModule(PrivateModule, true,
+ !IsInclusionDirective))
+ Sub =
+ loadModule(ImportLoc, PrivPath, Visibility, IsInclusionDirective);
+ if (Sub) {
+ MapPrivateSubModToTopLevel = true;
+ if (!getDiagnostics().isIgnored(
+ diag::warn_no_priv_submodule_use_toplevel, ImportLoc)) {
+ getDiagnostics().Report(Path[I].second,
+ diag::warn_no_priv_submodule_use_toplevel)
+ << Path[I].first << Module->getFullModuleName() << PrivateModule
+ << SourceRange(Path[0].second, Path[I].second)
+ << FixItHint::CreateReplacement(SourceRange(Path[0].second),
+ PrivateModule);
+ getDiagnostics().Report(Sub->DefinitionLoc,
+ diag::note_private_top_level_defined);
+ }
+ }
+ }
if (!Sub) {
// Attempt to perform typo correction to find a module name that works.
@@ -1894,39 +1932,6 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
}
}
- // If the user is requesting Foo.Private and it doesn't exist, try to
- // match Foo_Private and emit a warning asking for the user to write
- // @import Foo_Private instead. FIXME: remove this when existing clients
- // migrate off of Foo.Private syntax.
- if (!Sub && PP->getLangOpts().ImplicitModules && Name == "Private" &&
- Module == Module->getTopLevelModule()) {
- SmallString<128> PrivateModule(Module->Name);
- PrivateModule.append("_Private");
-
- SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> PrivPath;
- auto &II = PP->getIdentifierTable().get(
- PrivateModule, PP->getIdentifierInfo(Module->Name)->getTokenID());
- PrivPath.push_back(std::make_pair(&II, Path[0].second));
-
- if (PP->getHeaderSearchInfo().lookupModule(PrivateModule))
- Sub =
- loadModule(ImportLoc, PrivPath, Visibility, IsInclusionDirective);
- if (Sub) {
- MapPrivateSubModToTopLevel = true;
- if (!getDiagnostics().isIgnored(
- diag::warn_no_priv_submodule_use_toplevel, ImportLoc)) {
- getDiagnostics().Report(Path[I].second,
- diag::warn_no_priv_submodule_use_toplevel)
- << Path[I].first << Module->getFullModuleName() << PrivateModule
- << SourceRange(Path[0].second, Path[I].second)
- << FixItHint::CreateReplacement(SourceRange(Path[0].second),
- PrivateModule);
- getDiagnostics().Report(Sub->DefinitionLoc,
- diag::note_private_top_level_defined);
- }
- }
- }
-
if (!Sub) {
// No submodule by this name. Complain, and don't look for further
// submodules.
@@ -1977,6 +1982,12 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Module, ImportLoc);
}
+ // Resolve any remaining module using export_as for this one.
+ getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .resolveLinkAsDependencies(TopModule);
+
LastModuleImportLoc = ImportLoc;
LastModuleImportResult = ModuleLoadResult(Module);
return LastModuleImportResult;
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 2e8a737de4e4..78e6babd0251 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -1,4 +1,4 @@
-//===--- CompilerInvocation.cpp -------------------------------------------===//
+//===- CompilerInvocation.cpp ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,54 +10,99 @@
#include "clang/Frontend/CompilerInvocation.h"
#include "TestModuleFileExtension.h"
#include "clang/Basic/Builtins.h"
-#include "clang/Basic/FileManager.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/CommentOptions.h"
+#include "clang/Basic/DebugInfoOptions.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
+#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Basic/Visibility.h"
+#include "clang/Basic/XRayInstr.h"
#include "clang/Config/config.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
-#include "clang/Driver/Util.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/CommandLineSourceLoc.h"
+#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/LangStandard.h"
+#include "clang/Frontend/MigratorOptions.h"
+#include "clang/Frontend/PreprocessorOutputOptions.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
-#include "clang/Serialization/ASTReader.h"
+#include "clang/Sema/CodeCompleteOptions.h"
#include "clang/Serialization/ModuleFileExtension.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Linker/Linker.h"
+#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/ScopedPrinter.h"
+#include <algorithm>
#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
#include <memory>
-#include <sys/stat.h>
-#include <system_error>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
using namespace clang;
+using namespace driver;
+using namespace options;
+using namespace llvm::opt;
//===----------------------------------------------------------------------===//
// Initialization.
//===----------------------------------------------------------------------===//
CompilerInvocationBase::CompilerInvocationBase()
- : LangOpts(new LangOptions()), TargetOpts(new TargetOptions()),
- DiagnosticOpts(new DiagnosticOptions()),
- HeaderSearchOpts(new HeaderSearchOptions()),
- PreprocessorOpts(new PreprocessorOptions()) {}
+ : LangOpts(new LangOptions()), TargetOpts(new TargetOptions()),
+ DiagnosticOpts(new DiagnosticOptions()),
+ HeaderSearchOpts(new HeaderSearchOptions()),
+ PreprocessorOpts(new PreprocessorOptions()) {}
CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
: LangOpts(new LangOptions(*X.getLangOpts())),
@@ -66,18 +111,12 @@ CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
HeaderSearchOpts(new HeaderSearchOptions(X.getHeaderSearchOpts())),
PreprocessorOpts(new PreprocessorOptions(X.getPreprocessorOpts())) {}
-CompilerInvocationBase::~CompilerInvocationBase() {}
+CompilerInvocationBase::~CompilerInvocationBase() = default;
//===----------------------------------------------------------------------===//
// Deserialization (from args)
//===----------------------------------------------------------------------===//
-using namespace clang::driver;
-using namespace clang::driver::options;
-using namespace llvm::opt;
-
-//
-
static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
unsigned DefaultOpt = 0;
@@ -91,7 +130,7 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
if (A->getOption().matches(options::OPT_Ofast))
return 3;
- assert (A->getOption().matches(options::OPT_O));
+ assert(A->getOption().matches(options::OPT_O));
StringRef S(A->getValue());
if (S == "s" || S == "z" || S.empty())
@@ -125,7 +164,7 @@ static unsigned getOptimizationLevelSize(ArgList &Args) {
static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
OptSpecifier GroupWithValue,
std::vector<std::string> &Diagnostics) {
- for (Arg *A : Args.filtered(Group)) {
+ for (auto *A : Args.filtered(Group)) {
if (A->getOption().getKind() == Option::FlagClass) {
// The argument is a pure flag (such as OPT_Wall or OPT_Wdeprecated). Add
// its name (minus the "W" or "R" at the beginning) to the warning list.
@@ -135,7 +174,7 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
Diagnostics.push_back(A->getOption().getName().drop_front(1).rtrim("=-"));
} else {
// Otherwise, add its value (for OPT_W_Joined and similar).
- for (const char *Arg : A->getValues())
+ for (const auto *Arg : A->getValues())
Diagnostics.emplace_back(Arg);
}
}
@@ -157,7 +196,6 @@ static void getAllNoBuiltinFuncValues(ArgList &Args,
static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- using namespace options;
bool Success = true;
if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
StringRef Name = A->getValue();
@@ -273,31 +311,31 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
StringRef checkerList = A->getValue();
SmallVector<StringRef, 4> checkers;
checkerList.split(checkers, ",");
- for (StringRef checker : checkers)
+ for (auto checker : checkers)
Opts.CheckersControlList.emplace_back(checker, enable);
}
// Go through the analyzer configuration options.
- for (const Arg *A : Args.filtered(OPT_analyzer_config)) {
+ for (const auto *A : Args.filtered(OPT_analyzer_config)) {
A->claim();
    // We can have a list of comma-separated config names, e.g.:
// '-analyzer-config key1=val1,key2=val2'
StringRef configList = A->getValue();
SmallVector<StringRef, 4> configVals;
configList.split(configVals, ",");
- for (unsigned i = 0, e = configVals.size(); i != e; ++i) {
+ for (const auto &configVal : configVals) {
StringRef key, val;
- std::tie(key, val) = configVals[i].split("=");
+ std::tie(key, val) = configVal.split("=");
if (val.empty()) {
Diags.Report(SourceLocation(),
- diag::err_analyzer_config_no_value) << configVals[i];
+ diag::err_analyzer_config_no_value) << configVal;
Success = false;
break;
}
if (val.find('=') != StringRef::npos) {
Diags.Report(SourceLocation(),
diag::err_analyzer_config_multiple_values)
- << configVals[i];
+ << configVal;
Success = false;
break;
}
@@ -305,6 +343,14 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
}
+ llvm::raw_string_ostream os(Opts.FullCompilerInvocation);
+ for (unsigned i = 0; i < Args.getNumInputArgStrings(); ++i) {
+ if (i != 0)
+ os << " ";
+ os << Args.getArgString(i);
+ }
+ os.flush();
+
return Success;
}
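
The loop added above records the complete cc1 invocation by space-joining
every input argument string into Opts.FullCompilerInvocation, so analyzer
reports can show exactly how the compiler was invoked. A minimal standalone
sketch of the same joining logic, using std::ostringstream in place of
llvm::raw_string_ostream (the names are illustrative, not Clang's):

    #include <cstddef>
    #include <sstream>
    #include <string>
    #include <vector>

    // Join argv-style strings with single spaces, skipping the separator
    // before the first element, as the loop above does.
    static std::string joinArgs(const std::vector<std::string> &Args) {
      std::ostringstream OS;
      for (std::size_t I = 0; I < Args.size(); ++I) {
        if (I != 0)
          OS << " ";
        OS << Args[I];
      }
      return OS.str();
    }

For example, joinArgs({"-cc1", "-analyzer-config", "key1=val1"}) returns
"-cc1 -analyzer-config key1=val1".
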
@@ -330,18 +376,26 @@ static StringRef getCodeModel(ArgList &Args, DiagnosticsEngine &Diags) {
return "default";
}
-static StringRef getRelocModel(ArgList &Args, DiagnosticsEngine &Diags) {
+static llvm::Reloc::Model getRelocModel(ArgList &Args,
+ DiagnosticsEngine &Diags) {
if (Arg *A = Args.getLastArg(OPT_mrelocation_model)) {
StringRef Value = A->getValue();
- if (Value == "static" || Value == "pic" || Value == "ropi" ||
- Value == "rwpi" || Value == "ropi-rwpi" || Value == "dynamic-no-pic")
- return Value;
+ auto RM = llvm::StringSwitch<llvm::Optional<llvm::Reloc::Model>>(Value)
+ .Case("static", llvm::Reloc::Static)
+ .Case("pic", llvm::Reloc::PIC_)
+ .Case("ropi", llvm::Reloc::ROPI)
+ .Case("rwpi", llvm::Reloc::RWPI)
+ .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
+ .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC)
+ .Default(None);
+ if (RM.hasValue())
+ return *RM;
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
}
- return "pic";
+ return llvm::Reloc::PIC_;
}
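
getRelocModel now returns the typed llvm::Reloc::Model instead of a raw
StringRef, mapping each spelling through llvm::StringSwitch into an
llvm::Optional so an unrecognized value can be diagnosed before falling
back to PIC. A rough standalone sketch of the same pattern using
std::optional; the enum here is a stand-in, not the LLVM type:

    #include <optional>
    #include <string>

    enum class RelocModel { Static, PIC, ROPI, RWPI, ROPI_RWPI, DynamicNoPIC };

    // Map the -mrelocation-model spelling to an enum; nullopt marks an
    // invalid spelling so the caller can emit a diagnostic and default.
    static std::optional<RelocModel> parseRelocModel(const std::string &V) {
      if (V == "static")         return RelocModel::Static;
      if (V == "pic")            return RelocModel::PIC;
      if (V == "ropi")           return RelocModel::ROPI;
      if (V == "rwpi")           return RelocModel::RWPI;
      if (V == "ropi-rwpi")      return RelocModel::ROPI_RWPI;
      if (V == "dynamic-no-pic") return RelocModel::DynamicNoPIC;
      return std::nullopt;
    }
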
-/// \brief Create a new Regex instance out of the string value in \p RpassArg.
+/// Create a new Regex instance out of the string value in \p RpassArg.
/// It returns a pointer to the newly generated Regex instance.
static std::shared_ptr<llvm::Regex>
GenerateOptimizationRemarkRegex(DiagnosticsEngine &Diags, ArgList &Args,
@@ -392,6 +446,25 @@ static void parseSanitizerKinds(StringRef FlagName,
}
}
+static void parseXRayInstrumentationBundle(StringRef FlagName, StringRef Bundle,
+ ArgList &Args, DiagnosticsEngine &D,
+ XRayInstrSet &S) {
+ llvm::SmallVector<StringRef, 2> BundleParts;
+ llvm::SplitString(Bundle, BundleParts, ",");
+ for (const auto B : BundleParts) {
+    auto Mask = parseXRayInstrValue(B);
+    if (Mask == XRayInstrKind::None) {
+      if (B != "none")
+        D.Report(diag::err_drv_invalid_value) << FlagName << Bundle;
+      else
+        S.Mask = Mask;
+    } else if (Mask == XRayInstrKind::All) {
+      S.Mask = Mask;
+    } else {
+      S.set(Mask, true);
+    }
+ }
+}
+
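
parseXRayInstrumentationBundle, added above, folds each comma-separated
bundle part into an accumulated instrumentation mask: "all" and an explicit
"none" overwrite the whole set, while individual kinds are OR-ed in. A
standalone sketch of that accumulation, with hypothetical bit values
standing in for XRayInstrKind:

    #include <cstdint>
    #include <string>
    #include <vector>

    using Mask = std::uint32_t;
    constexpr Mask None = 0, Function = 1, Custom = 2, Typed = 4;
    constexpr Mask All = Function | Custom | Typed;

    // "none" and unrecognized spellings both map to None; the caller
    // tells them apart by re-checking the spelling, as the code above does.
    static Mask parseOne(const std::string &S) {
      if (S == "function") return Function;
      if (S == "custom")   return Custom;
      if (S == "typed")    return Typed;
      if (S == "all")      return All;
      return None;
    }

    static Mask parseBundle(const std::vector<std::string> &Parts, Mask S) {
      for (const auto &P : Parts) {
        Mask M = parseOne(P);
        if (M == None) {
          if (P == "none")
            S = None;      // explicit "none" clears the whole set
          // otherwise: diagnose an invalid bundle value here
        } else if (M == All) {
          S = All;         // "all" overwrites rather than ORs
        } else {
          S |= M;          // accumulate an individual kind
        }
      }
      return S;
    }
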
// Set the profile kind for fprofile-instrument.
static void setPGOInstrumentor(CodeGenOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
@@ -409,8 +482,7 @@ static void setPGOInstrumentor(CodeGenOptions &Opts, ArgList &Args,
<< S;
return;
}
- CodeGenOptions::ProfileInstrKind Instrumentor =
- static_cast<CodeGenOptions::ProfileInstrKind>(I);
+ auto Instrumentor = static_cast<CodeGenOptions::ProfileInstrKind>(I);
Opts.setProfileInstr(Instrumentor);
}
@@ -434,8 +506,8 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts,
static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags,
- const TargetOptions &TargetOpts) {
- using namespace options;
+ const TargetOptions &TargetOpts,
+ const FrontendOptions &FrontendOpts) {
bool Success = true;
llvm::Triple Triple = llvm::Triple(TargetOpts.Triple);
@@ -474,7 +546,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.ExperimentalNewPassManager = Args.hasFlag(
OPT_fexperimental_new_pass_manager, OPT_fno_experimental_new_pass_manager,
- /* Default */ false);
+ /* Default */ ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER);
Opts.DebugPassManager =
Args.hasFlag(OPT_fdebug_pass_manager, OPT_fno_debug_pass_manager,
@@ -529,6 +601,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
+ Opts.EmbedSource = Args.hasArg(OPT_gembed_source);
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ))
Opts.DebugPrefixMap.insert(StringRef(Arg).split('='));
@@ -552,7 +625,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasFlag(OPT_ffine_grained_bitfield_accesses,
OPT_fno_fine_grained_bitfield_accesses, false);
Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
- Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
+ Opts.MergeAllConstants = Args.hasArg(OPT_fmerge_all_constants);
Opts.NoCommon = Args.hasArg(OPT_fno_common);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
Opts.OptimizeSize = getOptimizationLevelSize(Args);
@@ -580,33 +653,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
if (!Opts.ProfileInstrumentUsePath.empty())
setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
- if (Arg *A = Args.getLastArg(OPT_fclang_abi_compat_EQ)) {
- Opts.setClangABICompat(CodeGenOptions::ClangABI::Latest);
-
- StringRef Ver = A->getValue();
- std::pair<StringRef, StringRef> VerParts = Ver.split('.');
- unsigned Major, Minor = 0;
-
- // Check the version number is valid: either 3.x (0 <= x <= 9) or
- // y or y.0 (4 <= y <= current version).
- if (!VerParts.first.startswith("0") &&
- !VerParts.first.getAsInteger(10, Major) &&
- 3 <= Major && Major <= CLANG_VERSION_MAJOR &&
- (Major == 3 ? VerParts.second.size() == 1 &&
- !VerParts.second.getAsInteger(10, Minor)
- : VerParts.first.size() == Ver.size() ||
- VerParts.second == "0")) {
- // Got a valid version number.
- if (Major == 3 && Minor <= 8)
- Opts.setClangABICompat(CodeGenOptions::ClangABI::Ver3_8);
- else if (Major <= 4)
- Opts.setClangABICompat(CodeGenOptions::ClangABI::Ver4);
- } else if (Ver != "latest") {
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << A->getValue();
- }
- }
-
Opts.CoverageMapping =
Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
@@ -615,6 +661,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
+ Opts.RegisterGlobalDtorsWithAtExit =
+ Args.hasArg(OPT_fregister_global_dtors_with_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
Opts.CodeModel = getCodeModel(Args, Diags);
Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
@@ -623,6 +671,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableFree = Args.hasArg(OPT_disable_free);
Opts.DiscardValueNames = Args.hasArg(OPT_discard_value_names);
Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
+ Opts.NoEscapingBlockTailCalls =
+ Args.hasArg(OPT_fno_escaping_block_tail_calls);
Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
@@ -640,14 +690,20 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math));
Opts.Reassociate = Args.hasArg(OPT_mreassociate);
- Opts.FlushDenorm = Args.hasArg(OPT_cl_denorms_are_zero);
+ Opts.FlushDenorm = Args.hasArg(OPT_cl_denorms_are_zero) ||
+ (Args.hasArg(OPT_fcuda_is_device) &&
+ Args.hasArg(OPT_fcuda_flush_denormals_to_zero));
Opts.CorrectlyRoundedDivSqrt =
Args.hasArg(OPT_cl_fp32_correctly_rounded_divide_sqrt);
+  Opts.UniformWGSize = Args.hasArg(OPT_cl_uniform_work_group_size);
Opts.Reciprocals = Args.getAllArgValues(OPT_mrecip_EQ);
Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math);
Opts.NoTrappingMath = Args.hasArg(OPT_fno_trapping_math);
+ Opts.StrictFloatCastOverflow =
+ !Args.hasArg(OPT_fno_strict_float_cast_overflow);
+
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
- Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
@@ -665,6 +721,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
Opts.StrictReturn = !Args.hasArg(OPT_fno_strict_return);
Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
+ Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
@@ -682,6 +739,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
OPT_fno_function_sections, false);
Opts.DataSections = Args.hasFlag(OPT_fdata_sections,
OPT_fno_data_sections, false);
+ Opts.StackSizeSection =
+ Args.hasFlag(OPT_fstack_size_section, OPT_fno_stack_size_section, false);
Opts.UniqueSectionNames = Args.hasFlag(OPT_funique_section_names,
OPT_fno_unique_section_names, true);
@@ -689,14 +748,16 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoUseJumpTables = Args.hasArg(OPT_fno_jump_tables);
+ Opts.NullPointerIsValid = Args.hasArg(OPT_fno_delete_null_pointer_checks);
+
Opts.ProfileSampleAccurate = Args.hasArg(OPT_fprofile_sample_accurate);
Opts.PrepareForLTO = Args.hasArg(OPT_flto, OPT_flto_EQ);
- Opts.EmitSummaryIndex = false;
+ Opts.PrepareForThinLTO = false;
if (Arg *A = Args.getLastArg(OPT_flto_EQ)) {
StringRef S = A->getValue();
if (S == "thin")
- Opts.EmitSummaryIndex = true;
+ Opts.PrepareForThinLTO = true;
else if (S != "full")
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
}
@@ -707,6 +768,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
<< A->getAsString(Args) << "-x ir";
Opts.ThinLTOIndexFile = Args.getLastArgValue(OPT_fthinlto_index_EQ);
}
+ if (Arg *A = Args.getLastArg(OPT_save_temps_EQ))
+ Opts.SaveTempsFilePrefix =
+ llvm::StringSwitch<std::string>(A->getValue())
+ .Case("obj", FrontendOpts.OutputFile)
+ .Default(llvm::sys::path::filename(FrontendOpts.OutputFile).str());
+
Opts.ThinLinkBitcodeFile = Args.getLastArgValue(OPT_fthin_link_bitcode_EQ);
Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
@@ -719,6 +786,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
+ Opts.ControlFlowGuard = Args.hasArg(OPT_cfguard);
+
Opts.DisableGCov = Args.hasArg(OPT_test_coverage);
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
@@ -741,7 +810,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
}
- // Handle -fembed-bitcode option.
+ // Handle -fembed-bitcode option.
if (Arg *A = Args.getLastArg(OPT_fembed_bitcode_EQ)) {
StringRef Name = A->getValue();
unsigned Model = llvm::StringSwitch<unsigned>(Name)
@@ -787,15 +856,44 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_finstrument_functions_after_inlining);
Opts.InstrumentFunctionEntryBare =
Args.hasArg(OPT_finstrument_function_entry_bare);
- Opts.XRayInstrumentFunctions = Args.hasArg(OPT_fxray_instrument);
+
+  Opts.XRayInstrumentFunctions = Args.hasArg(OPT_fxray_instrument);
Opts.XRayAlwaysEmitCustomEvents =
Args.hasArg(OPT_fxray_always_emit_customevents);
+ Opts.XRayAlwaysEmitTypedEvents =
+ Args.hasArg(OPT_fxray_always_emit_typedevents);
Opts.XRayInstructionThreshold =
getLastArgIntValue(Args, OPT_fxray_instruction_threshold_EQ, 200, Diags);
+
+ auto XRayInstrBundles =
+ Args.getAllArgValues(OPT_fxray_instrumentation_bundle);
+ if (XRayInstrBundles.empty())
+ Opts.XRayInstrumentationBundle.Mask = XRayInstrKind::All;
+ else
+ for (const auto &A : XRayInstrBundles)
+ parseXRayInstrumentationBundle("-fxray-instrumentation-bundle=", A, Args,
+ Diags, Opts.XRayInstrumentationBundle);
+
Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
Opts.CallFEntry = Args.hasArg(OPT_mfentry);
Opts.EmitOpenCLArgMetadata = Args.hasArg(OPT_cl_kernel_arg_info);
+ if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
+ StringRef Name = A->getValue();
+ if (Name == "full") {
+ Opts.CFProtectionReturn = 1;
+ Opts.CFProtectionBranch = 1;
+ } else if (Name == "return")
+ Opts.CFProtectionReturn = 1;
+ else if (Name == "branch")
+ Opts.CFProtectionBranch = 1;
+ else if (Name != "none") {
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ Success = false;
+ }
+ }
+
if (const Arg *A = Args.getLastArg(OPT_compress_debug_sections,
OPT_compress_debug_sections_EQ)) {
if (A->getOption().getID() == OPT_compress_debug_sections) {
@@ -813,7 +911,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
- for (auto A : Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_cuda_bitcode)) {
+ for (auto *A :
+ Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_cuda_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
F.Filename = A->getValue();
if (A->getOption().matches(OPT_mlink_cuda_bitcode)) {
@@ -855,6 +954,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SanitizeCfiICallGeneralizePointers =
Args.hasArg(OPT_fsanitize_cfi_icall_generalize_pointers);
Opts.SanitizeStats = Args.hasArg(OPT_fsanitize_stats);
+ if (Arg *A = Args.getLastArg(
+ OPT_fsanitize_address_poison_class_member_array_new_cookie,
+ OPT_fno_sanitize_address_poison_class_member_array_new_cookie)) {
+ Opts.SanitizeAddressPoisonClassMemberArrayNewCookie =
+ A->getOption().getID() ==
+ OPT_fsanitize_address_poison_class_member_array_new_cookie;
+ }
if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_after_scope,
OPT_fno_sanitize_address_use_after_scope)) {
Opts.SanitizeAddressUseAfterScope =
@@ -879,6 +985,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.StackProbeSize = StackProbeSize;
}
+ Opts.NoStackArgProbe = Args.hasArg(OPT_mno_stack_arg_probe);
+
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
StringRef Name = A->getValue();
unsigned Method = llvm::StringSwitch<unsigned>(Name)
@@ -895,8 +1003,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
- Opts.EmulatedTLS =
- Args.hasFlag(OPT_femulated_tls, OPT_fno_emulated_tls, false);
+ if (Args.getLastArg(OPT_femulated_tls) ||
+ Args.getLastArg(OPT_fno_emulated_tls)) {
+ Opts.ExplicitEmulatedTLS = true;
+ Opts.EmulatedTLS =
+ Args.hasFlag(OPT_femulated_tls, OPT_fno_emulated_tls, false);
+ }
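
The -femulated-tls handling above becomes tri-state: ExplicitEmulatedTLS
records whether either flag appeared at all, so a target can apply its own
default when the user said nothing. A small sketch of that pattern,
assuming a std::optional carries the last flag seen:

    #include <optional>

    struct TLSOpts {
      bool ExplicitEmulatedTLS = false; // was either flag given at all?
      bool EmulatedTLS = false;         // meaningful only when explicit
    };

    // Flag holds true for -femulated-tls, false for -fno-emulated-tls,
    // and nullopt when neither appeared on the command line.
    static TLSOpts parseTLS(std::optional<bool> Flag) {
      TLSOpts O;
      if (Flag) {
        O.ExplicitEmulatedTLS = true;
        O.EmulatedTLS = *Flag;
      }
      return O;
    }
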
if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
StringRef Name = A->getValue();
@@ -967,7 +1079,9 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
bool UsingProfile = UsingSampleProfile ||
(Opts.getProfileUse() != CodeGenOptions::ProfileNone);
- if (Opts.DiagnosticsWithHotness && !UsingProfile)
+ if (Opts.DiagnosticsWithHotness && !UsingProfile &&
+ // An IR file will contain PGO as metadata
+ IK.getLanguage() != InputKind::LLVM_IR)
Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
<< "-fdiagnostics-show-hotness";
@@ -999,20 +1113,23 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.getAllArgValues(OPT_fsanitize_trap_EQ), Diags,
Opts.SanitizeTrap);
- Opts.CudaGpuBinaryFileNames =
- Args.getAllArgValues(OPT_fcuda_include_gpubinary);
+ Opts.CudaGpuBinaryFileName =
+ Args.getLastArgValue(OPT_fcuda_include_gpubinary);
Opts.Backchain = Args.hasArg(OPT_mbackchain);
Opts.EmitCheckPathComponentsToStrip = getLastArgIntValue(
Args, OPT_fsanitize_undefined_strip_path_components_EQ, 0, Diags);
+ Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true);
+
+ Opts.Addrsig = Args.hasArg(OPT_faddrsig);
+
return Success;
}
static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
ArgList &Args) {
- using namespace options;
Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
Opts.Targets = Args.getAllArgValues(OPT_MT);
Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
@@ -1021,7 +1138,17 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
- Opts.PrintShowIncludes = Args.hasArg(OPT_show_includes);
+ if (Args.hasArg(OPT_show_includes)) {
+ // Writing both /showIncludes and preprocessor output to stdout
+ // would produce interleaved output, so use stderr for /showIncludes.
+  // This matches cl.exe's behavior when /E, /EP or /P is passed.
+ if (Args.hasArg(options::OPT_E) || Args.hasArg(options::OPT_P))
+ Opts.ShowIncludesDest = ShowIncludesDestination::Stderr;
+ else
+ Opts.ShowIncludesDest = ShowIncludesDestination::Stdout;
+ } else {
+ Opts.ShowIncludesDest = ShowIncludesDestination::None;
+ }
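
The /showIncludes block above routes header names to stderr whenever
preprocessed output owns stdout, matching cl.exe. Reduced to a pure
function, assuming the two conditions are precomputed booleans:

    enum class ShowIncludesDestination { None, Stdout, Stderr };

    // PreprocessToStdout models /E, /EP or /P being present.
    static ShowIncludesDestination pickDest(bool ShowIncludes,
                                            bool PreprocessToStdout) {
      if (!ShowIncludes)
        return ShowIncludesDestination::None;
      return PreprocessToStdout ? ShowIncludesDestination::Stderr
                                : ShowIncludesDestination::Stdout;
    }
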
Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot);
Opts.ModuleDependencyOutputDir =
Args.getLastArgValue(OPT_module_dependency_dir);
@@ -1032,7 +1159,7 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
 // we let make / ninja know about this implicit dependency.
Opts.ExtraDeps = Args.getAllArgValues(OPT_fdepfile_entry);
// Only the -fmodule-file=<file> form.
- for (const Arg *A : Args.filtered(OPT_fmodule_file)) {
+ for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
Opts.ExtraDeps.push_back(Val);
@@ -1049,7 +1176,7 @@ static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
Colors_Off,
Colors_Auto
} ShowColors = DefaultColor ? Colors_Auto : Colors_Off;
- for (Arg *A : Args) {
+ for (auto *A : Args) {
const Option &O = A->getOption();
if (O.matches(options::OPT_fcolor_diagnostics) ||
O.matches(options::OPT_fdiagnostics_color)) {
@@ -1095,7 +1222,6 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags,
bool DefaultDiagColor, bool DefaultShowOpt) {
- using namespace options;
bool Success = true;
Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
@@ -1190,7 +1316,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Success = false;
}
else
- std::sort(Opts.VerifyPrefixes.begin(), Opts.VerifyPrefixes.end());
+ llvm::sort(Opts.VerifyPrefixes.begin(), Opts.VerifyPrefixes.end());
DiagnosticLevelMask DiagMask = DiagnosticLevelMask::None;
Success &= parseDiagnosticLevelMask("-verify-ignore-unexpected=",
Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ),
@@ -1262,7 +1388,6 @@ static bool parseTestModuleFileExtensionArg(StringRef Arg,
static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
bool &IsHeaderFile) {
- using namespace options;
Opts.ProgramAction = frontend::ParseSyntaxOnly;
if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
switch (A->getOption().getID()) {
@@ -1278,6 +1403,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::ASTPrint; break;
case OPT_ast_view:
Opts.ProgramAction = frontend::ASTView; break;
+ case OPT_compiler_options_dump:
+ Opts.ProgramAction = frontend::DumpCompilerOptions; break;
case OPT_dump_raw_tokens:
Opts.ProgramAction = frontend::DumpRawTokens; break;
case OPT_dump_tokens:
@@ -1323,6 +1450,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::PrintPreamble; break;
case OPT_E:
Opts.ProgramAction = frontend::PrintPreprocessedInput; break;
+ case OPT_templight_dump:
+ Opts.ProgramAction = frontend::TemplightDump; break;
case OPT_rewrite_macros:
Opts.ProgramAction = frontend::RewriteMacros; break;
case OPT_rewrite_objc:
@@ -1344,7 +1473,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ActionName = A->getValue();
}
Opts.AddPluginActions = Args.getAllArgValues(OPT_add_plugin);
- for (const Arg *AA : Args.filtered(OPT_plugin_arg))
+ for (const auto *AA : Args.filtered(OPT_plugin_arg))
Opts.PluginArgs[AA->getValue(0)].emplace_back(AA->getValue(1));
for (const std::string &Arg :
@@ -1397,7 +1526,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.GenerateGlobalModuleIndex = Opts.UseGlobalModuleIndex;
Opts.ModuleMapFiles = Args.getAllArgValues(OPT_fmodule_map_file);
// Only the -fmodule-file=<file> form.
- for (const Arg *A : Args.filtered(OPT_fmodule_file)) {
+ for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
Opts.ModuleFiles.push_back(Val);
@@ -1416,12 +1545,13 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
= !Args.hasArg(OPT_no_code_completion_ns_level_decls);
Opts.CodeCompleteOpts.IncludeBriefComments
= Args.hasArg(OPT_code_completion_brief_comments);
+ Opts.CodeCompleteOpts.IncludeFixIts
+ = Args.hasArg(OPT_code_completion_with_fixits);
Opts.OverrideRecordLayoutsFile
= Args.getLastArgValue(OPT_foverride_record_layout_EQ);
Opts.AuxTriple =
llvm::Triple::normalize(Args.getLastArgValue(OPT_aux_triple));
- Opts.FindPchSource = Args.getLastArgValue(OPT_find_pch_source_EQ);
Opts.StatsFile = Args.getLastArgValue(OPT_stats_file);
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
@@ -1502,6 +1632,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("c", InputKind::C)
.Case("cl", InputKind::OpenCL)
.Case("cuda", InputKind::CUDA)
+ .Case("hip", InputKind::HIP)
.Case("c++", InputKind::CXX)
.Case("objective-c", InputKind::ObjC)
.Case("objective-c++", InputKind::ObjCXX)
@@ -1585,7 +1716,6 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
const std::string &WorkingDir) {
- using namespace options;
Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
Opts.Verbose = Args.hasArg(OPT_v);
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
@@ -1608,12 +1738,12 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
Opts.ModuleUserBuildPath = Args.getLastArgValue(OPT_fmodules_user_build_path);
// Only the -fmodule-file=<name>=<file> form.
- for (const Arg *A : Args.filtered(OPT_fmodule_file)) {
+ for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') != StringRef::npos)
Opts.PrebuiltModuleFiles.insert(Val.split('='));
}
- for (const Arg *A : Args.filtered(OPT_fprebuilt_module_path))
+ for (const auto *A : Args.filtered(OPT_fprebuilt_module_path))
Opts.AddPrebuiltModulePath(A->getValue());
Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
Opts.ModulesHashContent = Args.hasArg(OPT_fmodules_hash_content);
@@ -1634,7 +1764,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
if (const Arg *A = Args.getLastArg(OPT_fmodule_format_EQ))
Opts.ModuleFormat = A->getValue();
- for (const Arg *A : Args.filtered(OPT_fmodules_ignore_macro)) {
+ for (const auto *A : Args.filtered(OPT_fmodules_ignore_macro)) {
StringRef MacroDef = A->getValue();
Opts.ModulesIgnoreMacros.insert(
llvm::CachedHashString(MacroDef.split('=').first));
@@ -1644,7 +1774,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
bool IsIndexHeaderMap = false;
bool IsSysrootSpecified =
Args.hasArg(OPT__sysroot_EQ) || Args.hasArg(OPT_isysroot);
- for (const Arg *A : Args.filtered(OPT_I, OPT_F, OPT_index_header_map)) {
+ for (const auto *A : Args.filtered(OPT_I, OPT_F, OPT_index_header_map)) {
if (A->getOption().matches(OPT_index_header_map)) {
// -index-header-map applies to the next -I or -F.
IsIndexHeaderMap = true;
@@ -1671,7 +1801,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
// Add -iprefix/-iwithprefix/-iwithprefixbefore options.
StringRef Prefix = ""; // FIXME: This isn't the correct default prefix.
- for (const Arg *A :
+ for (const auto *A :
Args.filtered(OPT_iprefix, OPT_iwithprefix, OPT_iwithprefixbefore)) {
if (A->getOption().matches(OPT_iprefix))
Prefix = A->getValue();
@@ -1681,31 +1811,31 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
Opts.AddPath(Prefix.str() + A->getValue(), frontend::Angled, false, true);
}
- for (const Arg *A : Args.filtered(OPT_idirafter))
+ for (const auto *A : Args.filtered(OPT_idirafter))
Opts.AddPath(A->getValue(), frontend::After, false, true);
- for (const Arg *A : Args.filtered(OPT_iquote))
+ for (const auto *A : Args.filtered(OPT_iquote))
Opts.AddPath(A->getValue(), frontend::Quoted, false, true);
- for (const Arg *A : Args.filtered(OPT_isystem, OPT_iwithsysroot))
+ for (const auto *A : Args.filtered(OPT_isystem, OPT_iwithsysroot))
Opts.AddPath(A->getValue(), frontend::System, false,
!A->getOption().matches(OPT_iwithsysroot));
- for (const Arg *A : Args.filtered(OPT_iframework))
+ for (const auto *A : Args.filtered(OPT_iframework))
Opts.AddPath(A->getValue(), frontend::System, true, true);
- for (const Arg *A : Args.filtered(OPT_iframeworkwithsysroot))
+ for (const auto *A : Args.filtered(OPT_iframeworkwithsysroot))
Opts.AddPath(A->getValue(), frontend::System, /*IsFramework=*/true,
/*IgnoreSysRoot=*/false);
// Add the paths for the various language specific isystem flags.
- for (const Arg *A : Args.filtered(OPT_c_isystem))
+ for (const auto *A : Args.filtered(OPT_c_isystem))
Opts.AddPath(A->getValue(), frontend::CSystem, false, true);
- for (const Arg *A : Args.filtered(OPT_cxx_isystem))
+ for (const auto *A : Args.filtered(OPT_cxx_isystem))
Opts.AddPath(A->getValue(), frontend::CXXSystem, false, true);
- for (const Arg *A : Args.filtered(OPT_objc_isystem))
+ for (const auto *A : Args.filtered(OPT_objc_isystem))
 Opts.AddPath(A->getValue(), frontend::ObjCSystem, false, true);
- for (const Arg *A : Args.filtered(OPT_objcxx_isystem))
+ for (const auto *A : Args.filtered(OPT_objcxx_isystem))
Opts.AddPath(A->getValue(), frontend::ObjCXXSystem, false, true);
// Add the internal paths from a driver that detects standard include paths.
- for (const Arg *A :
+ for (const auto *A :
Args.filtered(OPT_internal_isystem, OPT_internal_externc_isystem)) {
frontend::IncludeDirGroup Group = frontend::System;
if (A->getOption().matches(OPT_internal_externc_isystem))
@@ -1714,12 +1844,12 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
}
// Add the path prefixes which are implicitly treated as being system headers.
- for (const Arg *A :
+ for (const auto *A :
Args.filtered(OPT_system_header_prefix, OPT_no_system_header_prefix))
Opts.AddSystemHeaderPrefix(
A->getValue(), A->getOption().matches(OPT_system_header_prefix));
- for (const Arg *A : Args.filtered(OPT_ivfsoverlay))
+ for (const auto *A : Args.filtered(OPT_ivfsoverlay))
Opts.AddVFSOverlayFile(A->getValue());
}
@@ -1754,22 +1884,37 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
break;
case InputKind::Asm:
case InputKind::C:
+#if defined(CLANG_DEFAULT_STD_C)
+ LangStd = CLANG_DEFAULT_STD_C;
+#else
// The PS4 uses C99 as the default C standard.
if (T.isPS4())
LangStd = LangStandard::lang_gnu99;
else
LangStd = LangStandard::lang_gnu11;
+#endif
break;
case InputKind::ObjC:
+#if defined(CLANG_DEFAULT_STD_C)
+ LangStd = CLANG_DEFAULT_STD_C;
+#else
LangStd = LangStandard::lang_gnu11;
+#endif
break;
case InputKind::CXX:
case InputKind::ObjCXX:
+#if defined(CLANG_DEFAULT_STD_CXX)
+ LangStd = CLANG_DEFAULT_STD_CXX;
+#else
LangStd = LangStandard::lang_gnucxx14;
+#endif
break;
case InputKind::RenderScript:
LangStd = LangStandard::lang_c99;
break;
+ case InputKind::HIP:
+ LangStd = LangStandard::lang_hip;
+ break;
}
}
@@ -1799,6 +1944,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.OpenCLVersion = 120;
else if (LangStd == LangStandard::lang_opencl20)
Opts.OpenCLVersion = 200;
+ else if (LangStd == LangStandard::lang_openclcpp)
+ Opts.OpenCLCPlusPlusVersion = 100;
// OpenCL has some additional defaults.
if (Opts.OpenCL) {
@@ -1808,13 +1955,15 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.setDefaultFPContractMode(LangOptions::FPC_On);
Opts.NativeHalfType = 1;
Opts.NativeHalfArgsAndReturns = 1;
+ Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
// Include default header file for OpenCL.
if (Opts.IncludeDefaultHeader) {
PPOpts.Includes.push_back("opencl-c.h");
}
}
- Opts.CUDA = IK.getLanguage() == InputKind::CUDA;
+ Opts.HIP = IK.getLanguage() == InputKind::HIP;
+ Opts.CUDA = IK.getLanguage() == InputKind::CUDA || Opts.HIP;
if (Opts.CUDA)
// Set default FP_CONTRACT to FAST.
Opts.setDefaultFPContractMode(LangOptions::FPC_Fast);
@@ -1885,6 +2034,10 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
return S.getLanguage() == InputKind::CUDA ||
S.getLanguage() == InputKind::CXX;
+ case InputKind::HIP:
+ return S.getLanguage() == InputKind::CXX ||
+ S.getLanguage() == InputKind::HIP;
+
case InputKind::Asm:
// Accept (and ignore) all -std= values.
// FIXME: The -std= value is not ignored; it affects the tokenization
@@ -1912,6 +2065,8 @@ static const StringRef GetInputKindName(InputKind IK) {
return "CUDA";
case InputKind::RenderScript:
return "RenderScript";
+ case InputKind::HIP:
+ return "HIP";
case InputKind::Asm:
return "Asm";
@@ -1975,6 +2130,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
+ if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
+ StringRef Name = A->getValue();
+ if (Name == "full" || Name == "branch") {
+ Opts.CFProtectionBranch = 1;
+ }
+ }
// -cl-std only applies for OpenCL language standards.
// Override the -std option in this case.
if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
@@ -1984,6 +2145,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
.Cases("cl1.1", "CL1.1", LangStandard::lang_opencl11)
.Cases("cl1.2", "CL1.2", LangStandard::lang_opencl12)
.Cases("cl2.0", "CL2.0", LangStandard::lang_opencl20)
+ .Case("c++", LangStandard::lang_openclcpp)
.Default(LangStandard::lang_unspecified);
if (OpenCLLangStd == LangStandard::lang_unspecified) {
@@ -2004,11 +2166,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// this option was added for compatibility with OpenCL 1.0.
if (Args.getLastArg(OPT_cl_strict_aliasing)
&& Opts.OpenCLVersion > 100) {
- std::string VerSpec = llvm::to_string(Opts.OpenCLVersion / 100) +
- std::string(".") +
- llvm::to_string((Opts.OpenCLVersion % 100) / 10);
Diags.Report(diag::warn_option_invalid_ocl_version)
- << VerSpec << Args.getLastArg(OPT_cl_strict_aliasing)->getAsString(Args);
+ << Opts.getOpenCLVersionTuple().getAsString()
+ << Args.getLastArg(OPT_cl_strict_aliasing)->getAsString(Args);
}
// We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
@@ -2019,6 +2179,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.GNUKeywords = Args.hasFlag(OPT_fgnu_keywords, OPT_fno_gnu_keywords,
Opts.GNUKeywords);
+ Opts.Digraphs = Args.hasFlag(OPT_fdigraphs, OPT_fno_digraphs, Opts.Digraphs);
+
if (Args.hasArg(OPT_fno_operator_names))
Opts.CXXOperatorNames = 0;
@@ -2031,12 +2193,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fno_cuda_host_device_constexpr))
Opts.CUDAHostDeviceConstexpr = 0;
- if (Opts.CUDAIsDevice && Args.hasArg(OPT_fcuda_flush_denormals_to_zero))
- Opts.CUDADeviceFlushDenormalsToZero = 1;
-
if (Opts.CUDAIsDevice && Args.hasArg(OPT_fcuda_approx_transcendentals))
Opts.CUDADeviceApproxTranscendentals = 1;
+ Opts.CUDARelocatableDeviceCode = Args.hasArg(OPT_fcuda_rdc);
+
if (Opts.ObjC1) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
StringRef value = arg->getValue();
@@ -2177,12 +2338,27 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ObjCExceptions = Args.hasArg(OPT_fobjc_exceptions);
Opts.CXXExceptions = Args.hasArg(OPT_fcxx_exceptions);
+ // -ffixed-point
+ Opts.FixedPoint =
+ Args.hasFlag(OPT_ffixed_point, OPT_fno_fixed_point, /*Default=*/false) &&
+ !Opts.CPlusPlus;
+ Opts.PaddingOnUnsignedFixedPoint =
+ Args.hasFlag(OPT_fpadding_on_unsigned_fixed_point,
+ OPT_fno_padding_on_unsigned_fixed_point,
+ /*Default=*/false) &&
+ Opts.FixedPoint;
+
// Handle exception personalities
Arg *A = Args.getLastArg(options::OPT_fsjlj_exceptions,
options::OPT_fseh_exceptions,
options::OPT_fdwarf_exceptions);
if (A) {
const Option &Opt = A->getOption();
+ llvm::Triple T(TargetOpts.Triple);
+ if (T.isWindowsMSVCEnvironment())
+ Diags.Report(diag::err_fe_invalid_exception_model)
+ << Opt.getName() << T.str();
+
Opts.SjLjExceptions = Opt.matches(options::OPT_fsjlj_exceptions);
Opts.SEHExceptions = Opt.matches(options::OPT_fseh_exceptions);
Opts.DWARFExceptions = Opt.matches(options::OPT_fdwarf_exceptions);
@@ -2194,7 +2370,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.RTTI = Opts.CPlusPlus && !Args.hasArg(OPT_fno_rtti);
Opts.RTTIData = Opts.RTTI && !Args.hasArg(OPT_fno_rtti_data);
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
- && Opts.OpenCLVersion >= 200);
+ && Opts.OpenCLVersion == 200);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
Opts.CoroutinesTS = Args.hasArg(OPT_fcoroutines_ts);
@@ -2219,6 +2395,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
+ Opts.Char8 = Args.hasArg(OPT_fchar8__t);
if (const Arg *A = Args.getLastArg(OPT_fwchar_type_EQ)) {
Opts.WCharSize = llvm::StringSwitch<unsigned>(A->getValue())
.Case("char", 1)
@@ -2297,10 +2474,11 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
- Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name_EQ);
+ Opts.ModuleName = Args.getLastArgValue(OPT_fmodule_name_EQ);
+ Opts.CurrentModule = Opts.ModuleName;
Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
- std::sort(Opts.ModuleFeatures.begin(), Opts.ModuleFeatures.end());
+ llvm::sort(Opts.ModuleFeatures.begin(), Opts.ModuleFeatures.end());
Opts.NativeHalfType |= Args.hasArg(OPT_fnative_half_type);
Opts.NativeHalfArgsAndReturns |= Args.hasArg(OPT_fnative_half_arguments_and_returns);
// Enable HalfArgsAndReturns if present in Args or if NativeHalfArgsAndReturns
@@ -2407,16 +2585,25 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Check if -fopenmp is specified.
Opts.OpenMP = Args.hasArg(options::OPT_fopenmp) ? 1 : 0;
+ // Check if -fopenmp-simd is specified.
+ bool IsSimdSpecified =
+ Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
+ /*Default=*/false);
+ Opts.OpenMPSimd = !Opts.OpenMP && IsSimdSpecified;
Opts.OpenMPUseTLS =
Opts.OpenMP && !Args.hasArg(options::OPT_fnoopenmp_use_tls);
Opts.OpenMPIsDevice =
Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_device);
+ bool IsTargetSpecified =
+ Opts.OpenMPIsDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
- if (Opts.OpenMP) {
- int Version =
- getLastArgIntValue(Args, OPT_fopenmp_version_EQ, Opts.OpenMP, Diags);
- if (Version != 0)
+ if (Opts.OpenMP || Opts.OpenMPSimd) {
+ if (int Version = getLastArgIntValue(
+ Args, OPT_fopenmp_version_EQ,
+ (IsSimdSpecified || IsTargetSpecified) ? 45 : Opts.OpenMP, Diags))
Opts.OpenMP = Version;
+ else if (IsSimdSpecified || IsTargetSpecified)
+ Opts.OpenMP = 45;
// Provide diagnostic when a given target is not expected to be an OpenMP
// device or host.
if (!Opts.OpenMPIsDevice) {
@@ -2426,7 +2613,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Add unsupported host targets here:
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
- Diags.Report(clang::diag::err_drv_omp_host_target_not_supported)
+ Diags.Report(diag::err_drv_omp_host_target_not_supported)
<< TargetOpts.Triple;
break;
}
@@ -2435,7 +2622,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// Set the flag to prevent the implementation from emitting device exception
// handling code for those requiring so.
- if (Opts.OpenMPIsDevice && T.isNVPTX()) {
+ Opts.OpenMPHostCXXExceptions = Opts.Exceptions && Opts.CXXExceptions;
+ if ((Opts.OpenMPIsDevice && T.isNVPTX()) || Opts.OpenCLCPlusPlus) {
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
@@ -2454,7 +2642,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
TT.getArch() == llvm::Triple::nvptx64 ||
TT.getArch() == llvm::Triple::x86 ||
TT.getArch() == llvm::Triple::x86_64))
- Diags.Report(clang::diag::err_drv_invalid_omp_target) << A->getValue(i);
+ Diags.Report(diag::err_drv_invalid_omp_target) << A->getValue(i);
else
Opts.OMPTargetTriples.push_back(TT);
}
@@ -2465,10 +2653,14 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Arg *A = Args.getLastArg(options::OPT_fopenmp_host_ir_file_path)) {
Opts.OMPHostIRFile = A->getValue();
if (!llvm::sys::fs::exists(Opts.OMPHostIRFile))
- Diags.Report(clang::diag::err_drv_omp_host_ir_file_not_found)
+ Diags.Report(diag::err_drv_omp_host_ir_file_not_found)
<< Opts.OMPHostIRFile;
}
+  // Set CUDA mode for OpenMP target NVPTX if specified in the options.
+ Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && T.isNVPTX() &&
+ Args.hasArg(options::OPT_fopenmp_cuda_mode);
+
// Record whether the __DEPRECATED define was requested.
Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
OPT_fno_deprecated_macro,
@@ -2543,14 +2735,55 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Args.hasFlag(OPT_fxray_always_emit_customevents,
OPT_fnoxray_always_emit_customevents, false);
+ // -fxray-always-emit-typedevents
+ Opts.XRayAlwaysEmitTypedEvents =
+ Args.hasFlag(OPT_fxray_always_emit_typedevents,
+                   OPT_fnoxray_always_emit_typedevents, false);
+
// -fxray-{always,never}-instrument= filenames.
Opts.XRayAlwaysInstrumentFiles =
Args.getAllArgValues(OPT_fxray_always_instrument);
Opts.XRayNeverInstrumentFiles =
Args.getAllArgValues(OPT_fxray_never_instrument);
+ Opts.XRayAttrListFiles = Args.getAllArgValues(OPT_fxray_attr_list);
+
+ // -fforce-emit-vtables
+ Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
// -fallow-editor-placeholders
Opts.AllowEditorPlaceholders = Args.hasArg(OPT_fallow_editor_placeholders);
+
+ if (Arg *A = Args.getLastArg(OPT_fclang_abi_compat_EQ)) {
+ Opts.setClangABICompat(LangOptions::ClangABI::Latest);
+
+ StringRef Ver = A->getValue();
+ std::pair<StringRef, StringRef> VerParts = Ver.split('.');
+ unsigned Major, Minor = 0;
+
+ // Check the version number is valid: either 3.x (0 <= x <= 9) or
+ // y or y.0 (4 <= y <= current version).
+ if (!VerParts.first.startswith("0") &&
+ !VerParts.first.getAsInteger(10, Major) &&
+ 3 <= Major && Major <= CLANG_VERSION_MAJOR &&
+ (Major == 3 ? VerParts.second.size() == 1 &&
+ !VerParts.second.getAsInteger(10, Minor)
+ : VerParts.first.size() == Ver.size() ||
+ VerParts.second == "0")) {
+ // Got a valid version number.
+ if (Major == 3 && Minor <= 8)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver3_8);
+ else if (Major <= 4)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver4);
+ else if (Major <= 6)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver6);
+ } else if (Ver != "latest") {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ }
+
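
The -fclang-abi-compat= parsing moves here into ParseLangArgs (its
CodeGenOptions twin is deleted earlier in this patch) and now recognizes
releases up to 6. It accepts "3.x" with a single minor digit, a bare major
"y" or "y.0" up to the current release, or "latest". A standalone sketch of
just the validation rule, using std::string in place of StringRef; this is
an approximation of the check, not Clang's code:

    #include <cctype>
    #include <string>

    // Approximate validator: "latest", "3.x" (single digit x), or
    // "y"/"y.0" with 4 <= y <= Current. Leading zeros are rejected.
    static bool isValidABICompatVersion(const std::string &Ver,
                                        unsigned Current) {
      if (Ver == "latest")
        return true;
      auto Dot = Ver.find('.');
      std::string MajS = Ver.substr(0, Dot);
      if (MajS.empty() || MajS[0] == '0')
        return false;
      for (char C : MajS)
        if (!std::isdigit(static_cast<unsigned char>(C)))
          return false;
      unsigned Major = static_cast<unsigned>(std::stoul(MajS));
      if (Major < 3 || Major > Current)
        return false;
      if (Major == 3) // exactly one minor digit is required, e.g. "3.8"
        return Dot != std::string::npos && Ver.size() == Dot + 2 &&
               std::isdigit(static_cast<unsigned char>(Ver[Dot + 1]));
      return Dot == std::string::npos || Ver.substr(Dot + 1) == "0";
    }
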
+ Opts.CompleteMemberPointers = Args.hasArg(OPT_fcomplete_member_pointers);
+ Opts.BuildingPCHWithObjectFile = Args.hasArg(OPT_building_pch_with_obj);
}
static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
@@ -2579,9 +2812,11 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::RewriteObjC:
case frontend::RewriteTest:
case frontend::RunAnalysis:
+ case frontend::TemplightDump:
case frontend::MigrateSource:
return false;
+ case frontend::DumpCompilerOptions:
case frontend::DumpRawTokens:
case frontend::DumpTokens:
case frontend::InitOnly:
@@ -2595,12 +2830,11 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
}
static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
- FileManager &FileMgr,
DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
- using namespace options;
Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
+ Opts.PCHThroughHeader = Args.getLastArgValue(OPT_pch_through_header_EQ);
if (const Arg *A = Args.getLastArg(OPT_token_cache))
Opts.TokenCache = A->getValue();
else
@@ -2611,7 +2845,7 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.AllowPCHWithCompilerErrors = Args.hasArg(OPT_fallow_pch_with_errors);
Opts.DumpDeserializedPCHDecls = Args.hasArg(OPT_dump_deserialized_pch_decls);
- for (const Arg *A : Args.filtered(OPT_error_on_deserialized_pch_decl))
+ for (const auto *A : Args.filtered(OPT_error_on_deserialized_pch_decl))
Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue());
if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
@@ -2630,8 +2864,19 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
}
}
+ // Add the __CET__ macro if a CFProtection option is set.
+ if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
+ StringRef Name = A->getValue();
+ if (Name == "branch")
+ Opts.addMacroDef("__CET__=1");
+ else if (Name == "return")
+ Opts.addMacroDef("__CET__=2");
+ else if (Name == "full")
+ Opts.addMacroDef("__CET__=3");
+ }
+
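
The block above defines __CET__ from -fcf-protection=; the value reads as a
bitmask, matching GCC's convention: bit 0 set by branch protection (IBT),
bit 1 by return protection (shadow stack), both by "full". As a
hypothetical standalone helper:

    #include <string>

    // Returns the macro definition to install, or nullptr for "none"
    // and unknown values (which define no macro).
    static const char *cetMacroFor(const std::string &Name) {
      if (Name == "branch") return "__CET__=1";
      if (Name == "return") return "__CET__=2";
      if (Name == "full")   return "__CET__=3"; // branch | return
      return nullptr;
    }
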
// Add macros from the command line.
- for (const Arg *A : Args.filtered(OPT_D, OPT_U)) {
+ for (const auto *A : Args.filtered(OPT_D, OPT_U)) {
if (A->getOption().matches(OPT_D))
Opts.addMacroDef(A->getValue());
else
@@ -2641,13 +2886,13 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros);
// Add the ordered list of -includes.
- for (const Arg *A : Args.filtered(OPT_include))
+ for (const auto *A : Args.filtered(OPT_include))
Opts.Includes.emplace_back(A->getValue());
- for (const Arg *A : Args.filtered(OPT_chain_include))
+ for (const auto *A : Args.filtered(OPT_chain_include))
Opts.ChainedIncludes.emplace_back(A->getValue());
- for (const Arg *A : Args.filtered(OPT_remap_file)) {
+ for (const auto *A : Args.filtered(OPT_remap_file)) {
std::pair<StringRef, StringRef> Split = StringRef(A->getValue()).split(';');
if (Split.second.empty()) {
@@ -2681,8 +2926,6 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
ArgList &Args,
frontend::ActionKind Action) {
- using namespace options;
-
if (isStrictlyPreprocessorAction(Action))
Opts.ShowCPP = !Args.hasArg(OPT_dM);
else
@@ -2700,7 +2943,6 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- using namespace options;
Opts.ABI = Args.getLastArgValue(OPT_target_abi);
if (Arg *A = Args.getLastArg(OPT_meabi)) {
StringRef Value = A->getValue();
@@ -2720,11 +2962,15 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
Opts.FPMath = Args.getLastArgValue(OPT_mfpmath);
Opts.FeaturesAsWritten = Args.getAllArgValues(OPT_target_feature);
Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
- Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
+ Opts.Triple = Args.getLastArgValue(OPT_triple);
// Use the default target triple if unspecified.
if (Opts.Triple.empty())
Opts.Triple = llvm::sys::getDefaultTargetTriple();
+ Opts.Triple = llvm::Triple::normalize(Opts.Triple);
Opts.OpenCLExtensionsAsWritten = Args.getAllArgValues(OPT_cl_ext_EQ);
+ Opts.ForceEnableInt128 = Args.hasArg(OPT_fforce_enable_int128);
+ Opts.NVPTXUseShortPointers = Args.hasFlag(
+ options::OPT_fcuda_short_ptr, options::OPT_fno_cuda_short_ptr, false);
}
bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
@@ -2750,8 +2996,14 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
}
// Issue errors on unknown arguments.
- for (const Arg *A : Args.filtered(OPT_UNKNOWN)) {
- Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(Args);
+ for (const auto *A : Args.filtered(OPT_UNKNOWN)) {
+ auto ArgString = A->getAsString(Args);
+ std::string Nearest;
+ if (Opts->findNearest(ArgString, Nearest, IncludedFlagsBitmask) > 1)
+ Diags.Report(diag::err_drv_unknown_argument) << ArgString;
+ else
+ Diags.Report(diag::err_drv_unknown_argument_with_suggestion)
+ << ArgString << Nearest;
Success = false;
}
@@ -2768,7 +3020,7 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
LangOpts.IsHeaderFile);
ParseTargetArgs(Res.getTargetOpts(), Args, Diags);
Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags,
- Res.getTargetOpts());
+ Res.getTargetOpts(), Res.getFrontendOpts());
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args,
Res.getFileSystemOpts().WorkingDir);
if (DashX.getFormat() == InputKind::Precompiled ||
@@ -2793,6 +3045,9 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
LangOpts.ObjCExceptions = 1;
}
+ LangOpts.FunctionAlignment =
+ getLastArgIntValue(Args, OPT_function_alignment, 0, Diags);
+
if (LangOpts.CUDA) {
// During CUDA device-side compilation, the aux triple is the
// triple used for host compilation.
@@ -2811,12 +3066,7 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
!LangOpts.Sanitize.has(SanitizerKind::Address) &&
!LangOpts.Sanitize.has(SanitizerKind::Memory);
- // FIXME: ParsePreprocessorArgs uses the FileManager to read the contents of
- // PCH file and find the original header name. Remove the need to do that in
- // ParsePreprocessorArgs and remove the FileManager
- // parameters from the function and the "FileManager.h" #include.
- FileManager FileMgr(Res.getFileSystemOpts());
- ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, FileMgr, Diags,
+ ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, Diags,
Res.getFrontendOpts().ProgramAction);
ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), Args,
Res.getFrontendOpts().ProgramAction);
@@ -2864,29 +3114,26 @@ std::string CompilerInvocation::getModuleHash() const {
// Extend the signature with the target options.
code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
TargetOpts->ABI);
- for (unsigned i = 0, n = TargetOpts->FeaturesAsWritten.size(); i != n; ++i)
- code = hash_combine(code, TargetOpts->FeaturesAsWritten[i]);
+ for (const auto &FeatureAsWritten : TargetOpts->FeaturesAsWritten)
+ code = hash_combine(code, FeatureAsWritten);
// Extend the signature with preprocessor options.
const PreprocessorOptions &ppOpts = getPreprocessorOpts();
const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
code = hash_combine(code, ppOpts.UsePredefines, ppOpts.DetailedRecord);
- for (std::vector<std::pair<std::string, bool/*isUndef*/>>::const_iterator
- I = getPreprocessorOpts().Macros.begin(),
- IEnd = getPreprocessorOpts().Macros.end();
- I != IEnd; ++I) {
+ for (const auto &I : getPreprocessorOpts().Macros) {
// If we're supposed to ignore this macro for the purposes of modules,
// don't put it into the hash.
if (!hsOpts.ModulesIgnoreMacros.empty()) {
// Check whether we're ignoring this macro.
- StringRef MacroDef = I->first;
+ StringRef MacroDef = I.first;
if (hsOpts.ModulesIgnoreMacros.count(
llvm::CachedHashString(MacroDef.split('=').first)))
continue;
}
- code = hash_combine(code, I->first, I->second);
+ code = hash_combine(code, I.first, I.second);
}
// Extend the signature with the sysroot and other header search options.
@@ -2919,8 +3166,6 @@ std::string CompilerInvocation::getModuleHash() const {
return llvm::APInt(64, code).toString(36, /*Signed=*/false);
}
-namespace clang {
-
template<typename IntTy>
static IntTy getLastArgIntValueImpl(const ArgList &Args, OptSpecifier Id,
IntTy Default,
@@ -2936,6 +3181,7 @@ static IntTy getLastArgIntValueImpl(const ArgList &Args, OptSpecifier Id,
return Res;
}
+namespace clang {
// Declared in clang/Frontend/Utils.h.
int getLastArgIntValue(const ArgList &Args, OptSpecifier Id, int Default,
@@ -2980,22 +3226,22 @@ createVFSFromCompilerInvocation(const CompilerInvocation &CI,
IntrusiveRefCntPtr<vfs::OverlayFileSystem> Overlay(
new vfs::OverlayFileSystem(BaseFS));
 // Earlier VFS files are on the bottom.
- for (const std::string &File : CI.getHeaderSearchOpts().VFSOverlayFiles) {
+ for (const auto &File : CI.getHeaderSearchOpts().VFSOverlayFiles) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
BaseFS->getBufferForFile(File);
if (!Buffer) {
Diags.Report(diag::err_missing_vfs_overlay_file) << File;
- return IntrusiveRefCntPtr<vfs::FileSystem>();
+ continue;
}
IntrusiveRefCntPtr<vfs::FileSystem> FS = vfs::getVFSFromYAML(
std::move(Buffer.get()), /*DiagHandler*/ nullptr, File);
- if (!FS.get()) {
+ if (FS)
+ Overlay->pushOverlay(FS);
+ else
Diags.Report(diag::err_invalid_vfs_overlay) << File;
- return IntrusiveRefCntPtr<vfs::FileSystem>();
- }
- Overlay->pushOverlay(FS);
}
return Overlay;
}
-} // end namespace clang
+
+} // namespace clang
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index 561eb9c4a316..f89722eeb9ed 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -63,7 +63,8 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override {
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override {
if (!File)
DepCollector.maybeAddDependency(FileName, /*FromModule*/false,
/*IsSystem*/false, /*IsModuleFile*/false,
@@ -162,6 +163,7 @@ class DFGImpl : public PPCallbacks {
bool SeenMissingHeader;
bool IncludeModuleFiles;
DependencyOutputFormat OutputFormat;
+ unsigned InputFileIndex;
private:
bool FileMatchesDepCriteria(const char *Filename,
@@ -176,26 +178,33 @@ public:
AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
SeenMissingHeader(false),
IncludeModuleFiles(Opts.IncludeModuleFiles),
- OutputFormat(Opts.OutputFormat) {
+ OutputFormat(Opts.OutputFormat),
+ InputFileIndex(0) {
for (const auto &ExtraDep : Opts.ExtraDeps) {
- AddFilename(ExtraDep);
+ if (AddFilename(ExtraDep))
+ ++InputFileIndex;
}
}
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
+
+ void FileSkipped(const FileEntry &SkippedFile, const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) override;
+
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override;
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override;
void EndOfMainFile() override {
OutputDependencyFile();
}
- void AddFilename(StringRef Filename);
+ bool AddFilename(StringRef Filename);
bool includeSystemHeaders() const { return IncludeSystemHeaders; }
bool includeModuleFiles() const { return IncludeModuleFiles; }
};
@@ -291,6 +300,16 @@ void DFGImpl::FileChanged(SourceLocation Loc,
AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
}
+void DFGImpl::FileSkipped(const FileEntry &SkippedFile,
+ const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) {
+ StringRef Filename = SkippedFile.getName();
+ if (!FileMatchesDepCriteria(Filename.data(), FileType))
+ return;
+
+ AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
+}
+
void DFGImpl::InclusionDirective(SourceLocation HashLoc,
const Token &IncludeTok,
StringRef FileName,
@@ -299,7 +318,8 @@ void DFGImpl::InclusionDirective(SourceLocation HashLoc,
const FileEntry *File,
StringRef SearchPath,
StringRef RelativePath,
- const Module *Imported) {
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) {
if (!File) {
if (AddMissingHeaderDeps)
AddFilename(FileName);
@@ -308,9 +328,12 @@ void DFGImpl::InclusionDirective(SourceLocation HashLoc,
}
}
-void DFGImpl::AddFilename(StringRef Filename) {
- if (FilesSet.insert(Filename).second)
+bool DFGImpl::AddFilename(StringRef Filename) {
+ if (FilesSet.insert(Filename).second) {
Files.push_back(Filename);
+ return true;
+ }
+ return false;
}
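
AddFilename now reports whether the name was newly inserted, which the
constructor above uses to track the input file's position among the extra
dependencies. The underlying set-plus-vector pattern, sketched standalone:

    #include <set>
    #include <string>
    #include <vector>

    struct FileList {
      std::vector<std::string> Files; // preserves first-seen order
      std::set<std::string> FilesSet; // fast membership test

      // Returns true only on first insertion, mirroring the new AddFilename.
      bool addFilename(const std::string &F) {
        if (FilesSet.insert(F).second) {
          Files.push_back(F);
          return true;
        }
        return false;
      }
    };
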
/// Print the filename, with escaping or quoting that accommodates the three
@@ -446,8 +469,10 @@ void DFGImpl::OutputDependencyFile() {
// Create phony targets if requested.
if (PhonyTarget && !Files.empty()) {
- // Skip the first entry, this is always the input file itself.
- for (auto I = Files.begin() + 1, E = Files.end(); I != E; ++I) {
+ unsigned Index = 0;
+ for (auto I = Files.begin(), E = Files.end(); I != E; ++I) {
+ if (Index++ == InputFileIndex)
+ continue;
OS << '\n';
PrintFilename(OS, *I, OutputFormat);
OS << ":\n";
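
The phony-target loop above no longer assumes the input file is the first
entry; it skips whichever position InputFileIndex recorded. A sketch of the
emission, with std::cout standing in for the dependency-file stream:

    #include <iostream>
    #include <string>
    #include <vector>

    // Emit an empty (phony) rule for every dependency except the input
    // file itself, so make does not error when a header disappears.
    static void emitPhonyTargets(const std::vector<std::string> &Files,
                                 unsigned InputFileIndex) {
      for (unsigned I = 0; I < Files.size(); ++I) {
        if (I == InputFileIndex)
          continue;
        std::cout << "\n" << Files[I] << ":\n";
      }
    }
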
diff --git a/lib/Frontend/DependencyGraph.cpp b/lib/Frontend/DependencyGraph.cpp
index 67a977e38be2..660f664447ab 100644
--- a/lib/Frontend/DependencyGraph.cpp
+++ b/lib/Frontend/DependencyGraph.cpp
@@ -50,7 +50,8 @@ public:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override;
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override;
void EndOfMainFile() override {
OutputGraphFile();
@@ -65,15 +66,17 @@ void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
SysRoot));
}
-void DependencyGraphCallback::InclusionDirective(SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
- const Module *Imported) {
+void DependencyGraphCallback::InclusionDirective(
+ SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ CharSourceRange FilenameRange,
+ const FileEntry *File,
+ StringRef SearchPath,
+ StringRef RelativePath,
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) {
if (!File)
return;
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index e3263843e29b..fb0a92d1f8c6 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -1,4 +1,4 @@
-//===--- DiagnosticRenderer.cpp - Diagnostic Pretty-Printing --------------===//
+//===- DiagnosticRenderer.cpp - Diagnostic Pretty-Printing ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,24 +8,34 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditedSource.h"
#include "clang/Edit/EditsReceiver.h"
#include "clang/Lex/Lexer.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <utility>
+
using namespace clang;
DiagnosticRenderer::DiagnosticRenderer(const LangOptions &LangOpts,
DiagnosticOptions *DiagOpts)
- : LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
+ : LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
-DiagnosticRenderer::~DiagnosticRenderer() {}
+DiagnosticRenderer::~DiagnosticRenderer() = default;
namespace {
@@ -34,24 +44,24 @@ class FixitReceiver : public edit::EditsReceiver {
public:
FixitReceiver(SmallVectorImpl<FixItHint> &MergedFixits)
- : MergedFixits(MergedFixits) { }
+ : MergedFixits(MergedFixits) {}
+
void insert(SourceLocation loc, StringRef text) override {
MergedFixits.push_back(FixItHint::CreateInsertion(loc, text));
}
+
void replace(CharSourceRange range, StringRef text) override {
MergedFixits.push_back(FixItHint::CreateReplacement(range, text));
}
};
-}
+} // namespace
static void mergeFixits(ArrayRef<FixItHint> FixItHints,
const SourceManager &SM, const LangOptions &LangOpts,
SmallVectorImpl<FixItHint> &MergedFixits) {
edit::Commit commit(SM, LangOpts);
- for (ArrayRef<FixItHint>::const_iterator
- I = FixItHints.begin(), E = FixItHints.end(); I != E; ++I) {
- const FixItHint &Hint = *I;
+ for (const auto &Hint : FixItHints)
if (Hint.CodeToInsert.empty()) {
if (Hint.InsertFromRange.isValid())
commit.insertFromRange(Hint.RemoveRange.getBegin(),
@@ -67,7 +77,6 @@ static void mergeFixits(ArrayRef<FixItHint> FixItHints,
commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
/*afterToken=*/false, Hint.BeforePreviousInsertions);
}
- }
edit::EditedSource Editor(SM, LangOpts);
if (Editor.commit(commit)) {
@@ -100,11 +109,9 @@ void DiagnosticRenderer::emitDiagnostic(FullSourceLoc Loc,
FixItHints = MergedFixits;
}
- for (ArrayRef<FixItHint>::const_iterator I = FixItHints.begin(),
- E = FixItHints.end();
- I != E; ++I)
- if (I->RemoveRange.isValid())
- MutableRanges.push_back(I->RemoveRange);
+ for (const auto &Hint : FixItHints)
+ if (Hint.RemoveRange.isValid())
+ MutableRanges.push_back(Hint.RemoveRange);
FullSourceLoc UnexpandedLoc = Loc;
@@ -134,7 +141,6 @@ void DiagnosticRenderer::emitDiagnostic(FullSourceLoc Loc,
endDiagnostic(D, Level);
}
-
void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
emitDiagnostic(Diag.getLocation(), Diag.getLevel(), Diag.getMessage(),
Diag.getRanges(), Diag.getFixIts(),
@@ -146,7 +152,7 @@ void DiagnosticRenderer::emitBasicNote(StringRef Message) {
Message, None, DiagOrStoredDiag());
}
-/// \brief Prints an include stack when appropriate for a particular
+/// Prints an include stack when appropriate for a particular
/// diagnostic level and location.
///
/// This routine handles all the logic of suppressing particular include
@@ -180,7 +186,7 @@ void DiagnosticRenderer::emitIncludeStack(FullSourceLoc Loc, PresumedLoc PLoc,
}
}
-/// \brief Helper to recursivly walk up the include stack and print each layer
+/// Helper to recursively walk up the include stack and print each layer
/// on the way back down.
void DiagnosticRenderer::emitIncludeStackRecursively(FullSourceLoc Loc) {
if (Loc.isInvalid()) {
@@ -210,7 +216,7 @@ void DiagnosticRenderer::emitIncludeStackRecursively(FullSourceLoc Loc) {
emitIncludeLocation(Loc, PLoc);
}
-/// \brief Emit the module import stack associated with the current location.
+/// Emit the module import stack associated with the current location.
void DiagnosticRenderer::emitImportStack(FullSourceLoc Loc) {
if (Loc.isInvalid()) {
emitModuleBuildStack(Loc.getManager());
@@ -221,7 +227,7 @@ void DiagnosticRenderer::emitImportStack(FullSourceLoc Loc) {
emitImportStackRecursively(NextImportLoc.first, NextImportLoc.second);
}
-/// \brief Helper to recursivly walk up the import stack and print each layer
+/// Helper to recursively walk up the import stack and print each layer
/// on the way back down.
void DiagnosticRenderer::emitImportStackRecursively(FullSourceLoc Loc,
StringRef ModuleName) {
@@ -239,14 +245,14 @@ void DiagnosticRenderer::emitImportStackRecursively(FullSourceLoc Loc,
emitImportLocation(Loc, PLoc, ModuleName);
}
-/// \brief Emit the module build stack, for cases where a module is (re-)built
+/// Emit the module build stack, for cases where a module is (re-)built
/// on demand.
void DiagnosticRenderer::emitModuleBuildStack(const SourceManager &SM) {
ModuleBuildStack Stack = SM.getModuleBuildStack();
- for (unsigned I = 0, N = Stack.size(); I != N; ++I) {
- emitBuildingModuleLocation(Stack[I].second, Stack[I].second.getPresumedLoc(
- DiagOpts->ShowPresumedLoc),
- Stack[I].first);
+ for (const auto &I : Stack) {
+ emitBuildingModuleLocation(I.second, I.second.getPresumedLoc(
+ DiagOpts->ShowPresumedLoc),
+ I.first);
}
}
@@ -256,41 +262,54 @@ static SourceLocation
retrieveMacroLocation(SourceLocation Loc, FileID MacroFileID,
FileID CaretFileID,
const SmallVectorImpl<FileID> &CommonArgExpansions,
- bool IsBegin, const SourceManager *SM) {
+ bool IsBegin, const SourceManager *SM,
+ bool &IsTokenRange) {
assert(SM->getFileID(Loc) == MacroFileID);
if (MacroFileID == CaretFileID)
return Loc;
if (!Loc.isMacroID())
- return SourceLocation();
+ return {};
- SourceLocation MacroLocation, MacroArgLocation;
+ CharSourceRange MacroRange, MacroArgRange;
if (SM->isMacroArgExpansion(Loc)) {
// Only look at the immediate spelling location of this macro argument if
// the other location in the source range is also present in that expansion.
if (std::binary_search(CommonArgExpansions.begin(),
CommonArgExpansions.end(), MacroFileID))
- MacroLocation = SM->getImmediateSpellingLoc(Loc);
- MacroArgLocation = IsBegin ? SM->getImmediateExpansionRange(Loc).first
- : SM->getImmediateExpansionRange(Loc).second;
+ MacroRange =
+ CharSourceRange(SM->getImmediateSpellingLoc(Loc), IsTokenRange);
+ MacroArgRange = SM->getImmediateExpansionRange(Loc);
} else {
- MacroLocation = IsBegin ? SM->getImmediateExpansionRange(Loc).first
- : SM->getImmediateExpansionRange(Loc).second;
- MacroArgLocation = SM->getImmediateSpellingLoc(Loc);
+ MacroRange = SM->getImmediateExpansionRange(Loc);
+ MacroArgRange =
+ CharSourceRange(SM->getImmediateSpellingLoc(Loc), IsTokenRange);
}
+ SourceLocation MacroLocation =
+ IsBegin ? MacroRange.getBegin() : MacroRange.getEnd();
if (MacroLocation.isValid()) {
MacroFileID = SM->getFileID(MacroLocation);
+ bool TokenRange = IsBegin ? IsTokenRange : MacroRange.isTokenRange();
MacroLocation =
retrieveMacroLocation(MacroLocation, MacroFileID, CaretFileID,
- CommonArgExpansions, IsBegin, SM);
- if (MacroLocation.isValid())
+ CommonArgExpansions, IsBegin, SM, TokenRange);
+ if (MacroLocation.isValid()) {
+ IsTokenRange = TokenRange;
return MacroLocation;
+ }
}
+ // If we moved the end of the range to an expansion location, we now have
+ // a range of the same kind as the expansion range.
+ if (!IsBegin)
+ IsTokenRange = MacroArgRange.isTokenRange();
+
+ SourceLocation MacroArgLocation =
+ IsBegin ? MacroArgRange.getBegin() : MacroArgRange.getEnd();
MacroFileID = SM->getFileID(MacroArgLocation);
return retrieveMacroLocation(MacroArgLocation, MacroFileID, CaretFileID,
- CommonArgExpansions, IsBegin, SM);
+ CommonArgExpansions, IsBegin, SM, IsTokenRange);
}
/// Walk up the chain of macro expansions and collect the FileIDs identifying the
@@ -304,7 +323,7 @@ static void getMacroArgExpansionFileIDs(SourceLocation Loc,
Loc = SM->getImmediateSpellingLoc(Loc);
} else {
auto ExpRange = SM->getImmediateExpansionRange(Loc);
- Loc = IsBegin ? ExpRange.first : ExpRange.second;
+ Loc = IsBegin ? ExpRange.getBegin() : ExpRange.getEnd();
}
}
}
@@ -318,8 +337,8 @@ static void computeCommonMacroArgExpansionFileIDs(
SmallVector<FileID, 4> EndArgExpansions;
getMacroArgExpansionFileIDs(Begin, BeginArgExpansions, /*IsBegin=*/true, SM);
getMacroArgExpansionFileIDs(End, EndArgExpansions, /*IsBegin=*/false, SM);
- std::sort(BeginArgExpansions.begin(), BeginArgExpansions.end());
- std::sort(EndArgExpansions.begin(), EndArgExpansions.end());
+ llvm::sort(BeginArgExpansions.begin(), BeginArgExpansions.end());
+ llvm::sort(EndArgExpansions.begin(), EndArgExpansions.end());
std::set_intersection(BeginArgExpansions.begin(), BeginArgExpansions.end(),
EndArgExpansions.begin(), EndArgExpansions.end(),
std::back_inserter(CommonArgExpansions));
@@ -342,11 +361,12 @@ mapDiagnosticRanges(FullSourceLoc CaretLoc, ArrayRef<CharSourceRange> Ranges,
const SourceManager *SM = &CaretLoc.getManager();
- for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) {
- if (I->isInvalid()) continue;
+ for (const auto &Range : Ranges) {
+ if (Range.isInvalid())
+ continue;
- SourceLocation Begin = I->getBegin(), End = I->getEnd();
- bool IsTokenRange = I->isTokenRange();
+ SourceLocation Begin = Range.getBegin(), End = Range.getEnd();
+ bool IsTokenRange = Range.isTokenRange();
FileID BeginFileID = SM->getFileID(Begin);
FileID EndFileID = SM->getFileID(End);
@@ -357,14 +377,16 @@ mapDiagnosticRanges(FullSourceLoc CaretLoc, ArrayRef<CharSourceRange> Ranges,
llvm::SmallDenseMap<FileID, SourceLocation> BeginLocsMap;
while (Begin.isMacroID() && BeginFileID != EndFileID) {
BeginLocsMap[BeginFileID] = Begin;
- Begin = SM->getImmediateExpansionRange(Begin).first;
+ Begin = SM->getImmediateExpansionRange(Begin).getBegin();
BeginFileID = SM->getFileID(Begin);
}
// Then, crawl the expansion chain for the end of the range.
if (BeginFileID != EndFileID) {
while (End.isMacroID() && !BeginLocsMap.count(EndFileID)) {
- End = SM->getImmediateExpansionRange(End).second;
+ auto Exp = SM->getImmediateExpansionRange(End);
+ IsTokenRange = Exp.isTokenRange();
+ End = Exp.getEnd();
EndFileID = SM->getFileID(End);
}
if (End.isMacroID()) {
@@ -377,9 +399,11 @@ mapDiagnosticRanges(FullSourceLoc CaretLoc, ArrayRef<CharSourceRange> Ranges,
SmallVector<FileID, 4> CommonArgExpansions;
computeCommonMacroArgExpansionFileIDs(Begin, End, SM, CommonArgExpansions);
Begin = retrieveMacroLocation(Begin, BeginFileID, CaretLocFileID,
- CommonArgExpansions, /*IsBegin=*/true, SM);
+ CommonArgExpansions, /*IsBegin=*/true, SM,
+ IsTokenRange);
End = retrieveMacroLocation(End, BeginFileID, CaretLocFileID,
- CommonArgExpansions, /*IsBegin=*/false, SM);
+ CommonArgExpansions, /*IsBegin=*/false, SM,
+ IsTokenRange);
if (Begin.isInvalid() || End.isInvalid()) continue;
// Return the spelling location of the beginning and end of the range.
@@ -400,7 +424,7 @@ void DiagnosticRenderer::emitCaret(FullSourceLoc Loc,
emitCodeContext(Loc, Level, SpellingRanges, Hints);
}
-/// \brief A helper function for emitMacroExpansion to print the
+/// A helper function for emitMacroExpansion to print the
/// macro expansion message.
void DiagnosticRenderer::emitSingleMacroExpansion(
FullSourceLoc Loc, DiagnosticsEngine::Level Level,
@@ -441,7 +465,7 @@ static bool checkLocForMacroArgExpansion(SourceLocation Loc,
}
/// Check if all the locations in the range have the same macro argument
-/// expansion, and that that expansion starts with ArgumentLoc.
+/// expansion, and that the expansion starts with ArgumentLoc.
static bool checkRangeForMacroArgExpansion(CharSourceRange Range,
const SourceManager &SM,
SourceLocation ArgumentLoc) {
@@ -466,8 +490,9 @@ static bool checkRangesForMacroArgExpansion(FullSourceLoc Loc,
/// Count all valid ranges.
unsigned ValidCount = 0;
- for (auto I : Ranges)
- if (I.isValid()) ValidCount++;
+ for (const auto &Range : Ranges)
+ if (Range.isValid())
+ ValidCount++;
if (ValidCount > SpellingRanges.size())
return false;
@@ -480,15 +505,14 @@ static bool checkRangesForMacroArgExpansion(FullSourceLoc Loc,
if (!Loc.isMacroArgExpansion(&ArgumentLoc))
return false;
- for (auto I = SpellingRanges.begin(), E = SpellingRanges.end(); I != E; ++I) {
- if (!checkRangeForMacroArgExpansion(*I, Loc.getManager(), ArgumentLoc))
+ for (const auto &Range : SpellingRanges)
+ if (!checkRangeForMacroArgExpansion(Range, Loc.getManager(), ArgumentLoc))
return false;
- }
return true;
}
-/// \brief Recursively emit notes for each macro expansion and caret
+/// Recursively emit notes for each macro expansion and caret
/// diagnostics where appropriate.
///
/// Walks up the macro expansion stack printing expansion notes, the code
@@ -504,29 +528,31 @@ void DiagnosticRenderer::emitMacroExpansions(FullSourceLoc Loc,
ArrayRef<CharSourceRange> Ranges,
ArrayRef<FixItHint> Hints) {
assert(Loc.isValid() && "must have a valid source location here");
+ const SourceManager &SM = Loc.getManager();
+ SourceLocation L = Loc;
// Produce a stack of macro backtraces.
- SmallVector<FullSourceLoc, 8> LocationStack;
+ SmallVector<SourceLocation, 8> LocationStack;
unsigned IgnoredEnd = 0;
- while (Loc.isMacroID()) {
+ while (L.isMacroID()) {
// If this is the expansion of a macro argument, point the caret at the
// use of the argument in the definition of the macro, not the expansion.
- if (Loc.isMacroArgExpansion())
- LocationStack.push_back(Loc.getImmediateExpansionRange().first);
+ if (SM.isMacroArgExpansion(L))
+ LocationStack.push_back(SM.getImmediateExpansionRange(L).getBegin());
else
- LocationStack.push_back(Loc);
+ LocationStack.push_back(L);
- if (checkRangesForMacroArgExpansion(Loc, Ranges))
+ if (checkRangesForMacroArgExpansion(FullSourceLoc(L, SM), Ranges))
IgnoredEnd = LocationStack.size();
- Loc = Loc.getImmediateMacroCallerLoc();
+ L = SM.getImmediateMacroCallerLoc(L);
// Once the location no longer points into a macro, try stepping through
// the last found location. This sometimes produces additional useful
// backtraces.
- if (Loc.isFileID())
- Loc = LocationStack.back().getImmediateMacroCallerLoc();
- assert(Loc.isValid() && "must have a valid source location here");
+ if (L.isFileID())
+ L = SM.getImmediateMacroCallerLoc(LocationStack.back());
+ assert(L.isValid() && "must have a valid source location here");
}
LocationStack.erase(LocationStack.begin(),
@@ -537,7 +563,7 @@ void DiagnosticRenderer::emitMacroExpansions(FullSourceLoc Loc,
if (MacroDepth <= MacroLimit || MacroLimit == 0) {
for (auto I = LocationStack.rbegin(), E = LocationStack.rend();
I != E; ++I)
- emitSingleMacroExpansion(*I, Level, Ranges);
+ emitSingleMacroExpansion(FullSourceLoc(*I, SM), Level, Ranges);
return;
}
@@ -547,7 +573,7 @@ void DiagnosticRenderer::emitMacroExpansions(FullSourceLoc Loc,
for (auto I = LocationStack.rbegin(),
E = LocationStack.rbegin() + MacroStartMessages;
I != E; ++I)
- emitSingleMacroExpansion(*I, Level, Ranges);
+ emitSingleMacroExpansion(FullSourceLoc(*I, SM), Level, Ranges);
SmallString<200> MessageStorage;
llvm::raw_svector_ostream Message(MessageStorage);
@@ -559,10 +585,10 @@ void DiagnosticRenderer::emitMacroExpansions(FullSourceLoc Loc,
for (auto I = LocationStack.rend() - MacroEndMessages,
E = LocationStack.rend();
I != E; ++I)
- emitSingleMacroExpansion(*I, Level, Ranges);
+ emitSingleMacroExpansion(FullSourceLoc(*I, SM), Level, Ranges);
}
-DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
+DiagnosticNoteRenderer::~DiagnosticNoteRenderer() = default;
void DiagnosticNoteRenderer::emitIncludeLocation(FullSourceLoc Loc,
PresumedLoc PLoc) {
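
Much of the churn above threads an IsTokenRange flag through retrieveMacroLocation so a range's kind survives the walk up the macro expansion stack. The distinction being preserved, as a minimal standalone sketch (B and E are any two valid locations; the function name is illustrative):

    #include "clang/Basic/SourceLocation.h"

    void rangeKinds(clang::SourceLocation B, clang::SourceLocation E) {
      // A token range is later extended to cover the whole token at E;
      // a char range ends exactly at E. Dropping the flag mid-walk would
      // make the renderer highlight one token too many or too few.
      auto Tok = clang::CharSourceRange::getTokenRange(B, E);
      auto Chr = clang::CharSourceRange::getCharRange(B, E);
      (void)Tok.isTokenRange(); // true
      (void)Chr.isTokenRange(); // false
    }
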
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 12226b231417..a5929424e52a 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -79,7 +79,7 @@ public:
}
};
-/// \brief Dumps deserialized declarations.
+/// Dumps deserialized declarations.
class DeserializedDeclsDumper : public DelegatingDeserializationListener {
public:
explicit DeserializedDeclsDumper(ASTDeserializationListener *Previous,
@@ -88,15 +88,17 @@ public:
void DeclRead(serialization::DeclID ID, const Decl *D) override {
llvm::outs() << "PCH DECL: " << D->getDeclKindName();
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- llvm::outs() << " - " << *ND;
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ llvm::outs() << " - ";
+ ND->printQualifiedName(llvm::outs());
+ }
llvm::outs() << "\n";
DelegatingDeserializationListener::DeclRead(ID, D);
}
};
-/// \brief Checks deserialized declarations and emits error if a name
+/// Checks deserialized declarations and emits an error if a name
/// matches one given on the command line via -error-on-deserialized-decl.
class DeserializedDeclsChecker : public DelegatingDeserializationListener {
ASTContext &Ctx;
@@ -153,6 +155,10 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
if (FrontendPluginRegistry::begin() == FrontendPluginRegistry::end())
return Consumer;
+ // If this is a code completion run, avoid invoking the plugin consumers.
+ if (CI.hasCodeCompletionConsumer())
+ return Consumer;
+
// Collect the list of plugins that go before the main action (in Consumers)
// or after it (in AfterConsumers)
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
@@ -282,7 +288,7 @@ static void addHeaderInclude(StringRef HeaderName,
Includes += "}\n";
}
-/// \brief Collect the set of header includes needed to construct the given
+/// Collect the set of header includes needed to construct the given
/// module and update the TopHeaders file set of the module.
///
/// \param Module The module we're collecting includes from.
@@ -760,6 +766,22 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!BeginSourceFileAction(CI))
goto failure;
+ // If we were asked to load any module map files, do so now.
+ for (const auto &Filename : CI.getFrontendOpts().ModuleMapFiles) {
+ if (auto *File = CI.getFileManager().getFile(Filename))
+ CI.getPreprocessor().getHeaderSearchInfo().loadModuleMapFile(
+ File, /*IsSystem*/false);
+ else
+ CI.getDiagnostics().Report(diag::err_module_map_not_found) << Filename;
+ }
+
+ // Add a module declaration scope so that modules from -fmodule-map-file
+ // arguments may shadow modules found implicitly in search paths.
+ CI.getPreprocessor()
+ .getHeaderSearchInfo()
+ .getModuleMap()
+ .finishModuleDeclarationScope();
+
// Create the AST context and consumer unless this is a preprocessor only
// action.
if (!usesPreprocessorOnly()) {
@@ -849,15 +871,6 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
"doesn't support modules");
}
- // If we were asked to load any module map files, do so now.
- for (const auto &Filename : CI.getFrontendOpts().ModuleMapFiles) {
- if (auto *File = CI.getFileManager().getFile(Filename))
- CI.getPreprocessor().getHeaderSearchInfo().loadModuleMapFile(
- File, /*IsSystem*/false);
- else
- CI.getDiagnostics().Report(diag::err_module_map_not_found) << Filename;
- }
-
// If we were asked to load any module files, do so now.
for (const auto &ModuleFile : CI.getFrontendOpts().ModuleFiles)
if (!CI.loadModuleFile(ModuleFile))
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index ffa5b410d2d8..9344e673c7ac 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -18,17 +18,36 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/YAMLTraits.h"
#include <memory>
#include <system_error>
using namespace clang;
+namespace {
+CodeCompleteConsumer *GetCodeCompletionConsumer(CompilerInstance &CI) {
+ return CI.hasCodeCompletionConsumer() ? &CI.getCodeCompletionConsumer()
+ : nullptr;
+}
+
+void EnsureSemaIsCreated(CompilerInstance &CI, FrontendAction &Action) {
+ if (Action.hasCodeCompletionSupport() &&
+ !CI.getFrontendOpts().CodeCompletionAt.FileName.empty())
+ CI.createCodeCompletionConsumer();
+
+ if (!CI.hasSema())
+ CI.createSema(Action.getTranslationUnitKind(),
+ GetCodeCompletionConsumer(CI));
+}
+} // namespace
+
//===----------------------------------------------------------------------===//
// Custom Actions
//===----------------------------------------------------------------------===//
@@ -55,7 +74,8 @@ ASTPrintAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::unique_ptr<ASTConsumer>
ASTDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
- return CreateASTDumper(CI.getFrontendOpts().ASTDumpFilter,
+ return CreateASTDumper(nullptr /*Dump to stdout.*/,
+ CI.getFrontendOpts().ASTDumpFilter,
CI.getFrontendOpts().ASTDumpDecls,
CI.getFrontendOpts().ASTDumpAll,
CI.getFrontendOpts().ASTDumpLookups);
@@ -92,14 +112,14 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
+ const auto &FrontendOpts = CI.getFrontendOpts();
auto Buffer = std::make_shared<PCHBuffer>();
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
Consumers.push_back(llvm::make_unique<PCHGenerator>(
CI.getPreprocessor(), OutputFile, Sysroot,
- Buffer, CI.getFrontendOpts().ModuleFileExtensions,
- /*AllowASTWithErrors*/CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
- /*IncludeTimestamps*/
- +CI.getFrontendOpts().IncludeTimestamps));
+ Buffer, FrontendOpts.ModuleFileExtensions,
+ CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
+ FrontendOpts.IncludeTimestamps));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI, InFile, OutputFile, std::move(OS), Buffer));
@@ -262,7 +282,141 @@ void VerifyPCHAction::ExecuteAction() {
}
namespace {
- /// \brief AST reader listener that dumps module information for a module
+struct TemplightEntry {
+ std::string Name;
+ std::string Kind;
+ std::string Event;
+ std::string DefinitionLocation;
+ std::string PointOfInstantiation;
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<TemplightEntry> {
+ static void mapping(IO &io, TemplightEntry &fields) {
+ io.mapRequired("name", fields.Name);
+ io.mapRequired("kind", fields.Kind);
+ io.mapRequired("event", fields.Event);
+ io.mapRequired("orig", fields.DefinitionLocation);
+ io.mapRequired("poi", fields.PointOfInstantiation);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
+class DefaultTemplateInstCallback : public TemplateInstantiationCallback {
+ using CodeSynthesisContext = Sema::CodeSynthesisContext;
+
+public:
+ void initialize(const Sema &) override {}
+
+ void finalize(const Sema &) override {}
+
+ void atTemplateBegin(const Sema &TheSema,
+ const CodeSynthesisContext &Inst) override {
+ displayTemplightEntry<true>(llvm::outs(), TheSema, Inst);
+ }
+
+ void atTemplateEnd(const Sema &TheSema,
+ const CodeSynthesisContext &Inst) override {
+ displayTemplightEntry<false>(llvm::outs(), TheSema, Inst);
+ }
+
+private:
+ static std::string toString(CodeSynthesisContext::SynthesisKind Kind) {
+ switch (Kind) {
+ case CodeSynthesisContext::TemplateInstantiation:
+ return "TemplateInstantiation";
+ case CodeSynthesisContext::DefaultTemplateArgumentInstantiation:
+ return "DefaultTemplateArgumentInstantiation";
+ case CodeSynthesisContext::DefaultFunctionArgumentInstantiation:
+ return "DefaultFunctionArgumentInstantiation";
+ case CodeSynthesisContext::ExplicitTemplateArgumentSubstitution:
+ return "ExplicitTemplateArgumentSubstitution";
+ case CodeSynthesisContext::DeducedTemplateArgumentSubstitution:
+ return "DeducedTemplateArgumentSubstitution";
+ case CodeSynthesisContext::PriorTemplateArgumentSubstitution:
+ return "PriorTemplateArgumentSubstitution";
+ case CodeSynthesisContext::DefaultTemplateArgumentChecking:
+ return "DefaultTemplateArgumentChecking";
+ case CodeSynthesisContext::ExceptionSpecInstantiation:
+ return "ExceptionSpecInstantiation";
+ case CodeSynthesisContext::DeclaringSpecialMember:
+ return "DeclaringSpecialMember";
+ case CodeSynthesisContext::DefiningSynthesizedFunction:
+ return "DefiningSynthesizedFunction";
+ case CodeSynthesisContext::Memoization:
+ return "Memoization";
+ }
+ return "";
+ }
+
+ template <bool BeginInstantiation>
+ static void displayTemplightEntry(llvm::raw_ostream &Out, const Sema &TheSema,
+ const CodeSynthesisContext &Inst) {
+ std::string YAML;
+ {
+ llvm::raw_string_ostream OS(YAML);
+ llvm::yaml::Output YO(OS);
+ TemplightEntry Entry =
+ getTemplightEntry<BeginInstantiation>(TheSema, Inst);
+ llvm::yaml::EmptyContext Context;
+ llvm::yaml::yamlize(YO, Entry, true, Context);
+ }
+ Out << "---" << YAML << "\n";
+ }
+
+ template <bool BeginInstantiation>
+ static TemplightEntry getTemplightEntry(const Sema &TheSema,
+ const CodeSynthesisContext &Inst) {
+ TemplightEntry Entry;
+ Entry.Kind = toString(Inst.Kind);
+ Entry.Event = BeginInstantiation ? "Begin" : "End";
+ if (auto *NamedTemplate = dyn_cast_or_null<NamedDecl>(Inst.Entity)) {
+ llvm::raw_string_ostream OS(Entry.Name);
+ NamedTemplate->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ const PresumedLoc DefLoc =
+ TheSema.getSourceManager().getPresumedLoc(Inst.Entity->getLocation());
+ if (!DefLoc.isInvalid())
+ Entry.DefinitionLocation = std::string(DefLoc.getFilename()) + ":" +
+ std::to_string(DefLoc.getLine()) + ":" +
+ std::to_string(DefLoc.getColumn());
+ }
+ const PresumedLoc PoiLoc =
+ TheSema.getSourceManager().getPresumedLoc(Inst.PointOfInstantiation);
+ if (!PoiLoc.isInvalid()) {
+ Entry.PointOfInstantiation = std::string(PoiLoc.getFilename()) + ":" +
+ std::to_string(PoiLoc.getLine()) + ":" +
+ std::to_string(PoiLoc.getColumn());
+ }
+ return Entry;
+ }
+};
+} // namespace
+
+std::unique_ptr<ASTConsumer>
+TemplightDumpAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ return llvm::make_unique<ASTConsumer>();
+}
+
+void TemplightDumpAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+
+ // This part is normally done by ASTFrontendAction, but needs to happen
+ // before the Templight observers can be created.
+ // FIXME: Move the truncation aspect of this into Sema; we delayed this till
+ // here so the source manager would be initialized.
+ EnsureSemaIsCreated(CI, *this);
+
+ CI.getSema().TemplateInstCallbacks.push_back(
+ llvm::make_unique<DefaultTemplateInstCallback>());
+ ASTFrontendAction::ExecuteAction();
+}
+
+namespace {
+ /// AST reader listener that dumps module information for a module
/// file.
class DumpModuleInfoListener : public ASTReaderListener {
llvm::raw_ostream &Out;
@@ -406,6 +560,45 @@ namespace {
Out << "\n";
}
+
+ /// Tells the \c ASTReaderListener that we want to receive the
+ /// input files of the AST file via \c visitInputFile.
+ bool needsInputFileVisitation() override { return true; }
+
+ /// Tells the \c ASTReaderListener that we also want to receive the
+ /// system input files of the AST file via \c visitInputFile.
+ bool needsSystemInputFileVisitation() override { return true; }
+
+ /// Indicates that the AST file contains a particular input file.
+ ///
+ /// \returns true to continue receiving the next input file, false to stop.
+ bool visitInputFile(StringRef Filename, bool isSystem,
+ bool isOverridden, bool isExplicitModule) override {
+
+ Out.indent(2) << "Input file: " << Filename;
+
+ if (isSystem || isOverridden || isExplicitModule) {
+ Out << " [";
+ if (isSystem) {
+ Out << "System";
+ if (isOverridden || isExplicitModule)
+ Out << ", ";
+ }
+ if (isOverridden) {
+ Out << "Overridden";
+ if (isExplicitModule)
+ Out << ", ";
+ }
+ if (isExplicitModule)
+ Out << "ExplicitModule";
+
+ Out << "]";
+ }
+
+ Out << "\n";
+
+ return true;
+ }
#undef DUMP_BOOLEAN
};
}
@@ -579,6 +772,7 @@ void PrintPreambleAction::ExecuteAction() {
case InputKind::ObjCXX:
case InputKind::OpenCL:
case InputKind::CUDA:
+ case InputKind::HIP:
break;
case InputKind::Unknown:
@@ -601,3 +795,51 @@ void PrintPreambleAction::ExecuteAction() {
llvm::outs().write((*Buffer)->getBufferStart(), Preamble);
}
}
+
+void DumpCompilerOptionsAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ std::unique_ptr<raw_ostream> OSP =
+ CI.createDefaultOutputFile(false, getCurrentFile());
+ if (!OSP)
+ return;
+
+ raw_ostream &OS = *OSP;
+ const Preprocessor &PP = CI.getPreprocessor();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ // FIXME: Rather than manually format the JSON (which is awkward due to
+ // needing to remove trailing commas), this should make use of a JSON library.
+ // FIXME: Instead of printing enums as an integral value and specifying the
+ // type as a separate field, use introspection to print the enumerator.
+
+ OS << "{\n";
+ OS << "\n\"features\" : [\n";
+ {
+ llvm::SmallString<128> Str;
+#define FEATURE(Name, Predicate) \
+ ("\t{\"" #Name "\" : " + llvm::Twine(Predicate ? "true" : "false") + "},\n") \
+ .toVector(Str);
+#include "clang/Basic/Features.def"
+#undef FEATURE
+ // Remove the newline and comma from the last entry to ensure this remains
+ // valid JSON.
+ OS << Str.substr(0, Str.size() - 2);
+ }
+ OS << "\n],\n";
+
+ OS << "\n\"extensions\" : [\n";
+ {
+ llvm::SmallString<128> Str;
+#define EXTENSION(Name, Predicate) \
+ ("\t{\"" #Name "\" : " + llvm::Twine(Predicate ? "true" : "false") + "},\n") \
+ .toVector(Str);
+#include "clang/Basic/Features.def"
+#undef EXTENSION
+ // Remove the newline and comma from the last entry to ensure this remains
+ // valid JSON.
+ OS << Str.substr(0, Str.size() - 2);
+ }
+ OS << "\n]\n";
+
+ OS << "}";
+}
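
The Templight support above registers a DefaultTemplateInstCallback that emits one YAML document per instantiation event, keyed by the MappingTraits fields (name/kind/event/orig/poi). A tiny input that would exercise it; the expected entry content is inferred from the code, not captured from a real run:

    // Compiled under the templight dump action, this should yield paired
    // Begin/End TemplateInstantiation events whose "name" is "square<int>".
    template <typename T> T square(T x) { return x * x; }
    int main() { return square(3); }
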
diff --git a/lib/Frontend/FrontendOptions.cpp b/lib/Frontend/FrontendOptions.cpp
index dca434588fb1..0744d447e816 100644
--- a/lib/Frontend/FrontendOptions.cpp
+++ b/lib/Frontend/FrontendOptions.cpp
@@ -1,4 +1,4 @@
-//===--- FrontendOptions.cpp ----------------------------------------------===//
+//===- FrontendOptions.cpp ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,6 +9,7 @@
#include "clang/Frontend/FrontendOptions.h"
#include "llvm/ADT/StringSwitch.h"
+
using namespace clang;
InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
diff --git a/lib/Frontend/FrontendTiming.cpp b/lib/Frontend/FrontendTiming.cpp
new file mode 100644
index 000000000000..9ea7347e7797
--- /dev/null
+++ b/lib/Frontend/FrontendTiming.cpp
@@ -0,0 +1,20 @@
+//===- FrontendTiming.cpp - Implements Frontend timing utils -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file keeps the implementation of the frontend timing utils.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+
+namespace clang {
+
+bool FrontendTimesIsEnabled = false;
+
+}
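
The new FrontendTimesIsEnabled global is only a toggle; timing code elsewhere in the frontend is expected to consult it. A hedged sketch of the gating pattern; the timer names are invented, and using llvm::NamedRegionTimer's enable flag for this is an assumption:

    #include "clang/Frontend/Utils.h"
    #include "llvm/Support/Timer.h"

    void timedPhase() {
      // The region is timed only when frontend timing is switched on.
      llvm::NamedRegionTimer T("phase", "Some frontend phase", "frontend",
                               "Clang front-end timers",
                               clang::FrontendTimesIsEnabled);
      // ... do the actual work here ...
    }
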
diff --git a/lib/Frontend/HeaderIncludeGen.cpp b/lib/Frontend/HeaderIncludeGen.cpp
index 5bff4ecd0b46..9dc107c9d546 100644
--- a/lib/Frontend/HeaderIncludeGen.cpp
+++ b/lib/Frontend/HeaderIncludeGen.cpp
@@ -80,9 +80,23 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP,
const DependencyOutputOptions &DepOpts,
bool ShowAllHeaders, StringRef OutputPath,
bool ShowDepth, bool MSStyle) {
- raw_ostream *OutputFile = MSStyle ? &llvm::outs() : &llvm::errs();
+ raw_ostream *OutputFile = &llvm::errs();
bool OwnsOutputFile = false;
+ // Choose output stream, when printing in cl.exe /showIncludes style.
+ if (MSStyle) {
+ switch (DepOpts.ShowIncludesDest) {
+ default:
+ llvm_unreachable("Invalid destination for /showIncludes output!");
+ case ShowIncludesDestination::Stderr:
+ OutputFile = &llvm::errs();
+ break;
+ case ShowIncludesDestination::Stdout:
+ OutputFile = &llvm::outs();
+ break;
+ }
+ }
+
// Open the output file, if used.
if (!OutputPath.empty()) {
std::error_code EC;
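
With the hunk above, cl.exe-style /showIncludes output goes to whichever stream DependencyOutputOptions selects rather than unconditionally to stdout. A sketch of attaching the generator with stdout chosen; the field and enum names follow this hunk, and PP is assumed to be an already configured Preprocessor:

    #include "clang/Frontend/DependencyOutputOptions.h"
    #include "clang/Frontend/Utils.h"

    void attachMSStyleIncludes(clang::Preprocessor &PP) {
      clang::DependencyOutputOptions DepOpts;
      DepOpts.ShowIncludesDest = clang::ShowIncludesDestination::Stdout;
      clang::AttachHeaderIncludeGen(PP, DepOpts,
                                    /*ShowAllHeaders=*/false, /*OutputPath=*/"",
                                    /*ShowDepth=*/true, /*MSStyle=*/true);
    }
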
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index 8c6faced76ac..8a70404629d3 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -14,6 +14,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Config/config.h" // C_INCLUDE_DIRS
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/HeaderSearch.h"
@@ -55,11 +56,13 @@ public:
/// AddPath - Add the specified path to the specified group list, prefixing
/// the sysroot if used.
- void AddPath(const Twine &Path, IncludeDirGroup Group, bool isFramework);
+ /// Returns true if the path exists, false if it was ignored.
+ bool AddPath(const Twine &Path, IncludeDirGroup Group, bool isFramework);
/// AddUnmappedPath - Add the specified path to the specified group list,
/// without performing any sysroot remapping.
- void AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
+ /// Returns true if the path exists, false if it was ignored.
+ bool AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
bool isFramework);
/// AddSystemHeaderPrefix - Add the specified prefix to the system header
@@ -70,10 +73,9 @@ public:
/// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
/// libstdc++.
- void AddGnuCPlusPlusIncludePaths(StringRef Base,
- StringRef ArchDir,
- StringRef Dir32,
- StringRef Dir64,
+ /// Returns true if the \p Base path was found, false if it does not exist.
+ bool AddGnuCPlusPlusIncludePaths(StringRef Base, StringRef ArchDir,
+ StringRef Dir32, StringRef Dir64,
const llvm::Triple &triple);
/// AddMinGWCPlusPlusIncludePaths - Add the necessary paths to support a MinGW
@@ -88,7 +90,8 @@ public:
// AddDefaultCPlusPlusIncludePaths - Add paths that should be searched when
// compiling c++.
- void AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple,
+ void AddDefaultCPlusPlusIncludePaths(const LangOptions &LangOpts,
+ const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts);
/// AddDefaultSystemIncludePaths - Adds the default system include paths so
@@ -105,14 +108,14 @@ public:
} // end anonymous namespace.
static bool CanPrefixSysroot(StringRef Path) {
-#if defined(LLVM_ON_WIN32)
+#if defined(_WIN32)
return !Path.empty() && llvm::sys::path::is_separator(Path[0]);
#else
return llvm::sys::path::is_absolute(Path);
#endif
}
-void InitHeaderSearch::AddPath(const Twine &Path, IncludeDirGroup Group,
+bool InitHeaderSearch::AddPath(const Twine &Path, IncludeDirGroup Group,
bool isFramework) {
// Add the path with sysroot prepended, if desired and this is a system header
// group.
@@ -120,15 +123,14 @@ void InitHeaderSearch::AddPath(const Twine &Path, IncludeDirGroup Group,
SmallString<256> MappedPathStorage;
StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
if (CanPrefixSysroot(MappedPathStr)) {
- AddUnmappedPath(IncludeSysroot + Path, Group, isFramework);
- return;
+ return AddUnmappedPath(IncludeSysroot + Path, Group, isFramework);
}
}
- AddUnmappedPath(Path, Group, isFramework);
+ return AddUnmappedPath(Path, Group, isFramework);
}
-void InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
+bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
bool isFramework) {
assert(!Path.isTriviallyEmpty() && "can't handle empty path here");
@@ -150,7 +152,7 @@ void InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
if (const DirectoryEntry *DE = FM.getDirectory(MappedPathStr)) {
IncludePath.push_back(
std::make_pair(Group, DirectoryLookup(DE, Type, isFramework)));
- return;
+ return true;
}
// Check to see if this is an apple-style headermap (which are not allowed to
@@ -162,7 +164,7 @@ void InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
IncludePath.push_back(
std::make_pair(Group,
DirectoryLookup(HM, Type, Group == IndexHeaderMap)));
- return;
+ return true;
}
}
}
@@ -170,15 +172,16 @@ void InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
if (Verbose)
llvm::errs() << "ignoring nonexistent directory \""
<< MappedPathStr << "\"\n";
+ return false;
}
-void InitHeaderSearch::AddGnuCPlusPlusIncludePaths(StringRef Base,
+bool InitHeaderSearch::AddGnuCPlusPlusIncludePaths(StringRef Base,
StringRef ArchDir,
StringRef Dir32,
StringRef Dir64,
const llvm::Triple &triple) {
// Add the base dir
- AddPath(Base, CXXSystem, false);
+ bool IsBaseFound = AddPath(Base, CXXSystem, false);
// Add the multilib dirs
llvm::Triple::ArchType arch = triple.getArch();
@@ -190,6 +193,7 @@ void InitHeaderSearch::AddGnuCPlusPlusIncludePaths(StringRef Base,
// Add the backward dir
AddPath(Base + "/backward", CXXSystem, false);
+ return IsBaseFound;
}
void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(StringRef Base,
@@ -216,6 +220,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::NaCl:
case llvm::Triple::PS4:
case llvm::Triple::ELFIAMCU:
+ case llvm::Triple::Fuchsia:
break;
case llvm::Triple::Win32:
if (triple.getEnvironment() != llvm::Triple::Cygnus)
@@ -255,6 +260,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
switch (os) {
case llvm::Triple::Linux:
+ case llvm::Triple::Solaris:
llvm_unreachable("Include management is handled in the driver.");
case llvm::Triple::CloudABI: {
@@ -321,6 +327,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::RTEMS:
case llvm::Triple::NaCl:
case llvm::Triple::ELFIAMCU:
+ case llvm::Triple::Fuchsia:
break;
case llvm::Triple::PS4: {
// <isysroot> gets prepended later in AddPath().
@@ -351,51 +358,61 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
}
}
-void InitHeaderSearch::
-AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOptions &HSOpts) {
+void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
+ const LangOptions &LangOpts, const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts) {
llvm::Triple::OSType os = triple.getOS();
// FIXME: temporary hack: hard-coded paths.
if (triple.isOSDarwin()) {
+ bool IsBaseFound = true;
switch (triple.getArch()) {
default: break;
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "powerpc-apple-darwin10", "", "ppc64",
- triple);
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
- "powerpc-apple-darwin10", "", "ppc64",
- triple);
+ IsBaseFound = AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "powerpc-apple-darwin10", "",
+ "ppc64", triple);
+ IsBaseFound |= AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "powerpc-apple-darwin10", "",
+ "ppc64", triple);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "i686-apple-darwin10", "", "x86_64", triple);
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
- "i686-apple-darwin8", "", "", triple);
+ IsBaseFound = AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "i686-apple-darwin10", "",
+ "x86_64", triple);
+ IsBaseFound |= AddGnuCPlusPlusIncludePaths(
+ "/usr/include/c++/4.0.0", "i686-apple-darwin8", "", "", triple);
break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "arm-apple-darwin10", "v7", "", triple);
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "arm-apple-darwin10", "v6", "", triple);
+ IsBaseFound = AddGnuCPlusPlusIncludePaths(
+ "/usr/include/c++/4.2.1", "arm-apple-darwin10", "v7", "", triple);
+ IsBaseFound |= AddGnuCPlusPlusIncludePaths(
+ "/usr/include/c++/4.2.1", "arm-apple-darwin10", "v6", "", triple);
break;
case llvm::Triple::aarch64:
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "arm64-apple-darwin10", "", "", triple);
+ IsBaseFound = AddGnuCPlusPlusIncludePaths(
+ "/usr/include/c++/4.2.1", "arm64-apple-darwin10", "", "", triple);
break;
}
+ // Warn when compiling pure C++ / Objective-C++ only.
+ if (!IsBaseFound &&
+ !(LangOpts.CUDA || LangOpts.OpenCL || LangOpts.RenderScript)) {
+ Headers.getDiags().Report(SourceLocation(),
+ diag::warn_stdlibcxx_not_found);
+ }
return;
}
switch (os) {
case llvm::Triple::Linux:
+ case llvm::Triple::Solaris:
llvm_unreachable("Include management is handled in the driver.");
break;
case llvm::Triple::Win32:
@@ -443,6 +460,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
break; // Everything else continues to use this routine's logic.
case llvm::Triple::Linux:
+ case llvm::Triple::Solaris:
return;
case llvm::Triple::Win32:
@@ -452,8 +470,8 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
break;
}
- if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes &&
- HSOpts.UseStandardSystemIncludes) {
+ if (Lang.CPlusPlus && !Lang.AsmPreprocessor &&
+ HSOpts.UseStandardCXXIncludes && HSOpts.UseStandardSystemIncludes) {
if (HSOpts.UseLibcxx) {
if (triple.isOSDarwin()) {
// On Darwin, libc++ may be installed alongside the compiler in
@@ -473,7 +491,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
}
AddPath("/usr/include/c++/v1", CXXSystem, false);
} else {
- AddDefaultCPlusPlusIncludePaths(triple, HSOpts);
+ AddDefaultCPlusPlusIncludePaths(Lang, triple, HSOpts);
}
}
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index d39890494323..e3f4f92b9d1e 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -93,7 +93,7 @@ static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
AddImplicitInclude(Builder, OriginalFile);
}
-/// \brief Add an implicit \#include using the original file used to generate
+/// Add an implicit \#include using the original file used to generate
/// a PCH file.
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
const PCHContainerReader &PCHContainerRdr,
@@ -301,7 +301,7 @@ static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
return "1"; // "sometimes lock free"
}
-/// \brief Add definitions required for a smooth interaction between
+/// Add definitions required for a smooth interaction between
/// Objective-C++ automated reference counting and libstdc++ (4.2).
static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
MacroBuilder &Builder) {
@@ -426,45 +426,59 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// OpenCL v1.0/1.1 s6.9, v1.2/2.0 s6.10: Preprocessor Directives and Macros.
if (LangOpts.OpenCL) {
- // OpenCL v1.0 and v1.1 do not have a predefined macro to indicate the
- // language standard with which the program is compiled. __OPENCL_VERSION__
- // is for the OpenCL version supported by the OpenCL device, which is not
- // necessarily the language standard with which the program is compiled.
- // A shared OpenCL header file requires a macro to indicate the language
- // standard. As a workaround, __OPENCL_C_VERSION__ is defined for
- // OpenCL v1.0 and v1.1.
- switch (LangOpts.OpenCLVersion) {
- case 100:
- Builder.defineMacro("__OPENCL_C_VERSION__", "100");
- break;
- case 110:
- Builder.defineMacro("__OPENCL_C_VERSION__", "110");
- break;
- case 120:
- Builder.defineMacro("__OPENCL_C_VERSION__", "120");
- break;
- case 200:
- Builder.defineMacro("__OPENCL_C_VERSION__", "200");
- break;
- default:
- llvm_unreachable("Unsupported OpenCL version");
- }
- Builder.defineMacro("CL_VERSION_1_0", "100");
- Builder.defineMacro("CL_VERSION_1_1", "110");
- Builder.defineMacro("CL_VERSION_1_2", "120");
- Builder.defineMacro("CL_VERSION_2_0", "200");
+ if (LangOpts.CPlusPlus) {
+ if (LangOpts.OpenCLCPlusPlusVersion == 100)
+ Builder.defineMacro("__OPENCL_CPP_VERSION__", "100");
+ else
+ llvm_unreachable("Unsupported OpenCL C++ version");
+ Builder.defineMacro("__CL_CPP_VERSION_1_0__", "100");
+ } else {
+ // OpenCL v1.0 and v1.1 do not have a predefined macro to indicate the
+ // language standard with which the program is compiled. __OPENCL_VERSION__
+ // is for the OpenCL version supported by the OpenCL device, which is not
+ // necessarily the language standard with which the program is compiled.
+ // A shared OpenCL header file requires a macro to indicate the language
+ // standard. As a workaround, __OPENCL_C_VERSION__ is defined for
+ // OpenCL v1.0 and v1.1.
+ switch (LangOpts.OpenCLVersion) {
+ case 100:
+ Builder.defineMacro("__OPENCL_C_VERSION__", "100");
+ break;
+ case 110:
+ Builder.defineMacro("__OPENCL_C_VERSION__", "110");
+ break;
+ case 120:
+ Builder.defineMacro("__OPENCL_C_VERSION__", "120");
+ break;
+ case 200:
+ Builder.defineMacro("__OPENCL_C_VERSION__", "200");
+ break;
+ default:
+ llvm_unreachable("Unsupported OpenCL version");
+ }
+ Builder.defineMacro("CL_VERSION_1_0", "100");
+ Builder.defineMacro("CL_VERSION_1_1", "110");
+ Builder.defineMacro("CL_VERSION_1_2", "120");
+ Builder.defineMacro("CL_VERSION_2_0", "200");
- if (TI.isLittleEndian())
- Builder.defineMacro("__ENDIAN_LITTLE__");
+ if (TI.isLittleEndian())
+ Builder.defineMacro("__ENDIAN_LITTLE__");
- if (LangOpts.FastRelaxedMath)
- Builder.defineMacro("__FAST_RELAXED_MATH__");
+ if (LangOpts.FastRelaxedMath)
+ Builder.defineMacro("__FAST_RELAXED_MATH__");
+ }
}
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
- if (LangOpts.CUDA)
+ if (LangOpts.CUDA && !LangOpts.HIP)
Builder.defineMacro("__CUDA__");
+ if (LangOpts.HIP) {
+ Builder.defineMacro("__HIP__");
+ Builder.defineMacro("__HIPCC__");
+ if (LangOpts.CUDAIsDevice)
+ Builder.defineMacro("__HIP_DEVICE_COMPILE__");
+ }
}
/// Initialize the predefined C++ language feature test macros defined in
@@ -473,78 +487,86 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
MacroBuilder &Builder) {
// C++98 features.
if (LangOpts.RTTI)
- Builder.defineMacro("__cpp_rtti", "199711");
+ Builder.defineMacro("__cpp_rtti", "199711L");
if (LangOpts.CXXExceptions)
- Builder.defineMacro("__cpp_exceptions", "199711");
+ Builder.defineMacro("__cpp_exceptions", "199711L");
// C++11 features.
if (LangOpts.CPlusPlus11) {
- Builder.defineMacro("__cpp_unicode_characters", "200704");
- Builder.defineMacro("__cpp_raw_strings", "200710");
- Builder.defineMacro("__cpp_unicode_literals", "200710");
- Builder.defineMacro("__cpp_user_defined_literals", "200809");
- Builder.defineMacro("__cpp_lambdas", "200907");
+ Builder.defineMacro("__cpp_unicode_characters", "200704L");
+ Builder.defineMacro("__cpp_raw_strings", "200710L");
+ Builder.defineMacro("__cpp_unicode_literals", "200710L");
+ Builder.defineMacro("__cpp_user_defined_literals", "200809L");
+ Builder.defineMacro("__cpp_lambdas", "200907L");
Builder.defineMacro("__cpp_constexpr",
- LangOpts.CPlusPlus17 ? "201603" :
- LangOpts.CPlusPlus14 ? "201304" : "200704");
+ LangOpts.CPlusPlus17 ? "201603L" :
+ LangOpts.CPlusPlus14 ? "201304L" : "200704");
Builder.defineMacro("__cpp_range_based_for",
- LangOpts.CPlusPlus17 ? "201603" : "200907");
+ LangOpts.CPlusPlus17 ? "201603L" : "200907");
Builder.defineMacro("__cpp_static_assert",
- LangOpts.CPlusPlus17 ? "201411" : "200410");
- Builder.defineMacro("__cpp_decltype", "200707");
- Builder.defineMacro("__cpp_attributes", "200809");
- Builder.defineMacro("__cpp_rvalue_references", "200610");
- Builder.defineMacro("__cpp_variadic_templates", "200704");
- Builder.defineMacro("__cpp_initializer_lists", "200806");
- Builder.defineMacro("__cpp_delegating_constructors", "200604");
- Builder.defineMacro("__cpp_nsdmi", "200809");
- Builder.defineMacro("__cpp_inheriting_constructors", "201511");
- Builder.defineMacro("__cpp_ref_qualifiers", "200710");
- Builder.defineMacro("__cpp_alias_templates", "200704");
+ LangOpts.CPlusPlus17 ? "201411L" : "200410");
+ Builder.defineMacro("__cpp_decltype", "200707L");
+ Builder.defineMacro("__cpp_attributes", "200809L");
+ Builder.defineMacro("__cpp_rvalue_references", "200610L");
+ Builder.defineMacro("__cpp_variadic_templates", "200704L");
+ Builder.defineMacro("__cpp_initializer_lists", "200806L");
+ Builder.defineMacro("__cpp_delegating_constructors", "200604L");
+ Builder.defineMacro("__cpp_nsdmi", "200809L");
+ Builder.defineMacro("__cpp_inheriting_constructors", "201511L");
+ Builder.defineMacro("__cpp_ref_qualifiers", "200710L");
+ Builder.defineMacro("__cpp_alias_templates", "200704L");
}
if (LangOpts.ThreadsafeStatics)
- Builder.defineMacro("__cpp_threadsafe_static_init", "200806");
+ Builder.defineMacro("__cpp_threadsafe_static_init", "200806L");
// C++14 features.
if (LangOpts.CPlusPlus14) {
- Builder.defineMacro("__cpp_binary_literals", "201304");
- Builder.defineMacro("__cpp_digit_separators", "201309");
- Builder.defineMacro("__cpp_init_captures", "201304");
- Builder.defineMacro("__cpp_generic_lambdas", "201304");
- Builder.defineMacro("__cpp_decltype_auto", "201304");
- Builder.defineMacro("__cpp_return_type_deduction", "201304");
- Builder.defineMacro("__cpp_aggregate_nsdmi", "201304");
- Builder.defineMacro("__cpp_variable_templates", "201304");
+ Builder.defineMacro("__cpp_binary_literals", "201304L");
+ Builder.defineMacro("__cpp_digit_separators", "201309L");
+ Builder.defineMacro("__cpp_init_captures", "201304L");
+ Builder.defineMacro("__cpp_generic_lambdas", "201304L");
+ Builder.defineMacro("__cpp_decltype_auto", "201304L");
+ Builder.defineMacro("__cpp_return_type_deduction", "201304L");
+ Builder.defineMacro("__cpp_aggregate_nsdmi", "201304L");
+ Builder.defineMacro("__cpp_variable_templates", "201304L");
}
if (LangOpts.SizedDeallocation)
- Builder.defineMacro("__cpp_sized_deallocation", "201309");
+ Builder.defineMacro("__cpp_sized_deallocation", "201309L");
// C++17 features.
if (LangOpts.CPlusPlus17) {
- Builder.defineMacro("__cpp_hex_float", "201603");
- Builder.defineMacro("__cpp_inline_variables", "201606");
- Builder.defineMacro("__cpp_noexcept_function_type", "201510");
- Builder.defineMacro("__cpp_capture_star_this", "201603");
- Builder.defineMacro("__cpp_if_constexpr", "201606");
- Builder.defineMacro("__cpp_deduction_guides", "201611");
- Builder.defineMacro("__cpp_template_auto", "201606");
- Builder.defineMacro("__cpp_namespace_attributes", "201411");
- Builder.defineMacro("__cpp_enumerator_attributes", "201411");
- Builder.defineMacro("__cpp_nested_namespace_definitions", "201411");
- Builder.defineMacro("__cpp_variadic_using", "201611");
- Builder.defineMacro("__cpp_aggregate_bases", "201603");
- Builder.defineMacro("__cpp_structured_bindings", "201606");
- Builder.defineMacro("__cpp_nontype_template_args", "201411");
- Builder.defineMacro("__cpp_fold_expressions", "201603");
+ Builder.defineMacro("__cpp_hex_float", "201603L");
+ Builder.defineMacro("__cpp_inline_variables", "201606L");
+ Builder.defineMacro("__cpp_noexcept_function_type", "201510L");
+ Builder.defineMacro("__cpp_capture_star_this", "201603L");
+ Builder.defineMacro("__cpp_if_constexpr", "201606L");
+ Builder.defineMacro("__cpp_deduction_guides", "201703L");
+ Builder.defineMacro("__cpp_template_auto", "201606L"); // (old name)
+ Builder.defineMacro("__cpp_namespace_attributes", "201411L");
+ Builder.defineMacro("__cpp_enumerator_attributes", "201411L");
+ Builder.defineMacro("__cpp_nested_namespace_definitions", "201411L");
+ Builder.defineMacro("__cpp_variadic_using", "201611L");
+ Builder.defineMacro("__cpp_aggregate_bases", "201603L");
+ Builder.defineMacro("__cpp_structured_bindings", "201606L");
+ Builder.defineMacro("__cpp_nontype_template_args", "201411L");
+ Builder.defineMacro("__cpp_fold_expressions", "201603L");
+ Builder.defineMacro("__cpp_guaranteed_copy_elision", "201606L");
+ Builder.defineMacro("__cpp_nontype_template_parameter_auto", "201606L");
}
if (LangOpts.AlignedAllocation)
- Builder.defineMacro("__cpp_aligned_new", "201606");
+ Builder.defineMacro("__cpp_aligned_new", "201606L");
+ if (LangOpts.RelaxedTemplateTemplateArgs)
+ Builder.defineMacro("__cpp_template_template_args", "201611L");
// TS features.
if (LangOpts.ConceptsTS)
- Builder.defineMacro("__cpp_experimental_concepts", "1");
+ Builder.defineMacro("__cpp_experimental_concepts", "1L");
if (LangOpts.CoroutinesTS)
Builder.defineMacro("__cpp_coroutines", "201703L");
+
+ // Potential future breaking changes.
+ if (LangOpts.Char8)
+ Builder.defineMacro("__cpp_char8_t", "201803L");
}
static void InitializePredefinedMacros(const TargetInfo &TI,
@@ -627,6 +649,19 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.ObjCRuntime.isNeXTFamily())
Builder.defineMacro("__NEXT_RUNTIME__");
+ if (LangOpts.ObjCRuntime.getKind() == ObjCRuntime::GNUstep) {
+ auto version = LangOpts.ObjCRuntime.getVersion();
+ std::string versionString = "1";
+ // Don't rely on the tuple argument, because we can be asked to target
+ // later ABIs than we actually support, so clamp these values to those
+ // currently supported
+ if (version >= VersionTuple(2, 0))
+ Builder.defineMacro("__OBJC_GNUSTEP_RUNTIME_ABI__", "20");
+ else
+ Builder.defineMacro("__OBJC_GNUSTEP_RUNTIME_ABI__",
+ "1" + Twine(std::min(8U, version.getMinor().getValueOr(0))));
+ }
+
if (LangOpts.ObjCRuntime.getKind() == ObjCRuntime::ObjFW) {
VersionTuple tuple = LangOpts.ObjCRuntime.getVersion();
@@ -817,10 +852,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat(), "L");
- if (TI.hasFloat128Type())
- // FIXME: Switch away from the non-standard "Q" when we can
- DefineFloatMacros(Builder, "FLT128", &TI.getFloat128Format(), "Q");
-
// Define a __POINTER_WIDTH__ macro for stdint.h.
Builder.defineMacro("__POINTER_WIDTH__",
@@ -929,6 +960,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
InlineWidthBits));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
+ if (LangOpts.Char8)
+ DEFINE_LOCK_FREE_MACRO(CHAR8_T, Char); // Treat char8_t like char.
DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
@@ -1011,23 +1044,25 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// macro name is defined to have the decimal value yyyymm where
// yyyy and mm are the year and the month designations of the
// version of the OpenMP API that the implementation supports.
- switch (LangOpts.OpenMP) {
- case 0:
- break;
- case 40:
- Builder.defineMacro("_OPENMP", "201307");
- break;
- case 45:
- Builder.defineMacro("_OPENMP", "201511");
- break;
- default:
- // Default version is OpenMP 3.1
- Builder.defineMacro("_OPENMP", "201107");
- break;
+ if (!LangOpts.OpenMPSimd) {
+ switch (LangOpts.OpenMP) {
+ case 0:
+ break;
+ case 40:
+ Builder.defineMacro("_OPENMP", "201307");
+ break;
+ case 45:
+ Builder.defineMacro("_OPENMP", "201511");
+ break;
+ default:
+ // Default version is OpenMP 3.1
+ Builder.defineMacro("_OPENMP", "201107");
+ break;
+ }
}
// CUDA device path compilation
- if (LangOpts.CUDAIsDevice) {
+ if (LangOpts.CUDAIsDevice && !LangOpts.HIP) {
// The CUDA_ARCH value is set for the GPU target specified in the NVPTX
// backend's target defines.
Builder.defineMacro("__CUDA_ARCH__");
diff --git a/lib/Frontend/LayoutOverrideSource.cpp b/lib/Frontend/LayoutOverrideSource.cpp
index 06e9a7dc50b4..93e07eb81f72 100644
--- a/lib/Frontend/LayoutOverrideSource.cpp
+++ b/lib/Frontend/LayoutOverrideSource.cpp
@@ -15,7 +15,7 @@
using namespace clang;
-/// \brief Parse a simple identifier.
+/// Parse a simple identifier.
static std::string parseName(StringRef S) {
if (S.empty() || !isIdentifierHead(S[0]))
return "";
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index ede12aab6e69..25cad8be6d00 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -16,6 +16,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -49,7 +50,8 @@ struct ModuleDependencyPPCallbacks : public PPCallbacks {
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override {
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override {
if (!File)
return;
Collector.addFile(File->getName());
@@ -135,7 +137,7 @@ static bool isCaseSensitivePath(StringRef Path) {
// Change path to all upper case and ask for its real path, if the latter
// exists and is equal to Path, it's not case sensitive. Default to case
- // sensitive in the absense of realpath, since this is what the VFSWriter
+ // sensitive in the absence of realpath, since this is what the VFSWriter
  // already expects when sensitivity isn't set up.
for (auto &C : Path)
UpperDest.push_back(toUppercase(C));
diff --git a/lib/Frontend/MultiplexConsumer.cpp b/lib/Frontend/MultiplexConsumer.cpp
index 04a8f6c1cdfb..df36c88ec38e 100644
--- a/lib/Frontend/MultiplexConsumer.cpp
+++ b/lib/Frontend/MultiplexConsumer.cpp
@@ -16,35 +16,11 @@
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclGroup.h"
-#include "clang/Serialization/ASTDeserializationListener.h"
using namespace clang;
namespace clang {
-// This ASTDeserializationListener forwards its notifications to a set of
-// child listeners.
-class MultiplexASTDeserializationListener
- : public ASTDeserializationListener {
-public:
- // Does NOT take ownership of the elements in L.
- MultiplexASTDeserializationListener(
- const std::vector<ASTDeserializationListener*>& L);
- void ReaderInitialized(ASTReader *Reader) override;
- void IdentifierRead(serialization::IdentID ID,
- IdentifierInfo *II) override;
- void MacroRead(serialization::MacroID ID, MacroInfo *MI) override;
- void TypeRead(serialization::TypeIdx Idx, QualType T) override;
- void DeclRead(serialization::DeclID ID, const Decl *D) override;
- void SelectorRead(serialization::SelectorID iD, Selector Sel) override;
- void MacroDefinitionRead(serialization::PreprocessedEntityID,
- MacroDefinitionRecord *MD) override;
- void ModuleRead(serialization::SubmoduleID ID, Module *Mod) override;
-
-private:
- std::vector<ASTDeserializationListener *> Listeners;
-};
-
MultiplexASTDeserializationListener::MultiplexASTDeserializationListener(
const std::vector<ASTDeserializationListener*>& L)
: Listeners(L) {
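Aside: only the declaration moved to the header; the forwarding pattern the class implements is unchanged. In miniature (a sketch, not the verbatim implementation):

    void MultiplexASTDeserializationListener::ReaderInitialized(
        ASTReader *Reader) {
      for (auto *Listener : Listeners) // does NOT own its listeners
        Listener->ReaderInitialized(Reader);
    }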
diff --git a/lib/Frontend/PCHContainerOperations.cpp b/lib/Frontend/PCHContainerOperations.cpp
index eebebf327a19..340e8ce63ff4 100644
--- a/lib/Frontend/PCHContainerOperations.cpp
+++ b/lib/Frontend/PCHContainerOperations.cpp
@@ -25,7 +25,7 @@ PCHContainerReader::~PCHContainerReader() {}
namespace {
-/// \brief A PCHContainerGenerator that writes out the PCH to a flat file.
+/// A PCHContainerGenerator that writes out the PCH to a flat file.
class RawPCHContainerGenerator : public ASTConsumer {
std::shared_ptr<PCHBuffer> Buffer;
std::unique_ptr<raw_pwrite_stream> OS;
diff --git a/lib/Frontend/PrecompiledPreamble.cpp b/lib/Frontend/PrecompiledPreamble.cpp
index 7e1323fd83bb..30ae2db26d86 100644
--- a/lib/Frontend/PrecompiledPreamble.cpp
+++ b/lib/Frontend/PrecompiledPreamble.cpp
@@ -40,7 +40,7 @@ namespace {
StringRef getInMemoryPreamblePath() {
#if defined(LLVM_ON_UNIX)
return "/__clang_tmp/___clang_inmemory_preamble___";
-#elif defined(LLVM_ON_WIN32)
+#elif defined(_WIN32)
return "C:\\__clang_tmp\\___clang_inmemory_preamble___";
#else
#warning "Unknown platform. Defaulting to UNIX-style paths for in-memory PCHs"
@@ -63,6 +63,16 @@ createVFSOverlayForPreamblePCH(StringRef PCHFilename,
return Overlay;
}
+class PreambleDependencyCollector : public DependencyCollector {
+public:
+ // We want to collect all dependencies for correctness. Avoiding the real
+ // system dependencies (e.g. stl from /usr/lib) would probably be a good idea,
+ // but there is no way to distinguish between those and the ones that can be
+ // spuriously added by '-isystem' (e.g. to suppress warnings from those
+ // headers).
+ bool needSystemDependencies() override { return true; }
+};
+
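Aside: a hedged sketch of how a collector like the one above is attached and drained (the attach call appears verbatim later in this diff; the drain loop assumes DependencyCollector::getDependencies()):

    auto Collector = std::make_shared<PreambleDependencyCollector>();
    Clang->addDependencyCollector(Collector);
    // ... build the preamble ...
    for (const auto &Dep : Collector->getDependencies())
      llvm::errs() << "preamble dep: " << Dep << "\n";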
/// Keeps track of files to be deleted in the destructor.
class TemporaryFiles {
public:
@@ -303,8 +313,6 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
VFS =
createVFSFromCompilerInvocation(Clang->getInvocation(), Diagnostics, VFS);
- if (!VFS)
- return BuildPreambleError::CouldntCreateVFSOverlay;
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(new FileManager(Clang->getFileSystemOpts(), VFS));
@@ -313,7 +321,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
Clang->setSourceManager(
new SourceManager(Diagnostics, Clang->getFileManager()));
- auto PreambleDepCollector = std::make_shared<DependencyCollector>();
+ auto PreambleDepCollector = std::make_shared<PreambleDependencyCollector>();
Clang->addDependencyCollector(PreambleDepCollector);
// Remap the main source file to the preamble buffer.
@@ -485,20 +493,15 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
void PrecompiledPreamble::AddImplicitPreamble(
CompilerInvocation &CI, IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
- assert(VFS && "VFS must not be null");
-
- auto &PreprocessorOpts = CI.getPreprocessorOpts();
-
- // Remap main file to point to MainFileBuffer.
- auto MainFilePath = CI.getFrontendOpts().Inputs[0].getFile();
- PreprocessorOpts.addRemappedFile(MainFilePath, MainFileBuffer);
-
- // Configure ImpicitPCHInclude.
- PreprocessorOpts.PrecompiledPreambleBytes.first = PreambleBytes.size();
- PreprocessorOpts.PrecompiledPreambleBytes.second = PreambleEndsAtStartOfLine;
- PreprocessorOpts.DisablePCHValidation = true;
+ PreambleBounds Bounds(PreambleBytes.size(), PreambleEndsAtStartOfLine);
+ configurePreamble(Bounds, CI, VFS, MainFileBuffer);
+}
- setupPreambleStorage(Storage, PreprocessorOpts, VFS);
+void PrecompiledPreamble::OverridePreamble(
+ CompilerInvocation &CI, IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ llvm::MemoryBuffer *MainFileBuffer) const {
+ auto Bounds = ComputePreambleBounds(*CI.getLangOpts(), MainFileBuffer, 0);
+ configurePreamble(Bounds, CI, VFS, MainFileBuffer);
}
PrecompiledPreamble::PrecompiledPreamble(
@@ -681,6 +684,27 @@ PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(
return Result;
}
+void PrecompiledPreamble::configurePreamble(
+ PreambleBounds Bounds, CompilerInvocation &CI,
+ IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ llvm::MemoryBuffer *MainFileBuffer) const {
+ assert(VFS);
+
+ auto &PreprocessorOpts = CI.getPreprocessorOpts();
+
+ // Remap main file to point to MainFileBuffer.
+ auto MainFilePath = CI.getFrontendOpts().Inputs[0].getFile();
+ PreprocessorOpts.addRemappedFile(MainFilePath, MainFileBuffer);
+
+  // Configure ImplicitPCHInclude.
+ PreprocessorOpts.PrecompiledPreambleBytes.first = Bounds.Size;
+ PreprocessorOpts.PrecompiledPreambleBytes.second =
+ Bounds.PreambleEndsAtStartOfLine;
+ PreprocessorOpts.DisablePCHValidation = true;
+
+ setupPreambleStorage(Storage, PreprocessorOpts, VFS);
+}
+
void PrecompiledPreamble::setupPreambleStorage(
const PCHStorage &Storage, PreprocessorOptions &PreprocessorOpts,
IntrusiveRefCntPtr<vfs::FileSystem> &VFS) {
@@ -740,8 +764,6 @@ std::string BuildPreambleErrorCategory::message(int condition) const {
return "Could not create temporary file for PCH";
case BuildPreambleError::CouldntCreateTargetInfo:
return "CreateTargetInfo() return null";
- case BuildPreambleError::CouldntCreateVFSOverlay:
- return "Could not create VFS Overlay";
case BuildPreambleError::BeginSourceFileFailed:
return "BeginSourceFile() return an error";
case BuildPreambleError::CouldntEmitPCH:
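Aside: the refactoring above leaves callers with two entry points over one shared configurePreamble. A hedged usage sketch (Preamble, CI, VFS, Bounds and MainFileBuffer are assumed to exist in the caller):

    if (Preamble.CanReuse(CI, MainFileBuffer, Bounds, VFS.get()))
      Preamble.AddImplicitPreamble(CI, VFS, MainFileBuffer); // stored bounds
    else
      Preamble.OverridePreamble(CI, VFS, MainFileBuffer);    // bounds recomputed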
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 2e023294f1e8..1b35b32656e7 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -130,7 +130,8 @@ public:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override;
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override;
void Ident(SourceLocation Loc, StringRef str) override;
void PragmaMessage(SourceLocation Loc, StringRef Namespace,
PragmaMessageKind Kind, StringRef Str) override;
@@ -320,15 +321,17 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
}
}
-void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
- const Module *Imported) {
+void PrintPPOutputPPCallbacks::InclusionDirective(
+ SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ CharSourceRange FilenameRange,
+ const FileEntry *File,
+ StringRef SearchPath,
+ StringRef RelativePath,
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) {
// In -dI mode, dump #include directives prior to dumping their content or
// interpretation.
if (DumpIncludeDirectives) {
@@ -752,7 +755,7 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
} else if (Tok.isLiteral() && !Tok.needsCleaning() &&
Tok.getLiteralData()) {
OS.write(Tok.getLiteralData(), Tok.getLength());
- } else if (Tok.getLength() < 256) {
+ } else if (Tok.getLength() < llvm::array_lengthof(Buffer)) {
const char *TokPtr = Buffer;
unsigned Len = PP.getSpelling(Tok, TokPtr);
OS.write(TokPtr, Len);
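Aside: llvm::array_lengthof (llvm/ADT/STLExtras.h) is constexpr and yields the element count of a C array, so the bound now tracks Buffer's declared size instead of repeating a magic 256. Minimal standalone sketch:

    #include "llvm/ADT/STLExtras.h"
    char Buffer[256];
    static_assert(llvm::array_lengthof(Buffer) == 256, "elements, not bytes");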
diff --git a/lib/Frontend/Rewrite/FixItRewriter.cpp b/lib/Frontend/Rewrite/FixItRewriter.cpp
index dc787ac9557c..64785e301236 100644
--- a/lib/Frontend/Rewrite/FixItRewriter.cpp
+++ b/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -1,4 +1,4 @@
-//===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===//
+//===- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,28 +14,32 @@
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/Frontend/FixItRewriter.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditsReceiver.h"
#include "clang/Frontend/FrontendDiagnostic.h"
-#include "llvm/Support/Path.h"
+#include "clang/Rewrite/Core/RewriteBuffer.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
#include <memory>
+#include <string>
+#include <system_error>
+#include <utility>
using namespace clang;
FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const LangOptions &LangOpts,
FixItOptions *FixItOpts)
- : Diags(Diags),
- Editor(SourceMgr, LangOpts),
- Rewrite(SourceMgr, LangOpts),
- FixItOpts(FixItOpts),
- NumFailures(0),
- PrevDiagSilenced(false) {
+ : Diags(Diags), Editor(SourceMgr, LangOpts), Rewrite(SourceMgr, LangOpts),
+ FixItOpts(FixItOpts) {
Owner = Diags.takeClient();
Client = Diags.getClient();
Diags.setClient(this, false);
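Aside: the constructor's initializer list shrank because the dropped members presumably gained in-class default initializers in the header (not shown in this diff). The idiom in miniature:

    class Example {
      unsigned NumFailures = 0;      // was NumFailures(0) in the ctor
      bool PrevDiagSilenced = false; // was PrevDiagSilenced(false)
    public:
      Example() = default;           // defaults apply without restating them
    };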
@@ -59,20 +63,21 @@ class RewritesReceiver : public edit::EditsReceiver {
Rewriter &Rewrite;
public:
- RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) {}
void insert(SourceLocation loc, StringRef text) override {
Rewrite.InsertText(loc, text);
}
+
void replace(CharSourceRange range, StringRef text) override {
Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
}
};
-}
+} // namespace
bool FixItRewriter::WriteFixedFiles(
- std::vector<std::pair<std::string, std::string> > *RewrittenFiles) {
+ std::vector<std::pair<std::string, std::string>> *RewrittenFiles) {
if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) {
Diag(FullSourceLoc(), diag::warn_fixit_no_changes);
return true;
@@ -189,7 +194,7 @@ void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
Diag(Info.getLocation(), diag::note_fixit_applied);
}
-/// \brief Emit a diagnostic via the adapted diagnostic client.
+/// Emit a diagnostic via the adapted diagnostic client.
void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
// When producing this diagnostic, we temporarily bypass ourselves,
// clear out any current diagnostic, and let the downstream client
@@ -200,4 +205,4 @@ void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
Diags.setClient(this, false);
}
-FixItOptions::~FixItOptions() {}
+FixItOptions::~FixItOptions() = default;
diff --git a/lib/Frontend/Rewrite/HTMLPrint.cpp b/lib/Frontend/Rewrite/HTMLPrint.cpp
index 11e431de0a31..34ee9673cc54 100644
--- a/lib/Frontend/Rewrite/HTMLPrint.cpp
+++ b/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -86,8 +86,7 @@ void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
// Emit the HTML.
const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID);
- char *Buffer = (char*)malloc(RewriteBuf.size());
- std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer);
- Out->write(Buffer, RewriteBuf.size());
- free(Buffer);
+ std::unique_ptr<char[]> Buffer(new char[RewriteBuf.size()]);
+ std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer.get());
+ Out->write(Buffer.get(), RewriteBuf.size());
}
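Aside: the array form of std::unique_ptr calls delete[] on scope exit, which removes both the manual free() and the leak an early return or exception could have caused. The idiom, standalone (names hypothetical):

    #include "llvm/Support/raw_ostream.h"
    #include <algorithm>
    #include <cstddef>
    #include <memory>
    static void copyOut(const char *Src, std::size_t N, llvm::raw_ostream &Out) {
      std::unique_ptr<char[]> Buf(new char[N]);
      std::copy(Src, Src + N, Buf.get());
      Out.write(Buf.get(), N); // Buf freed automatically on every path
    }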
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index e0477069b340..1631eccd7013 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -32,8 +32,10 @@ class InclusionRewriter : public PPCallbacks {
struct IncludedFile {
FileID Id;
SrcMgr::CharacteristicKind FileType;
- IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType)
- : Id(Id), FileType(FileType) {}
+ const DirectoryLookup *DirLookup;
+ IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType,
+ const DirectoryLookup *DirLookup)
+ : Id(Id), FileType(FileType), DirLookup(DirLookup) {}
};
Preprocessor &PP; ///< Used to find inclusion directives.
SourceManager &SM; ///< Used to read and manage source files.
@@ -54,7 +56,8 @@ class InclusionRewriter : public PPCallbacks {
public:
InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers,
bool UseLineDirectives);
- void Process(FileID FileId, SrcMgr::CharacteristicKind FileType);
+ void Process(FileID FileId, SrcMgr::CharacteristicKind FileType,
+ const DirectoryLookup *DirLookup);
void setPredefinesBuffer(const llvm::MemoryBuffer *Buf) {
PredefinesBuffer = Buf;
}
@@ -74,7 +77,8 @@ private:
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
- const Module *Imported) override;
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) override;
void WriteLineInfo(StringRef Filename, int Line,
SrcMgr::CharacteristicKind FileType,
StringRef Extra = StringRef());
@@ -156,8 +160,9 @@ void InclusionRewriter::FileChanged(SourceLocation Loc,
// we didn't reach this file (eg: the main file) via an inclusion directive
return;
FileID Id = FullSourceLoc(Loc, SM).getFileID();
- auto P = FileIncludes.insert(std::make_pair(
- LastInclusionLocation.getRawEncoding(), IncludedFile(Id, NewFileType)));
+ auto P = FileIncludes.insert(
+ std::make_pair(LastInclusionLocation.getRawEncoding(),
+ IncludedFile(Id, NewFileType, PP.GetCurDirLookup())));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
LastInclusionLocation = SourceLocation();
@@ -188,7 +193,8 @@ void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
const FileEntry * /*File*/,
StringRef /*SearchPath*/,
StringRef /*RelativePath*/,
- const Module *Imported) {
+ const Module *Imported,
+    SrcMgr::CharacteristicKind FileType) {
if (Imported) {
auto P = ModuleIncludes.insert(
std::make_pair(HashLoc.getRawEncoding(), Imported));
@@ -408,7 +414,7 @@ bool InclusionRewriter::HandleHasInclude(
Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
// FIXME: Why don't we call PP.LookupFile here?
const FileEntry *File = PP.getHeaderSearchInfo().LookupFile(
- Filename, SourceLocation(), isAngled, nullptr, CurDir, Includers, nullptr,
+ Filename, SourceLocation(), isAngled, Lookup, CurDir, Includers, nullptr,
nullptr, nullptr, nullptr, nullptr);
FileExists = File != nullptr;
@@ -418,7 +424,8 @@ bool InclusionRewriter::HandleHasInclude(
/// Use a raw lexer to analyze \p FileId, incrementally copying parts of it
/// and including content of included files recursively.
void InclusionRewriter::Process(FileID FileId,
- SrcMgr::CharacteristicKind FileType) {
+ SrcMgr::CharacteristicKind FileType,
+ const DirectoryLookup *DirLookup) {
bool Invalid;
const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid);
assert(!Invalid && "Attempting to process invalid inclusion");
@@ -475,7 +482,7 @@ void InclusionRewriter::Process(FileID FileId,
<< Mod->getFullModuleName(true) << "\n";
// Include and recursively process the file.
- Process(Inc->Id, Inc->FileType);
+ Process(Inc->Id, Inc->FileType, Inc->DirLookup);
if (Mod)
OS << "#pragma clang module end /*"
@@ -532,11 +539,10 @@ void InclusionRewriter::Process(FileID FileId,
// Rewrite __has_include_next(x)
} else if (RawToken.getIdentifierInfo()->isStr(
"__has_include_next")) {
- const DirectoryLookup *Lookup = PP.GetCurDirLookup();
- if (Lookup)
- ++Lookup;
+ if (DirLookup)
+ ++DirLookup;
- if (!HandleHasInclude(FileId, RawLex, Lookup, RawToken,
+ if (!HandleHasInclude(FileId, RawLex, DirLookup, RawToken,
HasFile))
continue;
} else {
@@ -621,7 +627,7 @@ void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
Rewrite->handleModuleBegin(Tok);
} while (Tok.isNot(tok::eof));
Rewrite->setPredefinesBuffer(SM.getBuffer(PP.getPredefinesFileID()));
- Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User);
- Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User);
+ Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User, nullptr);
+ Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User, nullptr);
OS->flush();
}
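Aside: the DirLookup plumbing matters because __has_include_next must resume searching from the directory after the one that found the current file; querying PP.GetCurDirLookup() at rewrite time (the old code) need not match the file actually being processed. The builtin being served, for reference:

    /* inside some header on the include path: */
    #if defined(__has_include_next)
    #  if __has_include_next(<stdint.h>)
    #    include_next <stdint.h>
    #  endif
    #endif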
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 1954b24aedad..52b979c85f15 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -1714,7 +1714,7 @@ Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
else {
DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
elementName = DR->getDecl()->getName();
- ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ ValueDecl *VD = DR->getDecl();
if (VD->getType()->isObjCQualifiedIdType() ||
VD->getType()->isObjCQualifiedInterfaceType())
// Simply use 'id' for all qualified types.
@@ -2590,7 +2590,7 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -3295,7 +3295,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -3313,7 +3313,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
}
MsgExprs.push_back(SuperRep);
break;
@@ -3389,7 +3389,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -4720,7 +4720,7 @@ Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
return DRE;
Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
VK_LValue, OK_Ordinary,
- DRE->getLocation());
+ DRE->getLocation(), false);
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -5314,7 +5314,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
UO_AddrOf,
Context->getPointerType(Context->VoidPtrTy),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -5332,7 +5332,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation());
+ OK_Ordinary, SourceLocation(),
+ false);
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -5348,7 +5349,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation());
+ OK_Ordinary, SourceLocation(),
+ false);
}
}
@@ -5388,7 +5390,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (!isNestedCapturedVar)
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
Context->getPointerType(Exp->getType()),
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(),
+ false);
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
@@ -5414,7 +5417,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()),
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
// Put Paren around the call.
@@ -6744,9 +6747,9 @@ static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj,
if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
Ivar->getAccessControl() == ObjCIvarDecl::Package ||
Class->getVisibility() == HiddenVisibility)
- Visibility shoud be: HiddenVisibility;
+ Visibility should be: HiddenVisibility;
else
- Visibility shoud be: DefaultVisibility;
+ Visibility should be: DefaultVisibility;
*/
Result += "\n";
@@ -7558,7 +7561,7 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
VK_LValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
PE = new (Context) ParenExpr(OldRange.getBegin(),
OldRange.getEnd(),
Exp);
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 096b81bc3f08..9938f89eb869 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -1502,7 +1502,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
else {
DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
elementName = DR->getDecl()->getName();
- ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ ValueDecl *VD = DR->getDecl();
if (VD->getType()->isObjCQualifiedIdType() ||
VD->getType()->isObjCQualifiedInterfaceType())
// Simply use 'id' for all qualified types.
@@ -2511,7 +2511,7 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2712,7 +2712,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -2730,7 +2730,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
}
MsgExprs.push_back(SuperRep);
break;
@@ -2806,7 +2806,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
Context->getPointerType(SuperRep->getType()),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
CK_BitCast, SuperRep);
@@ -3045,7 +3045,7 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
VK_LValue, SourceLocation());
Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
CK_BitCast,
DerefExpr);
@@ -3875,7 +3875,7 @@ Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
return DRE;
Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
VK_LValue, OK_Ordinary,
- DRE->getLocation());
+ DRE->getLocation(), false);
// Need parens to enforce precedence.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
Exp);
@@ -4438,7 +4438,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
UO_AddrOf,
Context->getPointerType(Context->VoidPtrTy),
VK_RValue, OK_Ordinary,
- SourceLocation());
+ SourceLocation(), false);
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -4456,7 +4456,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation());
+ OK_Ordinary, SourceLocation(),
+ false);
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -4472,7 +4473,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation());
+ OK_Ordinary, SourceLocation(),
+ false);
}
}
InitExprs.push_back(Exp);
@@ -4509,9 +4511,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
- Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
- Context->getPointerType(Exp->getType()),
- VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = new (Context) UnaryOperator(
+ Exp, UO_AddrOf, Context->getPointerType(Exp->getType()), VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
}
@@ -4529,7 +4531,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
FType, VK_LValue, SourceLocation());
NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
Context->getPointerType(NewRep->getType()),
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
BlockDeclRefs.clear();
diff --git a/lib/Frontend/SerializedDiagnosticPrinter.cpp b/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 7666fe10b381..ca60c4812f72 100644
--- a/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -162,131 +162,131 @@ public:
void finish() override;
private:
- /// \brief Build a DiagnosticsEngine to emit diagnostics about the diagnostics
+ /// Build a DiagnosticsEngine to emit diagnostics about the diagnostics
DiagnosticsEngine *getMetaDiags();
- /// \brief Remove old copies of the serialized diagnostics. This is necessary
+ /// Remove old copies of the serialized diagnostics. This is necessary
/// so that we can detect when subprocesses write diagnostics that we should
/// merge into our own.
void RemoveOldDiagnostics();
- /// \brief Emit the preamble for the serialized diagnostics.
+ /// Emit the preamble for the serialized diagnostics.
void EmitPreamble();
- /// \brief Emit the BLOCKINFO block.
+ /// Emit the BLOCKINFO block.
void EmitBlockInfoBlock();
- /// \brief Emit the META data block.
+ /// Emit the META data block.
void EmitMetaBlock();
- /// \brief Start a DIAG block.
+ /// Start a DIAG block.
void EnterDiagBlock();
- /// \brief End a DIAG block.
+ /// End a DIAG block.
void ExitDiagBlock();
- /// \brief Emit a DIAG record.
+ /// Emit a DIAG record.
void EmitDiagnosticMessage(FullSourceLoc Loc, PresumedLoc PLoc,
DiagnosticsEngine::Level Level, StringRef Message,
DiagOrStoredDiag D);
- /// \brief Emit FIXIT and SOURCE_RANGE records for a diagnostic.
+ /// Emit FIXIT and SOURCE_RANGE records for a diagnostic.
void EmitCodeContext(SmallVectorImpl<CharSourceRange> &Ranges,
ArrayRef<FixItHint> Hints,
const SourceManager &SM);
- /// \brief Emit a record for a CharSourceRange.
+ /// Emit a record for a CharSourceRange.
void EmitCharSourceRange(CharSourceRange R, const SourceManager &SM);
- /// \brief Emit the string information for the category.
+ /// Emit the string information for the category.
unsigned getEmitCategory(unsigned category = 0);
- /// \brief Emit the string information for diagnostic flags.
+ /// Emit the string information for diagnostic flags.
unsigned getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
unsigned DiagID = 0);
unsigned getEmitDiagnosticFlag(StringRef DiagName);
- /// \brief Emit (lazily) the file string and retrieved the file identifier.
+  /// Emit (lazily) the file string and retrieve the file identifier.
unsigned getEmitFile(const char *Filename);
- /// \brief Add SourceLocation information the specified record.
+  /// Add SourceLocation information to the specified record.
void AddLocToRecord(FullSourceLoc Loc, PresumedLoc PLoc,
RecordDataImpl &Record, unsigned TokSize = 0);
- /// \brief Add SourceLocation information the specified record.
+  /// Add SourceLocation information to the specified record.
void AddLocToRecord(FullSourceLoc Loc, RecordDataImpl &Record,
unsigned TokSize = 0) {
AddLocToRecord(Loc, Loc.hasManager() ? Loc.getPresumedLoc() : PresumedLoc(),
Record, TokSize);
}
- /// \brief Add CharSourceRange information the specified record.
+  /// Add CharSourceRange information to the specified record.
void AddCharSourceRangeToRecord(CharSourceRange R, RecordDataImpl &Record,
const SourceManager &SM);
- /// \brief Language options, which can differ from one clone of this client
+ /// Language options, which can differ from one clone of this client
/// to another.
const LangOptions *LangOpts;
- /// \brief Whether this is the original instance (rather than one of its
+ /// Whether this is the original instance (rather than one of its
/// clones), responsible for writing the file at the end.
bool OriginalInstance;
- /// \brief Whether this instance should aggregate diagnostics that are
+ /// Whether this instance should aggregate diagnostics that are
/// generated from child processes.
bool MergeChildRecords;
- /// \brief State that is shared among the various clones of this diagnostic
+ /// State that is shared among the various clones of this diagnostic
/// consumer.
struct SharedState {
SharedState(StringRef File, DiagnosticOptions *Diags)
: DiagOpts(Diags), Stream(Buffer), OutputFile(File.str()),
EmittedAnyDiagBlocks(false) {}
- /// \brief Diagnostic options.
+ /// Diagnostic options.
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
- /// \brief The byte buffer for the serialized content.
+ /// The byte buffer for the serialized content.
SmallString<1024> Buffer;
- /// \brief The BitStreamWriter for the serialized diagnostics.
+ /// The BitStreamWriter for the serialized diagnostics.
llvm::BitstreamWriter Stream;
- /// \brief The name of the diagnostics file.
+ /// The name of the diagnostics file.
std::string OutputFile;
- /// \brief The set of constructed record abbreviations.
+ /// The set of constructed record abbreviations.
AbbreviationMap Abbrevs;
- /// \brief A utility buffer for constructing record content.
+ /// A utility buffer for constructing record content.
RecordData Record;
- /// \brief A text buffer for rendering diagnostic text.
+ /// A text buffer for rendering diagnostic text.
SmallString<256> diagBuf;
- /// \brief The collection of diagnostic categories used.
+ /// The collection of diagnostic categories used.
llvm::DenseSet<unsigned> Categories;
- /// \brief The collection of files used.
+ /// The collection of files used.
llvm::DenseMap<const char *, unsigned> Files;
typedef llvm::DenseMap<const void *, std::pair<unsigned, StringRef> >
DiagFlagsTy;
- /// \brief Map for uniquing strings.
+ /// Map for uniquing strings.
DiagFlagsTy DiagFlags;
- /// \brief Whether we have already started emission of any DIAG blocks. Once
+ /// Whether we have already started emission of any DIAG blocks. Once
/// this becomes \c true, we never close a DIAG block until we know that we're
/// starting another one or we're done.
bool EmittedAnyDiagBlocks;
- /// \brief Engine for emitting diagnostics about the diagnostics.
+ /// Engine for emitting diagnostics about the diagnostics.
std::unique_ptr<DiagnosticsEngine> MetaDiagnostics;
};
- /// \brief State shared among the various clones of this diagnostic consumer.
+ /// State shared among the various clones of this diagnostic consumer.
std::shared_ptr<SharedState> State;
};
} // end anonymous namespace
@@ -305,7 +305,7 @@ create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords) {
// Serialization methods.
//===----------------------------------------------------------------------===//
-/// \brief Emits a block ID in the BLOCKINFO block.
+/// Emits a block ID in the BLOCKINFO block.
static void EmitBlockID(unsigned ID, const char *Name,
llvm::BitstreamWriter &Stream,
RecordDataImpl &Record) {
@@ -325,7 +325,7 @@ static void EmitBlockID(unsigned ID, const char *Name,
Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
}
-/// \brief Emits a record ID in the BLOCKINFO block.
+/// Emits a record ID in the BLOCKINFO block.
static void EmitRecordID(unsigned ID, const char *Name,
llvm::BitstreamWriter &Stream,
RecordDataImpl &Record){
@@ -395,7 +395,7 @@ void SDiagsWriter::EmitCharSourceRange(CharSourceRange R,
State->Record);
}
-/// \brief Emits the preamble of the diagnostics file.
+/// Emits the preamble of the diagnostics file.
void SDiagsWriter::EmitPreamble() {
// Emit the file header.
State->Stream.Emit((unsigned)'D', 8);
@@ -462,7 +462,7 @@ void SDiagsWriter::EmitBlockInfoBlock() {
   Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Diagnostic text.
Abbrevs.set(RECORD_DIAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
- // Emit abbrevation for RECORD_CATEGORY.
+ // Emit abbreviation for RECORD_CATEGORY.
Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(RECORD_CATEGORY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Category ID.
@@ -470,7 +470,7 @@ void SDiagsWriter::EmitBlockInfoBlock() {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Category text.
Abbrevs.set(RECORD_CATEGORY, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
- // Emit abbrevation for RECORD_SOURCE_RANGE.
+ // Emit abbreviation for RECORD_SOURCE_RANGE.
Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(RECORD_SOURCE_RANGE));
AddRangeLocationAbbrev(*Abbrev);
diff --git a/lib/Frontend/SerializedDiagnosticReader.cpp b/lib/Frontend/SerializedDiagnosticReader.cpp
index 08b7087fbad6..458717819c41 100644
--- a/lib/Frontend/SerializedDiagnosticReader.cpp
+++ b/lib/Frontend/SerializedDiagnosticReader.cpp
@@ -1,4 +1,4 @@
-//===--- SerializedDiagnosticReader.cpp - Reads diagnostics ---------------===//
+//===- SerializedDiagnosticReader.cpp - Reads diagnostics -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,11 +9,22 @@
#include "clang/Frontend/SerializedDiagnosticReader.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
#include "clang/Frontend/SerializedDiagnostics.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/ManagedStatic.h"
+#include <cstdint>
+#include <system_error>
using namespace clang;
-using namespace clang::serialized_diags;
+using namespace serialized_diags;
std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
// Open the diagnostics file.
@@ -44,13 +55,12 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
std::error_code EC;
switch (Stream.ReadSubBlockID()) {
- case llvm::bitc::BLOCKINFO_BLOCK_ID: {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID:
BlockInfo = Stream.ReadBlockInfoBlock();
if (!BlockInfo)
return SDError::MalformedBlockInfoBlock;
Stream.setBlockInfo(&*BlockInfo);
continue;
- }
case BLOCK_META:
if ((EC = readMetaBlock(Stream)))
return EC;
@@ -65,7 +75,7 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
continue;
}
}
- return std::error_code();
+ return {};
}
enum class SerializedDiagnosticReader::Cursor {
@@ -132,7 +142,7 @@ SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
case Cursor::BlockEnd:
if (!VersionChecked)
return SDError::MissingVersion;
- return std::error_code();
+ return {};
}
SmallVector<uint64_t, 1> Record;
@@ -176,7 +186,7 @@ SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
case Cursor::BlockEnd:
if ((EC = visitEndOfDiagnostic()))
return EC;
- return std::error_code();
+ return {};
case Cursor::Record:
break;
}
@@ -253,12 +263,14 @@ SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
}
namespace {
+
class SDErrorCategoryType final : public std::error_category {
const char *name() const noexcept override {
return "clang.serialized_diags";
}
+
std::string message(int IE) const override {
- SDError E = static_cast<SDError>(IE);
+ auto E = static_cast<SDError>(IE);
switch (E) {
case SDError::CouldNotLoad:
return "Failed to open diagnostics file";
@@ -290,7 +302,8 @@ class SDErrorCategoryType final : public std::error_category {
llvm_unreachable("Unknown error type!");
}
};
-}
+
+} // namespace
static llvm::ManagedStatic<SDErrorCategoryType> ErrorCategory;
const std::error_category &clang::serialized_diags::SDErrorCategory() {
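Aside: the repeated "return {};" substitution works because a value-initialized std::error_code means success. Self-contained sketch:

    #include <system_error>
    static std::error_code ok() { return {}; } // same as std::error_code()
    static const bool IsSuccess = !ok();       // true: default code == no error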
diff --git a/lib/Frontend/TextDiagnostic.cpp b/lib/Frontend/TextDiagnostic.cpp
index 6a72b00c602b..85cd019005da 100644
--- a/lib/Frontend/TextDiagnostic.cpp
+++ b/lib/Frontend/TextDiagnostic.cpp
@@ -42,7 +42,7 @@ static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
static const enum raw_ostream::Colors savedColor =
raw_ostream::SAVEDCOLOR;
-/// \brief Add highlights to differences in template strings.
+/// Add highlights to differences in template strings.
static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
bool &Normal, bool Bold) {
while (1) {
@@ -63,7 +63,7 @@ static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
}
}
-/// \brief Number of spaces to indent when word-wrapping.
+/// Number of spaces to indent when word-wrapping.
const unsigned WordWrapIndentation = 6;
static int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
@@ -76,7 +76,7 @@ static int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
return bytes;
}
-/// \brief returns a printable representation of first item from input range
+/// Returns a printable representation of the first item from the input range
///
/// This function returns a printable representation of the next item in a line
/// of source. If the next byte begins a valid and printable character, that
@@ -269,14 +269,14 @@ struct SourceColumnMap {
int columns() const { return m_byteToColumn.back(); }
int bytes() const { return m_columnToByte.back(); }
- /// \brief Map a byte to the column which it is at the start of, or return -1
+ /// Map a byte to the column which it is at the start of, or return -1
/// if it is not at the start of a column (for a UTF-8 trailing byte).
int byteToColumn(int n) const {
assert(0<=n && n<static_cast<int>(m_byteToColumn.size()));
return m_byteToColumn[n];
}
- /// \brief Map a byte to the first column which contains it.
+ /// Map a byte to the first column which contains it.
int byteToContainingColumn(int N) const {
assert(0 <= N && N < static_cast<int>(m_byteToColumn.size()));
while (m_byteToColumn[N] == -1)
@@ -284,7 +284,7 @@ struct SourceColumnMap {
return m_byteToColumn[N];
}
- /// \brief Map a column to the byte which starts the column, or return -1 if
+ /// Map a column to the byte which starts the column, or return -1 if
  /// the column is the second or subsequent column of an expanded tab or similar
/// multi-column entity.
int columnToByte(int n) const {
@@ -292,14 +292,14 @@ struct SourceColumnMap {
return m_columnToByte[n];
}
- /// \brief Map from a byte index to the next byte which starts a column.
+ /// Map from a byte index to the next byte which starts a column.
int startOfNextColumn(int N) const {
assert(0 <= N && N < static_cast<int>(m_byteToColumn.size() - 1));
while (byteToColumn(++N) == -1) {}
return N;
}
- /// \brief Map from a byte index to the previous byte which starts a column.
+ /// Map from a byte index to the previous byte which starts a column.
int startOfPreviousColumn(int N) const {
assert(0 < N && N < static_cast<int>(m_byteToColumn.size()));
while (byteToColumn(--N) == -1) {}
@@ -317,7 +317,7 @@ private:
};
} // end anonymous namespace
-/// \brief When the source code line we want to print is too long for
+/// When the source code line we want to print is too long for
/// the terminal, select the "interesting" region.
static void selectInterestingSourceRegion(std::string &SourceLine,
std::string &CaretLine,
@@ -507,7 +507,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
}
}
-/// \brief Skip over whitespace in the string, starting at the given
+/// Skip over whitespace in the string, starting at the given
/// index.
///
/// \returns The index of the first non-whitespace character that is
@@ -519,7 +519,7 @@ static unsigned skipWhitespace(unsigned Idx, StringRef Str, unsigned Length) {
return Idx;
}
-/// \brief If the given character is the start of some kind of
+/// If the given character is the start of some kind of
/// balanced punctuation (e.g., quotes or parentheses), return the
/// character that will terminate the punctuation.
///
@@ -539,7 +539,7 @@ static inline char findMatchingPunctuation(char c) {
return 0;
}
-/// \brief Find the end of the word starting at the given offset
+/// Find the end of the word starting at the given offset
/// within a string.
///
/// \returns the index pointing one character past the end of the
@@ -596,7 +596,7 @@ static unsigned findEndOfWord(unsigned Start, StringRef Str,
return findEndOfWord(Start + 1, Str, Length, Column + 1, Columns);
}
-/// \brief Print the given string to a stream, word-wrapping it to
+/// Print the given string to a stream, word-wrapping it to
/// some number of columns in the process.
///
/// \param OS the stream to which the word-wrapping string will be
@@ -777,7 +777,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
OS << Filename;
}
-/// \brief Print out the file/line/column information and include trace.
+/// Print out the file/line/column information and include trace.
///
/// This method handles the emission of the diagnostic location information.
/// This includes extracting as much location information as is present for
@@ -852,23 +852,14 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
// Ignore invalid ranges.
if (!RI->isValid()) continue;
- FullSourceLoc B =
- FullSourceLoc(RI->getBegin(), Loc.getManager()).getExpansionLoc();
- FullSourceLoc E =
- FullSourceLoc(RI->getEnd(), Loc.getManager()).getExpansionLoc();
+ auto &SM = Loc.getManager();
+ SourceLocation B = SM.getExpansionLoc(RI->getBegin());
+ CharSourceRange ERange = SM.getExpansionRange(RI->getEnd());
+ SourceLocation E = ERange.getEnd();
+ bool IsTokenRange = ERange.isTokenRange();
- // If the End location and the start location are the same and are a
- // macro location, then the range was something that came from a
- // macro expansion or _Pragma. If this is an object-like macro, the
- // best we can do is to highlight the range. If this is a
- // function-like macro, we'd also like to highlight the arguments.
- if (B == E && RI->getEnd().isMacroID())
- E = FullSourceLoc(RI->getEnd(), Loc.getManager())
- .getExpansionRange()
- .second;
-
- std::pair<FileID, unsigned> BInfo = B.getDecomposedLoc();
- std::pair<FileID, unsigned> EInfo = E.getDecomposedLoc();
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
// If the start or end of the range is in another file, just discard
// it.
@@ -878,11 +869,14 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
// Add in the length of the token, so that we cover multi-char
// tokens.
unsigned TokSize = 0;
- if (RI->isTokenRange())
- TokSize = Lexer::MeasureTokenLength(E, E.getManager(), LangOpts);
-
- OS << '{' << B.getLineNumber() << ':' << B.getColumnNumber() << '-'
- << E.getLineNumber() << ':' << (E.getColumnNumber() + TokSize) << '}';
+ if (IsTokenRange)
+ TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
+
+ FullSourceLoc BF(B, SM), EF(E, SM);
+ OS << '{'
+ << BF.getLineNumber() << ':' << BF.getColumnNumber() << '-'
+ << EF.getLineNumber() << ':' << (EF.getColumnNumber() + TokSize)
+ << '}';
PrintedRange = true;
}
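Aside: this rewrite leans on SourceManager::getExpansionRange now returning a CharSourceRange that carries its own token-vs-char flag, which subsumes the old object-like/function-like macro special case. A hedged fragment (assumes clang/Basic/SourceManager.h and clang/Lex/Lexer.h):

    static unsigned endTokenSize(const clang::SourceManager &SM,
                                 clang::SourceLocation Loc,
                                 const clang::LangOptions &LO) {
      clang::CharSourceRange ER = SM.getExpansionRange(Loc);
      return ER.isTokenRange()
                 ? clang::Lexer::MeasureTokenLength(ER.getEnd(), SM, LO)
                 : 0; // char ranges already end past the last character
    }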
@@ -919,7 +913,7 @@ void TextDiagnostic::emitBuildingModuleLocation(FullSourceLoc Loc,
OS << "While building module '" << ModuleName << "':\n";
}
-/// \brief Find the suitable set of lines to show to include a set of ranges.
+/// Find the suitable set of lines to show to include a set of ranges.
static llvm::Optional<std::pair<unsigned, unsigned>>
findLinesForRange(const CharSourceRange &R, FileID FID,
const SourceManager &SM) {
@@ -969,7 +963,7 @@ maybeAddRange(std::pair<unsigned, unsigned> A, std::pair<unsigned, unsigned> B,
return A;
}
-/// \brief Highlight a SourceRange (with ~'s) for any characters on LineNo.
+/// Highlight a SourceRange (with ~'s) for any characters on LineNo.
static void highlightRange(const CharSourceRange &R,
unsigned LineNo, FileID FID,
const SourceColumnMap &map,
@@ -1116,7 +1110,7 @@ static std::string buildFixItInsertionLine(FileID FID,
return FixItInsertionLine;
}
-/// \brief Emit a code snippet and caret line.
+/// Emit a code snippet and caret line.
///
/// This routine emits a single line's code snippet and caret line.
///
diff --git a/lib/Frontend/TextDiagnosticBuffer.cpp b/lib/Frontend/TextDiagnosticBuffer.cpp
index 288507310baa..44bb2bc29bc0 100644
--- a/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -1,4 +1,4 @@
-//===--- TextDiagnosticBuffer.cpp - Buffer Text Diagnostics ---------------===//
+//===- TextDiagnosticBuffer.cpp - Buffer Text Diagnostics -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,13 +12,15 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
+
using namespace clang;
/// HandleDiagnostic - Store the errors, warnings, and notes that are
/// reported.
-///
void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
const Diagnostic &Info) {
// Default implementation (Warnings/errors count).
@@ -50,25 +52,24 @@ void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
}
void TextDiagnosticBuffer::FlushDiagnostics(DiagnosticsEngine &Diags) const {
- for (auto it = All.begin(), ie = All.end(); it != ie; ++it) {
- auto Diag = Diags.Report(Diags.getCustomDiagID(it->first, "%0"));
- switch (it->first) {
+ for (const auto &I : All) {
+ auto Diag = Diags.Report(Diags.getCustomDiagID(I.first, "%0"));
+ switch (I.first) {
default: llvm_unreachable(
"Diagnostic not handled during diagnostic flushing!");
case DiagnosticsEngine::Note:
- Diag << Notes[it->second].second;
+ Diag << Notes[I.second].second;
break;
case DiagnosticsEngine::Warning:
- Diag << Warnings[it->second].second;
+ Diag << Warnings[I.second].second;
break;
case DiagnosticsEngine::Remark:
- Diag << Remarks[it->second].second;
+ Diag << Remarks[I.second].second;
break;
case DiagnosticsEngine::Error:
case DiagnosticsEngine::Fatal:
- Diag << Errors[it->second].second;
+ Diag << Errors[I.second].second;
break;
}
}
}
-
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index 5dd3252d5b1e..a37382c116ae 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -44,7 +44,7 @@ void TextDiagnosticPrinter::EndSourceFile() {
TextDiag.reset();
}
-/// \brief Print any diagnostic option information to a raw_ostream.
+/// Print any diagnostic option information to a raw_ostream.
///
/// This implements all of the logic for adding diagnostic options to a message
/// (via OS). Each relevant option is comma separated and all are enclosed in
diff --git a/lib/Frontend/VerifyDiagnosticConsumer.cpp b/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 0df5393a309b..21933f474ff5 100644
--- a/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -1,4 +1,4 @@
-//===---- VerifyDiagnosticConsumer.cpp - Verifying Diagnostic Client ------===//
+//===- VerifyDiagnosticConsumer.cpp - Verifying Diagnostic Client ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,27 +13,48 @@
#include "clang/Frontend/VerifyDiagnosticConsumer.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
using namespace clang;
-typedef VerifyDiagnosticConsumer::Directive Directive;
-typedef VerifyDiagnosticConsumer::DirectiveList DirectiveList;
-typedef VerifyDiagnosticConsumer::ExpectedData ExpectedData;
+
+using Directive = VerifyDiagnosticConsumer::Directive;
+using DirectiveList = VerifyDiagnosticConsumer::DirectiveList;
+using ExpectedData = VerifyDiagnosticConsumer::ExpectedData;
VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &Diags_)
- : Diags(Diags_),
- PrimaryClient(Diags.getClient()), PrimaryClientOwner(Diags.takeClient()),
- Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(nullptr),
- LangOpts(nullptr), SrcManager(nullptr), ActiveSourceFiles(0),
- Status(HasNoDirectives)
-{
+ : Diags(Diags_), PrimaryClient(Diags.getClient()),
+ PrimaryClientOwner(Diags.takeClient()),
+ Buffer(new TextDiagnosticBuffer()), Status(HasNoDirectives) {
if (Diags.hasSourceManager())
setSourceManager(Diags.getSourceManager());
}
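Aside: the typedef-to-using conversion is behavior-neutral; "using" reads left-to-right and, unlike typedef, also scales to alias templates. In miniature:

    #include <vector>
    using DirectiveListT = std::vector<int>;             // typedef-equivalent
    template <typename T> using ListOf = std::vector<T>; // no typedef analogue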
@@ -48,16 +69,18 @@ VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
}
#ifndef NDEBUG
+
namespace {
+
class VerifyFileTracker : public PPCallbacks {
VerifyDiagnosticConsumer &Verify;
SourceManager &SM;
public:
VerifyFileTracker(VerifyDiagnosticConsumer &Verify, SourceManager &SM)
- : Verify(Verify), SM(SM) { }
+ : Verify(Verify), SM(SM) {}
- /// \brief Hook into the preprocessor and update the list of parsed
+ /// Hook into the preprocessor and update the list of parsed
/// files when the preprocessor indicates a new file is entered.
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -66,7 +89,9 @@ public:
VerifyDiagnosticConsumer::IsParsed);
}
};
-} // End anonymous namespace.
+
+} // namespace
+
#endif
// DiagnosticConsumer interface.
@@ -79,10 +104,10 @@ void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
CurrentPreprocessor = PP;
this->LangOpts = &LangOpts;
setSourceManager(PP->getSourceManager());
- const_cast<Preprocessor*>(PP)->addCommentHandler(this);
+ const_cast<Preprocessor *>(PP)->addCommentHandler(this);
#ifndef NDEBUG
// Debug build tracks parsed files.
- const_cast<Preprocessor*>(PP)->addPPCallbacks(
+ const_cast<Preprocessor *>(PP)->addPPCallbacks(
llvm::make_unique<VerifyFileTracker>(*this, *SrcManager));
#endif
}
@@ -99,7 +124,8 @@ void VerifyDiagnosticConsumer::EndSourceFile() {
// Detach comment handler once last active source file completed.
if (--ActiveSourceFiles == 0) {
if (CurrentPreprocessor)
- const_cast<Preprocessor*>(CurrentPreprocessor)->removeCommentHandler(this);
+ const_cast<Preprocessor *>(CurrentPreprocessor)->
+ removeCommentHandler(this);
// Check diagnostics once last file completed.
CheckDiagnostics();
@@ -152,19 +178,18 @@ void VerifyDiagnosticConsumer::HandleDiagnostic(
// Checking diagnostics implementation.
//===----------------------------------------------------------------------===//
-typedef TextDiagnosticBuffer::DiagList DiagList;
-typedef TextDiagnosticBuffer::const_iterator const_diag_iterator;
+using DiagList = TextDiagnosticBuffer::DiagList;
+using const_diag_iterator = TextDiagnosticBuffer::const_iterator;
namespace {
/// StandardDirective - Directive with string matching.
-///
class StandardDirective : public Directive {
public:
StandardDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
bool MatchAnyLine, StringRef Text, unsigned Min,
unsigned Max)
- : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max) { }
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max) {}
bool isValid(std::string &Error) override {
// all strings are considered valid; even empty ones
@@ -177,14 +202,13 @@ public:
};
/// RegexDirective - Directive with regular-expression matching.
-///
class RegexDirective : public Directive {
public:
RegexDirective(SourceLocation DirectiveLoc, SourceLocation DiagnosticLoc,
bool MatchAnyLine, StringRef Text, unsigned Min, unsigned Max,
StringRef RegexStr)
- : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max),
- Regex(RegexStr) { }
+ : Directive(DirectiveLoc, DiagnosticLoc, MatchAnyLine, Text, Min, Max),
+ Regex(RegexStr) {}
bool isValid(std::string &Error) override {
return Regex.isValid(Error);
@@ -202,7 +226,7 @@ class ParseHelper
{
public:
ParseHelper(StringRef S)
- : Begin(S.begin()), End(S.end()), C(Begin), P(Begin), PEnd(nullptr) {}
+ : Begin(S.begin()), End(S.end()), C(Begin), P(Begin) {}
// Return true if string literal is next.
bool Next(StringRef S) {
@@ -210,7 +234,7 @@ public:
PEnd = C + S.size();
if (PEnd > End)
return false;
- return !memcmp(P, S.data(), S.size());
+ return memcmp(P, S.data(), S.size()) == 0;
}
// Return true if number is next.
@@ -321,16 +345,23 @@ public:
return !(C < End);
}
- const char * const Begin; // beginning of expected content
- const char * const End; // end of expected content (1-past)
- const char *C; // position of next char in content
+ // Beginning of expected content.
+ const char * const Begin;
+
+ // End of expected content (1-past).
+ const char * const End;
+
+ // Position of next char in content.
+ const char *C;
+
const char *P;
private:
- const char *PEnd; // previous next/search subject end (1-past)
+ // Previous next/search subject end (1-past).
+ const char *PEnd = nullptr;
};
-} // namespace anonymous
+} // anonymous namespace
/// ParseDirective - Go through the comment and see if it indicates expected
/// diagnostics. If so, then put them in the appropriate directive list.
@@ -625,7 +656,7 @@ bool VerifyDiagnosticConsumer::HandleComment(Preprocessor &PP,
}
#ifndef NDEBUG
-/// \brief Lex the specified source file to determine whether it contains
+/// Lex the specified source file to determine whether it contains
/// any expected-* directives. As a Lexer is used rather than a full-blown
/// Preprocessor, directives inside skipped #if blocks will still be found.
///
@@ -663,7 +694,7 @@ static bool findDirectives(SourceManager &SM, FileID FID,
}
#endif // !NDEBUG
-/// \brief Takes a list of diagnostics that have been generated but not matched
+/// Takes a list of diagnostics that have been generated but not matched
/// by an expected-* directive and produces a diagnostic to the user from this.
static unsigned PrintUnexpected(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
const_diag_iterator diag_begin,
@@ -691,7 +722,7 @@ static unsigned PrintUnexpected(DiagnosticsEngine &Diags, SourceManager *SourceM
return std::distance(diag_begin, diag_end);
}
-/// \brief Takes a list of diagnostics that were expected to have been generated
+/// Takes a list of diagnostics that were expected to have been generated
/// but were not and produces a diagnostic to the user from this.
static unsigned PrintExpected(DiagnosticsEngine &Diags,
SourceManager &SourceMgr,
@@ -701,21 +732,20 @@ static unsigned PrintExpected(DiagnosticsEngine &Diags,
SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
- for (auto *DirPtr : DL) {
- Directive &D = *DirPtr;
- if (D.DiagnosticLoc.isInvalid())
+ for (const auto *D : DL) {
+ if (D->DiagnosticLoc.isInvalid())
OS << "\n File *";
else
- OS << "\n File " << SourceMgr.getFilename(D.DiagnosticLoc);
- if (D.MatchAnyLine)
+ OS << "\n File " << SourceMgr.getFilename(D->DiagnosticLoc);
+ if (D->MatchAnyLine)
OS << " Line *";
else
- OS << " Line " << SourceMgr.getPresumedLineNumber(D.DiagnosticLoc);
- if (D.DirectiveLoc != D.DiagnosticLoc)
+ OS << " Line " << SourceMgr.getPresumedLineNumber(D->DiagnosticLoc);
+ if (D->DirectiveLoc != D->DiagnosticLoc)
OS << " (directive at "
- << SourceMgr.getFilename(D.DirectiveLoc) << ':'
- << SourceMgr.getPresumedLineNumber(D.DirectiveLoc) << ')';
- OS << ": " << D.Text;
+ << SourceMgr.getFilename(D->DirectiveLoc) << ':'
+ << SourceMgr.getPresumedLineNumber(D->DirectiveLoc) << ')';
+ OS << ": " << D->Text;
}
Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
@@ -723,7 +753,7 @@ static unsigned PrintExpected(DiagnosticsEngine &Diags,
return DL.size();
}
-/// \brief Determine whether two source locations come from the same file.
+/// Determine whether two source locations come from the same file.
static bool IsFromSameFile(SourceManager &SM, SourceLocation DirectiveLoc,
SourceLocation DiagnosticLoc) {
while (DiagnosticLoc.isMacroID())
@@ -741,7 +771,6 @@ static bool IsFromSameFile(SourceManager &SM, SourceLocation DirectiveLoc,
/// CheckLists - Compare expected to seen diagnostic lists and return
/// the difference between them.
-///
static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const char *Label,
DirectiveList &Left,
@@ -792,7 +821,6 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
/// CheckResults - This compares the expected results to those that
/// were actually reported. It emits any discrepancies. Return "true" if there
/// were problems. Return "false" otherwise.
-///
static unsigned CheckResults(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const TextDiagnosticBuffer &Buffer,
ExpectedData &ED) {
@@ -875,19 +903,16 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
// this file is being parsed separately from the main file, in which
// case consider moving the directives to the correct place, if this
// is applicable.
- if (UnparsedFiles.size() > 0) {
+ if (!UnparsedFiles.empty()) {
// Generate a cache of parsed FileEntry pointers for alias lookups.
llvm::SmallPtrSet<const FileEntry *, 8> ParsedFileCache;
- for (ParsedFilesMap::iterator I = ParsedFiles.begin(),
- End = ParsedFiles.end(); I != End; ++I) {
- if (const FileEntry *FE = I->second)
+ for (const auto &I : ParsedFiles)
+ if (const FileEntry *FE = I.second)
ParsedFileCache.insert(FE);
- }
// Iterate through list of unparsed files.
- for (UnparsedFilesMap::iterator I = UnparsedFiles.begin(),
- End = UnparsedFiles.end(); I != End; ++I) {
- const UnparsedFileStatus &Status = I->second;
+ for (const auto &I : UnparsedFiles) {
+ const UnparsedFileStatus &Status = I.second;
const FileEntry *FE = Status.getFile();
// Skip files that have been parsed via an alias.
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 4167e1fe20b8..ac2ee50a1e4b 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -32,6 +32,8 @@
using namespace clang;
using namespace llvm::opt;
+namespace clang {
+
static std::unique_ptr<FrontendAction>
CreateFrontendBaseAction(CompilerInstance &CI) {
using namespace clang::frontend;
@@ -43,6 +45,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case ASTDump: return llvm::make_unique<ASTDumpAction>();
case ASTPrint: return llvm::make_unique<ASTPrintAction>();
case ASTView: return llvm::make_unique<ASTViewAction>();
+ case DumpCompilerOptions:
+ return llvm::make_unique<DumpCompilerOptionsAction>();
case DumpRawTokens: return llvm::make_unique<DumpRawTokensAction>();
case DumpTokens: return llvm::make_unique<DumpTokensAction>();
case EmitAssembly: return llvm::make_unique<EmitAssemblyAction>();
@@ -63,6 +67,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case ParseSyntaxOnly: return llvm::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return llvm::make_unique<DumpModuleInfoAction>();
case VerifyPCH: return llvm::make_unique<VerifyPCHAction>();
+ case TemplightDump: return llvm::make_unique<TemplightDumpAction>();
case PluginAction: {
for (FrontendPluginRegistry::iterator it =
@@ -122,7 +127,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
#endif
}
-static std::unique_ptr<FrontendAction>
+std::unique_ptr<FrontendAction>
CreateFrontendAction(CompilerInstance &CI) {
// Create the underlying action.
std::unique_ptr<FrontendAction> Act = CreateFrontendBaseAction(CI);
@@ -173,7 +178,7 @@ CreateFrontendAction(CompilerInstance &CI) {
return Act;
}
-bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
+bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
std::unique_ptr<OptTable> Opts = driver::createDriverOptTable();
@@ -254,3 +259,5 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
BuryPointer(std::move(Act));
return Success;
}
+
+} // namespace clang
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 97ba3edea1c5..1930d8e225c7 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -34,10 +34,13 @@ set(files
__clang_cuda_builtin_vars.h
__clang_cuda_cmath.h
__clang_cuda_complex_builtins.h
+ __clang_cuda_device_functions.h
__clang_cuda_intrinsics.h
+ __clang_cuda_libdevice_declares.h
__clang_cuda_math_forward_declares.h
__clang_cuda_runtime_wrapper.h
cetintrin.h
+ cldemoteintrin.h
clzerointrin.h
cpuid.h
clflushoptintrin.h
@@ -55,6 +58,7 @@ set(files
immintrin.h
intrin.h
inttypes.h
+ invpcidintrin.h
iso646.h
limits.h
lwpintrin.h
@@ -63,16 +67,20 @@ set(files
mmintrin.h
mm_malloc.h
module.modulemap
+ movdirintrin.h
msa.h
mwaitxintrin.h
nmmintrin.h
opencl-c.h
pkuintrin.h
pmmintrin.h
+ pconfigintrin.h
popcntintrin.h
prfchwintrin.h
+ ptwriteintrin.h
rdseedintrin.h
rtmintrin.h
+ sgxintrin.h
s390intrin.h
shaintrin.h
smmintrin.h
@@ -93,6 +101,8 @@ set(files
varargs.h
vecintrin.h
vpclmulqdqintrin.h
+ waitpkgintrin.h
+ wbnoinvdintrin.h
wmmintrin.h
__wmmintrin_aes.h
__wmmintrin_pclmul.h
@@ -116,7 +126,12 @@ set(output_dir ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include)
# Generate arm_neon.h
clang_tablegen(arm_neon.h -gen-arm-neon
+ -I ${CLANG_SOURCE_DIR}/include/clang/Basic/
SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_neon.td)
+# Generate arm_fp16.h
+clang_tablegen(arm_fp16.h -gen-arm-fp16
+ -I ${CLANG_SOURCE_DIR}/include/clang/Basic/
+ SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_fp16.td)
set(out_files)
foreach( f ${files} ${cuda_wrapper_files} )
@@ -134,6 +149,11 @@ add_custom_command(OUTPUT ${output_dir}/arm_neon.h
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h ${output_dir}/arm_neon.h
COMMENT "Copying clang's arm_neon.h...")
list(APPEND out_files ${output_dir}/arm_neon.h)
+add_custom_command(OUTPUT ${output_dir}/arm_fp16.h
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_fp16.h
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_fp16.h ${output_dir}/arm_fp16.h
+ COMMENT "Copying clang's arm_fp16.h...")
+list(APPEND out_files ${output_dir}/arm_fp16.h)
add_custom_target(clang-headers ALL DEPENDS ${out_files})
set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
@@ -145,6 +165,12 @@ install(
DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
install(
+ FILES ${files} ${CMAKE_CURRENT_BINARY_DIR}/arm_fp16.h
+ COMPONENT clang-headers
+ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
+
+install(
FILES ${cuda_wrapper_files}
COMPONENT clang-headers
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
diff --git a/lib/Headers/__clang_cuda_builtin_vars.h b/lib/Headers/__clang_cuda_builtin_vars.h
index 6f5eb9c78d85..290c4b298433 100644
--- a/lib/Headers/__clang_cuda_builtin_vars.h
+++ b/lib/Headers/__clang_cuda_builtin_vars.h
@@ -54,7 +54,7 @@ struct dim3;
#define __DELETE
#endif
-// Make sure nobody can create instances of the special varible types. nvcc
+// Make sure nobody can create instances of the special variable types. nvcc
// also disallows taking address of special variables, so we disable address-of
// operator as well.
#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \
diff --git a/lib/Headers/__clang_cuda_device_functions.h b/lib/Headers/__clang_cuda_device_functions.h
new file mode 100644
index 000000000000..67bbc68b1637
--- /dev/null
+++ b/lib/Headers/__clang_cuda_device_functions.h
@@ -0,0 +1,1768 @@
+/*===---- __clang_cuda_device_functions.h - CUDA runtime support -----------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#define __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+
+#if CUDA_VERSION < 9000
+#error This file is intended to be used with CUDA-9+ only.
+#endif
+
+// __DEVICE__ is a helper macro with a common set of attributes for the
+// wrappers we implement in this file. We need static in order to avoid
+// emitting unused functions, and __forceinline__ helps ensure these wrappers
+// are inlined at -O1.
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ __forceinline__
+
+// libdevice provides fast low-precision and slow full-precision
+// implementations for some functions. Which one gets selected depends on
+// __CLANG_CUDA_APPROX_TRANSCENDENTALS__, which gets defined by clang if
+// -ffast-math or -fcuda-approx-transcendentals is in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
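+// Illustrative sketch, not one of this header's definitions: wrappers defined
+// later in this file typically pick their implementation via the macro, e.g.
+// something along the lines of
+//   __DEVICE__ float expf(float __a) { return __FAST_OR_SLOW(__expf, __nv_expf)(__a); }
+// so that -ffast-math transparently substitutes the fast approximation.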
+
+__DEVICE__ int __all(int __a) { return __nvvm_vote_all(__a); }
+__DEVICE__ int __any(int __a) { return __nvvm_vote_any(__a); }
+__DEVICE__ unsigned int __ballot(int __a) { return __nvvm_vote_ballot(__a); }
+__DEVICE__ unsigned int __brev(unsigned int __a) { return __nv_brev(__a); }
+__DEVICE__ unsigned long long __brevll(unsigned long long __a) {
+ return __nv_brevll(__a);
+}
+__DEVICE__ void __brkpt() { asm volatile("brkpt;"); }
+__DEVICE__ void __brkpt(int __a) { __brkpt(); }
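+// Note: the int overload above exists for compatibility with CUDA's legacy
+// __brkpt(int) signature; the argument is ignored, and both overloads end up
+// emitting the same PTX "brkpt" instruction.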
+__DEVICE__ unsigned int __byte_perm(unsigned int __a, unsigned int __b,
+ unsigned int __c) {
+ return __nv_byte_perm(__a, __b, __c);
+}
+__DEVICE__ int __clz(int __a) { return __nv_clz(__a); }
+__DEVICE__ int __clzll(long long __a) { return __nv_clzll(__a); }
+__DEVICE__ float __cosf(float __a) { return __nv_fast_cosf(__a); }
+__DEVICE__ double __dAtomicAdd(double *__p, double __v) {
+ return __nvvm_atom_add_gen_d(__p, __v);
+}
+__DEVICE__ double __dAtomicAdd_block(double *__p, double __v) {
+ return __nvvm_atom_cta_add_gen_d(__p, __v);
+}
+__DEVICE__ double __dAtomicAdd_system(double *__p, double __v) {
+ return __nvvm_atom_sys_add_gen_d(__p, __v);
+}
+__DEVICE__ double __dadd_rd(double __a, double __b) {
+ return __nv_dadd_rd(__a, __b);
+}
+__DEVICE__ double __dadd_rn(double __a, double __b) {
+ return __nv_dadd_rn(__a, __b);
+}
+__DEVICE__ double __dadd_ru(double __a, double __b) {
+ return __nv_dadd_ru(__a, __b);
+}
+__DEVICE__ double __dadd_rz(double __a, double __b) {
+ return __nv_dadd_rz(__a, __b);
+}
+__DEVICE__ double __ddiv_rd(double __a, double __b) {
+ return __nv_ddiv_rd(__a, __b);
+}
+__DEVICE__ double __ddiv_rn(double __a, double __b) {
+ return __nv_ddiv_rn(__a, __b);
+}
+__DEVICE__ double __ddiv_ru(double __a, double __b) {
+ return __nv_ddiv_ru(__a, __b);
+}
+__DEVICE__ double __ddiv_rz(double __a, double __b) {
+ return __nv_ddiv_rz(__a, __b);
+}
+__DEVICE__ double __dmul_rd(double __a, double __b) {
+ return __nv_dmul_rd(__a, __b);
+}
+__DEVICE__ double __dmul_rn(double __a, double __b) {
+ return __nv_dmul_rn(__a, __b);
+}
+__DEVICE__ double __dmul_ru(double __a, double __b) {
+ return __nv_dmul_ru(__a, __b);
+}
+__DEVICE__ double __dmul_rz(double __a, double __b) {
+ return __nv_dmul_rz(__a, __b);
+}
+__DEVICE__ float __double2float_rd(double __a) {
+ return __nv_double2float_rd(__a);
+}
+__DEVICE__ float __double2float_rn(double __a) {
+ return __nv_double2float_rn(__a);
+}
+__DEVICE__ float __double2float_ru(double __a) {
+ return __nv_double2float_ru(__a);
+}
+__DEVICE__ float __double2float_rz(double __a) {
+ return __nv_double2float_rz(__a);
+}
+__DEVICE__ int __double2hiint(double __a) { return __nv_double2hiint(__a); }
+__DEVICE__ int __double2int_rd(double __a) { return __nv_double2int_rd(__a); }
+__DEVICE__ int __double2int_rn(double __a) { return __nv_double2int_rn(__a); }
+__DEVICE__ int __double2int_ru(double __a) { return __nv_double2int_ru(__a); }
+__DEVICE__ int __double2int_rz(double __a) { return __nv_double2int_rz(__a); }
+__DEVICE__ long long __double2ll_rd(double __a) {
+ return __nv_double2ll_rd(__a);
+}
+__DEVICE__ long long __double2ll_rn(double __a) {
+ return __nv_double2ll_rn(__a);
+}
+__DEVICE__ long long __double2ll_ru(double __a) {
+ return __nv_double2ll_ru(__a);
+}
+__DEVICE__ long long __double2ll_rz(double __a) {
+ return __nv_double2ll_rz(__a);
+}
+__DEVICE__ int __double2loint(double __a) { return __nv_double2loint(__a); }
+__DEVICE__ unsigned int __double2uint_rd(double __a) {
+ return __nv_double2uint_rd(__a);
+}
+__DEVICE__ unsigned int __double2uint_rn(double __a) {
+ return __nv_double2uint_rn(__a);
+}
+__DEVICE__ unsigned int __double2uint_ru(double __a) {
+ return __nv_double2uint_ru(__a);
+}
+__DEVICE__ unsigned int __double2uint_rz(double __a) {
+ return __nv_double2uint_rz(__a);
+}
+__DEVICE__ unsigned long long __double2ull_rd(double __a) {
+ return __nv_double2ull_rd(__a);
+}
+__DEVICE__ unsigned long long __double2ull_rn(double __a) {
+ return __nv_double2ull_rn(__a);
+}
+__DEVICE__ unsigned long long __double2ull_ru(double __a) {
+ return __nv_double2ull_ru(__a);
+}
+__DEVICE__ unsigned long long __double2ull_rz(double __a) {
+ return __nv_double2ull_rz(__a);
+}
+__DEVICE__ long long __double_as_longlong(double __a) {
+ return __nv_double_as_longlong(__a);
+}
+__DEVICE__ double __drcp_rd(double __a) { return __nv_drcp_rd(__a); }
+__DEVICE__ double __drcp_rn(double __a) { return __nv_drcp_rn(__a); }
+__DEVICE__ double __drcp_ru(double __a) { return __nv_drcp_ru(__a); }
+__DEVICE__ double __drcp_rz(double __a) { return __nv_drcp_rz(__a); }
+__DEVICE__ double __dsqrt_rd(double __a) { return __nv_dsqrt_rd(__a); }
+__DEVICE__ double __dsqrt_rn(double __a) { return __nv_dsqrt_rn(__a); }
+__DEVICE__ double __dsqrt_ru(double __a) { return __nv_dsqrt_ru(__a); }
+__DEVICE__ double __dsqrt_rz(double __a) { return __nv_dsqrt_rz(__a); }
+__DEVICE__ double __dsub_rd(double __a, double __b) {
+ return __nv_dsub_rd(__a, __b);
+}
+__DEVICE__ double __dsub_rn(double __a, double __b) {
+ return __nv_dsub_rn(__a, __b);
+}
+__DEVICE__ double __dsub_ru(double __a, double __b) {
+ return __nv_dsub_ru(__a, __b);
+}
+__DEVICE__ double __dsub_rz(double __a, double __b) {
+ return __nv_dsub_rz(__a, __b);
+}
+__DEVICE__ float __exp10f(float __a) { return __nv_fast_exp10f(__a); }
+__DEVICE__ float __expf(float __a) { return __nv_fast_expf(__a); }
+__DEVICE__ float __fAtomicAdd(float *__p, float __v) {
+ return __nvvm_atom_add_gen_f(__p, __v);
+}
+__DEVICE__ float __fAtomicAdd_block(float *__p, float __v) {
+ return __nvvm_atom_cta_add_gen_f(__p, __v);
+}
+__DEVICE__ float __fAtomicAdd_system(float *__p, float __v) {
+ return __nvvm_atom_sys_add_gen_f(__p, __v);
+}
+__DEVICE__ float __fAtomicExch(float *__p, float __v) {
+ return __nv_int_as_float(
+ __nvvm_atom_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));
+}
+__DEVICE__ float __fAtomicExch_block(float *__p, float __v) {
+ return __nv_int_as_float(
+ __nvvm_atom_cta_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));
+}
+__DEVICE__ float __fAtomicExch_system(float *__p, float __v) {
+ return __nv_int_as_float(
+ __nvvm_atom_sys_xchg_gen_i((int *)__p, __nv_float_as_int(__v)));
+}
+__DEVICE__ float __fadd_rd(float __a, float __b) {
+ return __nv_fadd_rd(__a, __b);
+}
+__DEVICE__ float __fadd_rn(float __a, float __b) {
+ return __nv_fadd_rn(__a, __b);
+}
+__DEVICE__ float __fadd_ru(float __a, float __b) {
+ return __nv_fadd_ru(__a, __b);
+}
+__DEVICE__ float __fadd_rz(float __a, float __b) {
+ return __nv_fadd_rz(__a, __b);
+}
+__DEVICE__ float __fdiv_rd(float __a, float __b) {
+ return __nv_fdiv_rd(__a, __b);
+}
+__DEVICE__ float __fdiv_rn(float __a, float __b) {
+ return __nv_fdiv_rn(__a, __b);
+}
+__DEVICE__ float __fdiv_ru(float __a, float __b) {
+ return __nv_fdiv_ru(__a, __b);
+}
+__DEVICE__ float __fdiv_rz(float __a, float __b) {
+ return __nv_fdiv_rz(__a, __b);
+}
+__DEVICE__ float __fdividef(float __a, float __b) {
+ return __nv_fast_fdividef(__a, __b);
+}
+__DEVICE__ int __ffs(int __a) { return __nv_ffs(__a); }
+__DEVICE__ int __ffsll(long long __a) { return __nv_ffsll(__a); }
+__DEVICE__ int __finite(double __a) { return __nv_isfinited(__a); }
+__DEVICE__ int __finitef(float __a) { return __nv_finitef(__a); }
+__DEVICE__ int __float2int_rd(float __a) { return __nv_float2int_rd(__a); }
+__DEVICE__ int __float2int_rn(float __a) { return __nv_float2int_rn(__a); }
+__DEVICE__ int __float2int_ru(float __a) { return __nv_float2int_ru(__a); }
+__DEVICE__ int __float2int_rz(float __a) { return __nv_float2int_rz(__a); }
+__DEVICE__ long long __float2ll_rd(float __a) { return __nv_float2ll_rd(__a); }
+__DEVICE__ long long __float2ll_rn(float __a) { return __nv_float2ll_rn(__a); }
+__DEVICE__ long long __float2ll_ru(float __a) { return __nv_float2ll_ru(__a); }
+__DEVICE__ long long __float2ll_rz(float __a) { return __nv_float2ll_rz(__a); }
+__DEVICE__ unsigned int __float2uint_rd(float __a) {
+ return __nv_float2uint_rd(__a);
+}
+__DEVICE__ unsigned int __float2uint_rn(float __a) {
+ return __nv_float2uint_rn(__a);
+}
+__DEVICE__ unsigned int __float2uint_ru(float __a) {
+ return __nv_float2uint_ru(__a);
+}
+__DEVICE__ unsigned int __float2uint_rz(float __a) {
+ return __nv_float2uint_rz(__a);
+}
+__DEVICE__ unsigned long long __float2ull_rd(float __a) {
+ return __nv_float2ull_rd(__a);
+}
+__DEVICE__ unsigned long long __float2ull_rn(float __a) {
+ return __nv_float2ull_rn(__a);
+}
+__DEVICE__ unsigned long long __float2ull_ru(float __a) {
+ return __nv_float2ull_ru(__a);
+}
+__DEVICE__ unsigned long long __float2ull_rz(float __a) {
+ return __nv_float2ull_rz(__a);
+}
+__DEVICE__ int __float_as_int(float __a) { return __nv_float_as_int(__a); }
+__DEVICE__ unsigned int __float_as_uint(float __a) {
+ return __nv_float_as_uint(__a);
+}
+__DEVICE__ double __fma_rd(double __a, double __b, double __c) {
+ return __nv_fma_rd(__a, __b, __c);
+}
+__DEVICE__ double __fma_rn(double __a, double __b, double __c) {
+ return __nv_fma_rn(__a, __b, __c);
+}
+__DEVICE__ double __fma_ru(double __a, double __b, double __c) {
+ return __nv_fma_ru(__a, __b, __c);
+}
+__DEVICE__ double __fma_rz(double __a, double __b, double __c) {
+ return __nv_fma_rz(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_ieee_rd(float __a, float __b, float __c) {
+ return __nv_fmaf_ieee_rd(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_ieee_rn(float __a, float __b, float __c) {
+ return __nv_fmaf_ieee_rn(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_ieee_ru(float __a, float __b, float __c) {
+ return __nv_fmaf_ieee_ru(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_ieee_rz(float __a, float __b, float __c) {
+ return __nv_fmaf_ieee_rz(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_rd(float __a, float __b, float __c) {
+ return __nv_fmaf_rd(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_rn(float __a, float __b, float __c) {
+ return __nv_fmaf_rn(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_ru(float __a, float __b, float __c) {
+ return __nv_fmaf_ru(__a, __b, __c);
+}
+__DEVICE__ float __fmaf_rz(float __a, float __b, float __c) {
+ return __nv_fmaf_rz(__a, __b, __c);
+}
+__DEVICE__ float __fmul_rd(float __a, float __b) {
+ return __nv_fmul_rd(__a, __b);
+}
+__DEVICE__ float __fmul_rn(float __a, float __b) {
+ return __nv_fmul_rn(__a, __b);
+}
+__DEVICE__ float __fmul_ru(float __a, float __b) {
+ return __nv_fmul_ru(__a, __b);
+}
+__DEVICE__ float __fmul_rz(float __a, float __b) {
+ return __nv_fmul_rz(__a, __b);
+}
+__DEVICE__ float __frcp_rd(float __a) { return __nv_frcp_rd(__a); }
+__DEVICE__ float __frcp_rn(float __a) { return __nv_frcp_rn(__a); }
+__DEVICE__ float __frcp_ru(float __a) { return __nv_frcp_ru(__a); }
+__DEVICE__ float __frcp_rz(float __a) { return __nv_frcp_rz(__a); }
+__DEVICE__ float __frsqrt_rn(float __a) { return __nv_frsqrt_rn(__a); }
+__DEVICE__ float __fsqrt_rd(float __a) { return __nv_fsqrt_rd(__a); }
+__DEVICE__ float __fsqrt_rn(float __a) { return __nv_fsqrt_rn(__a); }
+__DEVICE__ float __fsqrt_ru(float __a) { return __nv_fsqrt_ru(__a); }
+__DEVICE__ float __fsqrt_rz(float __a) { return __nv_fsqrt_rz(__a); }
+__DEVICE__ float __fsub_rd(float __a, float __b) {
+ return __nv_fsub_rd(__a, __b);
+}
+__DEVICE__ float __fsub_rn(float __a, float __b) {
+ return __nv_fsub_rn(__a, __b);
+}
+__DEVICE__ float __fsub_ru(float __a, float __b) {
+ return __nv_fsub_ru(__a, __b);
+}
+__DEVICE__ float __fsub_rz(float __a, float __b) {
+ return __nv_fsub_rz(__a, __b);
+}
+__DEVICE__ int __hadd(int __a, int __b) { return __nv_hadd(__a, __b); }
+__DEVICE__ double __hiloint2double(int __a, int __b) {
+ return __nv_hiloint2double(__a, __b);
+}
+__DEVICE__ int __iAtomicAdd(int *__p, int __v) {
+ return __nvvm_atom_add_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicAdd_block(int *__p, int __v) {
+  return __nvvm_atom_cta_add_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicAdd_system(int *__p, int __v) {
+  return __nvvm_atom_sys_add_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicAnd(int *__p, int __v) {
+ return __nvvm_atom_and_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicAnd_block(int *__p, int __v) {
+ return __nvvm_atom_cta_and_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicAnd_system(int *__p, int __v) {
+ return __nvvm_atom_sys_and_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicCAS(int *__p, int __cmp, int __v) {
+ return __nvvm_atom_cas_gen_i(__p, __cmp, __v);
+}
+__DEVICE__ int __iAtomicCAS_block(int *__p, int __cmp, int __v) {
+ return __nvvm_atom_cta_cas_gen_i(__p, __cmp, __v);
+}
+__DEVICE__ int __iAtomicCAS_system(int *__p, int __cmp, int __v) {
+ return __nvvm_atom_sys_cas_gen_i(__p, __cmp, __v);
+}
+__DEVICE__ int __iAtomicExch(int *__p, int __v) {
+ return __nvvm_atom_xchg_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicExch_block(int *__p, int __v) {
+ return __nvvm_atom_cta_xchg_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicExch_system(int *__p, int __v) {
+ return __nvvm_atom_sys_xchg_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicMax(int *__p, int __v) {
+ return __nvvm_atom_max_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicMax_block(int *__p, int __v) {
+ return __nvvm_atom_cta_max_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicMax_system(int *__p, int __v) {
+ return __nvvm_atom_sys_max_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicMin(int *__p, int __v) {
+ return __nvvm_atom_min_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicMin_block(int *__p, int __v) {
+ return __nvvm_atom_cta_min_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicMin_system(int *__p, int __v) {
+ return __nvvm_atom_sys_min_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicOr(int *__p, int __v) {
+ return __nvvm_atom_or_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicOr_block(int *__p, int __v) {
+ return __nvvm_atom_cta_or_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicOr_system(int *__p, int __v) {
+ return __nvvm_atom_sys_or_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicXor(int *__p, int __v) {
+ return __nvvm_atom_xor_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicXor_block(int *__p, int __v) {
+ return __nvvm_atom_cta_xor_gen_i(__p, __v);
+}
+__DEVICE__ int __iAtomicXor_system(int *__p, int __v) {
+ return __nvvm_atom_sys_xor_gen_i(__p, __v);
+}
+__DEVICE__ long long __illAtomicMax(long long *__p, long long __v) {
+ return __nvvm_atom_max_gen_ll(__p, __v);
+}
+__DEVICE__ long long __illAtomicMax_block(long long *__p, long long __v) {
+ return __nvvm_atom_cta_max_gen_ll(__p, __v);
+}
+__DEVICE__ long long __illAtomicMax_system(long long *__p, long long __v) {
+ return __nvvm_atom_sys_max_gen_ll(__p, __v);
+}
+__DEVICE__ long long __illAtomicMin(long long *__p, long long __v) {
+ return __nvvm_atom_min_gen_ll(__p, __v);
+}
+__DEVICE__ long long __illAtomicMin_block(long long *__p, long long __v) {
+ return __nvvm_atom_cta_min_gen_ll(__p, __v);
+}
+__DEVICE__ long long __illAtomicMin_system(long long *__p, long long __v) {
+ return __nvvm_atom_sys_min_gen_ll(__p, __v);
+}
+__DEVICE__ double __int2double_rn(int __a) { return __nv_int2double_rn(__a); }
+__DEVICE__ float __int2float_rd(int __a) { return __nv_int2float_rd(__a); }
+__DEVICE__ float __int2float_rn(int __a) { return __nv_int2float_rn(__a); }
+__DEVICE__ float __int2float_ru(int __a) { return __nv_int2float_ru(__a); }
+__DEVICE__ float __int2float_rz(int __a) { return __nv_int2float_rz(__a); }
+__DEVICE__ float __int_as_float(int __a) { return __nv_int_as_float(__a); }
+__DEVICE__ int __isfinited(double __a) { return __nv_isfinited(__a); }
+__DEVICE__ int __isinf(double __a) { return __nv_isinfd(__a); }
+__DEVICE__ int __isinff(float __a) { return __nv_isinff(__a); }
+__DEVICE__ int __isnan(double __a) { return __nv_isnand(__a); }
+__DEVICE__ int __isnanf(float __a) { return __nv_isnanf(__a); }
+__DEVICE__ double __ll2double_rd(long long __a) {
+ return __nv_ll2double_rd(__a);
+}
+__DEVICE__ double __ll2double_rn(long long __a) {
+ return __nv_ll2double_rn(__a);
+}
+__DEVICE__ double __ll2double_ru(long long __a) {
+ return __nv_ll2double_ru(__a);
+}
+__DEVICE__ double __ll2double_rz(long long __a) {
+ return __nv_ll2double_rz(__a);
+}
+__DEVICE__ float __ll2float_rd(long long __a) { return __nv_ll2float_rd(__a); }
+__DEVICE__ float __ll2float_rn(long long __a) { return __nv_ll2float_rn(__a); }
+__DEVICE__ float __ll2float_ru(long long __a) { return __nv_ll2float_ru(__a); }
+__DEVICE__ float __ll2float_rz(long long __a) { return __nv_ll2float_rz(__a); }
+__DEVICE__ long long __llAtomicAnd(long long *__p, long long __v) {
+ return __nvvm_atom_and_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicAnd_block(long long *__p, long long __v) {
+ return __nvvm_atom_cta_and_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicAnd_system(long long *__p, long long __v) {
+ return __nvvm_atom_sys_and_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicOr(long long *__p, long long __v) {
+ return __nvvm_atom_or_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicOr_block(long long *__p, long long __v) {
+ return __nvvm_atom_cta_or_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicOr_system(long long *__p, long long __v) {
+ return __nvvm_atom_sys_or_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicXor(long long *__p, long long __v) {
+ return __nvvm_atom_xor_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicXor_block(long long *__p, long long __v) {
+ return __nvvm_atom_cta_xor_gen_ll(__p, __v);
+}
+__DEVICE__ long long __llAtomicXor_system(long long *__p, long long __v) {
+ return __nvvm_atom_sys_xor_gen_ll(__p, __v);
+}
+__DEVICE__ float __log10f(float __a) { return __nv_fast_log10f(__a); }
+__DEVICE__ float __log2f(float __a) { return __nv_fast_log2f(__a); }
+__DEVICE__ float __logf(float __a) { return __nv_fast_logf(__a); }
+__DEVICE__ double __longlong_as_double(long long __a) {
+ return __nv_longlong_as_double(__a);
+}
+__DEVICE__ int __mul24(int __a, int __b) { return __nv_mul24(__a, __b); }
+__DEVICE__ long long __mul64hi(long long __a, long long __b) {
+ return __nv_mul64hi(__a, __b);
+}
+__DEVICE__ int __mulhi(int __a, int __b) { return __nv_mulhi(__a, __b); }
+__DEVICE__ unsigned int __pm0(void) { return __nvvm_read_ptx_sreg_pm0(); }
+__DEVICE__ unsigned int __pm1(void) { return __nvvm_read_ptx_sreg_pm1(); }
+__DEVICE__ unsigned int __pm2(void) { return __nvvm_read_ptx_sreg_pm2(); }
+__DEVICE__ unsigned int __pm3(void) { return __nvvm_read_ptx_sreg_pm3(); }
+__DEVICE__ int __popc(int __a) { return __nv_popc(__a); }
+__DEVICE__ int __popcll(long long __a) { return __nv_popcll(__a); }
+__DEVICE__ float __powf(float __a, float __b) {
+ return __nv_fast_powf(__a, __b);
+}
+
+// Parameter must have a known integer value.
+#define __prof_trigger(__a) asm __volatile__("pmevent \t%0;" ::"i"(__a))
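+// Usage sketch: the "i" asm constraint requires an integer constant
+// expression, so __prof_trigger(0) compiles (and triggers PTX perf-monitor
+// event 0) while passing a runtime value would be rejected.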
+__DEVICE__ int __rhadd(int __a, int __b) { return __nv_rhadd(__a, __b); }
+__DEVICE__ unsigned int __sad(int __a, int __b, unsigned int __c) {
+ return __nv_sad(__a, __b, __c);
+}
+__DEVICE__ float __saturatef(float __a) { return __nv_saturatef(__a); }
+__DEVICE__ int __signbitd(double __a) { return __nv_signbitd(__a); }
+__DEVICE__ int __signbitf(float __a) { return __nv_signbitf(__a); }
+__DEVICE__ void __sincosf(float __a, float *__sptr, float *__cptr) {
+ return __nv_fast_sincosf(__a, __sptr, __cptr);
+}
+__DEVICE__ float __sinf(float __a) { return __nv_fast_sinf(__a); }
+__DEVICE__ int __syncthreads_and(int __a) { return __nvvm_bar0_and(__a); }
+__DEVICE__ int __syncthreads_count(int __a) { return __nvvm_bar0_popc(__a); }
+__DEVICE__ int __syncthreads_or(int __a) { return __nvvm_bar0_or(__a); }
+__DEVICE__ float __tanf(float __a) { return __nv_fast_tanf(__a); }
+__DEVICE__ void __threadfence(void) { __nvvm_membar_gl(); }
+__DEVICE__ void __threadfence_block(void) { __nvvm_membar_cta(); }
+__DEVICE__ void __threadfence_system(void) { __nvvm_membar_sys(); }
+__DEVICE__ void __trap(void) { asm volatile("trap;"); }
+__DEVICE__ unsigned int __uAtomicAdd(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_add_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicAdd_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_add_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicAdd_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_add_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicAnd(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_and_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicAnd_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_and_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicAnd_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_and_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicCAS(unsigned int *__p, unsigned int __cmp,
+ unsigned int __v) {
+ return __nvvm_atom_cas_gen_i((int *)__p, __cmp, __v);
+}
+__DEVICE__ unsigned int
+__uAtomicCAS_block(unsigned int *__p, unsigned int __cmp, unsigned int __v) {
+ return __nvvm_atom_cta_cas_gen_i((int *)__p, __cmp, __v);
+}
+__DEVICE__ unsigned int
+__uAtomicCAS_system(unsigned int *__p, unsigned int __cmp, unsigned int __v) {
+ return __nvvm_atom_sys_cas_gen_i((int *)__p, __cmp, __v);
+}
+__DEVICE__ unsigned int __uAtomicDec(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_dec_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicDec_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_dec_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicDec_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_dec_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicExch(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_xchg_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicExch_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_xchg_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicExch_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_xchg_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicInc(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_inc_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicInc_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_inc_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicInc_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_inc_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicMax(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_max_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicMax_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_max_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicMax_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_max_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicMin(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_min_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicMin_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_min_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicMin_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_min_gen_ui(__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicOr(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_or_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicOr_block(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_cta_or_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicOr_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_or_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicXor(unsigned int *__p, unsigned int __v) {
+ return __nvvm_atom_xor_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicXor_block(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_cta_xor_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uAtomicXor_system(unsigned int *__p,
+ unsigned int __v) {
+ return __nvvm_atom_sys_xor_gen_i((int *)__p, __v);
+}
+__DEVICE__ unsigned int __uhadd(unsigned int __a, unsigned int __b) {
+ return __nv_uhadd(__a, __b);
+}
+__DEVICE__ double __uint2double_rn(unsigned int __a) {
+ return __nv_uint2double_rn(__a);
+}
+__DEVICE__ float __uint2float_rd(unsigned int __a) {
+ return __nv_uint2float_rd(__a);
+}
+__DEVICE__ float __uint2float_rn(unsigned int __a) {
+ return __nv_uint2float_rn(__a);
+}
+__DEVICE__ float __uint2float_ru(unsigned int __a) {
+ return __nv_uint2float_ru(__a);
+}
+__DEVICE__ float __uint2float_rz(unsigned int __a) {
+ return __nv_uint2float_rz(__a);
+}
+__DEVICE__ float __uint_as_float(unsigned int __a) {
+ return __nv_uint_as_float(__a);
+}
+__DEVICE__ double __ull2double_rd(unsigned long long __a) {
+ return __nv_ull2double_rd(__a);
+}
+__DEVICE__ double __ull2double_rn(unsigned long long __a) {
+ return __nv_ull2double_rn(__a);
+}
+__DEVICE__ double __ull2double_ru(unsigned long long __a) {
+ return __nv_ull2double_ru(__a);
+}
+__DEVICE__ double __ull2double_rz(unsigned long long __a) {
+ return __nv_ull2double_rz(__a);
+}
+__DEVICE__ float __ull2float_rd(unsigned long long __a) {
+ return __nv_ull2float_rd(__a);
+}
+__DEVICE__ float __ull2float_rn(unsigned long long __a) {
+ return __nv_ull2float_rn(__a);
+}
+__DEVICE__ float __ull2float_ru(unsigned long long __a) {
+ return __nv_ull2float_ru(__a);
+}
+__DEVICE__ float __ull2float_rz(unsigned long long __a) {
+ return __nv_ull2float_rz(__a);
+}
+__DEVICE__ unsigned long long __ullAtomicAdd(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_add_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicAdd_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_add_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicAdd_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_add_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicAnd(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_and_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicAnd_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_and_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicAnd_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_and_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicCAS(unsigned long long *__p,
+ unsigned long long __cmp,
+ unsigned long long __v) {
+ return __nvvm_atom_cas_gen_ll((long long *)__p, __cmp, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicCAS_block(unsigned long long *__p,
+ unsigned long long __cmp,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_cas_gen_ll((long long *)__p, __cmp, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicCAS_system(unsigned long long *__p,
+ unsigned long long __cmp,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_cas_gen_ll((long long *)__p, __cmp, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicExch(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_xchg_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicExch_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_xchg_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicExch_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_xchg_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicMax(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_max_gen_ull(__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicMax_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_max_gen_ull(__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicMax_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_max_gen_ull(__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicMin(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_min_gen_ull(__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicMin_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_min_gen_ull(__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicMin_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_min_gen_ull(__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicOr(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_or_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicOr_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_or_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicOr_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_or_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicXor(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_xor_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicXor_block(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_cta_xor_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned long long __ullAtomicXor_system(unsigned long long *__p,
+ unsigned long long __v) {
+ return __nvvm_atom_sys_xor_gen_ll((long long *)__p, __v);
+}
+__DEVICE__ unsigned int __umul24(unsigned int __a, unsigned int __b) {
+ return __nv_umul24(__a, __b);
+}
+__DEVICE__ unsigned long long __umul64hi(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_umul64hi(__a, __b);
+}
+__DEVICE__ unsigned int __umulhi(unsigned int __a, unsigned int __b) {
+ return __nv_umulhi(__a, __b);
+}
+__DEVICE__ unsigned int __urhadd(unsigned int __a, unsigned int __b) {
+ return __nv_urhadd(__a, __b);
+}
+__DEVICE__ unsigned int __usad(unsigned int __a, unsigned int __b,
+ unsigned int __c) {
+ return __nv_usad(__a, __b, __c);
+}
+
+#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020
+__DEVICE__ unsigned int __vabs2(unsigned int __a) { return __nv_vabs2(__a); }
+__DEVICE__ unsigned int __vabs4(unsigned int __a) { return __nv_vabs4(__a); }
+__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) {
+ return __nv_vabsdiffs2(__a, __b);
+}
+__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) {
+ return __nv_vabsdiffs4(__a, __b);
+}
+__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) {
+ return __nv_vabsdiffu2(__a, __b);
+}
+__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) {
+ return __nv_vabsdiffu4(__a, __b);
+}
+__DEVICE__ unsigned int __vabsss2(unsigned int __a) {
+ return __nv_vabsss2(__a);
+}
+__DEVICE__ unsigned int __vabsss4(unsigned int __a) {
+ return __nv_vabsss4(__a);
+}
+__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) {
+ return __nv_vadd2(__a, __b);
+}
+__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) {
+ return __nv_vadd4(__a, __b);
+}
+__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) {
+ return __nv_vaddss2(__a, __b);
+}
+__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) {
+ return __nv_vaddss4(__a, __b);
+}
+__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) {
+ return __nv_vaddus2(__a, __b);
+}
+__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) {
+ return __nv_vaddus4(__a, __b);
+}
+__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) {
+ return __nv_vavgs2(__a, __b);
+}
+__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) {
+ return __nv_vavgs4(__a, __b);
+}
+__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) {
+ return __nv_vavgu2(__a, __b);
+}
+__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) {
+ return __nv_vavgu4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpeq2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpeq4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpges2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpges4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpgeu2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpgeu4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpgts2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpgts4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpgtu2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpgtu4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmples2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmples4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpleu2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpleu4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmplts2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmplts4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpltu2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpltu4(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpne2(__a, __b);
+}
+__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) {
+ return __nv_vcmpne4(__a, __b);
+}
+__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) {
+ return __nv_vhaddu2(__a, __b);
+}
+__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) {
+ return __nv_vhaddu4(__a, __b);
+}
+__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) {
+ return __nv_vmaxs2(__a, __b);
+}
+__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) {
+ return __nv_vmaxs4(__a, __b);
+}
+__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) {
+ return __nv_vmaxu2(__a, __b);
+}
+__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) {
+ return __nv_vmaxu4(__a, __b);
+}
+__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) {
+ return __nv_vmins2(__a, __b);
+}
+__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) {
+ return __nv_vmins4(__a, __b);
+}
+__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) {
+ return __nv_vminu2(__a, __b);
+}
+__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) {
+ return __nv_vminu4(__a, __b);
+}
+__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __nv_vneg2(__a); }
+__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __nv_vneg4(__a); }
+__DEVICE__ unsigned int __vnegss2(unsigned int __a) {
+ return __nv_vnegss2(__a);
+}
+__DEVICE__ unsigned int __vnegss4(unsigned int __a) {
+ return __nv_vnegss4(__a);
+}
+__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) {
+ return __nv_vsads2(__a, __b);
+}
+__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) {
+ return __nv_vsads4(__a, __b);
+}
+__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) {
+ return __nv_vsadu2(__a, __b);
+}
+__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) {
+ return __nv_vsadu4(__a, __b);
+}
+__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) {
+ return __nv_vseteq2(__a, __b);
+}
+__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) {
+ return __nv_vseteq4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetges2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetges4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetgeu2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetgeu4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetgts2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetgts4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetgtu2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetgtu4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetles2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetles4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetleu2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetleu4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetlts2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetlts4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetltu2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetltu4(__a, __b);
+}
+__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) {
+ return __nv_vsetne2(__a, __b);
+}
+__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) {
+ return __nv_vsetne4(__a, __b);
+}
+__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) {
+ return __nv_vsub2(__a, __b);
+}
+__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) {
+ return __nv_vsub4(__a, __b);
+}
+__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) {
+ return __nv_vsubss2(__a, __b);
+}
+__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) {
+ return __nv_vsubss4(__a, __b);
+}
+__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) {
+ return __nv_vsubus2(__a, __b);
+}
+__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
+ return __nv_vsubus4(__a, __b);
+}
+#else // CUDA_VERSION >= 9020
+// CUDA no longer provides inline assembly (or bitcode) implementations of
+// these functions, so we have to reimplement them. The implementation is
+// naive and not optimized for performance.
+
+// Helper function to convert N-bit boolean subfields into all-0 or all-1.
+// E.g. __bool2mask(0x01000100,8) -> 0xff00ff00
+// __bool2mask(0x00010000,16) -> 0xffff0000
+__DEVICE__ unsigned int __bool2mask(unsigned int __a, int __shift) {
+  return (__a << __shift) - __a;
+}
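+// Why (__a << __shift) - __a works: it equals __a * ((1 << __shift) - 1).
+// Each flag occupies the low bit of its subfield, so multiplying by the
+// all-ones constant of width __shift expands every set flag into a full
+// subfield mask; the per-subfield products never overlap, so no carries mix
+// adjacent subfields.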
+__DEVICE__ unsigned int __vabs2(unsigned int __a) {
+ unsigned int r;
+ asm("vabsdiff2.s32.s32.s32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(0), "r"(0));
+ return r;
+}
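+// Implementation note: absolute value is computed per halfword as |__a - 0|
+// by forcing both the second operand and the accumulator of vabsdiff2 to
+// zero; __vabs4 and the saturating __vabsss2/__vabsss4 below use the same
+// trick.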
+__DEVICE__ unsigned int __vabs4(unsigned int __a) {
+ unsigned int r;
+ asm("vabsdiff4.s32.s32.s32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(0), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vabsdiffs2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff2.s32.s32.s32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+
+__DEVICE__ unsigned int __vabsdiffs4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff4.s32.s32.s32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vabsdiffu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff2.u32.u32.u32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vabsdiffu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff4.u32.u32.u32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vabsss2(unsigned int __a) {
+ unsigned int r;
+ asm("vabsdiff2.s32.s32.s32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(0), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vabsss4(unsigned int __a) {
+ unsigned int r;
+ asm("vabsdiff4.s32.s32.s32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(0), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vadd2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vadd2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vadd4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vadd4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vaddss2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vadd2.s32.s32.s32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vaddss4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vadd4.s32.s32.s32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vaddus2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vadd2.u32.u32.u32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vaddus4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vadd4.u32.u32.u32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vavgs2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vavrg2.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vavgs4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vavrg4.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vavgu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vavrg2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vavgu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vavrg4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vseteq2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.u32.u32.eq %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpeq2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vseteq2(__a, __b), 16);
+}
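+// Pattern for the remaining __vcmp* reimplementations: the PTX vset*
+// instructions produce 0/1 per subfield, and __bool2mask widens that to the
+// all-zeros/all-ones subfield mask (0x0000/0xffff for halfwords, 0x00/0xff
+// for bytes) that the __vcmp* API must return.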
+__DEVICE__ unsigned int __vseteq4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.u32.u32.eq %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpeq4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vseteq4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetges2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.s32.s32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpges2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetges2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetges4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.s32.s32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpges4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetges4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetgeu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.u32.u32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpgeu2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetgeu2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetgeu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.u32.u32.ge %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpgeu4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetgeu4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetgts2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.s32.s32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpgts2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetgts2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetgts4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.s32.s32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpgts4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetgts4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetgtu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.u32.u32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpgtu2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetgtu2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetgtu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.u32.u32.gt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpgtu4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetgtu4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetles2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.s32.s32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmples2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetles2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetles4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.s32.s32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmples4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetles4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetleu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.u32.u32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpleu2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetleu2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetleu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.u32.u32.le %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpleu4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetleu4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetlts2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.s32.s32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmplts2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetlts2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetlts4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.s32.s32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmplts4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetlts4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetltu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.u32.u32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpltu2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetltu2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetltu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.u32.u32.lt %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpltu4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetltu4(__a, __b), 8);
+}
+__DEVICE__ unsigned int __vsetne2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset2.u32.u32.ne %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpne2(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetne2(__a, __b), 16);
+}
+__DEVICE__ unsigned int __vsetne4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vset4.u32.u32.ne %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vcmpne4(unsigned int __a, unsigned int __b) {
+ return __bool2mask(__vsetne4(__a, __b), 8);
+}
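
The `__vset*` helpers above return a 0-or-1 value in each sub-word lane, and the `__vcmp*` wrappers widen that into an all-ones/all-zeros lane mask via `__bool2mask`, defined earlier in this header (outside this excerpt). A minimal host-side sketch of the expansion, assuming the usual `(a << shift) - a` formulation of `__bool2mask`:

```c++
// Host-side model of the vset -> vcmp mask expansion (a sketch; assumes
// __bool2mask(a, shift) == (a << shift) - a, per the definition earlier in
// this header).
#include <cassert>
#include <cstdint>

static uint32_t bool2mask(uint32_t a, int shift) {
  // Each lane holds 0 or 1; (a << shift) - a turns a 1 into an all-ones lane.
  return (a << shift) - a;
}

int main() {
  // Two 16-bit lanes: only the low lane compared equal.
  assert(bool2mask(0x00000001u, 16) == 0x0000FFFFu); // __vcmpeq2-style mask
  // Four 8-bit lanes: lanes 0 and 2 compared equal.
  assert(bool2mask(0x00010001u, 8) == 0x00FF00FFu);  // __vcmpeq4-style mask
  return 0;
}
```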
+
+// Based on ITEM 23 in AIM-239: http://dspace.mit.edu/handle/1721.1/6086
+// (a & b) + (a | b) = a + b = (a ^ b) + 2 * (a & b) =>
+// (a + b) / 2 = ((a ^ b) >> 1) + (a & b)
+// To operate on multiple sub-elements, we must mask out bits that crossed
+// over into adjacent elements during the shift.
+__DEVICE__ unsigned int __vhaddu2(unsigned int __a, unsigned int __b) {
+ return (((__a ^ __b) >> 1) & ~0x80008000u) + (__a & __b);
+}
+__DEVICE__ unsigned int __vhaddu4(unsigned int __a, unsigned int __b) {
+ return (((__a ^ __b) >> 1) & ~0x80808080u) + (__a & __b);
+}
+
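
The AIM-239 identity above is easy to sanity-check on the host. A minimal sketch mirroring `__vhaddu2` (not part of the header):

```c++
// Per 16-bit lane: (a + b) / 2 == ((a ^ b) >> 1) + (a & b). The 0x80008000
// mask clears the bits the shift would otherwise leak across lane boundaries.
#include <cassert>
#include <cstdint>

static uint32_t vhaddu2_model(uint32_t a, uint32_t b) {
  return (((a ^ b) >> 1) & ~0x80008000u) + (a & b);
}

int main() {
  uint32_t a = 0xFFFF0002u; // lanes: 0xFFFF, 0x0002
  uint32_t b = 0x00010004u; // lanes: 0x0001, 0x0004
  uint32_t r = vhaddu2_model(a, b);
  assert((r >> 16) == 0x8000u);     // (0xFFFF + 0x0001) / 2, no lane overflow
  assert((r & 0xFFFFu) == 0x0003u); // (0x0002 + 0x0004) / 2
  return 0;
}
```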
+__DEVICE__ unsigned int __vmaxs2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ if ((__a & 0x8000) && (__b & 0x8000)) {
+ // Work around a bug in ptxas which produces an invalid result if the low
+ // element is negative.
+ unsigned mask = __vcmpgts2(__a, __b);
+ r = (__a & mask) | (__b & ~mask);
+ } else {
+ asm("vmax2.s32.s32.s32 %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ }
+ return r;
+}
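
The fallback path selects lanes branchlessly: `(a & mask) | (b & ~mask)` picks `a` wherever the `__vcmpgts2` mask is all-ones and `b` wherever it is all-zeros. A scalar reference model of what `__vmaxs2` computes, as a sketch:

```c++
// Scalar model of __vmaxs2 (a sketch): signed max on each 16-bit lane.
#include <cassert>
#include <cstdint>

static uint32_t vmaxs2_model(uint32_t a, uint32_t b) {
  uint32_t r = 0;
  for (int lane = 0; lane < 2; ++lane) {
    int16_t la = (int16_t)(a >> (16 * lane)); // low 16 bits, reinterpreted
    int16_t lb = (int16_t)(b >> (16 * lane));
    uint16_t m = (uint16_t)(la > lb ? la : lb);
    r |= (uint32_t)m << (16 * lane);
  }
  return r;
}

int main() {
  // Low lane: max(-32768, 1) == 1; high lane: max(1, -1) == 1.
  assert(vmaxs2_model(0x00018000u, 0xFFFF0001u) == 0x00010001u);
  return 0;
}
```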
+__DEVICE__ unsigned int __vmaxs4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmax4.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vmaxu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmax2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vmaxu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmax4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vmins2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmin2.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vmins4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmin4.s32.s32.s32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vminu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmin2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vminu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vmin4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vsads2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff2.s32.s32.s32.add %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vsads4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff4.s32.s32.s32.add %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vsadu2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff2.u32.u32.u32.add %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vsadu4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vabsdiff4.u32.u32.u32.add %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+
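
The `.add` suffix on `vabsdiff` folds the per-lane absolute differences (plus the third operand, zero here) into one scalar result, so these return a single sum rather than packed lanes. A scalar reference model for `__vsadu4`, as a sketch:

```c++
// Scalar model of __vsadu4 (a sketch): sum of absolute differences over the
// four unsigned byte lanes, accumulated into one 32-bit value.
#include <cassert>
#include <cstdint>
#include <cstdlib>

static uint32_t vsadu4_model(uint32_t a, uint32_t b) {
  uint32_t sum = 0;
  for (int lane = 0; lane < 4; ++lane) {
    int da = (int)((a >> (8 * lane)) & 0xFFu);
    int db = (int)((b >> (8 * lane)) & 0xFFu);
    sum += (uint32_t)abs(da - db);
  }
  return sum;
}

int main() {
  // Byte lanes of a: 04 03 02 01 (low to high); of b: 01 02 03 04.
  assert(vsadu4_model(0x01020304u, 0x04030201u) == 8u); // 3 + 1 + 1 + 3
  return 0;
}
```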
+__DEVICE__ unsigned int __vsub2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vsub2.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vneg2(unsigned int __a) { return __vsub2(0, __a); }
+
+__DEVICE__ unsigned int __vsub4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vsub4.u32.u32.u32 %0,%1,%2,%3;" : "=r"(r) : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vneg4(unsigned int __a) { return __vsub4(0, __a); }
+__DEVICE__ unsigned int __vsubss2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vsub2.s32.s32.s32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vnegss2(unsigned int __a) {
+ return __vsubss2(0, __a);
+}
+__DEVICE__ unsigned int __vsubss4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vsub4.s32.s32.s32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vnegss4(unsigned int __a) {
+ return __vsubss4(0, __a);
+}
+__DEVICE__ unsigned int __vsubus2(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vsub2.u32.u32.u32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+__DEVICE__ unsigned int __vsubus4(unsigned int __a, unsigned int __b) {
+ unsigned int r;
+ asm("vsub4.u32.u32.u32.sat %0,%1,%2,%3;"
+ : "=r"(r)
+ : "r"(__a), "r"(__b), "r"(0));
+ return r;
+}
+#endif // CUDA_VERSION >= 9020
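
Taken together, these wrappers expose SIMD-within-a-word operations to device code. A hypothetical usage example (the kernel and its names are illustrative, not part of this patch) averaging two buffers of bytes packed four to a word:

```cuda
// Illustrative only: per-byte rounded average of two packed-byte buffers,
// four lanes per 32-bit word, using the __vavgu4 wrapper defined above.
__global__ void avg_packed_bytes(const unsigned int *a, const unsigned int *b,
                                 unsigned int *out, int n_words) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n_words)
    out[i] = __vavgu4(a[i], b[i]);
}
```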
+__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
+__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
+__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
+__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
+__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
+__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
+__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
+__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
+__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
+__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
+__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
+__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
+__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
+__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
+__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
+__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
+__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
+__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
+__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
+__DEVICE__ int clock() { return __nvvm_read_ptx_sreg_clock(); }
+__DEVICE__ long long clock64() { return __nvvm_read_ptx_sreg_clock64(); }
+__DEVICE__ double copysign(double __a, double __b) {
+ return __nv_copysign(__a, __b);
+}
+__DEVICE__ float copysignf(float __a, float __b) {
+ return __nv_copysignf(__a, __b);
+}
+__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
+__DEVICE__ float cosf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
+}
+__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
+__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
+__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
+__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
+__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
+__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
+__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
+__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
+__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
+__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
+__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
+__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
+__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
+__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
+__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
+__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
+__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
+__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
+__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
+__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
+__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
+__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
+__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
+__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
+__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
+__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
+__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
+__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
+__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
+__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
+__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
+__DEVICE__ float fdividef(float __a, float __b) {
+#if __FAST_MATH__ && !__CUDA_PREC_DIV
+ return __nv_fast_fdividef(__a, __b);
+#else
+ return __a / __b;
+#endif
+}
+__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
+__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
+__DEVICE__ double fma(double __a, double __b, double __c) {
+ return __nv_fma(__a, __b, __c);
+}
+__DEVICE__ float fmaf(float __a, float __b, float __c) {
+ return __nv_fmaf(__a, __b, __c);
+}
+__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
+__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
+__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
+__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
+__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
+__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
+__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
+__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
+__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
+__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
+__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
+__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
+__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
+__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
+__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
+__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
+__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
+__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
+#if defined(__LP64__)
+__DEVICE__ long labs(long __a) { return llabs(__a); }
+#else
+__DEVICE__ long labs(long __a) { return __nv_abs(__a); }
+#endif
+__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
+__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
+__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
+__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
+__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
+__DEVICE__ long long llmax(long long __a, long long __b) {
+ return __nv_llmax(__a, __b);
+}
+__DEVICE__ long long llmin(long long __a, long long __b) {
+ return __nv_llmin(__a, __b);
+}
+__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
+__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
+__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
+__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
+__DEVICE__ double log(double __a) { return __nv_log(__a); }
+__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
+__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
+__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
+__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
+__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
+__DEVICE__ float log2f(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
+}
+__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
+__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
+__DEVICE__ float logf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
+}
+#if defined(__LP64__)
+__DEVICE__ long lrint(double __a) { return llrint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
+__DEVICE__ long lround(double __a) { return llround(__a); }
+__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
+#else
+__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
+__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
+__DEVICE__ long lround(double __a) { return round(__a); }
+__DEVICE__ long lroundf(float __a) { return roundf(__a); }
+#endif
+__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
+__DEVICE__ void *memcpy(void *__a, const void *__b, size_t __c) {
+ return __builtin_memcpy(__a, __b, __c);
+}
+__DEVICE__ void *memset(void *__a, int __b, size_t __c) {
+ return __builtin_memset(__a, __b, __c);
+}
+__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
+__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
+__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
+__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nextafter(double __a, double __b) {
+ return __nv_nextafter(__a, __b);
+}
+__DEVICE__ float nextafterf(float __a, float __b) {
+ return __nv_nextafterf(__a, __b);
+}
+__DEVICE__ double norm(int __dim, const double *__t) {
+ return __nv_norm(__dim, __t);
+}
+__DEVICE__ double norm3d(double __a, double __b, double __c) {
+ return __nv_norm3d(__a, __b, __c);
+}
+__DEVICE__ float norm3df(float __a, float __b, float __c) {
+ return __nv_norm3df(__a, __b, __c);
+}
+__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
+ return __nv_norm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
+ return __nv_norm4df(__a, __b, __c, __d);
+}
+__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
+__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
+__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
+__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
+__DEVICE__ float normf(int __dim, const float *__t) {
+ return __nv_normf(__dim, __t);
+}
+__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
+__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
+__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
+__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
+__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
+__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
+__DEVICE__ double remainder(double __a, double __b) {
+ return __nv_remainder(__a, __b);
+}
+__DEVICE__ float remainderf(float __a, float __b) {
+ return __nv_remainderf(__a, __b);
+}
+__DEVICE__ double remquo(double __a, double __b, int *__c) {
+ return __nv_remquo(__a, __b, __c);
+}
+__DEVICE__ float remquof(float __a, float __b, int *__c) {
+ return __nv_remquof(__a, __b, __c);
+}
+__DEVICE__ double rhypot(double __a, double __b) {
+ return __nv_rhypot(__a, __b);
+}
+__DEVICE__ float rhypotf(float __a, float __b) {
+ return __nv_rhypotf(__a, __b);
+}
+__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+__DEVICE__ double rnorm(int __a, const double *__b) {
+ return __nv_rnorm(__a, __b);
+}
+__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
+ return __nv_rnorm3d(__a, __b, __c);
+}
+__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
+ return __nv_rnorm3df(__a, __b, __c);
+}
+__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
+ return __nv_rnorm4d(__a, __b, __c, __d);
+}
+__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
+ return __nv_rnorm4df(__a, __b, __c, __d);
+}
+__DEVICE__ float rnormf(int __dim, const float *__t) {
+ return __nv_rnormf(__dim, __t);
+}
+__DEVICE__ double round(double __a) { return __nv_round(__a); }
+__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
+__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
+__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
+__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
+__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
+__DEVICE__ double scalbln(double __a, long __b) {
+ if (__b > INT_MAX)
+ return __a > 0 ? HUGE_VAL : -HUGE_VAL;
+ if (__b < INT_MIN)
+ return __a > 0 ? 0.0 : -0.0;
+ return scalbn(__a, (int)__b);
+}
+__DEVICE__ float scalblnf(float __a, long __b) {
+ if (__b > INT_MAX)
+ return __a > 0 ? HUGE_VALF : -HUGE_VALF;
+ if (__b < INT_MIN)
+ return __a > 0 ? 0.f : -0.f;
+ return scalbnf(__a, (int)__b);
+}
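
The clamping in `scalbln`/`scalblnf` preserves `scalbn`'s saturation behavior: any exponent magnitude beyond `int` range already overflows to ±`HUGE_VAL` or underflows to ±0 for a `double`. A host-side sketch of the same logic:

```c++
// Host-side model of the scalbln clamping (a sketch, mirrors the code above).
#include <cassert>
#include <climits>
#include <cmath>

static double scalbln_model(double a, long b) {
  if (b > INT_MAX)
    return a > 0 ? HUGE_VAL : -HUGE_VAL;
  if (b < INT_MIN)
    return a > 0 ? 0.0 : -0.0;
  return scalbn(a, (int)b);
}

int main() {
  assert(scalbln_model(1.0, 3) == 8.0);             // 1.0 * 2^3
  assert(scalbln_model(1.0, LONG_MAX) == HUGE_VAL); // saturates on overflow
  return 0;
}
```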
+__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
+__DEVICE__ void sincos(double __a, double *__sptr, double *__cptr) {
+ return __nv_sincos(__a, __sptr, __cptr);
+}
+__DEVICE__ void sincosf(float __a, float *__sptr, float *__cptr) {
+ return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __sptr, __cptr);
+}
+__DEVICE__ void sincospi(double __a, double *__sptr, double *__cptr) {
+ return __nv_sincospi(__a, __sptr, __cptr);
+}
+__DEVICE__ void sincospif(float __a, float *__sptr, float *__cptr) {
+ return __nv_sincospif(__a, __sptr, __cptr);
+}
+__DEVICE__ float sinf(float __a) {
+ return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
+}
+__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
+__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
+__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
+__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
+__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
+__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
+__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
+__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
+__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
+__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
+__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
+__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
+__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
+__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
+__DEVICE__ unsigned long long ullmax(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_ullmax(__a, __b);
+}
+__DEVICE__ unsigned long long ullmin(unsigned long long __a,
+ unsigned long long __b) {
+ return __nv_ullmin(__a, __b);
+}
+__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
+ return __nv_umax(__a, __b);
+}
+__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
+ return __nv_umin(__a, __b);
+}
+__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
+__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
+__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
+__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
+__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
+__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__FAST_OR_SLOW")
+#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
diff --git a/lib/Headers/__clang_cuda_intrinsics.h b/lib/Headers/__clang_cuda_intrinsics.h
index 1794eb3dc1d6..3c0cde94ed44 100644
--- a/lib/Headers/__clang_cuda_intrinsics.h
+++ b/lib/Headers/__clang_cuda_intrinsics.h
@@ -277,6 +277,9 @@ inline __device__ long long __ldg(const long long *ptr) {
inline __device__ unsigned char __ldg(const unsigned char *ptr) {
return __nvvm_ldg_uc(ptr);
}
+inline __device__ signed char __ldg(const signed char *ptr) {
+ return __nvvm_ldg_uc((const unsigned char *)ptr);
+}
inline __device__ unsigned short __ldg(const unsigned short *ptr) {
return __nvvm_ldg_us(ptr);
}
diff --git a/lib/Headers/__clang_cuda_libdevice_declares.h b/lib/Headers/__clang_cuda_libdevice_declares.h
new file mode 100644
index 000000000000..71df7f849d15
--- /dev/null
+++ b/lib/Headers/__clang_cuda_libdevice_declares.h
@@ -0,0 +1,466 @@
+/*===-- __clang_cuda_libdevice_declares.h - decls for libdevice functions --===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_LIBDEVICE_DECLARES_H__
+#define __CLANG_CUDA_LIBDEVICE_DECLARES_H__
+
+extern "C" {
+
+__device__ int __nv_abs(int __a);
+__device__ double __nv_acos(double __a);
+__device__ float __nv_acosf(float __a);
+__device__ double __nv_acosh(double __a);
+__device__ float __nv_acoshf(float __a);
+__device__ double __nv_asin(double __a);
+__device__ float __nv_asinf(float __a);
+__device__ double __nv_asinh(double __a);
+__device__ float __nv_asinhf(float __a);
+__device__ double __nv_atan2(double __a, double __b);
+__device__ float __nv_atan2f(float __a, float __b);
+__device__ double __nv_atan(double __a);
+__device__ float __nv_atanf(float __a);
+__device__ double __nv_atanh(double __a);
+__device__ float __nv_atanhf(float __a);
+__device__ int __nv_brev(int __a);
+__device__ long long __nv_brevll(long long __a);
+__device__ int __nv_byte_perm(int __a, int __b, int __c);
+__device__ double __nv_cbrt(double __a);
+__device__ float __nv_cbrtf(float __a);
+__device__ double __nv_ceil(double __a);
+__device__ float __nv_ceilf(float __a);
+__device__ int __nv_clz(int __a);
+__device__ int __nv_clzll(long long __a);
+__device__ double __nv_copysign(double __a, double __b);
+__device__ float __nv_copysignf(float __a, float __b);
+__device__ double __nv_cos(double __a);
+__device__ float __nv_cosf(float __a);
+__device__ double __nv_cosh(double __a);
+__device__ float __nv_coshf(float __a);
+__device__ double __nv_cospi(double __a);
+__device__ float __nv_cospif(float __a);
+__device__ double __nv_cyl_bessel_i0(double __a);
+__device__ float __nv_cyl_bessel_i0f(float __a);
+__device__ double __nv_cyl_bessel_i1(double __a);
+__device__ float __nv_cyl_bessel_i1f(float __a);
+__device__ double __nv_dadd_rd(double __a, double __b);
+__device__ double __nv_dadd_rn(double __a, double __b);
+__device__ double __nv_dadd_ru(double __a, double __b);
+__device__ double __nv_dadd_rz(double __a, double __b);
+__device__ double __nv_ddiv_rd(double __a, double __b);
+__device__ double __nv_ddiv_rn(double __a, double __b);
+__device__ double __nv_ddiv_ru(double __a, double __b);
+__device__ double __nv_ddiv_rz(double __a, double __b);
+__device__ double __nv_dmul_rd(double __a, double __b);
+__device__ double __nv_dmul_rn(double __a, double __b);
+__device__ double __nv_dmul_ru(double __a, double __b);
+__device__ double __nv_dmul_rz(double __a, double __b);
+__device__ float __nv_double2float_rd(double __a);
+__device__ float __nv_double2float_rn(double __a);
+__device__ float __nv_double2float_ru(double __a);
+__device__ float __nv_double2float_rz(double __a);
+__device__ int __nv_double2hiint(double __a);
+__device__ int __nv_double2int_rd(double __a);
+__device__ int __nv_double2int_rn(double __a);
+__device__ int __nv_double2int_ru(double __a);
+__device__ int __nv_double2int_rz(double __a);
+__device__ long long __nv_double2ll_rd(double __a);
+__device__ long long __nv_double2ll_rn(double __a);
+__device__ long long __nv_double2ll_ru(double __a);
+__device__ long long __nv_double2ll_rz(double __a);
+__device__ int __nv_double2loint(double __a);
+__device__ unsigned int __nv_double2uint_rd(double __a);
+__device__ unsigned int __nv_double2uint_rn(double __a);
+__device__ unsigned int __nv_double2uint_ru(double __a);
+__device__ unsigned int __nv_double2uint_rz(double __a);
+__device__ unsigned long long __nv_double2ull_rd(double __a);
+__device__ unsigned long long __nv_double2ull_rn(double __a);
+__device__ unsigned long long __nv_double2ull_ru(double __a);
+__device__ unsigned long long __nv_double2ull_rz(double __a);
+__device__ unsigned long long __nv_double_as_longlong(double __a);
+__device__ double __nv_drcp_rd(double __a);
+__device__ double __nv_drcp_rn(double __a);
+__device__ double __nv_drcp_ru(double __a);
+__device__ double __nv_drcp_rz(double __a);
+__device__ double __nv_dsqrt_rd(double __a);
+__device__ double __nv_dsqrt_rn(double __a);
+__device__ double __nv_dsqrt_ru(double __a);
+__device__ double __nv_dsqrt_rz(double __a);
+__device__ double __nv_dsub_rd(double __a, double __b);
+__device__ double __nv_dsub_rn(double __a, double __b);
+__device__ double __nv_dsub_ru(double __a, double __b);
+__device__ double __nv_dsub_rz(double __a, double __b);
+__device__ double __nv_erfc(double __a);
+__device__ float __nv_erfcf(float __a);
+__device__ double __nv_erfcinv(double __a);
+__device__ float __nv_erfcinvf(float __a);
+__device__ double __nv_erfcx(double __a);
+__device__ float __nv_erfcxf(float __a);
+__device__ double __nv_erf(double __a);
+__device__ float __nv_erff(float __a);
+__device__ double __nv_erfinv(double __a);
+__device__ float __nv_erfinvf(float __a);
+__device__ double __nv_exp10(double __a);
+__device__ float __nv_exp10f(float __a);
+__device__ double __nv_exp2(double __a);
+__device__ float __nv_exp2f(float __a);
+__device__ double __nv_exp(double __a);
+__device__ float __nv_expf(float __a);
+__device__ double __nv_expm1(double __a);
+__device__ float __nv_expm1f(float __a);
+__device__ double __nv_fabs(double __a);
+__device__ float __nv_fabsf(float __a);
+__device__ float __nv_fadd_rd(float __a, float __b);
+__device__ float __nv_fadd_rn(float __a, float __b);
+__device__ float __nv_fadd_ru(float __a, float __b);
+__device__ float __nv_fadd_rz(float __a, float __b);
+__device__ float __nv_fast_cosf(float __a);
+__device__ float __nv_fast_exp10f(float __a);
+__device__ float __nv_fast_expf(float __a);
+__device__ float __nv_fast_fdividef(float __a, float __b);
+__device__ float __nv_fast_log10f(float __a);
+__device__ float __nv_fast_log2f(float __a);
+__device__ float __nv_fast_logf(float __a);
+__device__ float __nv_fast_powf(float __a, float __b);
+__device__ void __nv_fast_sincosf(float __a, float *__sptr, float *__cptr);
+__device__ float __nv_fast_sinf(float __a);
+__device__ float __nv_fast_tanf(float __a);
+__device__ double __nv_fdim(double __a, double __b);
+__device__ float __nv_fdimf(float __a, float __b);
+__device__ float __nv_fdiv_rd(float __a, float __b);
+__device__ float __nv_fdiv_rn(float __a, float __b);
+__device__ float __nv_fdiv_ru(float __a, float __b);
+__device__ float __nv_fdiv_rz(float __a, float __b);
+__device__ int __nv_ffs(int __a);
+__device__ int __nv_ffsll(long long __a);
+__device__ int __nv_finitef(float __a);
+__device__ unsigned short __nv_float2half_rn(float __a);
+__device__ int __nv_float2int_rd(float __a);
+__device__ int __nv_float2int_rn(float __a);
+__device__ int __nv_float2int_ru(float __a);
+__device__ int __nv_float2int_rz(float __a);
+__device__ long long __nv_float2ll_rd(float __a);
+__device__ long long __nv_float2ll_rn(float __a);
+__device__ long long __nv_float2ll_ru(float __a);
+__device__ long long __nv_float2ll_rz(float __a);
+__device__ unsigned int __nv_float2uint_rd(float __a);
+__device__ unsigned int __nv_float2uint_rn(float __a);
+__device__ unsigned int __nv_float2uint_ru(float __a);
+__device__ unsigned int __nv_float2uint_rz(float __a);
+__device__ unsigned long long __nv_float2ull_rd(float __a);
+__device__ unsigned long long __nv_float2ull_rn(float __a);
+__device__ unsigned long long __nv_float2ull_ru(float __a);
+__device__ unsigned long long __nv_float2ull_rz(float __a);
+__device__ int __nv_float_as_int(float __a);
+__device__ unsigned int __nv_float_as_uint(float __a);
+__device__ double __nv_floor(double __a);
+__device__ float __nv_floorf(float __a);
+__device__ double __nv_fma(double __a, double __b, double __c);
+__device__ float __nv_fmaf(float __a, float __b, float __c);
+__device__ float __nv_fmaf_ieee_rd(float __a, float __b, float __c);
+__device__ float __nv_fmaf_ieee_rn(float __a, float __b, float __c);
+__device__ float __nv_fmaf_ieee_ru(float __a, float __b, float __c);
+__device__ float __nv_fmaf_ieee_rz(float __a, float __b, float __c);
+__device__ float __nv_fmaf_rd(float __a, float __b, float __c);
+__device__ float __nv_fmaf_rn(float __a, float __b, float __c);
+__device__ float __nv_fmaf_ru(float __a, float __b, float __c);
+__device__ float __nv_fmaf_rz(float __a, float __b, float __c);
+__device__ double __nv_fma_rd(double __a, double __b, double __c);
+__device__ double __nv_fma_rn(double __a, double __b, double __c);
+__device__ double __nv_fma_ru(double __a, double __b, double __c);
+__device__ double __nv_fma_rz(double __a, double __b, double __c);
+__device__ double __nv_fmax(double __a, double __b);
+__device__ float __nv_fmaxf(float __a, float __b);
+__device__ double __nv_fmin(double __a, double __b);
+__device__ float __nv_fminf(float __a, float __b);
+__device__ double __nv_fmod(double __a, double __b);
+__device__ float __nv_fmodf(float __a, float __b);
+__device__ float __nv_fmul_rd(float __a, float __b);
+__device__ float __nv_fmul_rn(float __a, float __b);
+__device__ float __nv_fmul_ru(float __a, float __b);
+__device__ float __nv_fmul_rz(float __a, float __b);
+__device__ float __nv_frcp_rd(float __a);
+__device__ float __nv_frcp_rn(float __a);
+__device__ float __nv_frcp_ru(float __a);
+__device__ float __nv_frcp_rz(float __a);
+__device__ double __nv_frexp(double __a, int *__b);
+__device__ float __nv_frexpf(float __a, int *__b);
+__device__ float __nv_frsqrt_rn(float __a);
+__device__ float __nv_fsqrt_rd(float __a);
+__device__ float __nv_fsqrt_rn(float __a);
+__device__ float __nv_fsqrt_ru(float __a);
+__device__ float __nv_fsqrt_rz(float __a);
+__device__ float __nv_fsub_rd(float __a, float __b);
+__device__ float __nv_fsub_rn(float __a, float __b);
+__device__ float __nv_fsub_ru(float __a, float __b);
+__device__ float __nv_fsub_rz(float __a, float __b);
+__device__ int __nv_hadd(int __a, int __b);
+__device__ float __nv_half2float(unsigned short __h);
+__device__ double __nv_hiloint2double(int __a, int __b);
+__device__ double __nv_hypot(double __a, double __b);
+__device__ float __nv_hypotf(float __a, float __b);
+__device__ int __nv_ilogb(double __a);
+__device__ int __nv_ilogbf(float __a);
+__device__ double __nv_int2double_rn(int __a);
+__device__ float __nv_int2float_rd(int __a);
+__device__ float __nv_int2float_rn(int __a);
+__device__ float __nv_int2float_ru(int __a);
+__device__ float __nv_int2float_rz(int __a);
+__device__ float __nv_int_as_float(int __a);
+__device__ int __nv_isfinited(double __a);
+__device__ int __nv_isinfd(double __a);
+__device__ int __nv_isinff(float __a);
+__device__ int __nv_isnand(double __a);
+__device__ int __nv_isnanf(float __a);
+__device__ double __nv_j0(double __a);
+__device__ float __nv_j0f(float __a);
+__device__ double __nv_j1(double __a);
+__device__ float __nv_j1f(float __a);
+__device__ float __nv_jnf(int __a, float __b);
+__device__ double __nv_jn(int __a, double __b);
+__device__ double __nv_ldexp(double __a, int __b);
+__device__ float __nv_ldexpf(float __a, int __b);
+__device__ double __nv_lgamma(double __a);
+__device__ float __nv_lgammaf(float __a);
+__device__ double __nv_ll2double_rd(long long __a);
+__device__ double __nv_ll2double_rn(long long __a);
+__device__ double __nv_ll2double_ru(long long __a);
+__device__ double __nv_ll2double_rz(long long __a);
+__device__ float __nv_ll2float_rd(long long __a);
+__device__ float __nv_ll2float_rn(long long __a);
+__device__ float __nv_ll2float_ru(long long __a);
+__device__ float __nv_ll2float_rz(long long __a);
+__device__ long long __nv_llabs(long long __a);
+__device__ long long __nv_llmax(long long __a, long long __b);
+__device__ long long __nv_llmin(long long __a, long long __b);
+__device__ long long __nv_llrint(double __a);
+__device__ long long __nv_llrintf(float __a);
+__device__ long long __nv_llround(double __a);
+__device__ long long __nv_llroundf(float __a);
+__device__ double __nv_log10(double __a);
+__device__ float __nv_log10f(float __a);
+__device__ double __nv_log1p(double __a);
+__device__ float __nv_log1pf(float __a);
+__device__ double __nv_log2(double __a);
+__device__ float __nv_log2f(float __a);
+__device__ double __nv_logb(double __a);
+__device__ float __nv_logbf(float __a);
+__device__ double __nv_log(double __a);
+__device__ float __nv_logf(float __a);
+__device__ double __nv_longlong_as_double(long long __a);
+__device__ int __nv_max(int __a, int __b);
+__device__ int __nv_min(int __a, int __b);
+__device__ double __nv_modf(double __a, double *__b);
+__device__ float __nv_modff(float __a, float *__b);
+__device__ int __nv_mul24(int __a, int __b);
+__device__ long long __nv_mul64hi(long long __a, long long __b);
+__device__ int __nv_mulhi(int __a, int __b);
+__device__ double __nv_nan(const signed char *__a);
+__device__ float __nv_nanf(const signed char *__a);
+__device__ double __nv_nearbyint(double __a);
+__device__ float __nv_nearbyintf(float __a);
+__device__ double __nv_nextafter(double __a, double __b);
+__device__ float __nv_nextafterf(float __a, float __b);
+__device__ double __nv_norm3d(double __a, double __b, double __c);
+__device__ float __nv_norm3df(float __a, float __b, float __c);
+__device__ double __nv_norm4d(double __a, double __b, double __c, double __d);
+__device__ float __nv_norm4df(float __a, float __b, float __c, float __d);
+__device__ double __nv_normcdf(double __a);
+__device__ float __nv_normcdff(float __a);
+__device__ double __nv_normcdfinv(double __a);
+__device__ float __nv_normcdfinvf(float __a);
+__device__ float __nv_normf(int __a, const float *__b);
+__device__ double __nv_norm(int __a, const double *__b);
+__device__ int __nv_popc(int __a);
+__device__ int __nv_popcll(long long __a);
+__device__ double __nv_pow(double __a, double __b);
+__device__ float __nv_powf(float __a, float __b);
+__device__ double __nv_powi(double __a, int __b);
+__device__ float __nv_powif(float __a, int __b);
+__device__ double __nv_rcbrt(double __a);
+__device__ float __nv_rcbrtf(float __a);
+__device__ double __nv_rcp64h(double __a);
+__device__ double __nv_remainder(double __a, double __b);
+__device__ float __nv_remainderf(float __a, float __b);
+__device__ double __nv_remquo(double __a, double __b, int *__c);
+__device__ float __nv_remquof(float __a, float __b, int *__c);
+__device__ int __nv_rhadd(int __a, int __b);
+__device__ double __nv_rhypot(double __a, double __b);
+__device__ float __nv_rhypotf(float __a, float __b);
+__device__ double __nv_rint(double __a);
+__device__ float __nv_rintf(float __a);
+__device__ double __nv_rnorm3d(double __a, double __b, double __c);
+__device__ float __nv_rnorm3df(float __a, float __b, float __c);
+__device__ double __nv_rnorm4d(double __a, double __b, double __c, double __d);
+__device__ float __nv_rnorm4df(float __a, float __b, float __c, float __d);
+__device__ float __nv_rnormf(int __a, const float *__b);
+__device__ double __nv_rnorm(int __a, const double *__b);
+__device__ double __nv_round(double __a);
+__device__ float __nv_roundf(float __a);
+__device__ double __nv_rsqrt(double __a);
+__device__ float __nv_rsqrtf(float __a);
+__device__ int __nv_sad(int __a, int __b, int __c);
+__device__ float __nv_saturatef(float __a);
+__device__ double __nv_scalbn(double __a, int __b);
+__device__ float __nv_scalbnf(float __a, int __b);
+__device__ int __nv_signbitd(double __a);
+__device__ int __nv_signbitf(float __a);
+__device__ void __nv_sincos(double __a, double *__b, double *__c);
+__device__ void __nv_sincosf(float __a, float *__b, float *__c);
+__device__ void __nv_sincospi(double __a, double *__b, double *__c);
+__device__ void __nv_sincospif(float __a, float *__b, float *__c);
+__device__ double __nv_sin(double __a);
+__device__ float __nv_sinf(float __a);
+__device__ double __nv_sinh(double __a);
+__device__ float __nv_sinhf(float __a);
+__device__ double __nv_sinpi(double __a);
+__device__ float __nv_sinpif(float __a);
+__device__ double __nv_sqrt(double __a);
+__device__ float __nv_sqrtf(float __a);
+__device__ double __nv_tan(double __a);
+__device__ float __nv_tanf(float __a);
+__device__ double __nv_tanh(double __a);
+__device__ float __nv_tanhf(float __a);
+__device__ double __nv_tgamma(double __a);
+__device__ float __nv_tgammaf(float __a);
+__device__ double __nv_trunc(double __a);
+__device__ float __nv_truncf(float __a);
+__device__ int __nv_uhadd(unsigned int __a, unsigned int __b);
+__device__ double __nv_uint2double_rn(unsigned int __i);
+__device__ float __nv_uint2float_rd(unsigned int __a);
+__device__ float __nv_uint2float_rn(unsigned int __a);
+__device__ float __nv_uint2float_ru(unsigned int __a);
+__device__ float __nv_uint2float_rz(unsigned int __a);
+__device__ float __nv_uint_as_float(unsigned int __a);
+__device__ double __nv_ull2double_rd(unsigned long long __a);
+__device__ double __nv_ull2double_rn(unsigned long long __a);
+__device__ double __nv_ull2double_ru(unsigned long long __a);
+__device__ double __nv_ull2double_rz(unsigned long long __a);
+__device__ float __nv_ull2float_rd(unsigned long long __a);
+__device__ float __nv_ull2float_rn(unsigned long long __a);
+__device__ float __nv_ull2float_ru(unsigned long long __a);
+__device__ float __nv_ull2float_rz(unsigned long long __a);
+__device__ unsigned long long __nv_ullmax(unsigned long long __a,
+ unsigned long long __b);
+__device__ unsigned long long __nv_ullmin(unsigned long long __a,
+ unsigned long long __b);
+__device__ unsigned int __nv_umax(unsigned int __a, unsigned int __b);
+__device__ unsigned int __nv_umin(unsigned int __a, unsigned int __b);
+__device__ unsigned int __nv_umul24(unsigned int __a, unsigned int __b);
+__device__ unsigned long long __nv_umul64hi(unsigned long long __a,
+ unsigned long long __b);
+__device__ unsigned int __nv_umulhi(unsigned int __a, unsigned int __b);
+__device__ unsigned int __nv_urhadd(unsigned int __a, unsigned int __b);
+__device__ unsigned int __nv_usad(unsigned int __a, unsigned int __b,
+ unsigned int __c);
+#if CUDA_VERSION >= 9000 && CUDA_VERSION < 9020
+__device__ int __nv_vabs2(int __a);
+__device__ int __nv_vabs4(int __a);
+__device__ int __nv_vabsdiffs2(int __a, int __b);
+__device__ int __nv_vabsdiffs4(int __a, int __b);
+__device__ int __nv_vabsdiffu2(int __a, int __b);
+__device__ int __nv_vabsdiffu4(int __a, int __b);
+__device__ int __nv_vabsss2(int __a);
+__device__ int __nv_vabsss4(int __a);
+__device__ int __nv_vadd2(int __a, int __b);
+__device__ int __nv_vadd4(int __a, int __b);
+__device__ int __nv_vaddss2(int __a, int __b);
+__device__ int __nv_vaddss4(int __a, int __b);
+__device__ int __nv_vaddus2(int __a, int __b);
+__device__ int __nv_vaddus4(int __a, int __b);
+__device__ int __nv_vavgs2(int __a, int __b);
+__device__ int __nv_vavgs4(int __a, int __b);
+__device__ int __nv_vavgu2(int __a, int __b);
+__device__ int __nv_vavgu4(int __a, int __b);
+__device__ int __nv_vcmpeq2(int __a, int __b);
+__device__ int __nv_vcmpeq4(int __a, int __b);
+__device__ int __nv_vcmpges2(int __a, int __b);
+__device__ int __nv_vcmpges4(int __a, int __b);
+__device__ int __nv_vcmpgeu2(int __a, int __b);
+__device__ int __nv_vcmpgeu4(int __a, int __b);
+__device__ int __nv_vcmpgts2(int __a, int __b);
+__device__ int __nv_vcmpgts4(int __a, int __b);
+__device__ int __nv_vcmpgtu2(int __a, int __b);
+__device__ int __nv_vcmpgtu4(int __a, int __b);
+__device__ int __nv_vcmples2(int __a, int __b);
+__device__ int __nv_vcmples4(int __a, int __b);
+__device__ int __nv_vcmpleu2(int __a, int __b);
+__device__ int __nv_vcmpleu4(int __a, int __b);
+__device__ int __nv_vcmplts2(int __a, int __b);
+__device__ int __nv_vcmplts4(int __a, int __b);
+__device__ int __nv_vcmpltu2(int __a, int __b);
+__device__ int __nv_vcmpltu4(int __a, int __b);
+__device__ int __nv_vcmpne2(int __a, int __b);
+__device__ int __nv_vcmpne4(int __a, int __b);
+__device__ int __nv_vhaddu2(int __a, int __b);
+__device__ int __nv_vhaddu4(int __a, int __b);
+__device__ int __nv_vmaxs2(int __a, int __b);
+__device__ int __nv_vmaxs4(int __a, int __b);
+__device__ int __nv_vmaxu2(int __a, int __b);
+__device__ int __nv_vmaxu4(int __a, int __b);
+__device__ int __nv_vmins2(int __a, int __b);
+__device__ int __nv_vmins4(int __a, int __b);
+__device__ int __nv_vminu2(int __a, int __b);
+__device__ int __nv_vminu4(int __a, int __b);
+__device__ int __nv_vneg2(int __a);
+__device__ int __nv_vneg4(int __a);
+__device__ int __nv_vnegss2(int __a);
+__device__ int __nv_vnegss4(int __a);
+__device__ int __nv_vsads2(int __a, int __b);
+__device__ int __nv_vsads4(int __a, int __b);
+__device__ int __nv_vsadu2(int __a, int __b);
+__device__ int __nv_vsadu4(int __a, int __b);
+__device__ int __nv_vseteq2(int __a, int __b);
+__device__ int __nv_vseteq4(int __a, int __b);
+__device__ int __nv_vsetges2(int __a, int __b);
+__device__ int __nv_vsetges4(int __a, int __b);
+__device__ int __nv_vsetgeu2(int __a, int __b);
+__device__ int __nv_vsetgeu4(int __a, int __b);
+__device__ int __nv_vsetgts2(int __a, int __b);
+__device__ int __nv_vsetgts4(int __a, int __b);
+__device__ int __nv_vsetgtu2(int __a, int __b);
+__device__ int __nv_vsetgtu4(int __a, int __b);
+__device__ int __nv_vsetles2(int __a, int __b);
+__device__ int __nv_vsetles4(int __a, int __b);
+__device__ int __nv_vsetleu2(int __a, int __b);
+__device__ int __nv_vsetleu4(int __a, int __b);
+__device__ int __nv_vsetlts2(int __a, int __b);
+__device__ int __nv_vsetlts4(int __a, int __b);
+__device__ int __nv_vsetltu2(int __a, int __b);
+__device__ int __nv_vsetltu4(int __a, int __b);
+__device__ int __nv_vsetne2(int __a, int __b);
+__device__ int __nv_vsetne4(int __a, int __b);
+__device__ int __nv_vsub2(int __a, int __b);
+__device__ int __nv_vsub4(int __a, int __b);
+__device__ int __nv_vsubss2(int __a, int __b);
+__device__ int __nv_vsubss4(int __a, int __b);
+__device__ int __nv_vsubus2(int __a, int __b);
+__device__ int __nv_vsubus4(int __a, int __b);
+#endif // CUDA_VERSION
+__device__ double __nv_y0(double __a);
+__device__ float __nv_y0f(float __a);
+__device__ double __nv_y1(double __a);
+__device__ float __nv_y1f(float __a);
+__device__ float __nv_ynf(int __a, float __b);
+__device__ double __nv_yn(int __a, double __b);
+} // extern "C"
+#endif // __CLANG_CUDA_LIBDEVICE_DECLARES_H__
diff --git a/lib/Headers/__clang_cuda_runtime_wrapper.h b/lib/Headers/__clang_cuda_runtime_wrapper.h
index a82a8490f367..09705a273a47 100644
--- a/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -62,7 +62,7 @@
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
-#elif CUDA_VERSION < 7000 || CUDA_VERSION > 9000
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 9020
#error "Unsupported CUDA version!"
#endif
@@ -84,6 +84,9 @@
#define __DEVICE_FUNCTIONS_H__
#define __MATH_FUNCTIONS_H__
#define __COMMON_FUNCTIONS_H__
+// device_functions_decls.h is replaced by __clang_cuda_device_functions.h,
+// included below.
+#define __DEVICE_FUNCTIONS_DECLS_H__
#undef __CUDACC__
#if CUDA_VERSION < 9000
@@ -97,11 +100,17 @@
#include "host_config.h"
#include "host_defines.h"
+// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
+// cuda_device_runtime_api.h ends up being __attribute__((weak)), which is
+// functionally equivalent to what we need.
+#pragma push_macro("nv_weak")
+#define nv_weak weak
#undef __CUDABE__
#undef __CUDA_LIBDEVICE__
#define __CUDACC__
#include "cuda_runtime.h"
+#pragma pop_macro("nv_weak")
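
The `push_macro`/`pop_macro` pragmas confine the redefinition to the single `#include` between them, restoring whatever was defined before. The general shape of the pattern, as a runnable sketch (all names here are placeholders, not from this patch):

```c++
// Sketch of the push/define/include/pop pattern used for nv_weak above.
#include <cstdio>

#define WIDGET 1                // pre-existing definition we must preserve

#pragma push_macro("WIDGET")    // save the current definition
#undef WIDGET
#define WIDGET 2                // temporary replacement for the duration
static const int during = WIDGET;
#pragma pop_macro("WIDGET")     // restore the saved definition

static const int after = WIDGET;

int main() {
  printf("during=%d after=%d\n", during, after); // during=2 after=1
  return 0;
}
```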
#undef __CUDACC__
#define __CUDABE__
@@ -137,20 +146,22 @@ inline __host__ double __signbitd(double x) {
}
#endif
-// We need decls for functions in CUDA's libdevice with __device__
-// attribute only. Alas they come either as __host__ __device__ or
-// with no attributes at all. To work around that, define __CUDA_RTC__
-// which produces HD variant and undef __host__ which gives us desided
-// decls with __device__ attribute.
-#pragma push_macro("__host__")
-#define __host__
-#define __CUDACC_RTC__
-#include "device_functions_decls.h"
-#undef __CUDACC_RTC__
+// CUDA 9.1 no longer provides declarations for libdevice functions, so we need
+// to provide our own.
+#include <__clang_cuda_libdevice_declares.h>
-// Temporarily poison __host__ macro to ensure it's not used by any of
-// the headers we're about to include.
-#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+// Wrappers for many device-side standard library functions became compiler
+// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
+// provides its own implementation of the wrappers.
+#if CUDA_VERSION >= 9000
+#include <__clang_cuda_device_functions.h>
+#endif
+
+// __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
+// counterpart does not do that, so we need to make it empty here to keep the
+// CUDA headers included below happy.
+#undef __THROW
+#define __THROW
// CUDA 8.0.41 relies on __USE_FAST_MATH__ and __CUDA_PREC_DIV's values.
// Previous versions used to check whether they are defined or not.
@@ -167,24 +178,20 @@ inline __host__ double __signbitd(double x) {
#endif
#endif
+// Temporarily poison __host__ macro to ensure it's not used by any of
+// the headers we're about to include.
+#pragma push_macro("__host__")
+#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+
// device_functions.hpp and math_functions*.hpp use 'static
// __forceinline__' (with no __device__) for definitions of device
// functions. Temporarily redefine __forceinline__ to include
// __device__.
#pragma push_macro("__forceinline__")
#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
-
-#pragma push_macro("__float2half_rn")
-#if CUDA_VERSION >= 9000
-// CUDA-9 has conflicting prototypes for __float2half_rn(float f) in
-// cuda_fp16.h[pp] and device_functions.hpp. We need to get the one in
-// device_functions.hpp out of the way.
-#define __float2half_rn __float2half_rn_disabled
-#endif
-
+#if CUDA_VERSION < 9000
#include "device_functions.hpp"
-#pragma pop_macro("__float2half_rn")
-
+#endif
// math_function.hpp uses the __USE_FAST_MATH__ macro to determine whether we
// get the slow-but-accurate or fast-but-inaccurate versions of functions like
@@ -196,17 +203,32 @@ inline __host__ double __signbitd(double x) {
#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
#define __USE_FAST_MATH__ 1
#endif
+
+#if CUDA_VERSION >= 9000
+// CUDA-9.2 needs host-side memcpy for some host functions in
+// device_functions.hpp
+#if CUDA_VERSION >= 9020
+#include <string.h>
+#endif
+#include "crt/math_functions.hpp"
+#else
#include "math_functions.hpp"
+#endif
+
#pragma pop_macro("__USE_FAST_MATH__")
+#if CUDA_VERSION < 9000
#include "math_functions_dbl_ptx3.hpp"
+#endif
#pragma pop_macro("__forceinline__")
// Pull in host-only functions that are only available when neither
// __CUDACC__ nor __CUDABE__ are defined.
#undef __MATH_FUNCTIONS_HPP__
#undef __CUDABE__
+#if CUDA_VERSION < 9000
#include "math_functions.hpp"
+#endif
// Alas, additional overloads for these functions are hard to get to.
// Considering that we only need these overloads for a few functions,
// we can provide them here.
@@ -222,22 +244,36 @@ static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
static inline float normcdf(float __a) { return normcdff(__a); }
static inline float erfcx(float __a) { return erfcxf(__a); }
+#if CUDA_VERSION < 9000
// For some reason single-argument variant is not always declared by
// CUDA headers. Alas, device_functions.hpp included below needs it.
static inline __device__ void __brkpt(int __c) { __brkpt(); }
+#endif
// Now include *.hpp with definitions of various GPU functions. Alas,
// a lot of things get declared/defined with the __host__ attribute, which
// we don't want, so we have to define it out. We also have to include
// {device,math}_functions.hpp again in order to extract the other
// branch of #if/else inside.
-
#define __host__
#undef __CUDABE__
#define __CUDACC__
+#if CUDA_VERSION >= 9000
+// Some atomic functions became compiler builtins in CUDA-9, so we need their
+// declarations.
+#include "device_atomic_functions.h"
+#endif
#undef __DEVICE_FUNCTIONS_HPP__
#include "device_atomic_functions.hpp"
+#if CUDA_VERSION >= 9000
+#include "crt/device_functions.hpp"
+#include "crt/device_double_functions.hpp"
+#else
#include "device_functions.hpp"
+#define __CUDABE__
+#include "device_double_functions.h"
+#undef __CUDABE__
+#endif
#include "sm_20_atomic_functions.hpp"
#include "sm_20_intrinsics.hpp"
#include "sm_32_atomic_functions.hpp"
@@ -251,8 +287,11 @@ static inline __device__ void __brkpt(int __c) { __brkpt(); }
// reason about our code.
#if CUDA_VERSION >= 8000
+#pragma push_macro("__CUDA_ARCH__")
+#undef __CUDA_ARCH__
#include "sm_60_atomic_functions.hpp"
#include "sm_61_intrinsics.hpp"
+#pragma pop_macro("__CUDA_ARCH__")
#endif
#undef __MATH_FUNCTIONS_HPP__
@@ -279,7 +318,11 @@ static inline __device__ void __brkpt(int __c) { __brkpt(); }
#endif
#endif
+#if CUDA_VERSION >= 9000
+#include "crt/math_functions.hpp"
+#else
#include "math_functions.hpp"
+#endif
#pragma pop_macro("_GLIBCXX_MATH_H")
#pragma pop_macro("_LIBCPP_VERSION")
#pragma pop_macro("__GNUC__")
diff --git a/lib/Headers/__wmmintrin_aes.h b/lib/Headers/__wmmintrin_aes.h
index 3a2ee1b2ef2e..70c355efc48c 100644
--- a/lib/Headers/__wmmintrin_aes.h
+++ b/lib/Headers/__wmmintrin_aes.h
@@ -20,15 +20,18 @@
*
*===-----------------------------------------------------------------------===
*/
-#ifndef _WMMINTRIN_AES_H
-#define _WMMINTRIN_AES_H
-#include <emmintrin.h>
+#ifndef __WMMINTRIN_H
+#error "Never use <__wmmintrin_aes.h> directly; include <wmmintrin.h> instead."
+#endif
+
+#ifndef __WMMINTRIN_AES_H
+#define __WMMINTRIN_AES_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes"), __min_vector_width__(128)))
-/// \brief Performs a single round of AES encryption using the Equivalent
+/// Performs a single round of AES encryption using the Equivalent
/// Inverse Cipher, transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
@@ -48,7 +51,7 @@ _mm_aesenc_si128(__m128i __V, __m128i __R)
return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R);
}
-/// \brief Performs the final round of AES encryption using the Equivalent
+/// Performs the final round of AES encryption using the Equivalent
/// Inverse Cipher, transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
@@ -68,7 +71,7 @@ _mm_aesenclast_si128(__m128i __V, __m128i __R)
return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R);
}
-/// \brief Performs a single round of AES decryption using the Equivalent
+/// Performs a single round of AES decryption using the Equivalent
/// Inverse Cipher, transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
@@ -88,7 +91,7 @@ _mm_aesdec_si128(__m128i __V, __m128i __R)
return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R);
}
-/// \brief Performs the final round of AES decryption using the Equivalent
+/// Performs the final round of AES decryption using the Equivalent
/// Inverse Cipher, transforming the state value from the first source
/// operand using a 128-bit round key value contained in the second source
/// operand, and writes the result to the destination.
@@ -108,7 +111,7 @@ _mm_aesdeclast_si128(__m128i __V, __m128i __R)
return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R);
}
-/// \brief Applies the AES InvMixColumns() transformation to an expanded key
+/// Applies the AES InvMixColumns() transformation to an expanded key
/// contained in the source operand, and writes the result to the
/// destination.
///
@@ -125,7 +128,7 @@ _mm_aesimc_si128(__m128i __V)
return (__m128i)__builtin_ia32_aesimc128((__v2di)__V);
}
-/// \brief Generates a round key for AES encyption, operating on 128-bit data
+/// Generates a round key for AES encryption, operating on 128-bit data
/// specified in the first source operand and using an 8-bit round constant
/// specified by the second source operand, and writes the result to the
/// destination.
@@ -148,4 +151,4 @@ _mm_aesimc_si128(__m128i __V)
#undef __DEFAULT_FN_ATTRS
-#endif /* _WMMINTRIN_AES_H */
+#endif /* __WMMINTRIN_AES_H */
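
For context, a hedged usage sketch of the AES round intrinsics documented above: one AES-128 block encryption, assuming `rk` already holds the 11 expanded round keys (key expansion is not shown). Compile with -maes; the function name is illustrative, not part of the patch.

#include <wmmintrin.h>
static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
  block = _mm_xor_si128(block, rk[0]);         /* initial AddRoundKey */
  for (int i = 1; i < 10; ++i)
    block = _mm_aesenc_si128(block, rk[i]);    /* nine full rounds */
  return _mm_aesenclast_si128(block, rk[10]);  /* final round, no MixColumns */
}
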
diff --git a/lib/Headers/__wmmintrin_pclmul.h b/lib/Headers/__wmmintrin_pclmul.h
index e9c6a9f6d415..e0f928796ac1 100644
--- a/lib/Headers/__wmmintrin_pclmul.h
+++ b/lib/Headers/__wmmintrin_pclmul.h
@@ -20,10 +20,15 @@
*
*===-----------------------------------------------------------------------===
*/
-#ifndef _WMMINTRIN_PCLMUL_H
-#define _WMMINTRIN_PCLMUL_H
-/// \brief Multiplies two 64-bit integer values, which are selected from source
+#ifndef __WMMINTRIN_H
+#error "Never use <__wmmintrin_pclmul.h> directly; include <wmmintrin.h> instead."
+#endif
+
+#ifndef __WMMINTRIN_PCLMUL_H
+#define __WMMINTRIN_PCLMUL_H
+
+/// Multiplies two 64-bit integer values, which are selected from source
/// operands using the immediate-value operand. The multiplication is a
/// carry-less multiplication, and the 128-bit integer product is stored in
/// the destination.
@@ -50,8 +55,8 @@
/// Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
/// \returns The 128-bit integer vector containing the result of the carry-less
/// multiplication of the selected 64-bit values.
-#define _mm_clmulepi64_si128(__X, __Y, __I) \
- ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(__X), \
- (__v2di)(__m128i)(__Y), (char)(__I)))
+#define _mm_clmulepi64_si128(X, Y, I) \
+ ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \
+ (__v2di)(__m128i)(Y), (char)(I)))
-#endif /* _WMMINTRIN_PCLMUL_H */
+#endif /* __WMMINTRIN_PCLMUL_H */
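
An illustrative sketch of the immediate operand described above: bit 0 of the immediate selects which half of the first operand is used, bit 4 which half of the second. The helper below exists only to exercise both selections; compile with -mpclmul.

#include <wmmintrin.h>
static __m128i clmul_halves(__m128i x, __m128i y) {
  __m128i lo = _mm_clmulepi64_si128(x, y, 0x00); /* x[63:0]   * y[63:0]   */
  __m128i hi = _mm_clmulepi64_si128(x, y, 0x11); /* x[127:64] * y[127:64] */
  return _mm_xor_si128(lo, hi);                  /* combine the two products */
}
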
diff --git a/lib/Headers/ammintrin.h b/lib/Headers/ammintrin.h
index 2843a7a2677f..680b4465eaae 100644
--- a/lib/Headers/ammintrin.h
+++ b/lib/Headers/ammintrin.h
@@ -27,9 +27,9 @@
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a"), __min_vector_width__(128)))
-/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+/// Extracts the specified bits from the lower 64 bits of the 128-bit
/// integer vector operand at the index \a idx and of the length \a len.
///
/// \headerfile <x86intrin.h>
@@ -57,7 +57,7 @@
((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \
(char)(len), (char)(idx)))
-/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
+/// Extracts the specified bits from the lower 64 bits of the 128-bit
/// integer vector operand at the index and of the length specified by
/// \a __y.
///
@@ -82,7 +82,7 @@ _mm_extract_si64(__m128i __x, __m128i __y)
return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
}
-/// \brief Inserts bits of a specified length from the source integer vector
+/// Inserts bits of a specified length from the source integer vector
/// \a y into the lower 64 bits of the destination integer vector \a x at
/// the index \a idx and of the length \a len.
///
@@ -120,7 +120,7 @@ _mm_extract_si64(__m128i __x, __m128i __y)
(__v2di)(__m128i)(y), \
(char)(len), (char)(idx)))
-/// \brief Inserts bits of a specified length from the source integer vector
+/// Inserts bits of a specified length from the source integer vector
/// \a __y into the lower 64 bits of the destination integer vector \a __x
/// at the index and of the length specified by \a __y.
///
@@ -152,7 +152,7 @@ _mm_insert_si64(__m128i __x, __m128i __y)
return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y);
}
-/// \brief Stores a 64-bit double-precision value in a 64-bit memory location.
+/// Stores a 64-bit double-precision value in a 64-bit memory location.
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
@@ -170,7 +170,7 @@ _mm_stream_sd(double *__p, __m128d __a)
__builtin_ia32_movntsd(__p, (__v2df)__a);
}
-/// \brief Stores a 32-bit single-precision floating-point value in a 32-bit
+/// Stores a 32-bit single-precision floating-point value in a 32-bit
/// memory location. To minimize caching, the data is flagged as
/// non-temporal (unlikely to be used again soon).
///
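
A brief usage sketch for the SSE4a non-temporal stores mentioned above: `_mm_stream_sd` writes the low double of its vector operand while hinting that the line should bypass the caches, so streaming loops usually end with a store fence. The function below is a hypothetical example, not part of the header; compile with -msse4a.

#include <ammintrin.h>
#include <stddef.h>
static void stream_fill(double *dst, size_t n, __m128d v) {
  for (size_t i = 0; i < n; ++i)
    _mm_stream_sd(dst + i, v);  /* non-temporal scalar store */
  _mm_sfence();                 /* order the streaming stores */
}
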
diff --git a/lib/Headers/avx2intrin.h b/lib/Headers/avx2intrin.h
index caf4ced92054..9688a96fde18 100644
--- a/lib/Headers/avx2intrin.h
+++ b/lib/Headers/avx2intrin.h
@@ -29,120 +29,121 @@
#define __AVX2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx2")))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm256_mpsadbw_epu8(X, Y, M) \
(__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
(__v32qi)(__m256i)(Y), (int)(M))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi8(__m256i __a)
{
return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi16(__m256i __a)
{
return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi32(__m256i __a)
{
return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi32(__m256i __V1, __m256i __V2)
{
return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a + (__v32qu)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a + (__v16hu)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a + (__v8su)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a + (__v4du)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
}
-#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+#define _mm256_alignr_epi8(a, b, n) \
(__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (n)); })
+ (__v32qi)(__m256i)(b), (n))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_and_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a & (__v4du)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_andnot_si256(__m256i __a, __m256i __b)
{
return (__m256i)(~(__v4du)__a & (__v4du)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu8(__m256i __a, __m256i __b)
{
typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
@@ -152,7 +153,7 @@ _mm256_avg_epu8(__m256i __a, __m256i __b)
>> 1, __v32qu);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu16(__m256i __a, __m256i __b)
{
typedef unsigned int __v16su __attribute__((__vector_size__(64)));
@@ -162,58 +163,42 @@ _mm256_avg_epu16(__m256i __a, __m256i __b)
>> 1, __v16hu);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
{
return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
(__v32qi)__M);
}
-#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(V1), \
- (__v16hi)(__m256i)(V2), \
- (((M) & 0x01) ? 16 : 0), \
- (((M) & 0x02) ? 17 : 1), \
- (((M) & 0x04) ? 18 : 2), \
- (((M) & 0x08) ? 19 : 3), \
- (((M) & 0x10) ? 20 : 4), \
- (((M) & 0x20) ? 21 : 5), \
- (((M) & 0x40) ? 22 : 6), \
- (((M) & 0x80) ? 23 : 7), \
- (((M) & 0x01) ? 24 : 8), \
- (((M) & 0x02) ? 25 : 9), \
- (((M) & 0x04) ? 26 : 10), \
- (((M) & 0x08) ? 27 : 11), \
- (((M) & 0x10) ? 28 : 12), \
- (((M) & 0x20) ? 29 : 13), \
- (((M) & 0x40) ? 30 : 14), \
- (((M) & 0x80) ? 31 : 15)); })
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+#define _mm256_blend_epi16(V1, V2, M) \
+ (__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qi)__a == (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a == (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a == (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a == (__v4di)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
{
/* This function always performs a signed comparison, but __v32qi is a char
@@ -221,151 +206,151 @@ _mm256_cmpgt_epi8(__m256i __a, __m256i __b)
return (__m256i)((__v32qs)__a > (__v32qs)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a > (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a > (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a > (__v4di)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadds_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsubs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maddubs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_madd_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS256
_mm256_movemask_epi8(__m256i __a)
{
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi16(__m128i __V)
{
/* This function always performs a signed extension, but __v16qi is a char
@@ -373,7 +358,7 @@ _mm256_cvtepi8_epi16(__m128i __V)
return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi32(__m128i __V)
{
/* This function always performs a signed extension, but __v16qi is a char
@@ -381,7 +366,7 @@ _mm256_cvtepi8_epi32(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi64(__m128i __V)
{
/* This function always performs a signed extension, but __v16qi is a char
@@ -389,920 +374,795 @@ _mm256_cvtepi8_epi64(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi16(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a * (__v16hu)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi32 (__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a * (__v8su)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epu32(__m256i __a, __m256i __b)
{
return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_or_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a | (__v4du)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sad_epu8(__m256i __a, __m256i __b)
{
return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shuffle_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
}
-#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v8si)(__m256i)(a), \
- (__v8si)_mm256_undefined_si256(), \
- 0 + (((imm) >> 0) & 0x3), \
- 0 + (((imm) >> 2) & 0x3), \
- 0 + (((imm) >> 4) & 0x3), \
- 0 + (((imm) >> 6) & 0x3), \
- 4 + (((imm) >> 0) & 0x3), \
- 4 + (((imm) >> 2) & 0x3), \
- 4 + (((imm) >> 4) & 0x3), \
- 4 + (((imm) >> 6) & 0x3)); })
-
-#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
- (__v16hi)_mm256_undefined_si256(), \
- 0, 1, 2, 3, \
- 4 + (((imm) >> 0) & 0x3), \
- 4 + (((imm) >> 2) & 0x3), \
- 4 + (((imm) >> 4) & 0x3), \
- 4 + (((imm) >> 6) & 0x3), \
- 8, 9, 10, 11, \
- 12 + (((imm) >> 0) & 0x3), \
- 12 + (((imm) >> 2) & 0x3), \
- 12 + (((imm) >> 4) & 0x3), \
- 12 + (((imm) >> 6) & 0x3)); })
-
-#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v16hi)(__m256i)(a), \
- (__v16hi)_mm256_undefined_si256(), \
- 0 + (((imm) >> 0) & 0x3), \
- 0 + (((imm) >> 2) & 0x3), \
- 0 + (((imm) >> 4) & 0x3), \
- 0 + (((imm) >> 6) & 0x3), \
- 4, 5, 6, 7, \
- 8 + (((imm) >> 0) & 0x3), \
- 8 + (((imm) >> 2) & 0x3), \
- 8 + (((imm) >> 4) & 0x3), \
- 8 + (((imm) >> 6) & 0x3), \
- 12, 13, 14, 15); })
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+#define _mm256_shuffle_epi32(a, imm) \
+ (__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))
+
+#define _mm256_shufflehi_epi16(a, imm) \
+ (__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))
+
+#define _mm256_shufflelo_epi16(a, imm) \
+ (__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
}
-#define _mm256_slli_si256(a, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector( \
- (__v32qi)_mm256_setzero_si256(), \
- (__v32qi)(__m256i)(a), \
- ((char)(imm)&0xF0) ? 0 : ((char)(imm)>0x0 ? 16 : 32) - (char)(imm), \
- ((char)(imm)&0xF0) ? 1 : ((char)(imm)>0x1 ? 17 : 33) - (char)(imm), \
- ((char)(imm)&0xF0) ? 2 : ((char)(imm)>0x2 ? 18 : 34) - (char)(imm), \
- ((char)(imm)&0xF0) ? 3 : ((char)(imm)>0x3 ? 19 : 35) - (char)(imm), \
- ((char)(imm)&0xF0) ? 4 : ((char)(imm)>0x4 ? 20 : 36) - (char)(imm), \
- ((char)(imm)&0xF0) ? 5 : ((char)(imm)>0x5 ? 21 : 37) - (char)(imm), \
- ((char)(imm)&0xF0) ? 6 : ((char)(imm)>0x6 ? 22 : 38) - (char)(imm), \
- ((char)(imm)&0xF0) ? 7 : ((char)(imm)>0x7 ? 23 : 39) - (char)(imm), \
- ((char)(imm)&0xF0) ? 8 : ((char)(imm)>0x8 ? 24 : 40) - (char)(imm), \
- ((char)(imm)&0xF0) ? 9 : ((char)(imm)>0x9 ? 25 : 41) - (char)(imm), \
- ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 42) - (char)(imm), \
- ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 43) - (char)(imm), \
- ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 44) - (char)(imm), \
- ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 45) - (char)(imm), \
- ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 46) - (char)(imm), \
- ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 47) - (char)(imm), \
- ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 48) - (char)(imm), \
- ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 : 49) - (char)(imm), \
- ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 50) - (char)(imm), \
- ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 51) - (char)(imm), \
- ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 52) - (char)(imm), \
- ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 53) - (char)(imm), \
- ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 54) - (char)(imm), \
- ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 55) - (char)(imm), \
- ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 56) - (char)(imm), \
- ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 57) - (char)(imm), \
- ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 58) - (char)(imm), \
- ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 59) - (char)(imm), \
- ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 60) - (char)(imm), \
- ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 61) - (char)(imm), \
- ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 62) - (char)(imm), \
- ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 63) - (char)(imm)); })
-
-#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+#define _mm256_slli_si256(a, imm) \
+ (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
+
+#define _mm256_bslli_epi128(a, imm) \
+ (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psllqi256((__v4di)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psllq256((__v4di)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
}
-#define _mm256_srli_si256(a, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector( \
- (__v32qi)(__m256i)(a), \
- (__v32qi)_mm256_setzero_si256(), \
- ((char)(imm)&0xF0) ? 32 : (char)(imm) + ((char)(imm)>0xF ? 16 : 0), \
- ((char)(imm)&0xF0) ? 33 : (char)(imm) + ((char)(imm)>0xE ? 17 : 1), \
- ((char)(imm)&0xF0) ? 34 : (char)(imm) + ((char)(imm)>0xD ? 18 : 2), \
- ((char)(imm)&0xF0) ? 35 : (char)(imm) + ((char)(imm)>0xC ? 19 : 3), \
- ((char)(imm)&0xF0) ? 36 : (char)(imm) + ((char)(imm)>0xB ? 20 : 4), \
- ((char)(imm)&0xF0) ? 37 : (char)(imm) + ((char)(imm)>0xA ? 21 : 5), \
- ((char)(imm)&0xF0) ? 38 : (char)(imm) + ((char)(imm)>0x9 ? 22 : 6), \
- ((char)(imm)&0xF0) ? 39 : (char)(imm) + ((char)(imm)>0x8 ? 23 : 7), \
- ((char)(imm)&0xF0) ? 40 : (char)(imm) + ((char)(imm)>0x7 ? 24 : 8), \
- ((char)(imm)&0xF0) ? 41 : (char)(imm) + ((char)(imm)>0x6 ? 25 : 9), \
- ((char)(imm)&0xF0) ? 42 : (char)(imm) + ((char)(imm)>0x5 ? 26 : 10), \
- ((char)(imm)&0xF0) ? 43 : (char)(imm) + ((char)(imm)>0x4 ? 27 : 11), \
- ((char)(imm)&0xF0) ? 44 : (char)(imm) + ((char)(imm)>0x3 ? 28 : 12), \
- ((char)(imm)&0xF0) ? 45 : (char)(imm) + ((char)(imm)>0x2 ? 29 : 13), \
- ((char)(imm)&0xF0) ? 46 : (char)(imm) + ((char)(imm)>0x1 ? 30 : 14), \
- ((char)(imm)&0xF0) ? 47 : (char)(imm) + ((char)(imm)>0x0 ? 31 : 15), \
- ((char)(imm)&0xF0) ? 48 : (char)(imm) + ((char)(imm)>0xF ? 32 : 16), \
- ((char)(imm)&0xF0) ? 49 : (char)(imm) + ((char)(imm)>0xE ? 33 : 17), \
- ((char)(imm)&0xF0) ? 50 : (char)(imm) + ((char)(imm)>0xD ? 34 : 18), \
- ((char)(imm)&0xF0) ? 51 : (char)(imm) + ((char)(imm)>0xC ? 35 : 19), \
- ((char)(imm)&0xF0) ? 52 : (char)(imm) + ((char)(imm)>0xB ? 36 : 20), \
- ((char)(imm)&0xF0) ? 53 : (char)(imm) + ((char)(imm)>0xA ? 37 : 21), \
- ((char)(imm)&0xF0) ? 54 : (char)(imm) + ((char)(imm)>0x9 ? 38 : 22), \
- ((char)(imm)&0xF0) ? 55 : (char)(imm) + ((char)(imm)>0x8 ? 39 : 23), \
- ((char)(imm)&0xF0) ? 56 : (char)(imm) + ((char)(imm)>0x7 ? 40 : 24), \
- ((char)(imm)&0xF0) ? 57 : (char)(imm) + ((char)(imm)>0x6 ? 41 : 25), \
- ((char)(imm)&0xF0) ? 58 : (char)(imm) + ((char)(imm)>0x5 ? 42 : 26), \
- ((char)(imm)&0xF0) ? 59 : (char)(imm) + ((char)(imm)>0x4 ? 43 : 27), \
- ((char)(imm)&0xF0) ? 60 : (char)(imm) + ((char)(imm)>0x3 ? 44 : 28), \
- ((char)(imm)&0xF0) ? 61 : (char)(imm) + ((char)(imm)>0x2 ? 45 : 29), \
- ((char)(imm)&0xF0) ? 62 : (char)(imm) + ((char)(imm)>0x1 ? 46 : 30), \
- ((char)(imm)&0xF0) ? 63 : (char)(imm) + ((char)(imm)>0x0 ? 47 : 31)); })
-
-#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+#define _mm256_srli_si256(a, imm) \
+ (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
+
+#define _mm256_bsrli_epi128(a, imm) \
+ (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psrlqi256((__v4di)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psrlq256((__v4di)__a, __count);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a - (__v32qu)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a - (__v16hu)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a - (__v8su)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a - (__v4du)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_xor_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a ^ (__v4du)__b);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_stream_load_si256(__m256i const *__V)
{
typedef __v4di __v4di_aligned __attribute__((aligned(32)));
return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_broadcastss_ps(__m128 __X)
{
return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_broadcastsd_pd(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcastss_ps(__m128 __X)
{
return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_broadcastsd_pd(__m128d __X)
{
return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastsi128_si256(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
}
-#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v4si)(__m128i)(V1), \
- (__v4si)(__m128i)(V2), \
- (((M) & 0x01) ? 4 : 0), \
- (((M) & 0x02) ? 5 : 1), \
- (((M) & 0x04) ? 6 : 2), \
- (((M) & 0x08) ? 7 : 3)); })
-
-#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v8si)(__m256i)(V1), \
- (__v8si)(__m256i)(V2), \
- (((M) & 0x01) ? 8 : 0), \
- (((M) & 0x02) ? 9 : 1), \
- (((M) & 0x04) ? 10 : 2), \
- (((M) & 0x08) ? 11 : 3), \
- (((M) & 0x10) ? 12 : 4), \
- (((M) & 0x20) ? 13 : 5), \
- (((M) & 0x40) ? 14 : 6), \
- (((M) & 0x80) ? 15 : 7)); })
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+#define _mm_blend_epi32(V1, V2, M) \
+ (__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), (int)(M))
+
+#define _mm256_blend_epi32(V1, V2, M) \
+ (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastb_epi8(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastw_epi16(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastd_epi32(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastq_epi64(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastb_epi8(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastw_epi16(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastd_epi32(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastq_epi64(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
}
-#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V), \
- (__v4df)_mm256_undefined_pd(), \
- ((M) >> 0) & 0x3, \
- ((M) >> 2) & 0x3, \
- ((M) >> 4) & 0x3, \
- ((M) >> 6) & 0x3); })
+#define _mm256_permute4x64_pd(V, M) \
+ (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M))
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
{
return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
}
-#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V), \
- (__v4di)_mm256_undefined_si256(), \
- ((M) >> 0) & 0x3, \
- ((M) >> 2) & 0x3, \
- ((M) >> 4) & 0x3, \
- ((M) >> 6) & 0x3); })
-
-#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
- (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (M)); })
-
-#define _mm256_extracti128_si256(V, M) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v4di)(__m256i)(V), \
- (__v4di)_mm256_undefined_si256(), \
- (((M) & 1) ? 2 : 0), \
- (((M) & 1) ? 3 : 1) ); })
-
-#define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(V1), \
- (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
- (((M) & 1) ? 0 : 4), \
- (((M) & 1) ? 1 : 5), \
- (((M) & 1) ? 4 : 2), \
- (((M) & 1) ? 5 : 3) ); })
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+#define _mm256_permute4x64_epi64(V, M) \
+ (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M))
+
+#define _mm256_permute2x128_si256(V1, V2, M) \
+ (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))
+
+#define _mm256_extracti128_si256(V, M) \
+ (__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))
+
+#define _mm256_inserti128_si256(V1, V2, M) \
+ (__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
+ (__v2di)(__m128i)(V2), (int)(M))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi32(int const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi64(long long const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi32(int const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi64(long long const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
}
-#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
(__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
(double const *)(m), \
(__v4si)(__m128i)(i), \
- (__v2df)(__m128d)(mask), (s)); })
+ (__v2df)(__m128d)(mask), (s))
-#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
(__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
(double const *)(m), \
(__v4si)(__m128i)(i), \
- (__v4df)(__m256d)(mask), (s)); })
+ (__v4df)(__m256d)(mask), (s))
-#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
(__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
(double const *)(m), \
(__v2di)(__m128i)(i), \
- (__v2df)(__m128d)(mask), (s)); })
+ (__v2df)(__m128d)(mask), (s))
-#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
(__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
(double const *)(m), \
(__v4di)(__m256i)(i), \
- (__v4df)(__m256d)(mask), (s)); })
+ (__v4df)(__m256d)(mask), (s))
-#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
(__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
(float const *)(m), \
(__v4si)(__m128i)(i), \
- (__v4sf)(__m128)(mask), (s)); })
+ (__v4sf)(__m128)(mask), (s))
-#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
(__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
(float const *)(m), \
(__v8si)(__m256i)(i), \
- (__v8sf)(__m256)(mask), (s)); })
+ (__v8sf)(__m256)(mask), (s))
-#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
(__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
(float const *)(m), \
(__v2di)(__m128i)(i), \
- (__v4sf)(__m128)(mask), (s)); })
+ (__v4sf)(__m128)(mask), (s))
-#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
(__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
(float const *)(m), \
(__v4di)(__m256i)(i), \
- (__v4sf)(__m128)(mask), (s)); })
+ (__v4sf)(__m128)(mask), (s))
-#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
(__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
(int const *)(m), \
(__v4si)(__m128i)(i), \
- (__v4si)(__m128i)(mask), (s)); })
+ (__v4si)(__m128i)(mask), (s))
-#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
(__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
(int const *)(m), \
(__v8si)(__m256i)(i), \
- (__v8si)(__m256i)(mask), (s)); })
+ (__v8si)(__m256i)(mask), (s))
-#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
(__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
(int const *)(m), \
(__v2di)(__m128i)(i), \
- (__v4si)(__m128i)(mask), (s)); })
+ (__v4si)(__m128i)(mask), (s))
-#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
(__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
(int const *)(m), \
(__v4di)(__m256i)(i), \
- (__v4si)(__m128i)(mask), (s)); })
+ (__v4si)(__m128i)(mask), (s))
-#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
(__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
- (__v2di)(__m128i)(mask), (s)); })
+ (__v2di)(__m128i)(mask), (s))
-#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
(__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
- (__v4di)(__m256i)(mask), (s)); })
+ (__v4di)(__m256i)(mask), (s))
-#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
(__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
(long long const *)(m), \
(__v2di)(__m128i)(i), \
- (__v2di)(__m128i)(mask), (s)); })
+ (__v2di)(__m128i)(mask), (s))
-#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
(__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
(long long const *)(m), \
(__v4di)(__m256i)(i), \
- (__v4di)(__m256i)(mask), (s)); })
+ (__v4di)(__m256i)(mask), (s))
-#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
+#define _mm_i32gather_pd(m, i, s) \
(__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
(double const *)(m), \
(__v4si)(__m128i)(i), \
(__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
_mm_setzero_pd()), \
- (s)); })
+ (s))
-#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
+#define _mm256_i32gather_pd(m, i, s) \
(__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
(double const *)(m), \
(__v4si)(__m128i)(i), \
(__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
_mm256_setzero_pd(), \
_CMP_EQ_OQ), \
- (s)); })
+ (s))
-#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
+#define _mm_i64gather_pd(m, i, s) \
(__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
(double const *)(m), \
(__v2di)(__m128i)(i), \
(__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
_mm_setzero_pd()), \
- (s)); })
+ (s))
-#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
+#define _mm256_i64gather_pd(m, i, s) \
(__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
(double const *)(m), \
(__v4di)(__m256i)(i), \
(__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
_mm256_setzero_pd(), \
_CMP_EQ_OQ), \
- (s)); })
+ (s))
-#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
+#define _mm_i32gather_ps(m, i, s) \
(__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
(__v4si)(__m128i)(i), \
(__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
_mm_setzero_ps()), \
- (s)); })
+ (s))
-#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
+#define _mm256_i32gather_ps(m, i, s) \
(__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
(float const *)(m), \
(__v8si)(__m256i)(i), \
(__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
_mm256_setzero_ps(), \
_CMP_EQ_OQ), \
- (s)); })
+ (s))
-#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
+#define _mm_i64gather_ps(m, i, s) \
(__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
(__v2di)(__m128i)(i), \
(__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
_mm_setzero_ps()), \
- (s)); })
+ (s))
-#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
+#define _mm256_i64gather_ps(m, i, s) \
(__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
(float const *)(m), \
(__v4di)(__m256i)(i), \
(__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
_mm_setzero_ps()), \
- (s)); })
+ (s))
-#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
+#define _mm_i32gather_epi32(m, i, s) \
(__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v4si)(__m128i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s)); })
+ (__v4si)_mm_set1_epi32(-1), (s))
-#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
+#define _mm256_i32gather_epi32(m, i, s) \
(__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
(int const *)(m), (__v8si)(__m256i)(i), \
- (__v8si)_mm256_set1_epi32(-1), (s)); })
+ (__v8si)_mm256_set1_epi32(-1), (s))
-#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
+#define _mm_i64gather_epi32(m, i, s) \
(__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v2di)(__m128i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s)); })
+ (__v4si)_mm_set1_epi32(-1), (s))
-#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
+#define _mm256_i64gather_epi32(m, i, s) \
(__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
(int const *)(m), (__v4di)(__m256i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s)); })
+ (__v4si)_mm_set1_epi32(-1), (s))
-#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
+#define _mm_i32gather_epi64(m, i, s) \
(__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
- (__v2di)_mm_set1_epi64x(-1), (s)); })
+ (__v2di)_mm_set1_epi64x(-1), (s))
-#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
+#define _mm256_i32gather_epi64(m, i, s) \
(__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
(long long const *)(m), \
(__v4si)(__m128i)(i), \
- (__v4di)_mm256_set1_epi64x(-1), (s)); })
+ (__v4di)_mm256_set1_epi64x(-1), (s))
-#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
+#define _mm_i64gather_epi64(m, i, s) \
(__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
(long long const *)(m), \
(__v2di)(__m128i)(i), \
- (__v2di)_mm_set1_epi64x(-1), (s)); })
+ (__v2di)_mm_set1_epi64x(-1), (s))
-#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
+#define _mm256_i64gather_epi64(m, i, s) \
(__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
(long long const *)(m), \
(__v4di)(__m256i)(i), \
- (__v4di)_mm256_set1_epi64x(-1), (s)); })
+ (__v4di)_mm256_set1_epi64x(-1), (s))
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128
#endif /* __AVX2INTRIN_H */
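The avx2intrin.h hunks above make two moves: GNU statement-expression macros (`__extension__ ({ ... ; })`) become plain parenthesized expressions, and the single `__DEFAULT_FN_ATTRS` is retired in favor of width-specific `__DEFAULT_FN_ATTRS128`/`__DEFAULT_FN_ATTRS256`. A minimal sketch of the two macro styles, using a hypothetical DOUBLE helper rather than anything from the header; plain expression macros need no GNU extension and compose freely inside other expression macros:

#include <stdio.h>

/* Hypothetical helpers, not from the header: the old statement-expression
 * style next to the new plain-expression style. Both print 42 below, but
 * only the first needs the GNU ({ ... }) extension. */
#define DOUBLE_STMT_EXPR(x) __extension__ ({ int __t = (x); __t + __t; })
#define DOUBLE_PLAIN(x)     ((x) + (x))

int main(void) {
  printf("%d %d\n", DOUBLE_STMT_EXPR(21), DOUBLE_PLAIN(21));
  return 0;
}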
diff --git a/lib/Headers/avx512bitalgintrin.h b/lib/Headers/avx512bitalgintrin.h
index 2dd1471d2f7e..56046f8c4999 100644
--- a/lib/Headers/avx512bitalgintrin.h
+++ b/lib/Headers/avx512bitalgintrin.h
@@ -29,7 +29,7 @@
#define __AVX512BITALGINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bitalg")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bitalg"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_popcnt_epi16(__m512i __A)
@@ -48,7 +48,7 @@ _mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B)
{
- return _mm512_mask_popcnt_epi16((__m512i) _mm512_setzero_hi(),
+ return _mm512_mask_popcnt_epi16((__m512i) _mm512_setzero_si512(),
__U,
__B);
}
@@ -70,7 +70,7 @@ _mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B)
{
- return _mm512_mask_popcnt_epi8((__m512i) _mm512_setzero_qi(),
+ return _mm512_mask_popcnt_epi8((__m512i) _mm512_setzero_si512(),
__U,
__B);
}
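Both hunks in this file make the same two changes seen throughout the patch: `__min_vector_width__(512)` is appended to the default function attributes, and the nonstandard `_mm512_setzero_hi`/`_mm512_setzero_qi` helpers give way to the standard `_mm512_setzero_si512`. A sketch of the attribute pattern on a hypothetical helper (assumes Clang, whose `__min_vector_width__` attribute keeps a 512-bit body from being narrowed under `-mprefer-vector-width=256`):

/* Sketch only: a hypothetical 512-bit helper tagged the same way the
 * patched headers tag their intrinsics. */
typedef long long __v8di_demo __attribute__((__vector_size__(64)));

static __inline__ __v8di_demo
__attribute__((__always_inline__, __nodebug__, __target__("avx512f"),
               __min_vector_width__(512)))
demo_add_epi64(__v8di_demo __a, __v8di_demo __b) {
  return __a + __b;
}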
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index 3ff0e3aafdcc..fc4632374977 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -32,69 +32,49 @@ typedef unsigned int __mmask32;
typedef unsigned long long __mmask64;
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
-
-static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_setzero_qi(void) {
- return (__m512i)(__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_setzero_hi(void) {
- return (__m512i)(__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 };
-}
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
/* Integer compare */
-#define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epi8_mask(a, b, p) \
(__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
(__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)-1); })
+ (__mmask64)-1)
-#define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \
(__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
(__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)(m)); })
+ (__mmask64)(m))
-#define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epu8_mask(a, b, p) \
(__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
(__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)-1); })
+ (__mmask64)-1)
-#define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \
(__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
(__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)(m)); })
+ (__mmask64)(m))
-#define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epi16_mask(a, b, p) \
(__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
(__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)-1); })
+ (__mmask32)-1)
-#define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \
(__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
(__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)(m)); })
+ (__mmask32)(m))
-#define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epu16_mask(a, b, p) \
(__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
(__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)-1); })
+ (__mmask32)-1)
-#define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \
(__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
(__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)(m)); })
+ (__mmask32)(m))
#define _mm512_cmpeq_epi8_mask(A, B) \
_mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
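The compare macros above now expand to bare builtin calls, but their result is unchanged: a lane-per-bit mask. A scalar model of what `_mm512_cmp_epi8_mask` computes for the `_MM_CMPINT_EQ` predicate (an illustration, not the header's implementation):

#include <stdint.h>

/* One result bit per byte lane, so 64 lanes pack into a 64-bit mask. */
static uint64_t cmpeq_epi8_mask_model(const int8_t a[64], const int8_t b[64]) {
  uint64_t mask = 0;
  for (int lane = 0; lane < 64; ++lane)
    if (a[lane] == b[lane])
      mask |= 1ULL << lane;   /* lane i sets bit i of the __mmask64 */
  return mask;
}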
@@ -212,7 +192,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_add_epi8(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -231,7 +211,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_sub_epi8(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -250,7 +230,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_add_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -269,7 +249,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_sub_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -288,7 +268,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mullo_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
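From here on, most `mask_*`/`maskz_*` bodies are rewritten as an unmasked operation wrapped in a `__builtin_ia32_select*_512` call. A scalar model of that select step (sketch only): bit i of the mask chooses lane i of the computed result, otherwise the fallback, which is the passthrough `__W` for `mask_*` and zero for `maskz_*`.

#include <stdint.h>

/* Scalar model of the selectw_512 merge used by the 16-bit-lane forms. */
static void selectw_512_model(uint32_t mask, const int16_t result[32],
                              const int16_t fallback[32], int16_t out[32]) {
  for (int lane = 0; lane < 32; ++lane)
    out[lane] = (mask >> lane) & 1 ? result[lane] : fallback[lane];
}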
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -310,49 +290,45 @@ _mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_abs_epi8 (__m512i __A)
{
- return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_abs_epi8(__A),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsb512_mask ((__v64qi) __A,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_abs_epi8(__A),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_abs_epi16 (__m512i __A)
{
- return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_abs_epi16(__A),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsw512_mask ((__v32hi) __A,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_abs_epi16(__A),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -366,7 +342,7 @@ _mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
(__v32hi)_mm512_packs_epi32(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -396,7 +372,7 @@ _mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
(__v64qi)_mm512_packs_epi16(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -410,7 +386,7 @@ _mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
(__v32hi)_mm512_packus_epi32(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -440,7 +416,7 @@ _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
(__v64qi)_mm512_packus_epi16(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -448,7 +424,7 @@ _mm512_adds_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) -1);
}
@@ -467,7 +443,7 @@ _mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) __U);
}
@@ -476,7 +452,7 @@ _mm512_adds_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) -1);
}
@@ -495,7 +471,7 @@ _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) __U);
}
@@ -504,7 +480,7 @@ _mm512_adds_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) -1);
}
@@ -523,7 +499,7 @@ _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) __U);
}
@@ -532,7 +508,7 @@ _mm512_adds_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) -1);
}
@@ -551,7 +527,7 @@ _mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) __U);
}
@@ -579,7 +555,7 @@ _mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_avg_epu8(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -606,231 +582,184 @@ _mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_avg_epu16(__A, __B),
- (__v32hi) _mm512_setzero_hi());
+ (__v32hi) _mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_max_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_max_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_max_epi8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_max_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_max_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
__m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_max_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_max_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_max_epu8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxub512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_max_epu8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_max_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_max_epu16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_max_epu16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_min_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_min_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_min_epi8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_min_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_min_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_min_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_min_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_min_epu8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminub512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_min_epu8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_min_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_min_epu16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
- __m512i __B)
+_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_min_epu16(__A, __B),
+ (__v32hi)__W);
}
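The min/max family follows the same recipe: the old three-operand masked builtins become a plain two-operand builtin plus a select, which presumably lets the compiler treat the arithmetic and the masking independently. A per-lane reference for the rewritten `_mm512_mask_max_epi8` (a sketch, not the header code):

#include <stdint.h>

/* Compute max unconditionally, then merge with the passthrough under the
 * mask -- the same result the old pmaxsb512_mask builtin produced. */
static void mask_max_epi8_model(uint64_t m, const int8_t w[64],
                                const int8_t a[64], const int8_t b[64],
                                int8_t out[64]) {
  for (int lane = 0; lane < 64; ++lane) {
    int8_t mx = a[lane] > b[lane] ? a[lane] : b[lane];
    out[lane] = (m >> lane) & 1 ? mx : w[lane];
  }
}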
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -852,7 +781,7 @@ _mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_shuffle_epi8(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -860,7 +789,7 @@ _mm512_subs_epi8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) -1);
}
@@ -879,7 +808,7 @@ _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) __U);
}
@@ -888,7 +817,7 @@ _mm512_subs_epi16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) -1);
}
@@ -907,7 +836,7 @@ _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) __U);
}
@@ -916,7 +845,7 @@ _mm512_subs_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) -1);
}
@@ -935,7 +864,7 @@ _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
(__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
(__mmask64) __U);
}
@@ -944,7 +873,7 @@ _mm512_subs_epu16 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) -1);
}
@@ -963,182 +892,148 @@ _mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
(__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
(__mmask32) __U);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask2_permutex2var_epi16 (__m512i __A, __m512i __I,
- __mmask32 __U, __m512i __B)
+_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermi2varhi512_mask ((__v32hi) __A,
- (__v32hi) __I /* idx */ ,
- (__v32hi) __B,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
+ (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_permutex2var_epi16 (__m512i __A, __m512i __I, __m512i __B)
+_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
- (__v32hi) __A,
- (__v32hi) __B,
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
+ (__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_permutex2var_epi16 (__m512i __A, __mmask32 __U,
- __m512i __I, __m512i __B)
+_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varhi512_mask ((__v32hi) __I /* idx */,
- (__v32hi) __A,
- (__v32hi) __B,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
+ (__v32hi)__I);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_permutex2var_epi16 (__mmask32 __U, __m512i __A,
- __m512i __I, __m512i __B)
+_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varhi512_maskz ((__v32hi) __I
- /* idx */ ,
- (__v32hi) __A,
- (__v32hi) __B,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mulhrs_epi16 (__m512i __A, __m512i __B)
+_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mulhrs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mulhrs_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mulhrs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mulhrs_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mulhi_epi16 (__m512i __A, __m512i __B)
+_mm512_mulhi_epi16(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mulhi_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
+_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
__m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mulhi_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mulhi_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mulhi_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mulhi_epu16 (__m512i __A, __m512i __B)
+_mm512_mulhi_epu16(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mulhi_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mulhi_epu16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulhuw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mulhi_epu16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maddubs_epi16 (__m512i __X, __m512i __Y) {
- return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
- (__v64qi) __Y,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) -1);
+_mm512_maddubs_epi16(__m512i __X, __m512i __Y) {
+ return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_maddubs_epi16 (__m512i __W, __mmask32 __U, __m512i __X,
- __m512i __Y) {
- return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
- (__v64qi) __Y,
- (__v32hi) __W,
- (__mmask32) __U);
+_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X,
+ __m512i __Y) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
+ (__v32hi)_mm512_maddubs_epi16(__X, __Y),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_maddubs_epi16 (__mmask32 __U, __m512i __X, __m512i __Y) {
- return (__m512i) __builtin_ia32_pmaddubsw512_mask ((__v64qi) __X,
- (__v64qi) __Y,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
+ (__v32hi)_mm512_maddubs_epi16(__X, __Y),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_madd_epi16 (__m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v16si) _mm512_setzero_si512(),
- (__mmask16) -1);
+_mm512_madd_epi16(__m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_madd_epi16 (__m512i __W, __mmask16 __U, __m512i __A,
- __m512i __B) {
- return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v16si) __W,
- (__mmask16) __U);
+_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_madd_epi16(__A, __B),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_madd_epi16 (__mmask16 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_pmaddwd512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v16si) _mm512_setzero_si512(),
- (__mmask16) __U);
+_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_madd_epi16(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
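`permutex2var_epi16` is likewise reduced to one unmasked builtin; note the fallback operands in the new selects: `mask_` merges into the first source `__A`, while `mask2_` merges into the index vector `__I`. A scalar model of the unmasked permute (sketch; with 32 lanes, six index bits are consumed and bit 5 picks the table):

#include <stdint.h>

/* Each 16-bit result lane picks element I[lane] from the concatenation
 * of __A (indices 0..31) and __B (indices 32..63). */
static void permutex2var_epi16_model(const int16_t a[32],
                                     const uint16_t idx[32],
                                     const int16_t b[32], int16_t out[32]) {
  for (int lane = 0; lane < 32; ++lane) {
    unsigned sel = idx[lane] & 63;
    out[lane] = sel < 32 ? a[sel] : b[sel - 32];
  }
}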
static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -1186,7 +1081,7 @@ _mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm512_cvtepi16_epi8 (__m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
- (__v32qi) _mm256_setzero_si256(),
+ (__v32qi) _mm256_undefined_si256(),
(__mmask32) -1);
}
@@ -1254,7 +1149,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_unpackhi_epi8(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1281,7 +1176,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_unpackhi_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1316,7 +1211,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_unpacklo_epi8(__A, __B),
- (__v64qi)_mm512_setzero_qi());
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1343,7 +1238,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_unpacklo_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1367,7 +1262,7 @@ _mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_cvtepi8_epi16(__A),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1389,83 +1284,41 @@ _mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_cvtepu8_epi16(__A),
- (__v32hi)_mm512_setzero_hi());
-}
-
-
-#define _mm512_shufflehi_epi16(A, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \
- (__v32hi)_mm512_undefined_epi32(), \
- 0, 1, 2, 3, \
- 4 + (((imm) >> 0) & 0x3), \
- 4 + (((imm) >> 2) & 0x3), \
- 4 + (((imm) >> 4) & 0x3), \
- 4 + (((imm) >> 6) & 0x3), \
- 8, 9, 10, 11, \
- 12 + (((imm) >> 0) & 0x3), \
- 12 + (((imm) >> 2) & 0x3), \
- 12 + (((imm) >> 4) & 0x3), \
- 12 + (((imm) >> 6) & 0x3), \
- 16, 17, 18, 19, \
- 20 + (((imm) >> 0) & 0x3), \
- 20 + (((imm) >> 2) & 0x3), \
- 20 + (((imm) >> 4) & 0x3), \
- 20 + (((imm) >> 6) & 0x3), \
- 24, 25, 26, 27, \
- 28 + (((imm) >> 0) & 0x3), \
- 28 + (((imm) >> 2) & 0x3), \
- 28 + (((imm) >> 4) & 0x3), \
- 28 + (((imm) >> 6) & 0x3)); })
-
-#define _mm512_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+ (__v32hi)_mm512_setzero_si512());
+}
+
+
+#define _mm512_shufflehi_epi16(A, imm) \
+ (__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm))
+
+#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \
(__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_shufflehi_epi16((A), \
(imm)), \
- (__v32hi)(__m512i)(W)); })
+ (__v32hi)(__m512i)(W))
-#define _mm512_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+#define _mm512_maskz_shufflehi_epi16(U, A, imm) \
(__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_shufflehi_epi16((A), \
(imm)), \
- (__v32hi)_mm512_setzero_hi()); })
-
-#define _mm512_shufflelo_epi16(A, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v32hi)(__m512i)(A), \
- (__v32hi)_mm512_undefined_epi32(), \
- 0 + (((imm) >> 0) & 0x3), \
- 0 + (((imm) >> 2) & 0x3), \
- 0 + (((imm) >> 4) & 0x3), \
- 0 + (((imm) >> 6) & 0x3), \
- 4, 5, 6, 7, \
- 8 + (((imm) >> 0) & 0x3), \
- 8 + (((imm) >> 2) & 0x3), \
- 8 + (((imm) >> 4) & 0x3), \
- 8 + (((imm) >> 6) & 0x3), \
- 12, 13, 14, 15, \
- 16 + (((imm) >> 0) & 0x3), \
- 16 + (((imm) >> 2) & 0x3), \
- 16 + (((imm) >> 4) & 0x3), \
- 16 + (((imm) >> 6) & 0x3), \
- 20, 21, 22, 23, \
- 24 + (((imm) >> 0) & 0x3), \
- 24 + (((imm) >> 2) & 0x3), \
- 24 + (((imm) >> 4) & 0x3), \
- 24 + (((imm) >> 6) & 0x3), \
- 28, 29, 30, 31); })
-
-
-#define _mm512_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+ (__v32hi)_mm512_setzero_si512())
+
+#define _mm512_shufflelo_epi16(A, imm) \
+ (__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm))
+
+
+#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \
(__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_shufflelo_epi16((A), \
(imm)), \
- (__v32hi)(__m512i)(W)); })
+ (__v32hi)(__m512i)(W))
-#define _mm512_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+#define _mm512_maskz_shufflelo_epi16(U, A, imm) \
(__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_shufflelo_epi16((A), \
(imm)), \
- (__v32hi)_mm512_setzero_hi()); })
+ (__v32hi)_mm512_setzero_si512())
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_sllv_epi16(__m512i __A, __m512i __B)
@@ -1486,7 +1339,7 @@ _mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_sllv_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1508,7 +1361,7 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_sll_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1530,77 +1383,11 @@ _mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_slli_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
-}
-
-#define _mm512_bslli_epi128(a, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector( \
- (__v64qi)_mm512_setzero_si512(), \
- (__v64qi)(__m512i)(a), \
- ((char)(imm)&0xF0) ? 0 : ((char)(imm)>0x0 ? 16 : 64) - (char)(imm), \
- ((char)(imm)&0xF0) ? 1 : ((char)(imm)>0x1 ? 17 : 65) - (char)(imm), \
- ((char)(imm)&0xF0) ? 2 : ((char)(imm)>0x2 ? 18 : 66) - (char)(imm), \
- ((char)(imm)&0xF0) ? 3 : ((char)(imm)>0x3 ? 19 : 67) - (char)(imm), \
- ((char)(imm)&0xF0) ? 4 : ((char)(imm)>0x4 ? 20 : 68) - (char)(imm), \
- ((char)(imm)&0xF0) ? 5 : ((char)(imm)>0x5 ? 21 : 69) - (char)(imm), \
- ((char)(imm)&0xF0) ? 6 : ((char)(imm)>0x6 ? 22 : 70) - (char)(imm), \
- ((char)(imm)&0xF0) ? 7 : ((char)(imm)>0x7 ? 23 : 71) - (char)(imm), \
- ((char)(imm)&0xF0) ? 8 : ((char)(imm)>0x8 ? 24 : 72) - (char)(imm), \
- ((char)(imm)&0xF0) ? 9 : ((char)(imm)>0x9 ? 25 : 73) - (char)(imm), \
- ((char)(imm)&0xF0) ? 10 : ((char)(imm)>0xA ? 26 : 74) - (char)(imm), \
- ((char)(imm)&0xF0) ? 11 : ((char)(imm)>0xB ? 27 : 75) - (char)(imm), \
- ((char)(imm)&0xF0) ? 12 : ((char)(imm)>0xC ? 28 : 76) - (char)(imm), \
- ((char)(imm)&0xF0) ? 13 : ((char)(imm)>0xD ? 29 : 77) - (char)(imm), \
- ((char)(imm)&0xF0) ? 14 : ((char)(imm)>0xE ? 30 : 78) - (char)(imm), \
- ((char)(imm)&0xF0) ? 15 : ((char)(imm)>0xF ? 31 : 79) - (char)(imm), \
- ((char)(imm)&0xF0) ? 16 : ((char)(imm)>0x0 ? 32 : 80) - (char)(imm), \
- ((char)(imm)&0xF0) ? 17 : ((char)(imm)>0x1 ? 33 : 81) - (char)(imm), \
- ((char)(imm)&0xF0) ? 18 : ((char)(imm)>0x2 ? 34 : 82) - (char)(imm), \
- ((char)(imm)&0xF0) ? 19 : ((char)(imm)>0x3 ? 35 : 83) - (char)(imm), \
- ((char)(imm)&0xF0) ? 20 : ((char)(imm)>0x4 ? 36 : 84) - (char)(imm), \
- ((char)(imm)&0xF0) ? 21 : ((char)(imm)>0x5 ? 37 : 85) - (char)(imm), \
- ((char)(imm)&0xF0) ? 22 : ((char)(imm)>0x6 ? 38 : 86) - (char)(imm), \
- ((char)(imm)&0xF0) ? 23 : ((char)(imm)>0x7 ? 39 : 87) - (char)(imm), \
- ((char)(imm)&0xF0) ? 24 : ((char)(imm)>0x8 ? 40 : 88) - (char)(imm), \
- ((char)(imm)&0xF0) ? 25 : ((char)(imm)>0x9 ? 41 : 89) - (char)(imm), \
- ((char)(imm)&0xF0) ? 26 : ((char)(imm)>0xA ? 42 : 90) - (char)(imm), \
- ((char)(imm)&0xF0) ? 27 : ((char)(imm)>0xB ? 43 : 91) - (char)(imm), \
- ((char)(imm)&0xF0) ? 28 : ((char)(imm)>0xC ? 44 : 92) - (char)(imm), \
- ((char)(imm)&0xF0) ? 29 : ((char)(imm)>0xD ? 45 : 93) - (char)(imm), \
- ((char)(imm)&0xF0) ? 30 : ((char)(imm)>0xE ? 46 : 94) - (char)(imm), \
- ((char)(imm)&0xF0) ? 31 : ((char)(imm)>0xF ? 47 : 95) - (char)(imm), \
- ((char)(imm)&0xF0) ? 32 : ((char)(imm)>0x0 ? 48 : 96) - (char)(imm), \
- ((char)(imm)&0xF0) ? 33 : ((char)(imm)>0x1 ? 49 : 97) - (char)(imm), \
- ((char)(imm)&0xF0) ? 34 : ((char)(imm)>0x2 ? 50 : 98) - (char)(imm), \
- ((char)(imm)&0xF0) ? 35 : ((char)(imm)>0x3 ? 51 : 99) - (char)(imm), \
- ((char)(imm)&0xF0) ? 36 : ((char)(imm)>0x4 ? 52 : 100) - (char)(imm), \
- ((char)(imm)&0xF0) ? 37 : ((char)(imm)>0x5 ? 53 : 101) - (char)(imm), \
- ((char)(imm)&0xF0) ? 38 : ((char)(imm)>0x6 ? 54 : 102) - (char)(imm), \
- ((char)(imm)&0xF0) ? 39 : ((char)(imm)>0x7 ? 55 : 103) - (char)(imm), \
- ((char)(imm)&0xF0) ? 40 : ((char)(imm)>0x8 ? 56 : 104) - (char)(imm), \
- ((char)(imm)&0xF0) ? 41 : ((char)(imm)>0x9 ? 57 : 105) - (char)(imm), \
- ((char)(imm)&0xF0) ? 42 : ((char)(imm)>0xA ? 58 : 106) - (char)(imm), \
- ((char)(imm)&0xF0) ? 43 : ((char)(imm)>0xB ? 59 : 107) - (char)(imm), \
- ((char)(imm)&0xF0) ? 44 : ((char)(imm)>0xC ? 60 : 108) - (char)(imm), \
- ((char)(imm)&0xF0) ? 45 : ((char)(imm)>0xD ? 61 : 109) - (char)(imm), \
- ((char)(imm)&0xF0) ? 46 : ((char)(imm)>0xE ? 62 : 110) - (char)(imm), \
- ((char)(imm)&0xF0) ? 47 : ((char)(imm)>0xF ? 63 : 111) - (char)(imm), \
- ((char)(imm)&0xF0) ? 48 : ((char)(imm)>0x0 ? 64 : 112) - (char)(imm), \
- ((char)(imm)&0xF0) ? 49 : ((char)(imm)>0x1 ? 65 : 113) - (char)(imm), \
- ((char)(imm)&0xF0) ? 50 : ((char)(imm)>0x2 ? 66 : 114) - (char)(imm), \
- ((char)(imm)&0xF0) ? 51 : ((char)(imm)>0x3 ? 67 : 115) - (char)(imm), \
- ((char)(imm)&0xF0) ? 52 : ((char)(imm)>0x4 ? 68 : 116) - (char)(imm), \
- ((char)(imm)&0xF0) ? 53 : ((char)(imm)>0x5 ? 69 : 117) - (char)(imm), \
- ((char)(imm)&0xF0) ? 54 : ((char)(imm)>0x6 ? 70 : 118) - (char)(imm), \
- ((char)(imm)&0xF0) ? 55 : ((char)(imm)>0x7 ? 71 : 119) - (char)(imm), \
- ((char)(imm)&0xF0) ? 56 : ((char)(imm)>0x8 ? 72 : 120) - (char)(imm), \
- ((char)(imm)&0xF0) ? 57 : ((char)(imm)>0x9 ? 73 : 121) - (char)(imm), \
- ((char)(imm)&0xF0) ? 58 : ((char)(imm)>0xA ? 74 : 122) - (char)(imm), \
- ((char)(imm)&0xF0) ? 59 : ((char)(imm)>0xB ? 75 : 123) - (char)(imm), \
- ((char)(imm)&0xF0) ? 60 : ((char)(imm)>0xC ? 76 : 124) - (char)(imm), \
- ((char)(imm)&0xF0) ? 61 : ((char)(imm)>0xD ? 77 : 125) - (char)(imm), \
- ((char)(imm)&0xF0) ? 62 : ((char)(imm)>0xE ? 78 : 126) - (char)(imm), \
- ((char)(imm)&0xF0) ? 63 : ((char)(imm)>0xF ? 79 : 127) - (char)(imm)); })
+ (__v32hi)_mm512_setzero_si512());
+}
+
+#define _mm512_bslli_epi128(a, imm) \
+ (__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
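The removed `_mm512_bslli_epi128` body spelled the byte shift out as a 64-index `__builtin_shufflevector`; the dedicated byteshift builtin keeps `imm` as the required integer constant and hides the index arithmetic. A scalar model of the operation (sketch): the shift runs independently within each 128-bit lane, and any count above 15 clears the lane, matching the removed code's `(char)(imm)&0xF0` checks.

#include <stdint.h>
#include <string.h>

static void bslli_epi128_model(const uint8_t in[64], int imm, uint8_t out[64]) {
  memset(out, 0, 64);
  if ((unsigned)imm > 15)
    return;                          /* counts 16..255 zero every lane */
  for (int lane = 0; lane < 4; ++lane)
    for (int byte = imm; byte < 16; ++byte)
      out[lane * 16 + byte] = in[lane * 16 + byte - imm];
}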
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_srlv_epi16(__m512i __A, __m512i __B)
@@ -1621,7 +1408,7 @@ _mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srlv_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1643,7 +1430,7 @@ _mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srav_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1665,7 +1452,7 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_sra_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1687,7 +1474,7 @@ _mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srai_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1709,7 +1496,7 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srl_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1731,77 +1518,11 @@ _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_srli_epi16(__A, __B),
- (__v32hi)_mm512_setzero_hi());
-}
-
-#define _mm512_bsrli_epi128(a, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector( \
- (__v64qi)(__m512i)(a), \
- (__v64qi)_mm512_setzero_si512(), \
- ((char)(imm)&0xF0) ? 64 : (char)(imm) + ((char)(imm)>0xF ? 48 : 0), \
- ((char)(imm)&0xF0) ? 65 : (char)(imm) + ((char)(imm)>0xE ? 49 : 1), \
- ((char)(imm)&0xF0) ? 66 : (char)(imm) + ((char)(imm)>0xD ? 50 : 2), \
- ((char)(imm)&0xF0) ? 67 : (char)(imm) + ((char)(imm)>0xC ? 51 : 3), \
- ((char)(imm)&0xF0) ? 68 : (char)(imm) + ((char)(imm)>0xB ? 52 : 4), \
- ((char)(imm)&0xF0) ? 69 : (char)(imm) + ((char)(imm)>0xA ? 53 : 5), \
- ((char)(imm)&0xF0) ? 70 : (char)(imm) + ((char)(imm)>0x9 ? 54 : 6), \
- ((char)(imm)&0xF0) ? 71 : (char)(imm) + ((char)(imm)>0x8 ? 55 : 7), \
- ((char)(imm)&0xF0) ? 72 : (char)(imm) + ((char)(imm)>0x7 ? 56 : 8), \
- ((char)(imm)&0xF0) ? 73 : (char)(imm) + ((char)(imm)>0x6 ? 57 : 9), \
- ((char)(imm)&0xF0) ? 74 : (char)(imm) + ((char)(imm)>0x5 ? 58 : 10), \
- ((char)(imm)&0xF0) ? 75 : (char)(imm) + ((char)(imm)>0x4 ? 59 : 11), \
- ((char)(imm)&0xF0) ? 76 : (char)(imm) + ((char)(imm)>0x3 ? 60 : 12), \
- ((char)(imm)&0xF0) ? 77 : (char)(imm) + ((char)(imm)>0x2 ? 61 : 13), \
- ((char)(imm)&0xF0) ? 78 : (char)(imm) + ((char)(imm)>0x1 ? 62 : 14), \
- ((char)(imm)&0xF0) ? 79 : (char)(imm) + ((char)(imm)>0x0 ? 63 : 15), \
- ((char)(imm)&0xF0) ? 80 : (char)(imm) + ((char)(imm)>0xF ? 64 : 16), \
- ((char)(imm)&0xF0) ? 81 : (char)(imm) + ((char)(imm)>0xE ? 65 : 17), \
- ((char)(imm)&0xF0) ? 82 : (char)(imm) + ((char)(imm)>0xD ? 66 : 18), \
- ((char)(imm)&0xF0) ? 83 : (char)(imm) + ((char)(imm)>0xC ? 67 : 19), \
- ((char)(imm)&0xF0) ? 84 : (char)(imm) + ((char)(imm)>0xB ? 68 : 20), \
- ((char)(imm)&0xF0) ? 85 : (char)(imm) + ((char)(imm)>0xA ? 69 : 21), \
- ((char)(imm)&0xF0) ? 86 : (char)(imm) + ((char)(imm)>0x9 ? 70 : 22), \
- ((char)(imm)&0xF0) ? 87 : (char)(imm) + ((char)(imm)>0x8 ? 71 : 23), \
- ((char)(imm)&0xF0) ? 88 : (char)(imm) + ((char)(imm)>0x7 ? 72 : 24), \
- ((char)(imm)&0xF0) ? 89 : (char)(imm) + ((char)(imm)>0x6 ? 73 : 25), \
- ((char)(imm)&0xF0) ? 90 : (char)(imm) + ((char)(imm)>0x5 ? 74 : 26), \
- ((char)(imm)&0xF0) ? 91 : (char)(imm) + ((char)(imm)>0x4 ? 75 : 27), \
- ((char)(imm)&0xF0) ? 92 : (char)(imm) + ((char)(imm)>0x3 ? 76 : 28), \
- ((char)(imm)&0xF0) ? 93 : (char)(imm) + ((char)(imm)>0x2 ? 77 : 29), \
- ((char)(imm)&0xF0) ? 94 : (char)(imm) + ((char)(imm)>0x1 ? 78 : 30), \
- ((char)(imm)&0xF0) ? 95 : (char)(imm) + ((char)(imm)>0x0 ? 79 : 31), \
- ((char)(imm)&0xF0) ? 96 : (char)(imm) + ((char)(imm)>0xF ? 80 : 32), \
- ((char)(imm)&0xF0) ? 97 : (char)(imm) + ((char)(imm)>0xE ? 81 : 33), \
- ((char)(imm)&0xF0) ? 98 : (char)(imm) + ((char)(imm)>0xD ? 82 : 34), \
- ((char)(imm)&0xF0) ? 99 : (char)(imm) + ((char)(imm)>0xC ? 83 : 35), \
- ((char)(imm)&0xF0) ? 100 : (char)(imm) + ((char)(imm)>0xB ? 84 : 36), \
- ((char)(imm)&0xF0) ? 101 : (char)(imm) + ((char)(imm)>0xA ? 85 : 37), \
- ((char)(imm)&0xF0) ? 102 : (char)(imm) + ((char)(imm)>0x9 ? 86 : 38), \
- ((char)(imm)&0xF0) ? 103 : (char)(imm) + ((char)(imm)>0x8 ? 87 : 39), \
- ((char)(imm)&0xF0) ? 104 : (char)(imm) + ((char)(imm)>0x7 ? 88 : 40), \
- ((char)(imm)&0xF0) ? 105 : (char)(imm) + ((char)(imm)>0x6 ? 89 : 41), \
- ((char)(imm)&0xF0) ? 106 : (char)(imm) + ((char)(imm)>0x5 ? 90 : 42), \
- ((char)(imm)&0xF0) ? 107 : (char)(imm) + ((char)(imm)>0x4 ? 91 : 43), \
- ((char)(imm)&0xF0) ? 108 : (char)(imm) + ((char)(imm)>0x3 ? 92 : 44), \
- ((char)(imm)&0xF0) ? 109 : (char)(imm) + ((char)(imm)>0x2 ? 93 : 45), \
- ((char)(imm)&0xF0) ? 110 : (char)(imm) + ((char)(imm)>0x1 ? 94 : 46), \
- ((char)(imm)&0xF0) ? 111 : (char)(imm) + ((char)(imm)>0x0 ? 95 : 47), \
- ((char)(imm)&0xF0) ? 112 : (char)(imm) + ((char)(imm)>0xF ? 96 : 48), \
- ((char)(imm)&0xF0) ? 113 : (char)(imm) + ((char)(imm)>0xE ? 97 : 49), \
- ((char)(imm)&0xF0) ? 114 : (char)(imm) + ((char)(imm)>0xD ? 98 : 50), \
- ((char)(imm)&0xF0) ? 115 : (char)(imm) + ((char)(imm)>0xC ? 99 : 51), \
- ((char)(imm)&0xF0) ? 116 : (char)(imm) + ((char)(imm)>0xB ? 100 : 52), \
- ((char)(imm)&0xF0) ? 117 : (char)(imm) + ((char)(imm)>0xA ? 101 : 53), \
- ((char)(imm)&0xF0) ? 118 : (char)(imm) + ((char)(imm)>0x9 ? 102 : 54), \
- ((char)(imm)&0xF0) ? 119 : (char)(imm) + ((char)(imm)>0x8 ? 103 : 55), \
- ((char)(imm)&0xF0) ? 120 : (char)(imm) + ((char)(imm)>0x7 ? 104 : 56), \
- ((char)(imm)&0xF0) ? 121 : (char)(imm) + ((char)(imm)>0x6 ? 105 : 57), \
- ((char)(imm)&0xF0) ? 122 : (char)(imm) + ((char)(imm)>0x5 ? 106 : 58), \
- ((char)(imm)&0xF0) ? 123 : (char)(imm) + ((char)(imm)>0x4 ? 107 : 59), \
- ((char)(imm)&0xF0) ? 124 : (char)(imm) + ((char)(imm)>0x3 ? 108 : 60), \
- ((char)(imm)&0xF0) ? 125 : (char)(imm) + ((char)(imm)>0x2 ? 109 : 61), \
- ((char)(imm)&0xF0) ? 126 : (char)(imm) + ((char)(imm)>0x1 ? 110 : 62), \
- ((char)(imm)&0xF0) ? 127 : (char)(imm) + ((char)(imm)>0x0 ? 111 : 63)); })
+ (__v32hi)_mm512_setzero_si512());
+}
+
+#define _mm512_bsrli_epi128(a, imm) \
+ (__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
@@ -1816,7 +1537,7 @@ _mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
(__v32hi) __A,
- (__v32hi) _mm512_setzero_hi ());
+ (__v32hi) _mm512_setzero_si512 ());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1832,7 +1553,7 @@ _mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
(__v64qi) __A,
- (__v64qi) _mm512_setzero_hi ());
+ (__v64qi) _mm512_setzero_si512 ());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1854,13 +1575,15 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
{
- return (__mmask64) (( __A & 0xFFFFFFFF) | ( __B << 32));
+ return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
+ (__mmask64) __B);
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
{
-return (__mmask32) (( __A & 0xFFFF) | ( __B << 16));
+ return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+ (__mmask32) __B);
}
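The kunpack helpers stop being open-coded shift-and-or expressions and call the mask builtins instead, which pins down the operand order. A scalar sketch of `_mm512_kunpackd` following Intel's documented semantics (assumed here: low half from `__B`, high half from `__A`):

#include <stdint.h>

static uint64_t kunpackd_model(uint64_t a, uint64_t b) {
  return (b & 0xFFFFFFFFULL) | ((a & 0xFFFFFFFFULL) << 32);
}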
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1876,7 +1599,7 @@ _mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
(__v32hi)
- _mm512_setzero_hi (),
+ _mm512_setzero_si512 (),
(__mmask32) __U);
}
@@ -1893,7 +1616,7 @@ _mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
(__v64qi)
- _mm512_setzero_hi (),
+ _mm512_setzero_si512 (),
(__mmask64) __U);
}
static __inline__ void __DEFAULT_FN_ATTRS
@@ -1916,55 +1639,55 @@ static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_test_epi8_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_test_epi16_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
{
- return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_qi());
+ return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_si512());
}
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS
_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_qi());
+ _mm512_setzero_si512());
}
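The test/testn helpers are unchanged in shape; only the zero vector is renamed. Their composition is worth spelling out once (scalar sketch): AND the inputs, then compare each lane against zero, with `test` setting a bit on nonzero and `testn` on zero.

#include <stdint.h>

/* Pass negate = 0 for the test_ form, negate = 1 for the testn_ form. */
static uint64_t test_epi8_mask_model(const uint8_t a[64], const uint8_t b[64],
                                     int negate) {
  uint64_t mask = 0;
  for (int lane = 0; lane < 64; ++lane) {
    int nonzero = (uint8_t)(a[lane] & b[lane]) != 0;
    if (negate ? !nonzero : nonzero)
      mask |= 1ULL << lane;
  }
  return mask;
}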
static __inline__ __mmask64 __DEFAULT_FN_ATTRS
@@ -1994,8 +1717,7 @@ _mm512_movm_epi16 (__mmask32 __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_broadcastb_epi8 (__m128i __A)
{
- return (__m512i)__builtin_shufflevector((__v16qi) __A,
- (__v16qi)_mm_undefined_si128(),
+ return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -2037,8 +1759,7 @@ _mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_broadcastw_epi16 (__m128i __A)
{
- return (__m512i)__builtin_shufflevector((__v8hi) __A,
- (__v8hi)_mm_undefined_si128(),
+ return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
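`broadcastb_epi8`/`broadcastw_epi16` now pass the same vector as both shuffle operands instead of an undefined one; the splat result is identical, but no undefined value is read. A sketch of the idiom on an 8-lane vector (assumes Clang's `__builtin_shufflevector` and GNU vector extensions):

typedef short __v8hi_demo __attribute__((__vector_size__(16)));

/* Repeating index 0 broadcasts lane 0 across the whole result. */
static __inline__ __v8hi_demo splat_lane0(__v8hi_demo __v) {
  return __builtin_shufflevector(__v, __v, 0, 0, 0, 0, 0, 0, 0, 0);
}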
@@ -2062,67 +1783,54 @@ _mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
- (__v32hi) __A,
- (__v32hi) _mm512_undefined_epi32 (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
__m512i __B)
{
- return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
- (__v32hi) __A,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __M);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_permutexvar_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
__m512i __B)
{
- return (__m512i) __builtin_ia32_permvarhi512_mask ((__v32hi) __B,
- (__v32hi) __A,
- (__v32hi) __W,
- (__mmask32) __M);
-}
-
-#define _mm512_alignr_epi8(A, B, N) __extension__ ({\
- (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(N), \
- (__v64qi)_mm512_undefined_pd(), \
- (__mmask64)-1); })
-
-#define _mm512_mask_alignr_epi8(W, U, A, B, N) __extension__({\
- (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(N), \
- (__v64qi)(__m512i)(W), \
- (__mmask64)(U)); })
-
-#define _mm512_maskz_alignr_epi8(U, A, B, N) __extension__({\
- (__m512i)__builtin_ia32_palignr512_mask((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(N), \
- (__v64qi)_mm512_setzero_si512(), \
- (__mmask64)(U)); })
-
-#define _mm512_dbsad_epu8(A, B, imm) __extension__ ({\
- (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(imm), \
- (__v32hi)_mm512_undefined_epi32(), \
- (__mmask32)-1); })
-
-#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) ({\
- (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(imm), \
- (__v32hi)(__m512i)(W), \
- (__mmask32)(U)); })
-
-#define _mm512_maskz_dbsad_epu8(U, A, B, imm) ({\
- (__m512i)__builtin_ia32_dbpsadbw512_mask((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(imm), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)(U)); })
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+ (__v32hi)_mm512_permutexvar_epi16(__A, __B),
+ (__v32hi)__W);
+}
+
+#define _mm512_alignr_epi8(A, B, N) \
+ (__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(N))
+
+#define _mm512_mask_alignr_epi8(W, U, A, B, N) \
+ (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
+ (__v64qi)(__m512i)(W))
+
+#define _mm512_maskz_alignr_epi8(U, A, B, N) \
+ (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
+ (__v64qi)(__m512i)_mm512_setzero_si512())
+
+#define _mm512_dbsad_epu8(A, B, imm) \
+ (__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(imm))
+
+#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
+ (__v32hi)(__m512i)(W))
+
+#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
+ (__v32hi)_mm512_setzero_si512())
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_sad_epu8 (__m512i __A, __m512i __B)
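[Editor's note] The avx512bw hunks above all converge on one pattern: the old masked builtins (e.g. __builtin_ia32_permvarhi512_mask) are replaced by the unmasked operation plus an explicit element select (__builtin_ia32_selectw_512 / selectb_512), with the mask variant blending against __W and the maskz variant against zero. A minimal sketch of that shape, hedged: the real headers use Clang-internal select builtins, so a portable per-element loop stands in for them here, and all names are illustrative.

/* Masked-op composition: compute the unmasked result, then keep each
 * lane where the mask bit is set and fall back to w otherwise.
 * maskz is the same call with w = an all-zero vector. */
typedef short v8s __attribute__((__vector_size__(16)));

static inline v8s add_mask(v8s w, unsigned char u, v8s a, v8s b) {
  v8s r = a + b;                /* unmasked operation */
  for (int i = 0; i < 8; ++i)   /* element select on mask bits */
    if (!((u >> i) & 1))
      r[i] = w[i];
  return r;
}

One payoff of this rewrite, visible throughout the patch, is that the unmasked intrinsic becomes a single reusable building block and the mask/maskz variants are thin wrappers around it.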
diff --git a/lib/Headers/avx512cdintrin.h b/lib/Headers/avx512cdintrin.h
index ec7e0cd443b4..e63902743c06 100644
--- a/lib/Headers/avx512cdintrin.h
+++ b/lib/Headers/avx512cdintrin.h
@@ -29,7 +29,7 @@
#define __AVX512CDINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_conflict_epi64 (__m512i __A)
@@ -82,49 +82,45 @@ _mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_lzcnt_epi32 (__m512i __A)
{
- return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
- (__v16si) _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i) __builtin_ia32_vplzcntd_512 ((__v16si) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_lzcnt_epi32(__A),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vplzcntd_512_mask ((__v16si) __A,
- (__v16si) _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_lzcnt_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_lzcnt_epi64 (__m512i __A)
{
- return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
- (__v8di) _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i) __builtin_ia32_vplzcntq_512 ((__v8di) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_lzcnt_epi64(__A),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_vplzcntq_512_mask ((__v8di) __A,
- (__v8di) _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_lzcnt_epi64(__A),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
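[Editor's note] The first avx512cd hunk adds __min_vector_width__(512) to __DEFAULT_FN_ATTRS. A minimal usage sketch, under the assumption (consistent with the hunks above) that the attribute declares the function's 512-bit vector types as required rather than something the backend may narrow under options like -mprefer-vector-width=256; the function and type names are invented for illustration.

#if defined(__clang__)
typedef long long v8ll __attribute__((__vector_size__(64)));

static inline v8ll
    __attribute__((__always_inline__, __nodebug__,
                   __min_vector_width__(512)))
wide_add(v8ll a, v8ll b) {
  return a + b;   /* stays a genuine 512-bit operation */
}
#endif

The avx512fintrin.h hunks later in this patch take the same idea further, splitting __DEFAULT_FN_ATTRS into 512-bit and 128-bit variants so each intrinsic advertises only the width it actually needs.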
diff --git a/lib/Headers/avx512dqintrin.h b/lib/Headers/avx512dqintrin.h
index 2c431d9740cd..8a00b3afa9d5 100644
--- a/lib/Headers/avx512dqintrin.h
+++ b/lib/Headers/avx512dqintrin.h
@@ -29,7 +29,7 @@
#define __AVX512DQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
@@ -226,20 +226,20 @@ _mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundpd_epi64(A, R) __extension__ ({ \
+#define _mm512_cvt_roundpd_epi64(A, R) \
(__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) \
(__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) \
(__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtpd_epu64 (__m512d __A) {
@@ -265,20 +265,20 @@ _mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundpd_epu64(A, R) __extension__ ({ \
+#define _mm512_cvt_roundpd_epu64(A, R) \
(__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) \
(__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) \
(__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtps_epi64 (__m256 __A) {
@@ -304,20 +304,20 @@ _mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundps_epi64(A, R) __extension__ ({ \
+#define _mm512_cvt_roundps_epi64(A, R) \
(__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) \
(__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundps_epi64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundps_epi64(U, A, R) \
(__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvtps_epu64 (__m256 __A) {
@@ -343,60 +343,55 @@ _mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundps_epu64(A, R) __extension__ ({ \
+#define _mm512_cvt_roundps_epu64(A, R) \
(__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) \
(__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundps_epu64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundps_epu64(U, A, R) \
(__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_cvtepi64_pd (__m512i __A) {
- return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
- (__v8df) _mm512_setzero_pd(),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_convertvector((__v8di)__A, __v8df);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
- return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_cvtepi64_pd(__A),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
- return (__m512d) __builtin_ia32_cvtqq2pd512_mask ((__v8di) __A,
- (__v8df) _mm512_setzero_pd(),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_cvtepi64_pd(__A),
+ (__v8df)_mm512_setzero_pd());
}
-#define _mm512_cvt_roundepi64_pd(A, R) __extension__ ({ \
+#define _mm512_cvt_roundepi64_pd(A, R) \
(__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) \
(__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) \
(__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm512_cvtepi64_ps (__m512i __A) {
@@ -422,20 +417,20 @@ _mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundepi64_ps(A, R) __extension__ ({ \
+#define _mm512_cvt_roundepi64_ps(A, R) \
(__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) \
(__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
(__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) \
(__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -462,20 +457,20 @@ _mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvtt_roundpd_epi64(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundpd_epi64(A, R) \
(__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) \
(__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) \
(__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvttpd_epu64 (__m512d __A) {
@@ -501,20 +496,20 @@ _mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvtt_roundpd_epu64(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundpd_epu64(A, R) \
(__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) \
(__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) \
(__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvttps_epi64 (__m256 __A) {
@@ -540,20 +535,20 @@ _mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvtt_roundps_epi64(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundps_epi64(A, R) \
(__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) \
(__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) \
(__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_cvttps_epu64 (__m256 __A) {
@@ -579,60 +574,55 @@ _mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvtt_roundps_epu64(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundps_epu64(A, R) \
(__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) \
(__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
(__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) \
(__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
(__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_cvtepu64_pd (__m512i __A) {
- return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
- (__v8df) _mm512_setzero_pd(),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_convertvector((__v8du)__A, __v8df);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
- return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_cvtepu64_pd(__A),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
- return (__m512d) __builtin_ia32_cvtuqq2pd512_mask ((__v8di) __A,
- (__v8df) _mm512_setzero_pd(),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_cvtepu64_pd(__A),
+ (__v8df)_mm512_setzero_pd());
}
-#define _mm512_cvt_roundepu64_pd(A, R) __extension__ ({ \
+#define _mm512_cvt_roundepu64_pd(A, R) \
(__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) \
(__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) \
(__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline__ __m256 __DEFAULT_FN_ATTRS
@@ -659,292 +649,292 @@ _mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundepu64_ps(A, R) __extension__ ({ \
+#define _mm512_cvt_roundepu64_ps(A, R) \
(__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) \
(__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
(__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) \
(__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_range_pd(A, B, C) __extension__ ({ \
+#define _mm512_range_pd(A, B, C) \
(__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(C), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_range_pd(W, U, A, B, C) __extension__ ({ \
+#define _mm512_mask_range_pd(W, U, A, B, C) \
(__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(C), \
(__v8df)(__m512d)(W), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_range_pd(U, A, B, C) __extension__ ({ \
+#define _mm512_maskz_range_pd(U, A, B, C) \
(__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(C), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_range_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_range_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(C), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_range_round_pd(W, U, A, B, C, R) __extension__ ({ \
+#define _mm512_mask_range_round_pd(W, U, A, B, C, R) \
(__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(C), \
(__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_range_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_range_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(C), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_range_ps(A, B, C) __extension__ ({ \
+#define _mm512_range_ps(A, B, C) \
(__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(C), \
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_range_ps(W, U, A, B, C) __extension__ ({ \
+#define _mm512_mask_range_ps(W, U, A, B, C) \
(__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(C), \
(__v16sf)(__m512)(W), (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_range_ps(U, A, B, C) __extension__ ({ \
+#define _mm512_maskz_range_ps(U, A, B, C) \
(__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(C), \
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_range_round_ps(A, B, C, R) __extension__ ({ \
+#define _mm512_range_round_ps(A, B, C, R) \
(__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(C), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_range_round_ps(W, U, A, B, C, R) __extension__ ({ \
+#define _mm512_mask_range_round_ps(W, U, A, B, C, R) \
(__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(C), \
(__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_range_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_range_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(C), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm_range_round_ss(A, B, C, R) __extension__ ({ \
+#define _mm_range_round_ss(A, B, C, R) \
(__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8) -1, (int)(C),\
- (int)(R)); })
+ (int)(R))
#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_range_round_ss(W, U, A, B, C, R) __extension__ ({ \
+#define _mm_mask_range_round_ss(W, U, A, B, C, R) \
(__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W),\
(__mmask8)(U), (int)(C),\
- (int)(R)); })
+ (int)(R))
#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_range_round_ss(U, A, B, C, R) __extension__ ({ \
+#define _mm_maskz_range_round_ss(U, A, B, C, R) \
(__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), (int)(C),\
- (int)(R)); })
+ (int)(R))
#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
-#define _mm_range_round_sd(A, B, C, R) __extension__ ({ \
+#define _mm_range_round_sd(A, B, C, R) \
(__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8) -1, (int)(C),\
- (int)(R)); })
+ (int)(R))
#define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_range_round_sd(W, U, A, B, C, R) __extension__ ({ \
+#define _mm_mask_range_round_sd(W, U, A, B, C, R) \
(__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W),\
(__mmask8)(U), (int)(C),\
- (int)(R)); })
+ (int)(R))
#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_range_round_sd(U, A, B, C, R) __extension__ ({ \
+#define _mm_maskz_range_round_sd(U, A, B, C, R) \
(__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(C),\
- (int)(R)); })
+ (int)(R))
#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
-#define _mm512_reduce_pd(A, B) __extension__ ({ \
+#define _mm512_reduce_pd(A, B) \
(__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+#define _mm512_mask_reduce_pd(W, U, A, B) \
(__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
(__v8df)(__m512d)(W), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_reduce_pd(U, A, B) __extension__ ({ \
+#define _mm512_maskz_reduce_pd(U, A, B) \
(__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_reduce_ps(A, B) __extension__ ({ \
+#define _mm512_reduce_ps(A, B) \
(__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+#define _mm512_mask_reduce_ps(W, U, A, B) \
(__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
(__v16sf)(__m512)(W), \
(__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_reduce_ps(U, A, B) __extension__ ({ \
+#define _mm512_maskz_reduce_ps(U, A, B) \
(__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_reduce_round_pd(A, B, R) __extension__ ({\
+#define _mm512_reduce_round_pd(A, B, R) \
(__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_reduce_round_pd(W, U, A, B, R) __extension__ ({\
+#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \
(__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_reduce_round_pd(U, A, B, R) __extension__ ({\
+#define _mm512_maskz_reduce_round_pd(U, A, B, R) \
(__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_reduce_round_ps(A, B, R) __extension__ ({\
+#define _mm512_reduce_round_ps(A, B, R) \
(__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_reduce_round_ps(W, U, A, B, R) __extension__ ({\
+#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \
(__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_reduce_round_ps(U, A, B, R) __extension__ ({\
+#define _mm512_maskz_reduce_round_ps(U, A, B, R) \
(__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm_reduce_ss(A, B, C) __extension__ ({ \
+#define _mm_reduce_ss(A, B, C) \
(__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
- (int)(C), _MM_FROUND_CUR_DIRECTION); })
+ (int)(C), _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_reduce_ss(W, U, A, B, C) __extension__ ({ \
+#define _mm_mask_reduce_ss(W, U, A, B, C) \
(__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(C), _MM_FROUND_CUR_DIRECTION); })
+ (int)(C), _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_reduce_ss(U, A, B, C) __extension__ ({ \
+#define _mm_maskz_reduce_ss(U, A, B, C) \
(__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), (int)(C), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_reduce_round_ss(A, B, C, R) __extension__ ({ \
+#define _mm_reduce_round_ss(A, B, C, R) \
(__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
- (int)(C), (int)(R)); })
+ (int)(C), (int)(R))
-#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) __extension__ ({ \
+#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
(__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(C), (int)(R)); })
+ (int)(C), (int)(R))
-#define _mm_maskz_reduce_round_ss(U, A, B, C, R) __extension__ ({ \
+#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
(__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(C), (int)(R)); })
+ (__mmask8)(U), (int)(C), (int)(R))
-#define _mm_reduce_sd(A, B, C) __extension__ ({ \
+#define _mm_reduce_sd(A, B, C) \
(__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)-1, (int)(C), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_reduce_sd(W, U, A, B, C) __extension__ ({ \
+#define _mm_mask_reduce_sd(W, U, A, B, C) \
(__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), (__mmask8)(U), \
- (int)(C), _MM_FROUND_CUR_DIRECTION); })
+ (int)(C), _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_reduce_sd(U, A, B, C) __extension__ ({ \
+#define _mm_maskz_reduce_sd(U, A, B, C) \
(__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(C), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_reduce_round_sd(A, B, C, R) __extension__ ({ \
+#define _mm_reduce_round_sd(A, B, C, R) \
(__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(C), (int)(R)); })
+ (__mmask8)-1, (int)(C), (int)(R))
-#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) __extension__ ({ \
+#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
(__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), (__mmask8)(U), \
- (int)(C), (int)(R)); })
+ (int)(C), (int)(R))
-#define _mm_maskz_reduce_round_sd(U, A, B, C, R) __extension__ ({ \
+#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
(__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(C), (int)(R)); })
-
+ (__mmask8)(U), (int)(C), (int)(R))
+
static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_movepi32_mask (__m512i __A)
{
@@ -973,8 +963,7 @@ _mm512_movepi64_mask (__m512i __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_broadcast_f32x2 (__m128 __A)
{
- return (__m512)__builtin_shufflevector((__v4sf)__A,
- (__v4sf)_mm_undefined_ps(),
+ return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1);
}
@@ -1006,7 +995,7 @@ _mm512_broadcast_f32x8(__m256 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A)
{
- return (__m512)__builtin_ia32_selectps_512((__mmask8)__M,
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
(__v16sf)_mm512_broadcast_f32x8(__A),
(__v16sf)__O);
}
@@ -1014,7 +1003,7 @@ _mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A)
{
- return (__m512)__builtin_ia32_selectps_512((__mmask8)__M,
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
(__v16sf)_mm512_broadcast_f32x8(__A),
(__v16sf)_mm512_setzero_ps());
}
@@ -1045,8 +1034,7 @@ _mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_broadcast_i32x2 (__m128i __A)
{
- return (__m512i)__builtin_shufflevector((__v4si)__A,
- (__v4si)_mm_undefined_si128(),
+ return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 1);
}
@@ -1078,7 +1066,7 @@ _mm512_broadcast_i32x8(__m256i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A)
{
- return (__m512i)__builtin_ia32_selectd_512((__mmask8)__M,
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
(__v16si)_mm512_broadcast_i32x8(__A),
(__v16si)__O);
}
@@ -1086,7 +1074,7 @@ _mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A)
{
- return (__m512i)__builtin_ia32_selectd_512((__mmask8)__M,
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
(__v16si)_mm512_broadcast_i32x8(__A),
(__v16si)_mm512_setzero_si512());
}
@@ -1114,217 +1102,159 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_extractf32x8_ps(A, imm) __extension__ ({ \
- (__m256)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- ((imm) & 1) ? 8 : 0, \
- ((imm) & 1) ? 9 : 1, \
- ((imm) & 1) ? 10 : 2, \
- ((imm) & 1) ? 11 : 3, \
- ((imm) & 1) ? 12 : 4, \
- ((imm) & 1) ? 13 : 5, \
- ((imm) & 1) ? 14 : 6, \
- ((imm) & 1) ? 15 : 7); })
-
-#define _mm512_mask_extractf32x8_ps(W, U, A, imm) __extension__ ({ \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm512_extractf32x8_ps((A), (imm)), \
- (__v8sf)(W)); })
-
-#define _mm512_maskz_extractf32x8_ps(U, A, imm) __extension__ ({ \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm512_extractf32x8_ps((A), (imm)), \
- (__v8sf)_mm256_setzero_ps()); })
-
-#define _mm512_extractf64x2_pd(A, imm) __extension__ ({ \
- (__m128d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)_mm512_undefined_pd(), \
- 0 + ((imm) & 0x3) * 2, \
- 1 + ((imm) & 0x3) * 2); })
-
-#define _mm512_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm512_extractf64x2_pd((A), (imm)), \
- (__v2df)(W)); })
-
-#define _mm512_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm512_extractf64x2_pd((A), (imm)), \
- (__v2df)_mm_setzero_pd()); })
-
-#define _mm512_extracti32x8_epi32(A, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v16si)(__m512i)(A), \
- (__v16si)_mm512_undefined_epi32(), \
- ((imm) & 1) ? 8 : 0, \
- ((imm) & 1) ? 9 : 1, \
- ((imm) & 1) ? 10 : 2, \
- ((imm) & 1) ? 11 : 3, \
- ((imm) & 1) ? 12 : 4, \
- ((imm) & 1) ? 13 : 5, \
- ((imm) & 1) ? 14 : 6, \
- ((imm) & 1) ? 15 : 7); })
-
-#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm512_extracti32x8_epi32((A), (imm)), \
- (__v8si)(W)); })
-
-#define _mm512_maskz_extracti32x8_epi32(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm512_extracti32x8_epi32((A), (imm)), \
- (__v8si)_mm256_setzero_si256()); })
-
-#define _mm512_extracti64x2_epi64(A, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)_mm512_undefined_epi32(), \
- 0 + ((imm) & 0x3) * 2, \
- 1 + ((imm) & 0x3) * 2); })
-
-#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm512_extracti64x2_epi64((A), (imm)), \
- (__v2di)(W)); })
-
-#define _mm512_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm512_extracti64x2_epi64((A), (imm)), \
- (__v2di)_mm_setzero_di()); })
-
-#define _mm512_insertf32x8(A, B, imm) __extension__ ({ \
- (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_castps256_ps512((__m256)(B)),\
- ((imm) & 0x1) ? 0 : 16, \
- ((imm) & 0x1) ? 1 : 17, \
- ((imm) & 0x1) ? 2 : 18, \
- ((imm) & 0x1) ? 3 : 19, \
- ((imm) & 0x1) ? 4 : 20, \
- ((imm) & 0x1) ? 5 : 21, \
- ((imm) & 0x1) ? 6 : 22, \
- ((imm) & 0x1) ? 7 : 23, \
- ((imm) & 0x1) ? 16 : 8, \
- ((imm) & 0x1) ? 17 : 9, \
- ((imm) & 0x1) ? 18 : 10, \
- ((imm) & 0x1) ? 19 : 11, \
- ((imm) & 0x1) ? 20 : 12, \
- ((imm) & 0x1) ? 21 : 13, \
- ((imm) & 0x1) ? 22 : 14, \
- ((imm) & 0x1) ? 23 : 15); })
-
-#define _mm512_mask_insertf32x8(W, U, A, B, imm) __extension__ ({ \
+#define _mm512_extractf32x8_ps(A, imm) \
+ (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_undefined_ps(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
+ (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extractf32x8_ps(U, A, imm) \
+ (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U))
+
+#define _mm512_extractf64x2_pd(A, imm) \
+ (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
+ (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extractf64x2_pd(U, A, imm) \
+ (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U))
+
+#define _mm512_extracti32x8_epi32(A, imm) \
+ (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)_mm256_undefined_si256(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
+ (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extracti32x8_epi32(U, A, imm) \
+ (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U))
+
+#define _mm512_extracti64x2_epi64(A, imm) \
+ (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_undefined_si128(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
+ (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extracti64x2_epi64(U, A, imm) \
+ (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_si128(), \
+ (__mmask8)(U))
+
+#define _mm512_insertf32x8(A, B, imm) \
+ (__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \
+ (__v8sf)(__m256)(B), (int)(imm))
+
+#define _mm512_mask_insertf32x8(W, U, A, B, imm) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
- (__v16sf)(W)); })
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_insertf32x8(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_insertf32x8(U, A, B, imm) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps()); })
-
-#define _mm512_insertf64x2(A, B, imm) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)_mm512_castpd128_pd512((__m128d)(B)),\
- (((imm) & 0x3) == 0) ? 8 : 0, \
- (((imm) & 0x3) == 0) ? 9 : 1, \
- (((imm) & 0x3) == 1) ? 8 : 2, \
- (((imm) & 0x3) == 1) ? 9 : 3, \
- (((imm) & 0x3) == 2) ? 8 : 4, \
- (((imm) & 0x3) == 2) ? 9 : 5, \
- (((imm) & 0x3) == 3) ? 8 : 6, \
- (((imm) & 0x3) == 3) ? 9 : 7); })
-
-#define _mm512_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+ (__v16sf)_mm512_setzero_ps())
+
+#define _mm512_insertf64x2(A, B, imm) \
+ (__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \
+ (__v2df)(__m128d)(B), (int)(imm))
+
+#define _mm512_mask_insertf64x2(W, U, A, B, imm) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_insertf64x2((A), (B), (imm)), \
- (__v8df)(W)); })
+ (__v8df)(__m512d)(W))
-#define _mm512_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_insertf64x2(U, A, B, imm) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_insertf64x2((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd()); })
-
-#define _mm512_inserti32x8(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
- (__v16si)_mm512_castsi256_si512((__m256i)(B)),\
- ((imm) & 0x1) ? 0 : 16, \
- ((imm) & 0x1) ? 1 : 17, \
- ((imm) & 0x1) ? 2 : 18, \
- ((imm) & 0x1) ? 3 : 19, \
- ((imm) & 0x1) ? 4 : 20, \
- ((imm) & 0x1) ? 5 : 21, \
- ((imm) & 0x1) ? 6 : 22, \
- ((imm) & 0x1) ? 7 : 23, \
- ((imm) & 0x1) ? 16 : 8, \
- ((imm) & 0x1) ? 17 : 9, \
- ((imm) & 0x1) ? 18 : 10, \
- ((imm) & 0x1) ? 19 : 11, \
- ((imm) & 0x1) ? 20 : 12, \
- ((imm) & 0x1) ? 21 : 13, \
- ((imm) & 0x1) ? 22 : 14, \
- ((imm) & 0x1) ? 23 : 15); })
-
-#define _mm512_mask_inserti32x8(W, U, A, B, imm) __extension__ ({ \
+ (__v8df)_mm512_setzero_pd())
+
+#define _mm512_inserti32x8(A, B, imm) \
+ (__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm))
+
+#define _mm512_mask_inserti32x8(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_inserti32x8((A), (B), (imm)), \
- (__v16si)(W)); })
+ (__v16si)(__m512i)(W))
-#define _mm512_maskz_inserti32x8(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_inserti32x8(U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_inserti32x8((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512()); })
-
-#define _mm512_inserti64x2(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)_mm512_castsi128_si512((__m128i)(B)),\
- (((imm) & 0x3) == 0) ? 8 : 0, \
- (((imm) & 0x3) == 0) ? 9 : 1, \
- (((imm) & 0x3) == 1) ? 8 : 2, \
- (((imm) & 0x3) == 1) ? 9 : 3, \
- (((imm) & 0x3) == 2) ? 8 : 4, \
- (((imm) & 0x3) == 2) ? 9 : 5, \
- (((imm) & 0x3) == 3) ? 8 : 6, \
- (((imm) & 0x3) == 3) ? 9 : 7); })
-
-#define _mm512_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+ (__v16si)_mm512_setzero_si512())
+
+#define _mm512_inserti64x2(A, B, imm) \
+ (__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm))
+
+#define _mm512_mask_inserti64x2(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_inserti64x2((A), (B), (imm)), \
- (__v8di)(W)); })
+ (__v8di)(__m512i)(W))
-#define _mm512_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_inserti64x2(U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_inserti64x2((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512()); })
+ (__v8di)_mm512_setzero_si512())
-#define _mm512_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+#define _mm512_mask_fpclass_ps_mask(U, A, imm) \
(__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
- (int)(imm), (__mmask16)(U)); })
+ (int)(imm), (__mmask16)(U))
-#define _mm512_fpclass_ps_mask(A, imm) __extension__ ({ \
+#define _mm512_fpclass_ps_mask(A, imm) \
(__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
- (int)(imm), (__mmask16)-1); })
+ (int)(imm), (__mmask16)-1)
-#define _mm512_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+#define _mm512_mask_fpclass_pd_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm512_fpclass_pd_mask(A, imm) __extension__ ({ \
+#define _mm512_fpclass_pd_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_fpclass_sd_mask(A, imm) __extension__ ({ \
+#define _mm_fpclass_sd_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_fpclass_sd_mask(U, A, imm) __extension__ ({ \
+#define _mm_mask_fpclass_sd_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_fpclass_ss_mask(A, imm) __extension__ ({ \
+#define _mm_fpclass_ss_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_fpclass_ss_mask(U, A, imm) __extension__ ({ \
+#define _mm_mask_fpclass_ss_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
#undef __DEFAULT_FN_ATTRS
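[Editor's note] Besides dropping the statement-expression macro wrappers, the avx512dq hunks rewrite _mm512_cvtepi64_pd and _mm512_cvtepu64_pd in terms of __builtin_convertvector, a generic lane-wise conversion, instead of a target-specific masked builtin. A minimal sketch of the same rewrite at a smaller width; the typedef and function names are illustrative only.

typedef long long v4ll __attribute__((__vector_size__(32)));
typedef double    v4d  __attribute__((__vector_size__(32)));

static inline v4d cvt_i64_to_f64(v4ll x) {
  /* Each lane is converted as if by (double)x[i]. */
  return __builtin_convertvector(x, v4d);
}

Expressing the conversion generically lets the middle end see an ordinary sitofp on vectors, which it can fold, vectorize, or legalize like any other conversion.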
diff --git a/lib/Headers/avx512erintrin.h b/lib/Headers/avx512erintrin.h
index 8ff212c42211..6348275c8d31 100644
--- a/lib/Headers/avx512erintrin.h
+++ b/lib/Headers/avx512erintrin.h
@@ -27,21 +27,21 @@
#ifndef __AVX512ERINTRIN_H
#define __AVX512ERINTRIN_H
-// exp2a23
-#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
+/* exp2a23 */
+#define _mm512_exp2a23_round_pd(A, R) \
(__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
+#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \
(__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
+#define _mm512_maskz_exp2a23_round_pd(M, A, R) \
(__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm512_exp2a23_pd(A) \
_mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -52,20 +52,20 @@
#define _mm512_maskz_exp2a23_pd(M, A) \
_mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
+#define _mm512_exp2a23_round_ps(A, R) \
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
+#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
+#define _mm512_maskz_exp2a23_round_ps(M, A, R) \
(__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)); })
+ (__mmask16)(M), (int)(R))
#define _mm512_exp2a23_ps(A) \
_mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -76,21 +76,21 @@
#define _mm512_maskz_exp2a23_ps(M, A) \
_mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-// rsqrt28
-#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
+/* rsqrt28 */
+#define _mm512_rsqrt28_round_pd(A, R) \
(__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
+#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \
(__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
+#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \
(__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm512_rsqrt28_pd(A) \
_mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -101,20 +101,20 @@
#define _mm512_maskz_rsqrt28_pd(M, A) \
_mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
+#define _mm512_rsqrt28_round_ps(A, R) \
(__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
+#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \
(__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
+#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \
(__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)); })
+ (__mmask16)(M), (int)(R))
#define _mm512_rsqrt28_ps(A) \
_mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -125,23 +125,23 @@
#define _mm512_maskz_rsqrt28_ps(M, A) \
_mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
+#define _mm_rsqrt28_round_ss(A, B, R) \
(__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
+#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \
(__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
-#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
+#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \
(__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm_rsqrt28_ss(A, B) \
_mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -152,23 +152,23 @@
#define _mm_maskz_rsqrt28_ss(M, A, B) \
_mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
+#define _mm_rsqrt28_round_sd(A, B, R) \
(__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
+#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \
(__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
-#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
+#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \
(__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm_rsqrt28_sd(A, B) \
_mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -179,21 +179,21 @@
#define _mm_maskz_rsqrt28_sd(M, A, B) \
_mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-// rcp28
-#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
+/* rcp28 */
+#define _mm512_rcp28_round_pd(A, R) \
(__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
+#define _mm512_mask_rcp28_round_pd(S, M, A, R) \
(__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
+#define _mm512_maskz_rcp28_round_pd(M, A, R) \
(__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm512_rcp28_pd(A) \
_mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -204,20 +204,20 @@
#define _mm512_maskz_rcp28_pd(M, A) \
_mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
+#define _mm512_rcp28_round_ps(A, R) \
(__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
+#define _mm512_mask_rcp28_round_ps(S, M, A, R) \
(__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
+#define _mm512_maskz_rcp28_round_ps(M, A, R) \
(__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R)); })
+ (__mmask16)(M), (int)(R))
#define _mm512_rcp28_ps(A) \
_mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -228,23 +228,23 @@
#define _mm512_maskz_rcp28_ps(M, A) \
_mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
-#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
+#define _mm_rcp28_round_ss(A, B, R) \
(__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
+#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \
(__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
-#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
+#define _mm_maskz_rcp28_round_ss(M, A, B, R) \
(__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm_rcp28_ss(A, B) \
_mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -255,23 +255,23 @@
#define _mm_maskz_rcp28_ss(M, A, B) \
_mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
+#define _mm_rcp28_round_sd(A, B, R) \
(__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
+#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \
(__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
-#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
+#define _mm_maskz_rcp28_round_sd(M, A, B, R) \
(__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
#define _mm_rcp28_sd(A, B) \
_mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -282,4 +282,4 @@
#define _mm_maskz_rcp28_sd(M, A, B) \
_mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
-#endif // __AVX512ERINTRIN_H
+#endif /* __AVX512ERINTRIN_H */
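[Editor's note] The avx512er changes are purely about strict-mode friendliness: // comments become /* */ so the header survives -std=c89, and the GNU __extension__ ({ ... }) wrappers are removed from the macros. A minimal sketch, not taken from the patch, of why the statement-expression form is limiting: such blocks are only valid inside a function body, so macros built on them cannot appear in file-scope initializers or other constant contexts. OLD_ABS/NEW_ABS are invented names.

#define OLD_ABS(x) __extension__ ({ int _t = (x); _t < 0 ? -_t : _t; })
#define NEW_ABS(x) ((int)(x) < 0 ? -(int)(x) : (int)(x))

/* int g = OLD_ABS(-3);    would not compile at file scope */
int g = NEW_ABS(-3);       /* fine: an ordinary constant expression */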
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index d34f0b1327ae..81696953b76e 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -173,51 +173,51 @@ typedef enum
} _MM_MANTISSA_SIGN_ENUM;
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
/* Create vectors with repeated elements */
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_setzero_si512(void)
{
- return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
+ return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
}
#define _mm512_setzero_epi32 _mm512_setzero_si512
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_undefined_pd(void)
{
return (__m512d)__builtin_ia32_undef512();
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_undefined(void)
{
return (__m512)__builtin_ia32_undef512();
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_undefined_ps(void)
{
return (__m512)__builtin_ia32_undef512();
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_undefined_epi32(void)
{
return (__m512i)__builtin_ia32_undef512();
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcastd_epi32 (__m128i __A)
{
- return (__m512i)__builtin_shufflevector((__v4si) __A,
- (__v4si)_mm_undefined_si128(),
+ return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
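Every shuffle index above selects from the first operand, so the second operand's value is irrelevant; passing __A twice instead of _mm_undefined_si128() is behavior-preserving but avoids introducing an undef value into the IR. A minimal model of the idiom, assuming -mavx512f (illustrative name, not from the patch):

#include <immintrin.h>

static inline __m512i broadcast_lane0(__m128i __v) {
  /* All 16 indices are 0, i.e. lane 0 of the first vector. */
  return (__m512i)__builtin_shufflevector((__v4si)__v, (__v4si)__v,
                                          0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 0, 0, 0, 0, 0, 0);
}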
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512(__M,
@@ -225,7 +225,7 @@ _mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
(__v16si) __O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512(__M,
@@ -233,15 +233,14 @@ _mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
(__v16si) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcastq_epi64 (__m128i __A)
{
- return (__m512i)__builtin_shufflevector((__v2di) __A,
- (__v2di) _mm_undefined_si128(),
+ return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512(__M,
@@ -250,7 +249,7 @@ _mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512(__M,
@@ -259,64 +258,67 @@ _mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_setzero_ps(void)
{
- return (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+ return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
}
#define _mm512_setzero _mm512_setzero_ps
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_setzero_pd(void)
{
- return (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+ return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_set1_ps(float __w)
{
- return (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w };
+ return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_set1_pd(double __w)
{
- return (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
+ return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi8(char __w)
{
- return (__m512i)(__v64qi){ __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w };
+ return __extension__ (__m512i)(__v64qi){
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi16(short __w)
{
- return (__m512i)(__v32hi){ __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w, __w, __w };
+ return __extension__ (__m512i)(__v32hi){
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w };
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi32(int __s)
{
- return (__m512i)(__v16si){ __s, __s, __s, __s, __s, __s, __s, __s,
- __s, __s, __s, __s, __s, __s, __s, __s };
+ return __extension__ (__m512i)(__v16si){
+ __s, __s, __s, __s, __s, __s, __s, __s,
+ __s, __s, __s, __s, __s, __s, __s, __s };
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
{
return (__m512i)__builtin_ia32_selectd_512(__M,
@@ -324,57 +326,54 @@ _mm512_maskz_set1_epi32(__mmask16 __M, int __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set1_epi64(long long __d)
{
- return (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
+ return __extension__ (__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
}
-#ifdef __x86_64__
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
{
return (__m512i)__builtin_ia32_selectq_512(__M,
(__v8di)_mm512_set1_epi64(__A),
(__v8di)_mm512_setzero_si512());
}
-#endif
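With the __x86_64__ guard dropped, _mm512_maskz_set1_epi64 is now declared for 32-bit targets as well; the select-based formulation needs no 64-bit GPR broadcast builtin. A usage sketch, assuming -mavx512f (the helper is illustrative):

#include <immintrin.h>

__m512i broadcast_even_lanes(long long __v) {
  /* Mask 0x55 keeps lanes 0, 2, 4, 6; the rest are zeroed. */
  return _mm512_maskz_set1_epi64((__mmask8)0x55, __v);
}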
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcastss_ps(__m128 __A)
{
- return (__m512)__builtin_shufflevector((__v4sf) __A,
- (__v4sf)_mm_undefined_ps(),
+ return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
{
- return (__m512i)(__v16si)
+ return __extension__ (__m512i)(__v16si)
{ __D, __C, __B, __A, __D, __C, __B, __A,
__D, __C, __B, __A, __D, __C, __B, __A };
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set4_epi64 (long long __A, long long __B, long long __C,
long long __D)
{
- return (__m512i) (__v8di)
+ return __extension__ (__m512i) (__v8di)
{ __D, __C, __B, __A, __D, __C, __B, __A };
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_set4_pd (double __A, double __B, double __C, double __D)
{
- return (__m512d)
+ return __extension__ (__m512d)
{ __D, __C, __B, __A, __D, __C, __B, __A };
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_set4_ps (float __A, float __B, float __C, float __D)
{
- return (__m512)
+ return __extension__ (__m512)
{ __D, __C, __B, __A, __D, __C, __B, __A,
__D, __C, __B, __A, __D, __C, __B, __A };
}
@@ -391,138 +390,137 @@ _mm512_set4_ps (float __A, float __B, float __C, float __D)
#define _mm512_setr4_ps(e0,e1,e2,e3) \
_mm512_set4_ps((e3),(e2),(e1),(e0))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_broadcastsd_pd(__m128d __A)
{
- return (__m512d)__builtin_shufflevector((__v2df) __A,
- (__v2df) _mm_undefined_pd(),
+ return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
0, 0, 0, 0, 0, 0, 0, 0);
}
/* Cast between vector types */
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
-1, -1, -1, -1, -1, -1, -1, -1);
}
-static __inline __m128d __DEFAULT_FN_ATTRS
+static __inline __m128d __DEFAULT_FN_ATTRS512
_mm512_castpd512_pd128(__m512d __a)
{
return __builtin_shufflevector(__a, __a, 0, 1);
}
-static __inline __m256d __DEFAULT_FN_ATTRS
+static __inline __m256d __DEFAULT_FN_ATTRS512
_mm512_castpd512_pd256 (__m512d __A)
{
return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
}
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS512
_mm512_castps512_ps128(__m512 __a)
{
return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}
-static __inline __m256 __DEFAULT_FN_ATTRS
+static __inline __m256 __DEFAULT_FN_ATTRS512
_mm512_castps512_ps256 (__m512 __A)
{
return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castpd_ps (__m512d __A)
{
return (__m512) (__A);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_castpd_si512 (__m512d __A)
{
return (__m512i) (__A);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castps_pd (__m512 __A)
{
return (__m512d) (__A);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_castps_si512 (__m512 __A)
{
return (__m512i) (__A);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castsi512_ps (__m512i __A)
{
return (__m512) (__A);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castsi512_pd (__m512i __A)
{
return (__m512d) (__A);
}
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS512
_mm512_castsi512_si128 (__m512i __A)
{
return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
}
-static __inline __m256i __DEFAULT_FN_ATTRS
+static __inline __m256i __DEFAULT_FN_ATTRS512
_mm512_castsi512_si256 (__m512i __A)
{
return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_int2mask(int __a)
{
return (__mmask16)__a;
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask2int(__mmask16 __a)
{
return (int)__a;
}
-/// \brief Constructs a 512-bit floating-point vector of [8 x double] from a
+/// Constructs a 512-bit floating-point vector of [8 x double] from a
/// 128-bit floating-point vector of [2 x double]. The lower 128 bits
/// contain the value of the source vector. The upper 384 bits are set
/// to zero.
@@ -535,13 +533,13 @@ _mm512_mask2int(__mmask16 __a)
/// A 128-bit vector of [2 x double].
/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits
/// contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_zextpd128_pd512(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3);
}
-/// \brief Constructs a 512-bit floating-point vector of [8 x double] from a
+/// Constructs a 512-bit floating-point vector of [8 x double] from a
/// 256-bit floating-point vector of [4 x double]. The lower 256 bits
/// contain the value of the source vector. The upper 256 bits are set
/// to zero.
@@ -554,13 +552,13 @@ _mm512_zextpd128_pd512(__m128d __a)
/// A 256-bit vector of [4 x double].
/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits
/// contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_zextpd256_pd512(__m256d __a)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7);
}
-/// \brief Constructs a 512-bit floating-point vector of [16 x float] from a
+/// Constructs a 512-bit floating-point vector of [16 x float] from a
/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain
/// the value of the source vector. The upper 384 bits are set to zero.
///
@@ -572,13 +570,13 @@ _mm512_zextpd256_pd512(__m256d __a)
/// A 128-bit vector of [4 x float].
/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits
/// contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_zextps128_ps512(__m128 __a)
{
return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7);
}
-/// \brief Constructs a 512-bit floating-point vector of [16 x float] from a
+/// Constructs a 512-bit floating-point vector of [16 x float] from a
/// 256-bit floating-point vector of [8 x float]. The lower 256 bits contain
/// the value of the source vector. The upper 256 bits are set to zero.
///
@@ -590,13 +588,13 @@ _mm512_zextps128_ps512(__m128 __a)
/// A 256-bit vector of [8 x float].
/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits
/// contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_zextps256_ps512(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
-/// \brief Constructs a 512-bit integer vector from a 128-bit integer vector.
+/// Constructs a 512-bit integer vector from a 128-bit integer vector.
/// The lower 128 bits contain the value of the source vector. The upper
/// 384 bits are set to zero.
///
@@ -608,13 +606,13 @@ _mm512_zextps256_ps512(__m256 __a)
/// A 128-bit integer vector.
/// \returns A 512-bit integer vector. The lower 128 bits contain the value of
/// the parameter. The upper 384 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_zextsi128_si512(__m128i __a)
{
return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3);
}
-/// \brief Constructs a 512-bit integer vector from a 256-bit integer vector.
+/// Constructs a 512-bit integer vector from a 256-bit integer vector.
/// The lower 256 bits contain the value of the source vector. The upper
/// 256 bits are set to zero.
///
@@ -626,20 +624,20 @@ _mm512_zextsi128_si512(__m128i __a)
/// A 256-bit integer vector.
/// \returns A 512-bit integer vector. The lower 256 bits contain the value of
/// the parameter. The upper 256 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_zextsi256_si512(__m256i __a)
{
return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7);
}
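The zext casts above differ from the plain cast intrinsics in one guarantee: _mm512_castsi256_si512 leaves the upper 256 bits undefined, while the zext form shuffles in an explicit zero vector. A short usage sketch, assuming -mavx512f:

#include <immintrin.h>

__m512i widen_zeroed(__m256i __lo) {
  return _mm512_zextsi256_si512(__lo);  /* bits [511:256] are zero */
}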
/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_and_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a & (__v16su)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
@@ -647,20 +645,20 @@ _mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
(__v16si) __src);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
{
return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
__k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_and_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a & (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
{
return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
@@ -668,26 +666,26 @@ _mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
(__v8di) __src);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
{
return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
__k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_andnot_si512 (__m512i __A, __m512i __B)
{
- return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
+ return (__m512i)(~(__v8du)__A & (__v8du)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_andnot_epi32 (__m512i __A, __m512i __B)
{
- return (__m512i)(~(__v16su)(__A) & (__v16su)__B);
+ return (__m512i)(~(__v16su)__A & (__v16su)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -695,20 +693,20 @@ _mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_andnot_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)(~(__v8du)(__A) & (__v8du)__B);
+ return (__m512i)(~(__v8du)__A & (__v8du)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -716,20 +714,20 @@ _mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_or_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a | (__v16su)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
@@ -737,19 +735,19 @@ _mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
(__v16si)__src);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
{
return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_or_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a | (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
@@ -757,19 +755,19 @@ _mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
(__v8di)__src);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
{
return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_xor_epi32(__m512i __a, __m512i __b)
{
return (__m512i)((__v16su)__a ^ (__v16su)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
@@ -777,19 +775,19 @@ _mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
(__v16si)__src);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
{
return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_xor_epi64(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a ^ (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
@@ -797,25 +795,25 @@ _mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
(__v8di)__src);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
{
return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
}
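All of the masked bitwise forms share one shape: compute the plain vector op, then select per lane between the result and either __src (merge masking) or zero (zero masking). A usage sketch for the merge-masked AND, assuming -mavx512f:

#include <immintrin.h>

__m512i and_keep_rest(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) {
  /* Lanes where __k is 0 keep the corresponding lane of __src. */
  return _mm512_mask_and_epi32(__src, __k, __a, __b);
}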
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_and_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a & (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_or_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a | (__v8du)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_xor_si512(__m512i __a, __m512i __b)
{
return (__m512i)((__v8du)__a ^ (__v8du)__b);
@@ -823,49 +821,49 @@ _mm512_xor_si512(__m512i __a, __m512i __b)
/* Arithmetic */
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_add_pd(__m512d __a, __m512d __b)
{
return (__m512d)((__v8df)__a + (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_add_ps(__m512 __a, __m512 __b)
{
return (__m512)((__v16sf)__a + (__v16sf)__b);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_mul_pd(__m512d __a, __m512d __b)
{
return (__m512d)((__v8df)__a * (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_mul_ps(__m512 __a, __m512 __b)
{
return (__m512)((__v16sf)__a * (__v16sf)__b);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_sub_pd(__m512d __a, __m512d __b)
{
return (__m512d)((__v8df)__a - (__v8df)__b);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_sub_ps(__m512 __a, __m512 __b)
{
return (__m512)((__v16sf)__a - (__v16sf)__b);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_add_epi64 (__m512i __A, __m512i __B)
{
return (__m512i) ((__v8du) __A + (__v8du) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -873,7 +871,7 @@ _mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -881,13 +879,13 @@ _mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sub_epi64 (__m512i __A, __m512i __B)
{
return (__m512i) ((__v8du) __A - (__v8du) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -895,7 +893,7 @@ _mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -903,13 +901,13 @@ _mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_add_epi32 (__m512i __A, __m512i __B)
{
return (__m512i) ((__v16su) __A + (__v16su) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -917,7 +915,7 @@ _mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -925,13 +923,13 @@ _mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sub_epi32 (__m512i __A, __m512i __B)
{
return (__m512i) ((__v16su) __A - (__v16su) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -939,7 +937,7 @@ _mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -947,107 +945,81 @@ _mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
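The masked add/sub intrinsics follow the same select-over-plain-op pattern, so the zero-masked form can equivalently be written with a masked move; a sketch of that equivalence (not code from the patch), assuming -mavx512f:

#include <immintrin.h>

static inline __m512i maskz_add_epi32_sketch(__mmask16 __u, __m512i __a, __m512i __b) {
  /* Same result as _mm512_maskz_add_epi32(__u, __a, __b). */
  return _mm512_maskz_mov_epi32(__u, _mm512_add_epi32(__a, __b));
}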
-#define _mm512_mask_max_round_pd(W, U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
+#define _mm512_max_round_pd(A, B, R) \
+ (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
-#define _mm512_maskz_max_round_pd(U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+#define _mm512_mask_max_round_pd(W, U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_max_round_pd((A), (B), (R)), \
+ (__v8df)(W))
-#define _mm512_max_round_pd(A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_maxpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+#define _mm512_maskz_max_round_pd(U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_max_round_pd((A), (B), (R)), \
+ (__v8df)_mm512_setzero_pd())
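This hunk shows the refactoring pattern applied throughout the rest of the patch: the _mask/_maskz macros no longer call a *_mask builtin with a passthrough operand; they wrap the new unmasked builtin in a generic select builtin, so masking is expressed uniformly in the IR. After one level of macro substitution, _mm512_mask_max_round_pd(W, U, A, B, R) expands (roughly) to:

(__m512d)__builtin_ia32_selectpd_512(
    (__mmask8)(U),
    (__v8df)(__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A),
                                             (__v8df)(__m512d)(B), (int)(R)),
    (__v8df)(W))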
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_max_pd(__m512d __A, __m512d __B)
{
- return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
- return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_max_pd(__A, __B),
+ (__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
- return (__m512d) __builtin_ia32_maxpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_max_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
-#define _mm512_mask_max_round_ps(W, U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); })
+#define _mm512_max_round_ps(A, B, R) \
+ (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
-#define _mm512_maskz_max_round_ps(U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+#define _mm512_mask_max_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
+ (__v16sf)(W))
-#define _mm512_max_round_ps(A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_maxps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+#define _mm512_maskz_max_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
+ (__v16sf)_mm512_setzero_ps())
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_max_ps(__m512 __A, __m512 __B)
{
- return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
- return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_max_ps(__A, __B),
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
- return (__m512) __builtin_ia32_maxps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_max_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -1056,7 +1028,7 @@ _mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -1065,25 +1037,25 @@ _mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_max_round_ss(A, B, R) __extension__ ({ \
+#define _mm_max_round_ss(A, B, R) \
(__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_max_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_max_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_max_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_max_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
(__v2df) __B,
@@ -1092,7 +1064,7 @@ _mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
(__v2df) __B,
@@ -1101,238 +1073,188 @@ _mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_max_round_sd(A, B, R) __extension__ ({ \
+#define _mm_max_round_sd(A, B, R) \
(__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_max_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_max_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_max_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_max_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline __m512i
-__DEFAULT_FN_ATTRS
+__DEFAULT_FN_ATTRS512
_mm512_max_epi32(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W, __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_max_epi32(__A, __B),
+ (__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_max_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu32(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W, __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_max_epu32(__A, __B),
+ (__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxud512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_max_epu32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi64(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W, __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_max_epi64(__A, __B),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxsq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_max_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu64(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W, __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_max_epu64(__A, __B),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmaxuq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_max_epu64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
-#define _mm512_mask_min_round_pd(W, U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
+#define _mm512_min_round_pd(A, B, R) \
+ (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
-#define _mm512_maskz_min_round_pd(U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+#define _mm512_mask_min_round_pd(W, U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_min_round_pd((A), (B), (R)), \
+ (__v8df)(W))
-#define _mm512_min_round_pd(A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_minpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+#define _mm512_maskz_min_round_pd(U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_min_round_pd((A), (B), (R)), \
+ (__v8df)_mm512_setzero_pd())
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_min_pd(__m512d __A, __m512d __B)
{
- return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
- return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_min_pd(__A, __B),
+ (__v8df)__W);
}
-#define _mm512_mask_min_round_ps(W, U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); })
-
-#define _mm512_maskz_min_round_ps(U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
-
-#define _mm512_min_round_ps(A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_minps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
- return (__m512d) __builtin_ia32_minpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_min_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+#define _mm512_min_round_ps(A, B, R) \
+ (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
+
+#define _mm512_mask_min_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
+ (__v16sf)(W))
+
+#define _mm512_maskz_min_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
+ (__v16sf)_mm512_setzero_ps())
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_min_ps(__m512 __A, __m512 __B)
{
- return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
- return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_min_ps(__A, __B),
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
- return (__m512) __builtin_ia32_minps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_min_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -1341,7 +1263,7 @@ _mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -1350,25 +1272,25 @@ _mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_min_round_ss(A, B, R) __extension__ ({ \
+#define _mm_min_round_ss(A, B, R) \
(__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_min_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_min_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_min_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_min_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
(__v2df) __B,
@@ -1377,7 +1299,7 @@ _mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
(__v2df) __B,
@@ -1386,144 +1308,120 @@ _mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_min_round_sd(A, B, R) __extension__ ({ \
+#define _mm_min_round_sd(A, B, R) \
(__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_min_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_min_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_min_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_min_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
static __inline __m512i
-__DEFAULT_FN_ATTRS
+__DEFAULT_FN_ATTRS512
_mm512_min_epi32(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W, __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_min_epi32(__A, __B),
+ (__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_min_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu32(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W, __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_min_epu32(__A, __B),
+ (__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminud512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_min_epu32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi64(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W, __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_min_epi64(__A, __B),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminsq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_min_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu64(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W, __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_min_epu64(__A, __B),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pminuq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_min_epu64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mul_epi32(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -1531,7 +1429,7 @@ _mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
(__v8di)__W);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -1539,13 +1437,13 @@ _mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512 ());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mul_epu32(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -1553,7 +1451,7 @@ _mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
(__v8di)__W);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -1561,13 +1459,13 @@ _mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512 ());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mullo_epi32 (__m512i __A, __m512i __B)
{
return (__m512i) ((__v16su) __A * (__v16su) __B);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -1575,7 +1473,7 @@ _mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -1583,92 +1481,91 @@ _mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
(__v16si)__W);
}
-#define _mm512_mask_sqrt_round_pd(W, U, A, R) __extension__ ({ \
- (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mullox_epi64 (__m512i __A, __m512i __B) {
+ return (__m512i) ((__v8du) __A * (__v8du) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_mullox_epi64(__A, __B),
+ (__v8di)__W);
+}
+
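_mm512_mullox_epi64 and its mask form are new in this patch: a full 64x64->64 lane-wise multiply written as plain vector arithmetic, so it needs only AVX512F (unlike _mm512_mullo_epi64, which requires AVX512DQ). A usage sketch, assuming -mavx512f:

#include <immintrin.h>

__m512i triple(__m512i __v) {
  return _mm512_mullox_epi64(__v, _mm512_set1_epi64(3));
}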
+#define _mm512_sqrt_round_pd(A, R) \
+ (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))
-#define _mm512_maskz_sqrt_round_pd(U, A, R) __extension__ ({ \
- (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_sqrt_round_pd((A), (R)), \
+ (__v8df)(__m512d)(W))
-#define _mm512_sqrt_round_pd(A, R) __extension__ ({ \
- (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+#define _mm512_maskz_sqrt_round_pd(U, A, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_sqrt_round_pd((A), (R)), \
+ (__v8df)_mm512_setzero_pd())
-static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_sqrt_pd(__m512d __a)
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_sqrt_pd(__m512d __A)
{
- return (__m512d)__builtin_ia32_sqrtpd512_mask((__v8df)__a,
- (__v8df) _mm512_setzero_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
- return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_sqrt_pd(__A),
+ (__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
{
- return (__m512d) __builtin_ia32_sqrtpd512_mask ((__v8df) __A,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_sqrt_pd(__A),
+ (__v8df)_mm512_setzero_pd());
}
-#define _mm512_mask_sqrt_round_ps(W, U, A, R) __extension__ ({ \
- (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); })
+#define _mm512_sqrt_round_ps(A, R) \
+ (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))
-#define _mm512_maskz_sqrt_round_ps(U, A, R) __extension__ ({ \
- (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
+ (__v16sf)(__m512)(W))
-#define _mm512_sqrt_round_ps(A, R) __extension__ ({ \
- (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+#define _mm512_maskz_sqrt_round_ps(U, A, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
+ (__v16sf)_mm512_setzero_ps())
-static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_sqrt_ps(__m512 __a)
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_sqrt_ps(__m512 __A)
{
- return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__a,
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
{
- return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_sqrt_ps(__A),
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
{
- return (__m512)__builtin_ia32_sqrtps512_mask((__v16sf)__A,
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_sqrt_ps(__A),
+ (__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_rsqrt14_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
@@ -1676,7 +1573,7 @@ _mm512_rsqrt14_pd(__m512d __A)
_mm512_setzero_pd (),
(__mmask8) -1);}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
@@ -1684,7 +1581,7 @@ _mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
@@ -1693,7 +1590,7 @@ _mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_rsqrt14_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
@@ -1702,7 +1599,7 @@ _mm512_rsqrt14_ps(__m512 __A)
(__mmask16) -1);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
@@ -1710,7 +1607,7 @@ _mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
@@ -1719,7 +1616,7 @@ _mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_rsqrt14_ss(__m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
@@ -1729,7 +1626,7 @@ _mm_rsqrt14_ss(__m128 __A, __m128 __B)
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
@@ -1738,7 +1635,7 @@ _mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
@@ -1747,7 +1644,7 @@ _mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rsqrt14_sd(__m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
@@ -1757,7 +1654,7 @@ _mm_rsqrt14_sd(__m128d __A, __m128d __B)
(__mmask8) -1);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
@@ -1766,7 +1663,7 @@ _mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
@@ -1775,7 +1672,7 @@ _mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_rcp14_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
@@ -1784,7 +1681,7 @@ _mm512_rcp14_pd(__m512d __A)
(__mmask8) -1);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
@@ -1792,7 +1689,7 @@ _mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
@@ -1801,7 +1698,7 @@ _mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_rcp14_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
@@ -1810,7 +1707,7 @@ _mm512_rcp14_ps(__m512 __A)
(__mmask16) -1);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
@@ -1818,7 +1715,7 @@ _mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
@@ -1827,7 +1724,7 @@ _mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_rcp14_ss(__m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
@@ -1837,7 +1734,7 @@ _mm_rcp14_ss(__m128 __A, __m128 __B)
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
@@ -1846,7 +1743,7 @@ _mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
@@ -1855,7 +1752,7 @@ _mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rcp14_sd(__m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
@@ -1865,7 +1762,7 @@ _mm_rcp14_sd(__m128d __A, __m128d __B)
(__mmask8) -1);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
@@ -1874,7 +1771,7 @@ _mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
@@ -1883,7 +1780,7 @@ _mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
(__mmask8) __U);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_floor_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
@@ -1892,7 +1789,7 @@ _mm512_floor_ps(__m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
@@ -1901,7 +1798,7 @@ _mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_floor_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
@@ -1910,7 +1807,7 @@ _mm512_floor_pd(__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
@@ -1919,7 +1816,7 @@ _mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
@@ -1928,7 +1825,7 @@ _mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_ceil_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
@@ -1937,7 +1834,7 @@ _mm512_ceil_ps(__m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_ceil_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
@@ -1946,7 +1843,7 @@ _mm512_ceil_pd(__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
@@ -1955,758 +1852,672 @@ _mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
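+/* floor/ceil map onto rndscale; the immediates (elided from this hunk) are
+   assumed to be _MM_FROUND_FLOOR / _MM_FROUND_CEIL, 0x01 / 0x02 in Intel's
+   encoding. Example: _mm512_floor_pd(_mm512_set1_pd(1.5)) yields 1.0 in
+   every lane. */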
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi64(__m512i __A)
{
- return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_abs_epi64(__A),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsq512_mask ((__v8di) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_abs_epi64(__A),
+ (__v8di)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi32(__m512i __A)
{
- return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+  return (__m512i)__builtin_ia32_pabsd512((__v16si)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_abs_epi32(__A),
+ (__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_pabsd512_mask ((__v16si) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_abs_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
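+/* Same composition as abs_epi64: the unmasked absolute value feeds a select.
+   Caller-side sketch (assumed variable names, AVX-512F target):
+     __m512i r = _mm512_maskz_abs_epi32((__mmask16)0xFFFF, v); // all lanes |v|
+     __m512i s = _mm512_mask_abs_epi32(v, (__mmask16)0, v);    // mask 0: v  */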
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_add_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, __W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_addss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_add_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
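+/* __builtin_ia32_selectss_128 blends only element 0 under mask bit 0; the
+   upper three floats always come from its second argument. Sketch with
+   assumed variable names:
+     __m128 r = _mm_mask_add_ss(w, 1, a, b); // r[0]=a[0]+b[0], r[1..3]=a[1..3]
+     __m128 z = _mm_maskz_add_ss(0, a, b);   // z[0]=0,         z[1..3]=a[1..3] */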
-#define _mm_add_round_ss(A, B, R) __extension__ ({ \
+#define _mm_add_round_ss(A, B, R) \
(__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_add_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_add_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_add_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_add_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_add_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, __W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_addsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_add_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
-#define _mm_add_round_sd(A, B, R) __extension__ ({ \
+#define _mm_add_round_sd(A, B, R) \
(__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_add_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_add_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_add_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_add_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_add_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_add_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_add_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_add_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-#define _mm512_add_round_pd(A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
-
-#define _mm512_mask_add_round_pd(W, U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
-
-#define _mm512_maskz_add_round_pd(U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
-
-#define _mm512_add_round_ps(A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
-
-#define _mm512_mask_add_round_ps(W, U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); })
-
-#define _mm512_maskz_add_round_ps(U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+#define _mm512_add_round_pd(A, B, R) \
+ (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
+
+#define _mm512_mask_add_round_pd(W, U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_add_round_pd((A), (B), (R)), \
+                                       (__v8df)(__m512d)(W))
+
+#define _mm512_maskz_add_round_pd(U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_add_round_pd((A), (B), (R)), \
+                                       (__v8df)_mm512_setzero_pd())
+
+#define _mm512_add_round_ps(A, B, R) \
+ (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
+
+#define _mm512_mask_add_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
+                                      (__v16sf)(__m512)(W))
+
+#define _mm512_maskz_add_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
+                                      (__v16sf)_mm512_setzero_ps())
+
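+/* These round macros expand to bare expressions (no trailing semicolon), so a
+   call can nest inside a larger expression. Caller-side sketch with assumed
+   variable names:
+     __m512 s = _mm512_mask_add_round_ps(w, u, a, b,
+                    _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); */
+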
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_sub_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, __W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_subss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_sub_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
-#define _mm_sub_round_ss(A, B, R) __extension__ ({ \
+#define _mm_sub_round_ss(A, B, R) \
(__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_sub_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_sub_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_sub_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_sub_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_sub_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, __W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_subsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_sub_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
-#define _mm_sub_round_sd(A, B, R) __extension__ ({ \
+#define _mm_sub_round_sd(A, B, R) \
(__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_sub_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_sub_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_sub_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_sub_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_sub_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_sub_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_sub_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_sub_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-#define _mm512_sub_round_pd(A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
-
-#define _mm512_mask_sub_round_pd(W, U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
-
-#define _mm512_maskz_sub_round_pd(U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
-
-#define _mm512_sub_round_ps(A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
-
-#define _mm512_mask_sub_round_ps(W, U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); });
-
-#define _mm512_maskz_sub_round_ps(U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); });
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+#define _mm512_sub_round_pd(A, B, R) \
+ (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
+
+#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
+                                       (__v8df)(__m512d)(W))
+
+#define _mm512_maskz_sub_round_pd(U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
+                                       (__v8df)_mm512_setzero_pd())
+
+#define _mm512_sub_round_ps(A, B, R) \
+ (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
+
+#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
+                                      (__v16sf)(__m512)(W))
+
+#define _mm512_maskz_sub_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
+                                      (__v16sf)_mm512_setzero_ps())
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_mul_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, __W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_mulss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_mul_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
-#define _mm_mul_round_ss(A, B, R) __extension__ ({ \
+#define _mm_mul_round_ss(A, B, R) \
(__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_mul_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_mul_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_mul_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_mul_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_mul_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, __W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_mulsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_mul_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
-#define _mm_mul_round_sd(A, B, R) __extension__ ({ \
+#define _mm_mul_round_sd(A, B, R) \
(__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_mul_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_mul_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_mul_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_mul_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_mul_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_mul_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_mul_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_mul_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-#define _mm512_mul_round_pd(A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
-
-#define _mm512_mask_mul_round_pd(W, U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
-
-#define _mm512_maskz_mul_round_pd(U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
-
-#define _mm512_mul_round_ps(A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
-
-#define _mm512_mask_mul_round_ps(W, U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); });
-
-#define _mm512_maskz_mul_round_ps(U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); });
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+#define _mm512_mul_round_pd(A, B, R) \
+ (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
+
+#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
+                                       (__v8df)(__m512d)(W))
+
+#define _mm512_maskz_mul_round_pd(U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
+                                       (__v8df)_mm512_setzero_pd())
+
+#define _mm512_mul_round_ps(A, B, R) \
+ (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
+
+#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
+                                      (__v16sf)(__m512)(W))
+
+#define _mm512_maskz_mul_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
+                                      (__v16sf)_mm512_setzero_ps())
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_div_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, __W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_divss_round_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) _mm_setzero_ps (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_div_ss(__A, __B);
+ return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
-#define _mm_div_round_ss(A, B, R) __extension__ ({ \
+#define _mm_div_round_ss(A, B, R) \
(__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_div_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_div_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_div_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_div_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_div_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, __W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_divsd_round_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) _mm_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ __A = _mm_div_sd(__A, __B);
+ return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
-#define _mm_div_round_sd(A, B, R) __extension__ ({ \
+#define _mm_div_round_sd(A, B, R) \
(__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_div_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_div_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_div_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_div_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_div_pd(__m512d __a, __m512d __b)
{
return (__m512d)((__v8df)__a/(__v8df)__b);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_div_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_div_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_div_ps(__m512 __a, __m512 __b)
{
return (__m512)((__v16sf)__a/(__v16sf)__b);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_div_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_div_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-#define _mm512_div_round_pd(A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
-
-#define _mm512_mask_div_round_pd(W, U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R)); })
-
-#define _mm512_maskz_div_round_pd(U, A, B, R) __extension__ ({ \
- (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
-
-#define _mm512_div_round_ps(A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
-
-#define _mm512_mask_div_round_ps(W, U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R)); });
-
-#define _mm512_maskz_div_round_ps(U, A, B, R) __extension__ ({ \
- (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); });
-
-#define _mm512_roundscale_ps(A, B) __extension__ ({ \
+#define _mm512_div_round_pd(A, B, R) \
+ (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
+
+#define _mm512_mask_div_round_pd(W, U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_div_round_pd((A), (B), (R)), \
+                                      (__v8df)(__m512d)(W))
+
+#define _mm512_maskz_div_round_pd(U, A, B, R) \
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_div_round_pd((A), (B), (R)), \
+                                      (__v8df)_mm512_setzero_pd())
+
+#define _mm512_div_round_ps(A, B, R) \
+ (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
+
+#define _mm512_mask_div_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
+                                      (__v16sf)(__m512)(W))
+
+#define _mm512_maskz_div_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
+                                     (__v16sf)_mm512_setzero_ps())
+
+#define _mm512_roundscale_ps(A, B) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)(__m512)(A), (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_roundscale_ps(A, B, C, imm) __extension__ ({\
+#define _mm512_mask_roundscale_ps(A, B, C, imm) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
(__v16sf)(__m512)(A), (__mmask16)(B), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_roundscale_ps(A, B, imm) __extension__ ({\
+#define _mm512_maskz_roundscale_ps(A, B, imm) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)(A), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) __extension__ ({ \
+#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
(__v16sf)(__m512)(A), (__mmask16)(B), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) __extension__ ({ \
+#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(A), (int)(R)); })
+ (__mmask16)(A), (int)(R))
-#define _mm512_roundscale_round_ps(A, imm, R) __extension__ ({ \
+#define _mm512_roundscale_round_ps(A, imm, R) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
(__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_roundscale_pd(A, B) __extension__ ({ \
+#define _mm512_roundscale_pd(A, B) \
(__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)(__m512d)(A), (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_roundscale_pd(A, B, C, imm) __extension__ ({\
+#define _mm512_mask_roundscale_pd(A, B, C, imm) \
(__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
(__v8df)(__m512d)(A), (__mmask8)(B), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_roundscale_pd(A, B, imm) __extension__ ({\
+#define _mm512_maskz_roundscale_pd(A, B, imm) \
(__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)(A), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) __extension__ ({ \
+#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
(__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
(__v8df)(__m512d)(A), (__mmask8)(B), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) __extension__ ({ \
+#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
(__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(A), (int)(R)); })
+ (__mmask8)(A), (int)(R))
-#define _mm512_roundscale_round_pd(A, imm, R) __extension__ ({ \
+#define _mm512_roundscale_round_pd(A, imm, R) \
(__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
(__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_fmadd_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_fmadd_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), (__mmask8)-1, \
- (int)(R)); })
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_fmsub_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_fmsub_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_fnmadd_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_fnmadd_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), (__mmask8)-1, \
- (int)(R)); })
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_fnmsub_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_fnmsub_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
@@ -2716,7 +2527,7 @@ _mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
@@ -2726,7 +2537,7 @@ _mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
@@ -2736,7 +2547,7 @@ _mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
@@ -2746,7 +2557,7 @@ _mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
@@ -2756,7 +2567,7 @@ _mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
@@ -2766,7 +2577,7 @@ _mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
@@ -2776,17 +2587,17 @@ _mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
- (__v8df) __B,
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
(__v8df) __C,
(__mmask8) -1,
_MM_FROUND_CUR_DIRECTION);
}
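+/* The negation moved from __A to __B: -(a)*b + c == a*(-b) + c, so results
+   are identical; presumably negating the multiplier this way folds better in
+   the backend. Worked lane: a=2.0, b=3.0, c=10.0 gives
+   -(2*3)+10 = 2*(-3)+10 = 4.0 either way. */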
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
@@ -2796,7 +2607,7 @@ _mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
@@ -2806,17 +2617,17 @@ _mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddpd512_mask (-(__v8df) __A,
- (__v8df) __B,
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
-(__v8df) __C,
(__mmask8) -1,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
@@ -2826,91 +2637,91 @@ _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_fmadd_round_ps(A, B, C, R) __extension__ ({ \
+#define _mm512_fmadd_round_ps(A, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), (__mmask16)-1, \
- (int)(R)); })
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
(__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_fmsub_round_ps(A, B, C, R) __extension__ ({ \
+#define _mm512_fmsub_round_ps(A, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_fnmadd_round_ps(A, B, C, R) __extension__ ({ \
- (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), (__mmask16)-1, \
- (int)(R)); })
+#define _mm512_fnmadd_round_ps(A, B, C, R) \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
(__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_fnmsub_round_ps(A, B, C, R) __extension__ ({ \
- (__m512)__builtin_ia32_vfmaddps512_mask(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
+#define _mm512_fnmsub_round_ps(A, B, C, R) \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
@@ -2920,7 +2731,7 @@ _mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
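/* Every function below also moves from __DEFAULT_FN_ATTRS to a new
   __DEFAULT_FN_ATTRS512 spelling. The macro itself is defined earlier in the
   header, outside this hunk; the split presumably lets 512-bit intrinsics
   advertise a minimum required vector width to the prefer-vector-width
   machinery. A hedged sketch of what such a definition could look like
   (assumed, not copied from the header): */
#define __DEFAULT_FN_ATTRS512_SKETCH                                         \
  __attribute__((__always_inline__, __nodebug__, __target__("avx512f"),      \
                 __min_vector_width__(512)))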
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
@@ -2930,7 +2741,7 @@ _mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
@@ -2940,7 +2751,7 @@ _mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
@@ -2950,7 +2761,7 @@ _mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
@@ -2960,7 +2771,7 @@ _mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
@@ -2970,7 +2781,7 @@ _mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
@@ -2980,17 +2791,17 @@ _mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
- (__v16sf) __B,
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
(__v16sf) __C,
(__mmask16) -1,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
@@ -3000,7 +2811,7 @@ _mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
@@ -3010,17 +2821,17 @@ _mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddps512_mask (-(__v16sf) __A,
- (__v16sf) __B,
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
-(__v16sf) __C,
(__mmask16) -1,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
@@ -3030,96 +2841,96 @@ _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
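/* The fnmadd/fnmsub forms above no longer use dedicated vfnmadd/vfnmsub
   builtins; they reuse the single fmadd builtin with negated operands, via
   the exact IEEE identities -(a*b) + c == a*(-b) + c and
   -(a*b) - c == a*(-b) + (-c). A scalar sketch of the same folding
   (illustrative; the real intrinsics are 16-wide and take a rounding mode): */
#include <math.h>
static double fnmadd_via_fmadd(double a, double b, double c) {
  return fma(a, -b, c);   // == -(a*b) + c, single rounding
}
static double fnmsub_via_fmadd(double a, double b, double c) {
  return fma(a, -b, -c);  // == -(a*b) - c, single rounding
}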
-#define _mm512_fmaddsub_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_fmaddsub_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_fmsubadd_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_fmsubadd_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
-(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
@@ -3129,7 +2940,7 @@ _mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
@@ -3139,7 +2950,7 @@ _mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
@@ -3149,56 +2960,56 @@ _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_fmaddsub_round_ps(A, B, C, R) __extension__ ({ \
+#define _mm512_fmaddsub_round_ps(A, B, C, R) \
(__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
(__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
(__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_fmsubadd_round_ps(A, B, C, R) __extension__ ({ \
+#define _mm512_fmsubadd_round_ps(A, B, C, R) \
(__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) __extension__ ({ \
+#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
(__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
-(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
@@ -3208,7 +3019,7 @@ _mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
@@ -3218,7 +3029,7 @@ _mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
@@ -3228,7 +3039,7 @@ _mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
@@ -3238,7 +3049,7 @@ _mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
@@ -3248,7 +3059,7 @@ _mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
@@ -3258,7 +3069,7 @@ _mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
@@ -3268,337 +3079,309 @@ _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
_MM_FROUND_CUR_DIRECTION);
}
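/* fmaddsub alternates the accumulator sign across lanes: even-indexed
   elements compute a*b - c and odd-indexed elements a*b + c. Negating C
   swaps the two roles per lane, which is how the fmsubadd forms above reuse
   the vfmaddsub builtins with -(C). A scalar reference model (illustrative): */
#include <stddef.h>
static void fmaddsub_model(double *dst, const double *a, const double *b,
                           const double *c, size_t n) {
  for (size_t i = 0; i < n; ++i)
    dst[i] = (i & 1) ? a[i] * b[i] + c[i]   // odd lane: add
                     : a[i] * b[i] - c[i];  // even lane: subtract
}
// fmsubadd is the same loop with c negated: even lanes add, odd lanes subtract.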
-#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
(__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
(__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
-
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
(__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) __extension__ ({ \
+#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
(__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) __extension__ ({ \
- (__m512d)__builtin_ia32_vfnmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfnmaddpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) __extension__ ({ \
- (__m512)__builtin_ia32_vfnmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfnmaddps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) __extension__ ({ \
- (__m512d)__builtin_ia32_vfnmsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
-#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) __extension__ ({ \
- (__m512d)__builtin_ia32_vfnmsubpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R)); })
+#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
+ (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfnmsubpd512_mask ((__v8df) __A,
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
+{
+ return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A,
(__v8df) __B,
(__v8df) __C,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
- return (__m512d) __builtin_ia32_vfnmsubpd512_mask3 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
-}
+#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
+
-#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) __extension__ ({ \
- (__m512)__builtin_ia32_vfnmsubps512_mask((__v16sf)(__m512)(A), \
+#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
+ (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) __extension__ ({ \
- (__m512)__builtin_ia32_vfnmsubps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R)); })
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfnmsubps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_vfnmsubps512_mask3 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
/* Vector permutations */
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
- /* idx */ ,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I,
+ (__v16si) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I,
+ __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
+ (__v16si)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_permutex2var_epi32 (__m512i __A, __mmask16 __U,
- __m512i __I, __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2vard512_mask ((__v16si) __I
- /* idx */ ,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
+ (__v16si)__I);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_permutex2var_epi32 (__mmask16 __U, __m512i __A,
- __m512i __I, __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2vard512_maskz ((__v16si) __I
- /* idx */ ,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
- /* idx */ ,
- (__v8di) __A,
- (__v8di) __B,
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I,
+ (__v8di) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_permutex2var_epi64 (__m512i __A, __mmask8 __U, __m512i __I,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varq512_mask ((__v8di) __I
- /* idx */ ,
- (__v8di) __A,
- (__v8di) __B,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
+ (__v8di)__A);
}
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U,
+ __m512i __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
+ (__v8di)__I);
+}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_permutex2var_epi64 (__mmask8 __U, __m512i __A,
- __m512i __I, __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varq512_maskz ((__v8di) __I
- /* idx */ ,
- (__v8di) __A,
- (__v8di) __B,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
+ (__v8di)_mm512_setzero_si512());
}
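/* The masked permutex2var forms are rebuilt here as the unmasked two-source
   shuffle plus an explicit per-lane select: keep the shuffle result where the
   mask bit is 1 and a passthrough lane where it is 0 (__A for mask, __I for
   the new mask2 variant, zero for maskz). A scalar sketch of the select step
   performed by __builtin_ia32_selectd_512 above (illustrative): */
#include <stdint.h>
static void select_epi32_model(int32_t dst[16], uint16_t mask,
                               const int32_t result[16],
                               const int32_t passthru[16]) {
  for (int i = 0; i < 16; ++i)
    dst[i] = ((mask >> i) & 1) ? result[i]     // mask bit set: new value
                               : passthru[i];  // mask bit clear: passthrough
}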
-#define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(A), \
- ((int)(I) & 0x7) + 0, \
- ((int)(I) & 0x7) + 1, \
- ((int)(I) & 0x7) + 2, \
- ((int)(I) & 0x7) + 3, \
- ((int)(I) & 0x7) + 4, \
- ((int)(I) & 0x7) + 5, \
- ((int)(I) & 0x7) + 6, \
- ((int)(I) & 0x7) + 7); })
+#define _mm512_alignr_epi64(A, B, I) \
+ (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I))
-#define _mm512_mask_alignr_epi64(W, U, A, B, imm) __extension__({\
+#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
- (__v8di)(__m512i)(W)); })
+ (__v8di)(__m512i)(W))
-#define _mm512_maskz_alignr_epi64(U, A, B, imm) __extension__({\
+#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512()); })
-
-#define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(A), \
- ((int)(I) & 0xf) + 0, \
- ((int)(I) & 0xf) + 1, \
- ((int)(I) & 0xf) + 2, \
- ((int)(I) & 0xf) + 3, \
- ((int)(I) & 0xf) + 4, \
- ((int)(I) & 0xf) + 5, \
- ((int)(I) & 0xf) + 6, \
- ((int)(I) & 0xf) + 7, \
- ((int)(I) & 0xf) + 8, \
- ((int)(I) & 0xf) + 9, \
- ((int)(I) & 0xf) + 10, \
- ((int)(I) & 0xf) + 11, \
- ((int)(I) & 0xf) + 12, \
- ((int)(I) & 0xf) + 13, \
- ((int)(I) & 0xf) + 14, \
- ((int)(I) & 0xf) + 15); })
-
-#define _mm512_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({\
+ (__v8di)_mm512_setzero_si512())
+
+#define _mm512_alignr_epi32(A, B, I) \
+ (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I))
+
+#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
- (__v16si)(__m512i)(W)); })
+ (__v16si)(__m512i)(W))
-#define _mm512_maskz_alignr_epi32(U, A, B, imm) __extension__({\
+#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512()); })
+ (__v16si)_mm512_setzero_si512())
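/* The shufflevector expansion removed above spells out what alignr does:
   concatenate the sources with B in the low lanes and A in the high lanes,
   shift right by I lanes, and keep the low half. The rewrite moves that into
   a single align builtin. A scalar model for the epi64 case (illustrative): */
#include <stdint.h>
static void alignr_epi64_model(int64_t dst[8], const int64_t a[8],
                               const int64_t b[8], int imm) {
  int64_t concat[16];
  for (int i = 0; i < 8; ++i) { concat[i] = b[i]; concat[i + 8] = a[i]; }
  for (int i = 0; i < 8; ++i)
    dst[i] = concat[(imm & 0x7) + i];  // shift right by imm & 0x7 lanes
}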
/* Vector Extract */
-#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)_mm512_undefined_pd(), \
- ((I) & 1) ? 4 : 0, \
- ((I) & 1) ? 5 : 1, \
- ((I) & 1) ? 6 : 2, \
- ((I) & 1) ? 7 : 3); })
-
-#define _mm512_mask_extractf64x4_pd(W, U, A, imm) __extension__ ({\
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm512_extractf64x4_pd((A), (imm)), \
- (__v4df)(W)); })
-
-#define _mm512_maskz_extractf64x4_pd(U, A, imm) __extension__ ({\
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm512_extractf64x4_pd((A), (imm)), \
- (__v4df)_mm256_setzero_pd()); })
-
-#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
- (__m128)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- 0 + ((I) & 0x3) * 4, \
- 1 + ((I) & 0x3) * 4, \
- 2 + ((I) & 0x3) * 4, \
- 3 + ((I) & 0x3) * 4); })
-
-#define _mm512_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({\
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm512_extractf32x4_ps((A), (imm)), \
- (__v4sf)(W)); })
-
-#define _mm512_maskz_extractf32x4_ps(U, A, imm) __extension__ ({\
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm512_extractf32x4_ps((A), (imm)), \
- (__v4sf)_mm_setzero_ps()); })
+#define _mm512_extractf64x4_pd(A, I) \
+ (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+ (__v4df)_mm256_undefined_pd(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
+ (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
+ (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U))
+
+#define _mm512_extractf32x4_ps(A, I) \
+ (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
+ (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
+ (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U))
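/* The removed shufflevector form shows the extract semantics directly:
   extractf64x4 returns lanes 0..3 of the source when (I & 1) == 0 and lanes
   4..7 otherwise, i.e. the low or high 256-bit half. The new extract builtin
   also folds the merge/zero masking in, replacing the separate select.
   A scalar model (illustrative): */
static void extractf64x4_model(double dst[4], const double src[8], int imm) {
  int base = (imm & 1) ? 4 : 0;  // high or low 256-bit half
  for (int i = 0; i < 4; ++i)
    dst[i] = src[base + i];
}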
/* Vector Blend */
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
{
return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
@@ -3606,7 +3389,7 @@ _mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
(__v8df) __A);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
{
return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
@@ -3614,7 +3397,7 @@ _mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
(__v16sf) __A);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
{
return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
@@ -3622,7 +3405,7 @@ _mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
(__v8di) __A);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
{
return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
@@ -3632,15 +3415,15 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
/* Compare */
-#define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \
+#define _mm512_cmp_round_ps_mask(A, B, P, R) \
(__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(P), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \
+#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
(__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), (int)(P), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
#define _mm512_cmp_ps_mask(A, B, P) \
_mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -3687,15 +3470,15 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
#define _mm512_mask_cmpord_ps_mask(k, A, B) \
_mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
-#define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
+#define _mm512_cmp_round_pd_mask(A, B, P, R) \
(__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(P), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \
+#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
(__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(P), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
#define _mm512_cmp_pd_mask(A, B, P) \
_mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -3744,23 +3527,23 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
/* Conversion */
-#define _mm512_cvtt_roundps_epu32(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundps_epu32(A, R) \
(__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_undefined_epi32(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
(__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
(__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
(__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epu32(__m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
@@ -3770,7 +3553,7 @@ _mm512_cvttps_epu32(__m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
@@ -3779,7 +3562,7 @@ _mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
@@ -3788,70 +3571,65 @@ _mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \
+#define _mm512_cvt_roundepi32_ps(A, R) \
(__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
(__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
(__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \
+#define _mm512_cvt_roundepu32_ps(A, R) \
(__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
(__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
(__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_cvtepu32_ps (__m512i __A)
{
- return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
- (__v16sf) _mm512_undefined_ps (),
- (__mmask16) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_convertvector((__v16su)__A, __v16sf);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
{
- return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_cvtepu32_ps(__A),
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
{
- return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A,
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_cvtepu32_ps(__A),
+ (__v16sf)_mm512_setzero_ps());
}
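/* For the default rounding direction, cvtepu32_ps here (and cvtepi32_ps
   below) now uses __builtin_convertvector, a plain element-wise cast between
   vector types of the same length, with masking layered on as a separate
   select as above. A minimal standalone use, assuming Clang's vector
   extensions (sketch types, not from the header): */
typedef unsigned int __u32x4_sketch __attribute__((vector_size(16)));
typedef float __f32x4_sketch __attribute__((vector_size(16)));
static __f32x4_sketch cvt_u32x4_sketch(__u32x4_sketch v) {
  return __builtin_convertvector(v, __f32x4_sketch);  // per-lane (float)v[i]
}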
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepi32_pd(__m256i __A)
{
return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -3859,7 +3637,7 @@ _mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -3867,52 +3645,47 @@ _mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepi32lo_pd(__m512i __A)
{
return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
{
return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_cvtepi32_ps (__m512i __A)
{
- return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
- (__v16sf) _mm512_undefined_ps (),
- (__mmask16) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_convertvector((__v16si)__A, __v16sf);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
{
- return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_cvtepi32_ps(__A),
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
{
- return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_cvtepi32_ps(__A),
+ (__v16sf)_mm512_setzero_ps());
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepu32_pd(__m256i __A)
{
return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -3920,7 +3693,7 @@ _mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -3928,34 +3701,34 @@ _mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepu32lo_pd(__m512i __A)
{
return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
{
return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
}
-#define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
+#define _mm512_cvt_roundpd_ps(A, R) \
(__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
(__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
(__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_cvt_roundpd_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
(__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_cvtpd_ps (__m512d __A)
{
return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
@@ -3964,7 +3737,7 @@ _mm512_cvtpd_ps (__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
{
return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
@@ -3973,7 +3746,7 @@ _mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
{
return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
@@ -3982,7 +3755,7 @@ _mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_cvtpd_pslo (__m512d __A)
{
return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A),
@@ -3990,7 +3763,7 @@ _mm512_cvtpd_pslo (__m512d __A)
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
{
return (__m512) __builtin_shufflevector (
@@ -4000,53 +3773,53 @@ _mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
-#define _mm512_cvt_roundps_ph(A, I) __extension__ ({ \
+#define _mm512_cvt_roundps_ph(A, I) \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)_mm256_undefined_si256(), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm512_mask_cvt_roundps_ph(U, W, A, I) __extension__ ({ \
+#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)(__m256i)(U), \
- (__mmask16)(W)); })
+ (__mmask16)(W))
-#define _mm512_maskz_cvt_roundps_ph(W, A, I) __extension__ ({ \
+#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(W)); })
+ (__mmask16)(W))
-#define _mm512_cvtps_ph(A, I) __extension__ ({ \
+#define _mm512_cvtps_ph(A, I) \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)_mm256_setzero_si256(), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm512_mask_cvtps_ph(U, W, A, I) __extension__ ({ \
+#define _mm512_mask_cvtps_ph(U, W, A, I) \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)(__m256i)(U), \
- (__mmask16)(W)); })
+ (__mmask16)(W))
-#define _mm512_maskz_cvtps_ph(W, A, I) __extension__ ({\
+#define _mm512_maskz_cvtps_ph(W, A, I) \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(W)); })
+ (__mmask16)(W))
-#define _mm512_cvt_roundph_ps(A, R) __extension__ ({ \
+#define _mm512_cvt_roundph_ps(A, R) \
(__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
(__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvt_roundph_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
(__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvt_roundph_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
(__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_cvtph_ps(__m256i __A)
{
return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
@@ -4056,7 +3829,7 @@ _mm512_cvtph_ps(__m256i __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
{
return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
@@ -4065,7 +3838,7 @@ _mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
{
return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
@@ -4074,22 +3847,22 @@ _mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundpd_epi32(A, R) \
(__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
(__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
(__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
(__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline __m256i __DEFAULT_FN_ATTRS
+static __inline __m256i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epi32(__m512d __a)
{
return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
@@ -4098,7 +3871,7 @@ _mm512_cvttpd_epi32(__m512d __a)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
@@ -4107,7 +3880,7 @@ _mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
@@ -4116,22 +3889,22 @@ _mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \
+#define _mm512_cvtt_roundps_epi32(A, R) \
(__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
(__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
(__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
(__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epi32(__m512 __a)
{
return (__m512i)
@@ -4140,7 +3913,7 @@ _mm512_cvttps_epi32(__m512 __a)
(__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
@@ -4149,7 +3922,7 @@ _mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
@@ -4158,22 +3931,22 @@ _mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \
+#define _mm512_cvt_roundps_epi32(A, R) \
(__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
(__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
(__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvt_roundps_epi32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
(__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epi32 (__m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
@@ -4182,7 +3955,7 @@ _mm512_cvtps_epi32 (__m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
@@ -4191,7 +3964,7 @@ _mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
@@ -4201,22 +3974,22 @@ _mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \
+#define _mm512_cvt_roundpd_epi32(A, R) \
(__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
(__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
(__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
(__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epi32 (__m512d __A)
{
return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
@@ -4226,7 +3999,7 @@ _mm512_cvtpd_epi32 (__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
@@ -4235,7 +4008,7 @@ _mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
@@ -4245,32 +4018,32 @@ _mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \
+#define _mm512_cvt_roundps_epu32(A, R) \
(__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
(__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
(__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_cvt_roundps_epu32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
(__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
(__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epu32 ( __m512 __A)
{
- return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,\
+ return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si)\
+                  (__v16si)
-                  _mm512_undefined_epi32 (),\
+                  _mm512_undefined_epi32 (),
-                  (__mmask16) -1,\
+                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);\
+                  _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
@@ -4279,7 +4052,7 @@ _mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A)
{
return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
@@ -4289,22 +4062,22 @@ _mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \
+#define _mm512_cvt_roundpd_epu32(A, R) \
(__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
(__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
(__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epu32 (__m512d __A)
{
return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
@@ -4314,7 +4087,7 @@ _mm512_cvtpd_epu32 (__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
@@ -4323,7 +4096,7 @@ _mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
@@ -4333,13 +4106,13 @@ _mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ double __DEFAULT_FN_ATTRS
+static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_cvtsd_f64(__m512d __a)
{
return __a[0];
}
-static __inline__ float __DEFAULT_FN_ATTRS
+static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_cvtss_f32(__m512 __a)
{
return __a[0];
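_mm512_cvtsd_f64 and _mm512_cvtss_f32 read the low element with plain vector subscripting, a clang/GCC vector extension, so no extract builtin is needed. For instance:

#include <immintrin.h>

static double low_lane(__m512d v) {
  return v[0];   /* same value as _mm512_cvtsd_f64(v) */
}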
@@ -4347,14 +4120,14 @@ _mm512_cvtss_f32(__m512 __a)
/* Unpack and Interleave */
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_unpackhi_pd(__m512d __a, __m512d __b)
{
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -4362,7 +4135,7 @@ _mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -4370,14 +4143,14 @@ _mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_unpacklo_pd(__m512d __a, __m512d __b)
{
return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -4385,7 +4158,7 @@ _mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
@@ -4393,7 +4166,7 @@ _mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
(__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_unpackhi_ps(__m512 __a, __m512 __b)
{
return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
@@ -4403,7 +4176,7 @@ _mm512_unpackhi_ps(__m512 __a, __m512 __b)
2+12, 18+12, 3+12, 19+12);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
@@ -4411,7 +4184,7 @@ _mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
@@ -4419,7 +4192,7 @@ _mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)_mm512_setzero_ps());
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_unpacklo_ps(__m512 __a, __m512 __b)
{
return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
@@ -4429,7 +4202,7 @@ _mm512_unpacklo_ps(__m512 __a, __m512 __b)
0+12, 16+12, 1+12, 17+12);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
@@ -4437,7 +4210,7 @@ _mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
@@ -4445,7 +4218,7 @@ _mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
@@ -4455,7 +4228,7 @@ _mm512_unpackhi_epi32(__m512i __A, __m512i __B)
2+12, 18+12, 3+12, 19+12);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
@@ -4463,7 +4236,7 @@ _mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
@@ -4471,7 +4244,7 @@ _mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
@@ -4481,7 +4254,7 @@ _mm512_unpacklo_epi32(__m512i __A, __m512i __B)
0+12, 16+12, 1+12, 17+12);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
@@ -4489,7 +4262,7 @@ _mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
@@ -4497,14 +4270,14 @@ _mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
@@ -4512,7 +4285,7 @@ _mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
@@ -4520,14 +4293,14 @@ _mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
@@ -4535,7 +4308,7 @@ _mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
@@ -4546,16 +4319,16 @@ _mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
/* SIMD load ops */
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_loadu_si512 (void const *__P)
{
- return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ struct __loadu_si512 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_si512*)__P)->__v;
}
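_mm512_loadu_si512 now compiles to a load through a one-member wrapper struct instead of a masked-load builtin. __packed__ lowers the assumed alignment to 1 byte and __may_alias__ exempts the access from strict-aliasing analysis, which together is exactly the contract of an unaligned loadu. The same idiom in isolation, with a hypothetical helper name:

#include <immintrin.h>

static __m512i load_unaligned_512(const void *p) {
  struct wrapper {
    __m512i v;
  } __attribute__((__packed__, __may_alias__));
  /* a plain member load; the compiler may not assume 64-byte alignment */
  return ((const struct wrapper *)p)->v;
}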
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
@@ -4564,7 +4337,7 @@ _mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
@@ -4573,7 +4346,7 @@ _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
@@ -4581,7 +4354,7 @@ _mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
@@ -4590,7 +4363,7 @@ _mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
{
return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
@@ -4598,7 +4371,7 @@ _mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
{
return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
@@ -4607,7 +4380,7 @@ _mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
{
return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
@@ -4615,7 +4388,7 @@ _mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
{
return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
@@ -4624,7 +4397,7 @@ _mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_loadu_pd(void const *__p)
{
struct __loadu_pd {
@@ -4633,7 +4406,7 @@ _mm512_loadu_pd(void const *__p)
return ((struct __loadu_pd*)__p)->__v;
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_loadu_ps(void const *__p)
{
struct __loadu_ps {
@@ -4642,16 +4415,13 @@ _mm512_loadu_ps(void const *__p)
return ((struct __loadu_ps*)__p)->__v;
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_load_ps(void const *__p)
{
- return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) -1);
+ return *(__m512*)__p;
}
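The aligned forms, by contrast, become direct dereferences: *(__m512*)__p carries the natural 64-byte alignment of __m512, so passing a misaligned pointer to _mm512_load_ps is undefined behavior, while the loadu forms above stay safe on any pointer. For example:

#include <immintrin.h>

/* 64-byte aligned buffer: valid for the aligned load forms */
static float buf[16] __attribute__((aligned(64)));

static __m512 read_buf(void) {
  return _mm512_load_ps(buf);
}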
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
{
return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
@@ -4659,7 +4429,7 @@ _mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
{
return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
@@ -4668,16 +4438,13 @@ _mm512_maskz_load_ps(__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_load_pd(void const *__p)
{
- return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) -1);
+ return *(__m512d*)__p;
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
{
return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
@@ -4685,7 +4452,7 @@ _mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
{
return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
@@ -4694,19 +4461,19 @@ _mm512_maskz_load_pd(__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_load_si512 (void const *__P)
{
return *(__m512i *) __P;
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_load_epi32 (void const *__P)
{
return *(__m512i *) __P;
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_load_epi64 (void const *__P)
{
return *(__m512i *) __P;
@@ -4714,90 +4481,98 @@ _mm512_load_epi64 (void const *__P)
/* SIMD store ops */
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
{
__builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
(__mmask8) __U);
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_si512 (void *__P, __m512i __A)
{
- __builtin_ia32_storedqusi512_mask ((int *) __P, (__v16si) __A,
- (__mmask16) -1);
+ struct __storeu_si512 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si512*)__P)->__v = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
{
__builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
(__mmask16) __U);
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
{
__builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_pd(void *__P, __m512d __A)
{
- __builtin_ia32_storeupd512_mask((double *)__P, (__v8df)__A, (__mmask8)-1);
+ struct __storeu_pd {
+ __m512d __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_pd*)__P)->__v = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
{
__builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
(__mmask16) __U);
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_storeu_ps(void *__P, __m512 __A)
{
- __builtin_ia32_storeups512_mask((float *)__P, (__v16sf)__A, (__mmask16)-1);
+ struct __storeu_ps {
+ __m512 __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ps*)__P)->__v = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
{
__builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_store_pd(void *__P, __m512d __A)
{
*(__m512d*)__P = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
{
__builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
(__mmask16) __U);
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_store_ps(void *__P, __m512 __A)
{
*(__m512*)__P = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_store_si512 (void *__P, __m512i __A)
{
*(__m512i *) __P = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_store_epi32 (void *__P, __m512i __A)
{
*(__m512i *) __P = __A;
}
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_store_epi64 (void *__P, __m512i __A)
{
*(__m512i *) __P = __A;
@@ -4805,7 +4580,7 @@ _mm512_store_epi64 (void *__P, __m512i __A)
/* Mask ops */
-static __inline __mmask16 __DEFAULT_FN_ATTRS
+static __inline __mmask16 __DEFAULT_FN_ATTRS512
_mm512_knot(__mmask16 __M)
{
return __builtin_ia32_knothi(__M);
@@ -4913,7 +4688,7 @@ _mm512_knot(__mmask16 __M)
#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \
_mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepi8_epi32(__m128i __A)
{
/* This function always performs a signed extension, but __v16qi is a char
@@ -4921,7 +4696,7 @@ _mm512_cvtepi8_epi32(__m128i __A)
return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
}
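The widening conversions are expressed with __builtin_convertvector, which widens lane by lane; whether it sign- or zero-extends is decided by the signedness of the source element type, which is why the cast goes through __v16qs (explicitly signed char) rather than the plain-char __v16qi. A small-scale sketch of the same mechanism:

typedef signed char v4qs __attribute__((__vector_size__(4)));
typedef int         v4si __attribute__((__vector_size__(16)));

static v4si widen_signed(v4qs x) {
  return __builtin_convertvector(x, v4si);  /* sign-extends each lane */
}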
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -4929,7 +4704,7 @@ _mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -4937,7 +4712,7 @@ _mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepi8_epi64(__m128i __A)
{
/* This function always performs a signed extension, but __v16qi is a char
@@ -4945,7 +4720,7 @@ _mm512_cvtepi8_epi64(__m128i __A)
return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -4953,7 +4728,7 @@ _mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -4961,13 +4736,13 @@ _mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepi32_epi64(__m256i __X)
{
return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -4975,7 +4750,7 @@ _mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -4983,13 +4758,13 @@ _mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepi16_epi32(__m256i __A)
{
return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -4997,7 +4772,7 @@ _mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5005,13 +4780,13 @@ _mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
(__v16si)_mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepi16_epi64(__m128i __A)
{
return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5019,7 +4794,7 @@ _mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5027,13 +4802,13 @@ _mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepu8_epi32(__m128i __A)
{
return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5041,7 +4816,7 @@ _mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5049,13 +4824,13 @@ _mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepu8_epi64(__m128i __A)
{
return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5063,7 +4838,7 @@ _mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5071,13 +4846,13 @@ _mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepu32_epi64(__m256i __X)
{
return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5085,7 +4860,7 @@ _mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5093,13 +4868,13 @@ _mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepu16_epi32(__m256i __A)
{
return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5107,7 +4882,7 @@ _mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5115,13 +4890,13 @@ _mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepu16_epi64(__m128i __A)
{
return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5129,7 +4904,7 @@ _mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5137,228 +4912,195 @@ _mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rorv_epi32 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_rorv_epi32(__A, __B),
+ (__v16si)__W);
}
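From here on, masked intrinsics are composed as the unmasked operation followed by a per-lane select against the passthrough vector (a zero vector for the maskz forms), replacing the dedicated *_mask builtins. The header calls __builtin_ia32_selectd_512 directly; in user-level terms the same shape looks like this, using _mm512_rorv_epi32 as the example op:

#include <immintrin.h>

static __m512i mask_op(__m512i w, __mmask16 u, __m512i a, __m512i b) {
  __m512i r = _mm512_rorv_epi32(a, b);    /* unmasked operation */
  /* lane i of the result is r[i] if bit i of u is set, else w[i] */
  return _mm512_mask_mov_epi32(w, u, r);
}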
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prorvd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_rorv_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rorv_epi64 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_rorv_epi64(__A, __B),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prorvq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_rorv_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
-#define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epi32_mask(a, b, p) \
(__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
(__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epu32_mask(a, b, p) \
(__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
(__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epi64_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
(__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \
+#define _mm512_cmp_epu64_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
(__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
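Each cmp macro returns one mask bit per lane; the predicate argument is one of the _MM_CMPINT_* enumerators (e.g. _MM_CMPINT_NE in the cmpneq wrappers further up). Typical use:

#include <immintrin.h>

static __mmask16 lanes_below(__m512i a, __m512i b) {
  /* bit i is set iff a[i] < b[i] as signed 32-bit integers */
  return _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_LT);
}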
-#define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
(__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
(__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
(__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
(__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)(m)); })
-
-#define _mm512_rol_epi32(a, b) __extension__ ({ \
- (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
-
-#define _mm512_mask_rol_epi32(W, U, a, b) __extension__ ({ \
- (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
-
-#define _mm512_maskz_rol_epi32(U, a, b) __extension__ ({ \
- (__m512i)__builtin_ia32_prold512_mask((__v16si)(__m512i)(a), (int)(b), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
-
-#define _mm512_rol_epi64(a, b) __extension__ ({ \
- (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
-
-#define _mm512_mask_rol_epi64(W, U, a, b) __extension__ ({ \
- (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
- (__v8di)(__m512i)(W), (__mmask8)(U)); })
-
-#define _mm512_maskz_rol_epi64(U, a, b) __extension__ ({ \
- (__m512i)__builtin_ia32_prolq512_mask((__v8di)(__m512i)(a), (int)(b), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+ (__mmask8)(m))
+
+#define _mm512_rol_epi32(a, b) \
+ (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))
+
+#define _mm512_mask_rol_epi32(W, U, a, b) \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_rol_epi32((a), (b)), \
+ (__v16si)(__m512i)(W))
+
+#define _mm512_maskz_rol_epi32(U, a, b) \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_rol_epi32((a), (b)), \
+ (__v16si)_mm512_setzero_si512())
+
+#define _mm512_rol_epi64(a, b) \
+ (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))
+
+#define _mm512_mask_rol_epi64(W, U, a, b) \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_rol_epi64((a), (b)), \
+ (__v8di)(__m512i)(W))
+
+#define _mm512_maskz_rol_epi64(U, a, b) \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_rol_epi64((a), (b)), \
+ (__v8di)_mm512_setzero_si512())
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rolv_epi32 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_rolv_epi32(__A, __B),
+ (__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prolvd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_rolv_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rolv_epi64 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_rolv_epi64(__A, __B),
+ (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_prolvq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_rolv_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
-#define _mm512_ror_epi32(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
+#define _mm512_ror_epi32(A, B) \
+ (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))
-#define _mm512_mask_ror_epi32(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
+#define _mm512_mask_ror_epi32(W, U, A, B) \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_ror_epi32((A), (B)), \
+ (__v16si)(__m512i)(W))
-#define _mm512_maskz_ror_epi32(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_prord512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+#define _mm512_maskz_ror_epi32(U, A, B) \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_ror_epi32((A), (B)), \
+ (__v16si)_mm512_setzero_si512())
-#define _mm512_ror_epi64(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+#define _mm512_ror_epi64(A, B) \
+ (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))
-#define _mm512_mask_ror_epi64(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)(__m512i)(W), (__mmask8)(U)); })
+#define _mm512_mask_ror_epi64(W, U, A, B) \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_ror_epi64((A), (B)), \
+ (__v8di)(__m512i)(W))
-#define _mm512_maskz_ror_epi64(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_prorq512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+#define _mm512_maskz_ror_epi64(U, A, B) \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_ror_epi64((A), (B)), \
+ (__v8di)_mm512_setzero_si512())
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi32(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5366,20 +5108,20 @@ _mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_slli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi64(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5387,7 +5129,7 @@ _mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5395,13 +5137,13 @@ _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi32(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5409,20 +5151,20 @@ _mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_srli_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi64(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5430,7 +5172,7 @@ _mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5438,7 +5180,7 @@ _mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
@@ -5446,7 +5188,7 @@ _mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
@@ -5455,14 +5197,14 @@ _mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
{
__builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
@@ -5470,7 +5212,7 @@ _mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
(__v16si) __W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
@@ -5478,7 +5220,7 @@ _mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
(__v16si) _mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
@@ -5486,7 +5228,7 @@ _mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
(__v8di) __W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
@@ -5494,7 +5236,7 @@ _mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
(__v8di) _mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
@@ -5502,7 +5244,7 @@ _mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
@@ -5511,21 +5253,21 @@ _mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
{
__builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_movedup_pd (__m512d __A)
{
return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
0, 0, 2, 2, 4, 4, 6, 6);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
@@ -5533,7 +5275,7 @@ _mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
@@ -5541,179 +5283,179 @@ _mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
(__v8df)_mm512_setzero_pd());
}
-#define _mm512_fixupimm_round_pd(A, B, C, imm, R) __extension__ ({ \
+#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
(__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) __extension__ ({ \
+#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
(__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+#define _mm512_fixupimm_pd(A, B, C, imm) \
(__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
(__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) __extension__ ({ \
+#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
(__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8di)(__m512i)(C), \
(int)(imm), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
(__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8di)(__m512i)(C), \
(int)(imm), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_fixupimm_round_ps(A, B, C, imm, R) __extension__ ({ \
+#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
(__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) __extension__ ({ \
+#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
(__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+#define _mm512_fixupimm_ps(A, B, C, imm) \
(__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16si)(__m512i)(C), (int)(imm), \
(__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
(__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16si)(__m512i)(C), (int)(imm), \
(__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) __extension__ ({ \
+#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
(__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16si)(__m512i)(C), \
(int)(imm), (__mmask16)(U), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
(__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16si)(__m512i)(C), \
(int)(imm), (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_fixupimm_round_sd(A, B, C, imm, R) __extension__ ({ \
+#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
(__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) __extension__ ({ \
+#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
(__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_fixupimm_sd(A, B, C, imm) __extension__ ({ \
+#define _mm_fixupimm_sd(A, B, C, imm) \
(__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) __extension__ ({ \
+#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
(__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) __extension__ ({ \
+#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
(__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) __extension__ ({ \
+#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
(__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_fixupimm_round_ss(A, B, C, imm, R) __extension__ ({ \
+#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
(__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) __extension__ ({ \
+#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
(__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_fixupimm_ss(A, B, C, imm) __extension__ ({ \
+#define _mm_fixupimm_ss(A, B, C, imm) \
(__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) __extension__ ({ \
+#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
(__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) __extension__ ({ \
+#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
(__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) __extension__ ({ \
+#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
(__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_getexp_round_sd(A, B, R) __extension__ ({ \
+#define _mm_getexp_round_sd(A, B, R) \
(__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_getexp_sd (__m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
(__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
@@ -5723,13 +5465,13 @@ _mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_getexp_round_sd(W, U, A, B, R) __extension__ ({\
+#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
@@ -5739,26 +5481,26 @@ _mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_getexp_round_sd(U, A, B, R) __extension__ ({\
+#define _mm_maskz_getexp_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_getexp_round_ss(A, B, R) __extension__ ({ \
+#define _mm_getexp_round_ss(A, B, R) \
(__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_getexp_ss (__m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
(__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
@@ -5768,155 +5510,144 @@ _mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_getexp_round_ss(W, U, A, B, R) __extension__ ({\
+#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
(__v4sf) __B,
- (__v4sf) _mm_setzero_pd (),
+ (__v4sf) _mm_setzero_ps (),
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
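The passthrough change above (_mm_setzero_pd() to _mm_setzero_ps()) is a type fix rather than part of the mechanical rewrite: the ss intrinsics operate on float vectors, so the zero passthrough handed to the builtin must be a __m128, not a __m128d. The same correction appears in _mm_maskz_getmant_ss further down.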
-#define _mm_maskz_getexp_round_ss(U, A, B, R) __extension__ ({\
+#define _mm_maskz_getexp_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_getmant_round_sd(A, B, C, D, R) __extension__ ({ \
+#define _mm_getmant_round_sd(A, B, C, D, R) \
(__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(int)(((D)<<2) | (C)), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_getmant_sd(A, B, C, D) __extension__ ({ \
+#define _mm_getmant_sd(A, B, C, D) \
(__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(int)(((D)<<2) | (C)), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_getmant_sd(W, U, A, B, C, D) __extension__ ({\
+#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
(__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(int)(((D)<<2) | (C)), \
(__v2df)(__m128d)(W), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R)({\
+#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
(__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(int)(((D)<<2) | (C)), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_getmant_sd(U, A, B, C, D) __extension__ ({\
+#define _mm_maskz_getmant_sd(U, A, B, C, D) \
(__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(int)(((D)<<2) | (C)), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) __extension__ ({\
+#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
(__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(int)(((D)<<2) | (C)), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_getmant_round_ss(A, B, C, D, R) __extension__ ({ \
+#define _mm_getmant_round_ss(A, B, C, D, R) \
(__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(int)(((D)<<2) | (C)), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_getmant_ss(A, B, C, D) __extension__ ({ \
+#define _mm_getmant_ss(A, B, C, D) \
(__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(int)(((D)<<2) | (C)), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_getmant_ss(W, U, A, B, C, D) __extension__ ({\
+#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
(__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(int)(((D)<<2) | (C)), \
(__v4sf)(__m128)(W), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R)({\
+#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
(__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(int)(((D)<<2) | (C)), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_getmant_ss(U, A, B, C, D) __extension__ ({\
+#define _mm_maskz_getmant_ss(U, A, B, C, D) \
(__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_pd(), \
+ (__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) __extension__ ({\
+#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
(__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(int)(((D)<<2) | (C)), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
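The hunks above repeat two fixes. Every macro body drops the GNU `__extension__ ({ ... })` statement-expression wrapper in favor of a plain expression, and the two maskz helpers `_mm_maskz_getexp_ss` and `_mm_maskz_getmant_ss` correct a zero-source type mismatch, calling `_mm_setzero_ps()` rather than `_mm_setzero_pd()` where the result is cast to `(__v4sf)`. A minimal sketch of the macro rewrite, using a hypothetical intrinsic `_mm_f` and builtin `__builtin_g` (neither appears in the header):

    /* Before: a statement expression. This is a GNU extension: it cannot
       appear outside a function body and is not a constant expression, so
       the intrinsic cannot be used everywhere an expression can. */
    #define _mm_f(A, R) __extension__ ({ \
      (int)__builtin_g((__v2df)(__m128d)(A), (int)(R)); })

    /* After: an ordinary expression macro, usable anywhere an expression
       is valid. */
    #define _mm_f(A, R) \
      (int)__builtin_g((__v2df)(__m128d)(A), (int)(R))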
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kmov (__mmask16 __A)
{
return __A;
}
-#define _mm_comi_round_sd(A, B, P, R) __extension__ ({\
+#define _mm_comi_round_sd(A, B, P, R) \
(int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
- (int)(P), (int)(R)); })
+ (int)(P), (int)(R))
-#define _mm_comi_round_ss(A, B, P, R) __extension__ ({\
+#define _mm_comi_round_ss(A, B, P, R) \
(int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
- (int)(P), (int)(R)); })
+ (int)(P), (int)(R))
#ifdef __x86_64__
-#define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvt_roundsd_si64(A, R) \
+ (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
#endif
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
- __mmask16 __U, __m512i __B)
-{
- return (__m512i) __builtin_ia32_vpermi2vard512_mask ((__v16si) __A,
- (__v16si) __I
- /* idx */ ,
- (__v16si) __B,
- (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sll_epi32(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5924,7 +5655,7 @@ _mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
(__v16si)__W);
}
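Throughout the diff, masked forms are built from the generic lane-select builtins (`__builtin_ia32_selectd_512` and its q/ps/pd siblings): where mask bit i is set the newly computed element is kept, where it is clear the pass-through operand wins (`__W` for the mask_ form, zero for maskz_). A rough scalar model of the 16 x 32-bit case, written only to pin down the semantics assumed here, not the builtin's implementation:

    /* Sketch of __builtin_ia32_selectd_512(U, X, W): a per-lane blend
       steered by the 16-bit mask. The real builtin lowers to a masked
       vector operation. */
    static inline void select_d512_model(unsigned short U, const int X[16],
                                         const int W[16], int out[16]) {
      for (int i = 0; i < 16; ++i)
        out[i] = ((U >> i) & 1) ? X[i] : W[i];  /* set bit: new value */
    }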
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5932,13 +5663,13 @@ _mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sll_epi64(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5946,7 +5677,7 @@ _mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5954,13 +5685,13 @@ _mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sllv_epi32(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5968,7 +5699,7 @@ _mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -5976,13 +5707,13 @@ _mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sllv_epi64(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5990,7 +5721,7 @@ _mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -5998,13 +5729,13 @@ _mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sra_epi32(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6012,7 +5743,7 @@ _mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6020,13 +5751,13 @@ _mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sra_epi64(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6034,7 +5765,7 @@ _mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6042,13 +5773,13 @@ _mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srav_epi32(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6056,7 +5787,7 @@ _mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6064,13 +5795,13 @@ _mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srav_epi64(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6078,7 +5809,7 @@ _mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6086,13 +5817,13 @@ _mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srl_epi32(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6100,7 +5831,7 @@ _mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6108,13 +5839,13 @@ _mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srl_epi64(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6122,7 +5853,7 @@ _mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6130,13 +5861,13 @@ _mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srlv_epi32(__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6144,7 +5875,7 @@ _mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
@@ -6152,13 +5883,13 @@ _mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6166,7 +5897,7 @@ _mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
@@ -6174,57 +5905,57 @@ _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+#define _mm512_ternarylogic_epi32(A, B, C, imm) \
(__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
(__v16si)(__m512i)(B), \
(__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
(__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
(__v16si)(__m512i)(B), \
(__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U)); })
+ (__mmask16)(U))
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
(__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
(__v16si)(__m512i)(B), \
(__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U)); })
+ (int)(imm), (__mmask16)(U))
-#define _mm512_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+#define _mm512_ternarylogic_epi64(A, B, C, imm) \
(__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
(__v8di)(__m512i)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
(__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
(__v8di)(__m512i)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
(__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
(__v8di)(__m512i)(B), \
(__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
#ifdef __x86_64__
-#define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvt_roundsd_i64(A, R) \
+ (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
#endif
-#define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvt_roundsd_si32(A, R) \
+ (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
-#define _mm_cvt_roundsd_i32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvt_roundsd_i32(A, R) \
+ (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
-#define _mm_cvt_roundsd_u32(A, R) __extension__ ({ \
- (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvt_roundsd_u32(A, R) \
+ (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))
-static __inline__ unsigned __DEFAULT_FN_ATTRS
+static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvtsd_u32 (__m128d __A)
{
return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
@@ -6232,11 +5963,11 @@ _mm_cvtsd_u32 (__m128d __A)
}
#ifdef __x86_64__
-#define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \
+#define _mm_cvt_roundsd_u64(A, R) \
(unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
- (int)(R)); })
+ (int)(R))
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvtsd_u64 (__m128d __A)
{
return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
@@ -6245,24 +5976,24 @@ _mm_cvtsd_u64 (__m128d __A)
}
#endif
-#define _mm_cvt_roundss_si32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvt_roundss_si32(A, R) \
+ (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
-#define _mm_cvt_roundss_i32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvt_roundss_i32(A, R) \
+ (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
#ifdef __x86_64__
-#define _mm_cvt_roundss_si64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvt_roundss_si64(A, R) \
+ (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
-#define _mm_cvt_roundss_i64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvt_roundss_i64(A, R) \
+ (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
#endif
-#define _mm_cvt_roundss_u32(A, R) __extension__ ({ \
- (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvt_roundss_u32(A, R) \
+ (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))
-static __inline__ unsigned __DEFAULT_FN_ATTRS
+static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvtss_u32 (__m128 __A)
{
return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
@@ -6270,11 +6001,11 @@ _mm_cvtss_u32 (__m128 __A)
}
#ifdef __x86_64__
-#define _mm_cvt_roundss_u64(A, R) __extension__ ({ \
+#define _mm_cvt_roundss_u64(A, R) \
(unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
- (int)(R)); })
+ (int)(R))
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvtss_u64 (__m128 __A)
{
return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
@@ -6283,13 +6014,13 @@ _mm_cvtss_u64 (__m128 __A)
}
#endif
-#define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvtt_roundsd_i32(A, R) \
+ (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
-#define _mm_cvtt_roundsd_si32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvtt_roundsd_si32(A, R) \
+ (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS128
_mm_cvttsd_i32 (__m128d __A)
{
return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
@@ -6297,13 +6028,13 @@ _mm_cvttsd_i32 (__m128d __A)
}
#ifdef __x86_64__
-#define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvtt_roundsd_si64(A, R) \
+ (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
-#define _mm_cvtt_roundsd_i64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvtt_roundsd_i64(A, R) \
+ (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS128
_mm_cvttsd_i64 (__m128d __A)
{
return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
@@ -6311,10 +6042,10 @@ _mm_cvttsd_i64 (__m128d __A)
}
#endif
-#define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \
- (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
+#define _mm_cvtt_roundsd_u32(A, R) \
+ (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))
-static __inline__ unsigned __DEFAULT_FN_ATTRS
+static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvttsd_u32 (__m128d __A)
{
return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
@@ -6322,11 +6053,11 @@ _mm_cvttsd_u32 (__m128d __A)
}
#ifdef __x86_64__
-#define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \
+#define _mm_cvtt_roundsd_u64(A, R) \
(unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
- (int)(R)); })
+ (int)(R))
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvttsd_u64 (__m128d __A)
{
return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
@@ -6335,13 +6066,13 @@ _mm_cvttsd_u64 (__m128d __A)
}
#endif
-#define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvtt_roundss_i32(A, R) \
+ (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
-#define _mm_cvtt_roundss_si32(A, R) __extension__ ({ \
- (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvtt_roundss_si32(A, R) \
+ (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS128
_mm_cvttss_i32 (__m128 __A)
{
return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
@@ -6349,13 +6080,13 @@ _mm_cvttss_i32 (__m128 __A)
}
#ifdef __x86_64__
-#define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvtt_roundss_i64(A, R) \
+ (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
-#define _mm_cvtt_roundss_si64(A, R) __extension__ ({ \
- (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvtt_roundss_si64(A, R) \
+ (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS128
_mm_cvttss_i64 (__m128 __A)
{
return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
@@ -6363,10 +6094,10 @@ _mm_cvttss_i64 (__m128 __A)
}
#endif
-#define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \
- (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); })
+#define _mm_cvtt_roundss_u32(A, R) \
+ (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))
-static __inline__ unsigned __DEFAULT_FN_ATTRS
+static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvttss_u32 (__m128 __A)
{
return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
@@ -6374,11 +6105,11 @@ _mm_cvttss_u32 (__m128 __A)
}
#ifdef __x86_64__
-#define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \
+#define _mm_cvtt_roundss_u64(A, R) \
(unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
- (int)(R)); })
+ (int)(R))
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvttss_u64 (__m128 __A)
{
return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
@@ -6387,98 +6118,39 @@ _mm_cvttss_u64 (__m128 __A)
}
#endif
-static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U,
- __m512d __B)
-{
- return (__m512d) __builtin_ia32_vpermi2varpd512_mask ((__v8df) __A,
- (__v8di) __I
- /* idx */ ,
- (__v8df) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask2_permutex2var_ps (__m512 __A, __m512i __I, __mmask16 __U,
- __m512 __B)
-{
- return (__m512) __builtin_ia32_vpermi2varps512_mask ((__v16sf) __A,
- (__v16si) __I
- /* idx */ ,
- (__v16sf) __B,
- (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask2_permutex2var_epi64 (__m512i __A, __m512i __I,
- __mmask8 __U, __m512i __B)
-{
- return (__m512i) __builtin_ia32_vpermi2varq512_mask ((__v8di) __A,
- (__v8di) __I
- /* idx */ ,
- (__v8di) __B,
- (__mmask8) __U);
-}
-
-#define _mm512_permute_pd(X, C) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
- (__v8df)_mm512_undefined_pd(), \
- 0 + (((C) >> 0) & 0x1), \
- 0 + (((C) >> 1) & 0x1), \
- 2 + (((C) >> 2) & 0x1), \
- 2 + (((C) >> 3) & 0x1), \
- 4 + (((C) >> 4) & 0x1), \
- 4 + (((C) >> 5) & 0x1), \
- 6 + (((C) >> 6) & 0x1), \
- 6 + (((C) >> 7) & 0x1)); })
+#define _mm512_permute_pd(X, C) \
+ (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))
-#define _mm512_mask_permute_pd(W, U, X, C) __extension__ ({ \
+#define _mm512_mask_permute_pd(W, U, X, C) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_permute_pd((X), (C)), \
- (__v8df)(__m512d)(W)); })
+ (__v8df)(__m512d)(W))
-#define _mm512_maskz_permute_pd(U, X, C) __extension__ ({ \
+#define _mm512_maskz_permute_pd(U, X, C) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_permute_pd((X), (C)), \
- (__v8df)_mm512_setzero_pd()); })
-
-#define _mm512_permute_ps(X, C) __extension__ ({ \
- (__m512)__builtin_shufflevector((__v16sf)(__m512)(X), \
- (__v16sf)_mm512_undefined_ps(), \
- 0 + (((C) >> 0) & 0x3), \
- 0 + (((C) >> 2) & 0x3), \
- 0 + (((C) >> 4) & 0x3), \
- 0 + (((C) >> 6) & 0x3), \
- 4 + (((C) >> 0) & 0x3), \
- 4 + (((C) >> 2) & 0x3), \
- 4 + (((C) >> 4) & 0x3), \
- 4 + (((C) >> 6) & 0x3), \
- 8 + (((C) >> 0) & 0x3), \
- 8 + (((C) >> 2) & 0x3), \
- 8 + (((C) >> 4) & 0x3), \
- 8 + (((C) >> 6) & 0x3), \
- 12 + (((C) >> 0) & 0x3), \
- 12 + (((C) >> 2) & 0x3), \
- 12 + (((C) >> 4) & 0x3), \
- 12 + (((C) >> 6) & 0x3)); })
-
-#define _mm512_mask_permute_ps(W, U, X, C) __extension__ ({ \
+ (__v8df)_mm512_setzero_pd())
+
+#define _mm512_permute_ps(X, C) \
+ (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))
+
+#define _mm512_mask_permute_ps(W, U, X, C) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_permute_ps((X), (C)), \
- (__v16sf)(__m512)(W)); })
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_permute_ps(U, X, C) __extension__ ({ \
+#define _mm512_maskz_permute_ps(U, X, C) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_permute_ps((X), (C)), \
- (__v16sf)_mm512_setzero_ps()); })
+ (__v16sf)_mm512_setzero_ps())
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_permutevar_pd(__m512d __A, __m512i __C)
{
return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
@@ -6486,7 +6158,7 @@ _mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
@@ -6494,13 +6166,13 @@ _mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_permutevar_ps(__m512 __A, __m512i __C)
{
return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
@@ -6508,7 +6180,7 @@ _mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
@@ -6516,85 +6188,87 @@ _mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
(__v16sf)_mm512_setzero_ps());
}
-static __inline __m512d __DEFAULT_FN_ATTRS
+static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
{
- return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
- /* idx */ ,
- (__v8df) __A,
- (__v8df) __B,
- (__mmask8) -1);
+ return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I,
+ (__v8df)__B);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
+{
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
+ (__v8df)__A);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_permutex2var_pd (__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U,
+ __m512d __B)
{
- return (__m512d) __builtin_ia32_vpermt2varpd512_mask ((__v8di) __I
- /* idx */ ,
- (__v8df) __A,
- (__v8df) __B,
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
+ (__v8df)(__m512d)__I);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_permutex2var_pd (__mmask8 __U, __m512d __A, __m512i __I,
- __m512d __B)
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I,
+ __m512d __B)
{
- return (__m512d) __builtin_ia32_vpermt2varpd512_maskz ((__v8di) __I
- /* idx */ ,
- (__v8df) __A,
- (__v8df) __B,
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512(__U,
+ (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
+ (__v8df)_mm512_setzero_pd());
}
-static __inline __m512 __DEFAULT_FN_ATTRS
+static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
{
- return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
- /* idx */ ,
- (__v16sf) __A,
- (__v16sf) __B,
- (__mmask16) -1);
+ return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I,
+ (__v16sf) __B);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_permutex2var_ps (__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
{
- return (__m512) __builtin_ia32_vpermt2varps512_mask ((__v16si) __I
- /* idx */ ,
- (__v16sf) __A,
- (__v16sf) __B,
- (__mmask16) __U);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
+ (__v16sf)__A);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_permutex2var_ps (__mmask16 __U, __m512 __A, __m512i __I,
- __m512 __B)
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B)
{
- return (__m512) __builtin_ia32_vpermt2varps512_maskz ((__v16si) __I
- /* idx */ ,
- (__v16sf) __A,
- (__v16sf) __B,
- (__mmask16) __U);
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
+ (__v16sf)(__m512)__I);
}
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
+{
+ return (__m512)__builtin_ia32_selectps_512(__U,
+ (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
+ (__v16sf)_mm512_setzero_ps());
+}
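The permutex2var rework above follows a single pattern: the unmasked form calls the plain two-source permute builtin (`__builtin_ia32_vpermi2varpd512` / `__builtin_ia32_vpermi2varps512`), and all three masked flavors become a select over it. The blend fallback encodes which operand survives in masked-off lanes: mask_ keeps the first source `__A`, mask2_ keeps the index vector `__I` bitcast to the element type (mirroring vpermi2's behavior of overwriting its index register), and maskz_ zeroes. A scalar model of the unmasked pd semantics assumed here:

    /* Sketch: each result lane indexes the 16-element table {A, B} with
       the low four bits of the matching index element; bit 3 picks the
       table, bits 2:0 pick the lane within it. */
    static inline void permutex2var_pd_model(const double A[8],
                                             const long long I[8],
                                             const double B[8], double out[8]) {
      for (int i = 0; i < 8; ++i) {
        int idx = (int)(I[i] & 0xf);
        out[i] = (idx < 8) ? A[idx] : B[idx - 8];
      }
    }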
-#define _mm512_cvtt_roundpd_epu32(A, R) __extension__ ({ \
+
+#define _mm512_cvtt_roundpd_epu32(A, R) \
(__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
(__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
(__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
(__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
(__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epu32 (__m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
@@ -6604,7 +6278,7 @@ _mm512_cvttpd_epu32 (__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
@@ -6613,7 +6287,7 @@ _mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
{
return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
@@ -6623,109 +6297,109 @@ _mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_roundscale_round_sd(A, B, imm, R) __extension__ ({ \
+#define _mm_roundscale_round_sd(A, B, imm, R) \
(__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)-1, (int)(imm), \
- (int)(R)); })
+ (int)(R))
-#define _mm_roundscale_sd(A, B, imm) __extension__ ({ \
+#define _mm_roundscale_sd(A, B, imm) \
(__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)-1, (int)(imm), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_roundscale_sd(W, U, A, B, imm) __extension__ ({ \
+#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
(__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
(__mmask8)(U), (int)(imm), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) __extension__ ({ \
+#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
(__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
(__mmask8)(U), (int)(I), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_roundscale_sd(U, A, B, I) __extension__ ({ \
+#define _mm_maskz_roundscale_sd(U, A, B, I) \
(__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) __extension__ ({ \
+#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
(__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(I), \
- (int)(R)); })
+ (int)(R))
-#define _mm_roundscale_round_ss(A, B, imm, R) __extension__ ({ \
+#define _mm_roundscale_round_ss(A, B, imm, R) \
(__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)-1, (int)(imm), \
- (int)(R)); })
+ (int)(R))
-#define _mm_roundscale_ss(A, B, imm) __extension__ ({ \
+#define _mm_roundscale_ss(A, B, imm) \
(__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)-1, (int)(imm), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_roundscale_ss(W, U, A, B, I) __extension__ ({ \
+#define _mm_mask_roundscale_ss(W, U, A, B, I) \
(__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), \
(__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) __extension__ ({ \
+#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
(__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), \
(__mmask8)(U), (int)(I), \
- (int)(R)); })
+ (int)(R))
-#define _mm_maskz_roundscale_ss(U, A, B, I) __extension__ ({ \
+#define _mm_maskz_roundscale_ss(U, A, B, I) \
(__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) __extension__ ({ \
+#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
(__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), (int)(I), \
- (int)(R)); })
+ (int)(R))
-#define _mm512_scalef_round_pd(A, B, R) __extension__ ({ \
+#define _mm512_scalef_round_pd(A, B, R) \
(__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_scalef_round_pd(W, U, A, B, R) __extension__ ({ \
+#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
(__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_scalef_round_pd(U, A, B, R) __extension__ ({ \
+#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
(__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_scalef_pd (__m512d __A, __m512d __B)
{
return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
@@ -6736,7 +6410,7 @@ _mm512_scalef_pd (__m512d __A, __m512d __B)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
{
return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
@@ -6746,7 +6420,7 @@ _mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
{
return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
@@ -6757,25 +6431,25 @@ _mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_scalef_round_ps(A, B, R) __extension__ ({ \
+#define _mm512_scalef_round_ps(A, B, R) \
(__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_scalef_round_ps(W, U, A, B, R) __extension__ ({ \
+#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
(__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_scalef_round_ps(U, A, B, R) __extension__ ({ \
+#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
(__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(B), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_scalef_ps (__m512 __A, __m512 __B)
{
return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
@@ -6786,7 +6460,7 @@ _mm512_scalef_ps (__m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
{
return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
@@ -6796,7 +6470,7 @@ _mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
{
return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
@@ -6807,13 +6481,13 @@ _mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_scalef_round_sd(A, B, R) __extension__ ({ \
+#define _mm_scalef_round_sd(A, B, R) \
(__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_scalef_sd (__m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
@@ -6822,7 +6496,7 @@ _mm_scalef_sd (__m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
@@ -6832,13 +6506,13 @@ _mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_scalef_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
@@ -6848,19 +6522,19 @@ _mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_scalef_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_scalef_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_scalef_round_ss(A, B, R) __extension__ ({ \
+#define _mm_scalef_round_ss(A, B, R) \
(__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_scalef_ss (__m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
@@ -6869,7 +6543,7 @@ _mm_scalef_ss (__m128 __A, __m128 __B)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
@@ -6879,13 +6553,13 @@ _mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_scalef_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
@@ -6895,211 +6569,147 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_scalef_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_scalef_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ (int)(R))
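Note that this hunk is a behavior fix, not just a syntactic cleanup: the old `_mm_maskz_scalef_round_ss` expansion silently discarded its rounding argument and always passed `_MM_FROUND_CUR_DIRECTION`; the new body forwards `(int)(R)`, matching every other `*_round_*` macro in the file.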
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi32(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
{
- return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, \
- (__v16si)_mm512_srai_epi32(__A, __B), \
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srai_epi32(__A, __B),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
- return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, \
- (__v16si)_mm512_srai_epi32(__A, __B), \
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srai_epi32(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi64(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
{
- return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, \
- (__v8di)_mm512_srai_epi64(__A, __B), \
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srai_epi64(__A, __B),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
{
- return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, \
- (__v8di)_mm512_srai_epi64(__A, __B), \
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srai_epi64(__A, __B),
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_shuffle_f32x4(A, B, imm) __extension__ ({ \
- (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 4), \
- 1 + ((((imm) >> 0) & 0x3) * 4), \
- 2 + ((((imm) >> 0) & 0x3) * 4), \
- 3 + ((((imm) >> 0) & 0x3) * 4), \
- 0 + ((((imm) >> 2) & 0x3) * 4), \
- 1 + ((((imm) >> 2) & 0x3) * 4), \
- 2 + ((((imm) >> 2) & 0x3) * 4), \
- 3 + ((((imm) >> 2) & 0x3) * 4), \
- 16 + ((((imm) >> 4) & 0x3) * 4), \
- 17 + ((((imm) >> 4) & 0x3) * 4), \
- 18 + ((((imm) >> 4) & 0x3) * 4), \
- 19 + ((((imm) >> 4) & 0x3) * 4), \
- 16 + ((((imm) >> 6) & 0x3) * 4), \
- 17 + ((((imm) >> 6) & 0x3) * 4), \
- 18 + ((((imm) >> 6) & 0x3) * 4), \
- 19 + ((((imm) >> 6) & 0x3) * 4)); })
-
-#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
+#define _mm512_shuffle_f32x4(A, B, imm) \
+ (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm))
+
+#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
- (__v16sf)(__m512)(W)); })
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps()); })
-
-#define _mm512_shuffle_f64x2(A, B, imm) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 2), \
- 1 + ((((imm) >> 0) & 0x3) * 2), \
- 0 + ((((imm) >> 2) & 0x3) * 2), \
- 1 + ((((imm) >> 2) & 0x3) * 2), \
- 8 + ((((imm) >> 4) & 0x3) * 2), \
- 9 + ((((imm) >> 4) & 0x3) * 2), \
- 8 + ((((imm) >> 6) & 0x3) * 2), \
- 9 + ((((imm) >> 6) & 0x3) * 2)); })
-
-#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
+ (__v16sf)_mm512_setzero_ps())
+
+#define _mm512_shuffle_f64x2(A, B, imm) \
+ (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm))
+
+#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
- (__v8df)(__m512d)(W)); })
+ (__v8df)(__m512d)(W))
-#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd()); })
-
-#define _mm512_shuffle_i32x4(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 2), \
- 1 + ((((imm) >> 0) & 0x3) * 2), \
- 0 + ((((imm) >> 2) & 0x3) * 2), \
- 1 + ((((imm) >> 2) & 0x3) * 2), \
- 8 + ((((imm) >> 4) & 0x3) * 2), \
- 9 + ((((imm) >> 4) & 0x3) * 2), \
- 8 + ((((imm) >> 6) & 0x3) * 2), \
- 9 + ((((imm) >> 6) & 0x3) * 2)); })
-
-#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
+ (__v8df)_mm512_setzero_pd())
+
+#define _mm512_shuffle_i32x4(A, B, imm) \
+ (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm))
+
+#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
- (__v16si)(__m512i)(W)); })
+ (__v16si)(__m512i)(W))
-#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512()); })
-
-#define _mm512_shuffle_i64x2(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 2), \
- 1 + ((((imm) >> 0) & 0x3) * 2), \
- 0 + ((((imm) >> 2) & 0x3) * 2), \
- 1 + ((((imm) >> 2) & 0x3) * 2), \
- 8 + ((((imm) >> 4) & 0x3) * 2), \
- 9 + ((((imm) >> 4) & 0x3) * 2), \
- 8 + ((((imm) >> 6) & 0x3) * 2), \
- 9 + ((((imm) >> 6) & 0x3) * 2)); })
-
-#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
+ (__v16si)_mm512_setzero_si512())
+
+#define _mm512_shuffle_i64x2(A, B, imm) \
+ (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm))
+
+#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
- (__v8di)(__m512i)(W)); })
+ (__v8di)(__m512i)(W))
-#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512()); })
-
-#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- 0 + (((M) >> 0) & 0x1), \
- 8 + (((M) >> 1) & 0x1), \
- 2 + (((M) >> 2) & 0x1), \
- 10 + (((M) >> 3) & 0x1), \
- 4 + (((M) >> 4) & 0x1), \
- 12 + (((M) >> 5) & 0x1), \
- 6 + (((M) >> 6) & 0x1), \
- 14 + (((M) >> 7) & 0x1)); })
-
-#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+ (__v8di)_mm512_setzero_si512())
+
+#define _mm512_shuffle_pd(A, B, M) \
+ (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(M))
+
+#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_shuffle_pd((A), (B), (M)), \
- (__v8df)(__m512d)(W)); })
+ (__v8df)(__m512d)(W))
-#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+#define _mm512_maskz_shuffle_pd(U, A, B, M) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_shuffle_pd((A), (B), (M)), \
- (__v8df)_mm512_setzero_pd()); })
-
-#define _mm512_shuffle_ps(A, B, M) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- 0 + (((M) >> 0) & 0x3), \
- 0 + (((M) >> 2) & 0x3), \
- 16 + (((M) >> 4) & 0x3), \
- 16 + (((M) >> 6) & 0x3), \
- 4 + (((M) >> 0) & 0x3), \
- 4 + (((M) >> 2) & 0x3), \
- 20 + (((M) >> 4) & 0x3), \
- 20 + (((M) >> 6) & 0x3), \
- 8 + (((M) >> 0) & 0x3), \
- 8 + (((M) >> 2) & 0x3), \
- 24 + (((M) >> 4) & 0x3), \
- 24 + (((M) >> 6) & 0x3), \
- 12 + (((M) >> 0) & 0x3), \
- 12 + (((M) >> 2) & 0x3), \
- 28 + (((M) >> 4) & 0x3), \
- 28 + (((M) >> 6) & 0x3)); })
-
-#define _mm512_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+ (__v8df)_mm512_setzero_pd())
+
+#define _mm512_shuffle_ps(A, B, M) \
+ (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(M))
+
+#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
- (__v16sf)(__m512)(W)); })
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+#define _mm512_maskz_shuffle_ps(U, A, B, M) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
- (__v16sf)_mm512_setzero_ps()); })
+ (__v16sf)_mm512_setzero_ps())
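The shuffle macros above swap long compile-time `__builtin_shufflevector` index lists for dedicated immediate-taking builtins (`__builtin_ia32_shuf_f32x4`, `__builtin_ia32_shufpd512`, and friends), so the immediate reaches the compiler as a single `int` instead of being decoded into per-lane index arithmetic by the preprocessor. The removed expansions still document the lane math; for `_mm512_shuffle_f64x2`, each 2-bit field of `imm` selects one 128-bit pair, the low two fields reading `A` and the high two reading `B`. A reference-only scalar model of that decoding:

    /* Sketch of _mm512_shuffle_f64x2's index decoding, transcribed from
       the removed shufflevector expansion above. */
    static inline void shuffle_f64x2_model(const double A[8], const double B[8],
                                           int imm, double out[8]) {
      for (int k = 0; k < 4; ++k) {
        const double *src = (k < 2) ? A : B;  /* low fields: A, high: B */
        int pair = (imm >> (2 * k)) & 0x3;    /* 128-bit pair in the source */
        out[2 * k]     = src[2 * pair];
        out[2 * k + 1] = src[2 * pair + 1];
      }
    }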
-#define _mm_sqrt_round_sd(A, B, R) __extension__ ({ \
+#define _mm_sqrt_round_sd(A, B, R) \
(__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
@@ -7109,13 +6719,13 @@ _mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_sqrt_round_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
@@ -7125,19 +6735,19 @@ _mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_sqrt_round_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
(__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_sqrt_round_ss(A, B, R) __extension__ ({ \
+#define _mm_sqrt_round_ss(A, B, R) \
(__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
@@ -7147,13 +6757,13 @@ _mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_sqrt_round_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
@@ -7163,13 +6773,13 @@ _mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_sqrt_round_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
(__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
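/* Illustrative sketch, not from the header: the R argument of the *_round_*
   forms must be a compile-time rounding constant; _MM_FROUND_CUR_DIRECTION
   uses the MXCSR mode, while an explicit static mode such as the one below
   also suppresses exceptions. Assumes AVX-512F; the function name is
   hypothetical. */
#include <immintrin.h>
static inline __m128d demo_sqrt_round_sd(__m128d __A, __m128d __B)
{
  return _mm_sqrt_round_sd(__A, __B,
                           _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}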
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x4(__m128 __A)
{
return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
@@ -7177,7 +6787,7 @@ _mm512_broadcast_f32x4(__m128 __A)
0, 1, 2, 3, 0, 1, 2, 3);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
@@ -7185,7 +6795,7 @@ _mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
(__v16sf)__O);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
@@ -7193,14 +6803,14 @@ _mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_broadcast_f64x4(__m256d __A)
{
return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
@@ -7208,7 +6818,7 @@ _mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
(__v8df)__O);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
@@ -7216,7 +6826,7 @@ _mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i32x4(__m128i __A)
{
return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
@@ -7224,7 +6834,7 @@ _mm512_broadcast_i32x4(__m128i __A)
0, 1, 2, 3, 0, 1, 2, 3);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -7232,7 +6842,7 @@ _mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
(__v16si)__O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -7240,14 +6850,14 @@ _mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i64x4(__m256i __A)
{
return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -7255,7 +6865,7 @@ _mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
(__v8di)__O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -7263,7 +6873,7 @@ _mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
{
return (__m512d)__builtin_ia32_selectpd_512(__M,
@@ -7271,7 +6881,7 @@ _mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
(__v8df) __O);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
{
return (__m512d)__builtin_ia32_selectpd_512(__M,
@@ -7279,7 +6889,7 @@ _mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
(__v8df) _mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
{
return (__m512)__builtin_ia32_selectps_512(__M,
@@ -7287,7 +6897,7 @@ _mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
(__v16sf) __O);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
{
return (__m512)__builtin_ia32_selectps_512(__M,
@@ -7295,7 +6905,7 @@ _mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
(__v16sf) _mm512_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtsepi32_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
@@ -7303,14 +6913,14 @@ _mm512_cvtsepi32_epi8 (__m512i __A)
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
@@ -7318,13 +6928,13 @@ _mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
{
__builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtsepi32_epi16 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
@@ -7332,14 +6942,14 @@ _mm512_cvtsepi32_epi16 (__m512i __A)
(__mmask16) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
(__v16hi) __O, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
@@ -7347,13 +6957,13 @@ _mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
{
__builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtsepi64_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
@@ -7361,14 +6971,14 @@ _mm512_cvtsepi64_epi8 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
@@ -7376,13 +6986,13 @@ _mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtsepi64_epi32 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
@@ -7390,14 +7000,14 @@ _mm512_cvtsepi64_epi32 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
(__v8si) __O, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
@@ -7405,13 +7015,13 @@ _mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtsepi64_epi16 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
@@ -7419,14 +7029,14 @@ _mm512_cvtsepi64_epi16 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
@@ -7434,13 +7044,13 @@ _mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtusepi32_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
@@ -7448,7 +7058,7 @@ _mm512_cvtusepi32_epi8 (__m512i __A)
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
@@ -7456,7 +7066,7 @@ _mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
@@ -7464,13 +7074,13 @@ _mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
{
__builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtusepi32_epi16 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
@@ -7478,7 +7088,7 @@ _mm512_cvtusepi32_epi16 (__m512i __A)
(__mmask16) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
@@ -7486,7 +7096,7 @@ _mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
@@ -7494,13 +7104,13 @@ _mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
{
__builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtusepi64_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
@@ -7508,7 +7118,7 @@ _mm512_cvtusepi64_epi8 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
@@ -7516,7 +7126,7 @@ _mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
@@ -7524,13 +7134,13 @@ _mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtusepi64_epi32 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
@@ -7538,14 +7148,14 @@ _mm512_cvtusepi64_epi32 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
(__v8si) __O, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
@@ -7553,13 +7163,13 @@ _mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtusepi64_epi16 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
@@ -7567,14 +7177,14 @@ _mm512_cvtusepi64_epi16 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
@@ -7582,13 +7192,13 @@ _mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtepi32_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
@@ -7596,14 +7206,14 @@ _mm512_cvtepi32_epi8 (__m512i __A)
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
@@ -7611,13 +7221,13 @@ _mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
{
__builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtepi32_epi16 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
@@ -7625,14 +7235,14 @@ _mm512_cvtepi32_epi16 (__m512i __A)
(__mmask16) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
(__v16hi) __O, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
@@ -7640,13 +7250,13 @@ _mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
{
__builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtepi64_epi8 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
@@ -7654,14 +7264,14 @@ _mm512_cvtepi64_epi8 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
@@ -7669,13 +7279,13 @@ _mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtepi64_epi32 (__m512i __A)
{
return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
@@ -7683,14 +7293,14 @@ _mm512_cvtepi64_epi32 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
(__v8si) __O, __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
{
return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
@@ -7698,13 +7308,13 @@ _mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_cvtepi64_epi16 (__m512i __A)
{
return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
@@ -7712,14 +7322,14 @@ _mm512_cvtepi64_epi16 (__m512i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
{
return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
@@ -7727,246 +7337,192 @@ _mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
{
__builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
}
-#define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v16si)(__m512i)(A), \
- (__v16si)_mm512_undefined_epi32(), \
- 0 + ((imm) & 0x3) * 4, \
- 1 + ((imm) & 0x3) * 4, \
- 2 + ((imm) & 0x3) * 4, \
- 3 + ((imm) & 0x3) * 4); })
-
-#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm512_extracti32x4_epi32((A), (imm)), \
- (__v4si)(W)); })
-
-#define _mm512_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm512_extracti32x4_epi32((A), (imm)), \
- (__v4si)_mm_setzero_si128()); })
-
-#define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)_mm512_undefined_epi32(), \
- ((imm) & 1) ? 4 : 0, \
- ((imm) & 1) ? 5 : 1, \
- ((imm) & 1) ? 6 : 2, \
- ((imm) & 1) ? 7 : 3); })
-
-#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm512_extracti64x4_epi64((A), (imm)), \
- (__v4di)(W)); })
-
-#define _mm512_maskz_extracti64x4_epi64(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm512_extracti64x4_epi64((A), (imm)), \
- (__v4di)_mm256_setzero_si256()); })
-
-#define _mm512_insertf64x4(A, B, imm) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)_mm512_castpd256_pd512((__m256d)(B)), \
- ((imm) & 0x1) ? 0 : 8, \
- ((imm) & 0x1) ? 1 : 9, \
- ((imm) & 0x1) ? 2 : 10, \
- ((imm) & 0x1) ? 3 : 11, \
- ((imm) & 0x1) ? 8 : 4, \
- ((imm) & 0x1) ? 9 : 5, \
- ((imm) & 0x1) ? 10 : 6, \
- ((imm) & 0x1) ? 11 : 7); })
-
-#define _mm512_mask_insertf64x4(W, U, A, B, imm) __extension__ ({ \
+#define _mm512_extracti32x4_epi32(A, imm) \
+ (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
+ (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
+ (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U))
+
+#define _mm512_extracti64x4_epi64(A, imm) \
+ (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_undefined_si256(), \
+ (__mmask8)-1)
+
+#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
+ (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U))
+
+#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
+ (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U))
+
+#define _mm512_insertf64x4(A, B, imm) \
+ (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm))
+
+#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_insertf64x4((A), (B), (imm)), \
- (__v8df)(W)); })
+ (__v8df)(__m512d)(W))
-#define _mm512_maskz_insertf64x4(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_insertf64x4(U, A, B, imm) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_insertf64x4((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd()); })
-
-#define _mm512_inserti64x4(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)_mm512_castsi256_si512((__m256i)(B)), \
- ((imm) & 0x1) ? 0 : 8, \
- ((imm) & 0x1) ? 1 : 9, \
- ((imm) & 0x1) ? 2 : 10, \
- ((imm) & 0x1) ? 3 : 11, \
- ((imm) & 0x1) ? 8 : 4, \
- ((imm) & 0x1) ? 9 : 5, \
- ((imm) & 0x1) ? 10 : 6, \
- ((imm) & 0x1) ? 11 : 7); })
-
-#define _mm512_mask_inserti64x4(W, U, A, B, imm) __extension__ ({ \
+ (__v8df)_mm512_setzero_pd())
+
+#define _mm512_inserti64x4(A, B, imm) \
+ (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm))
+
+#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_inserti64x4((A), (B), (imm)), \
- (__v8di)(W)); })
+ (__v8di)(__m512i)(W))
-#define _mm512_maskz_inserti64x4(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_inserti64x4(U, A, B, imm) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_inserti64x4((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512()); })
-
-#define _mm512_insertf32x4(A, B, imm) __extension__ ({ \
- (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_castps128_ps512((__m128)(B)),\
- (((imm) & 0x3) == 0) ? 16 : 0, \
- (((imm) & 0x3) == 0) ? 17 : 1, \
- (((imm) & 0x3) == 0) ? 18 : 2, \
- (((imm) & 0x3) == 0) ? 19 : 3, \
- (((imm) & 0x3) == 1) ? 16 : 4, \
- (((imm) & 0x3) == 1) ? 17 : 5, \
- (((imm) & 0x3) == 1) ? 18 : 6, \
- (((imm) & 0x3) == 1) ? 19 : 7, \
- (((imm) & 0x3) == 2) ? 16 : 8, \
- (((imm) & 0x3) == 2) ? 17 : 9, \
- (((imm) & 0x3) == 2) ? 18 : 10, \
- (((imm) & 0x3) == 2) ? 19 : 11, \
- (((imm) & 0x3) == 3) ? 16 : 12, \
- (((imm) & 0x3) == 3) ? 17 : 13, \
- (((imm) & 0x3) == 3) ? 18 : 14, \
- (((imm) & 0x3) == 3) ? 19 : 15); })
-
-#define _mm512_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
+ (__v8di)_mm512_setzero_si512())
+
+#define _mm512_insertf32x4(A, B, imm) \
+ (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
+ (__v4sf)(__m128)(B), (int)(imm))
+
+#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
- (__v16sf)(W)); })
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_insertf32x4(U, A, B, imm) \
(__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps()); })
-
-#define _mm512_inserti32x4(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
- (__v16si)_mm512_castsi128_si512((__m128i)(B)),\
- (((imm) & 0x3) == 0) ? 16 : 0, \
- (((imm) & 0x3) == 0) ? 17 : 1, \
- (((imm) & 0x3) == 0) ? 18 : 2, \
- (((imm) & 0x3) == 0) ? 19 : 3, \
- (((imm) & 0x3) == 1) ? 16 : 4, \
- (((imm) & 0x3) == 1) ? 17 : 5, \
- (((imm) & 0x3) == 1) ? 18 : 6, \
- (((imm) & 0x3) == 1) ? 19 : 7, \
- (((imm) & 0x3) == 2) ? 16 : 8, \
- (((imm) & 0x3) == 2) ? 17 : 9, \
- (((imm) & 0x3) == 2) ? 18 : 10, \
- (((imm) & 0x3) == 2) ? 19 : 11, \
- (((imm) & 0x3) == 3) ? 16 : 12, \
- (((imm) & 0x3) == 3) ? 17 : 13, \
- (((imm) & 0x3) == 3) ? 18 : 14, \
- (((imm) & 0x3) == 3) ? 19 : 15); })
-
-#define _mm512_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
+ (__v16sf)_mm512_setzero_ps())
+
+#define _mm512_inserti32x4(A, B, imm) \
+ (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm))
+
+#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_inserti32x4((A), (B), (imm)), \
- (__v16si)(W)); })
+ (__v16si)(__m512i)(W))
-#define _mm512_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
+#define _mm512_maskz_inserti32x4(U, A, B, imm) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_inserti32x4((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512()); })
+ (__v16si)_mm512_setzero_si512())
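/* Illustrative sketch, not from the header: imm selects which 128-bit lane
   the extract/insert macros read or write (0..3 for the 32x4 forms, 0..1 for
   the 64x4 forms). Assumes AVX-512F; the function name is hypothetical. */
#include <immintrin.h>
static inline __m512i demo_lane_copy(__m512i __A)
{
  __m128i __Lo = _mm512_extracti32x4_epi32(__A, 0); /* read bits 127:0 */
  return _mm512_inserti32x4(__A, __Lo, 3);          /* replace bits 511:384 */
}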
-#define _mm512_getmant_round_pd(A, B, C, R) __extension__ ({ \
+#define _mm512_getmant_round_pd(A, B, C, R) \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
(int)(((C)<<2) | (B)), \
(__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) __extension__ ({ \
+#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
(int)(((C)<<2) | (B)), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
(int)(((C)<<2) | (B)), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_getmant_pd(A, B, C) __extension__ ({ \
+#define _mm512_getmant_pd(A, B, C) \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
(int)(((C)<<2) | (B)), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
+#define _mm512_mask_getmant_pd(W, U, A, B, C) \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
(int)(((C)<<2) | (B)), \
(__v8df)(__m512d)(W), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
+#define _mm512_maskz_getmant_pd(U, A, B, C) \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
(int)(((C)<<2) | (B)), \
(__v8df)_mm512_setzero_pd(), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_getmant_round_ps(A, B, C, R) __extension__ ({ \
+#define _mm512_getmant_round_ps(A, B, C, R) \
(__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
(int)(((C)<<2) | (B)), \
(__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) __extension__ ({ \
+#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
(__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
(int)(((C)<<2) | (B)), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) __extension__ ({ \
+#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
(__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
(int)(((C)<<2) | (B)), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_getmant_ps(A, B, C) __extension__ ({ \
+#define _mm512_getmant_ps(A, B, C) \
(__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
(int)(((C)<<2)|(B)), \
(__v16sf)_mm512_undefined_ps(), \
(__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+#define _mm512_mask_getmant_ps(W, U, A, B, C) \
(__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
(int)(((C)<<2)|(B)), \
(__v16sf)(__m512)(W), \
(__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+#define _mm512_maskz_getmant_ps(U, A, B, C) \
(__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
(int)(((C)<<2)|(B)), \
(__v16sf)_mm512_setzero_ps(), \
(__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm512_getexp_round_pd(A, R) __extension__ ({ \
+#define _mm512_getexp_round_pd(A, R) \
(__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_getexp_round_pd(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_getexp_round_pd(W, U, A, R) \
(__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_getexp_round_pd(U, A, R) __extension__ ({ \
+#define _mm512_maskz_getexp_round_pd(U, A, R) \
(__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_getexp_pd (__m512d __A)
{
return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
@@ -7975,7 +7531,7 @@ _mm512_getexp_pd (__m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
@@ -7984,7 +7540,7 @@ _mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
@@ -7993,22 +7549,22 @@ _mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_getexp_round_ps(A, R) __extension__ ({ \
+#define _mm512_getexp_round_ps(A, R) \
(__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R)); })
+ (__mmask16)-1, (int)(R))
-#define _mm512_mask_getexp_round_ps(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_getexp_round_ps(W, U, A, R) \
(__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
(__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-#define _mm512_maskz_getexp_round_ps(U, A, R) __extension__ ({ \
+#define _mm512_maskz_getexp_round_ps(U, A, R) \
(__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
(__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R)); })
+ (__mmask16)(U), (int)(R))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_getexp_ps (__m512 __A)
{
return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
@@ -8017,7 +7573,7 @@ _mm512_getexp_ps (__m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
@@ -8026,7 +7582,7 @@ _mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
@@ -8035,802 +7591,812 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
_MM_FROUND_CUR_DIRECTION);
}
-#define _mm512_i64gather_ps(index, addr, scale) __extension__ ({ \
+#define _mm512_i64gather_ps(index, addr, scale) \
(__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
(float const *)(addr), \
(__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale)); })
+ (int)(scale))
-#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__({\
+#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
(__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
(float const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm512_i64gather_epi32(index, addr, scale) __extension__ ({\
- (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_ps(), \
+#define _mm512_i64gather_epi32(index, addr, scale) \
+ (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
(int const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)-1, (int)(scale)); })
+ (__mmask8)-1, (int)(scale))
-#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
(int const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm512_i64gather_pd(index, addr, scale) __extension__ ({\
+#define _mm512_i64gather_pd(index, addr, scale) \
(__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
(double const *)(addr), \
(__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale)); })
+ (int)(scale))
-#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
(__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
(double const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm512_i64gather_epi64(index, addr, scale) __extension__ ({\
- (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_pd(), \
+#define _mm512_i64gather_epi64(index, addr, scale) \
+ (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
(long long const *)(addr), \
(__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale)); })
+ (int)(scale))
-#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
(__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
(long long const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm512_i32gather_ps(index, addr, scale) __extension__ ({\
+#define _mm512_i32gather_ps(index, addr, scale) \
(__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
(float const *)(addr), \
(__v16sf)(__m512)(index), \
- (__mmask16)-1, (int)(scale)); })
+ (__mmask16)-1, (int)(scale))
-#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
(__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
(float const *)(addr), \
(__v16sf)(__m512)(index), \
- (__mmask16)(mask), (int)(scale)); })
+ (__mmask16)(mask), (int)(scale))
-#define _mm512_i32gather_epi32(index, addr, scale) __extension__ ({\
+#define _mm512_i32gather_epi32(index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
(int const *)(addr), \
(__v16si)(__m512i)(index), \
- (__mmask16)-1, (int)(scale)); })
+ (__mmask16)-1, (int)(scale))
-#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
(int const *)(addr), \
(__v16si)(__m512i)(index), \
- (__mmask16)(mask), (int)(scale)); })
+ (__mmask16)(mask), (int)(scale))
-#define _mm512_i32gather_pd(index, addr, scale) __extension__ ({\
+#define _mm512_i32gather_pd(index, addr, scale) \
(__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
(double const *)(addr), \
(__v8si)(__m256i)(index), (__mmask8)-1, \
- (int)(scale)); })
+ (int)(scale))
-#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
(__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
(double const *)(addr), \
(__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm512_i32gather_epi64(index, addr, scale) __extension__ ({\
+#define _mm512_i32gather_epi64(index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
(long long const *)(addr), \
(__v8si)(__m256i)(index), (__mmask8)-1, \
- (int)(scale)); })
+ (int)(scale))
-#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
(long long const *)(addr), \
(__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm512_i64scatter_ps(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i64scatter_ps(addr, index, v1, scale) \
__builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
- (__v8sf)(__m256)(v1), (int)(scale)); })
+ (__v8sf)(__m256)(v1), (int)(scale))
-#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
- (__v8sf)(__m256)(v1), (int)(scale)); })
+ (__v8sf)(__m256)(v1), (int)(scale))
-#define _mm512_i64scatter_epi32(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i64scatter_epi32(addr, index, v1, scale) \
__builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
- (__v8si)(__m256i)(v1), (int)(scale)); })
+ (__v8si)(__m256i)(v1), (int)(scale))
-#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
- (__v8si)(__m256i)(v1), (int)(scale)); })
+ (__v8si)(__m256i)(v1), (int)(scale))
-#define _mm512_i64scatter_pd(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i64scatter_pd(addr, index, v1, scale) \
__builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
- (__v8df)(__m512d)(v1), (int)(scale)); })
+ (__v8df)(__m512d)(v1), (int)(scale))
-#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
- (__v8df)(__m512d)(v1), (int)(scale)); })
+ (__v8df)(__m512d)(v1), (int)(scale))
-#define _mm512_i64scatter_epi64(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i64scatter_epi64(addr, index, v1, scale) \
__builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
- (__v8di)(__m512i)(v1), (int)(scale)); })
+ (__v8di)(__m512i)(v1), (int)(scale))
-#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
- (__v8di)(__m512i)(v1), (int)(scale)); })
+ (__v8di)(__m512i)(v1), (int)(scale))
-#define _mm512_i32scatter_ps(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i32scatter_ps(addr, index, v1, scale) \
__builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)-1, \
(__v16si)(__m512i)(index), \
- (__v16sf)(__m512)(v1), (int)(scale)); })
+ (__v16sf)(__m512)(v1), (int)(scale))
-#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \
__builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)(mask), \
(__v16si)(__m512i)(index), \
- (__v16sf)(__m512)(v1), (int)(scale)); })
+ (__v16sf)(__m512)(v1), (int)(scale))
-#define _mm512_i32scatter_epi32(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i32scatter_epi32(addr, index, v1, scale) \
__builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)-1, \
(__v16si)(__m512i)(index), \
- (__v16si)(__m512i)(v1), (int)(scale)); })
+ (__v16si)(__m512i)(v1), (int)(scale))
-#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
__builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)(mask), \
(__v16si)(__m512i)(index), \
- (__v16si)(__m512i)(v1), (int)(scale)); })
+ (__v16si)(__m512i)(v1), (int)(scale))
-#define _mm512_i32scatter_pd(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i32scatter_pd(addr, index, v1, scale) \
__builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)-1, \
(__v8si)(__m256i)(index), \
- (__v8df)(__m512d)(v1), (int)(scale)); })
+ (__v8df)(__m512d)(v1), (int)(scale))
-#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \
__builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)(mask), \
(__v8si)(__m256i)(index), \
- (__v8df)(__m512d)(v1), (int)(scale)); })
+ (__v8df)(__m512d)(v1), (int)(scale))
-#define _mm512_i32scatter_epi64(addr, index, v1, scale) __extension__ ({\
+#define _mm512_i32scatter_epi64(addr, index, v1, scale) \
__builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)-1, \
(__v8si)(__m256i)(index), \
- (__v8di)(__m512i)(v1), (int)(scale)); })
+ (__v8di)(__m512i)(v1), (int)(scale))
-#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({\
+#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
__builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)(mask), \
(__v8si)(__m256i)(index), \
- (__v8di)(__m512i)(v1), (int)(scale)); })
+ (__v8di)(__m512i)(v1), (int)(scale))
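/* Illustrative sketch, not from the header: for the gather/scatter macros,
   addr is a base pointer, index holds one offset per lane, and scale (1, 2,
   4 or 8) multiplies each index in bytes; in the masked gather, lanes with a
   clear mask bit keep the corresponding lane of the old value. Assumes
   AVX-512F; names are hypothetical. */
#include <immintrin.h>
static inline __m512i demo_masked_gather(const int *__Table, __m512i __Idx,
                                         __mmask16 __M, __m512i __Old)
{
  /* Loads __Table[__Idx[i]] (scale 4 = sizeof(int)) where __M bit i is set. */
  return _mm512_mask_i32gather_epi32(__Old, __M, __Idx, __Table, 4);
}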
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
+ (__v4sf)__A,
+ (__v4sf)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fmadd_round_ss(W, U, A, B, R) __extension__({\
+#define _mm_fmadd_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
(__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
+ (__v4sf)__B,
+ (__v4sf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) __extension__ ({\
+#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
(__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(C), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
- (__v4sf) __X,
- (__v4sf) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
+ (__v4sf)__X,
+ (__v4sf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) __extension__ ({\
+#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
(__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
(__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- (__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
+ (__v4sf)__A,
+ -(__v4sf)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fmsub_round_ss(W, U, A, B, R) __extension__ ({\
+#define _mm_fmsub_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
(__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R)); })
+ -(__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
+ (__v4sf)__B,
+ -(__v4sf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) __extension__ ({\
+#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
(__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
-(__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
- (__v4sf) __X,
- (__v4sf) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
+ (__v4sf)__X,
+ (__v4sf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) __extension__ ({\
+#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
(__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
(__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- -(__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
+ -(__v4sf)__A,
+ (__v4sf)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) __extension__ ({\
+#define _mm_fnmadd_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-(__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
+ -(__v4sf)__B,
+ (__v4sf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) __extension__ ({\
- (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
+#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
(__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
- (__v4sf) __X,
- (__v4sf) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
+ -(__v4sf)__X,
+ (__v4sf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) __extension__({\
- (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(X), \
+#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
+ (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
- -(__v4sf) __A,
- -(__v4sf) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
+ -(__v4sf)__A,
+ -(__v4sf)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) __extension__ ({\
+#define _mm_fnmsub_round_ss(A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-(__v4sf)(__m128)(A), \
-(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddss3_maskz (-(__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
+ -(__v4sf)__B,
+ -(__v4sf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) __extension__ ({\
- (__m128)__builtin_ia32_vfmaddss3_maskz(-(__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
+#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
+ (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
-(__v4sf)(__m128)(C), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfnmsubss3_mask3 ((__v4sf) __W,
- (__v4sf) __X,
- (__v4sf) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
+ -(__v4sf)__X,
+ (__v4sf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) __extension__({\
- (__m128)__builtin_ia32_vfnmsubss3_mask3((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(X), \
+#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
+ (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
+ (__v2df)__A,
+ (__v2df)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fmadd_round_sd(W, U, A, B, R) __extension__({\
+#define _mm_fmadd_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
(__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
+ (__v2df)__B,
+ (__v2df)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) __extension__ ({\
+#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
(__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(C), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
- (__v2df) __X,
- (__v2df) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
+ (__v2df)__X,
+ (__v2df)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) __extension__ ({\
+#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
(__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
(__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
- (__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
+ (__v2df)__A,
+ -(__v2df)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fmsub_round_sd(W, U, A, B, R) __extension__ ({\
+#define _mm_fmsub_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
(__v2df)(__m128d)(A), \
-(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
+ (__v2df)__B,
+ -(__v2df)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) __extension__ ({\
+#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
(__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
-(__v2df)(__m128d)(C), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
- (__v2df) __X,
- (__v2df) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
+ (__v2df)__X,
+ (__v2df)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) __extension__ ({\
+#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
(__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
(__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
- -(__v2df) __A,
- (__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
+ -(__v2df)__A,
+ (__v2df)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) __extension__ ({\
+#define _mm_fnmadd_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-(__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
+ -(__v2df)__B,
+ (__v2df)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) __extension__ ({\
- (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
+#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
(__v2df)(__m128d)(C), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) __W,
- (__v2df) __X,
- (__v2df) __Y,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
+ -(__v2df)__X,
+ (__v2df)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) __extension__({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(X), \
+#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
- -(__v2df) __A,
- -(__v2df) __B,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
+ -(__v2df)__A,
+ -(__v2df)__B,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) __extension__ ({\
+#define _mm_fnmsub_round_sd(A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R))
+
+#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-(__v2df)(__m128d)(A), \
-(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( -(__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
+ -(__v2df)__B,
+ -(__v2df)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) __extension__ ({\
- (__m128d)__builtin_ia32_vfmaddsd3_maskz(-(__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
+#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
+ (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
-(__v2df)(__m128d)(C), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION); })
+ (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfnmsubsd3_mask3 ((__v2df) (__W),
- (__v2df) __X,
- (__v2df) (__Y),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
+ -(__v2df)__X,
+ (__v2df)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
}
-#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) __extension__({\
- (__m128d)__builtin_ia32_vfnmsubsd3_mask3((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(X), \
+#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
+ (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), \
- (__mmask8)(U), (int)(R)); })
-
-#define _mm512_permutex_pd(X, C) __extension__ ({ \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(X), \
- (__v8df)_mm512_undefined_pd(), \
- 0 + (((C) >> 0) & 0x3), \
- 0 + (((C) >> 2) & 0x3), \
- 0 + (((C) >> 4) & 0x3), \
- 0 + (((C) >> 6) & 0x3), \
- 4 + (((C) >> 0) & 0x3), \
- 4 + (((C) >> 2) & 0x3), \
- 4 + (((C) >> 4) & 0x3), \
- 4 + (((C) >> 6) & 0x3)); })
-
-#define _mm512_mask_permutex_pd(W, U, X, C) __extension__ ({ \
+ (__mmask8)(U), (int)(R))
+
+#define _mm512_permutex_pd(X, C) \
+ (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))
+
+#define _mm512_mask_permutex_pd(W, U, X, C) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_permutex_pd((X), (C)), \
- (__v8df)(__m512d)(W)); })
+ (__v8df)(__m512d)(W))
-#define _mm512_maskz_permutex_pd(U, X, C) __extension__ ({ \
+#define _mm512_maskz_permutex_pd(U, X, C) \
(__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_permutex_pd((X), (C)), \
- (__v8df)_mm512_setzero_pd()); })
-
-#define _mm512_permutex_epi64(X, C) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(X), \
- (__v8di)_mm512_undefined_epi32(), \
- 0 + (((C) >> 0) & 0x3), \
- 0 + (((C) >> 2) & 0x3), \
- 0 + (((C) >> 4) & 0x3), \
- 0 + (((C) >> 6) & 0x3), \
- 4 + (((C) >> 0) & 0x3), \
- 4 + (((C) >> 2) & 0x3), \
- 4 + (((C) >> 4) & 0x3), \
- 4 + (((C) >> 6) & 0x3)); })
-
-#define _mm512_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
+ (__v8df)_mm512_setzero_pd())
+
+#define _mm512_permutex_epi64(X, C) \
+ (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))
+
+#define _mm512_mask_permutex_epi64(W, U, X, C) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_permutex_epi64((X), (C)), \
- (__v8di)(__m512i)(W)); })
+ (__v8di)(__m512i)(W))
-#define _mm512_maskz_permutex_epi64(U, X, C) __extension__ ({ \
+#define _mm512_maskz_permutex_epi64(U, X, C) \
(__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_permutex_epi64((X), (C)), \
- (__v8di)_mm512_setzero_si512()); })
+ (__v8di)_mm512_setzero_si512())
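
A sketch of the immediate form (illustrative values): the four 2-bit selectors in C are applied independently within each 256-bit half, so 0x1B (selectors 3,2,1,0) reverses the qwords inside each half.

  long long in[8] = {0, 1, 2, 3, 4, 5, 6, 7}, out[8];
  __m512i v = _mm512_loadu_si512(in);
  _mm512_storeu_si512(out, _mm512_permutex_epi64(v, 0x1B));
  /* out == {3, 2, 1, 0, 7, 6, 5, 4} */
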
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
{
- return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
- (__v8di) __X,
- (__v8df) _mm512_undefined_pd (),
- (__mmask8) -1);
+ return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
{
- return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
- (__v8di) __X,
- (__v8df) __W,
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_permutexvar_pd(__X, __Y),
+ (__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
{
- return (__m512d) __builtin_ia32_permvardf512_mask ((__v8df) __Y,
- (__v8di) __X,
- (__v8df) _mm512_setzero_pd (),
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_permutexvar_pd(__X, __Y),
+ (__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
- (__v8di) __X,
- (__v8di) _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
- (__v8di) __X,
- (__v8di) _mm512_undefined_epi32 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_permutexvar_epi64(__X, __Y),
+ (__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
__m512i __Y)
{
- return (__m512i) __builtin_ia32_permvardi512_mask ((__v8di) __Y,
- (__v8di) __X,
- (__v8di) __W,
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_permutexvar_epi64(__X, __Y),
+ (__v8di)__W);
}
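
Unlike the immediate form, permutexvar takes its lane indices from a vector: dst[i] = __Y[__X[i] & 7]. A small sketch (illustrative values):

  __m512i idx = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); /* element i holds 7-i */
  __m512i src = _mm512_set_epi64(70, 60, 50, 40, 30, 20, 10, 0);
  __m512i rev = _mm512_permutexvar_epi64(idx, src); /* src with qwords reversed */
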
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
{
- return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
- (__v16si) __X,
- (__v16sf) _mm512_undefined_ps (),
- (__mmask16) -1);
+ return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
{
- return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
- (__v16si) __X,
- (__v16sf) __W,
- (__mmask16) __U);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_permutexvar_ps(__X, __Y),
+ (__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
{
- return (__m512) __builtin_ia32_permvarsf512_mask ((__v16sf) __Y,
- (__v16si) __X,
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
-{
- return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
- (__v16si) __X,
- (__v16si) _mm512_setzero_si512 (),
- __M);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_permutexvar_ps(__X, __Y),
+ (__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
- (__v16si) __X,
- (__v16si) _mm512_undefined_epi32 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X);
}
#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_permutexvar_epi32(__X, __Y),
+ (__v16si)_mm512_setzero_si512());
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
__m512i __Y)
{
- return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
- (__v16si) __X,
- (__v16si) __W,
- __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_permutexvar_epi32(__X, __Y),
+ (__v16si)__W);
}
#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kand (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kandn (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kor (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_kortestc (__mmask16 __A, __mmask16 __B)
{
return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_kortestz (__mmask16 __A, __mmask16 __B)
{
return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
{
- return (__mmask16) (( __A & 0xFF) | ( __B << 8));
+ return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
}
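
Per the documented KUNPCKBW operation, bits 7:0 of the result come from __B and bits 15:8 from __A (the reverse of the removed shift expression). A quick sketch with illustrative mask values:

  __mmask16 k = _mm512_kunpackb((__mmask16)0x12, (__mmask16)0x34);
  /* k == 0x1234 */
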
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kxnor (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_kxor (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_stream_si512 (__m512i * __P, __m512i __A)
{
typedef __v8di __v8di_aligned __attribute__((aligned(64)));
__builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_stream_load_si512 (void const *__P)
{
typedef __v8di __v8di_aligned __attribute__((aligned(64)));
return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_stream_pd (double *__P, __m512d __A)
{
typedef __v8df __v8df_aligned __attribute__((aligned(64)));
__builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_stream_ps (float *__P, __m512 __A)
{
typedef __v16sf __v16sf_aligned __attribute__((aligned(64)));
__builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P);
}
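
These non-temporal stores require 64-byte-aligned destinations; a usage sketch (assumes C11 <stdalign.h> for _Alignas):

  _Alignas(64) float buf[16];
  _mm512_stream_ps(buf, _mm512_set1_ps(1.0f));
  _mm_sfence(); /* order the NT store before subsequent consumers of buf */
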
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
@@ -8838,7 +8404,7 @@ _mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
@@ -8847,7 +8413,7 @@ _mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
@@ -8855,7 +8421,7 @@ _mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
@@ -8864,7 +8430,7 @@ _mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
(__mmask8) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
@@ -8872,7 +8438,7 @@ _mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
@@ -8881,7 +8447,7 @@ _mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
@@ -8889,7 +8455,7 @@ _mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
@@ -8898,116 +8464,116 @@ _mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
(__mmask16) __U);
}
-#define _mm_cmp_round_ss_mask(X, Y, P, R) __extension__ ({ \
+#define _mm_cmp_round_ss_mask(X, Y, P, R) \
(__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) __extension__ ({ \
+#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
(__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
-#define _mm_cmp_ss_mask(X, Y, P) __extension__ ({ \
+#define _mm_cmp_ss_mask(X, Y, P) \
(__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (int)(P), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_cmp_ss_mask(M, X, Y, P) __extension__ ({ \
+#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
(__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
(__v4sf)(__m128)(Y), (int)(P), \
(__mmask8)(M), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_cmp_round_sd_mask(X, Y, P, R) __extension__ ({ \
+#define _mm_cmp_round_sd_mask(X, Y, P, R) \
(__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) __extension__ ({ \
+#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
(__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)(M), (int)(R)); })
+ (__mmask8)(M), (int)(R))
-#define _mm_cmp_sd_mask(X, Y, P) __extension__ ({ \
+#define _mm_cmp_sd_mask(X, Y, P) \
(__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), (int)(P), \
(__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
-#define _mm_mask_cmp_sd_mask(M, X, Y, P) __extension__ ({ \
+#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
(__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), (int)(P), \
(__mmask8)(M), \
- _MM_FROUND_CUR_DIRECTION); })
+ _MM_FROUND_CUR_DIRECTION)
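
The scalar compares test one element pair and return the result in bit 0 of the mask; a sketch using the standard AVX comparison predicates (illustrative values):

  __mmask8 m = _mm_cmp_ss_mask(_mm_set_ss(1.0f), _mm_set_ss(2.0f), _CMP_LT_OS);
  /* (m & 1) == 1, since 1.0f < 2.0f; bits 7:1 are zero */
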
/* Bit Test */
-static __inline __mmask16 __DEFAULT_FN_ATTRS
+static __inline __mmask16 __DEFAULT_FN_ATTRS512
_mm512_test_epi32_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpneq_epi32_mask (_mm512_and_epi32(__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline __mmask8 __DEFAULT_FN_ATTRS
+static __inline __mmask8 __DEFAULT_FN_ATTRS512
_mm512_test_epi64_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
- _mm512_setzero_epi32());
+ _mm512_setzero_si512());
}
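
Each test/testn form reduces to a compare-against-zero of the element-wise AND, so a result bit is set exactly when the corresponding AND is nonzero (test) or zero (testn). A sketch (illustrative values):

  __mmask16 m = _mm512_test_epi32_mask(_mm512_set1_epi32(1),
                                       _mm512_set1_epi32(3));
  /* every dword AND is 1, so m == 0xFFFF */
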
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_movehdup_ps (__m512 __A)
{
return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
@@ -9015,7 +8581,7 @@ _mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
@@ -9023,14 +8589,14 @@ _mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_moveldup_ps (__m512 __A)
{
return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
@@ -9038,7 +8604,7 @@ _mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
@@ -9046,132 +8612,94 @@ _mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- __m128 res = __A;
- res[0] = (__U & 1) ? __B[0] : __W[0];
- return res;
+ return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
{
- __m128 res = __A;
- res[0] = (__U & 1) ? __B[0] : 0;
- return res;
+ return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B),
+ _mm_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- __m128d res = __A;
- res[0] = (__U & 1) ? __B[0] : __W[0];
- return res;
+ return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
{
- __m128d res = __A;
- res[0] = (__U & 1) ? __B[0] : 0;
- return res;
+ return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B),
+ _mm_setzero_pd());
}
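
The select-based rewrite keeps the old element-wise semantics: element 0 is __B[0] when the mask bit is set (else __W[0], or 0 in the maskz form), and elements 1..3 always come from __A. A sketch (illustrative values):

  __m128 w = _mm_set_ss(9.0f);
  __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
  __m128 b = _mm_set_ss(5.0f);
  __m128 r0 = _mm_mask_move_ss(w, 0x0, a, b); /* {9, 2, 3, 4} */
  __m128 r1 = _mm_mask_move_ss(w, 0x1, a, b); /* {5, 2, 3, 4} */
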
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A)
{
- __builtin_ia32_storess128_mask ((__v16sf *)__W,
- (__v16sf) _mm512_castps128_ps512(__A),
- (__mmask16) __U & (__mmask16)1);
+ __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A)
{
- __builtin_ia32_storesd128_mask ((__v8df *)__W,
- (__v8df) _mm512_castpd128_pd512(__A),
- (__mmask8) __U & 1);
+ __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A)
{
__m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W,
- (__v4sf) {0.0, 0.0, 0.0, 0.0},
+ (__v4sf)_mm_setzero_ps(),
0, 4, 4, 4);
- return (__m128) __builtin_shufflevector(
- __builtin_ia32_loadss128_mask ((__v16sf *) __A,
- (__v16sf) _mm512_castps128_ps512(src),
- (__mmask16) __U & 1),
- _mm512_undefined_ps(), 0, 1, 2, 3);
+ return (__m128) __builtin_ia32_loadss128_mask ((__v4sf *) __A, src, __U & 1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_load_ss (__mmask8 __U, const float* __A)
{
- return (__m128) __builtin_shufflevector(
- __builtin_ia32_loadss128_mask ((__v16sf *) __A,
- (__v16sf) _mm512_setzero_ps(),
- (__mmask16) __U & 1),
- _mm512_undefined_ps(), 0, 1, 2, 3);
+ return (__m128)__builtin_ia32_loadss128_mask ((__v4sf *) __A,
+ (__v4sf) _mm_setzero_ps(),
+ __U & 1);
}
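
A sketch of the zero-masked scalar load (hypothetical source variable): element 0 is loaded only when mask bit 0 is set, and elements 1..3 are always zeroed:

  float x = 42.0f;
  __m128 r = _mm_maskz_load_ss(0x1, &x); /* {42, 0, 0, 0} */
  __m128 z = _mm_maskz_load_ss(0x0, &x); /* {0, 0, 0, 0} */
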
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A)
{
__m128d src = (__v2df) __builtin_shufflevector((__v2df) __W,
- (__v2df) {0.0, 0.0}, 0, 2);
+ (__v2df)_mm_setzero_pd(),
+ 0, 2);
- return (__m128d) __builtin_shufflevector(
- __builtin_ia32_loadsd128_mask ((__v8df *) __A,
- (__v8df) _mm512_castpd128_pd512(src),
- (__mmask8) __U & 1),
- _mm512_undefined_pd(), 0, 1);
+ return (__m128d) __builtin_ia32_loadsd128_mask ((__v2df *) __A, src, __U & 1);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_load_sd (__mmask8 __U, const double* __A)
{
- return (__m128d) __builtin_shufflevector(
- __builtin_ia32_loadsd128_mask ((__v8df *) __A,
- (__v8df) _mm512_setzero_pd(),
- (__mmask8) __U & 1),
- _mm512_undefined_pd(), 0, 1);
-}
-
-#define _mm512_shuffle_epi32(A, I) __extension__ ({ \
- (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
- (__v16si)_mm512_undefined_epi32(), \
- 0 + (((I) >> 0) & 0x3), \
- 0 + (((I) >> 2) & 0x3), \
- 0 + (((I) >> 4) & 0x3), \
- 0 + (((I) >> 6) & 0x3), \
- 4 + (((I) >> 0) & 0x3), \
- 4 + (((I) >> 2) & 0x3), \
- 4 + (((I) >> 4) & 0x3), \
- 4 + (((I) >> 6) & 0x3), \
- 8 + (((I) >> 0) & 0x3), \
- 8 + (((I) >> 2) & 0x3), \
- 8 + (((I) >> 4) & 0x3), \
- 8 + (((I) >> 6) & 0x3), \
- 12 + (((I) >> 0) & 0x3), \
- 12 + (((I) >> 2) & 0x3), \
- 12 + (((I) >> 4) & 0x3), \
- 12 + (((I) >> 6) & 0x3)); })
-
-#define _mm512_mask_shuffle_epi32(W, U, A, I) __extension__ ({ \
+ return (__m128d) __builtin_ia32_loadsd128_mask ((__v2df *) __A,
+ (__v2df) _mm_setzero_pd(),
+ __U & 1);
+}
+
+#define _mm512_shuffle_epi32(A, I) \
+ (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))
+
+#define _mm512_mask_shuffle_epi32(W, U, A, I) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_shuffle_epi32((A), (I)), \
- (__v16si)(__m512i)(W)); })
+ (__v16si)(__m512i)(W))
-#define _mm512_maskz_shuffle_epi32(U, A, I) __extension__ ({ \
+#define _mm512_maskz_shuffle_epi32(U, A, I) \
(__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_shuffle_epi32((A), (I)), \
- (__v16si)_mm512_setzero_si512()); })
+ (__v16si)_mm512_setzero_si512())
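
_mm512_shuffle_epi32 behaves like PSHUFD applied independently to each 128-bit lane; a sketch (illustrative values):

  int in[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, out[16];
  __m512i v = _mm512_loadu_si512(in);
  _mm512_storeu_si512(out, _mm512_shuffle_epi32(v, 0x1B)); /* reverse per lane */
  /* out == {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12} */
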
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
@@ -9179,7 +8707,7 @@ _mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
@@ -9187,7 +8715,7 @@ _mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
@@ -9195,15 +8723,15 @@ _mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
- (__v8di) _mm512_setzero_pd (),
+ (__v8di) _mm512_setzero_si512 (),
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
{
return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
@@ -9211,7 +8739,7 @@ _mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
{
return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
@@ -9219,7 +8747,7 @@ _mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
@@ -9227,15 +8755,15 @@ _mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
- (__v8di) _mm512_setzero_pd(),
+ (__v8di) _mm512_setzero_si512(),
(__mmask8) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
{
return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
@@ -9243,7 +8771,7 @@ _mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
{
return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
@@ -9251,7 +8779,7 @@ _mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
@@ -9259,15 +8787,15 @@ _mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
- (__v16si) _mm512_setzero_ps(),
+ (__v16si) _mm512_setzero_si512(),
(__mmask16) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
@@ -9275,7 +8803,7 @@ _mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
@@ -9283,7 +8811,7 @@ _mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
@@ -9291,71 +8819,64 @@ _mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
(__mmask16) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
- (__v16si) _mm512_setzero_ps(),
+ (__v16si) _mm512_setzero_si512(),
(__mmask16) __U);
}
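
Expand is the inverse of compress: successive low elements of the source are scattered to the positions whose mask bits are set (other positions are zeroed in the maskz form). A sketch (illustrative values):

  __m512i v = _mm512_setr_epi32(10, 20, 30, 40, 50, 60, 70, 80,
                                90, 100, 110, 120, 130, 140, 150, 160);
  __m512i r = _mm512_maskz_expand_epi32(0x0005, v);
  /* element 0 gets 10, element 2 gets 20, all other elements are 0 */
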
-#define _mm512_cvt_roundps_pd(A, R) __extension__ ({ \
+#define _mm512_cvt_roundps_pd(A, R) \
(__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
(__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm512_mask_cvt_roundps_pd(W, U, A, R) __extension__ ({ \
+#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
(__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
(__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm512_maskz_cvt_roundps_pd(U, A, R) __extension__ ({ \
+#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
(__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
(__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtps_pd (__m256 __A)
{
- return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
- (__v8df)
- _mm512_undefined_pd (),
- (__mmask8) -1,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
{
- return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_cvtps_pd(__A),
+ (__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
{
- return (__m512d) __builtin_ia32_cvtps2pd512_mask ((__v8sf) __A,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_cvtps_pd(__A),
+ (__v8df)_mm512_setzero_pd());
}
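
The unmasked conversion is now expressed with __builtin_convertvector and the masked forms wrap it in a select; observable behavior is unchanged. A sketch (illustrative values):

  __m512d d = _mm512_cvtps_pd(_mm256_set1_ps(1.5f)); /* eight 1.5 doubles */
  __m512d m = _mm512_maskz_cvtps_pd(0x0F, _mm256_set1_ps(1.5f));
  /* low four elements 1.5, high four zeroed */
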
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtpslo_pd (__m512 __A)
{
- return (__m512) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
+ return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
{
- return (__m512) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
+ return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
@@ -9363,7 +8884,7 @@ _mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
(__v8df) __W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
{
return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
@@ -9371,7 +8892,7 @@ _mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
(__v8df) _mm512_setzero_pd ());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
@@ -9379,7 +8900,7 @@ _mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
(__v16sf) __W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
{
return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
@@ -9387,68 +8908,68 @@ _mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
(__v16sf) _mm512_setzero_ps ());
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
{
__builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
{
__builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
{
__builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
(__mmask16) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
{
__builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
(__mmask16) __U);
}
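
Compress-store packs the selected elements contiguously at the destination, writing only as many elements as there are set mask bits. A sketch (illustrative values):

  int src[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
  int dst[16] = {0};
  _mm512_mask_compressstoreu_epi32(dst, 0xAAAA, _mm512_loadu_si512(src));
  /* dst[0..7] == {1, 3, 5, 7, 9, 11, 13, 15}; dst[8..15] are untouched */
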
-#define _mm_cvt_roundsd_ss(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundsd_ss(A, B, R) \
(__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
(__v2df)(__m128d)(B), \
(__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
(__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
(__v2df)(__m128d)(B), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
(__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
(__v2df)(__m128d)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
{
- return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
- (__v2df)(__B),
- (__v4sf)(__W),
- (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
+ (__v2df)__B,
+ (__v4sf)__W,
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
{
- return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A),
- (__v2df)(__B),
+ return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
+ (__v2df)__B,
(__v4sf)_mm_setzero_ps(),
- (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
}
#define _mm_cvtss_i32 _mm_cvtss_si32
@@ -9463,111 +8984,112 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#endif
#ifdef __x86_64__
-#define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundi64_sd(A, B, R) \
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
- (int)(R)); })
+ (int)(R))
-#define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundsi64_sd(A, B, R) \
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
- (int)(R)); })
+ (int)(R))
#endif
-#define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+#define _mm_cvt_roundsi32_ss(A, B, R) \
+ (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
-#define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \
- (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+#define _mm_cvt_roundi32_ss(A, B, R) \
+ (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
#ifdef __x86_64__
-#define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundsi64_ss(A, B, R) \
(__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
- (int)(R)); })
+ (int)(R))
-#define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundi64_ss(A, B, R) \
(__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
- (int)(R)); })
+ (int)(R))
#endif
-#define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundss_sd(A, B, R) \
(__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
(__v4sf)(__m128)(B), \
(__v2df)_mm_undefined_pd(), \
- (__mmask8)-1, (int)(R)); })
+ (__mmask8)-1, (int)(R))
-#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) __extension__ ({ \
+#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
(__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
(__v4sf)(__m128)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-#define _mm_maskz_cvt_roundss_sd(U, A, B, R) __extension__ ({ \
+#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
(__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
(__v4sf)(__m128)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R)); })
+ (__mmask8)(U), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
{
- return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
- (__v4sf)(__B),
- (__v2df)(__W),
- (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
+ (__v4sf)__B,
+ (__v2df)__W,
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
{
- return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A),
- (__v4sf)(__B),
- (__v2df)_mm_setzero_pd(),
- (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+ return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
+ (__v4sf)__B,
+ (__v2df)_mm_setzero_pd(),
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtu32_sd (__m128d __A, unsigned __B)
{
- return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
+ __A[0] = __B;
+ return __A;
}
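
The new body leans on C's unsigned-to-double conversion for element 0, which also handles values above INT_MAX correctly; a sketch (illustrative values):

  __m128d a = _mm_set_pd(8.0, 0.0);          /* element 0 = 0.0, element 1 = 8.0 */
  __m128d r = _mm_cvtu32_sd(a, 3000000000u); /* {3000000000.0, 8.0} */
  double lo = _mm_cvtsd_f64(r);              /* 3000000000.0 */
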
#ifdef __x86_64__
-#define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundu64_sd(A, B, R) \
(__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
- (unsigned long long)(B), (int)(R)); })
+ (unsigned long long)(B), (int)(R))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
{
- return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
- _MM_FROUND_CUR_DIRECTION);
+ __A[0] = __B;
+ return __A;
}
#endif
-#define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundu32_ss(A, B, R) \
(__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
- (int)(R)); })
+ (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtu32_ss (__m128 __A, unsigned __B)
{
- return (__m128) __builtin_ia32_cvtusi2ss32 ((__v4sf) __A, __B,
- _MM_FROUND_CUR_DIRECTION);
+ __A[0] = __B;
+ return __A;
}
#ifdef __x86_64__
-#define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \
+#define _mm_cvt_roundu64_ss(A, B, R) \
(__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
- (unsigned long long)(B), (int)(R)); })
+ (unsigned long long)(B), (int)(R))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
{
- return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
- _MM_FROUND_CUR_DIRECTION);
+ __A[0] = __B;
+ return __A;
}
#endif
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
{
return (__m512i) __builtin_ia32_selectd_512(__M,
@@ -9575,17 +9097,15 @@ _mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
(__v16si) __O);
}
-#ifdef __x86_64__
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
{
return (__m512i) __builtin_ia32_selectq_512(__M,
(__v8di) _mm512_set1_epi64(__A),
(__v8di) __O);
}
-#endif
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
char __e58, char __e57, char __e56, char __e55, char __e54, char __e53,
char __e52, char __e51, char __e50, char __e49, char __e48, char __e47,
@@ -9609,7 +9129,7 @@ _mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
__e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63};
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
short __e27, short __e26, short __e25, short __e24, short __e23,
short __e22, short __e21, short __e20, short __e19, short __e18,
@@ -9624,7 +9144,7 @@ _mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
__e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 };
}
-static __inline __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_set_epi32 (int __A, int __B, int __C, int __D,
int __E, int __F, int __G, int __H,
int __I, int __J, int __K, int __L,
@@ -9640,7 +9160,7 @@ _mm512_set_epi32 (int __A, int __B, int __C, int __D,
_mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
(e5),(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_set_epi64 (long long __A, long long __B, long long __C,
long long __D, long long __E, long long __F,
long long __G, long long __H)
@@ -9652,7 +9172,7 @@ _mm512_set_epi64 (long long __A, long long __B, long long __C,
#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \
_mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_set_pd (double __A, double __B, double __C, double __D,
double __E, double __F, double __G, double __H)
{
@@ -9663,7 +9183,7 @@ _mm512_set_pd (double __A, double __B, double __C, double __D,
#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \
_mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_set_ps (float __A, float __B, float __C, float __D,
float __E, float __F, float __G, float __H,
float __I, float __J, float __K, float __L,
@@ -9678,556 +9198,401 @@ _mm512_set_ps (float __A, float __B, float __C, float __D,
_mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
(e4),(e3),(e2),(e1),(e0))
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_abs_ps(__m512 __A)
{
return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ;
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
{
return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ;
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_abs_pd(__m512d __A)
{
return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A) ;
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
{
return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A);
}
-// Vector-reduction arithmetic accepts vectors as inputs and produces scalars as
-// outputs. This class of vector operation forms the basis of many scientific
-// computations. In vector-reduction arithmetic, the evaluation off is
-// independent of the order of the input elements of V.
-
-// Used bisection method. At each step, we partition the vector with previous
-// step in half, and the operation is performed on its two halves.
-// This takes log2(n) steps where n is the number of elements in the vector.
-
-// Vec512 - Vector with size 512.
-// Operator - Can be one of following: +,*,&,|
-// T2 - Can get 'i' for int and 'f' for float.
-// T1 - Can get 'i' for int and 'd' for double.
-
-#define _mm512_reduce_operator_64bit(Vec512, Operator, T2, T1) \
- __extension__({ \
- __m256##T1 Vec256 = __builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 0, 1, 2, 3) \
- Operator \
- __builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 4, 5, 6, 7); \
- __m128##T1 Vec128 = __builtin_shufflevector( \
- (__v4d##T2)Vec256, \
- (__v4d##T2)Vec256, \
- 0, 1) \
- Operator \
- __builtin_shufflevector( \
- (__v4d##T2)Vec256, \
- (__v4d##T2)Vec256, \
- 2, 3); \
- Vec128 = __builtin_shufflevector((__v2d##T2)Vec128, \
- (__v2d##T2)Vec128, 0, -1) \
- Operator \
- __builtin_shufflevector((__v2d##T2)Vec128, \
- (__v2d##T2)Vec128, 1, -1); \
- return Vec128[0]; \
- })
-
-static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_add_epi64(__m512i __W) {
- _mm512_reduce_operator_64bit(__W, +, i, i);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_mul_epi64(__m512i __W) {
- _mm512_reduce_operator_64bit(__W, *, i, i);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_and_epi64(__m512i __W) {
- _mm512_reduce_operator_64bit(__W, &, i, i);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_or_epi64(__m512i __W) {
- _mm512_reduce_operator_64bit(__W, |, i, i);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_add_pd(__m512d __W) {
- _mm512_reduce_operator_64bit(__W, +, f, d);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_mul_pd(__m512d __W) {
- _mm512_reduce_operator_64bit(__W, *, f, d);
-}
-
-// Vec512 - Vector with size 512.
-// Vec512Neutral - All vector elements set to the identity element.
-// Identity element: {+,0},{*,1},{&,0xFFFFFFFFFFFFFFFF},{|,0}
-// Operator - Can be one of following: +,*,&,|
-// Mask - Intrinsic Mask
-// T2 - Can get 'i' for int and 'f' for float.
-// T1 - Can get 'i' for int and 'd' for packed double-precision.
-// T3 - Can be Pd for packed double or q for q-word.
-
-#define _mm512_mask_reduce_operator_64bit(Vec512, Vec512Neutral, Operator, \
- Mask, T2, T1, T3) \
- __extension__({ \
- Vec512 = __builtin_ia32_select##T3##_512( \
- (__mmask8)Mask, \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512Neutral); \
- _mm512_reduce_operator_64bit(Vec512, Operator, T2, T1); \
- })
-
-static __inline__ long long __DEFAULT_FN_ATTRS
+/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars
+ * as outputs. This class of vector operation forms the basis of many
+ * scientific computations. In vector-reduction arithmetic, the evaluation of
+ * the operation is independent of the order of the input elements of V.
+ *
+ * We use a bisection method: at each step, we partition the vector from the
+ * previous step in half, and the operation is performed on its two halves.
+ * This takes log2(n) steps, where n is the number of elements in the vector.
+ */
+
+#define _mm512_mask_reduce_operator(op) \
+ __v4du __t1 = (__v4du)_mm512_extracti64x4_epi64(__W, 0); \
+ __v4du __t2 = (__v4du)_mm512_extracti64x4_epi64(__W, 1); \
+ __m256i __t3 = (__m256i)(__t1 op __t2); \
+ __v2du __t4 = (__v2du)_mm256_extracti128_si256(__t3, 0); \
+ __v2du __t5 = (__v2du)_mm256_extracti128_si256(__t3, 1); \
+ __v2du __t6 = __t4 op __t5; \
+ __v2du __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
+ __v2du __t8 = __t6 op __t7; \
+ return __t8[0];
+
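To make the bisection pattern concrete, here is a minimal scalar sketch of the same log2(n) reduction over eight 64-bit lanes; it illustrates the macro's data flow and is not part of the header itself.

#include <stdio.h>

/* Scalar sketch of the bisection reduction the macro performs:
 * halve the active width each round, combining pairs of lanes. */
static long long reduce_add8(const long long v[8]) {
  long long t[8];
  for (int i = 0; i < 8; ++i) t[i] = v[i];
  for (int width = 4; width >= 1; width /= 2)   /* 3 = log2(8) rounds */
    for (int i = 0; i < width; ++i)
      t[i] = t[i] + t[i + width];
  return t[0];
}

int main(void) {
  long long v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  printf("%lld\n", reduce_add8(v));             /* prints 36 */
  return 0;
}
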
+static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
+ _mm512_mask_reduce_operator(+);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
+ _mm512_mask_reduce_operator(*);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
+ _mm512_mask_reduce_operator(&);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
+ _mm512_mask_reduce_operator(|);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
- _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), +, __M, i, i, q);
+ __W = _mm512_maskz_mov_epi64(__M, __W);
+ _mm512_mask_reduce_operator(+);
}
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
- _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(1), *, __M, i, i, q);
+ __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
+ _mm512_mask_reduce_operator(*);
}
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
- _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF),
- &, __M, i, i, q);
+ __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
+ _mm512_mask_reduce_operator(&);
}
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
- _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), |, __M,
- i, i, q);
+ __W = _mm512_maskz_mov_epi64(__M, __W);
+ _mm512_mask_reduce_operator(|);
+}
+#undef _mm512_mask_reduce_operator
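
A hypothetical driver for the masked form, assuming an AVX512F-capable CPU and compilation with -mavx512f. Masked-off lanes are replaced by the operation's identity (0 for addition) before the reduction, so they cannot affect the result.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* lane 0 holds 1, lane 7 holds 8 */
  __m512i v = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1);
  __mmask8 even = 0x55;                         /* lanes 0, 2, 4, 6 */
  printf("%lld\n", _mm512_mask_reduce_add_epi64(even, v)); /* 1+3+5+7 = 16 */
  return 0;
}
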
+
+#define _mm512_mask_reduce_operator(op) \
+ __m256d __t1 = _mm512_extractf64x4_pd(__W, 0); \
+ __m256d __t2 = _mm512_extractf64x4_pd(__W, 1); \
+ __m256d __t3 = __t1 op __t2; \
+ __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
+ __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
+ __m128d __t6 = __t4 op __t5; \
+ __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
+ __m128d __t8 = __t6 op __t7; \
+ return __t8[0];
+
+static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
+ _mm512_mask_reduce_operator(+);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
+ _mm512_mask_reduce_operator(*);
}
-static __inline__ double __DEFAULT_FN_ATTRS
+static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
- _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(0), +, __M,
- f, d, pd);
+ __W = _mm512_maskz_mov_pd(__M, __W);
+ _mm512_mask_reduce_operator(+);
}
-static __inline__ double __DEFAULT_FN_ATTRS
+static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
- _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(1), *, __M,
- f, d, pd);
-}
-
-// Vec512 - Vector with size 512.
-// Operator - Can be one of following: +,*,&,|
-// T2 - Can get 'i' for int and ' ' for packed single.
-// T1 - Can get 'i' for int and 'f' for float.
-
-#define _mm512_reduce_operator_32bit(Vec512, Operator, T2, T1) __extension__({ \
- __m256##T1 Vec256 = \
- (__m256##T1)(__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 0, 1, 2, 3, 4, 5, 6, 7) \
- Operator \
- __builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 8, 9, 10, 11, 12, 13, 14, 15)); \
- __m128##T1 Vec128 = \
- (__m128##T1)(__builtin_shufflevector( \
- (__v8s##T2)Vec256, \
- (__v8s##T2)Vec256, \
- 0, 1, 2, 3) \
- Operator \
- __builtin_shufflevector( \
- (__v8s##T2)Vec256, \
- (__v8s##T2)Vec256, \
- 4, 5, 6, 7)); \
- Vec128 = (__m128##T1)(__builtin_shufflevector( \
- (__v4s##T2)Vec128, \
- (__v4s##T2)Vec128, \
- 0, 1, -1, -1) \
- Operator \
- __builtin_shufflevector( \
- (__v4s##T2)Vec128, \
- (__v4s##T2)Vec128, \
- 2, 3, -1, -1)); \
- Vec128 = (__m128##T1)(__builtin_shufflevector( \
- (__v4s##T2)Vec128, \
- (__v4s##T2)Vec128, \
- 0, -1, -1, -1) \
- Operator \
- __builtin_shufflevector( \
- (__v4s##T2)Vec128, \
- (__v4s##T2)Vec128, \
- 1, -1, -1, -1)); \
- return Vec128[0]; \
- })
-
-static __inline__ int __DEFAULT_FN_ATTRS
+ __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W);
+ _mm512_mask_reduce_operator(*);
+}
+#undef _mm512_mask_reduce_operator
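
Note that the floating-point reductions combine lanes in this fixed tree order, which generally differs from a strict left-to-right sum. A scalar sketch of the tree order for eight doubles (illustrative only, not part of the header):

/* Scalar sketch: pairwise (tree-order) sum of eight doubles, matching
 * the halve-and-combine order of the macro above. Tree order can round
 * differently than a sequential accumulation loop. */
static double tree_add8(const double v[8]) {
  double t4[4], t2[2];
  for (int i = 0; i < 4; ++i) t4[i] = v[i] + v[i + 4];
  for (int i = 0; i < 2; ++i) t2[i] = t4[i] + t4[i + 2];
  return t2[0] + t2[1];
}
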
+
+#define _mm512_mask_reduce_operator(op) \
+ __v8su __t1 = (__v8su)_mm512_extracti64x4_epi64(__W, 0); \
+ __v8su __t2 = (__v8su)_mm512_extracti64x4_epi64(__W, 1); \
+ __m256i __t3 = (__m256i)(__t1 op __t2); \
+ __v4su __t4 = (__v4su)_mm256_extracti128_si256(__t3, 0); \
+ __v4su __t5 = (__v4su)_mm256_extracti128_si256(__t3, 1); \
+ __v4su __t6 = __t4 op __t5; \
+ __v4su __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
+ __v4su __t8 = __t6 op __t7; \
+ __v4su __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
+ __v4su __t10 = __t8 op __t9; \
+ return __t10[0];
+
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_add_epi32(__m512i __W) {
- _mm512_reduce_operator_32bit(__W, +, i, i);
+ _mm512_mask_reduce_operator(+);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_mul_epi32(__m512i __W) {
- _mm512_reduce_operator_32bit(__W, *, i, i);
+ _mm512_mask_reduce_operator(*);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_and_epi32(__m512i __W) {
- _mm512_reduce_operator_32bit(__W, &, i, i);
+ _mm512_mask_reduce_operator(&);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_or_epi32(__m512i __W) {
- _mm512_reduce_operator_32bit(__W, |, i, i);
+ _mm512_mask_reduce_operator(|);
}
-static __inline__ float __DEFAULT_FN_ATTRS
-_mm512_reduce_add_ps(__m512 __W) {
- _mm512_reduce_operator_32bit(__W, +, f, );
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS
-_mm512_reduce_mul_ps(__m512 __W) {
- _mm512_reduce_operator_32bit(__W, *, f, );
-}
-
-// Vec512 - Vector with size 512.
-// Vec512Neutral - All vector elements set to the identity element.
-// Identity element: {+,0},{*,1},{&,0xFFFFFFFF},{|,0}
-// Operator - Can be one of following: +,*,&,|
-// Mask - Intrinsic Mask
-// T2 - Can get 'i' for int and 'f' for float.
-// T1 - Can get 'i' for int and 'd' for double.
-// T3 - Can be Ps for packed single or d for d-word.
-
-#define _mm512_mask_reduce_operator_32bit(Vec512, Vec512Neutral, Operator, \
- Mask, T2, T1, T3) \
- __extension__({ \
- Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \
- (__mmask16)Mask, \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512Neutral); \
- _mm512_reduce_operator_32bit(Vec512, Operator, T2, T1); \
- })
-
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
- _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0), +, __M, i, i, d);
+ __W = _mm512_maskz_mov_epi32(__M, __W);
+ _mm512_mask_reduce_operator(+);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
- _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(1), *, __M, i, i, d);
+ __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
+ _mm512_mask_reduce_operator(*);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
- _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0xFFFFFFFF), &, __M,
- i, i, d);
+ __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
+ _mm512_mask_reduce_operator(&);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
- _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0), |, __M, i, i, d);
+ __W = _mm512_maskz_mov_epi32(__M, __W);
+ _mm512_mask_reduce_operator(|);
+}
+#undef _mm512_mask_reduce_operator
+
+#define _mm512_mask_reduce_operator(op) \
+ __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 0); \
+ __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 1); \
+ __m256 __t3 = __t1 op __t2; \
+ __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
+ __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
+ __m128 __t6 = __t4 op __t5; \
+ __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
+ __m128 __t8 = __t6 op __t7; \
+ __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
+ __m128 __t10 = __t8 op __t9; \
+ return __t10[0];
+
+static __inline__ float __DEFAULT_FN_ATTRS512
+_mm512_reduce_add_ps(__m512 __W) {
+ _mm512_mask_reduce_operator(+);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS512
+_mm512_reduce_mul_ps(__m512 __W) {
+ _mm512_mask_reduce_operator(*);
}
-static __inline__ float __DEFAULT_FN_ATTRS
+static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
- _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_ps(0), +, __M, f, , ps);
+ __W = _mm512_maskz_mov_ps(__M, __W);
+ _mm512_mask_reduce_operator(+);
}
-static __inline__ float __DEFAULT_FN_ATTRS
+static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
- _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_ps(1), *, __M, f, , ps);
-}
-
-// Used bisection method. At each step, we partition the vector with previous
-// step in half, and the operation is performed on its two halves.
-// This takes log2(n) steps where n is the number of elements in the vector.
-// This macro uses only intrinsics from the AVX512F feature.
-
-// Vec512 - Vector with size of 512.
-// IntrinName - Can be one of following: {max|min}_{epi64|epu64|pd} for example:
-// __mm512_max_epi64
-// T1 - Can get 'i' for int and 'd' for double.[__m512{i|d}]
-// T2 - Can get 'i' for int and 'f' for float. [__v8d{i|f}]
-
-#define _mm512_reduce_maxMin_64bit(Vec512, IntrinName, T1, T2) __extension__({ \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 0, 1, 2, 3, -1, -1, -1, -1), \
- (__m512##T1)__builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 4, 5, 6, 7, -1, -1, -1, -1)); \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 0, 1, -1, -1, -1, -1, -1, -1),\
- (__m512##T1)__builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 2, 3, -1, -1, -1, -1, -1, \
- -1)); \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 0, -1, -1, -1, -1, -1, -1, -1),\
- (__m512##T1)__builtin_shufflevector( \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512, \
- 1, -1, -1, -1, -1, -1, -1, -1))\
- ; \
- return Vec512[0]; \
- })
-
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm512_reduce_max_epi64(__m512i __V) {
- _mm512_reduce_maxMin_64bit(__V, max_epi64, i, i);
+ __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W);
+ _mm512_mask_reduce_operator(*);
}
+#undef _mm512_mask_reduce_operator
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_mm512_reduce_max_epu64(__m512i __V) {
- _mm512_reduce_maxMin_64bit(__V, max_epu64, i, i);
+#define _mm512_mask_reduce_operator(op) \
+ __m512i __t1 = (__m512i)__builtin_shufflevector((__v8di)__V, (__v8di)__V, 4, 5, 6, 7, 0, 1, 2, 3); \
+ __m512i __t2 = _mm512_##op(__V, __t1); \
+ __m512i __t3 = (__m512i)__builtin_shufflevector((__v8di)__t2, (__v8di)__t2, 2, 3, 0, 1, 6, 7, 4, 5); \
+ __m512i __t4 = _mm512_##op(__t2, __t3); \
+ __m512i __t5 = (__m512i)__builtin_shufflevector((__v8di)__t4, (__v8di)__t4, 1, 0, 3, 2, 5, 4, 7, 6); \
+ __v8di __t6 = (__v8di)_mm512_##op(__t4, __t5); \
+ return __t6[0];
+
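Unlike the arithmetic reductions above, this macro stays at full 512-bit width and combines the vector with shuffled copies of itself. A scalar sketch of the same idea for a max reduction over eight lanes (the rotation schedule here is simplified but equivalent in effect):

/* Scalar sketch: reduce max by combining the array with rotated
 * copies of itself, halving the rotation distance each round. */
static long long reduce_max8(const long long v[8]) {
  long long t[8], r[8];
  for (int i = 0; i < 8; ++i) t[i] = v[i];
  for (int rot = 4; rot >= 1; rot /= 2) {
    for (int i = 0; i < 8; ++i) r[i] = t[(i + rot) % 8];
    for (int i = 0; i < 8; ++i) t[i] = t[i] > r[i] ? t[i] : r[i];
  }
  return t[0];
}
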
+static __inline__ long long __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_epi64(__m512i __V) {
+ _mm512_mask_reduce_operator(max_epi64);
}
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm512_reduce_max_pd(__m512d __V) {
- _mm512_reduce_maxMin_64bit(__V, max_pd, d, f);
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_epu64(__m512i __V) {
+ _mm512_mask_reduce_operator(max_epu64);
}
-static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_min_epi64
-(__m512i __V) {
- _mm512_reduce_maxMin_64bit(__V, min_epi64, i, i);
+static __inline__ long long __DEFAULT_FN_ATTRS512
+_mm512_reduce_min_epi64(__m512i __V) {
+ _mm512_mask_reduce_operator(min_epi64);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu64(__m512i __V) {
- _mm512_reduce_maxMin_64bit(__V, min_epu64, i, i);
+ _mm512_mask_reduce_operator(min_epu64);
}
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm512_reduce_min_pd(__m512d __V) {
- _mm512_reduce_maxMin_64bit(__V, min_pd, d, f);
-}
-
-// Vec512 - Vector with size 512.
-// Vec512Neutral - A 512 length vector with elements set to the identity element
-// Identity element: {max_epi,0x8000000000000000}
-// {max_epu,0x0000000000000000}
-// {max_pd, 0xFFF0000000000000}
-// {min_epi,0x7FFFFFFFFFFFFFFF}
-// {min_epu,0xFFFFFFFFFFFFFFFF}
-// {min_pd, 0x7FF0000000000000}
-//
-// IntrinName - Can be one of following: {max|min}_{epi64|epu64|pd} for example:
-// __mm512_max_epi64
-// T1 - Can get 'i' for int and 'd' for double.[__m512{i|d}]
-// T2 - Can get 'i' for int and 'f' for float. [__v8d{i|f}]
-// T3 - Can get 'q' q word and 'pd' for packed double.
-// [__builtin_ia32_select{q|pd}_512]
-// Mask - Intrinsic Mask
-
-#define _mm512_mask_reduce_maxMin_64bit(Vec512, Vec512Neutral, IntrinName, T1, \
- T2, T3, Mask) \
- __extension__({ \
- Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \
- (__mmask8)Mask, \
- (__v8d##T2)Vec512, \
- (__v8d##T2)Vec512Neutral); \
- _mm512_reduce_maxMin_64bit(Vec512, IntrinName, T1, T2); \
- })
-
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x8000000000000000),
- max_epi64, i, i, q, __M);
+ __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
+ _mm512_mask_reduce_operator(max_epi64);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x0000000000000000),
- max_epu64, i, i, q, __M);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
- _mm512_mask_reduce_maxMin_64bit(__V, -_mm512_set1_pd(__builtin_inf()),
- max_pd, d, f, pd, __M);
+ __V = _mm512_maskz_mov_epi64(__M, __V);
+ _mm512_mask_reduce_operator(max_epu64);
}
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),
- min_epi64, i, i, q, __M);
+ __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
+ _mm512_mask_reduce_operator(min_epi64);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF),
- min_epu64, i, i, q, __M);
+ __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
+ _mm512_mask_reduce_operator(min_epu64);
}
+#undef _mm512_mask_reduce_operator
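
The masked min/max forms substitute the comparison's identity into cleared lanes (for a signed max, the smallest representable value, which can never win). A scalar sketch of that trick:

#include <limits.h>

/* Scalar sketch of the masked-max identity trick: a cleared mask bit
 * substitutes LLONG_MIN, which never wins a signed max comparison. */
static long long mask_reduce_max8(unsigned char m, const long long v[8]) {
  long long best = LLONG_MIN;
  for (int i = 0; i < 8; ++i) {
    long long x = (m >> i) & 1 ? v[i] : LLONG_MIN;
    if (x > best) best = x;
  }
  return best;
}
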
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
- _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_pd(__builtin_inf()),
- min_pd, d, f, pd, __M);
-}
-
-// Vec512 - Vector with size 512.
-// IntrinName - Can be one of following: {max|min}_{epi32|epu32|ps} for example:
-// __mm512_max_epi32
-// T1 - Can get 'i' for int and ' ' .[__m512{i|}]
-// T2 - Can get 'i' for int and 'f' for float.[__v16s{i|f}]
-
-#define _mm512_reduce_maxMin_32bit(Vec512, IntrinName, T1, T2) __extension__({ \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 0, 1, 2, 3, 4, 5, 6, 7, \
- -1, -1, -1, -1, -1, -1, -1, -1), \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 8, 9, 10, 11, 12, 13, 14, 15, \
- -1, -1, -1, -1, -1, -1, -1, -1)); \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 0, 1, 2, 3, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1), \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 4, 5, 6, 7, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1)); \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 0, 1, -1, -1, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1), \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 2, 3, -1, -1, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1)); \
- Vec512 = _mm512_##IntrinName( \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 0, -1, -1, -1, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1), \
- (__m512##T1)__builtin_shufflevector( \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512, \
- 1, -1, -1, -1, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1)); \
- return Vec512[0]; \
- })
-
-static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_max_epi32(__m512i a) {
- _mm512_reduce_maxMin_32bit(a, max_epi32, i, i);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm512_reduce_max_epu32(__m512i a) {
- _mm512_reduce_maxMin_32bit(a, max_epu32, i, i);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS _mm512_reduce_max_ps(__m512 a) {
- _mm512_reduce_maxMin_32bit(a, max_ps, , f);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_min_epi32(__m512i a) {
- _mm512_reduce_maxMin_32bit(a, min_epi32, i, i);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm512_reduce_min_epu32(__m512i a) {
- _mm512_reduce_maxMin_32bit(a, min_epu32, i, i);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS _mm512_reduce_min_ps(__m512 a) {
- _mm512_reduce_maxMin_32bit(a, min_ps, , f);
-}
-
-// Vec512 - Vector with size 512.
-// Vec512Neutral - A 512 length vector with elements set to the identity element
-// Identity element: {max_epi,0x80000000}
-// {max_epu,0x00000000}
-// {max_ps, 0xFF800000}
-// {min_epi,0x7FFFFFFF}
-// {min_epu,0xFFFFFFFF}
-// {min_ps, 0x7F800000}
-//
-// IntrinName - Can be one of following: {max|min}_{epi32|epu32|ps} for example:
-// __mm512_max_epi32
-// T1 - Can get 'i' for int and ' ' .[__m512{i|}]
-// T2 - Can get 'i' for int and 'f' for float.[__v16s{i|f}]
-// T3 - Can get 'q' q word and 'pd' for packed double.
-// [__builtin_ia32_select{q|pd}_512]
-// Mask - Intrinsic Mask
-
-#define _mm512_mask_reduce_maxMin_32bit(Vec512, Vec512Neutral, IntrinName, T1, \
- T2, T3, Mask) \
- __extension__({ \
- Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \
- (__mmask16)Mask, \
- (__v16s##T2)Vec512, \
- (__v16s##T2)Vec512Neutral); \
- _mm512_reduce_maxMin_32bit(Vec512, IntrinName, T1, T2); \
- })
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x80000000), max_epi32,
- i, i, d, __M);
+#define _mm512_mask_reduce_operator(op) \
+ __m256i __t1 = _mm512_extracti64x4_epi64(__V, 0); \
+ __m256i __t2 = _mm512_extracti64x4_epi64(__V, 1); \
+ __m256i __t3 = _mm256_##op(__t1, __t2); \
+ __m128i __t4 = _mm256_extracti128_si256(__t3, 0); \
+ __m128i __t5 = _mm256_extracti128_si256(__t3, 1); \
+ __m128i __t6 = _mm_##op(__t4, __t5); \
+ __m128i __t7 = (__m128i)__builtin_shufflevector((__v4si)__t6, (__v4si)__t6, 2, 3, 0, 1); \
+ __m128i __t8 = _mm_##op(__t6, __t7); \
+ __m128i __t9 = (__m128i)__builtin_shufflevector((__v4si)__t8, (__v4si)__t8, 1, 0, 3, 2); \
+ __v4si __t10 = (__v4si)_mm_##op(__t8, __t9); \
+ return __t10[0];
+
+static __inline__ int __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_epi32(__m512i __V) {
+ _mm512_mask_reduce_operator(max_epi32);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x00000000), max_epu32,
- i, i, d, __M);
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_epu32(__m512i __V) {
+ _mm512_mask_reduce_operator(max_epu32);
}
-static __inline__ float __DEFAULT_FN_ATTRS
-_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
- _mm512_mask_reduce_maxMin_32bit(__V,-_mm512_set1_ps(__builtin_inff()), max_ps, , f,
- ps, __M);
+static __inline__ int __DEFAULT_FN_ATTRS512
+_mm512_reduce_min_epi32(__m512i __V) {
+ _mm512_mask_reduce_operator(min_epi32);
}
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+_mm512_reduce_min_epu32(__m512i __V) {
+ _mm512_mask_reduce_operator(min_epu32);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS512
+_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
+ __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
+ _mm512_mask_reduce_operator(max_epi32);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512
+_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
+ __V = _mm512_maskz_mov_epi32(__M, __V);
+ _mm512_mask_reduce_operator(max_epu32);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x7FFFFFFF), min_epi32,
- i, i, d, __M);
+ __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
+ _mm512_mask_reduce_operator(min_epi32);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
+static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
- _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0xFFFFFFFF), min_epu32,
- i, i, d, __M);
+ __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
+ _mm512_mask_reduce_operator(min_epu32);
+}
+#undef _mm512_mask_reduce_operator
+
+#define _mm512_mask_reduce_operator(op) \
+ __m256d __t1 = _mm512_extractf64x4_pd(__V, 0); \
+ __m256d __t2 = _mm512_extractf64x4_pd(__V, 1); \
+ __m256d __t3 = _mm256_##op(__t1, __t2); \
+ __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
+ __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
+ __m128d __t6 = _mm_##op(__t4, __t5); \
+ __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
+ __m128d __t8 = _mm_##op(__t6, __t7); \
+ return __t8[0];
+
+static __inline__ double __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_pd(__m512d __V) {
+ _mm512_mask_reduce_operator(max_pd);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS512
+_mm512_reduce_min_pd(__m512d __V) {
+ _mm512_mask_reduce_operator(min_pd);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS512
+_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
+ __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V);
+ _mm512_mask_reduce_operator(max_pd);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS512
+_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
+ __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V);
+ _mm512_mask_reduce_operator(min_pd);
+}
+#undef _mm512_mask_reduce_operator
+
+#define _mm512_mask_reduce_operator(op) \
+ __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 0); \
+ __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 1); \
+ __m256 __t3 = _mm256_##op(__t1, __t2); \
+ __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
+ __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
+ __m128 __t6 = _mm_##op(__t4, __t5); \
+ __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
+ __m128 __t8 = _mm_##op(__t6, __t7); \
+ __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
+ __m128 __t10 = _mm_##op(__t8, __t9); \
+ return __t10[0];
+
+static __inline__ float __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_ps(__m512 __V) {
+ _mm512_mask_reduce_operator(max_ps);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS512
+_mm512_reduce_min_ps(__m512 __V) {
+ _mm512_mask_reduce_operator(min_ps);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS512
+_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
+ __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V);
+ _mm512_mask_reduce_operator(max_ps);
}
-static __inline__ float __DEFAULT_FN_ATTRS
+static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
- _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_ps(__builtin_inff()), min_ps, , f,
- ps, __M);
+ __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V);
+ _mm512_mask_reduce_operator(min_ps);
}
+#undef _mm512_mask_reduce_operator
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS512
+#undef __DEFAULT_FN_ATTRS128
-#endif // __AVX512FINTRIN_H
+#endif /* __AVX512FINTRIN_H */
diff --git a/lib/Headers/avx512ifmaintrin.h b/lib/Headers/avx512ifmaintrin.h
index 5defbaea8bcc..159713049c1a 100644
--- a/lib/Headers/avx512ifmaintrin.h
+++ b/lib/Headers/avx512ifmaintrin.h
@@ -29,62 +29,52 @@
#define __IFMAINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
{
- return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __Z,
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_vpmadd52huq512((__v8di) __X, (__v8di) __Y,
+ (__v8di) __Z);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X,
- __m512i __Y)
+_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_vpmadd52huq512_mask ((__v8di) __W,
- (__v8di) __X,
- (__v8di) __Y,
- (__mmask8) __M);
+ return (__m512i)__builtin_ia32_selectq_512(__M,
+ (__v8di)_mm512_madd52hi_epu64(__W, __X, __Y),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
{
- return (__m512i) __builtin_ia32_vpmadd52huq512_maskz ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __Z,
- (__mmask8) __M);
+ return (__m512i)__builtin_ia32_selectq_512(__M,
+ (__v8di)_mm512_madd52hi_epu64(__X, __Y, __Z),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
{
- return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __Z,
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_vpmadd52luq512((__v8di) __X, (__v8di) __Y,
+ (__v8di) __Z);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X,
- __m512i __Y)
+_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __W,
- (__v8di) __X,
- (__v8di) __Y,
- (__mmask8) __M);
+ return (__m512i)__builtin_ia32_selectq_512(__M,
+ (__v8di)_mm512_madd52lo_epu64(__W, __X, __Y),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
{
- return (__m512i) __builtin_ia32_vpmadd52luq512_maskz ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __Z,
- (__mmask8) __M);
+ return (__m512i)__builtin_ia32_selectq_512(__M,
+ (__v8di)_mm512_madd52lo_epu64(__X, __Y, __Z),
+ (__v8di)_mm512_setzero_si512());
}
#undef __DEFAULT_FN_ATTRS
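
For reference, a scalar model of the 52-bit multiply-add these intrinsics perform per 64-bit lane, following Intel's documented semantics; the sketch assumes a compiler that provides unsigned __int128.

/* Scalar model per 64-bit lane: multiply the low 52 bits of x and y to a
 * 104-bit product, then add either its low 52 bits (madd52lo) or its
 * high 52 bits (madd52hi) to the accumulator z. */
static unsigned long long madd52lo(unsigned long long z,
                                   unsigned long long x,
                                   unsigned long long y) {
  unsigned __int128 p = (unsigned __int128)(x & ((1ULL << 52) - 1)) *
                        (y & ((1ULL << 52) - 1));
  return z + (unsigned long long)(p & ((1ULL << 52) - 1));
}

static unsigned long long madd52hi(unsigned long long z,
                                   unsigned long long x,
                                   unsigned long long y) {
  unsigned __int128 p = (unsigned __int128)(x & ((1ULL << 52) - 1)) *
                        (y & ((1ULL << 52) - 1));
  return z + (unsigned long long)(p >> 52);
}
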
diff --git a/lib/Headers/avx512ifmavlintrin.h b/lib/Headers/avx512ifmavlintrin.h
index 131ee5cb4f88..afdea888c55b 100644
--- a/lib/Headers/avx512ifmavlintrin.h
+++ b/lib/Headers/avx512ifmavlintrin.h
@@ -29,121 +29,105 @@
#define __IFMAVLINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(256)))
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
{
- return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __Z,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di) __X, (__v2di) __Y,
+ (__v2di) __Z);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_vpmadd52huq128_mask ((__v2di) __W,
- (__v2di) __X,
- (__v2di) __Y,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectq_128(__M,
+ (__v2di)_mm_madd52hi_epu64(__W, __X, __Y),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
{
- return (__m128i) __builtin_ia32_vpmadd52huq128_maskz ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __Z,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectq_128(__M,
+ (__v2di)_mm_madd52hi_epu64(__X, __Y, __Z),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
{
- return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __Z,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y,
+ (__v4di)__Z);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X,
- __m256i __Y)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_vpmadd52huq256_mask ((__v4di) __W,
- (__v4di) __X,
- (__v4di) __Y,
- (__mmask8) __M);
+ return (__m256i)__builtin_ia32_selectq_256(__M,
+ (__v4di)_mm256_madd52hi_epu64(__W, __X, __Y),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
{
- return (__m256i) __builtin_ia32_vpmadd52huq256_maskz ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __Z,
- (__mmask8) __M);
+ return (__m256i)__builtin_ia32_selectq_256(__M,
+ (__v4di)_mm256_madd52hi_epu64(__X, __Y, __Z),
+ (__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
{
- return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __Z,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y,
+ (__v2di)__Z);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_vpmadd52luq128_mask ((__v2di) __W,
- (__v2di) __X,
- (__v2di) __Y,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectq_128(__M,
+ (__v2di)_mm_madd52lo_epu64(__W, __X, __Y),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
{
- return (__m128i) __builtin_ia32_vpmadd52luq128_maskz ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __Z,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectq_128(__M,
+ (__v2di)_mm_madd52lo_epu64(__X, __Y, __Z),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
{
- return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __Z,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y,
+ (__v4di)__Z);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X,
- __m256i __Y)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_vpmadd52luq256_mask ((__v4di) __W,
- (__v4di) __X,
- (__v4di) __Y,
- (__mmask8) __M);
+ return (__m256i)__builtin_ia32_selectq_256(__M,
+ (__v4di)_mm256_madd52lo_epu64(__W, __X, __Y),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
{
- return (__m256i) __builtin_ia32_vpmadd52luq256_maskz ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __Z,
- (__mmask8) __M);
+ return (__m256i)__builtin_ia32_selectq_256(__M,
+ (__v4di)_mm256_madd52lo_epu64(__X, __Y, __Z),
+ (__v4di)_mm256_setzero_si256());
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
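
The recurring refactor in these headers replaces dedicated masked builtins with an unmasked builtin followed by a __builtin_ia32_select*_* call. Per lane, that select behaves like the sketch below; the _mask_ and _maskz_ variants differ only in the pass-through value.

/* Scalar model of one lane of the select-based masking pattern:
 * take the computed value when the mask bit is set, otherwise the
 * pass-through (the old destination for _mask_, zero for _maskz_). */
static long long select_lane(unsigned mask, int lane,
                             long long computed, long long passthru) {
  return (mask >> lane) & 1 ? computed : passthru;
}
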
diff --git a/lib/Headers/avx512pfintrin.h b/lib/Headers/avx512pfintrin.h
index c7fa3cf313e3..5b8260b77c63 100644
--- a/lib/Headers/avx512pfintrin.h
+++ b/lib/Headers/avx512pfintrin.h
@@ -1,4 +1,4 @@
-/*===------------- avx512pfintrin.h - PF intrinsics ------------------===
+/*===------------- avx512pfintrin.h - PF intrinsics ------------------------===
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -31,80 +31,80 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512pf")))
-#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) __extension__ ({\
+#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
(long long const *)(addr), (int)(scale), \
- (int)(hint)); })
-
-#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) __extension__ ({\
+ (int)(hint))
+
+#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \
__builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \
(long long const *)(addr), (int)(scale), \
- (int)(hint)); })
+ (int)(hint))
-#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) ({\
+#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfdps((__mmask16)(mask), \
(__v16si)(__m512i)(index), (int const *)(addr), \
- (int)(scale), (int)(hint)); })
+ (int)(scale), (int)(hint))
-#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) ({\
+#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \
__builtin_ia32_gatherpfdps((__mmask16) -1, \
(__v16si)(__m512i)(index), (int const *)(addr), \
- (int)(scale), (int)(hint)); })
+ (int)(scale), (int)(hint))
-#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) __extension__ ({\
+#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
(long long const *)(addr), (int)(scale), \
- (int)(hint)); })
+ (int)(hint))
-#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) __extension__ ({\
+#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \
__builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \
(long long const *)(addr), (int)(scale), \
- (int)(hint)); })
-
-#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) ({\
+ (int)(hint))
+
+#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (int const *)(addr), (int)(scale), (int)(hint)); })
+ (int const *)(addr), (int)(scale), (int)(hint))
-#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) ({\
+#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \
__builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \
- (int const *)(addr), (int)(scale), (int)(hint)); })
+ (int const *)(addr), (int)(scale), (int)(hint))
-#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) __extension__ ({\
+#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \
__builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \
(long long *)(addr), (int)(scale), \
- (int)(hint)); })
+ (int)(hint))
-#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) __extension__ ({\
+#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
(long long *)(addr), (int)(scale), \
- (int)(hint)); })
+ (int)(hint))
-#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) __extension__ ({\
+#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \
__builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
- (int *)(addr), (int)(scale), (int)(hint)); })
+ (int *)(addr), (int)(scale), (int)(hint))
-#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) __extension__ ({\
+#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfdps((__mmask16)(mask), \
(__v16si)(__m512i)(index), (int *)(addr), \
- (int)(scale), (int)(hint)); })
+ (int)(scale), (int)(hint))
-#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) __extension__ ({\
+#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \
__builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \
(long long *)(addr), (int)(scale), \
- (int)(hint)); })
+ (int)(hint))
-#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) __extension__ ({\
+#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
(long long *)(addr), (int)(scale), \
- (int)(hint)); })
+ (int)(hint))
-#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) __extension__ ({\
+#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \
__builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \
- (int *)(addr), (int)(scale), (int)(hint)); })
+ (int *)(addr), (int)(scale), (int)(hint))
-#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) __extension__ ({\
+#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (int *)(addr), (int)(scale), (int)(hint)); })
+ (int *)(addr), (int)(scale), (int)(hint))
#undef __DEFAULT_FN_ATTRS
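
Dropping the GNU statement expressions leaves these macros as plain void expressions. A hypothetical use (assumes compilation with -mavx512pf and the _MM_HINT_T0 constant from xmmintrin.h):

#include <immintrin.h>

/* Hypothetical helper: prefetch the 16 floats addressed by 32-bit
 * indices into base, scaled by sizeof(float). */
static void warm_gather(const float *base, __m512i idx) {
  _mm512_prefetch_i32gather_ps(idx, base, 4, _MM_HINT_T0);
}
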
diff --git a/lib/Headers/avx512vbmi2intrin.h b/lib/Headers/avx512vbmi2intrin.h
index 43e97b40a098..d2a58094fd07 100644
--- a/lib/Headers/avx512vbmi2intrin.h
+++ b/lib/Headers/avx512vbmi2intrin.h
@@ -29,7 +29,7 @@
#define __AVX512VBMI2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -44,7 +44,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_compress_epi16(__mmask32 __U, __m512i __D)
{
return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
__U);
}
@@ -60,7 +60,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D)
{
return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
__U);
}
@@ -90,7 +90,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_expand_epi16(__mmask32 __U, __m512i __D)
{
return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
__U);
}
@@ -106,7 +106,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D)
{
return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
__U);
}
@@ -122,7 +122,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const *__P)
{
return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P,
- (__v32hi) _mm512_setzero_hi(),
+ (__v32hi) _mm512_setzero_si512(),
__U);
}
@@ -138,87 +138,93 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P)
{
return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P,
- (__v64qi) _mm512_setzero_qi(),
+ (__v64qi) _mm512_setzero_si512(),
__U);
}
-#define _mm512_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_vpshldq512_mask((__v8di)(A), \
- (__v8di)(B), \
- (int)(I), \
- (__v8di)(S), \
- (__mmask8)(U)); })
+#define _mm512_shldi_epi64(A, B, I) \
+ (__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I))
+
+#define _mm512_mask_shldi_epi64(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
+ (__v8di)(__m512i)(S))
#define _mm512_maskz_shldi_epi64(U, A, B, I) \
- _mm512_mask_shldi_epi64(_mm512_setzero_hi(), (U), (A), (B), (I))
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
+ (__v8di)_mm512_setzero_si512())
-#define _mm512_shldi_epi64(A, B, I) \
- _mm512_mask_shldi_epi64(_mm512_undefined(), (__mmask8)(-1), (A), (B), (I))
+#define _mm512_shldi_epi32(A, B, I) \
+ (__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I))
-#define _mm512_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_vpshldd512_mask((__v16si)(A), \
- (__v16si)(B), \
- (int)(I), \
- (__v16si)(S), \
- (__mmask16)(U)); })
+#define _mm512_mask_shldi_epi32(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
+ (__v16si)(__m512i)(S))
#define _mm512_maskz_shldi_epi32(U, A, B, I) \
- _mm512_mask_shldi_epi32(_mm512_setzero_hi(), (U), (A), (B), (I))
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
+ (__v16si)_mm512_setzero_si512())
-#define _mm512_shldi_epi32(A, B, I) \
- _mm512_mask_shldi_epi32(_mm512_undefined(), (__mmask16)(-1), (A), (B), (I))
+#define _mm512_shldi_epi16(A, B, I) \
+ (__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \
+ (__v32hi)(__m512i)(B), (int)(I))
-#define _mm512_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_vpshldw512_mask((__v32hi)(A), \
- (__v32hi)(B), \
- (int)(I), \
- (__v32hi)(S), \
- (__mmask32)(U)); })
+#define _mm512_mask_shldi_epi16(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
+ (__v32hi)(__m512i)(S))
#define _mm512_maskz_shldi_epi16(U, A, B, I) \
- _mm512_mask_shldi_epi16(_mm512_setzero_hi(), (U), (A), (B), (I))
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
+ (__v32hi)_mm512_setzero_si512())
-#define _mm512_shldi_epi16(A, B, I) \
- _mm512_mask_shldi_epi16(_mm512_undefined(), (__mmask32)(-1), (A), (B), (I))
+#define _mm512_shrdi_epi64(A, B, I) \
+ (__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I))
-#define _mm512_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_vpshrdq512_mask((__v8di)(A), \
- (__v8di)(B), \
- (int)(I), \
- (__v8di)(S), \
- (__mmask8)(U)); })
+#define _mm512_mask_shrdi_epi64(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
+ (__v8di)(__m512i)(S))
#define _mm512_maskz_shrdi_epi64(U, A, B, I) \
- _mm512_mask_shrdi_epi64(_mm512_setzero_hi(), (U), (A), (B), (I))
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
+ (__v8di)_mm512_setzero_si512())
-#define _mm512_shrdi_epi64(A, B, I) \
- _mm512_mask_shrdi_epi64(_mm512_undefined(), (__mmask8)(-1), (A), (B), (I))
+#define _mm512_shrdi_epi32(A, B, I) \
+ (__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I))
-#define _mm512_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_vpshrdd512_mask((__v16si)(A), \
- (__v16si)(B), \
- (int)(I), \
- (__v16si)(S), \
- (__mmask16)(U)); })
+#define _mm512_mask_shrdi_epi32(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
+ (__v16si)(__m512i)(S))
#define _mm512_maskz_shrdi_epi32(U, A, B, I) \
- _mm512_mask_shrdi_epi32(_mm512_setzero_hi(), (U), (A), (B), (I))
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
+ (__v16si)_mm512_setzero_si512())
-#define _mm512_shrdi_epi32(A, B, I) \
- _mm512_mask_shrdi_epi32(_mm512_undefined(), (__mmask16)(-1), (A), (B), (I))
+#define _mm512_shrdi_epi16(A, B, I) \
+ (__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \
+ (__v32hi)(__m512i)(B), (int)(I))
-#define _mm512_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_vpshrdw512_mask((__v32hi)(A), \
- (__v32hi)(B), \
- (int)(I), \
- (__v32hi)(S), \
- (__mmask32)(U)); })
+#define _mm512_mask_shrdi_epi16(S, U, A, B, I) \
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
+ (__v32hi)(__m512i)(S))
#define _mm512_maskz_shrdi_epi16(U, A, B, I) \
- _mm512_mask_shrdi_epi16(_mm512_setzero_hi(), (U), (A), (B), (I))
-
-#define _mm512_shrdi_epi16(A, B, I) \
- _mm512_mask_shrdi_epi16(_mm512_undefined(), (__mmask32)(-1), (A), (B), (I))
+ (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
+ (__v32hi)_mm512_setzero_si512())
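
Per Intel's description, shldi concatenates matching lanes of A and B into a double-width value, shifts left by the immediate, and keeps the upper half; shrdi is the mirror image. A scalar model for one 64-bit lane (valid for shift counts 1..63, since a shift by 64 is undefined in C):

/* Scalar model of one 64-bit funnel-shift lane (0 < i < 64). */
static unsigned long long shld64(unsigned long long a, unsigned long long b,
                                 int i) {
  return (a << i) | (b >> (64 - i));   /* upper half of (a:b) << i */
}

static unsigned long long shrd64(unsigned long long a, unsigned long long b,
                                 int i) {
  return (a >> i) | (b << (64 - i));   /* lower half of (b:a) >> i */
}
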
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_shldv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B)
diff --git a/lib/Headers/avx512vbmiintrin.h b/lib/Headers/avx512vbmiintrin.h
index 837238eda97f..b6e93c285871 100644
--- a/lib/Headers/avx512vbmiintrin.h
+++ b/lib/Headers/avx512vbmiintrin.h
@@ -29,79 +29,65 @@
#define __VBMIINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask2_permutex2var_epi8 (__m512i __A, __m512i __I,
- __mmask64 __U, __m512i __B)
+_mm512_permutex2var_epi8(__m512i __A, __m512i __I, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermi2varqi512_mask ((__v64qi) __A,
- (__v64qi) __I
- /* idx */ ,
- (__v64qi) __B,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_vpermi2varqi512((__v64qi)__A, (__v64qi)__I,
+ (__v64qi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_permutex2var_epi8 (__m512i __A, __m512i __I, __m512i __B)
+_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I
- /* idx */ ,
- (__v64qi) __A,
- (__v64qi) __B,
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_selectb_512(__U,
+ (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B),
+ (__v64qi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_permutex2var_epi8 (__m512i __A, __mmask64 __U,
- __m512i __I, __m512i __B)
+_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varqi512_mask ((__v64qi) __I
- /* idx */ ,
- (__v64qi) __A,
- (__v64qi) __B,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512(__U,
+ (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B),
+ (__v64qi)__I);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_permutex2var_epi8 (__mmask64 __U, __m512i __A,
- __m512i __I, __m512i __B)
+_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I,
+ __m512i __B)
{
- return (__m512i) __builtin_ia32_vpermt2varqi512_maskz ((__v64qi) __I
- /* idx */ ,
- (__v64qi) __A,
- (__v64qi) __B,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512(__U,
+ (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_permutexvar_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
- (__v64qi) __A,
- (__v64qi) _mm512_undefined_epi32 (),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_permvarqi512((__v64qi) __B, (__v64qi) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A,
__m512i __B)
{
- return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
- (__v64qi) __A,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_permutexvar_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
__m512i __B)
{
- return (__m512i) __builtin_ia32_permvarqi512_mask ((__v64qi) __B,
- (__v64qi) __A,
- (__v64qi) __W,
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_permutexvar_epi8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/avx512vbmivlintrin.h b/lib/Headers/avx512vbmivlintrin.h
index 105c6d142fa6..9a0400b2b5d5 100644
--- a/lib/Headers/avx512vbmivlintrin.h
+++ b/lib/Headers/avx512vbmivlintrin.h
@@ -29,161 +29,127 @@
#define __VBMIVLINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"), __min_vector_width__(256)))
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpermi2varqi128_mask ((__v16qi) __A,
- (__v16qi) __I
- /* idx */ ,
- (__v16qi) __B,
- (__mmask16)
- __U);
+ return (__m128i)__builtin_ia32_vpermi2varqi128((__v16qi)__A,
+ (__v16qi)__I,
+ (__v16qi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask2_permutex2var_epi8 (__m256i __A, __m256i __I,
- __mmask32 __U, __m256i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I,
+ __m128i __B)
{
- return (__m256i) __builtin_ia32_vpermi2varqi256_mask ((__v32qi) __A,
- (__v32qi) __I
- /* idx */ ,
- (__v32qi) __B,
- (__mmask32)
- __U);
+ return (__m128i)__builtin_ia32_selectb_128(__U,
+ (__v16qi)_mm_permutex2var_epi8(__A, __I, __B),
+ (__v16qi)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_permutex2var_epi8 (__m128i __A, __m128i __I, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U,
+ __m128i __B)
{
- return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
- /* idx */ ,
- (__v16qi) __A,
- (__v16qi) __B,
- (__mmask16) -
- 1);
+ return (__m128i)__builtin_ia32_selectb_128(__U,
+ (__v16qi)_mm_permutex2var_epi8(__A, __I, __B),
+ (__v16qi)__I);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I,
+ __m128i __B)
{
- return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
- /* idx */ ,
- (__v16qi) __A,
- (__v16qi) __B,
- (__mmask16)
- __U);
+ return (__m128i)__builtin_ia32_selectb_128(__U,
+ (__v16qi)_mm_permutex2var_epi8(__A, __I, __B),
+ (__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I,
- __m128i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_permutex2var_epi8(__m256i __A, __m256i __I, __m256i __B)
{
- return (__m128i) __builtin_ia32_vpermt2varqi128_maskz ((__v16qi) __I
- /* idx */ ,
- (__v16qi) __A,
- (__v16qi) __B,
- (__mmask16)
- __U);
+ return (__m256i)__builtin_ia32_vpermi2varqi256((__v32qi)__A, (__v32qi)__I,
+ (__v32qi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_permutex2var_epi8 (__m256i __A, __m256i __I, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U, __m256i __I,
+ __m256i __B)
{
- return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
- /* idx */ ,
- (__v32qi) __A,
- (__v32qi) __B,
- (__mmask32) -
- 1);
+ return (__m256i)__builtin_ia32_selectb_256(__U,
+ (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B),
+ (__v32qi)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_permutex2var_epi8 (__m256i __A, __mmask32 __U,
- __m256i __I, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I, __mmask32 __U,
+ __m256i __B)
{
- return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
- /* idx */ ,
- (__v32qi) __A,
- (__v32qi) __B,
- (__mmask32)
- __U);
+ return (__m256i)__builtin_ia32_selectb_256(__U,
+ (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B),
+ (__v32qi)__I);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_permutex2var_epi8 (__mmask32 __U, __m256i __A,
- __m256i __I, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A, __m256i __I,
+ __m256i __B)
{
- return (__m256i) __builtin_ia32_vpermt2varqi256_maskz ((__v32qi) __I
- /* idx */ ,
- (__v32qi) __A,
- (__v32qi) __B,
- (__mmask32)
- __U);
+ return (__m256i)__builtin_ia32_selectb_256(__U,
+ (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B),
+ (__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_permutexvar_epi8 (__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
- (__v16qi) __A,
- (__v16qi) _mm_undefined_si128 (),
- (__mmask16) -1);
+ return (__m128i)__builtin_ia32_permvarqi128((__v16qi)__B, (__v16qi)__A);
}
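Grounded in the call above: _mm_permutexvar_epi8 takes (indices, data) in its public signature, while the unmasked builtin receives (data, indices), hence the swapped (__B, __A). A usage sketch, assuming <immintrin.h> with -mavx512vbmi -mavx512vl (reverse_bytes is a hypothetical name):

    __attribute__((__target__("avx512vbmi,avx512vl")))
    static __m128i reverse_bytes(__m128i v) {
      /* index lane i holds 15 - i, so result lane i = byte 15 - i of v */
      const __m128i idx = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                       8, 9, 10, 11, 12, 13, 14, 15);
      return _mm_permutexvar_epi8(idx, v);
    }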
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
- (__v16qi) __A,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_permutexvar_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
__m128i __B)
{
- return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
- (__v16qi) __A,
- (__v16qi) __W,
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_permutexvar_epi8(__A, __B),
+ (__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutexvar_epi8 (__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
- (__v32qi) __A,
- (__v32qi) _mm256_undefined_si256 (),
- (__mmask32) -1);
+ return (__m256i)__builtin_ia32_permvarqi256((__v32qi) __B, (__v32qi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A,
__m256i __B)
{
- return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
- (__v32qi) __A,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_permutexvar_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
__m256i __B)
{
- return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
- (__v32qi) __A,
- (__v32qi) __W,
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_permutexvar_epi8(__A, __B),
+ (__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y)
{
return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
@@ -192,7 +158,7 @@ _mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i
(__mmask16) __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y)
{
return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
@@ -202,7 +168,7 @@ _mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y)
(__mmask16) __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y)
{
return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
@@ -212,7 +178,7 @@ _mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y)
(__mmask16) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y)
{
return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
@@ -221,7 +187,7 @@ _mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m2
(__mmask32) __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y)
{
return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
@@ -231,7 +197,7 @@ _mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y)
(__mmask32) __M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y)
{
return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
@@ -242,6 +208,7 @@ _mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y)
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
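The old single __DEFAULT_FN_ATTRS is split into width-specific 128/256 variants so each intrinsic also carries __min_vector_width__(N). As documented for Clang's min_vector_width attribute, this records the narrowest vector register width the inlined body requires, so the backend does not narrow these always_inline functions below N under options such as -mprefer-vector-width=256. A hedged sketch of the convention with a hypothetical wrapper:

    #include <immintrin.h>
    #define MY_ATTRS128 __attribute__((__always_inline__, __nodebug__,     \
        __target__("avx512vbmi,avx512vl"), __min_vector_width__(128)))
    static __inline__ __m128i MY_ATTRS128
    my_passthrough_epi8(__m128i __A) { return __A; } /* 128-bit ops => 128 */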
diff --git a/lib/Headers/avx512vlbitalgintrin.h b/lib/Headers/avx512vlbitalgintrin.h
index 76eb87721b8b..64860b29254f 100644
--- a/lib/Headers/avx512vlbitalgintrin.h
+++ b/lib/Headers/avx512vlbitalgintrin.h
@@ -1,4 +1,4 @@
-/*===------------- avx512vlbitalgintrin.h - BITALG intrinsics ------------------===
+/*===---- avx512vlbitalgintrin.h - BITALG intrinsics -----------------------===
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -29,15 +29,16 @@
#define __AVX512VLBITALGINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg"), __min_vector_width__(256)))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi16(__m256i __A)
{
return (__m256i) __builtin_ia32_vpopcntw_256((__v16hi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B)
{
return (__m256i) __builtin_ia32_selectw_256((__mmask16) __U,
@@ -45,7 +46,7 @@ _mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B)
(__v16hi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B)
{
return _mm256_mask_popcnt_epi16((__m256i) _mm256_setzero_si256(),
@@ -53,35 +54,35 @@ _mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B)
__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_popcnt_epi16(__m128i __A)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_popcnt_epi16(__m128i __A)
{
return (__m128i) __builtin_ia32_vpopcntw_128((__v8hi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B)
{
return (__m128i) __builtin_ia32_selectw_128((__mmask8) __U,
- (__v8hi) _mm128_popcnt_epi16(__B),
+ (__v8hi) _mm_popcnt_epi16(__B),
(__v8hi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_popcnt_epi16(__mmask8 __U, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B)
{
- return _mm128_mask_popcnt_epi16((__m128i) _mm_setzero_si128(),
+ return _mm_mask_popcnt_epi16((__m128i) _mm_setzero_si128(),
__U,
__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi8(__m256i __A)
{
return (__m256i) __builtin_ia32_vpopcntb_256((__v32qi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B)
{
return (__m256i) __builtin_ia32_selectb_256((__mmask32) __U,
@@ -89,7 +90,7 @@ _mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B)
(__v32qi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B)
{
return _mm256_mask_popcnt_epi8((__m256i) _mm256_setzero_si256(),
@@ -97,61 +98,62 @@ _mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B)
__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_popcnt_epi8(__m128i __A)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_popcnt_epi8(__m128i __A)
{
return (__m128i) __builtin_ia32_vpopcntb_128((__v16qi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B)
{
return (__m128i) __builtin_ia32_selectb_128((__mmask16) __U,
- (__v16qi) _mm128_popcnt_epi8(__B),
+ (__v16qi) _mm_popcnt_epi8(__B),
(__v16qi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_popcnt_epi8(__mmask16 __U, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B)
{
- return _mm128_mask_popcnt_epi8((__m128i) _mm_setzero_si128(),
+ return _mm_mask_popcnt_epi8((__m128i) _mm_setzero_si128(),
__U,
__B);
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_mm256_mask_bitshuffle_epi32_mask(__mmask32 __U, __m256i __A, __m256i __B)
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
+_mm256_mask_bitshuffle_epi64_mask(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask((__v32qi) __A,
(__v32qi) __B,
__U);
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
-_mm256_bitshuffle_epi32_mask(__m256i __A, __m256i __B)
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
+_mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B)
{
- return _mm256_mask_bitshuffle_epi32_mask((__mmask32) -1,
+ return _mm256_mask_bitshuffle_epi64_mask((__mmask32) -1,
__A,
__B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm128_mask_bitshuffle_epi16_mask(__mmask16 __U, __m128i __A, __m128i __B)
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
+_mm_mask_bitshuffle_epi64_mask(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask((__v16qi) __A,
(__v16qi) __B,
__U);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm128_bitshuffle_epi16_mask(__m128i __A, __m128i __B)
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
+_mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B)
{
- return _mm128_mask_bitshuffle_epi16_mask((__mmask16) -1,
+ return _mm_mask_bitshuffle_epi64_mask((__mmask16) -1,
__A,
__B);
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
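Two renames land here besides the attribute split: the nonstandard _mm128_* prefixes become the usual _mm_* (128-bit intrinsics carry no width prefix), and bitshuffle gains its documented epi64 suffix, matching VPSHUFBITQMB, which selects bits from within 64-bit lanes. The maskz forms keep delegating to the mask forms with a zero destination; a usage sketch, assuming -mavx512vl -mavx512bitalg (count_low_lanes is a hypothetical name):

    __attribute__((__target__("avx512vl,avx512bitalg")))
    static __m128i count_low_lanes(__m128i v) {
      /* popcounts in byte lanes 0..7, zeros in lanes 8..15 */
      return _mm_maskz_popcnt_epi8((__mmask16)0x00FF, v);
    }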
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index e940e2b68533..1b038dd04df6 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -29,94 +29,90 @@
#define __AVX512VLBWINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw")))
-
-static __inline __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_hi(void){
- return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(256)))
/* Integer compare */
-#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epi8_mask(a, b, p) \
(__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epu8_mask(a, b, p) \
(__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
(__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epi8_mask(a, b, p) \
(__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1); })
+ (__mmask32)-1)
-#define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
(__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m)); })
+ (__mmask32)(m))
-#define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epu8_mask(a, b, p) \
(__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1); })
+ (__mmask32)-1)
-#define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
(__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
(__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m)); })
+ (__mmask32)(m))
-#define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epi16_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epu16_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
(__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epi16_mask(a, b, p) \
(__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
-#define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epu16_mask(a, b, p) \
(__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1); })
+ (__mmask16)-1)
-#define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
(__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
(__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m)); })
+ (__mmask16)(m))
#define _mm_cmpeq_epi8_mask(A, B) \
_mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
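The __extension__ ({ ... }) wrappers were GNU statement expressions around what is already a single expression; dropping them leaves a plain parenthesized expansion, which avoids the extension entirely and is usable in contexts where statement expressions are not. Each macro now expands to just a cast plus a builtin call; assuming -mavx512bw -mavx512vl, for example:

    __attribute__((__target__("avx512bw,avx512vl")))
    static __mmask16 bytes_equal(__m128i a, __m128i b) {
      /* expands via _mm_cmp_epi8_mask(a, b, _MM_CMPINT_EQ) */
      return _mm_cmpeq_epi8_mask(a, b);
    }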
@@ -318,147 +314,147 @@ _mm_setzero_hi(void){
#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \
_mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_add_epi8(__A, __B),
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_add_epi8(__A, __B),
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_add_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_add_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_sub_epi8(__A, __B),
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_sub_epi8(__A, __B),
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_sub_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_sub_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_add_epi8(__A, __B),
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_add_epi8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_add_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_add_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
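With every merge (mask) and zero (maskz) form now a thin wrapper over the plain intrinsic, callers compose them directly. A usage sketch, assuming -mavx512bw -mavx512vl (even_lane_sums is a hypothetical name):

    __attribute__((__target__("avx512bw,avx512vl")))
    static __m128i even_lane_sums(__m128i a, __m128i b) {
      /* 0b01010101: keep sums in even word lanes, zero the odd lanes */
      return _mm_maskz_add_epi16((__mmask8)0x55, a, b);
    }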
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_sub_epi8(__A, __B),
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_sub_epi8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_sub_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_sub_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mullo_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mullo_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mullo_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mullo_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
{
return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
@@ -466,7 +462,7 @@ _mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
(__v16qi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
{
return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
@@ -474,7 +470,7 @@ _mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
(__v32qi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
{
return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
@@ -482,7 +478,7 @@ _mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
(__v8hi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
{
return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
@@ -490,7 +486,7 @@ _mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
(__v16hi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -498,7 +494,7 @@ _mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -506,7 +502,7 @@ _mm_maskz_abs_epi8(__mmask16 __U, __m128i __A)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -514,7 +510,7 @@ _mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -522,7 +518,7 @@ _mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -530,7 +526,7 @@ _mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -538,7 +534,7 @@ _mm_maskz_abs_epi16(__mmask8 __U, __m128i __A)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -546,7 +542,7 @@ _mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -554,22 +550,22 @@ _mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm_packs_epi32(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_packs_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm_packs_epi32(__A, __B),
(__v8hi)__W);
}
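The signature fix above (__mmask16 to __mmask8 on _mm_mask_packs_epi32) matters because mask width tracks lane count: packing 32-bit elements from two 128-bit sources yields eight 16-bit lanes, one mask bit each. The arithmetic, restated as a compile-time check:

    /* 128 bits / 16 bits per word lane = 8 lanes, hence __mmask8 */
    _Static_assert(128 / (8 * sizeof(short)) == 8,
                   "eight word lanes per __m128i");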
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -577,7 +573,7 @@ _mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -585,7 +581,7 @@ _mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -593,7 +589,7 @@ _mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -601,7 +597,7 @@ _mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -609,7 +605,7 @@ _mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -617,7 +613,7 @@ _mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -625,15 +621,15 @@ _mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_packus_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm_packus_epi32(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -641,7 +637,7 @@ _mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -649,7 +645,7 @@ _mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -657,7 +653,7 @@ _mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -665,7 +661,7 @@ _mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -673,7 +669,7 @@ _mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -681,7 +677,7 @@ _mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -689,7 +685,7 @@ _mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -697,7 +693,7 @@ _mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -705,7 +701,7 @@ _mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -713,7 +709,7 @@ _mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -721,7 +717,7 @@ _mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -729,7 +725,7 @@ _mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -737,7 +733,7 @@ _mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -745,7 +741,7 @@ _mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -753,7 +749,7 @@ _mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -761,7 +757,7 @@ _mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -769,7 +765,7 @@ _mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -777,7 +773,7 @@ _mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -785,7 +781,7 @@ _mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -793,7 +789,7 @@ _mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -801,7 +797,7 @@ _mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -809,7 +805,7 @@ _mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -817,7 +813,7 @@ _mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -825,7 +821,7 @@ _mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -833,7 +829,7 @@ _mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -841,7 +837,7 @@ _mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -849,7 +845,7 @@ _mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -857,7 +853,7 @@ _mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -865,7 +861,7 @@ _mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -873,7 +869,7 @@ _mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -881,7 +877,7 @@ _mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -889,7 +885,7 @@ _mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -897,7 +893,7 @@ _mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -905,7 +901,7 @@ _mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -913,7 +909,7 @@ _mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -921,7 +917,7 @@ _mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -929,7 +925,7 @@ _mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -937,7 +933,7 @@ _mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -945,7 +941,7 @@ _mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -953,7 +949,7 @@ _mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -961,7 +957,7 @@ _mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -969,7 +965,7 @@ _mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -977,7 +973,7 @@ _mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -985,7 +981,7 @@ _mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -993,7 +989,7 @@ _mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -1001,7 +997,7 @@ _mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -1009,7 +1005,7 @@ _mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -1017,7 +1013,7 @@ _mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -1025,7 +1021,7 @@ _mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -1033,7 +1029,7 @@ _mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -1041,7 +1037,7 @@ _mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -1049,7 +1045,7 @@ _mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -1057,7 +1053,7 @@ _mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -1065,7 +1061,7 @@ _mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -1073,7 +1069,7 @@ _mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
@@ -1081,7 +1077,7 @@ _mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -1089,7 +1085,7 @@ _mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
@@ -1097,7 +1093,7 @@ _mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -1105,7 +1101,7 @@ _mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
@@ -1113,7 +1109,7 @@ _mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -1121,7 +1117,7 @@ _mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
@@ -1129,7 +1125,7 @@ _mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -1137,7 +1133,7 @@ _mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -1145,7 +1141,7 @@ _mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -1153,7 +1149,7 @@ _mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -1161,7 +1157,7 @@ _mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -1169,7 +1165,7 @@ _mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -1177,7 +1173,7 @@ _mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -1185,7 +1181,7 @@ _mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -1193,7 +1189,7 @@ _mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1201,7 +1197,7 @@ _mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1209,7 +1205,7 @@ _mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1217,7 +1213,7 @@ _mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1225,7 +1221,7 @@ _mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -1233,7 +1229,7 @@ _mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
@@ -1241,7 +1237,7 @@ _mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B)
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -1249,7 +1245,7 @@ _mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
@@ -1257,7 +1253,7 @@ _mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B)
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1265,7 +1261,7 @@ _mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1273,7 +1269,7 @@ _mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A,
__m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1281,7 +1277,7 @@ _mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A,
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1289,99 +1285,89 @@ _mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
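
Every masked form in this hunk follows one pattern: compute the full-width unmasked result, then use __builtin_ia32_select{b,w}_{128,256} to pick, per lane, between that result and either the passthrough operand (mask) or zero (maskz). A minimal usage sketch, assuming a toolchain with -mavx512bw -mavx512vl:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi16(10), b = _mm_set1_epi16(3);
    /* Lanes with a 0 mask bit are zeroed; lanes with a 1 bit get a - b. */
    __m128i r = _mm_maskz_subs_epi16((__mmask8)0x0F, a, b);
    short out[8];
    _mm_storeu_si128((__m128i *)out, r);
    for (int i = 0; i < 8; ++i)
        printf("%d ", out[i]);   /* prints: 7 7 7 7 0 0 0 0 */
    return 0;
}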
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask2_permutex2var_epi16 (__m128i __A, __m128i __I, __mmask8 __U,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpermi2varhi128_mask ((__v8hi) __A,
- (__v8hi) __I /* idx */ ,
- (__v8hi) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
+ (__v8hi) __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask2_permutex2var_epi16 (__m256i __A, __m256i __I,
- __mmask16 __U, __m256i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B)
{
- return (__m256i) __builtin_ia32_vpermi2varhi256_mask ((__v16hi) __A,
- (__v16hi) __I /* idx */ ,
- (__v16hi) __B,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
+ (__v8hi)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_permutex2var_epi16 (__m128i __A, __m128i __I, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B)
{
- return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
- (__v8hi) __A,
- (__v8hi) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
+ (__v8hi)__I);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_permutex2var_epi16 (__m128i __A, __mmask8 __U, __m128i __I,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B)
{
- return (__m128i) __builtin_ia32_vpermt2varhi128_mask ((__v8hi) __I/* idx */,
- (__v8hi) __A,
- (__v8hi) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
+ (__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
- __m128i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B)
{
- return (__m128i) __builtin_ia32_vpermt2varhi128_maskz ((__v8hi) __I/* idx */,
- (__v8hi) __A,
- (__v8hi) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
+ (__v16hi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_permutex2var_epi16 (__m256i __A, __m256i __I, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I,
+ __m256i __B)
{
- return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
- (__v16hi) __A,
- (__v16hi) __B,
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
+ (__v16hi)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_permutex2var_epi16 (__m256i __A, __mmask16 __U,
- __m256i __I, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U,
+ __m256i __B)
{
- return (__m256i) __builtin_ia32_vpermt2varhi256_mask ((__v16hi) __I/* idx */,
- (__v16hi) __A,
- (__v16hi) __B,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
+ (__v16hi)__I);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
- __m256i __I, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A, __m256i __I,
+ __m256i __B)
{
- return (__m256i) __builtin_ia32_vpermt2varhi256_maskz ((__v16hi) __I/* idx */,
- (__v16hi) __A,
- (__v16hi) __B,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
+ (__v16hi)_mm256_setzero_si256());
}
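
The permutex2var rewrite above expresses each masked variant as the unmasked vpermi2varhi builtin wrapped in a select; the mask2 form keeps the index operand in masked-off lanes, which is why (__v8hi)__I is its select fallback. An illustrative scalar model of the 128-bit mask2 form (not part of the header):

/* 8 result lanes; each index selects one of 16 source lanes
   (a[0..7] then b[0..7]); lanes with a 0 mask bit keep the index. */
void mask2_permutex2var_epi16_model(short r[8], const short a[8],
                                    const short idx[8], unsigned char u,
                                    const short b[8]) {
    for (int i = 0; i < 8; ++i) {
        int s = idx[i] & 0xF;
        short v = (s < 8) ? a[s] : b[s - 8];
        r[i] = ((u >> i) & 1) ? v : idx[i];
    }
}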
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_maddubs_epi16(__X, __Y),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_maddubs_epi16(__X, __Y),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X,
__m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1389,402 +1375,400 @@ _mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X,
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_maddubs_epi16(__X, __Y),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_madd_epi16(__A, __B),
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_madd_epi16(__A, __B),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_madd_epi16(__A, __B),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_madd_epi16(__A, __B),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtsepi16_epi8 (__m128i __A) {
return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
(__v16qi) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
(__v16qi) __O,
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
(__v16qi) _mm_setzero_si128(),
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi16_epi8 (__m256i __A) {
return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
(__v16qi) _mm_setzero_si128(),
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
(__v16qi) __O,
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
(__v16qi) _mm_setzero_si128(),
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtusepi16_epi8 (__m128i __A) {
return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
(__v16qi) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
(__v16qi) __O,
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
(__v16qi) _mm_setzero_si128(),
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtusepi16_epi8 (__m256i __A) {
return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
(__v16qi) _mm_setzero_si128(),
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
(__v16qi) __O,
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
(__v16qi) _mm_setzero_si128(),
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi16_epi8 (__m128i __A) {
-
- return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
- (__v16qi) _mm_setzero_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v8hi)__A, __v8qi),
+ (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15);
}
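
The unmasked 128-bit truncation above no longer needs a masked builtin: __builtin_convertvector truncates each 16-bit lane to 8 bits, and __builtin_shufflevector concatenates the result with a zero vector so the upper 8 bytes of the __m128i are zero. The same pattern with plain Clang vector types (illustrative sketch):

typedef short v8hi  __attribute__((vector_size(16)));
typedef char  v8qi  __attribute__((vector_size(8)));
typedef char  v16qi __attribute__((vector_size(16)));

static v16qi cvtepi16_epi8_model(v8hi a) {
    v8qi lo = __builtin_convertvector(a, v8qi);   /* lane-wise truncation */
    v8qi z  = {0, 0, 0, 0, 0, 0, 0, 0};
    return __builtin_shufflevector(lo, z,          /* low half = lo, high = 0 */
                                   0, 1, 2, 3, 4, 5, 6, 7,
                                   8, 9, 10, 11, 12, 13, 14, 15);
}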
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
(__v16qi) __O,
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
(__v16qi) _mm_setzero_si128(),
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi8 (__m256i __A) {
- return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
- (__v16qi) _mm_setzero_si128(),
- (__mmask16) -1);
+ return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
- return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
- (__v16qi) __O,
- __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm256_cvtepi16_epi8(__A),
+ (__v16qi)__O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) {
- return (__m128i) __builtin_ia32_pmovwb256_mask ((__v16hi) __A,
- (__v16qi) _mm_setzero_si128(),
- __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm256_cvtepi16_epi8(__A),
+ (__v16qi)_mm_setzero_si128());
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
{
__builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
{
__builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
{
__builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
}
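
Besides the attribute split, this hunk fixes the store's mask type: a 256-bit source holds 16 word lanes, so the mask must be __mmask16 (one bit per lane), not __mmask8. A short usage sketch under the same -mavx512bw -mavx512vl assumption:

#include <immintrin.h>
/* Store only the saturated bytes for word lanes 0..7. */
void store_low_lanes(void *p, __m256i v) {
    _mm256_mask_cvtusepi16_storeu_epi8(p, (__mmask16)0x00FF, v);
}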
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhi_epu16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhi_epu16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhi_epu16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhi_epu16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhi_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhi_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhi_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhi_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_unpackhi_epi8(__A, __B),
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_unpackhi_epi8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_unpackhi_epi8(__A, __B),
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_unpackhi_epi8(__A, __B),
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_unpackhi_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_unpackhi_epi16(__A, __B),
(__v8hi) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_unpackhi_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_unpackhi_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_unpacklo_epi8(__A, __B),
(__v16qi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_unpacklo_epi8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_unpacklo_epi8(__A, __B),
(__v32qi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
(__v32qi)_mm256_unpacklo_epi8(__A, __B),
(__v32qi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_unpacklo_epi16(__A, __B),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_unpacklo_epi16(__A, __B),
(__v8hi) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_unpacklo_epi16(__A, __B),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_unpacklo_epi16(__A, __B),
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1792,7 +1776,7 @@ _mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1800,7 +1784,7 @@ _mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1808,7 +1792,7 @@ _mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1817,7 +1801,7 @@ _mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A)
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1825,7 +1809,7 @@ _mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1833,7 +1817,7 @@ _mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1841,7 +1825,7 @@ _mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1850,55 +1834,55 @@ _mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
}
-#define _mm_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W)); })
+ (__v8hi)(__m128i)(W))
-#define _mm_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+#define _mm_maskz_shufflehi_epi16(U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_hi()); })
+ (__v8hi)_mm_setzero_si128())
-#define _mm256_mask_shufflehi_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)(__m256i)(W)); })
+ (__v16hi)(__m256i)(W))
-#define _mm256_maskz_shufflehi_epi16(U, A, imm) __extension__ ({ \
+#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)_mm256_setzero_si256()); })
+ (__v16hi)_mm256_setzero_si256())
-#define _mm_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W)); })
+ (__v8hi)(__m128i)(W))
-#define _mm_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+#define _mm_maskz_shufflelo_epi16(U, A, imm) \
(__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_hi()); })
+ (__v8hi)_mm_setzero_si128())
-#define _mm256_mask_shufflelo_epi16(W, U, A, imm) __extension__ ({ \
+#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflelo_epi16((A), \
(imm)), \
- (__v16hi)(__m256i)(W)); })
+ (__v16hi)(__m256i)(W))
-#define _mm256_maskz_shufflelo_epi16(U, A, imm) __extension__ ({ \
+#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
(__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_shufflelo_epi16((A), \
(imm)), \
- (__v16hi)_mm256_setzero_si256()); })
+ (__v16hi)_mm256_setzero_si256())
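
With the GNU `__extension__ ({ ... })` wrapper removed, each shuffle macro body is a single cast expression, so the macros no longer depend on the statement-expression extension; the leftover `_mm_setzero_hi()` calls are also replaced by the standard `_mm_setzero_si128()`. A minimal usage sketch:

#include <immintrin.h>
/* The imm8 operand must still be a compile-time constant. */
__m128i demo(__mmask8 u, __m128i a) {
    return _mm_maskz_shufflehi_epi16(u, a, 0xE4);  /* 0xE4 keeps lane order */
}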
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi16(__m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1906,7 +1890,7 @@ _mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1914,13 +1898,13 @@ _mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1928,7 +1912,7 @@ _mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1936,7 +1920,7 @@ _mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1944,7 +1928,7 @@ _mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1952,7 +1936,7 @@ _mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1960,7 +1944,7 @@ _mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1968,7 +1952,7 @@ _mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1976,7 +1960,7 @@ _mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -1984,7 +1968,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -1992,7 +1976,7 @@ _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2000,13 +1984,13 @@ _mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi16(__m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2014,7 +1998,7 @@ _mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2022,13 +2006,13 @@ _mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2036,7 +2020,7 @@ _mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2044,13 +2028,13 @@ _mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi16(__m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2058,7 +2042,7 @@ _mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2066,13 +2050,13 @@ _mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi16(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2080,7 +2064,7 @@ _mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2088,7 +2072,7 @@ _mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2096,7 +2080,7 @@ _mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2104,7 +2088,7 @@ _mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2112,7 +2096,7 @@ _mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2120,7 +2104,7 @@ _mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2128,7 +2112,7 @@ _mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2136,7 +2120,7 @@ _mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2144,7 +2128,7 @@ _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2152,7 +2136,7 @@ _mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2160,7 +2144,7 @@ _mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2168,7 +2152,7 @@ _mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2176,7 +2160,7 @@ _mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2184,7 +2168,7 @@ _mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2192,7 +2176,7 @@ _mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
@@ -2200,7 +2184,7 @@ _mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B)
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2208,7 +2192,7 @@ _mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
@@ -2216,7 +2200,7 @@ _mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B)
(__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
@@ -2224,15 +2208,15 @@ _mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
(__v8hi) __W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
(__v8hi) __A,
- (__v8hi) _mm_setzero_hi ());
+ (__v8hi) _mm_setzero_si128 ());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
@@ -2240,7 +2224,7 @@ _mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
(__v16hi) __W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
@@ -2248,7 +2232,7 @@ _mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
(__v16hi) _mm256_setzero_si256 ());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
@@ -2256,15 +2240,15 @@ _mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
(__v16qi) __W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
(__v16qi) __A,
- (__v16qi) _mm_setzero_hi ());
+ (__v16qi) _mm_setzero_si128 ());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
@@ -2272,7 +2256,7 @@ _mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
(__v32qi) __W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
@@ -2281,7 +2265,7 @@ _mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
{
return (__m128i) __builtin_ia32_selectb_128(__M,
@@ -2289,7 +2273,7 @@ _mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
(__v16qi) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
{
return (__m128i) __builtin_ia32_selectb_128(__M,
@@ -2297,7 +2281,7 @@ _mm_maskz_set1_epi8 (__mmask16 __M, char __A)
(__v16qi) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
{
return (__m256i) __builtin_ia32_selectb_256(__M,
@@ -2305,7 +2289,7 @@ _mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
(__v32qi) __O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
{
return (__m256i) __builtin_ia32_selectb_256(__M,
@@ -2313,7 +2297,7 @@ _mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
(__v32qi) _mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
@@ -2321,16 +2305,16 @@ _mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P,
(__v8hi)
- _mm_setzero_hi (),
+ _mm_setzero_si128 (),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
@@ -2338,7 +2322,7 @@ _mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P,
@@ -2347,7 +2331,7 @@ _mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
@@ -2355,7 +2339,7 @@ _mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P,
@@ -2364,7 +2348,7 @@ _mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
@@ -2372,7 +2356,7 @@ _mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
(__mmask32) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P,
@@ -2380,7 +2364,7 @@ _mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
_mm256_setzero_si256 (),
(__mmask32) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
@@ -2388,7 +2372,7 @@ _mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
{
__builtin_ia32_storedquhi256_mask ((__v16hi *) __P,
@@ -2396,7 +2380,7 @@ _mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
(__mmask16) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
{
__builtin_ia32_storedquqi128_mask ((__v16qi *) __P,
@@ -2404,7 +2388,7 @@ _mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
(__mmask16) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
{
__builtin_ia32_storedquqi256_mask ((__v32qi *) __P,
@@ -2412,162 +2396,162 @@ _mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
(__mmask32) __U);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
_mm_test_epi8_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_hi());
+ return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_hi());
+ _mm_setzero_si128());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
_mm256_test_epi8_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpneq_epi8_mask (__U, _mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_test_epi16_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpneq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_hi());
+ return _mm_cmpneq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpneq_epi16_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_hi());
+ _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
_mm256_test_epi16_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpneq_epi16_mask (_mm256_and_si256 (__A, __B),
_mm256_setzero_si256 ());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpneq_epi16_mask (__U, _mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
_mm_testn_epi8_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_hi());
+ return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpeq_epi8_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_hi());
+ _mm_setzero_si128());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
_mm256_testn_epi8_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpeq_epi8_mask (_mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpeq_epi8_mask (__U, _mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_testn_epi16_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpeq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_hi());
+ return _mm_cmpeq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
{
- return _mm_mask_cmpeq_epi16_mask (__U, _mm_and_si128(__A, __B), _mm_setzero_hi());
+ return _mm_mask_cmpeq_epi16_mask (__U, _mm_and_si128(__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
_mm256_testn_epi16_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpeq_epi16_mask (_mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpeq_epi16_mask (__U, _mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
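
As the rewritten bodies show, test sets a mask bit where the lane-wise AND of the operands is nonzero, and testn where it is zero. An illustrative scalar model of the 128-bit word form (not part of the header):

unsigned char test_epi16_model(const short a[8], const short b[8]) {
    unsigned char m = 0;
    for (int i = 0; i < 8; ++i)
        if (a[i] & b[i]) m |= (unsigned char)(1u << i);
    return m;   /* testn is the same loop with the condition inverted */
}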
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
_mm_movepi8_mask (__m128i __A)
{
return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
_mm256_movepi8_mask (__m256i __A)
{
return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_movepi16_mask (__m128i __A)
{
return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
_mm256_movepi16_mask (__m256i __A)
{
return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_movm_epi8 (__mmask16 __A)
{
return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_movm_epi8 (__mmask32 __A)
{
return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_movm_epi16 (__mmask8 __A)
{
return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_movm_epi16 (__mmask16 __A)
{
return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
}
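
The movepi/movm pairs convert between vectors and mask registers: movepi*_mask keeps each lane's most significant (sign) bit, while movm expands each mask bit back to an all-ones or all-zeros lane. Illustrative scalar models for the 8-lane word case:

unsigned char movepi16_mask_model(const short a[8]) {
    unsigned char m = 0;
    for (int i = 0; i < 8; ++i)
        if (a[i] < 0) m |= (unsigned char)(1u << i);
    return m;
}
void movm_epi16_model(short r[8], unsigned char m) {
    for (int i = 0; i < 8; ++i)
        r[i] = ((m >> i) & 1) ? (short)-1 : 0;
}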
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectb_128(__M,
@@ -2575,7 +2559,7 @@ _mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
(__v16qi) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectb_128(__M,
@@ -2583,7 +2567,7 @@ _mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
(__v16qi) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectb_256(__M,
@@ -2591,7 +2575,7 @@ _mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
(__v32qi) __O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectb_256(__M,
@@ -2599,7 +2583,7 @@ _mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
(__v32qi) _mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128(__M,
@@ -2607,7 +2591,7 @@ _mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
(__v8hi) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectw_128(__M,
@@ -2615,7 +2599,7 @@ _mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
(__v8hi) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectw_256(__M,
@@ -2623,7 +2607,7 @@ _mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
(__v16hi) __O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectw_256(__M,
@@ -2631,7 +2615,7 @@ _mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
(__v16hi) _mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
{
return (__m256i) __builtin_ia32_selectw_256 (__M,
@@ -2639,7 +2623,7 @@ _mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
(__v16hi) __O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
{
return (__m256i) __builtin_ia32_selectw_256(__M,
@@ -2647,7 +2631,7 @@ _mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
(__v16hi) _mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
{
return (__m128i) __builtin_ia32_selectw_128(__M,
@@ -2655,7 +2639,7 @@ _mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
(__v8hi) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_set1_epi16 (__mmask8 __M, short __A)
{
return (__m128i) __builtin_ia32_selectw_128(__M,
@@ -2663,119 +2647,102 @@ _mm_maskz_set1_epi16 (__mmask8 __M, short __A)
(__v8hi) _mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_permutexvar_epi16 (__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
- (__v8hi) __A,
- (__v8hi) _mm_undefined_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_permvarhi128((__v8hi) __B, (__v8hi) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
- (__v8hi) __A,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_permutexvar_epi16(__A, __B),
+ (__v8hi) _mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
__m128i __B)
{
- return (__m128i) __builtin_ia32_permvarhi128_mask ((__v8hi) __B,
- (__v8hi) __A,
- (__v8hi) __W,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_permutexvar_epi16(__A, __B),
+ (__v8hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutexvar_epi16 (__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
- (__v16hi) __A,
- (__v16hi) _mm256_undefined_si256 (),
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_permvarhi256((__v16hi) __B, (__v16hi) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A,
__m256i __B)
{
- return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
- (__v16hi) __A,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_permutexvar_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
__m256i __B)
{
- return (__m256i) __builtin_ia32_permvarhi256_mask ((__v16hi) __B,
- (__v16hi) __A,
- (__v16hi) __W,
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_permutexvar_epi16(__A, __B),
+ (__v16hi)__W);
}
-#define _mm_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+#define _mm_mask_alignr_epi8(W, U, A, B, N) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)(__m128i)(W)); })
+ (__v16qi)(__m128i)(W))
-#define _mm_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+#define _mm_maskz_alignr_epi8(U, A, B, N) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)_mm_setzero_si128()); })
+ (__v16qi)_mm_setzero_si128())
-#define _mm256_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
+#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)(__m256i)(W)); })
+ (__v32qi)(__m256i)(W))
-#define _mm256_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
+#define _mm256_maskz_alignr_epi8(U, A, B, N) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)_mm256_setzero_si256()); })
-
-#define _mm_dbsad_epu8(A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(imm), \
- (__v8hi)_mm_setzero_hi(), \
- (__mmask8)-1); })
-
-#define _mm_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(imm), \
- (__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
-
-#define _mm_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(imm), \
- (__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
-
-#define _mm256_dbsad_epu8(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), (int)(imm), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)-1); })
-
-#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), (int)(imm), \
- (__v16hi)(__m256i)(W), \
- (__mmask16)(U)); })
-
-#define _mm256_maskz_dbsad_epu8(U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_dbpsadbw256_mask((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), (int)(imm), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(U)); })
-
-#undef __DEFAULT_FN_ATTRS
+ (__v32qi)_mm256_setzero_si256())
+
+#define _mm_dbsad_epu8(A, B, imm) \
+ (__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(imm))
+
+#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
+ (__v8hi)(__m128i)(W))
+
+#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
+ (__v8hi)_mm_setzero_si128())
+
+#define _mm256_dbsad_epu8(A, B, imm) \
+ (__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(imm))
+
+#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
+ (__v16hi)(__m256i)(W))
+
+#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
+ (__v16hi)_mm256_setzero_si256())
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif /* __AVX512VLBWINTRIN_H */
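
The recurring rewrite in the hunks above replaces the one-builtin masked intrinsics (the `*_mask` builtins that carried a passthrough vector and a mask operand) with an unmasked builtin wrapped in a `__builtin_ia32_select*` blend. Below is a minimal sketch, not part of the patch, of the shape of that transformation; it assumes a clang new enough to provide the unmasked `__builtin_ia32_permvarhi128` and the `__builtin_ia32_selectw_128` builtin used in the hunks above, and the `demo_` name is hypothetical.

/* Sketch of the select-based masking pattern: compute the full unmasked
 * result, then blend it with the passthrough vector under the mask. */
#include <immintrin.h>

static __inline__ __m128i
__attribute__((__always_inline__, __nodebug__,
               __target__("avx512vl,avx512bw"), __min_vector_width__(128)))
demo_mask_permutexvar_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
  /* Unmasked permute: one builtin, no passthrough/mask operands. */
  __m128i __R = (__m128i)__builtin_ia32_permvarhi128((__v8hi)__B, (__v8hi)__A);
  /* Per-element blend: bit i of __M selects __R[i], else __W[i]. */
  return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
                                             (__v8hi)__R, (__v8hi)__W);
}

Expressing masking as a generic select (rather than baking it into every builtin) lets the middle end fold, commute, and CSE the unmasked operation like any other vector op, with the blend handled separately.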
diff --git a/lib/Headers/avx512vlcdintrin.h b/lib/Headers/avx512vlcdintrin.h
index 8f1cd25f0b50..127c5b132ac7 100644
--- a/lib/Headers/avx512vlcdintrin.h
+++ b/lib/Headers/avx512vlcdintrin.h
@@ -1,4 +1,4 @@
-/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ---------------------------===
+/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -28,35 +28,36 @@
#define __AVX512VLCDINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"), __min_vector_width__(256)))
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastmb_epi64 (__mmask8 __A)
{
return (__m128i) _mm_set1_epi64x((long long) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastmb_epi64 (__mmask8 __A)
{
return (__m256i) _mm256_set1_epi64x((long long)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastmw_epi32 (__mmask16 __A)
{
return (__m128i) _mm_set1_epi32((int)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastmw_epi32 (__mmask16 __A)
{
return (__m256i) _mm256_set1_epi32((int)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_conflict_epi64 (__m128i __A)
{
return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
@@ -64,7 +65,7 @@ _mm_conflict_epi64 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
@@ -72,16 +73,16 @@ _mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_vpconflictdi_128_mask ((__v2di) __A,
(__v2di)
- _mm_setzero_di (),
+ _mm_setzero_si128 (),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_conflict_epi64 (__m256i __A)
{
return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
@@ -89,7 +90,7 @@ _mm256_conflict_epi64 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
@@ -97,7 +98,7 @@ _mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_vpconflictdi_256_mask ((__v4di) __A,
@@ -105,7 +106,7 @@ _mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_conflict_epi32 (__m128i __A)
{
return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
@@ -113,7 +114,7 @@ _mm_conflict_epi32 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
@@ -121,7 +122,7 @@ _mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_vpconflictsi_128_mask ((__v4si) __A,
@@ -129,7 +130,7 @@ _mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_conflict_epi32 (__m256i __A)
{
return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
@@ -137,7 +138,7 @@ _mm256_conflict_epi32 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
@@ -145,7 +146,7 @@ _mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_vpconflictsi_256_mask ((__v8si) __A,
@@ -154,110 +155,95 @@ _mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_lzcnt_epi32 (__m128i __A)
{
- return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i) __builtin_ia32_vplzcntd_128 ((__v4si) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_lzcnt_epi32(__A),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vplzcntd_128_mask ((__v4si) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_lzcnt_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_lzcnt_epi32 (__m256i __A)
{
- return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i) __builtin_ia32_vplzcntd_256 ((__v8si) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_lzcnt_epi32(__A),
+ (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vplzcntd_256_mask ((__v8si) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_lzcnt_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_lzcnt_epi64 (__m128i __A)
{
- return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) -1);
+ return (__m128i) __builtin_ia32_vplzcntq_128 ((__v2di) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_lzcnt_epi64(__A),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_vplzcntq_128_mask ((__v2di) __A,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_lzcnt_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_lzcnt_epi64 (__m256i __A)
{
- return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i) __builtin_ia32_vplzcntq_256 ((__v4di) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_lzcnt_epi64(__A),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_vplzcntq_256_mask ((__v4di) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_lzcnt_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif /* __AVX512VLCDINTRIN_H */
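
The other systematic change is the split of `__DEFAULT_FN_ATTRS` into width-specific `__DEFAULT_FN_ATTRS128` and `__DEFAULT_FN_ATTRS256` macros carrying `__min_vector_width__(128)` or `(256)`, so each intrinsic advertises the widest vector it actually touches and options like -mprefer-vector-width can be honored precisely. Note that the attribute tracks the widest operand, not the return type: `_mm256_cvtepi64_ps` above takes the 256 variant even though it returns a `__m128`, because its source is 256 bits wide. A minimal sketch of the pattern, not part of the patch, with hypothetical `DEMO_`/`demo_` names:

/* Width-specific attribute macros, mirroring the defines above. */
#include <immintrin.h>

#define DEMO_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, \
    __target__("avx512vl,avx512cd"), __min_vector_width__(128)))
#define DEMO_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, \
    __target__("avx512vl,avx512cd"), __min_vector_width__(256)))

static __inline__ __m128i DEMO_FN_ATTRS128
demo_broadcastmb_epi64(__mmask8 __A)    /* 128-bit result: width 128 */
{
  return (__m128i)_mm_set1_epi64x((long long)__A);
}

static __inline__ __m256i DEMO_FN_ATTRS256
demo256_broadcastmb_epi64(__mmask8 __A) /* 256-bit result: width 256 */
{
  return (__m256i)_mm256_set1_epi64x((long long)__A);
}

#undef DEMO_FN_ATTRS128
#undef DEMO_FN_ATTRS256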
diff --git a/lib/Headers/avx512vldqintrin.h b/lib/Headers/avx512vldqintrin.h
index d80df9eaffea..9d13846e8964 100644
--- a/lib/Headers/avx512vldqintrin.h
+++ b/lib/Headers/avx512vldqintrin.h
@@ -29,961 +29,953 @@
#define __AVX512VLDQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"), __min_vector_width__(256)))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
return (__m256i) ((__v4du) __A * (__v4du) __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mullo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_mullo_epi64(__A, __B),
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mullo_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
(__v4di)_mm256_mullo_epi64(__A, __B),
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mullo_epi64 (__m128i __A, __m128i __B) {
return (__m128i) ((__v2du) __A * (__v2du) __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mullo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_mullo_epi64(__A, __B),
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mullo_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_mullo_epi64(__A, __B),
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_andnot_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_andnot_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_andnot_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_andnot_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_andnot_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_andnot_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_andnot_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_andnot_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_andnot_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_andnot_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_andnot_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_andnot_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_andnot_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_andnot_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_and_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_and_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_and_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_and_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_and_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_and_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_and_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_and_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_and_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_and_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_and_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_and_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_and_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_and_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_and_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_and_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_xor_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_xor_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_xor_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_xor_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_xor_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_xor_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_xor_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_xor_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_xor_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_xor_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_xor_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_xor_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_xor_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_xor_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_xor_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_or_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_or_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_or_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_or_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_or_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_or_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_or_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_or_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_or_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_or_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_or_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_or_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_or_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_or_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_or_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_or_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtpd_epi64 (__m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtpd_epi64 (__m256d __A) {
return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtpd_epu64 (__m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtpd_epu64 (__m256d __A) {
return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtps_epi64 (__m128 __A) {
return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtps_epi64 (__m128 __A) {
return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtps_epu64 (__m128 __A) {
return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtps_epu64 (__m128 __A) {
return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtepi64_pd (__m128i __A) {
- return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
- (__v2df) _mm_setzero_pd(),
- (__mmask8) -1);
+ return (__m128d)__builtin_convertvector((__v2di)__A, __v2df);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepi64_pd(__A),
+ (__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtqq2pd128_mask ((__v2di) __A,
- (__v2df) _mm_setzero_pd(),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepi64_pd(__A),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_pd (__m256i __A) {
- return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
- (__v4df) _mm256_setzero_pd(),
- (__mmask8) -1);
+ return (__m256d)__builtin_convertvector((__v4di)__A, __v4df);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
- return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepi64_pd(__A),
+ (__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) {
- return (__m256d) __builtin_ia32_cvtqq2pd256_mask ((__v4di) __A,
- (__v4df) _mm256_setzero_pd(),
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepi64_pd(__A),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtepi64_ps (__m128i __A) {
return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_ps (__m256i __A) {
return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
return (__m128) __builtin_ia32_cvtqq2ps256_mask ((__v4di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttpd_epi64 (__m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttpd_epi64 (__m256d __A) {
return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttpd_epu64 (__m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttpd_epu64 (__m256d __A) {
return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) {
return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttps_epi64 (__m128 __A) {
return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttps_epi64 (__m128 __A) {
return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttps_epu64 (__m128 __A) {
return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
(__v2di) _mm_setzero_si128(),
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttps_epu64 (__m128 __A) {
return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
(__v4di) _mm256_setzero_si256(),
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtepu64_pd (__m128i __A) {
- return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
- (__v2df) _mm_setzero_pd(),
- (__mmask8) -1);
+ return (__m128d)__builtin_convertvector((__v2du)__A, __v2df);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepu64_pd(__A),
+ (__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtuqq2pd128_mask ((__v2di) __A,
- (__v2df) _mm_setzero_pd(),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepu64_pd(__A),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_cvtepu64_pd (__m256i __A) {
- return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
- (__v4df) _mm256_setzero_pd(),
- (__mmask8) -1);
+ return (__m256d)__builtin_convertvector((__v4du)__A, __v4df);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
- return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepu64_pd(__A),
+ (__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) {
- return (__m256d) __builtin_ia32_cvtuqq2pd256_mask ((__v4di) __A,
- (__v4df) _mm256_setzero_pd(),
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepu64_pd(__A),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtepu64_ps (__m128i __A) {
return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_cvtepu64_ps (__m256i __A) {
return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
return (__m128) __builtin_ia32_cvtuqq2ps256_mask ((__v4di) __A,
(__v4sf) _mm_setzero_ps(),
(__mmask8) __U);
}
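
The epi64/epu64 to pd conversions just above also drop their `cvtqq2pd*_mask` / `cvtuqq2pd*_mask` builtins in favor of the generic `__builtin_convertvector`, which converts element-wise between vector types; masking then reuses the same select wrappers as everywhere else. A minimal sketch, not part of the patch, with a hypothetical `demo_cvt` name:

/* Element-wise signed int64 -> double via __builtin_convertvector,
 * as in _mm_cvtepi64_pd above; the unsigned variant differs only in
 * casting the source to (__v2du). */
#include <immintrin.h>

static __inline__ __m128d
__attribute__((__always_inline__, __nodebug__,
               __target__("avx512vl,avx512dq"), __min_vector_width__(128)))
demo_cvt(__m128i __A)
{
  /* Converts each of the two 64-bit lanes to a double lane. */
  return (__m128d)__builtin_convertvector((__v2di)__A, __v2df);
}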
-#define _mm_range_pd(A, B, C) __extension__ ({ \
+#define _mm_range_pd(A, B, C) \
(__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), (int)(C), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_range_pd(W, U, A, B, C) __extension__ ({ \
+#define _mm_mask_range_pd(W, U, A, B, C) \
(__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), (int)(C), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_range_pd(U, A, B, C) __extension__ ({ \
+#define _mm_maskz_range_pd(U, A, B, C) \
(__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), (int)(C), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_range_pd(A, B, C) __extension__ ({ \
+#define _mm256_range_pd(A, B, C) \
(__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
(__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_range_pd(W, U, A, B, C) __extension__ ({ \
+#define _mm256_mask_range_pd(W, U, A, B, C) \
(__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
(__v4df)(__m256d)(B), (int)(C), \
(__v4df)(__m256d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_range_pd(U, A, B, C) __extension__ ({ \
+#define _mm256_maskz_range_pd(U, A, B, C) \
(__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
(__v4df)(__m256d)(B), (int)(C), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_range_ps(A, B, C) __extension__ ({ \
+#define _mm_range_ps(A, B, C) \
(__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (int)(C), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_range_ps(W, U, A, B, C) __extension__ ({ \
+#define _mm_mask_range_ps(W, U, A, B, C) \
(__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (int)(C), \
- (__v4sf)(__m128)(W), (__mmask8)(U)); })
+ (__v4sf)(__m128)(W), (__mmask8)(U))
-#define _mm_maskz_range_ps(U, A, B, C) __extension__ ({ \
+#define _mm_maskz_range_ps(U, A, B, C) \
(__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), (int)(C), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_range_ps(A, B, C) __extension__ ({ \
+#define _mm256_range_ps(A, B, C) \
(__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_range_ps(W, U, A, B, C) __extension__ ({ \
+#define _mm256_mask_range_ps(W, U, A, B, C) \
(__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), (int)(C), \
- (__v8sf)(__m256)(W), (__mmask8)(U)); })
+ (__v8sf)(__m256)(W), (__mmask8)(U))
-#define _mm256_maskz_range_ps(U, A, B, C) __extension__ ({ \
+#define _mm256_maskz_range_ps(U, A, B, C) \
(__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), (int)(C), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_reduce_pd(A, B) __extension__ ({ \
+#define _mm_reduce_pd(A, B) \
(__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+#define _mm_mask_reduce_pd(W, U, A, B) \
(__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_reduce_pd(U, A, B) __extension__ ({ \
+#define _mm_maskz_reduce_pd(U, A, B) \
(__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_reduce_pd(A, B) __extension__ ({ \
+#define _mm256_reduce_pd(A, B) \
(__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_reduce_pd(W, U, A, B) __extension__ ({ \
+#define _mm256_mask_reduce_pd(W, U, A, B) \
(__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
(__v4df)(__m256d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_reduce_pd(U, A, B) __extension__ ({ \
+#define _mm256_maskz_reduce_pd(U, A, B) \
(__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_reduce_ps(A, B) __extension__ ({ \
+#define _mm_reduce_ps(A, B) \
(__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+#define _mm_mask_reduce_ps(W, U, A, B) \
(__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_reduce_ps(U, A, B) __extension__ ({ \
+#define _mm_maskz_reduce_ps(U, A, B) \
(__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_reduce_ps(A, B) __extension__ ({ \
+#define _mm256_reduce_ps(A, B) \
(__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_reduce_ps(W, U, A, B) __extension__ ({ \
+#define _mm256_mask_reduce_ps(W, U, A, B) \
(__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
(__v8sf)(__m256)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_reduce_ps(U, A, B) __extension__ ({ \
+#define _mm256_maskz_reduce_ps(U, A, B) \
(__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_movepi32_mask (__m128i __A)
{
return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_movepi32_mask (__m256i __A)
{
return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_movm_epi32 (__mmask8 __A)
{
return (__m128i) __builtin_ia32_cvtmask2d128 (__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_movm_epi32 (__mmask8 __A)
{
return (__m256i) __builtin_ia32_cvtmask2d256 (__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_movm_epi64 (__mmask8 __A)
{
return (__m128i) __builtin_ia32_cvtmask2q128 (__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_movm_epi64 (__mmask8 __A)
{
return (__m256i) __builtin_ia32_cvtmask2q256 (__A);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_movepi64_mask (__m128i __A)
{
return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_movepi64_mask (__m256i __A)
{
return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcast_f32x2 (__m128 __A)
{
- return (__m256)__builtin_shufflevector((__v4sf)__A,
- (__v4sf)_mm_undefined_ps(),
+ return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
@@ -991,7 +983,7 @@ _mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A)
(__v8sf)__O);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
@@ -999,14 +991,14 @@ _mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_broadcast_f64x2(__m128d __A)
{
return (__m256d)__builtin_shufflevector((__v2df)__A, (__v2df)__A,
0, 1, 0, 1);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, __m128d __A)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M,
@@ -1014,7 +1006,7 @@ _mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, __m128d __A)
(__v4df)__O);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M,
@@ -1022,15 +1014,14 @@ _mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcast_i32x2 (__m128i __A)
{
- return (__m128i)__builtin_shufflevector((__v4si)__A,
- (__v4si)_mm_undefined_si128(),
+ return (__m128i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 0, 1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
@@ -1038,7 +1029,7 @@ _mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A)
(__v4si)__O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
@@ -1046,15 +1037,14 @@ _mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcast_i32x2 (__m128i __A)
{
- return (__m256i)__builtin_shufflevector((__v4si)__A,
- (__v4si)_mm_undefined_si128(),
+ return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
@@ -1062,7 +1052,7 @@ _mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A)
(__v8si)__O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
@@ -1070,14 +1060,14 @@ _mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcast_i64x2(__m128i __A)
{
return (__m256i)__builtin_shufflevector((__v2di)__A, (__v2di)__A,
0, 1, 0, 1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
@@ -1085,7 +1075,7 @@ _mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i __A)
(__v4di)__O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
@@ -1093,106 +1083,103 @@ _mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
(__v4di)_mm256_setzero_si256());
}
-#define _mm256_extractf64x2_pd(A, imm) __extension__ ({ \
- (__m128d)__builtin_shufflevector((__v4df)(__m256d)(A), \
- (__v4df)_mm256_undefined_pd(), \
- ((imm) & 1) ? 2 : 0, \
- ((imm) & 1) ? 3 : 1); })
-
-#define _mm256_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm256_extractf64x2_pd((A), (imm)), \
- (__v2df)(W)); })
-
-#define _mm256_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm256_extractf64x2_pd((A), (imm)), \
- (__v2df)_mm_setzero_pd()); })
-
-#define _mm256_extracti64x2_epi64(A, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v4di)(__m256i)(A), \
- (__v4di)_mm256_undefined_si256(), \
- ((imm) & 1) ? 2 : 0, \
- ((imm) & 1) ? 3 : 1); })
-
-#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm256_extracti64x2_epi64((A), (imm)), \
- (__v2di)(W)); })
-
-#define _mm256_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm256_extracti64x2_epi64((A), (imm)), \
- (__v2di)_mm_setzero_di()); })
-
-#define _mm256_insertf64x2(A, B, imm) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(A), \
- (__v4df)_mm256_castpd128_pd256((__m128d)(B)), \
- ((imm) & 0x1) ? 0 : 4, \
- ((imm) & 0x1) ? 1 : 5, \
- ((imm) & 0x1) ? 4 : 2, \
- ((imm) & 0x1) ? 5 : 3); })
-
-#define _mm256_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_extractf64x2_pd(A, imm) \
+ (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1)
+
+#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
+ (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U))
+
+#define _mm256_maskz_extractf64x2_pd(U, A, imm) \
+ (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U))
+
+#define _mm256_extracti64x2_epi64(A, imm) \
+ (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_undefined_si128(), \
+ (__mmask8)-1)
+
+#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
+ (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U))
+
+#define _mm256_maskz_extracti64x2_epi64(U, A, imm) \
+ (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_si128(), \
+ (__mmask8)(U))
+
+#define _mm256_insertf64x2(A, B, imm) \
+ (__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \
+ (__v2df)(__m128d)(B), (int)(imm))
+
+#define _mm256_mask_insertf64x2(W, U, A, B, imm) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_insertf64x2((A), (B), (imm)), \
- (__v4df)(W)); })
+ (__v4df)(__m256d)(W))
-#define _mm256_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_insertf64x2(U, A, B, imm) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_insertf64x2((A), (B), (imm)), \
- (__v4df)_mm256_setzero_pd()); })
+ (__v4df)_mm256_setzero_pd())
-#define _mm256_inserti64x2(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(A), \
- (__v4di)_mm256_castsi128_si256((__m128i)(B)), \
- ((imm) & 0x1) ? 0 : 4, \
- ((imm) & 0x1) ? 1 : 5, \
- ((imm) & 0x1) ? 4 : 2, \
- ((imm) & 0x1) ? 5 : 3); })
+#define _mm256_inserti64x2(A, B, imm) \
+ (__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm))
-#define _mm256_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_inserti64x2(W, U, A, B, imm) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_inserti64x2((A), (B), (imm)), \
- (__v4di)(W)); })
+ (__v4di)(__m256i)(W))
-#define _mm256_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_inserti64x2(U, A, B, imm) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_inserti64x2((A), (B), (imm)), \
- (__v4di)_mm256_setzero_si256()); })
+ (__v4di)_mm256_setzero_si256())
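
The extract macros above stop open-coding the lane selection with __builtin_shufflevector and instead call the dedicated __builtin_ia32_extract{f,i}64x2_256_mask builtins, which take the passthrough vector and mask as arguments; the unmasked form passes _mm_undefined_pd()/_mm_undefined_si128() with an all-ones mask. The insert macros likewise move to __builtin_ia32_insert{f,i}64x2_256. Functionally, the unmasked extract is a 128-bit lane select, which can be sketched with public AVX intrinsics:

#include <immintrin.h>

/* Extract 128-bit lane 0 or 1 of a 256-bit double vector, the operation
 * _mm256_extractf64x2_pd wraps. extract_lane_demo is an illustrative
 * name; compile with AVX enabled. */
static inline __m128d extract_lane_demo(__m256d a, int imm) {
  return (imm & 1) ? _mm256_extractf128_pd(a, 1)
                   : _mm256_extractf128_pd(a, 0);
}
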
-#define _mm_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+#define _mm_mask_fpclass_pd_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_fpclass_pd_mask(A, imm) __extension__ ({ \
+#define _mm_fpclass_pd_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
+#define _mm256_mask_fpclass_pd_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_fpclass_pd_mask(A, imm) __extension__ ({ \
+#define _mm256_fpclass_pd_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+#define _mm_mask_fpclass_ps_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_fpclass_ps_mask(A, imm) __extension__ ({ \
+#define _mm_fpclass_ps_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
+#define _mm256_mask_fpclass_ps_mask(U, A, imm) \
(__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_fpclass_ps_mask(A, imm) __extension__ ({ \
+#define _mm256_fpclass_ps_mask(A, imm) \
(__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
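
Every macro in this block, like the extract/insert and cmp macros elsewhere in these headers, drops the GNU statement-expression wrapper __extension__ ({ ... }) in favor of a plain parenthesized expression. Statement expressions are a GNU extension that is only valid inside function bodies, so the plain form makes the macros usable anywhere an expression is (a file-scope initializer, for example). A before/after sketch with an illustrative helper:

static inline int reduce8_demo(int x) { return x & 0xff; }

/* Old style: GNU statement expression, rejected outside function bodies. */
#define REDUCE8_STMT(x) __extension__ ({ (int)reduce8_demo(x); })
/* New style: plain expression, valid wherever an expression is. */
#define REDUCE8_EXPR(x) ((int)reduce8_demo(x))
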
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
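
Across both of these headers, the single __DEFAULT_FN_ATTRS macro is split into __DEFAULT_FN_ATTRS128 and __DEFAULT_FN_ATTRS256, which append __min_vector_width__(128) or __min_vector_width__(256). That Clang attribute records the minimum vector register width a function's body requires, so the x86 backend will not narrow these intrinsics when the translation unit is compiled with a smaller prefer-vector-width setting. A standalone sketch of an intrinsic-style wrapper in the same form (add32_demo is an illustrative name):

#include <immintrin.h>

/* Promises the backend it needs 128-bit vectors, exactly like
 * __DEFAULT_FN_ATTRS128 above. */
static __inline__ __m128i
    __attribute__((__always_inline__, __nodebug__,
                   __target__("avx512vl"), __min_vector_width__(128)))
add32_demo(__m128i __a, __m128i __b) {
  return (__m128i)((__v4si)__a + (__v4si)__b);
}
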
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index fb8056e3f8d8..0ee1d00ef4d2 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -28,13 +28,12 @@
#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256)))
-/* Doesn't require avx512vl, used in avx512dqintrin.h */
-static __inline __m128i __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
-_mm_setzero_di(void) {
- return (__m128i)(__v2di){ 0LL, 0LL};
-}
+typedef short __v2hi __attribute__((__vector_size__(4)));
+typedef char __v4qi __attribute__((__vector_size__(4)));
+typedef char __v2qi __attribute__((__vector_size__(2)));
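
The private _mm_setzero_di() helper is deleted along with the change; the old _mm256_maskz_extracti64x2_epi64 macro above was one such caller and now reaches for the standard _mm_setzero_si128(). The three new typedefs give names to sub-128-bit element groups (4- and 2-byte vectors); their layout can be checked directly, as in this illustrative sketch (the _demo suffixes avoid colliding with the header's own typedefs):

typedef short __v2hi_demo __attribute__((__vector_size__(4)));
typedef char  __v4qi_demo __attribute__((__vector_size__(4)));
typedef char  __v2qi_demo __attribute__((__vector_size__(2)));

_Static_assert(sizeof(__v2hi_demo) == 4, "two 16-bit elements");
_Static_assert(sizeof(__v4qi_demo) == 4, "four 8-bit elements");
_Static_assert(sizeof(__v2qi_demo) == 2, "two 8-bit elements");
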
/* Integer compare */
@@ -238,7 +237,7 @@ _mm_setzero_di(void) {
#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \
_mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -246,7 +245,7 @@ _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -254,7 +253,7 @@ _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -262,7 +261,7 @@ _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -270,7 +269,7 @@ _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -278,7 +277,7 @@ _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -286,7 +285,7 @@ _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -294,7 +293,7 @@ _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -302,7 +301,7 @@ _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -310,7 +309,7 @@ _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -318,7 +317,7 @@ _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -326,7 +325,7 @@ _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -334,7 +333,7 @@ _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -342,7 +341,7 @@ _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -350,7 +349,7 @@ _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -358,7 +357,7 @@ _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -366,7 +365,7 @@ _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
@@ -374,7 +373,7 @@ _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
@@ -382,7 +381,7 @@ _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
@@ -390,7 +389,7 @@ _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
@@ -398,7 +397,7 @@ _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
@@ -406,7 +405,7 @@ _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
@@ -414,7 +413,7 @@ _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
@@ -422,7 +421,7 @@ _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
@@ -430,7 +429,7 @@ _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
(__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
@@ -438,7 +437,7 @@ _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
@@ -446,7 +445,7 @@ _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
@@ -454,7 +453,7 @@ _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
@@ -462,7 +461,7 @@ _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -470,13 +469,13 @@ _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -484,13 +483,13 @@ _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -498,14 +497,14 @@ _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(),
__U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -513,13 +512,13 @@ _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -527,13 +526,13 @@ _mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -541,13 +540,13 @@ _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -555,13 +554,13 @@ _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B)
{
@@ -570,13 +569,13 @@ _mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A,
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -584,13 +583,13 @@ _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -598,13 +597,13 @@ _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -612,14 +611,14 @@ _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(),
__U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -627,13 +626,13 @@ _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -641,13 +640,13 @@ _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -655,13 +654,13 @@ _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -669,13 +668,13 @@ _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B)
{
@@ -684,909 +683,973 @@ _mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B);
}
-#define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epi32_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
(__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
(__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epu32_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
(__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
(__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epi32_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
(__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
(__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epu32_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
(__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
(__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epi64_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
(__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
(__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_epu64_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
(__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
(__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epi64_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
(__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
(__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_epu64_mask(a, b, p) \
(__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
(__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
(__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_ps_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
(__v8sf)(__m256)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_ps_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
(__v8sf)(__m256)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \
+#define _mm256_cmp_pd_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
(__v4df)(__m256d)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+#define _mm256_mask_cmp_pd_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
(__v4df)(__m256d)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_ps_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_ps_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
(__v4sf)(__m128)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_ps_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
(__v4sf)(__m128)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-#define _mm_cmp_pd_mask(a, b, p) __extension__ ({ \
+#define _mm_cmp_pd_mask(a, b, p) \
(__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
(__v2df)(__m128d)(b), (int)(p), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \
+#define _mm_mask_cmp_pd_mask(m, a, b, p) \
(__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
(__v2df)(__m128d)(b), (int)(p), \
- (__mmask8)(m)); })
+ (__mmask8)(m))
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df) __A);
}
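
From here down, the masked FMA family stops calling dedicated masked builtins such as __builtin_ia32_vfmaddpd128_mask{,3,z} and instead computes the unmasked FMA with __builtin_ia32_vfmaddpd / __builtin_ia32_vfmaddpd256 (and the ps/fmaddsub variants), then blends the result with the matching select builtin. The passthrough operand encodes the variant: _mm_mask_* keeps __A, _mm_mask3_* keeps __C, and _mm_maskz_* blends against zero; fmsub/fnmadd/fnmsub fall out of the same builtin by negating __C and/or __A. The equivalent behavior can be sketched with public intrinsics (assumes AVX-512VL and FMA; mask_fmadd_pd_demo is an illustrative name):

#include <immintrin.h>

/* Per lane i: r[i] = (u >> i) & 1 ? a[i]*b[i] + c[i] : a[i]. */
static inline __m128d mask_fmadd_pd_demo(__m128d a, __mmask8 u,
                                         __m128d b, __m128d c) {
  __m128d fma = _mm_fmadd_pd(a, b, c);  /* unmasked fused multiply-add */
  return _mm_mask_blend_pd(u, a, fma);  /* keep a where the bit is clear */
}
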
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_mask3 ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df) __C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_mask ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df) __A);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_maskz ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_mask3 (-(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df) __C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd (-(__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddpd128_maskz (-(__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd (-(__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask3 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df) __C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_maskz ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_mask3 (-(__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df) __C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddpd256_maskz (-(__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddps128_mask3 ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf) __C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddps128_maskz ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddps128_mask3 (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf) __C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps (-(__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddps128_maskz (-(__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps (-(__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_vfmaddps256_mask3 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf) __C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddps256_maskz ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_vfmaddps256_mask3 (-(__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf) __C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddps256_maskz (-(__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df) __A);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddsubpd128_mask3 ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8)
- __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df) __C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8)
- __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd ((__v2df) __A,
+ (__v2df) __B,
+ (__v2df) __C),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsubpd128_mask ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df) __A);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfmaddsubpd128_maskz ((__v2df) __A,
- (__v2df) __B,
- -(__v2df) __C,
- (__mmask8)
- __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_mask3 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8)
- __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df) __C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8)
- __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
+ (__v4df) __B,
+ (__v4df) __C),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_mask ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfmaddsubpd256_maskz ((__v4df) __A,
- (__v4df) __B,
- -(__v4df) __C,
- (__mmask8)
- __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddsubps128_mask3 ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf) __C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps ((__v4sf) __A,
+ (__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddsubps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfmaddsubps128_maskz ((__v4sf) __A,
- (__v4sf) __B,
- -(__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B,
__m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_vfmaddsubps256_mask3 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf) __C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddsubps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfmaddsubps256_maskz ((__v8sf) __A,
- (__v8sf) __B,
- -(__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf)_mm256_setzero_ps());
}
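
Negating __C is what turns the fmaddsub builtin into fmsubadd in the hunks above: fmaddsub subtracts the addend in even lanes and adds it in odd lanes, so flipping the sign of __C swaps the behavior of every lane. A scalar spot-check of the identity:

#include <assert.h>

int main(void) {
  double a = 2.0, b = 3.0, c = 5.0;
  /* even lane: fmaddsub(a, b, -c) subtracts -c, which matches the
     add-in-even-lanes behavior of fmsubadd(a, b, c) */
  assert(a * b - (-c) == a * b + c);
  /* odd lane: fmaddsub(a, b, -c) adds -c, which matches the
     subtract-in-odd-lanes behavior of fmsubadd(a, b, c) */
  assert(a * b + (-c) == a * b - c);
  return 0;
}
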
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmsubpd128_mask3 ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df) __C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_vfmsubpd256_mask3 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df) __C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmsubps128_mask3 ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf) __C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_vfmsubps256_mask3 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf) __C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmsubaddpd128_mask3 ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8)
- __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd ((__v2df) __A,
+ (__v2df) __B,
+ -(__v2df) __C),
+ (__v2df) __C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_vfmsubaddpd256_mask3 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8)
- __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
+ (__v4df) __B,
+ -(__v4df) __C),
+ (__v4df) __C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmsubaddps128_mask3 ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps ((__v4sf) __A,
+ (__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf) __C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_vfmsubaddps256_mask3 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
+ (__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf) __C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfnmaddpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ -(__v2df) __B,
+ (__v2df) __C),
+ (__v2df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfnmaddpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ -(__v4df) __B,
+ (__v4df) __C),
+ (__v4df) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfnmaddps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ -(__v4sf) __B,
+ (__v4sf) __C),
+ (__v4sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfnmaddps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ -(__v8sf) __B,
+ (__v8sf) __C),
+ (__v8sf) __A);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
{
- return (__m128d) __builtin_ia32_vfnmsubpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ -(__v2df) __B,
+ -(__v2df) __C),
+ (__v2df) __A);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfnmsubpd128_mask3 ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __C,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
+ __builtin_ia32_vfmaddpd ((__v2df) __A,
+ -(__v2df) __B,
+ -(__v2df) __C),
+ (__v2df) __C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
{
- return (__m256d) __builtin_ia32_vfnmsubpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ -(__v4df) __B,
+ -(__v4df) __C),
+ (__v4df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
{
- return (__m256d) __builtin_ia32_vfnmsubpd256_mask3 ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __C,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
+ __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+ -(__v4df) __B,
+ -(__v4df) __C),
+ (__v4df) __C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
{
- return (__m128) __builtin_ia32_vfnmsubps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ -(__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfnmsubps128_mask3 ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __C,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
+ __builtin_ia32_vfmaddps ((__v4sf) __A,
+ -(__v4sf) __B,
+ -(__v4sf) __C),
+ (__v4sf) __C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
{
- return (__m256) __builtin_ia32_vfnmsubps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ -(__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
{
- return (__m256) __builtin_ia32_vfnmsubps256_mask3 ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __C,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
+ __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+ -(__v8sf) __B,
+ -(__v8sf) __C),
+ (__v8sf) __C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_add_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_add_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_add_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_add_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_add_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_add_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_add_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_add_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
(__v4si) __W,
(__v4si) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
(__v8si) __W,
(__v8si) __A);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
(__v2df) __W,
(__v2df) __A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
(__v4df) __W,
(__v4df) __A);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
(__v4sf) __W,
(__v4sf) __A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
(__v8sf) __W,
(__v8sf) __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
(__v2di) __W,
(__v2di) __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
(__v4di) __W,
(__v4di) __A);
}
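
As the blend hunks above show, _mm_mask_blend_* is the select builtin directly, with the write-mask choosing __W over __A lane by lane. A usage sketch (assumes -mavx512vl):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set_epi32(3, 2, 1, 0);    /* lanes 0..3 = 0, 1, 2, 3 */
  __m128i w = _mm_set_epi32(30, 20, 10, 0); /* lanes 0..3 = 0, 10, 20, 30 */
  /* mask 0x5 = 0b0101: lanes 0 and 2 come from w, lanes 1 and 3 from a */
  __m128i r = _mm_mask_blend_epi32(0x5, a, w);
  int out[4];
  _mm_storeu_si128((__m128i *)out, r);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 0 1 20 3 */
  return 0;
}
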
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) {
return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
(__v2df) __W,
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
(__v2df)
@@ -1594,14 +1657,14 @@ _mm_maskz_compress_pd (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) {
return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
(__v4df) __W,
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
(__v4df)
@@ -1609,14 +1672,14 @@ _mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
(__v2di)
@@ -1624,14 +1687,14 @@ _mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
(__v4di)
@@ -1639,14 +1702,14 @@ _mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) {
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) {
return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
(__v4sf)
@@ -1654,14 +1717,14 @@ _mm_maskz_compress_ps (__mmask8 __U, __m128 __A) {
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) {
return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
(__v8sf) __W,
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
(__v8sf)
@@ -1669,14 +1732,14 @@ _mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
(__v4si)
@@ -1684,14 +1747,14 @@ _mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
(__v8si) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
(__v8si)
@@ -1699,128 +1762,126 @@ _mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) {
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) {
__builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
(__v2df) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) {
__builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
(__v4df) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) {
__builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
(__v2di) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) {
__builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
(__v4di) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) {
__builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
(__v4sf) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) {
__builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
(__v8sf) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) {
__builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
(__v4si) __A,
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
__builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
(__v8si) __A,
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepi32_pd(__A),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepi32_pd(__A),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepi32_pd(__A),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepi32_pd(__A),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
- return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_cvtepi32_ps(__A),
+ (__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi32_ps (__mmask16 __U, __m128i __A) {
- return (__m128) __builtin_ia32_cvtdq2ps128_mask ((__v4si) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_cvtepi32_ps(__A),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
- return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
- (__v8sf) __W,
- (__mmask8) __U);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_cvtepi32_ps(__A),
+ (__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi32_ps (__mmask16 __U, __m256i __A) {
- return (__m256) __builtin_ia32_cvtdq2ps256_mask ((__v8si) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_cvtepi32_ps(__A),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
(__v4si)
@@ -1828,29 +1889,28 @@ _mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
- return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm256_cvtpd_epi32(__A),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) {
- return (__m128i) __builtin_ia32_cvtpd2dq256_mask ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm256_cvtpd_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) {
return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
(__v4sf)
@@ -1858,22 +1918,21 @@ _mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) {
- return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm256_cvtpd_ps(__A),
+ (__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) {
- return (__m128) __builtin_ia32_cvtpd2ps256_mask ((__v4df) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm256_cvtpd_ps(__A),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtpd_epu32 (__m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
(__v4si)
@@ -1881,14 +1940,14 @@ _mm_cvtpd_epu32 (__m128d __A) {
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
(__v4si)
@@ -1896,7 +1955,7 @@ _mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtpd_epu32 (__m256d __A) {
return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
(__v4si)
@@ -1904,14 +1963,14 @@ _mm256_cvtpd_epu32 (__m256d __A) {
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
(__v4si)
@@ -1919,67 +1978,63 @@ _mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
- return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtps_epi32(__A),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) {
- return (__m128i) __builtin_ia32_cvtps2dq128_mask ((__v4sf) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtps_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
- return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtps_epi32(__A),
+ (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) {
- return (__m256i) __builtin_ia32_cvtps2dq256_mask ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtps_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) {
- return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtps_pd(__A),
+ (__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
- return (__m128d) __builtin_ia32_cvtps2pd128_mask ((__v4sf) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtps_pd(__A),
+ (__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) {
- return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtps_pd(__A),
+ (__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) {
- return (__m256d) __builtin_ia32_cvtps2pd256_mask ((__v4sf) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtps_pd(__A),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtps_epu32 (__m128 __A) {
return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
(__v4si)
@@ -1987,14 +2042,14 @@ _mm_cvtps_epu32 (__m128 __A) {
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
(__v4si)
@@ -2002,7 +2057,7 @@ _mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtps_epu32 (__m256 __A) {
return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
(__v8si)
@@ -2010,14 +2065,14 @@ _mm256_cvtps_epu32 (__m256 __A) {
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
(__v8si) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
(__v8si)
@@ -2025,14 +2080,14 @@ _mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
(__v4si)
@@ -2040,22 +2095,21 @@ _mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) {
- return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm256_cvttpd_epi32(__A),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) {
- return (__m128i) __builtin_ia32_cvttpd2dq256_mask ((__v4df) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm256_cvttpd_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttpd_epu32 (__m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
(__v4si)
@@ -2063,14 +2117,14 @@ _mm_cvttpd_epu32 (__m128d __A) {
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
(__v4si)
@@ -2078,7 +2132,7 @@ _mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvttpd_epu32 (__m256d __A) {
return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
(__v4si)
@@ -2086,14 +2140,14 @@ _mm256_cvttpd_epu32 (__m256d __A) {
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) {
return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
(__v4si)
@@ -2101,37 +2155,35 @@ _mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) {
- return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvttps_epi32(__A),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) {
- return (__m128i) __builtin_ia32_cvttps2dq128_mask ((__v4sf) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvttps_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) {
- return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvttps_epi32(__A),
+ (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) {
- return (__m256i) __builtin_ia32_cvttps2dq256_mask ((__v8sf) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvttps_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttps_epu32 (__m128 __A) {
return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
(__v4si)
@@ -2139,14 +2191,14 @@ _mm_cvttps_epu32 (__m128 __A) {
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
(__v4si)
@@ -2154,7 +2206,7 @@ _mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttps_epu32 (__m256 __A) {
return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
(__v8si)
@@ -2162,14 +2214,14 @@ _mm256_cvttps_epu32 (__m256 __A) {
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) {
return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
(__v8si) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
(__v8si)
@@ -2177,155 +2229,147 @@ _mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtepu32_pd (__m128i __A) {
return (__m128d) __builtin_convertvector(
__builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepu32_pd(__A),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
(__v2df)_mm_cvtepu32_pd(__A),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_cvtepu32_pd (__m128i __A) {
return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepu32_pd(__A),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
(__v4df)_mm256_cvtepu32_pd(__A),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtepu32_ps (__m128i __A) {
- return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) -1);
+ return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) {
- return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_cvtepu32_ps(__A),
+ (__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) {
- return (__m128) __builtin_ia32_cvtudq2ps128_mask ((__v4si) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_cvtepu32_ps(__A),
+ (__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_cvtepu32_ps (__m256i __A) {
- return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) -1);
+ return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) {
- return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
- (__v8sf) __W,
- (__mmask8) __U);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_cvtepu32_ps(__A),
+ (__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
- return (__m256) __builtin_ia32_cvtudq2ps256_mask ((__v8si) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_cvtepu32_ps(__A),
+ (__v8sf)_mm256_setzero_ps());
}
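
Unlike the masked conversions, the unmasked _mm_cvtepu32_ps and _mm256_cvtepu32_ps above no longer need a target builtin at all: __builtin_convertvector is Clang's generic lane-wise conversion. A self-contained illustration with plain vector-extension types (the typedef names are illustrative, not from the header):

typedef unsigned int u32x4 __attribute__((vector_size(16)));
typedef float f32x4 __attribute__((vector_size(16)));

static f32x4 cvtepu32_ps_model(u32x4 v) {
  /* per-lane (float)v[i], i.e. an unsigned-to-float conversion on each
     element, which the backend lowers to the appropriate vcvtudq2ps */
  return __builtin_convertvector(v, f32x4);
}
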
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_div_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_div_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_div_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_div_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_div_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_div_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_div_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_div_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) {
return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
(__v2df) __W,
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
(__v2df)
@@ -2333,14 +2377,14 @@ _mm_maskz_expand_pd (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) {
return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
(__v4df) __W,
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
(__v4df)
@@ -2348,14 +2392,14 @@ _mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
(__v2di) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
(__v2di)
@@ -2363,14 +2407,14 @@ _mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
(__v4di) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
(__v4di)
@@ -2378,7 +2422,7 @@ _mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) {
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
(__v2df) __W,
@@ -2386,7 +2430,7 @@ _mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
(__v2df)
@@ -2395,7 +2439,7 @@ _mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
(__v4df) __W,
@@ -2403,7 +2447,7 @@ _mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
(__v4df)
@@ -2412,7 +2456,7 @@ _mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
(__v2di) __W,
@@ -2420,7 +2464,7 @@ _mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
(__v2di)
@@ -2429,7 +2473,7 @@ _mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
void const *__P) {
return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
@@ -2438,7 +2482,7 @@ _mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
(__v4di)
@@ -2447,14 +2491,14 @@ _mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) {
return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
(__v4sf)
@@ -2463,14 +2507,14 @@ _mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) {
return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
(__v8sf) __W,
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
(__v8sf)
@@ -2479,7 +2523,7 @@ _mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
(__v4si) __W,
@@ -2487,7 +2531,7 @@ _mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
(__v4si)
@@ -2495,7 +2539,7 @@ _mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
void const *__P) {
return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
@@ -2504,7 +2548,7 @@ _mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
(__v8si)
@@ -2513,14 +2557,14 @@ _mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) {
__U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) {
return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
(__v4sf)
@@ -2528,14 +2572,14 @@ _mm_maskz_expand_ps (__mmask8 __U, __m128 __A) {
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) {
return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
(__v8sf) __W,
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
(__v8sf)
@@ -2543,14 +2587,14 @@ _mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) {
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
(__v4si) __W,
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
(__v4si)
@@ -2558,14 +2602,14 @@ _mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) {
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
(__v8si) __W,
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
(__v8si)
@@ -2573,7 +2617,7 @@ _mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) {
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_getexp_pd (__m128d __A) {
return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
(__v2df)
@@ -2581,14 +2625,14 @@ _mm_getexp_pd (__m128d __A) {
(__mmask8) -1);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) {
return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
(__v2df) __W,
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
(__v2df)
@@ -2596,7 +2640,7 @@ _mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) {
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_getexp_pd (__m256d __A) {
return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
(__v4df)
@@ -2604,14 +2648,14 @@ _mm256_getexp_pd (__m256d __A) {
(__mmask8) -1);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) {
return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
(__v4df) __W,
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
(__v4df)
@@ -2619,7 +2663,7 @@ _mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) {
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_getexp_ps (__m128 __A) {
return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
(__v4sf)
@@ -2627,14 +2671,14 @@ _mm_getexp_ps (__m128 __A) {
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) {
return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
(__v4sf) __W,
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
(__v4sf)
@@ -2642,7 +2686,7 @@ _mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) {
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_getexp_ps (__m256 __A) {
return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
(__v8sf)
@@ -2650,14 +2694,14 @@ _mm256_getexp_ps (__m256 __A) {
(__mmask8) -1);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) {
return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
(__v8sf) __W,
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
(__v8sf)
@@ -2665,643 +2709,579 @@ _mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
(__mmask8) __U);
}
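
The getexp family above maps each lane to the exponent of its value, i.e. floor(log2(|x|)), returned in the same floating-point format. A minimal usage sketch, assuming AVX-512VL (compile with -mavx512vl); main() and the test values are hypothetical, not part of the header:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(8.0, 0.5);            /* lane1 = 8.0, lane0 = 0.5 */
  double out[2];
  _mm_storeu_pd(out, _mm_getexp_pd(a));        /* floor(log2(|x|)) per lane */
  printf("%g %g\n", out[0], out[1]);           /* expect: -1 3 */
  return 0;
}
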
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_max_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_max_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_max_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_max_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_max_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_max_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_max_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_max_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_min_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_min_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_min_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_min_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_min_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_min_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_min_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_min_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_mul_pd(__A, __B),
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
(__v2df)_mm_mul_pd(__A, __B),
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_mul_pd(__A, __B),
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
(__v4df)_mm256_mul_pd(__A, __B),
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_mul_ps(__A, __B),
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
(__v4sf)_mm_mul_ps(__A, __B),
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_mul_ps(__A, __B),
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
(__v8sf)_mm256_mul_ps(__A, __B),
(__v8sf)_mm256_setzero_ps());
}
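
These mask/maskz arithmetic wrappers all follow one shape: compute the full-width result with the plain intrinsic, then blend it per mask bit against either __W (merge-masking) or zero (zero-masking) via a selectp builtin. A minimal sketch of the zero-masking behavior, assuming AVX-512VL; the values and main() are hypothetical:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(3.0, 5.0);            /* lane1 = 3.0, lane0 = 5.0 */
  __m128d b = _mm_set_pd(2.0, 7.0);
  /* Mask 0x2: only lane 1 keeps its product; lane 0 is zeroed. */
  __m128d r = _mm_maskz_mul_pd((__mmask8)0x2, a, b);
  double out[2];
  _mm_storeu_pd(out, r);
  printf("%g %g\n", out[0], out[1]);           /* expect: 0 6 */
  return 0;
}
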
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_abs_epi32(__A),
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
(__v4si)_mm_abs_epi32(__A),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
- return (__m256i)__builtin_ia32_selectd_256((__mmask16)__U,
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_abs_epi32(__A),
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
- return (__m256i)__builtin_ia32_selectd_256((__mmask16)__U,
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
(__v8si)_mm256_abs_epi32(__A),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_abs_epi64 (__m128i __A) {
- return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_abs_epi64(__A),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_pabsq128_mask ((__v2di) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_abs_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi64 (__m256i __A) {
- return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_abs_epi64(__A),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_pabsq256_mask ((__v4di) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_abs_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
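
The rewritten abs_epi64 bodies make the same select pattern explicit: an unmasked __builtin_ia32_pabsq* computes the operation, and __builtin_ia32_selectq_* picks, per mask bit, between that result and the passthrough (or zero) operand. A minimal merge-masking sketch, assuming AVX-512VL; main() and the test values are hypothetical:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set_epi64x(-7, 3);           /* lane1 = -7, lane0 = 3 */
  __m128i w = _mm_set_epi64x(100, 200);        /* passthrough values */
  /* Mask 0x1: lane 0 takes |3| = 3; lane 1 keeps w's 100. */
  __m128i r = _mm_mask_abs_epi64(w, (__mmask8)0x1, a);
  long long out[2];
  _mm_storeu_si128((__m128i *)out, r);
  printf("%lld %lld\n", out[0], out[1]);       /* expect: 3 100 */
  return 0;
}
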
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_max_epi32(__A, __B),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_max_epi32(__A, __B),
(__v4si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_max_epi32(__A, __B),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_max_epi32(__A, __B),
(__v8si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- __M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_max_epi64 (__m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W, __M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_max_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi64 (__m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxsq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_max_epi64(__A, __B),
+ (__v2di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- __M);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi64 (__m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W, __M);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_max_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_max_epi64 (__m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxsq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_max_epi64(__A, __B),
+ (__v4di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_max_epu32(__A, __B),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_max_epu32(__A, __B),
(__v4si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_max_epu32(__A, __B),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_max_epu32(__A, __B),
(__v8si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_max_epu64 (__m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxuq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W, __M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_max_epu64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- __M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_max_epu64(__A, __B),
+ (__v2di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu64 (__m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxuq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W, __M);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_max_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_max_epu64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_max_epu64(__A, __B),
+ (__v4di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_min_epi32(__A, __B),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_min_epi32(__A, __B),
(__v4si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_min_epi32(__A, __B),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_min_epi32(__A, __B),
(__v8si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_min_epi64 (__m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W, __M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_min_epi64(__A, __B),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pminsq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_min_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi64 (__m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W, __M);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_min_epi64(__A, __B),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pminsq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_min_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_min_epu32(__A, __B),
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
(__v4si)_mm_min_epu32(__A, __B),
(__v4si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_min_epu32(__A, __B),
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
(__v8si)_mm256_min_epu32(__A, __B),
(__v8si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_min_epu64 (__m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W, __M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_min_epu64(__A, __B),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pminuq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_min_epu64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu64 (__m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W, __M);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_min_epu64(__A, __B),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pminuq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_min_epu64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-#define _mm_roundscale_pd(A, imm) __extension__ ({ \
+#define _mm_roundscale_pd(A, imm) \
(__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
(int)(imm), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \
+#define _mm_mask_roundscale_pd(W, U, A, imm) \
(__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
(int)(imm), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_roundscale_pd(U, A, imm) __extension__ ({ \
+#define _mm_maskz_roundscale_pd(U, A, imm) \
(__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
(int)(imm), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_roundscale_pd(A, imm) __extension__ ({ \
+#define _mm256_roundscale_pd(A, imm) \
(__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
(int)(imm), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_roundscale_pd(W, U, A, imm) __extension__ ({ \
+#define _mm256_mask_roundscale_pd(W, U, A, imm) \
(__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
(int)(imm), \
(__v4df)(__m256d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_roundscale_pd(U, A, imm) __extension__ ({ \
+#define _mm256_maskz_roundscale_pd(U, A, imm) \
(__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
(int)(imm), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_roundscale_ps(A, imm) __extension__ ({ \
+#define _mm_roundscale_ps(A, imm) \
(__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_roundscale_ps(W, U, A, imm) __extension__ ({ \
+#define _mm_mask_roundscale_ps(W, U, A, imm) \
(__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_roundscale_ps(U, A, imm) __extension__ ({ \
+#define _mm_maskz_roundscale_ps(U, A, imm) \
(__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_roundscale_ps(A, imm) __extension__ ({ \
+#define _mm256_roundscale_ps(A, imm) \
(__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_roundscale_ps(W, U, A, imm) __extension__ ({ \
+#define _mm256_mask_roundscale_ps(W, U, A, imm) \
(__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
(__v8sf)(__m256)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_roundscale_ps(U, A, imm) __extension__ ({ \
+#define _mm256_maskz_roundscale_ps(U, A, imm) \
(__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
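
With the statement expressions gone, each roundscale macro now expands to a single cast expression over the builtin. Functionally it rounds each lane to 2^-M granularity under the immediate-selected rounding mode; a minimal sketch with imm 0 (keep zero fraction bits, round to nearest even), assuming AVX-512VL and a hypothetical main():

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(2.5, -1.25);          /* lane1 = 2.5, lane0 = -1.25 */
  double out[2];
  _mm_storeu_pd(out, _mm_roundscale_pd(a, 0)); /* round to nearest integer */
  printf("%g %g\n", out[0], out[1]);           /* expect: -1 2 */
  return 0;
}
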
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_scalef_pd (__m128d __A, __m128d __B) {
return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
(__v2df) __B,
@@ -3310,7 +3290,7 @@ _mm_scalef_pd (__m128d __A, __m128d __B) {
(__mmask8) -1);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
__m128d __B) {
return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
@@ -3319,7 +3299,7 @@ _mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A,
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
(__v2df) __B,
@@ -3328,7 +3308,7 @@ _mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) {
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_scalef_pd (__m256d __A, __m256d __B) {
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
(__v4df) __B,
@@ -3337,7 +3317,7 @@ _mm256_scalef_pd (__m256d __A, __m256d __B) {
(__mmask8) -1);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
__m256d __B) {
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
@@ -3346,7 +3326,7 @@ _mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A,
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
(__v4df) __B,
@@ -3355,7 +3335,7 @@ _mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) {
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_scalef_ps (__m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -3364,7 +3344,7 @@ _mm_scalef_ps (__m128 __A, __m128 __B) {
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -3372,7 +3352,7 @@ _mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
(__v4sf) __B,
@@ -3381,7 +3361,7 @@ _mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) {
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_scalef_ps (__m256 __A, __m256 __B) {
return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
(__v8sf) __B,
@@ -3390,7 +3370,7 @@ _mm256_scalef_ps (__m256 __A, __m256 __B) {
(__mmask8) -1);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
__m256 __B) {
return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
@@ -3399,7 +3379,7 @@ _mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A,
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
(__v8sf) __B,
@@ -3408,1160 +3388,1027 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
(__mmask8) __U);
}
-#define _mm_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \
+#define _mm_i64scatter_pd(addr, index, v1, scale) \
__builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), \
- (__v2df)(__m128d)(v1), (int)(scale)); })
+ (__v2df)(__m128d)(v1), (int)(scale))
-#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), \
- (__v2df)(__m128d)(v1), (int)(scale)); })
+ (__v2df)(__m128d)(v1), (int)(scale))
-#define _mm_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+#define _mm_i64scatter_epi64(addr, index, v1, scale) \
__builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), \
- (__v2di)(__m128i)(v1), (int)(scale)); })
+ (__v2di)(__m128i)(v1), (int)(scale))
-#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), \
- (__v2di)(__m128i)(v1), (int)(scale)); })
+ (__v2di)(__m128i)(v1), (int)(scale))
-#define _mm256_i64scatter_pd(addr, index, v1, scale) __extension__ ({ \
+#define _mm256_i64scatter_pd(addr, index, v1, scale) \
__builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), \
- (__v4df)(__m256d)(v1), (int)(scale)); })
+ (__v4df)(__m256d)(v1), (int)(scale))
-#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), \
- (__v4df)(__m256d)(v1), (int)(scale)); })
+ (__v4df)(__m256d)(v1), (int)(scale))
-#define _mm256_i64scatter_epi64(addr, index, v1, scale) __extension__ ({ \
+#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
__builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), \
- (__v4di)(__m256i)(v1), (int)(scale)); })
+ (__v4di)(__m256i)(v1), (int)(scale))
-#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), \
- (__v4di)(__m256i)(v1), (int)(scale)); })
+ (__v4di)(__m256i)(v1), (int)(scale))
-#define _mm_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \
+#define _mm_i64scatter_ps(addr, index, v1, scale) \
__builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
- (int)(scale)); })
+ (int)(scale))
-#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
- (int)(scale)); })
+ (int)(scale))
-#define _mm_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+#define _mm_i64scatter_epi32(addr, index, v1, scale) \
__builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), \
- (__v4si)(__m128i)(v1), (int)(scale)); })
+ (__v4si)(__m128i)(v1), (int)(scale))
-#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), \
- (__v4si)(__m128i)(v1), (int)(scale)); })
+ (__v4si)(__m128i)(v1), (int)(scale))
-#define _mm256_i64scatter_ps(addr, index, v1, scale) __extension__ ({ \
+#define _mm256_i64scatter_ps(addr, index, v1, scale) \
__builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
- (int)(scale)); })
+ (int)(scale))
-#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
- (int)(scale)); })
+ (int)(scale))
-#define _mm256_i64scatter_epi32(addr, index, v1, scale) __extension__ ({ \
+#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
__builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), \
- (__v4si)(__m128i)(v1), (int)(scale)); })
+ (__v4si)(__m128i)(v1), (int)(scale))
-#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
+#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
__builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), \
- (__v4si)(__m128i)(v1), (int)(scale)); })
+ (__v4si)(__m128i)(v1), (int)(scale))
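
The i64scatter macros store each selected element at addr plus index[i] times scale bytes, with scale restricted to 1, 2, 4, or 8. A minimal unmasked sketch, assuming AVX-512VL; buf, the index values, and main() are hypothetical:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  double buf[8] = {0};
  __m128i idx = _mm_set_epi64x(6, 1);          /* element offsets 6 and 1 */
  __m128d v   = _mm_set_pd(66.0, 11.0);        /* lane1 = 66.0, lane0 = 11.0 */
  _mm_i64scatter_pd(buf, idx, v, 8);           /* scale 8 = sizeof(double) */
  printf("%g %g\n", buf[1], buf[6]);           /* expect: 11 66 */
  return 0;
}
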
-#define _mm_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \
+#define _mm_i32scatter_pd(addr, index, v1, scale) \
__builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), \
- (__v2df)(__m128d)(v1), (int)(scale)); })
-
-#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), \
- (__v4si)(__m128i)(index), \
- (__v2df)(__m128d)(v1), (int)(scale)); })
-
-#define _mm_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, \
- (__v4si)(__m128i)(index), \
- (__v2di)(__m128i)(v1), (int)(scale)); })
-
-#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), \
- (__v4si)(__m128i)(index), \
- (__v2di)(__m128i)(v1), (int)(scale)); })
-
-#define _mm256_i32scatter_pd(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, \
- (__v4si)(__m128i)(index), \
- (__v4df)(__m256d)(v1), (int)(scale)); })
-
-#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), \
- (__v4si)(__m128i)(index), \
- (__v4df)(__m256d)(v1), (int)(scale)); })
-
-#define _mm256_i32scatter_epi64(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, \
- (__v4si)(__m128i)(index), \
- (__v4di)(__m256i)(v1), (int)(scale)); })
-
-#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), \
- (__v4si)(__m128i)(index), \
- (__v4di)(__m256i)(v1), (int)(scale)); })
-
-#define _mm_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, \
- (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
- (int)(scale)); })
-
-#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), \
- (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
- (int)(scale)); })
-
-#define _mm_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, \
- (__v4si)(__m128i)(index), \
- (__v4si)(__m128i)(v1), (int)(scale)); })
-
-#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), \
- (__v4si)(__m128i)(index), \
- (__v4si)(__m128i)(v1), (int)(scale)); })
-
-#define _mm256_i32scatter_ps(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, \
- (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
- (int)(scale)); })
-
-#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), \
- (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
- (int)(scale)); })
-
-#define _mm256_i32scatter_epi32(addr, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, \
- (__v8si)(__m256i)(index), \
- (__v8si)(__m256i)(v1), (int)(scale)); })
-
-#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) __extension__ ({ \
- __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), \
- (__v8si)(__m256i)(index), \
- (__v8si)(__m256i)(v1), (int)(scale)); })
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
- return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
- (__v2df)_mm_sqrt_pd(__A),
- (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
- return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
- (__v2df)_mm_sqrt_pd(__A),
- (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
- return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
- (__v4df)_mm256_sqrt_pd(__A),
- (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
- return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
- (__v4df)_mm256_sqrt_pd(__A),
- (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
- return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
- (__v4sf)_mm_sqrt_ps(__A),
- (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
- return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
- (__v4sf)_mm_sqrt_ps(__A),
- (__v4sf)_mm_setzero_pd());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
- return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
- (__v8sf)_mm256_sqrt_ps(__A),
- (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
- return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
- (__v8sf)_mm256_sqrt_ps(__A),
- (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
- (__v2df)_mm_sub_pd(__A, __B),
- (__v2df)__W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
- (__v2df)_mm_sub_pd(__A, __B),
- (__v2df)_mm_setzero_pd());
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
- (__v4df)_mm256_sub_pd(__A, __B),
- (__v4df)__W);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
- (__v4df)_mm256_sub_pd(__A, __B),
- (__v4df)_mm256_setzero_pd());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
- (__v4sf)_mm_sub_ps(__A, __B),
- (__v4sf)__W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
- (__v4sf)_mm_sub_ps(__A, __B),
- (__v4sf)_mm_setzero_ps());
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
- (__v8sf)_mm256_sub_ps(__A, __B),
- (__v8sf)__W);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
- (__v8sf)_mm256_sub_ps(__A, __B),
- (__v8sf)_mm256_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask2_permutex2var_epi32 (__m128i __A, __m128i __I, __mmask8 __U,
- __m128i __B) {
- return (__m128i) __builtin_ia32_vpermi2vard128_mask ((__v4si) __A,
- (__v4si) __I
- /* idx */ ,
- (__v4si) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask2_permutex2var_epi32 (__m256i __A, __m256i __I,
- __mmask8 __U, __m256i __B) {
- return (__m256i) __builtin_ia32_vpermi2vard256_mask ((__v8si) __A,
- (__v8si) __I
- /* idx */ ,
- (__v8si) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask2_permutex2var_pd (__m128d __A, __m128i __I, __mmask8 __U,
- __m128d __B) {
- return (__m128d) __builtin_ia32_vpermi2varpd128_mask ((__v2df) __A,
- (__v2di) __I
- /* idx */ ,
- (__v2df) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask2_permutex2var_pd (__m256d __A, __m256i __I, __mmask8 __U,
- __m256d __B) {
- return (__m256d) __builtin_ia32_vpermi2varpd256_mask ((__v4df) __A,
- (__v4di) __I
- /* idx */ ,
- (__v4df) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask2_permutex2var_ps (__m128 __A, __m128i __I, __mmask8 __U,
- __m128 __B) {
- return (__m128) __builtin_ia32_vpermi2varps128_mask ((__v4sf) __A,
- (__v4si) __I
- /* idx */ ,
- (__v4sf) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask2_permutex2var_ps (__m256 __A, __m256i __I, __mmask8 __U,
- __m256 __B) {
- return (__m256) __builtin_ia32_vpermi2varps256_mask ((__v8sf) __A,
- (__v8si) __I
- /* idx */ ,
- (__v8sf) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask2_permutex2var_epi64 (__m128i __A, __m128i __I, __mmask8 __U,
- __m128i __B) {
- return (__m128i) __builtin_ia32_vpermi2varq128_mask ((__v2di) __A,
- (__v2di) __I
- /* idx */ ,
- (__v2di) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask2_permutex2var_epi64 (__m256i __A, __m256i __I,
- __mmask8 __U, __m256i __B) {
- return (__m256i) __builtin_ia32_vpermi2varq256_mask ((__v4di) __A,
- (__v4di) __I
- /* idx */ ,
- (__v4di) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_permutex2var_epi32 (__m128i __A, __m128i __I, __m128i __B) {
- return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
- /* idx */ ,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_permutex2var_epi32 (__m128i __A, __mmask8 __U, __m128i __I,
- __m128i __B) {
- return (__m128i) __builtin_ia32_vpermt2vard128_mask ((__v4si) __I
- /* idx */ ,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_permutex2var_epi32 (__mmask8 __U, __m128i __A, __m128i __I,
- __m128i __B) {
- return (__m128i) __builtin_ia32_vpermt2vard128_maskz ((__v4si) __I
- /* idx */ ,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_permutex2var_epi32 (__m256i __A, __m256i __I, __m256i __B) {
- return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
- /* idx */ ,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_permutex2var_epi32 (__m256i __A, __mmask8 __U, __m256i __I,
- __m256i __B) {
- return (__m256i) __builtin_ia32_vpermt2vard256_mask ((__v8si) __I
- /* idx */ ,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_permutex2var_epi32 (__mmask8 __U, __m256i __A,
- __m256i __I, __m256i __B) {
- return (__m256i) __builtin_ia32_vpermt2vard256_maskz ((__v8si) __I
- /* idx */ ,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_permutex2var_pd (__m128d __A, __m128i __I, __m128d __B) {
- return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
- /* idx */ ,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8) -
- 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_permutex2var_pd (__m128d __A, __mmask8 __U, __m128i __I,
- __m128d __B) {
- return (__m128d) __builtin_ia32_vpermt2varpd128_mask ((__v2di) __I
- /* idx */ ,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_permutex2var_pd (__mmask8 __U, __m128d __A, __m128i __I,
- __m128d __B) {
- return (__m128d) __builtin_ia32_vpermt2varpd128_maskz ((__v2di) __I
- /* idx */ ,
- (__v2df) __A,
- (__v2df) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_permutex2var_pd (__m256d __A, __m256i __I, __m256d __B) {
- return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
- /* idx */ ,
- (__v4df) __A,
- (__v4df) __B,
- (__mmask8) -
- 1);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_permutex2var_pd (__m256d __A, __mmask8 __U, __m256i __I,
- __m256d __B) {
- return (__m256d) __builtin_ia32_vpermt2varpd256_mask ((__v4di) __I
- /* idx */ ,
- (__v4df) __A,
- (__v4df) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_permutex2var_pd (__mmask8 __U, __m256d __A, __m256i __I,
- __m256d __B) {
- return (__m256d) __builtin_ia32_vpermt2varpd256_maskz ((__v4di) __I
- /* idx */ ,
- (__v4df) __A,
- (__v4df) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_permutex2var_ps (__m128 __A, __m128i __I, __m128 __B) {
- return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
- /* idx */ ,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_permutex2var_ps (__m128 __A, __mmask8 __U, __m128i __I,
- __m128 __B) {
- return (__m128) __builtin_ia32_vpermt2varps128_mask ((__v4si) __I
- /* idx */ ,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_permutex2var_ps (__mmask8 __U, __m128 __A, __m128i __I,
- __m128 __B) {
- return (__m128) __builtin_ia32_vpermt2varps128_maskz ((__v4si) __I
- /* idx */ ,
- (__v4sf) __A,
- (__v4sf) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_permutex2var_ps (__m256 __A, __m256i __I, __m256 __B) {
- return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
- /* idx */ ,
- (__v8sf) __A,
- (__v8sf) __B,
- (__mmask8) -1);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_permutex2var_ps (__m256 __A, __mmask8 __U, __m256i __I,
- __m256 __B) {
- return (__m256) __builtin_ia32_vpermt2varps256_mask ((__v8si) __I
- /* idx */ ,
- (__v8sf) __A,
- (__v8sf) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_permutex2var_ps (__mmask8 __U, __m256 __A, __m256i __I,
- __m256 __B) {
- return (__m256) __builtin_ia32_vpermt2varps256_maskz ((__v8si) __I
- /* idx */ ,
- (__v8sf) __A,
- (__v8sf) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_permutex2var_epi64 (__m128i __A, __m128i __I, __m128i __B) {
- return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
- /* idx */ ,
- (__v2di) __A,
- (__v2di) __B,
- (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_permutex2var_epi64 (__m128i __A, __mmask8 __U, __m128i __I,
- __m128i __B) {
- return (__m128i) __builtin_ia32_vpermt2varq128_mask ((__v2di) __I
- /* idx */ ,
- (__v2di) __A,
- (__v2di) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_permutex2var_epi64 (__mmask8 __U, __m128i __A, __m128i __I,
- __m128i __B) {
- return (__m128i) __builtin_ia32_vpermt2varq128_maskz ((__v2di) __I
- /* idx */ ,
- (__v2di) __A,
- (__v2di) __B,
- (__mmask8)
- __U);
-}
-
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_permutex2var_epi64 (__m256i __A, __m256i __I, __m256i __B) {
- return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
- /* idx */ ,
- (__v4di) __A,
- (__v4di) __B,
- (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_permutex2var_epi64 (__m256i __A, __mmask8 __U, __m256i __I,
- __m256i __B) {
- return (__m256i) __builtin_ia32_vpermt2varq256_mask ((__v4di) __I
- /* idx */ ,
- (__v4di) __A,
- (__v4di) __B,
- (__mmask8) __U);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
- __m256i __I, __m256i __B) {
- return (__m256i) __builtin_ia32_vpermt2varq256_maskz ((__v4di) __I
- /* idx */ ,
- (__v4di) __A,
- (__v4di) __B,
- (__mmask8)
- __U);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepi8_epi32(__A),
- (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepi8_epi32(__A),
- (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepi8_epi32(__A),
- (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepi8_epi32(__A),
- (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepi8_epi64(__A),
- (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepi8_epi64(__A),
- (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepi8_epi64(__A),
- (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepi8_epi64(__A),
- (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepi32_epi64(__X),
- (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepi32_epi64(__X),
- (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepi32_epi64(__X),
- (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepi32_epi64(__X),
- (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepi16_epi32(__A),
- (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepi16_epi32(__A),
- (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepi16_epi32(__A),
- (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepi16_epi32(__A),
- (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepi16_epi64(__A),
- (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepi16_epi64(__A),
- (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepi16_epi64(__A),
- (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepi16_epi64(__A),
- (__v4di)_mm256_setzero_si256());
-}
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepu8_epi32(__A),
- (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepu8_epi32(__A),
- (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepu8_epi32(__A),
- (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepu8_epi32(__A),
- (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepu8_epi64(__A),
- (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepu8_epi64(__A),
- (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepu8_epi64(__A),
- (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepu8_epi64(__A),
- (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepu32_epi64(__X),
- (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepu32_epi64(__X),
- (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepu32_epi64(__X),
- (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepu32_epi64(__X),
- (__v4di)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepu16_epi32(__A),
- (__v4si)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_cvtepu16_epi32(__A),
- (__v4si)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepu16_epi32(__A),
- (__v8si)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_cvtepu16_epi32(__A),
- (__v8si)_mm256_setzero_si256());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepu16_epi64(__A),
- (__v2di)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_cvtepu16_epi64(__A),
- (__v2di)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepu16_epi64(__A),
- (__v4di)__W);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_cvtepu16_epi64(__A),
- (__v4di)_mm256_setzero_si256());
-}
-
-
-#define _mm_rol_epi32(a, b) __extension__ ({\
- (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)-1); })
-
-#define _mm_mask_rol_epi32(w, u, a, b) __extension__ ({\
- (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
- (__v4si)(__m128i)(w), (__mmask8)(u)); })
-
-#define _mm_maskz_rol_epi32(u, a, b) __extension__ ({\
- (__m128i)__builtin_ia32_prold128_mask((__v4si)(__m128i)(a), (int)(b), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(u)); })
-
-#define _mm256_rol_epi32(a, b) __extension__ ({\
- (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1); })
-
-#define _mm256_mask_rol_epi32(w, u, a, b) __extension__ ({\
- (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
- (__v8si)(__m256i)(w), (__mmask8)(u)); })
-
-#define _mm256_maskz_rol_epi32(u, a, b) __extension__ ({\
- (__m256i)__builtin_ia32_prold256_mask((__v8si)(__m256i)(a), (int)(b), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(u)); })
-
-#define _mm_rol_epi64(a, b) __extension__ ({\
- (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)-1); })
-
-#define _mm_mask_rol_epi64(w, u, a, b) __extension__ ({\
- (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
- (__v2di)(__m128i)(w), (__mmask8)(u)); })
-
-#define _mm_maskz_rol_epi64(u, a, b) __extension__ ({\
- (__m128i)__builtin_ia32_prolq128_mask((__v2di)(__m128i)(a), (int)(b), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)(u)); })
-
-#define _mm256_rol_epi64(a, b) __extension__ ({\
- (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)-1); })
-
-#define _mm256_mask_rol_epi64(w, u, a, b) __extension__ ({\
- (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
- (__v4di)(__m256i)(w), (__mmask8)(u)); })
-
-#define _mm256_maskz_rol_epi64(u, a, b) __extension__ ({\
- (__m256i)__builtin_ia32_prolq256_mask((__v4di)(__m256i)(a), (int)(b), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(u)); })
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+ (__v2df)(__m128d)(v1), (int)(scale))
+
+#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v2df)(__m128d)(v1), (int)(scale))
+
+#define _mm_i32scatter_epi64(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v2di)(__m128i)(v1), (int)(scale))
+
+#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v2di)(__m128i)(v1), (int)(scale))
+
+#define _mm256_i32scatter_pd(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v4df)(__m256d)(v1), (int)(scale))
+
+#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v4df)(__m256d)(v1), (int)(scale))
+
+#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v4di)(__m256i)(v1), (int)(scale))
+
+#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v4di)(__m256i)(v1), (int)(scale))
+
+#define _mm_i32scatter_ps(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale))
+
+#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
+ (int)(scale))
+
+#define _mm_i32scatter_epi32(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, \
+ (__v4si)(__m128i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale))
+
+#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), \
+ (__v4si)(__m128i)(index), \
+ (__v4si)(__m128i)(v1), (int)(scale))
+
+#define _mm256_i32scatter_ps(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, \
+ (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
+ (int)(scale))
+
+#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), \
+ (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
+ (int)(scale))
+
+#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
+ __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, \
+ (__v8si)(__m256i)(index), \
+ (__v8si)(__m256i)(v1), (int)(scale))
+
+#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
+ __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), \
+ (__v8si)(__m256i)(index), \
+ (__v8si)(__m256i)(v1), (int)(scale))
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sqrt_pd(__A),
+ (__v2df)__W);
+ }
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sqrt_pd(__A),
+ (__v2df)_mm_setzero_pd());
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sqrt_pd(__A),
+ (__v4df)__W);
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sqrt_pd(__A),
+ (__v4df)_mm256_setzero_pd());
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sqrt_ps(__A),
+ (__v4sf)__W);
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sqrt_ps(__A),
+ (__v4sf)_mm_setzero_ps());
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sqrt_ps(__A),
+ (__v8sf)__W);
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sqrt_ps(__A),
+ (__v8sf)_mm256_setzero_ps());
+ }
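+/* Illustrative note: the mask_/maskz_ wrappers above and below all follow
+   one pattern -- compute the unmasked operation, then blend per lane with
+   __builtin_ia32_select*. A scalar model of the blend (a sketch for
+   exposition, not the header's code) is:
+
+     for (int k = 0; k < NLANES; ++k)
+       dst[k] = (__U >> k) & 1 ? result[k]     // mask bit set: new value
+                               : passthru[k];  // mask_: __W, maskz_: zero
+*/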
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sub_pd(__A, __B),
+ (__v2df)__W);
+ }
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sub_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sub_pd(__A, __B),
+ (__v4df)__W);
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sub_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sub_ps(__A, __B),
+ (__v4sf)__W);
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sub_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sub_ps(__A, __B),
+ (__v8sf)__W);
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sub_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
+ (__v4si)__B);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
+ (__v4si)__A);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
+ (__v4si)__I);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_permutex2var_epi32(__A, __I, __B),
+ (__v4si)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
+ (__v8si) __B);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
+ (__v8si)__A);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U,
+ __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
+ (__v8si)__I);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I,
+ __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_permutex2var_epi32(__A, __I, __B),
+ (__v8si)_mm256_setzero_si256());
+ }
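+/* Illustrative sketch (an assumption, not from this header): for the
+   4-lane epi32 form, each index selects one of the eight source lanes
+   spread across __A and __B; bit 2 of the index picks the table and the
+   low two bits pick the lane. Roughly:
+
+     for (int k = 0; k < 4; ++k) {
+       int idx = __I[k] & 7;                  // only the low 3 bits matter
+       dst[k] = idx & 4 ? __B[idx & 3] : __A[idx & 3];
+     }
+
+   The mask_ variants keep __A in masked-off lanes, the mask2_ variants
+   keep the index vector __I, and the maskz_ variants zero them. */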
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) {
+ return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
+ (__v2df)__B);
+ }
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128(__U,
+ (__v2df)_mm_permutex2var_pd(__A, __I, __B),
+ (__v2df)__A);
+ }
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128(__U,
+ (__v2df)_mm_permutex2var_pd(__A, __I, __B),
+ (__v2df)(__m128d)__I);
+ }
+
+ static __inline__ __m128d __DEFAULT_FN_ATTRS128
+ _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128(__U,
+ (__v2df)_mm_permutex2var_pd(__A, __I, __B),
+ (__v2df)_mm_setzero_pd());
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) {
+ return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
+ (__v4df)__B);
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I,
+ __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256(__U,
+ (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
+ (__v4df)__A);
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U,
+ __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256(__U,
+ (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
+ (__v4df)(__m256d)__I);
+ }
+
+ static __inline__ __m256d __DEFAULT_FN_ATTRS256
+ _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I,
+ __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256(__U,
+ (__v4df)_mm256_permutex2var_pd(__A, __I, __B),
+ (__v4df)_mm256_setzero_pd());
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) {
+ return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
+ (__v4sf)__B);
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128(__U,
+ (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
+ (__v4sf)__A);
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128(__U,
+ (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
+ (__v4sf)(__m128)__I);
+ }
+
+ static __inline__ __m128 __DEFAULT_FN_ATTRS128
+ _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128(__U,
+ (__v4sf)_mm_permutex2var_ps(__A, __I, __B),
+ (__v4sf)_mm_setzero_ps());
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) {
+ return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
+ (__v8sf) __B);
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256(__U,
+ (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
+ (__v8sf)__A);
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U,
+ __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256(__U,
+ (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
+ (__v8sf)(__m256)__I);
+ }
+
+ static __inline__ __m256 __DEFAULT_FN_ATTRS256
+ _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I,
+ __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256(__U,
+ (__v8sf)_mm256_permutex2var_ps(__A, __I, __B),
+ (__v8sf)_mm256_setzero_ps());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
+ (__v2di)__B);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
+ (__v2di)__A);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
+ (__v2di)__I);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_permutex2var_epi64(__A, __I, __B),
+ (__v2di)_mm_setzero_si128());
+ }
+
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
+ (__v4di) __B);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I,
+ __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
+ (__v4di)__A);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U,
+ __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
+ (__v4di)__I);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I,
+ __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_permutex2var_epi64(__A, __I, __B),
+ (__v4di)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi8_epi32(__A),
+ (__v4si)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi8_epi32(__A),
+ (__v4si)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi8_epi32(__A),
+ (__v8si)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi8_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi8_epi64(__A),
+ (__v2di)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi8_epi64(__A),
+ (__v2di)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi8_epi64(__A),
+ (__v4di)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi8_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi32_epi64(__X),
+ (__v2di)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi32_epi64(__X),
+ (__v2di)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi32_epi64(__X),
+ (__v4di)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi32_epi64(__X),
+ (__v4di)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi16_epi32(__A),
+ (__v4si)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi16_epi32(__A),
+ (__v4si)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi16_epi32(__A),
+ (__v8si)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi16_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi16_epi64(__A),
+ (__v2di)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi16_epi64(__A),
+ (__v2di)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi16_epi64(__A),
+ (__v4di)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi16_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
+ }
+
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu8_epi32(__A),
+ (__v4si)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu8_epi32(__A),
+ (__v4si)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu8_epi32(__A),
+ (__v8si)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu8_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu8_epi64(__A),
+ (__v2di)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu8_epi64(__A),
+ (__v2di)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu8_epi64(__A),
+ (__v4di)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu8_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu32_epi64(__X),
+ (__v2di)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu32_epi64(__X),
+ (__v2di)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu32_epi64(__X),
+ (__v4di)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu32_epi64(__X),
+ (__v4di)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu16_epi32(__A),
+ (__v4si)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu16_epi32(__A),
+ (__v4si)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu16_epi32(__A),
+ (__v8si)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu16_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu16_epi64(__A),
+ (__v2di)__W);
+ }
+
+ static __inline__ __m128i __DEFAULT_FN_ATTRS128
+ _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu16_epi64(__A),
+ (__v2di)_mm_setzero_si128());
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu16_epi64(__A),
+ (__v4di)__W);
+ }
+
+ static __inline__ __m256i __DEFAULT_FN_ATTRS256
+ _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
+ {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu16_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
+ }
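+/* Illustrative note: the cvtepi8/16/32 wrappers sign-extend and the
+   cvtepu8/16/32 wrappers zero-extend, and each reads only as many low
+   elements of __A as fit in the destination. A scalar sketch (assumption,
+   for exposition) of _mm_maskz_cvtepi8_epi32 versus _mm_maskz_cvtepu8_epi32:
+
+     for (int k = 0; k < 4; ++k)                 // low 4 bytes of __A
+       dst[k] = (__U >> k) & 1
+                  ? (int)(signed char)bytes[k]   // epi8: sign-extend
+                  : 0;                           // maskz: zeroed lane
+     // the epu8 form uses (int)(unsigned char)bytes[k] instead
+*/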
+
+
+#define _mm_rol_epi32(a, b) \
+ (__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b))
+
+#define _mm_mask_rol_epi32(w, u, a, b) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_rol_epi32((a), (b)), \
+ (__v4si)(__m128i)(w))
+
+#define _mm_maskz_rol_epi32(u, a, b) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_rol_epi32((a), (b)), \
+ (__v4si)_mm_setzero_si128())
+
+#define _mm256_rol_epi32(a, b) \
+ (__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b))
+
+#define _mm256_mask_rol_epi32(w, u, a, b) \
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_rol_epi32((a), (b)), \
+ (__v8si)(__m256i)(w))
+
+#define _mm256_maskz_rol_epi32(u, a, b) \
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_rol_epi32((a), (b)), \
+ (__v8si)_mm256_setzero_si256())
+
+#define _mm_rol_epi64(a, b) \
+ (__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b))
+
+#define _mm_mask_rol_epi64(w, u, a, b) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_rol_epi64((a), (b)), \
+ (__v2di)(__m128i)(w))
+
+#define _mm_maskz_rol_epi64(u, a, b) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_rol_epi64((a), (b)), \
+ (__v2di)_mm_setzero_si128())
+
+#define _mm256_rol_epi64(a, b) \
+ (__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b))
+
+#define _mm256_mask_rol_epi64(w, u, a, b) \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_rol_epi64((a), (b)), \
+ (__v4di)(__m256i)(w))
+
+#define _mm256_maskz_rol_epi64(u, a, b) \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_rol_epi64((a), (b)), \
+ (__v4di)_mm256_setzero_si256())
+
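+/* Note: the rol/ror forms are macros rather than inline functions,
+   presumably because the rotate count feeds an immediate-operand builtin
+   and must be a compile-time constant. Hypothetical usage (illustrative,
+   not from this header):
+
+     __m128i r = _mm_maskz_rol_epi32(0x3, x, 5);
+     // rotates each 32-bit lane of x left by 5, keeping lanes 0-1 and
+     // zeroing lanes 2-3
+*/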
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rolv_epi32 (__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_rolv_epi32(__A, __B),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prolvd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_rolv_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rolv_epi32 (__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_rolv_epi32(__A, __B),
+ (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prolvd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_rolv_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rolv_epi64 (__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_rolv_epi64(__A, __B),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prolvq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_rolv_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rolv_epi64 (__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_rolv_epi64(__A, __B),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prolvq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_rolv_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
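+/* Illustrative sketch (an assumption, not from this header): the
+   rolv/rorv forms rotate each lane by the count held in the corresponding
+   lane of __B, effectively modulo the lane width. For one 32-bit lane, a
+   UB-free scalar model:
+
+     unsigned rot = c & 31;                       // count taken mod 32
+     out = (a << rot) | (a >> ((32 - rot) & 31)); // rot == 0 yields a
+*/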
-#define _mm_ror_epi32(A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)-1); })
+#define _mm_ror_epi32(a, b) \
+ (__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b))

-#define _mm_mask_ror_epi32(W, U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
- (__v4si)(__m128i)(W), (__mmask8)(U)); })
+#define _mm_mask_ror_epi32(w, u, a, b) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_ror_epi32((a), (b)), \
+ (__v4si)(__m128i)(w))

-#define _mm_maskz_ror_epi32(U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_prord128_mask((__v4si)(__m128i)(A), (int)(B), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+#define _mm_maskz_ror_epi32(u, a, b) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_ror_epi32((a), (b)), \
+ (__v4si)_mm_setzero_si128())

-#define _mm256_ror_epi32(A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+#define _mm256_ror_epi32(a, b) \
+ (__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b))

-#define _mm256_mask_ror_epi32(W, U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
- (__v8si)(__m256i)(W), (__mmask8)(U)); })
+#define _mm256_mask_ror_epi32(w, u, a, b) \
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_ror_epi32((a), (b)), \
+ (__v8si)(__m256i)(w))

-#define _mm256_maskz_ror_epi32(U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_prord256_mask((__v8si)(__m256i)(A), (int)(B), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+#define _mm256_maskz_ror_epi32(u, a, b) \
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_ror_epi32((a), (b)), \
+ (__v8si)_mm256_setzero_si256())

-#define _mm_ror_epi64(A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)-1); })
+#define _mm_ror_epi64(a, b) \
+ (__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b))

-#define _mm_mask_ror_epi64(W, U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
- (__v2di)(__m128i)(W), (__mmask8)(U)); })
+#define _mm_mask_ror_epi64(w, u, a, b) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_ror_epi64((a), (b)), \
+ (__v2di)(__m128i)(w))

-#define _mm_maskz_ror_epi64(U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_prorq128_mask((__v2di)(__m128i)(A), (int)(B), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)(U)); })
+#define _mm_maskz_ror_epi64(u, a, b) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_ror_epi64((a), (b)), \
+ (__v2di)_mm_setzero_si128())

-#define _mm256_ror_epi64(A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+#define _mm256_ror_epi64(a, b) \
+ (__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b))

-#define _mm256_mask_ror_epi64(W, U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
- (__v4di)(__m256i)(W), (__mmask8)(U)); })
+#define _mm256_mask_ror_epi64(w, u, a, b) \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_ror_epi64((a), (b)), \
+ (__v4di)(__m256i)(w))

-#define _mm256_maskz_ror_epi64(U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_prorq256_mask((__v4di)(__m256i)(A), (int)(B), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+#define _mm256_maskz_ror_epi64(u, a, b) \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_ror_epi64((a), (b)), \
+ (__v4di)_mm256_setzero_si256())

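+/* Illustrative note: a right rotate is the mirror of the left rotate
+   above; for a 32-bit lane, ror by n is equivalent to rol by (32 - n) & 31,
+   so e.g. _mm_ror_epi32(x, 7) and _mm_rol_epi32(x, 25) should produce the
+   same result (an inference from rotate algebra, not a statement from this
+   header). */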
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4569,7 +4416,7 @@ _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4577,7 +4424,7 @@ _mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4585,7 +4432,7 @@ _mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4593,7 +4440,7 @@ _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4601,7 +4448,7 @@ _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4609,7 +4456,7 @@ _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4617,7 +4464,7 @@ _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4625,7 +4472,7 @@ _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4633,15 +4480,15 @@ _mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_sll_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4649,7 +4496,7 @@ _mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4657,7 +4504,7 @@ _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4665,15 +4512,15 @@ _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_slli_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4681,7 +4528,7 @@ _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4689,127 +4536,95 @@ _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rorv_epi32 (__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_rorv_epi32(__A, __B),
+ (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prorvd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_rorv_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rorv_epi32 (__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_rorv_epi32(__A, __B),
+ (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prorvd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_rorv_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rorv_epi64 (__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_rorv_epi64(__A, __B),
+ (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_prorvq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_rorv_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_rorv_epi64 (__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_rorv_epi64(__A, __B),
+ (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_prorvq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_rorv_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4817,15 +4632,15 @@ _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_sllv_epi64(__X, __Y),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4833,7 +4648,7 @@ _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4841,7 +4656,7 @@ _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4849,7 +4664,7 @@ _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4857,7 +4672,7 @@ _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4865,7 +4680,7 @@ _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4873,7 +4688,7 @@ _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -4881,15 +4696,15 @@ _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srlv_epi64(__X, __Y),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4897,7 +4712,7 @@ _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -4905,7 +4720,7 @@ _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4913,7 +4728,7 @@ _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4921,7 +4736,7 @@ _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4929,7 +4744,7 @@ _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4937,7 +4752,7 @@ _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4945,7 +4760,7 @@ _mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4953,7 +4768,7 @@ _mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4961,7 +4776,7 @@ _mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4969,7 +4784,7 @@ _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4977,7 +4792,7 @@ _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -4985,7 +4800,7 @@ _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -4993,7 +4808,7 @@ _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -5001,7 +4816,7 @@ _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -5009,15 +4824,15 @@ _mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srl_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -5025,7 +4840,7 @@ _mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -5033,7 +4848,7 @@ _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -5041,15 +4856,15 @@ _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srli_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -5057,7 +4872,7 @@ _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -5065,7 +4880,7 @@ _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -5073,7 +4888,7 @@ _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -5081,7 +4896,7 @@ _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -5089,7 +4904,7 @@ _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -5097,13 +4912,13 @@ _mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -5111,21 +4926,21 @@ _mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srav_epi64(__X, __Y),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -5133,7 +4948,7 @@ _mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -5141,7 +4956,7 @@ _mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
@@ -5149,7 +4964,7 @@ _mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
(__v4si) __W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
@@ -5158,7 +4973,7 @@ _mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
@@ -5166,7 +4981,7 @@ _mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
(__v8si) __W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
@@ -5174,7 +4989,7 @@ _mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
(__v8si) _mm256_setzero_si256 ());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
@@ -5183,7 +4998,7 @@ _mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
@@ -5193,7 +5008,7 @@ _mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
@@ -5202,7 +5017,7 @@ _mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
@@ -5212,7 +5027,7 @@ _mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
__U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
@@ -5220,7 +5035,7 @@ _mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
{
__builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
@@ -5228,7 +5043,7 @@ _mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
@@ -5236,15 +5051,15 @@ _mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
(__v2di) __W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
{
return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
(__v2di) __A,
- (__v2di) _mm_setzero_di ());
+ (__v2di) _mm_setzero_si128 ());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
@@ -5252,7 +5067,7 @@ _mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
(__v4di) __W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
{
return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
@@ -5260,7 +5075,7 @@ _mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
(__v4di) _mm256_setzero_si256 ());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
@@ -5269,17 +5084,17 @@ _mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
(__v2di)
- _mm_setzero_di (),
+ _mm_setzero_si128 (),
(__mmask8)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
@@ -5288,7 +5103,7 @@ _mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
@@ -5298,7 +5113,7 @@ _mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
__U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
@@ -5306,7 +5121,7 @@ _mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
{
__builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
@@ -5314,7 +5129,7 @@ _mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -5322,7 +5137,7 @@ _mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A)
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -5330,7 +5145,7 @@ _mm_maskz_movedup_pd (__mmask8 __U, __m128d __A)
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -5338,7 +5153,7 @@ _mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A)
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -5346,7 +5161,7 @@ _mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
{
return (__m128i)__builtin_ia32_selectd_128(__M,
@@ -5354,7 +5169,7 @@ _mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
(__v4si)__O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_set1_epi32( __mmask8 __M, int __A)
{
return (__m128i)__builtin_ia32_selectd_128(__M,
@@ -5362,7 +5177,7 @@ _mm_maskz_set1_epi32( __mmask8 __M, int __A)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
{
return (__m256i)__builtin_ia32_selectd_256(__M,
@@ -5370,7 +5185,7 @@ _mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
(__v8si)__O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_set1_epi32( __mmask8 __M, int __A)
{
return (__m256i)__builtin_ia32_selectd_256(__M,
@@ -5379,8 +5194,7 @@ _mm256_maskz_set1_epi32( __mmask8 __M, int __A)
}
-#ifdef __x86_64__
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
{
return (__m128i) __builtin_ia32_selectq_128(__M,
@@ -5388,7 +5202,7 @@ _mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
(__v2di) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
{
return (__m128i) __builtin_ia32_selectq_128(__M,
@@ -5396,7 +5210,7 @@ _mm_maskz_set1_epi64 (__mmask8 __M, long long __A)
(__v2di) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
{
return (__m256i) __builtin_ia32_selectq_256(__M,
@@ -5404,89 +5218,87 @@ _mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A)
(__v4di) __O) ;
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
{
return (__m256i) __builtin_ia32_selectq_256(__M,
(__v4di) _mm256_set1_epi64x(__A),
(__v4di) _mm256_setzero_si256());
}
-
-#endif
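The #ifdef __x86_64__ guard that used to bracket the masked set1_epi64 family is dropped here, so _mm_mask_set1_epi64 and friends become visible in 32-bit builds as well; they take a long long splat value, which a 32-bit target can express without 64-bit GPR support. A hedged usage sketch (the wrapper name below is a placeholder, not part of the header):

    /* Assumes compilation with -mavx512vl; after this change the masked */
    /* 64-bit broadcast is usable in 32-bit mode too.                    */
    #include <immintrin.h>
    static inline __m128i broadcast64_masked(__m128i old, __mmask8 m,
                                             long long v)
    {
      /* lanes whose mask bit is set get v; the rest keep old's contents */
      return _mm_mask_set1_epi64(old, m, v);
    }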
-#define _mm_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+#define _mm_fixupimm_pd(A, B, C, imm) \
(__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
(__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
(__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), \
- (int)(imm), (__mmask8)(U)); })
+ (int)(imm), (__mmask8)(U))
-#define _mm256_fixupimm_pd(A, B, C, imm) __extension__ ({ \
+#define _mm256_fixupimm_pd(A, B, C, imm) \
(__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
(__v4df)(__m256d)(B), \
(__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) __extension__ ({ \
+#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
(__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
(__v4df)(__m256d)(B), \
(__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) __extension__ ({ \
+#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
(__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
(__v4df)(__m256d)(B), \
(__v4di)(__m256i)(C), \
- (int)(imm), (__mmask8)(U)); })
+ (int)(imm), (__mmask8)(U))
-#define _mm_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+#define _mm_fixupimm_ps(A, B, C, imm) \
(__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
(__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
(__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_fixupimm_ps(A, B, C, imm) __extension__ ({ \
+#define _mm256_fixupimm_ps(A, B, C, imm) \
(__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) __extension__ ({ \
+#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
(__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) __extension__ ({ \
+#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
(__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
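Throughout the macro section, the GNU statement-expression wrapper __extension__ ({ ... ; }) is stripped so that each macro expands to a plain parenthesized expression. Assuming the usual motivation for this cleanup, that makes the macros usable in contexts where statement expressions are rejected (file-scope initializers, compilers without the GNU extension) without changing their value. A minimal before/after sketch with a hypothetical FOO macro:

    /* FOO_OLD/FOO_NEW and __foo_builtin are placeholders for           */
    /* illustration only.                                               */
    /* Old shape: a GNU statement expression; accepted only where the   */
    /* extension is available, and never at file scope.                 */
    #define FOO_OLD(A) __extension__ ({ (__m128d)__foo_builtin((A)); })
    /* New shape: an ordinary expression; usable anywhere.              */
    #define FOO_NEW(A) (__m128d)__foo_builtin((A))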
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
{
return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
@@ -5494,7 +5306,7 @@ _mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_load_pd (__mmask8 __U, void const *__P)
{
return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
@@ -5503,7 +5315,7 @@ _mm_maskz_load_pd (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
{
return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
@@ -5511,7 +5323,7 @@ _mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_load_pd (__mmask8 __U, void const *__P)
{
return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
@@ -5520,7 +5332,7 @@ _mm256_maskz_load_pd (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
{
return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
@@ -5528,7 +5340,7 @@ _mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_load_ps (__mmask8 __U, void const *__P)
{
return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
@@ -5537,7 +5349,7 @@ _mm_maskz_load_ps (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
{
return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
@@ -5545,7 +5357,7 @@ _mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_load_ps (__mmask8 __U, void const *__P)
{
return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
@@ -5554,7 +5366,7 @@ _mm256_maskz_load_ps (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
@@ -5562,7 +5374,7 @@ _mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P,
@@ -5571,7 +5383,7 @@ _mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
@@ -5579,7 +5391,7 @@ _mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P,
@@ -5588,7 +5400,7 @@ _mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
@@ -5596,7 +5408,7 @@ _mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P,
@@ -5605,7 +5417,7 @@ _mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
@@ -5613,7 +5425,7 @@ _mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
{
return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P,
@@ -5622,7 +5434,7 @@ _mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
{
return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
@@ -5630,7 +5442,7 @@ _mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
{
return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P,
@@ -5639,7 +5451,7 @@ _mm_maskz_loadu_pd (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
{
return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
@@ -5647,7 +5459,7 @@ _mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
{
return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P,
@@ -5656,7 +5468,7 @@ _mm256_maskz_loadu_pd (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
{
return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
@@ -5664,7 +5476,7 @@ _mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
{
return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P,
@@ -5673,7 +5485,7 @@ _mm_maskz_loadu_ps (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
{
return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
@@ -5681,7 +5493,7 @@ _mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
{
return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P,
@@ -5690,7 +5502,7 @@ _mm256_maskz_loadu_ps (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
{
__builtin_ia32_storeapd128_mask ((__v2df *) __P,
@@ -5698,7 +5510,7 @@ _mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
{
__builtin_ia32_storeapd256_mask ((__v4df *) __P,
@@ -5706,7 +5518,7 @@ _mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
{
__builtin_ia32_storeaps128_mask ((__v4sf *) __P,
@@ -5714,7 +5526,7 @@ _mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
{
__builtin_ia32_storeaps256_mask ((__v8sf *) __P,
@@ -5722,7 +5534,7 @@ _mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_storedqudi128_mask ((__v2di *) __P,
@@ -5730,7 +5542,7 @@ _mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
{
__builtin_ia32_storedqudi256_mask ((__v4di *) __P,
@@ -5738,7 +5550,7 @@ _mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_storedqusi128_mask ((__v4si *) __P,
@@ -5746,7 +5558,7 @@ _mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
{
__builtin_ia32_storedqusi256_mask ((__v8si *) __P,
@@ -5754,7 +5566,7 @@ _mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
{
__builtin_ia32_storeupd128_mask ((__v2df *) __P,
@@ -5762,7 +5574,7 @@ _mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
{
__builtin_ia32_storeupd256_mask ((__v4df *) __P,
@@ -5770,7 +5582,7 @@ _mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
{
__builtin_ia32_storeups128_mask ((__v4sf *) __P,
@@ -5778,7 +5590,7 @@ _mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
{
__builtin_ia32_storeups256_mask ((__v8sf *) __P,
@@ -5787,7 +5599,7 @@ _mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A)
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -5795,7 +5607,7 @@ _mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -5803,7 +5615,7 @@ _mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -5811,7 +5623,7 @@ _mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -5819,7 +5631,7 @@ _mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -5827,7 +5639,7 @@ _mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -5835,7 +5647,7 @@ _mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -5843,7 +5655,7 @@ _mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -5851,7 +5663,7 @@ _mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -5859,7 +5671,7 @@ _mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -5867,7 +5679,7 @@ _mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -5875,7 +5687,7 @@ _mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -5883,7 +5695,7 @@ _mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -5891,7 +5703,7 @@ _mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -5899,7 +5711,7 @@ _mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -5907,7 +5719,7 @@ _mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -5915,7 +5727,7 @@ _mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rcp14_pd (__m128d __A)
{
return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
@@ -5924,7 +5736,7 @@ _mm_rcp14_pd (__m128d __A)
(__mmask8) -1);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
@@ -5932,7 +5744,7 @@ _mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
@@ -5941,7 +5753,7 @@ _mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_rcp14_pd (__m256d __A)
{
return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
@@ -5950,7 +5762,7 @@ _mm256_rcp14_pd (__m256d __A)
(__mmask8) -1);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
@@ -5958,7 +5770,7 @@ _mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
@@ -5967,7 +5779,7 @@ _mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_rcp14_ps (__m128 __A)
{
return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
@@ -5976,7 +5788,7 @@ _mm_rcp14_ps (__m128 __A)
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
@@ -5984,7 +5796,7 @@ _mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
@@ -5993,7 +5805,7 @@ _mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_rcp14_ps (__m256 __A)
{
return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
@@ -6002,7 +5814,7 @@ _mm256_rcp14_ps (__m256 __A)
(__mmask8) -1);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
@@ -6010,7 +5822,7 @@ _mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
@@ -6019,47 +5831,47 @@ _mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-#define _mm_mask_permute_pd(W, U, X, C) __extension__ ({ \
+#define _mm_mask_permute_pd(W, U, X, C) \
(__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
(__v2df)_mm_permute_pd((X), (C)), \
- (__v2df)(__m128d)(W)); })
+ (__v2df)(__m128d)(W))
-#define _mm_maskz_permute_pd(U, X, C) __extension__ ({ \
+#define _mm_maskz_permute_pd(U, X, C) \
(__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
(__v2df)_mm_permute_pd((X), (C)), \
- (__v2df)_mm_setzero_pd()); })
+ (__v2df)_mm_setzero_pd())
-#define _mm256_mask_permute_pd(W, U, X, C) __extension__ ({ \
+#define _mm256_mask_permute_pd(W, U, X, C) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_permute_pd((X), (C)), \
- (__v4df)(__m256d)(W)); })
+ (__v4df)(__m256d)(W))
-#define _mm256_maskz_permute_pd(U, X, C) __extension__ ({ \
+#define _mm256_maskz_permute_pd(U, X, C) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_permute_pd((X), (C)), \
- (__v4df)_mm256_setzero_pd()); })
+ (__v4df)_mm256_setzero_pd())
-#define _mm_mask_permute_ps(W, U, X, C) __extension__ ({ \
+#define _mm_mask_permute_ps(W, U, X, C) \
(__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
(__v4sf)_mm_permute_ps((X), (C)), \
- (__v4sf)(__m128)(W)); })
+ (__v4sf)(__m128)(W))
-#define _mm_maskz_permute_ps(U, X, C) __extension__ ({ \
+#define _mm_maskz_permute_ps(U, X, C) \
(__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
(__v4sf)_mm_permute_ps((X), (C)), \
- (__v4sf)_mm_setzero_ps()); })
+ (__v4sf)_mm_setzero_ps())
-#define _mm256_mask_permute_ps(W, U, X, C) __extension__ ({ \
+#define _mm256_mask_permute_ps(W, U, X, C) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_permute_ps((X), (C)), \
- (__v8sf)(__m256)(W)); })
+ (__v8sf)(__m256)(W))
-#define _mm256_maskz_permute_ps(U, X, C) __extension__ ({ \
+#define _mm256_maskz_permute_ps(U, X, C) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_permute_ps((X), (C)), \
- (__v8sf)_mm256_setzero_ps()); })
+ (__v8sf)_mm256_setzero_ps())
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -6067,7 +5879,7 @@ _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
(__v2df)__W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
{
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
@@ -6075,7 +5887,7 @@ _mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
(__v2df)_mm_setzero_pd());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -6083,7 +5895,7 @@ _mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
(__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
{
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
@@ -6091,7 +5903,7 @@ _mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
(__v4df)_mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -6099,7 +5911,7 @@ _mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -6107,7 +5919,7 @@ _mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -6115,7 +5927,7 @@ _mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -6123,115 +5935,115 @@ _mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_test_epi32_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpneq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_di());
+ return _mm_cmpneq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpneq_epi32_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_di());
+ _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_test_epi32_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpneq_epi32_mask (_mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpneq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_test_epi64_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpneq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_di());
+ return _mm_cmpneq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpneq_epi64_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_di());
+ _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_test_epi64_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpneq_epi64_mask (_mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpneq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_testn_epi32_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpeq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_di());
+ return _mm_cmpeq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpeq_epi32_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_di());
+ _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_testn_epi32_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpeq_epi32_mask (_mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpeq_epi32_mask (__U, _mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_testn_epi64_mask (__m128i __A, __m128i __B)
{
- return _mm_cmpeq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_di());
+ return _mm_cmpeq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpeq_epi64_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_di());
+ _mm_setzero_si128());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_testn_epi64_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpeq_epi64_mask (_mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_cmpeq_epi64_mask (__U, _mm256_and_si256 (__A, __B),
_mm256_setzero_si256());
}
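The test/testn block above also trades _mm_setzero_di() for _mm_setzero_si128() when building the all-zero comparison operand. The semantics are unchanged: test sets mask bit i when the AND of the operands is nonzero in element i, testn when it is zero. A hedged scalar model of the 32-bit variant, under a placeholder name:

    /* Scalar model of _mm_test_epi32_mask for illustration; not part   */
    /* of the header.                                                   */
    #include <stdint.h>
    static inline uint8_t test_epi32_model(const int32_t a[4],
                                           const int32_t b[4])
    {
      uint8_t m = 0;
      for (int i = 0; i < 4; ++i)
        if (a[i] & b[i])            /* nonzero AND -> mask bit set */
          m |= (uint8_t)(1u << i);
      return m;                     /* testn would set the complement */
    }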
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6239,7 +6051,7 @@ _mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6247,7 +6059,7 @@ _mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6255,7 +6067,7 @@ _mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6263,7 +6075,7 @@ _mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -6271,15 +6083,15 @@ _mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_unpackhi_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6287,7 +6099,7 @@ _mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6295,7 +6107,7 @@ _mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6303,7 +6115,7 @@ _mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6311,7 +6123,7 @@ _mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6319,7 +6131,7 @@ _mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6327,7 +6139,7 @@ _mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -6335,15 +6147,15 @@ _mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_unpacklo_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6351,7 +6163,7 @@ _mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6359,7 +6171,7 @@ _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6367,7 +6179,7 @@ _mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6375,7 +6187,7 @@ _mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6383,7 +6195,7 @@ _mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6391,7 +6203,7 @@ _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6399,7 +6211,7 @@ _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
(__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
@@ -6407,7 +6219,7 @@ _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
(__v4si)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6415,7 +6227,7 @@ _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
(__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
@@ -6423,13 +6235,13 @@ _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sra_epi64(__m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -6437,21 +6249,21 @@ _mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_sra_epi64(__A, __B),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi64(__m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6459,7 +6271,7 @@ _mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6467,13 +6279,13 @@ _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
(__v4di)_mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srai_epi64(__m128i __A, int __imm)
{
return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
@@ -6481,21 +6293,21 @@ _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
(__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
(__v2di)_mm_srai_epi64(__A, __imm),
- (__v2di)_mm_setzero_di());
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi64(__m256i __A, int __imm)
{
return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6503,7 +6315,7 @@ _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
(__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
@@ -6511,198 +6323,178 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
(__v4di)_mm256_setzero_si256());
}
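The epi64 shift family above is new with AVX-512VL; before it, the SSE/AVX instruction set had no 64-bit arithmetic right shift at all. A small usage sketch, assuming a target compiled with -mavx512f -mavx512vl:

#include <immintrin.h>

// Arithmetic shifts replicate the sign bit, so negative lanes stay
// negative; _mm_srli_epi64 would shift in zeros instead.
static inline __m128i srai_epi64_demo(void) {
  __m128i v = _mm_set_epi64x(-8, 8);
  return _mm_srai_epi64(v, 2);  // high lane: -8 >> 2 = -2, low lane: 8 >> 2 = 2
}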
-#define _mm_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+#define _mm_ternarylogic_epi32(A, B, C, imm) \
(__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
(__v4si)(__m128i)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
(__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
(__v4si)(__m128i)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
(__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
(__v4si)(__m128i)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
+#define _mm256_ternarylogic_epi32(A, B, C, imm) \
(__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
(__v8si)(__m256i)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) __extension__ ({ \
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
(__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
(__v8si)(__m256i)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) __extension__ ({ \
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
(__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
(__v8si)(__m256i)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+#define _mm_ternarylogic_epi64(A, B, C, imm) \
(__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
(__v2di)(__m128i)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
(__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
(__v2di)(__m128i)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
(__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
(__v2di)(__m128i)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_ternarylogic_epi64(A, B, C, imm) __extension__ ({ \
+#define _mm256_ternarylogic_epi64(A, B, C, imm) \
(__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
(__v4di)(__m256i)(B), \
(__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) __extension__ ({ \
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
(__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
(__v4di)(__m256i)(B), \
(__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) __extension__ ({ \
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
(__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
(__v4di)(__m256i)(B), \
(__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
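For reference, the ternarylogic imm8 is an 8-entry truth table applied bitwise: the table index for each bit position is (bit of A << 2) | (bit of B << 1) | bit of C, and the result bit is bit 'index' of imm. A usage sketch under the same -mavx512vl assumption; 0x96 encodes a three-way XOR:

#include <immintrin.h>

// 0x96 = 0b10010110 sets exactly the truth-table entries whose index has
// an odd number of one bits, i.e. A ^ B ^ C in a single instruction.
static inline __m128i xor3_sketch(__m128i a, __m128i b, __m128i c) {
  return _mm_ternarylogic_epi32(a, b, c, 0x96);
}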
-#define _mm256_shuffle_f32x4(A, B, imm) __extension__ ({ \
- (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 4), \
- 1 + ((((imm) >> 0) & 0x1) * 4), \
- 2 + ((((imm) >> 0) & 0x1) * 4), \
- 3 + ((((imm) >> 0) & 0x1) * 4), \
- 8 + ((((imm) >> 1) & 0x1) * 4), \
- 9 + ((((imm) >> 1) & 0x1) * 4), \
- 10 + ((((imm) >> 1) & 0x1) * 4), \
- 11 + ((((imm) >> 1) & 0x1) * 4)); })
+#define _mm256_shuffle_f32x4(A, B, imm) \
+ (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(imm))
-#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
- (__v8sf)(__m256)(W)); })
+ (__v8sf)(__m256)(W))
-#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
- (__v8sf)_mm256_setzero_ps()); })
+ (__v8sf)_mm256_setzero_ps())
-#define _mm256_shuffle_f64x2(A, B, imm) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 2), \
- 1 + ((((imm) >> 0) & 0x1) * 2), \
- 4 + ((((imm) >> 1) & 0x1) * 2), \
- 5 + ((((imm) >> 1) & 0x1) * 2)); })
+#define _mm256_shuffle_f64x2(A, B, imm) \
+ (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm))
-#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
- (__v4df)(__m256)(W)); })
+ (__v4df)(__m256d)(W))
-#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
- (__v4df)_mm256_setzero_pd()); })
+ (__v4df)_mm256_setzero_pd())
-#define _mm256_shuffle_i32x4(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 2), \
- 1 + ((((imm) >> 0) & 0x1) * 2), \
- 4 + ((((imm) >> 1) & 0x1) * 2), \
- 5 + ((((imm) >> 1) & 0x1) * 2)); })
+#define _mm256_shuffle_i32x4(A, B, imm) \
+ (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm))
-#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
- (__v8si)(__m256)(W)); })
+ (__v8si)(__m256i)(W))
-#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
- (__v8si)_mm256_setzero_si256()); })
+ (__v8si)_mm256_setzero_si256())
-#define _mm256_shuffle_i64x2(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 2), \
- 1 + ((((imm) >> 0) & 0x1) * 2), \
- 4 + ((((imm) >> 1) & 0x1) * 2), \
- 5 + ((((imm) >> 1) & 0x1) * 2)); })
+#define _mm256_shuffle_i64x2(A, B, imm) \
+ (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm))
-#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
- (__v4di)(__m256)(W)); })
+ (__v4di)(__m256i)(W))
-#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
- (__v4di)_mm256_setzero_si256()); })
+ (__v4di)_mm256_setzero_si256())
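As the removed shufflevector expansions make explicit, only two imm bits matter in these 256-bit lane shuffles: bit 0 picks which 128-bit half of A becomes the low lane of the result, and bit 1 picks which half of B becomes the high lane. A usage sketch, again assuming -mavx512vl:

#include <immintrin.h>

// imm = 0x3 selects the high half of both sources, so in 64-bit lanes the
// result is { A[2], A[3], B[2], B[3] }.
static inline __m256i take_high_halves(__m256i a, __m256i b) {
  return _mm256_shuffle_i64x2(a, b, 0x3);
}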
-#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+#define _mm_mask_shuffle_pd(W, U, A, B, M) \
(__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
(__v2df)_mm_shuffle_pd((A), (B), (M)), \
- (__v2df)(__m128d)(W)); })
+ (__v2df)(__m128d)(W))
-#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+#define _mm_maskz_shuffle_pd(U, A, B, M) \
(__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
(__v2df)_mm_shuffle_pd((A), (B), (M)), \
- (__v2df)_mm_setzero_pd()); })
+ (__v2df)_mm_setzero_pd())
-#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_shuffle_pd((A), (B), (M)), \
- (__v4df)(__m256d)(W)); })
+ (__v4df)(__m256d)(W))
-#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+#define _mm256_maskz_shuffle_pd(U, A, B, M) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_shuffle_pd((A), (B), (M)), \
- (__v4df)_mm256_setzero_pd()); })
+ (__v4df)_mm256_setzero_pd())
-#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+#define _mm_mask_shuffle_ps(W, U, A, B, M) \
(__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
(__v4sf)_mm_shuffle_ps((A), (B), (M)), \
- (__v4sf)(__m128)(W)); })
+ (__v4sf)(__m128)(W))
-#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+#define _mm_maskz_shuffle_ps(U, A, B, M) \
(__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
(__v4sf)_mm_shuffle_ps((A), (B), (M)), \
- (__v4sf)_mm_setzero_ps()); })
+ (__v4sf)_mm_setzero_ps())
-#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
- (__v8sf)(__m256)(W)); })
+ (__v8sf)(__m256)(W))
-#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+#define _mm256_maskz_shuffle_ps(U, A, B, M) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
- (__v8sf)_mm256_setzero_ps()); })
+ (__v8sf)_mm256_setzero_ps())
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rsqrt14_pd (__m128d __A)
{
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
@@ -6711,7 +6503,7 @@ _mm_rsqrt14_pd (__m128d __A)
(__mmask8) -1);
}
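rsqrt14 returns an approximation of 1/sqrt(x) with relative error bounded by 2^-14. When more precision is needed, one Newton-Raphson step roughly doubles the number of correct bits; a sketch assuming -mavx512vl:

#include <immintrin.h>

// One refinement step: y' = 0.5 * y * (3 - x*y*y), starting from the
// hardware estimate y ~= 1/sqrt(x).
static inline __m128d rsqrt14_refined(__m128d x) {
  __m128d y = _mm_rsqrt14_pd(x);
  __m128d xyy = _mm_mul_pd(x, _mm_mul_pd(y, y));
  return _mm_mul_pd(_mm_mul_pd(_mm_set1_pd(0.5), y),
                    _mm_sub_pd(_mm_set1_pd(3.0), xyy));
}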
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
@@ -6719,7 +6511,7 @@ _mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A)
(__mmask8) __U);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
@@ -6728,7 +6520,7 @@ _mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_rsqrt14_pd (__m256d __A)
{
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
@@ -6737,7 +6529,7 @@ _mm256_rsqrt14_pd (__m256d __A)
(__mmask8) -1);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
@@ -6745,7 +6537,7 @@ _mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A)
(__mmask8) __U);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
@@ -6754,7 +6546,7 @@ _mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_rsqrt14_ps (__m128 __A)
{
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
@@ -6763,7 +6555,7 @@ _mm_rsqrt14_ps (__m128 __A)
(__mmask8) -1);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
@@ -6771,7 +6563,7 @@ _mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
@@ -6780,7 +6572,7 @@ _mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_rsqrt14_ps (__m256 __A)
{
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
@@ -6789,7 +6581,7 @@ _mm256_rsqrt14_ps (__m256 __A)
(__mmask8) -1);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
@@ -6797,7 +6589,7 @@ _mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
@@ -6806,14 +6598,14 @@ _mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcast_f32x4(__m128 __A)
{
return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
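The shufflevector index list 0,1,2,3,0,1,2,3 simply replicates the 128-bit source into both halves of the 256-bit result. A usage sketch, assuming -mavx512vl:

#include <immintrin.h>

// {a,b,c,d} broadcast to {a,b,c,d,a,b,c,d}.
static inline __m256 broadcast_demo(__m128 x) {
  return _mm256_broadcast_f32x4(x);
}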
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
@@ -6821,7 +6613,7 @@ _mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
(__v8sf)__O);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
@@ -6829,14 +6621,14 @@ _mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcast_i32x4(__m128i __A)
{
return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
0, 1, 2, 3, 0, 1, 2, 3);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
@@ -6844,7 +6636,7 @@ _mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
(__v8si)__O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
@@ -6852,7 +6644,7 @@ _mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
(__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
{
return (__m256d)__builtin_ia32_selectpd_256(__M,
@@ -6860,7 +6652,7 @@ _mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A)
(__v4df) __O);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
{
return (__m256d)__builtin_ia32_selectpd_256(__M,
@@ -6868,7 +6660,7 @@ _mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
(__v4df) _mm256_setzero_pd());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
{
return (__m128)__builtin_ia32_selectps_128(__M,
@@ -6876,7 +6668,7 @@ _mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A)
(__v4sf) __O);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
{
return (__m128)__builtin_ia32_selectps_128(__M,
@@ -6884,7 +6676,7 @@ _mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
(__v4sf) _mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
{
return (__m256)__builtin_ia32_selectps_256(__M,
@@ -6892,7 +6684,7 @@ _mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A)
(__v8sf) __O);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
{
return (__m256)__builtin_ia32_selectps_256(__M,
@@ -6900,7 +6692,7 @@ _mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A)
(__v8sf) _mm256_setzero_ps());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectd_128(__M,
@@ -6908,7 +6700,7 @@ _mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
(__v4si) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectd_128(__M,
@@ -6916,7 +6708,7 @@ _mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
(__v4si) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectd_256(__M,
@@ -6924,7 +6716,7 @@ _mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A)
(__v8si) __O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectd_256(__M,
@@ -6932,7 +6724,7 @@ _mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A)
(__v8si) _mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectq_128(__M,
@@ -6940,7 +6732,7 @@ _mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A)
(__v2di) __O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
{
return (__m128i)__builtin_ia32_selectq_128(__M,
@@ -6948,7 +6740,7 @@ _mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
(__v2di) _mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectq_256(__M,
@@ -6956,7 +6748,7 @@ _mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A)
(__v4di) __O);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
{
return (__m256i)__builtin_ia32_selectq_256(__M,
@@ -6964,7 +6756,7 @@ _mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
(__v4di) _mm256_setzero_si256());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtsepi32_epi8 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
@@ -6972,14 +6764,14 @@ _mm_cvtsepi32_epi8 (__m128i __A)
(__mmask8) -1);
}
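This header carries three narrowing flavours that differ only in how out-of-range lanes are handled: cvtsepi* saturates as signed, cvtusepi* saturates as unsigned, and the plain cvtepi* forms further down truncate. A scalar sketch of the signed-saturating case used here:

// Scalar model of one lane of _mm_cvtsepi32_epi8: values outside the
// signed 8-bit range clamp to the nearest bound (300 -> 127, -300 -> -128);
// the unsigned flavour clamps to [0, 255] and plain cvtepi32_epi8 keeps
// only the low byte (300 -> 44).
static inline signed char sat_s8_sketch(int x) {
  return x > 127 ? 127 : (x < -128 ? -128 : (signed char)x);
}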
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
@@ -6987,13 +6779,13 @@ _mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
}
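The storeu variants combine the narrowing conversion with a masked, unaligned store: only the destination bytes whose mask bit is set are written, and the rest of memory is left untouched. A usage sketch, assuming -mavx512vl:

#include <immintrin.h>

// Saturate four 32-bit lanes to bytes but store only the low two lanes
// (mask 0x3); dst needs no particular alignment.
static inline void store_low_two(signed char *dst, __m128i v) {
  _mm_mask_cvtsepi32_storeu_epi8(dst, 0x3, v);
}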
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi32_epi8 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
@@ -7001,14 +6793,14 @@ _mm256_cvtsepi32_epi8 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
@@ -7016,13 +6808,13 @@ _mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtsepi32_epi16 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
@@ -7030,7 +6822,7 @@ _mm_cvtsepi32_epi16 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
@@ -7038,7 +6830,7 @@ _mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
@@ -7046,13 +6838,13 @@ _mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi32_epi16 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
@@ -7060,14 +6852,14 @@ _mm256_cvtsepi32_epi16 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
@@ -7075,13 +6867,13 @@ _mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtsepi64_epi8 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
@@ -7089,14 +6881,14 @@ _mm_cvtsepi64_epi8 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
@@ -7104,13 +6896,13 @@ _mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi64_epi8 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
@@ -7118,14 +6910,14 @@ _mm256_cvtsepi64_epi8 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
@@ -7133,13 +6925,13 @@ _mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtsepi64_epi32 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
@@ -7147,14 +6939,14 @@ _mm_cvtsepi64_epi32 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
(__v4si) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
@@ -7162,13 +6954,13 @@ _mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi64_epi32 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
@@ -7176,7 +6968,7 @@ _mm256_cvtsepi64_epi32 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
@@ -7184,7 +6976,7 @@ _mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
@@ -7192,13 +6984,13 @@ _mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtsepi64_epi16 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
@@ -7206,14 +6998,14 @@ _mm_cvtsepi64_epi16 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
@@ -7221,13 +7013,13 @@ _mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtsepi64_epi16 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
@@ -7235,14 +7027,14 @@ _mm256_cvtsepi64_epi16 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
@@ -7250,13 +7042,13 @@ _mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtusepi32_epi8 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
@@ -7264,7 +7056,7 @@ _mm_cvtusepi32_epi8 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
@@ -7272,7 +7064,7 @@ _mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
@@ -7280,13 +7072,13 @@ _mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtusepi32_epi8 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
@@ -7294,7 +7086,7 @@ _mm256_cvtusepi32_epi8 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
@@ -7302,7 +7094,7 @@ _mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
@@ -7310,13 +7102,13 @@ _mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtusepi32_epi16 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
@@ -7324,14 +7116,14 @@ _mm_cvtusepi32_epi16 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
@@ -7339,13 +7131,13 @@ _mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtusepi32_epi16 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
@@ -7353,14 +7145,14 @@ _mm256_cvtusepi32_epi16 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
@@ -7368,13 +7160,13 @@ _mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtusepi64_epi8 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
@@ -7382,7 +7174,7 @@ _mm_cvtusepi64_epi8 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
@@ -7390,7 +7182,7 @@ _mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
@@ -7398,13 +7190,13 @@ _mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtusepi64_epi8 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
@@ -7412,7 +7204,7 @@ _mm256_cvtusepi64_epi8 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
@@ -7420,7 +7212,7 @@ _mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
@@ -7428,13 +7220,13 @@ _mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtusepi64_epi32 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
@@ -7442,14 +7234,14 @@ _mm_cvtusepi64_epi32 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
(__v4si) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
@@ -7457,13 +7249,13 @@ _mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtusepi64_epi32 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
@@ -7471,14 +7263,14 @@ _mm256_cvtusepi64_epi32 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
(__v4si) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
@@ -7486,13 +7278,13 @@ _mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtusepi64_epi16 (__m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
@@ -7500,14 +7292,14 @@ _mm_cvtusepi64_epi16 (__m128i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
@@ -7515,13 +7307,13 @@ _mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtusepi64_epi16 (__m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
@@ -7529,14 +7321,14 @@ _mm256_cvtusepi64_epi16 (__m256i __A)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
@@ -7544,28 +7336,28 @@ _mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
{
- return __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
+ __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi32_epi8 (__m128i __A)
{
- return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
- (__v16qi)_mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
+ 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
}
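The rewrite above drops the masked builtin for the unmasked case in favour of generic vector IR: __builtin_convertvector truncates each 32-bit lane to 8 bits, and the shufflevector pads the upper twelve bytes with lanes taken from the zero vector. A scalar sketch of the resulting semantics:

// Scalar model of the new _mm_cvtepi32_epi8 body: the low byte of each of
// the four 32-bit lanes lands in bytes 0..3, bytes 4..15 are zero.
static inline void cvtepi32_epi8_sketch(signed char out[16], const int in[4]) {
  for (int i = 0; i < 16; ++i)
    out[i] = (i < 4) ? (signed char)in[i] : 0;
}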
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
@@ -7574,28 +7366,29 @@ _mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi8 (__m256i __A)
{
- return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
- (__v16qi)_mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v8si)__A, __v8qi),
+ (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
@@ -7603,28 +7396,28 @@ _mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi32_epi16 (__m128i __A)
{
- return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
+ 2, 3, 4, 5, 6, 7);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
@@ -7632,28 +7425,26 @@ _mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi16 (__m256i __A)
{
- return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
- (__v8hi)_mm_setzero_si128 (),
- (__mmask8) -1);
+ return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
@@ -7661,28 +7452,28 @@ _mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi64_epi8 (__m128i __A)
{
- return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
- (__v16qi) _mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
@@ -7690,28 +7481,28 @@ _mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_epi8 (__m256i __A)
{
- return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
- (__v16qi) _mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
+ 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
(__v16qi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
@@ -7719,28 +7510,27 @@ _mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi64_epi32 (__m128i __A)
{
- return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
- (__v4si)_mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
(__v4si) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
@@ -7748,50 +7538,49 @@ _mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_epi32 (__m256i __A)
{
- return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
- (__v4si) _mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A)
{
- return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
- (__v4si) __O, __M);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm256_cvtepi64_epi32(__A),
+ (__v4si)__O);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A)
{
- return (__m128i) __builtin_ia32_pmovqd256_mask ((__v4di) __A,
- (__v4si) _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm256_cvtepi64_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
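
The masked forms above now compute the unmasked result and blend it against __O or zero with __builtin_ia32_selectd_128, instead of threading the mask through a dedicated *_mask builtin. Per 32-bit lane the blend behaves like this scalar model (illustration only):

  /* Per-lane model of the selectd_128 blend (illustration only). */
  static inline void select_epi32(int dst[4], unsigned char mask,
                                  const int src[4], const int fallback[4]) {
    for (int i = 0; i < 4; ++i)
      dst[i] = ((mask >> i) & 1) ? src[i] : fallback[i];
  }
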
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtepi64_epi16 (__m128i __A)
{
- return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
- (__v8hi) _mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
+ 3, 3, 3, 3);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
@@ -7799,7 +7588,7 @@ _mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
__M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
{
return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
@@ -7807,28 +7596,28 @@ _mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A)
{
__builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_epi16 (__m256i __A)
{
- return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
- (__v8hi)_mm_undefined_si128(),
- (__mmask8) -1);
+ return (__m128i)__builtin_shufflevector(
+ __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
+ 2, 3, 4, 5, 6, 7);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
(__v8hi) __O, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
{
return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
@@ -7836,479 +7625,410 @@ _mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A)
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-#define _mm256_extractf32x4_ps(A, imm) __extension__ ({ \
- (__m128)__builtin_shufflevector((__v8sf)(__m256)(A), \
- (__v8sf)_mm256_undefined_ps(), \
- ((imm) & 1) ? 4 : 0, \
- ((imm) & 1) ? 5 : 1, \
- ((imm) & 1) ? 6 : 2, \
- ((imm) & 1) ? 7 : 3); })
-
-#define _mm256_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({ \
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm256_extractf32x4_ps((A), (imm)), \
- (__v4sf)(W)); })
-
-#define _mm256_maskz_extractf32x4_ps(U, A, imm) __extension__ ({ \
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm256_extractf32x4_ps((A), (imm)), \
- (__v4sf)_mm_setzero_ps()); })
-
-#define _mm256_extracti32x4_epi32(A, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v8si)(__m256)(A), \
- (__v8si)_mm256_undefined_si256(), \
- ((imm) & 1) ? 4 : 0, \
- ((imm) & 1) ? 5 : 1, \
- ((imm) & 1) ? 6 : 2, \
- ((imm) & 1) ? 7 : 3); })
-
-#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm256_extracti32x4_epi32((A), (imm)), \
- (__v4si)(W)); })
-
-#define _mm256_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm256_extracti32x4_epi32((A), (imm)), \
- (__v4si)_mm_setzero_si128()); })
-
-#define _mm256_insertf32x4(A, B, imm) __extension__ ({ \
- (__m256)__builtin_shufflevector((__v8sf)(A), \
- (__v8sf)_mm256_castps128_ps256((__m128)(B)), \
- ((imm) & 0x1) ? 0 : 8, \
- ((imm) & 0x1) ? 1 : 9, \
- ((imm) & 0x1) ? 2 : 10, \
- ((imm) & 0x1) ? 3 : 11, \
- ((imm) & 0x1) ? 8 : 4, \
- ((imm) & 0x1) ? 9 : 5, \
- ((imm) & 0x1) ? 10 : 6, \
- ((imm) & 0x1) ? 11 : 7); })
-
-#define _mm256_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_extractf32x4_ps(A, imm) \
+ (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1)
+
+#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
+ (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U))
+
+#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
+ (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U))
+
+#define _mm256_extracti32x4_epi32(A, imm) \
+ (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1)
+
+#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
+ (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U))
+
+#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
+ (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U))
+
+#define _mm256_insertf32x4(A, B, imm) \
+ (__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
+ (__v4sf)(__m128)(B), (int)(imm))
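
The extract and insert forms stay macros because imm selects a 128-bit half at compile time and must reach the builtin as an integer constant expression; only the GNU statement-expression wrapper is dropped. Usage is unchanged, for example (illustrative fragment, variable names are made up):

  __m256 acc = _mm256_set1_ps(1.0f);
  __m128 hi  = _mm256_extractf32x4_ps(acc, 1);   /* upper 128 bits of acc */
  __m256 r   = _mm256_insertf32x4(acc, hi, 0);   /* place them in the low half */
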
+
+#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
- (__v8sf)(W)); })
+ (__v8sf)(__m256)(W))
-#define _mm256_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_insertf32x4(U, A, B, imm) \
(__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
- (__v8sf)_mm256_setzero_ps()); })
-
-#define _mm256_inserti32x4(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v8si)(A), \
- (__v8si)_mm256_castsi128_si256((__m128i)(B)), \
- ((imm) & 0x1) ? 0 : 8, \
- ((imm) & 0x1) ? 1 : 9, \
- ((imm) & 0x1) ? 2 : 10, \
- ((imm) & 0x1) ? 3 : 11, \
- ((imm) & 0x1) ? 8 : 4, \
- ((imm) & 0x1) ? 9 : 5, \
- ((imm) & 0x1) ? 10 : 6, \
- ((imm) & 0x1) ? 11 : 7); })
-
-#define _mm256_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
+ (__v8sf)_mm256_setzero_ps())
+
+#define _mm256_inserti32x4(A, B, imm) \
+ (__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm))
+
+#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_inserti32x4((A), (B), (imm)), \
- (__v8si)(W)); })
+ (__v8si)(__m256i)(W))
-#define _mm256_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_inserti32x4(U, A, B, imm) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_inserti32x4((A), (B), (imm)), \
- (__v8si)_mm256_setzero_si256()); })
+ (__v8si)_mm256_setzero_si256())
-#define _mm_getmant_pd(A, B, C) __extension__({\
+#define _mm_getmant_pd(A, B, C) \
(__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
(int)(((C)<<2) | (B)), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_getmant_pd(W, U, A, B, C) __extension__({\
+#define _mm_mask_getmant_pd(W, U, A, B, C) \
(__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
(int)(((C)<<2) | (B)), \
(__v2df)(__m128d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_getmant_pd(U, A, B, C) __extension__({\
+#define _mm_maskz_getmant_pd(U, A, B, C) \
(__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
(int)(((C)<<2) | (B)), \
(__v2df)_mm_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_getmant_pd(A, B, C) __extension__ ({ \
+#define _mm256_getmant_pd(A, B, C) \
(__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
(int)(((C)<<2) | (B)), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_getmant_pd(W, U, A, B, C) __extension__ ({ \
+#define _mm256_mask_getmant_pd(W, U, A, B, C) \
(__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
(int)(((C)<<2) | (B)), \
(__v4df)(__m256d)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_getmant_pd(U, A, B, C) __extension__ ({ \
+#define _mm256_maskz_getmant_pd(U, A, B, C) \
(__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
(int)(((C)<<2) | (B)), \
(__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_getmant_ps(A, B, C) __extension__ ({ \
+#define _mm_getmant_ps(A, B, C) \
(__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
(int)(((C)<<2) | (B)), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+#define _mm_mask_getmant_ps(W, U, A, B, C) \
(__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
(int)(((C)<<2) | (B)), \
(__v4sf)(__m128)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+#define _mm_maskz_getmant_ps(U, A, B, C) \
(__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
(int)(((C)<<2) | (B)), \
(__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_getmant_ps(A, B, C) __extension__ ({ \
+#define _mm256_getmant_ps(A, B, C) \
(__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
(int)(((C)<<2) | (B)), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1); })
+ (__mmask8)-1)
-#define _mm256_mask_getmant_ps(W, U, A, B, C) __extension__ ({ \
+#define _mm256_mask_getmant_ps(W, U, A, B, C) \
(__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
(int)(((C)<<2) | (B)), \
(__v8sf)(__m256)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_getmant_ps(U, A, B, C) __extension__ ({ \
+#define _mm256_maskz_getmant_ps(U, A, B, C) \
(__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
(int)(((C)<<2) | (B)), \
(__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
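
Every getmant variant packs its two controls into one immediate: the normalization interval B lands in bits 1:0 and the sign control C in bits 3:2, hence ((C)<<2) | (B). A worked example with the _MM_MANT_* enumerators from avx512fintrin.h (enumerator values quoted from memory, so treat as a sketch):

  __m128d x  = _mm_set1_pd(-6.0);
  /* _MM_MANT_NORM_1_2 == 0, _MM_MANT_SIGN_src == 0   ->  imm == 0x0 */
  __m128d m0 = _mm_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
  /* _MM_MANT_NORM_p5_1 == 2, _MM_MANT_SIGN_zero == 1 ->  imm == (1<<2)|2 == 0x6 */
  __m128d m1 = _mm_getmant_pd(x, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_zero);
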
-#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
(__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
(double const *)(addr), \
(__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
(long long const *)(addr), \
(__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
(__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
(double const *)(addr), \
(__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
(long long const *)(addr), \
(__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
(__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
(float const *)(addr), \
(__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
(int const *)(addr), \
(__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
(__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
(float const *)(addr), \
(__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
(int const *)(addr), \
(__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
(__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
(double const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
(long long const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
(__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
(double const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
(long long const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
(__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
(float const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
(int const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
(__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
(float const *)(addr), \
(__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
-#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) __extension__ ({\
+#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
(int const *)(addr), \
(__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale)); })
+ (__mmask8)(mask), (int)(scale))
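
The gather wrappers lose only the statement-expression wrapper; behavior is untouched. scale must still be a constant 1, 2, 4, or 8, and lanes whose mask bit is clear keep the v1_old value. A usage sketch (hypothetical table and indices):

  float   table[64] = {0};
  __m128i idx = _mm_set_epi32(12, 8, 4, 0);   /* lanes 0..3 hold 0, 4, 8, 12 */
  __m128  old = _mm_setzero_ps();
  /* gathers table[0], table[4], table[8] into lanes 0-2; lane 3 keeps old */
  __m128  g   = _mm_mmask_i32gather_ps(old, 0x7, idx, table, 4);
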
-#define _mm256_permutex_pd(X, C) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(X), \
- (__v4df)_mm256_undefined_pd(), \
- ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
- ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+#define _mm256_permutex_pd(X, C) \
+ (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C))
-#define _mm256_mask_permutex_pd(W, U, X, C) __extension__ ({ \
+#define _mm256_mask_permutex_pd(W, U, X, C) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_permutex_pd((X), (C)), \
- (__v4df)(__m256d)(W)); })
+ (__v4df)(__m256d)(W))
-#define _mm256_maskz_permutex_pd(U, X, C) __extension__ ({ \
+#define _mm256_maskz_permutex_pd(U, X, C) \
(__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_permutex_pd((X), (C)), \
- (__v4df)_mm256_setzero_pd()); })
+ (__v4df)_mm256_setzero_pd())
-#define _mm256_permutex_epi64(X, C) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(X), \
- (__v4di)_mm256_undefined_si256(), \
- ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
- ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+#define _mm256_permutex_epi64(X, C) \
+ (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
-#define _mm256_mask_permutex_epi64(W, U, X, C) __extension__ ({ \
+#define _mm256_mask_permutex_epi64(W, U, X, C) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_permutex_epi64((X), (C)), \
- (__v4di)(__m256i)(W)); })
+ (__v4di)(__m256i)(W))
-#define _mm256_maskz_permutex_epi64(U, X, C) __extension__ ({ \
+#define _mm256_maskz_permutex_epi64(U, X, C) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_permutex_epi64((X), (C)), \
- (__v4di)_mm256_setzero_si256()); })
+ (__v4di)_mm256_setzero_si256())
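
_mm256_permutex_pd and _mm256_permutex_epi64 now lower to the dedicated permdf256/permdi256 immediate builtins, but the selection rule is the one the removed shufflevector spelled out: result lane i is src[(C >> 2*i) & 3]. For instance (sketch):

  __m256i v = _mm256_set_epi64x(3, 2, 1, 0);    /* lanes 0..3 hold 0,1,2,3 */
  __m256i r = _mm256_permutex_epi64(v, 0x1B);   /* 0x1B -> indices 3,2,1,0 */
  /* r holds v's lanes in reverse order: 3, 2, 1, 0 */
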
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
{
- return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
- (__v4di) __X,
- (__v4df) _mm256_undefined_si256 (),
- (__mmask8) -1);
+ return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X,
__m256d __Y)
{
- return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
- (__v4di) __X,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_permutexvar_pd(__X, __Y),
+ (__v4df)__W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y)
{
- return (__m256d) __builtin_ia32_permvardf256_mask ((__v4df) __Y,
- (__v4di) __X,
- (__v4df) _mm256_setzero_pd (),
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_permutexvar_pd(__X, __Y),
+ (__v4df)_mm256_setzero_pd());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_permutexvar_epi64 (__m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
- (__v4di) __X,
- (__v4di) _mm256_setzero_si256 (),
- (__mmask8) __M);
+ return (__m256i)__builtin_ia32_permvardi256((__v4di)__Y, (__v4di)__X);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
- (__v4di) __X,
- (__v4di) _mm256_undefined_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_permutexvar_epi64(__X, __Y),
+ (__v4di)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X,
__m256i __Y)
{
- return (__m256i) __builtin_ia32_permvardi256_mask ((__v4di) __Y,
- (__v4di) __X,
- (__v4di) __W,
- __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_permutexvar_epi64(__X, __Y),
+ (__v4di)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_permutexvar_ps (__m256 __W, __mmask8 __U, __m256i __X,
- __m256 __Y)
-{
- return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
- (__v8si) __X,
- (__v8sf) __W,
- (__mmask8) __U);
-}
+#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_permutexvar_ps (__mmask8 __U, __m256i __X, __m256 __Y)
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
{
- return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
- (__v8si) __X,
- (__v8sf) _mm256_setzero_ps (),
- (__mmask8) __U);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_permutexvar_ps(__X, __Y),
+ (__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_permutexvar_ps (__m256i __X, __m256 __Y)
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
{
- return (__m256) __builtin_ia32_permvarsf256_mask ((__v8sf) __Y,
- (__v8si) __X,
- (__v8sf) _mm256_undefined_si256 (),
- (__mmask8) -1);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_permutexvar_ps(__X, __Y),
+ (__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_permutexvar_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
-{
- return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
- (__v8si) __X,
- (__v8si) _mm256_setzero_si256 (),
- __M);
-}
+#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))
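
With the masked builtins gone, the unmasked permutexvar forms become plain aliases of the AVX2 intrinsics; only the operand order differs, since permutexvar takes the index vector first and permutevar8x32 takes it second. Equivalence sketch (illustration only):

  __m256i idx = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);  /* reversing permutation */
  __m256  a   = _mm256_set_ps(7, 6, 5, 4, 3, 2, 1, 0);
  __m256  r1  = _mm256_permutexvar_ps(idx, a);      /* AVX-512VL spelling */
  __m256  r2  = _mm256_permutevar8x32_ps(a, idx);   /* AVX2 spelling, same result */
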
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_permutexvar_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
- __m256i __Y)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X,
+ __m256i __Y)
{
- return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
- (__v8si) __X,
- (__v8si) __W,
- (__mmask8) __M);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_permutexvar_epi32(__X, __Y),
+ (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_permutexvar_epi32 (__m256i __X, __m256i __Y)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_permvarsi256_mask ((__v8si) __Y,
- (__v8si) __X,
- (__v8si) _mm256_undefined_si256(),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_permutexvar_epi32(__X, __Y),
+ (__v8si)_mm256_setzero_si256());
}
-#define _mm_alignr_epi32(A, B, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(A), \
- ((int)(imm) & 0x3) + 0, \
- ((int)(imm) & 0x3) + 1, \
- ((int)(imm) & 0x3) + 2, \
- ((int)(imm) & 0x3) + 3); })
+#define _mm_alignr_epi32(A, B, imm) \
+ (__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm))
-#define _mm_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
+#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
(__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
(__v4si)_mm_alignr_epi32((A), (B), (imm)), \
- (__v4si)(__m128i)(W)); })
+ (__v4si)(__m128i)(W))
-#define _mm_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
+#define _mm_maskz_alignr_epi32(U, A, B, imm) \
(__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
(__v4si)_mm_alignr_epi32((A), (B), (imm)), \
- (__v4si)_mm_setzero_si128()); })
-
-#define _mm256_alignr_epi32(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(A), \
- ((int)(imm) & 0x7) + 0, \
- ((int)(imm) & 0x7) + 1, \
- ((int)(imm) & 0x7) + 2, \
- ((int)(imm) & 0x7) + 3, \
- ((int)(imm) & 0x7) + 4, \
- ((int)(imm) & 0x7) + 5, \
- ((int)(imm) & 0x7) + 6, \
- ((int)(imm) & 0x7) + 7); })
-
-#define _mm256_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
+ (__v4si)_mm_setzero_si128())
+
+#define _mm256_alignr_epi32(A, B, imm) \
+ (__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm))
+
+#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
- (__v8si)(__m256i)(W)); })
+ (__v8si)(__m256i)(W))
-#define _mm256_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
- (__v8si)_mm256_setzero_si256()); })
+ (__v8si)_mm256_setzero_si256())
-#define _mm_alignr_epi64(A, B, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(A), \
- ((int)(imm) & 0x1) + 0, \
- ((int)(imm) & 0x1) + 1); })
+#define _mm_alignr_epi64(A, B, imm) \
+ (__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm))
-#define _mm_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
+#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
(__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
(__v2di)_mm_alignr_epi64((A), (B), (imm)), \
- (__v2di)(__m128i)(W)); })
+ (__v2di)(__m128i)(W))
-#define _mm_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
+#define _mm_maskz_alignr_epi64(U, A, B, imm) \
(__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
(__v2di)_mm_alignr_epi64((A), (B), (imm)), \
- (__v2di)_mm_setzero_di()); })
+ (__v2di)_mm_setzero_si128())
-#define _mm256_alignr_epi64(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(A), \
- ((int)(imm) & 0x3) + 0, \
- ((int)(imm) & 0x3) + 1, \
- ((int)(imm) & 0x3) + 2, \
- ((int)(imm) & 0x3) + 3); })
+#define _mm256_alignr_epi64(A, B, imm) \
+ (__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm))
-#define _mm256_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
+#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
- (__v4di)(__m256i)(W)); })
+ (__v4di)(__m256i)(W))
-#define _mm256_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
+#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
(__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
- (__v4di)_mm256_setzero_si256()); })
+ (__v4di)_mm256_setzero_si256())
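
The valignd/valignq forms concatenate the sources with A in the upper half and B in the lower, shift the pair right by imm elements, and keep the low vector, exactly what the removed shufflevector indices (imm & N) + i over (B, A) computed. Sketch:

  __m128i a = _mm_set_epi32(7, 6, 5, 4);
  __m128i b = _mm_set_epi32(3, 2, 1, 0);
  __m128i r = _mm_alignr_epi32(a, b, 1);   /* lanes: b[1], b[2], b[3], a[0] */
  /* r == {1, 2, 3, 4} */
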
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -8316,7 +8036,7 @@ _mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -8324,7 +8044,7 @@ _mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A)
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -8332,7 +8052,7 @@ _mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A)
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -8340,7 +8060,7 @@ _mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A)
(__v8sf)_mm256_setzero_ps());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -8348,7 +8068,7 @@ _mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A)
(__v4sf)__W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
{
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
@@ -8356,7 +8076,7 @@ _mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A)
(__v4sf)_mm_setzero_ps());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -8364,7 +8084,7 @@ _mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A)
(__v8sf)__W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
{
return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
@@ -8372,27 +8092,27 @@ _mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
(__v8sf)_mm256_setzero_ps());
}
-#define _mm256_mask_shuffle_epi32(W, U, A, I) __extension__({\
+#define _mm256_mask_shuffle_epi32(W, U, A, I) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_shuffle_epi32((A), (I)), \
- (__v8si)(__m256i)(W)); })
+ (__v8si)(__m256i)(W))
-#define _mm256_maskz_shuffle_epi32(U, A, I) __extension__({\
+#define _mm256_maskz_shuffle_epi32(U, A, I) \
(__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_shuffle_epi32((A), (I)), \
- (__v8si)_mm256_setzero_si256()); })
+ (__v8si)_mm256_setzero_si256())
-#define _mm_mask_shuffle_epi32(W, U, A, I) __extension__({\
+#define _mm_mask_shuffle_epi32(W, U, A, I) \
(__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
(__v4si)_mm_shuffle_epi32((A), (I)), \
- (__v4si)(__m128i)(W)); })
+ (__v4si)(__m128i)(W))
-#define _mm_maskz_shuffle_epi32(U, A, I) __extension__({\
+#define _mm_maskz_shuffle_epi32(U, A, I) \
(__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
(__v4si)_mm_shuffle_epi32((A), (I)), \
- (__v4si)_mm_setzero_si128()); })
+ (__v4si)_mm_setzero_si128())
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
@@ -8400,7 +8120,7 @@ _mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
(__v2df) __W);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
{
return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
@@ -8408,7 +8128,7 @@ _mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
(__v2df) _mm_setzero_pd ());
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
@@ -8416,7 +8136,7 @@ _mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
(__v4df) __W);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
{
return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
@@ -8424,7 +8144,7 @@ _mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
(__v4df) _mm256_setzero_pd ());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
@@ -8432,7 +8152,7 @@ _mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
(__v4sf) __W);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
{
return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
@@ -8440,7 +8160,7 @@ _mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
(__v4sf) _mm_setzero_ps ());
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
@@ -8448,7 +8168,7 @@ _mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
(__v8sf) __W);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
{
return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
@@ -8456,7 +8176,7 @@ _mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
(__v8sf) _mm256_setzero_ps ());
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
{
return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
@@ -8464,7 +8184,7 @@ _mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
{
return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
@@ -8473,7 +8193,7 @@ _mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
{
return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
@@ -8481,7 +8201,7 @@ _mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
{
return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
@@ -8490,7 +8210,7 @@ _mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
(__mmask8) __U);
}
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
@@ -8498,7 +8218,7 @@ _mm_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A)
{
return (__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf) __A, _MM_FROUND_CUR_DIRECTION,
@@ -8506,17 +8226,17 @@ _mm_maskz_cvtps_ph (__mmask8 __U, __m128 __A)
(__mmask8) __U);
}
-#define _mm_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
(__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
(__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+#define _mm_maskz_cvt_roundps_ph(U, A, I) \
(__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
(__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A)
{
return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
@@ -8524,24 +8244,25 @@ _mm256_mask_cvtps_ph (__m128i __W, __mmask8 __U, __m256 __A)
(__mmask8) __U);
}
-static __inline __m128i __DEFAULT_FN_ATTRS
+static __inline __m128i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_ph ( __mmask8 __U, __m256 __A)
{
return (__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf) __A, _MM_FROUND_CUR_DIRECTION,
(__v8hi) _mm_setzero_si128(),
(__mmask8) __U);
}
-#define _mm256_mask_cvt_roundps_ph(W, U, A, I) __extension__ ({ \
+#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
(__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
(__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#define _mm256_maskz_cvt_roundps_ph(U, A, I) __extension__ ({ \
+#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
(__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
(__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+ (__mmask8)(U))
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif /* __AVX512VLINTRIN_H */
diff --git a/lib/Headers/avx512vlvbmi2intrin.h b/lib/Headers/avx512vlvbmi2intrin.h
index d1ec4976f274..baaf5654631c 100644
--- a/lib/Headers/avx512vlvbmi2intrin.h
+++ b/lib/Headers/avx512vlvbmi2intrin.h
@@ -29,130 +29,120 @@
#define __AVX512VLVBMI2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(256)))
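
Splitting __DEFAULT_FN_ATTRS into 128- and 256-bit flavors adds __min_vector_width__ to every intrinsic. The attribute records the smallest vector width the function's body legitimately needs, so a global -mprefer-vector-width=128 does not force the backend to split the 256-bit operations inside these wrappers. A user-level sketch of the same attribute (hypothetical function name):

  /* Hypothetical: opt one function out of prefer-vector-width narrowing,
     the same way the 256-bit intrinsics in this header now do. */
  __attribute__((__target__("avx512vl"), __min_vector_width__(256)))
  static __m256i add256(__m256i a, __m256i b) {
    return _mm256_add_epi32(a, b);
  }
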
-static __inline __m128i __DEFAULT_FN_ATTRS
-_mm128_setzero_hi(void) {
- return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
(__v8hi) __S,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_compress_epi16(__mmask8 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
- (__v8hi) _mm128_setzero_hi(),
+ (__v8hi) _mm_setzero_si128(),
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
(__v16qi) __S,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_compress_epi8(__mmask16 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
- (__v16qi) _mm128_setzero_hi(),
+ (__v16qi) _mm_setzero_si128(),
__U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm128_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
{
__builtin_ia32_compressstorehi128_mask ((__v8hi *) __P, (__v8hi) __D,
__U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm128_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
{
__builtin_ia32_compressstoreqi128_mask ((__v16qi *) __P, (__v16qi) __D,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
(__v8hi) __S,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expand_epi16(__mmask8 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
- (__v8hi) _mm128_setzero_hi(),
+ (__v8hi) _mm_setzero_si128(),
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
(__v16qi) __S,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expand_epi8(__mmask16 __U, __m128i __D)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
- (__v16qi) _mm128_setzero_hi(),
+ (__v16qi) _mm_setzero_si128(),
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P,
(__v8hi) __S,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P,
- (__v8hi) _mm128_setzero_hi(),
+ (__v8hi) _mm_setzero_si128(),
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P,
(__v16qi) __S,
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P,
- (__v16qi) _mm128_setzero_hi(),
+ (__v16qi) _mm_setzero_si128(),
__U);
}
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setzero_hi(void) {
- return (__m256i)(__v16hi){ 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
@@ -160,15 +150,15 @@ _mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
- (__v16hi) _mm256_setzero_hi(),
+ (__v16hi) _mm256_setzero_si256(),
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
@@ -176,29 +166,29 @@ _mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
- (__v32qi) _mm256_setzero_hi(),
+ (__v32qi) _mm256_setzero_si256(),
__U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D)
{
__builtin_ia32_compressstorehi256_mask ((__v16hi *) __P, (__v16hi) __D,
__U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D)
{
__builtin_ia32_compressstoreqi256_mask ((__v32qi *) __P, (__v32qi) __D,
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
@@ -206,15 +196,15 @@ _mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
- (__v16hi) _mm256_setzero_hi(),
+ (__v16hi) _mm256_setzero_si256(),
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
@@ -222,15 +212,15 @@ _mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
- (__v32qi) _mm256_setzero_hi(),
+ (__v32qi) _mm256_setzero_si256(),
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P)
{
return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P,
@@ -238,15 +228,15 @@ _mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P)
{
return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P,
- (__v16hi) _mm256_setzero_hi(),
+ (__v16hi) _mm256_setzero_si256(),
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P)
{
return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P,
@@ -254,171 +244,183 @@ _mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
{
return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P,
- (__v32qi) _mm256_setzero_hi(),
+ (__v32qi) _mm256_setzero_si256(),
__U);
}
-#define _mm256_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
- (__m256i)__builtin_ia32_vpshldq256_mask((__v4di)(A), \
- (__v4di)(B), \
- (int)(I), \
- (__v4di)(S), \
- (__mmask8)(U)); })
+#define _mm256_shldi_epi64(A, B, I) \
+ (__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(I))
+
+#define _mm256_mask_shldi_epi64(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
+ (__v4di)(__m256i)(S))
#define _mm256_maskz_shldi_epi64(U, A, B, I) \
- _mm256_mask_shldi_epi64(_mm256_setzero_hi(), (U), (A), (B), (I))
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
+ (__v4di)_mm256_setzero_si256())
-#define _mm256_shldi_epi64(A, B, I) \
- _mm256_mask_shldi_epi64(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shldi_epi64(A, B, I) \
+ (__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(I))
-#define _mm128_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
- (__m128i)__builtin_ia32_vpshldq128_mask((__v2di)(A), \
- (__v2di)(B), \
- (int)(I), \
- (__v2di)(S), \
- (__mmask8)(U)); })
+#define _mm_mask_shldi_epi64(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shldi_epi64((A), (B), (I)), \
+ (__v2di)(__m128i)(S))
-#define _mm128_maskz_shldi_epi64(U, A, B, I) \
- _mm128_mask_shldi_epi64(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shldi_epi64(U, A, B, I) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shldi_epi64((A), (B), (I)), \
+ (__v2di)_mm_setzero_si128())
-#define _mm128_shldi_epi64(A, B, I) \
- _mm128_mask_shldi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm256_shldi_epi32(A, B, I) \
+ (__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(I))
-#define _mm256_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
- (__m256i)__builtin_ia32_vpshldd256_mask((__v8si)(A), \
- (__v8si)(B), \
- (int)(I), \
- (__v8si)(S), \
- (__mmask8)(U)); })
+#define _mm256_mask_shldi_epi32(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
+ (__v8si)(__m256i)(S))
#define _mm256_maskz_shldi_epi32(U, A, B, I) \
- _mm256_mask_shldi_epi32(_mm256_setzero_hi(), (U), (A), (B), (I))
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
+ (__v8si)_mm256_setzero_si256())
-#define _mm256_shldi_epi32(A, B, I) \
- _mm256_mask_shldi_epi32(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shldi_epi32(A, B, I) \
+ (__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(I))
-#define _mm128_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
- (__m128i)__builtin_ia32_vpshldd128_mask((__v4si)(A), \
- (__v4si)(B), \
- (int)(I), \
- (__v4si)(S), \
- (__mmask8)(U)); })
+#define _mm_mask_shldi_epi32(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shldi_epi32((A), (B), (I)), \
+ (__v4si)(__m128i)(S))
-#define _mm128_maskz_shldi_epi32(U, A, B, I) \
- _mm128_mask_shldi_epi32(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shldi_epi32(U, A, B, I) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shldi_epi32((A), (B), (I)), \
+ (__v4si)_mm_setzero_si128())
-#define _mm128_shldi_epi32(A, B, I) \
- _mm128_mask_shldi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm256_shldi_epi16(A, B, I) \
+ (__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \
+ (__v16hi)(__m256i)(B), (int)(I))
-#define _mm256_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
- (__m256i)__builtin_ia32_vpshldw256_mask((__v16hi)(A), \
- (__v16hi)(B), \
- (int)(I), \
- (__v16hi)(S), \
- (__mmask16)(U)); })
+#define _mm256_mask_shldi_epi16(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
+ (__v16hi)(__m256i)(S))
#define _mm256_maskz_shldi_epi16(U, A, B, I) \
- _mm256_mask_shldi_epi16(_mm256_setzero_hi(), (U), (A), (B), (I))
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
+ (__v16hi)_mm256_setzero_si256())
-#define _mm256_shldi_epi16(A, B, I) \
- _mm256_mask_shldi_epi16(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shldi_epi16(A, B, I) \
+ (__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (int)(I))
-#define _mm128_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
- (__m128i)__builtin_ia32_vpshldw128_mask((__v8hi)(A), \
- (__v8hi)(B), \
- (int)(I), \
- (__v8hi)(S), \
- (__mmask8)(U)); })
+#define _mm_mask_shldi_epi16(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
+ (__v8hi)(__m128i)(S))
-#define _mm128_maskz_shldi_epi16(U, A, B, I) \
- _mm128_mask_shldi_epi16(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shldi_epi16(U, A, B, I) \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
+ (__v8hi)_mm_setzero_si128())
-#define _mm128_shldi_epi16(A, B, I) \
- _mm128_mask_shldi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm256_shrdi_epi64(A, B, I) \
+ (__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(I))
-#define _mm256_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
- (__m256i)__builtin_ia32_vpshrdq256_mask((__v4di)(A), \
- (__v4di)(B), \
- (int)(I), \
- (__v4di)(S), \
- (__mmask8)(U)); })
+#define _mm256_mask_shrdi_epi64(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
+ (__v4di)(__m256i)(S))
#define _mm256_maskz_shrdi_epi64(U, A, B, I) \
- _mm256_mask_shrdi_epi64(_mm256_setzero_hi(), (U), (A), (B), (I))
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
+ (__v4di)_mm256_setzero_si256())
-#define _mm256_shrdi_epi64(A, B, I) \
- _mm256_mask_shrdi_epi64(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shrdi_epi64(A, B, I) \
+ (__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(I))
-#define _mm128_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
- (__m128i)__builtin_ia32_vpshrdq128_mask((__v2di)(A), \
- (__v2di)(B), \
- (int)(I), \
- (__v2di)(S), \
- (__mmask8)(U)); })
+#define _mm_mask_shrdi_epi64(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
+ (__v2di)(__m128i)(S))
-#define _mm128_maskz_shrdi_epi64(U, A, B, I) \
- _mm128_mask_shrdi_epi64(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shrdi_epi64(U, A, B, I) \
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
+ (__v2di)_mm_setzero_si128())
-#define _mm128_shrdi_epi64(A, B, I) \
- _mm128_mask_shrdi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm256_shrdi_epi32(A, B, I) \
+ (__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(I))
-#define _mm256_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
- (__m256i)__builtin_ia32_vpshrdd256_mask((__v8si)(A), \
- (__v8si)(B), \
- (int)(I), \
- (__v8si)(S), \
- (__mmask8)(U)); })
+#define _mm256_mask_shrdi_epi32(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
+ (__v8si)(__m256i)(S))
#define _mm256_maskz_shrdi_epi32(U, A, B, I) \
- _mm256_mask_shrdi_epi32(_mm256_setzero_hi(), (U), (A), (B), (I))
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
+ (__v8si)_mm256_setzero_si256())
-#define _mm256_shrdi_epi32(A, B, I) \
- _mm256_mask_shrdi_epi32(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shrdi_epi32(A, B, I) \
+ (__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(I))
-#define _mm128_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
- (__m128i)__builtin_ia32_vpshrdd128_mask((__v4si)(A), \
- (__v4si)(B), \
- (int)(I), \
- (__v4si)(S), \
- (__mmask8)(U)); })
+#define _mm_mask_shrdi_epi32(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
+ (__v4si)(__m128i)(S))
-#define _mm128_maskz_shrdi_epi32(U, A, B, I) \
- _mm128_mask_shrdi_epi32(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shrdi_epi32(U, A, B, I) \
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
+ (__v4si)_mm_setzero_si128())
-#define _mm128_shrdi_epi32(A, B, I) \
- _mm128_mask_shrdi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm256_shrdi_epi16(A, B, I) \
+ (__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \
+ (__v16hi)(__m256i)(B), (int)(I))
-#define _mm256_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
- (__m256i)__builtin_ia32_vpshrdw256_mask((__v16hi)(A), \
- (__v16hi)(B), \
- (int)(I), \
- (__v16hi)(S), \
- (__mmask16)(U)); })
+#define _mm256_mask_shrdi_epi16(S, U, A, B, I) \
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
+ (__v16hi)(__m256i)(S))
#define _mm256_maskz_shrdi_epi16(U, A, B, I) \
- _mm256_mask_shrdi_epi16(_mm256_setzero_hi(), (U), (A), (B), (I))
-
-#define _mm256_shrdi_epi16(A, B, I) \
- _mm256_mask_shrdi_epi16(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
+ (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
+ (__v16hi)_mm256_setzero_si256())
-#define _mm128_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
- (__m128i)__builtin_ia32_vpshrdw128_mask((__v8hi)(A), \
- (__v8hi)(B), \
- (int)(I), \
- (__v8hi)(S), \
- (__mmask8)(U)); })
+#define _mm_shrdi_epi16(A, B, I) \
+ (__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (int)(I))
-#define _mm128_maskz_shrdi_epi16(U, A, B, I) \
- _mm128_mask_shrdi_epi16(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_mask_shrdi_epi16(S, U, A, B, I) \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
+ (__v8hi)(__m128i)(S))
-#define _mm128_shrdi_epi16(A, B, I) \
- _mm128_mask_shrdi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_maskz_shrdi_epi16(U, A, B, I) \
+ (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
+ (__v8hi)_mm_setzero_si128())
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvq256_mask ((__v4di) __S,
@@ -427,7 +429,7 @@ _mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvq256_maskz ((__v4di) __S,
@@ -436,7 +438,7 @@ _mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvq256_mask ((__v4di) __S,
@@ -445,8 +447,8 @@ _mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -454,8 +456,8 @@ _mm128_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvq128_maskz ((__v2di) __S,
(__v2di) __A,
@@ -463,8 +465,8 @@ _mm128_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -472,7 +474,7 @@ _mm128_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shldv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvd256_mask ((__v8si) __S,
@@ -481,7 +483,7 @@ _mm256_mask_shldv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvd256_maskz ((__v8si) __S,
@@ -490,7 +492,7 @@ _mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvd256_mask ((__v8si) __S,
@@ -499,8 +501,8 @@ _mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -508,8 +510,8 @@ _mm128_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvd128_maskz ((__v4si) __S,
(__v4si) __A,
@@ -517,8 +519,8 @@ _mm128_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -526,7 +528,7 @@ _mm128_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shldv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvw256_mask ((__v16hi) __S,
@@ -535,7 +537,7 @@ _mm256_mask_shldv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvw256_maskz ((__v16hi) __S,
@@ -544,7 +546,7 @@ _mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshldvw256_mask ((__v16hi) __S,
@@ -553,8 +555,8 @@ _mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B)
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -562,8 +564,8 @@ _mm128_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvw128_maskz ((__v8hi) __S,
(__v8hi) __A,
@@ -571,8 +573,8 @@ _mm128_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -580,7 +582,7 @@ _mm128_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shrdv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvq256_mask ((__v4di) __S,
@@ -589,7 +591,7 @@ _mm256_mask_shrdv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvq256_maskz ((__v4di) __S,
@@ -598,7 +600,7 @@ _mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvq256_mask ((__v4di) __S,
@@ -607,8 +609,8 @@ _mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -616,8 +618,8 @@ _mm128_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvq128_maskz ((__v2di) __S,
(__v2di) __A,
@@ -625,8 +627,8 @@ _mm128_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -634,7 +636,7 @@ _mm128_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shrdv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvd256_mask ((__v8si) __S,
@@ -643,7 +645,7 @@ _mm256_mask_shrdv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvd256_maskz ((__v8si) __S,
@@ -652,7 +654,7 @@ _mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvd256_mask ((__v8si) __S,
@@ -661,8 +663,8 @@ _mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B)
(__mmask8) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -670,8 +672,8 @@ _mm128_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvd128_maskz ((__v4si) __S,
(__v4si) __A,
@@ -679,8 +681,8 @@ _mm128_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -688,7 +690,7 @@ _mm128_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
(__mmask8) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_shrdv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvw256_mask ((__v16hi) __S,
@@ -697,7 +699,7 @@ _mm256_mask_shrdv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvw256_maskz ((__v16hi) __S,
@@ -706,7 +708,7 @@ _mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
__U);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vpshrdvw256_mask ((__v16hi) __S,
@@ -715,8 +717,8 @@ _mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B)
(__mmask16) -1);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -724,8 +726,8 @@ _mm128_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvw128_maskz ((__v8hi) __S,
(__v8hi) __A,
@@ -733,8 +735,8 @@ _mm128_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
__U);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -743,6 +745,7 @@ _mm128_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
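
A minimal usage sketch of the renamed VBMI2 intrinsics (illustrative only; the
helper name and compile flags are assumptions, not part of the patch). The
128-bit forms now use the standard _mm_ prefix instead of _mm128_, and the
masked forms wrap the unmasked intrinsic in a per-lane select:

/* Compile with: -mavx512vbmi2 -mavx512vl */
#include <immintrin.h>

static __m128i demo_shrd(__m128i a, __m128i b, __m128i src, __mmask8 k)
{
  /* Per 64-bit lane: concatenate b (high) and a (low) into a 128-bit value,
     shift right by the immediate, keep the low 64 bits. */
  __m128i all = _mm_shrdi_epi64(a, b, 3);
  /* Masked form: lanes whose mask bit is 0 keep the value from src. */
  __m128i msk = _mm_mask_shrdi_epi64(src, k, a, b, 3);
  return _mm_add_epi64(all, msk);
}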
diff --git a/lib/Headers/avx512vlvnniintrin.h b/lib/Headers/avx512vlvnniintrin.h
index 745ae8b7ad3d..62382268ec9f 100644
--- a/lib/Headers/avx512vlvnniintrin.h
+++ b/lib/Headers/avx512vlvnniintrin.h
@@ -29,226 +29,195 @@
#define __AVX512VLVNNIINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(256)))
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_vpdpbusd256((__v8si)__S, (__v8si)__A,
+ (__v8si)__B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpbusd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpbusd_epi32(__S, __A, __B),
+ (__v8si)__S);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpbusd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpbusd256_maskz ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpbusd_epi32(__S, __A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpbusd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_vpdpbusds256((__v8si)__S, (__v8si)__A,
+ (__v8si)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbusds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpbusds256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpbusds_epi32(__S, __A, __B),
+ (__v8si)__S);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpbusds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpbusds256_maskz ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpbusds_epi32(__S, __A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpbusds256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_vpdpwssd256((__v8si)__S, (__v8si)__A,
+ (__v8si)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwssd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpwssd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpwssd_epi32(__S, __A, __B),
+ (__v8si)__S);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpwssd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpwssd256_maskz ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpwssd_epi32(__S, __A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpwssd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_vpdpwssds256((__v8si)__S, (__v8si)__A,
+ (__v8si)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwssds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpwssds256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpwssds_epi32(__S, __A, __B),
+ (__v8si)__S);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpwssds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_vpdpwssds256_maskz ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_dpwssds_epi32(__S, __A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_vpdpwssds256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_vpdpbusd128((__v4si)__S, (__v4si)__A,
+ (__v4si)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpbusd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpbusd_epi32(__S, __A, __B),
+ (__v4si)__S);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpbusd128_maskz ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpbusd_epi32(__S, __A, __B),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpbusd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_vpdpbusds128((__v4si)__S, (__v4si)__A,
+ (__v4si)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpbusds128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpbusds_epi32(__S, __A, __B),
+ (__v4si)__S);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpbusds128_maskz ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpbusds_epi32(__S, __A, __B),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpbusds128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_vpdpwssd128((__v4si)__S, (__v4si)__A,
+ (__v4si)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpwssd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpwssd_epi32(__S, __A, __B),
+ (__v4si)__S);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpwssd128_maskz ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpwssd_epi32(__S, __A, __B),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpwssd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_vpdpwssds128((__v4si)__S, (__v4si)__A,
+ (__v4si)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpwssds128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpwssds_epi32(__S, __A, __B),
+ (__v4si)__S);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_dpwssds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_dpwssds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_vpdpwssds128_maskz ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_dpwssds_epi32(__S, __A, __B),
+ (__v4si)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B)
-{
- return (__m128i) __builtin_ia32_vpdpwssds128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
-}
-
-
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
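
An illustrative VNNI sketch (the function name and flags are assumptions, not
part of the patch). _mm256_dpbusd_epi32 multiplies each unsigned byte of the
second operand with the corresponding signed byte of the third, sums each
group of four products, and accumulates into the 32-bit lanes of the first
operand:

/* Compile with: -mavx512vnni -mavx512vl */
#include <immintrin.h>

static __m256i dot_accumulate(__m256i acc, __m256i ubytes, __m256i sbytes)
{
  return _mm256_dpbusd_epi32(acc, ubytes, sbytes);
}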
diff --git a/lib/Headers/avx512vnniintrin.h b/lib/Headers/avx512vnniintrin.h
index 0c6badd231aa..620ef5a78959 100644
--- a/lib/Headers/avx512vnniintrin.h
+++ b/lib/Headers/avx512vnniintrin.h
@@ -29,118 +29,101 @@
#define __AVX512VNNIINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vnni")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vnni"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_dpbusd_epi32(__m512i __S, __m512i __A, __m512i __B)
+{
+ return (__m512i)__builtin_ia32_vpdpbusd512((__v16si)__S, (__v16si)__A,
+ (__v16si)__B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_dpbusd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpbusd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpbusd_epi32(__S, __A, __B),
+ (__v16si)__S);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_dpbusd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpbusd512_maskz ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpbusd_epi32(__S, __A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_dpbusd_epi32(__m512i __S, __m512i __A, __m512i __B)
+_mm512_dpbusds_epi32(__m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpbusd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_vpdpbusds512((__v16si)__S, (__v16si)__A,
+ (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_dpbusds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpbusds512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpbusds_epi32(__S, __A, __B),
+ (__v16si)__S);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_dpbusds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpbusds512_maskz ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpbusds_epi32(__S, __A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_dpbusds_epi32(__m512i __S, __m512i __A, __m512i __B)
+_mm512_dpwssd_epi32(__m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpbusds512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_vpdpwssd512((__v16si)__S, (__v16si)__A,
+ (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_dpwssd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpwssd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpwssd_epi32(__S, __A, __B),
+ (__v16si)__S);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_dpwssd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpwssd512_maskz ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpwssd_epi32(__S, __A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_dpwssd_epi32(__m512i __S, __m512i __A, __m512i __B)
+_mm512_dpwssds_epi32(__m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpwssd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_vpdpwssds512((__v16si)__S, (__v16si)__A,
+ (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_dpwssds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpwssds512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpwssds_epi32(__S, __A, __B),
+ (__v16si)__S);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_dpwssds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_vpdpwssds512_maskz ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_dpwssds_epi32(__m512i __S, __m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_vpdpwssds512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_dpwssds_epi32(__S, __A, __B),
+ (__v16si)_mm512_setzero_si512());
}
-
#undef __DEFAULT_FN_ATTRS
#endif
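
A short sketch of the masked 512-bit forms (helper name and flags are
assumptions, not part of the patch). Lanes whose mask bit is clear keep __S in
the mask_ form and become zero in the maskz_ form -- exactly the selectd_512
pattern the rewritten bodies use:

/* Compile with: -mavx512vnni */
#include <immintrin.h>

static __m512i masked_dot(__m512i s, __mmask16 k, __m512i a, __m512i b)
{
  __m512i merged = _mm512_mask_dpbusd_epi32(s, k, a, b);  /* clear bits keep s    */
  __m512i zeroed = _mm512_maskz_dpbusd_epi32(k, s, a, b); /* clear bits give zero */
  return _mm512_add_epi32(merged, zeroed);
}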
diff --git a/lib/Headers/avx512vpopcntdqintrin.h b/lib/Headers/avx512vpopcntdqintrin.h
index 34ab84932e7a..c99f5945699e 100644
--- a/lib/Headers/avx512vpopcntdqintrin.h
+++ b/lib/Headers/avx512vpopcntdqintrin.h
@@ -1,5 +1,4 @@
-/*===------------- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics
- *------------------===
+/*===----- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics-------------===
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -32,8 +31,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntd" \
- "q")))
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi64(__m512i __A) {
return (__m512i)__builtin_ia32_vpopcntq_512((__v8di)__A);
diff --git a/lib/Headers/avx512vpopcntdqvlintrin.h b/lib/Headers/avx512vpopcntdqvlintrin.h
index c2058a8f5154..681a75fa07cd 100644
--- a/lib/Headers/avx512vpopcntdqvlintrin.h
+++ b/lib/Headers/avx512vpopcntdqvlintrin.h
@@ -1,5 +1,4 @@
-/*===------------- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics
- *------------------===
+/*===---- avx512vpopcntdqvlintrin.h - AVX512VPOPCNTDQ intrinsics -----------===
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -31,69 +30,76 @@
#define __AVX512VPOPCNTDQVLINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl")))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl"), __min_vector_width__(256)))
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_popcnt_epi64(__m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_popcnt_epi64(__m128i __A) {
return (__m128i)__builtin_ia32_vpopcntq_128((__v2di)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectq_128(
(__mmask8)__U, (__v2di)_mm_popcnt_epi64(__A), (__v2di)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) {
return _mm_mask_popcnt_epi64((__m128i)_mm_setzero_si128(), __U, __A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_popcnt_epi32(__m128i __A) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_popcnt_epi32(__m128i __A) {
return (__m128i)__builtin_ia32_vpopcntd_128((__v4si)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_popcnt_epi32(__A), (__v4si)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) {
return _mm_mask_popcnt_epi32((__m128i)_mm_setzero_si128(), __U, __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_popcnt_epi64(__m256i __A) {
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_popcnt_epi64(__m256i __A) {
return (__m256i)__builtin_ia32_vpopcntq_256((__v4di)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectq_256(
(__mmask8)__U, (__v4di)_mm256_popcnt_epi64(__A), (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) {
return _mm256_mask_popcnt_epi64((__m256i)_mm256_setzero_si256(), __U, __A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_popcnt_epi32(__m256i __A) {
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_popcnt_epi32(__m256i __A) {
return (__m256i)__builtin_ia32_vpopcntd_256((__v8si)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_popcnt_epi32(__A), (__v8si)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) {
return _mm256_mask_popcnt_epi32((__m256i)_mm256_setzero_si256(), __U, __A);
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif
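
A minimal sketch of the VL population-count forms (helper name and flags are
assumptions, not part of the patch):

/* Compile with: -mavx512vpopcntdq -mavx512vl */
#include <immintrin.h>

static __m128i count_bits(__m128i v, __m128i fallback, __mmask8 k)
{
  __m128i c = _mm_popcnt_epi64(v);              /* per-lane bit count      */
  return _mm_mask_popcnt_epi64(fallback, k, c); /* 0-bits keep fallback    */
}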
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index dff5897b6bb6..cb15396b3faf 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -50,10 +50,11 @@ typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128)))
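
The new __min_vector_width__(N) attribute records the minimum vector register
width a function's body requires, so the backend does not narrow it when a
smaller preferred vector width is otherwise in effect. A hypothetical
user-side helper following the same convention might look like this (the name
my_add_pd is an assumption, not part of the patch):

#include <immintrin.h>

static __inline__ __m256d
__attribute__((__always_inline__, __nodebug__, __target__("avx"),
               __min_vector_width__(256)))
my_add_pd(__m256d a, __m256d b)
{
  return _mm256_add_pd(a, b);
}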
/* Arithmetic */
-/// \brief Adds two 256-bit vectors of [4 x double].
+/// Adds two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -71,7 +72,7 @@ _mm256_add_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4df)__a+(__v4df)__b);
}
-/// \brief Adds two 256-bit vectors of [8 x float].
+/// Adds two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -89,7 +90,7 @@ _mm256_add_ps(__m256 __a, __m256 __b)
return (__m256)((__v8sf)__a+(__v8sf)__b);
}
-/// \brief Subtracts two 256-bit vectors of [4 x double].
+/// Subtracts two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -107,7 +108,7 @@ _mm256_sub_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4df)__a-(__v4df)__b);
}
-/// \brief Subtracts two 256-bit vectors of [8 x float].
+/// Subtracts two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -125,7 +126,7 @@ _mm256_sub_ps(__m256 __a, __m256 __b)
return (__m256)((__v8sf)__a-(__v8sf)__b);
}
-/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// Adds the even-indexed values and subtracts the odd-indexed values of
/// two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -144,7 +145,7 @@ _mm256_addsub_pd(__m256d __a, __m256d __b)
return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// Adds the even-indexed values and subtracts the odd-indexed values of
/// two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -163,7 +164,7 @@ _mm256_addsub_ps(__m256 __a, __m256 __b)
return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Divides two 256-bit vectors of [4 x double].
+/// Divides two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -181,7 +182,7 @@ _mm256_div_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4df)__a/(__v4df)__b);
}
-/// \brief Divides two 256-bit vectors of [8 x float].
+/// Divides two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -199,7 +200,7 @@ _mm256_div_ps(__m256 __a, __m256 __b)
return (__m256)((__v8sf)__a/(__v8sf)__b);
}
-/// \brief Compares two 256-bit vectors of [4 x double] and returns the greater
+/// Compares two 256-bit vectors of [4 x double] and returns the greater
/// of each pair of values.
///
/// \headerfile <x86intrin.h>
@@ -218,7 +219,7 @@ _mm256_max_pd(__m256d __a, __m256d __b)
return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Compares two 256-bit vectors of [8 x float] and returns the greater
+/// Compares two 256-bit vectors of [8 x float] and returns the greater
/// of each pair of values.
///
/// \headerfile <x86intrin.h>
@@ -237,7 +238,7 @@ _mm256_max_ps(__m256 __a, __m256 __b)
return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Compares two 256-bit vectors of [4 x double] and returns the lesser
+/// Compares two 256-bit vectors of [4 x double] and returns the lesser
/// of each pair of values.
///
/// \headerfile <x86intrin.h>
@@ -256,7 +257,7 @@ _mm256_min_pd(__m256d __a, __m256d __b)
return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Compares two 256-bit vectors of [8 x float] and returns the lesser
+/// Compares two 256-bit vectors of [8 x float] and returns the lesser
/// of each pair of values.
///
/// \headerfile <x86intrin.h>
@@ -275,7 +276,7 @@ _mm256_min_ps(__m256 __a, __m256 __b)
return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Multiplies two 256-bit vectors of [4 x double].
+/// Multiplies two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -293,7 +294,7 @@ _mm256_mul_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4df)__a * (__v4df)__b);
}
-/// \brief Multiplies two 256-bit vectors of [8 x float].
+/// Multiplies two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -311,7 +312,7 @@ _mm256_mul_ps(__m256 __a, __m256 __b)
return (__m256)((__v8sf)__a * (__v8sf)__b);
}
-/// \brief Calculates the square roots of the values in a 256-bit vector of
+/// Calculates the square roots of the values in a 256-bit vector of
/// [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -328,7 +329,7 @@ _mm256_sqrt_pd(__m256d __a)
return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}
-/// \brief Calculates the square roots of the values in a 256-bit vector of
+/// Calculates the square roots of the values in a 256-bit vector of
/// [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -345,7 +346,7 @@ _mm256_sqrt_ps(__m256 __a)
return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}
-/// \brief Calculates the reciprocal square roots of the values in a 256-bit
+/// Calculates the reciprocal square roots of the values in a 256-bit
/// vector of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -362,7 +363,7 @@ _mm256_rsqrt_ps(__m256 __a)
return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}
-/// \brief Calculates the reciprocals of the values in a 256-bit vector of
+/// Calculates the reciprocals of the values in a 256-bit vector of
/// [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -379,7 +380,7 @@ _mm256_rcp_ps(__m256 __a)
return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}
-/// \brief Rounds the values in a 256-bit vector of [4 x double] as specified
+/// Rounds the values in a 256-bit vector of [4 x double] as specified
/// by the byte operand. The source values are rounded to integer values and
/// returned as 64-bit double-precision floating-point values.
///
@@ -408,10 +409,10 @@ _mm256_rcp_ps(__m256 __a)
/// 10: Upward (toward positive infinity). \n
/// 11: Truncated.
/// \returns A 256-bit vector of [4 x double] containing the rounded values.
-#define _mm256_round_pd(V, M) __extension__ ({ \
- (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
+#define _mm256_round_pd(V, M) \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M))
-/// \brief Rounds the values stored in a 256-bit vector of [8 x float] as
+/// Rounds the values stored in a 256-bit vector of [8 x float] as
/// specified by the byte operand. The source values are rounded to integer
/// values and returned as floating-point values.
///
@@ -440,10 +441,10 @@ _mm256_rcp_ps(__m256 __a)
/// 10: Upward (toward positive infinity). \n
/// 11: Truncated.
/// \returns A 256-bit vector of [8 x float] containing the rounded values.
-#define _mm256_round_ps(V, M) __extension__ ({ \
- (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
+#define _mm256_round_ps(V, M) \
+ (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M))
-/// \brief Rounds up the values stored in a 256-bit vector of [4 x double]. The
+/// Rounds up the values stored in a 256-bit vector of [4 x double]. The
/// source values are rounded up to integer values and returned as 64-bit
/// double-precision floating-point values.
///
@@ -460,7 +461,7 @@ _mm256_rcp_ps(__m256 __a)
/// \returns A 256-bit vector of [4 x double] containing the rounded up values.
#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
-/// \brief Rounds down the values stored in a 256-bit vector of [4 x double].
+/// Rounds down the values stored in a 256-bit vector of [4 x double].
/// The source values are rounded down to integer values and returned as
/// 64-bit double-precision floating-point values.
///
@@ -478,7 +479,7 @@ _mm256_rcp_ps(__m256 __a)
/// values.
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
-/// \brief Rounds up the values stored in a 256-bit vector of [8 x float]. The
+/// Rounds up the values stored in a 256-bit vector of [8 x float]. The
/// source values are rounded up to integer values and returned as
/// floating-point values.
///
@@ -495,7 +496,7 @@ _mm256_rcp_ps(__m256 __a)
/// \returns A 256-bit vector of [8 x float] containing the rounded up values.
#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
-/// \brief Rounds down the values stored in a 256-bit vector of [8 x float]. The
+/// Rounds down the values stored in a 256-bit vector of [8 x float]. The
/// source values are rounded down to integer values and returned as
/// floating-point values.
///
@@ -513,7 +514,7 @@ _mm256_rcp_ps(__m256 __a)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
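
A small sketch exercising the rounding macros defined above (the helper name
is an assumption, not part of the patch):

/* Compile with: -mavx */
#include <immintrin.h>

static __m256d round_three_ways(__m256d v)
{
  __m256d up   = _mm256_ceil_pd(v);   /* toward +infinity */
  __m256d down = _mm256_floor_pd(v);  /* toward -infinity */
  __m256d near = _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT |
                                    _MM_FROUND_NO_EXC);
  return _mm256_add_pd(near, _mm256_sub_pd(up, down));
}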
/* Logical */
-/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double].
+/// Performs a bitwise AND of two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -531,7 +532,7 @@ _mm256_and_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4du)__a & (__v4du)__b);
}
-/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float].
+/// Performs a bitwise AND of two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -549,7 +550,7 @@ _mm256_and_ps(__m256 __a, __m256 __b)
return (__m256)((__v8su)__a & (__v8su)__b);
}
-/// \brief Performs a bitwise AND of two 256-bit vectors of [4 x double], using
+/// Performs a bitwise AND of two 256-bit vectors of [4 x double], using
/// the one's complement of the values contained in the first source operand.
///
/// \headerfile <x86intrin.h>
@@ -570,7 +571,7 @@ _mm256_andnot_pd(__m256d __a, __m256d __b)
return (__m256d)(~(__v4du)__a & (__v4du)__b);
}
-/// \brief Performs a bitwise AND of two 256-bit vectors of [8 x float], using
+/// Performs a bitwise AND of two 256-bit vectors of [8 x float], using
/// the one's complement of the values contained in the first source operand.
///
/// \headerfile <x86intrin.h>
@@ -591,7 +592,7 @@ _mm256_andnot_ps(__m256 __a, __m256 __b)
return (__m256)(~(__v8su)__a & (__v8su)__b);
}
-/// \brief Performs a bitwise OR of two 256-bit vectors of [4 x double].
+/// Performs a bitwise OR of two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -609,7 +610,7 @@ _mm256_or_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4du)__a | (__v4du)__b);
}
-/// \brief Performs a bitwise OR of two 256-bit vectors of [8 x float].
+/// Performs a bitwise OR of two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -627,7 +628,7 @@ _mm256_or_ps(__m256 __a, __m256 __b)
return (__m256)((__v8su)__a | (__v8su)__b);
}
-/// \brief Performs a bitwise XOR of two 256-bit vectors of [4 x double].
+/// Performs a bitwise XOR of two 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -645,7 +646,7 @@ _mm256_xor_pd(__m256d __a, __m256d __b)
return (__m256d)((__v4du)__a ^ (__v4du)__b);
}
-/// \brief Performs a bitwise XOR of two 256-bit vectors of [8 x float].
+/// Performs a bitwise XOR of two 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -664,7 +665,7 @@ _mm256_xor_ps(__m256 __a, __m256 __b)
}
/* Horizontal arithmetic */
-/// \brief Horizontally adds the adjacent pairs of values contained in two
+/// Horizontally adds the adjacent pairs of values contained in two
/// 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -687,7 +688,7 @@ _mm256_hadd_pd(__m256d __a, __m256d __b)
return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in two
+/// Horizontally adds the adjacent pairs of values contained in two
/// 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -710,7 +711,7 @@ _mm256_hadd_ps(__m256 __a, __m256 __b)
return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+/// Horizontally subtracts the adjacent pairs of values contained in two
/// 256-bit vectors of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -733,7 +734,7 @@ _mm256_hsub_pd(__m256d __a, __m256d __b)
return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+/// Horizontally subtracts the adjacent pairs of values contained in two
/// 256-bit vectors of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -757,7 +758,7 @@ _mm256_hsub_ps(__m256 __a, __m256 __b)
}
/* Vector permutations */
-/// \brief Copies the values in a 128-bit vector of [2 x double] as specified
+/// Copies the values in a 128-bit vector of [2 x double] as specified
/// by the 128-bit integer vector operand.
///
/// \headerfile <x86intrin.h>
@@ -780,13 +781,13 @@ _mm256_hsub_ps(__m256 __a, __m256 __b)
/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
/// returned vector.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline __m128d __DEFAULT_FN_ATTRS
+static __inline __m128d __DEFAULT_FN_ATTRS128
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}
-/// \brief Copies the values in a 256-bit vector of [4 x double] as specified
+/// Copies the values in a 256-bit vector of [4 x double] as specified
/// by the 256-bit integer vector operand.
///
/// \headerfile <x86intrin.h>
@@ -825,7 +826,7 @@ _mm256_permutevar_pd(__m256d __a, __m256i __c)
return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}
-/// \brief Copies the values stored in a 128-bit vector of [4 x float] as
+/// Copies the values stored in a 128-bit vector of [4 x float] as
/// specified by the 128-bit integer vector operand.
///
/// \headerfile <x86intrin.h>
///
@@ -873,13 +874,13 @@ _mm256_permutevar_pd(__m256d __a, __m256i __c)
/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
/// returned vector.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}
-/// \brief Copies the values stored in a 256-bit vector of [8 x float] as
+/// Copies the values stored in a 256-bit vector of [8 x float] as
/// specified by the 256-bit integer vector operand.
///
/// \headerfile <x86intrin.h>
@@ -970,7 +971,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}
-/// \brief Copies the values in a 128-bit vector of [2 x double] as specified
+/// Copies the values in a 128-bit vector of [2 x double] as specified
/// by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -997,12 +998,10 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
/// returned vector.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_permute_pd(A, C) __extension__ ({ \
- (__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
- (__v2df)_mm_undefined_pd(), \
- ((C) >> 0) & 0x1, ((C) >> 1) & 0x1); })
+#define _mm_permute_pd(A, C) \
+ (__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C))
-/// \brief Copies the values in a 256-bit vector of [4 x double] as specified by
+/// Copies the values in a 256-bit vector of [4 x double] as specified by
/// the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -1039,15 +1038,10 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
/// returned vector.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
-#define _mm256_permute_pd(A, C) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
- (__v4df)_mm256_undefined_pd(), \
- 0 + (((C) >> 0) & 0x1), \
- 0 + (((C) >> 1) & 0x1), \
- 2 + (((C) >> 2) & 0x1), \
- 2 + (((C) >> 3) & 0x1)); })
-
-/// \brief Copies the values in a 128-bit vector of [4 x float] as specified by
+#define _mm256_permute_pd(A, C) \
+ (__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C))
+
+/// Copies the values in a 128-bit vector of [4 x float] as specified by
/// the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -1100,13 +1094,10 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
/// returned vector.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_permute_ps(A, C) __extension__ ({ \
- (__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
- (__v4sf)_mm_undefined_ps(), \
- ((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
- ((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
+#define _mm_permute_ps(A, C) \
+ (__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C))
-/// \brief Copies the values in a 256-bit vector of [8 x float] as specified by
+/// Copies the values in a 256-bit vector of [8 x float] as specified by
/// the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -1120,7 +1111,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// \param A
/// A 256-bit vector of [8 x float].
/// \param C
-/// An immediate integer operand specifying how the values are to be \n
+/// An immediate integer operand specifying how the values are to be
/// copied. \n
/// Bits [1:0]: \n
/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
@@ -1150,7 +1141,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
/// returned vector. \n
/// Bits [7:6]: \n
-/// 00: Bits [31:qq0] of the source are copied to bits [127:96] of the
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
/// returned vector. \n
/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
/// returned vector. \n
@@ -1195,19 +1186,10 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
/// returned vector.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
-#define _mm256_permute_ps(A, C) __extension__ ({ \
- (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
- (__v8sf)_mm256_undefined_ps(), \
- 0 + (((C) >> 0) & 0x3), \
- 0 + (((C) >> 2) & 0x3), \
- 0 + (((C) >> 4) & 0x3), \
- 0 + (((C) >> 6) & 0x3), \
- 4 + (((C) >> 0) & 0x3), \
- 4 + (((C) >> 2) & 0x3), \
- 4 + (((C) >> 4) & 0x3), \
- 4 + (((C) >> 6) & 0x3)); })
-
-/// \brief Permutes 128-bit data values stored in two 256-bit vectors of
+#define _mm256_permute_ps(A, C) \
+ (__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C))
+
+/// Permutes 128-bit data values stored in two 256-bit vectors of
/// [4 x double], as specified by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -1244,11 +1226,11 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
/// destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
-#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+#define _mm256_permute2f128_pd(V1, V2, M) \
(__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
- (__v4df)(__m256d)(V2), (M)); })
+ (__v4df)(__m256d)(V2), (int)(M))
-/// \brief Permutes 128-bit data values stored in two 256-bit vectors of
+/// Permutes 128-bit data values stored in two 256-bit vectors of
/// [8 x float], as specified by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -1285,11 +1267,11 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
/// destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
-#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+#define _mm256_permute2f128_ps(V1, V2, M) \
(__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
- (__v8sf)(__m256)(V2), (M)); })
+ (__v8sf)(__m256)(V2), (int)(M))
-/// \brief Permutes 128-bit data values stored in two 256-bit integer vectors,
+/// Permutes 128-bit data values stored in two 256-bit integer vectors,
/// as specified by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -1325,12 +1307,12 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
/// destination.
/// \returns A 256-bit integer vector containing the copied values.
-#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+#define _mm256_permute2f128_si256(V1, V2, M) \
(__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
- (__v8si)(__m256i)(V2), (M)); })
+ (__v8si)(__m256i)(V2), (int)(M))
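The vperm2f128 control byte works on whole 128-bit lanes: bits [1:0] pick the source lane for the low half of the result and bits [5:4] pick the lane for the high half. A caller-side sketch (assumes -mavx) that swaps the two halves of a single vector:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d v = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
      /* 0x01: low half <- lane 1 of V1, high half <- lane 0 of V1. */
      __m256d r = _mm256_permute2f128_pd(v, v, 0x01);
      double out[4];
      _mm256_storeu_pd(out, r);
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 3 4 1 2 */
      return 0;
    }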
/* Vector Blend */
-/// \brief Merges 64-bit double-precision data values stored in either of the
+/// Merges 64-bit double-precision data values stored in either of the
/// two 256-bit vectors of [4 x double], as specified by the immediate
/// integer operand.
///
@@ -1354,15 +1336,11 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// destination. When a mask bit is 1, the corresponding 64-bit element in
/// operand \a V2 is copied to the same position in the destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
-#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
- (__v4df)(__m256d)(V2), \
- (((M) & 0x01) ? 4 : 0), \
- (((M) & 0x02) ? 5 : 1), \
- (((M) & 0x04) ? 6 : 2), \
- (((M) & 0x08) ? 7 : 3)); })
-
-/// \brief Merges 32-bit single-precision data values stored in either of the
+#define _mm256_blend_pd(V1, V2, M) \
+ (__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (int)(M))
+
+/// Merges 32-bit single-precision data values stored in either of the
/// two 256-bit vectors of [8 x float], as specified by the immediate
/// integer operand.
///
@@ -1386,19 +1364,11 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// destination. When a mask bit is 1, the corresponding 32-bit element in
/// operand \a V2 is copied to the same position in the destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
-#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
- (__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
- (__v8sf)(__m256)(V2), \
- (((M) & 0x01) ? 8 : 0), \
- (((M) & 0x02) ? 9 : 1), \
- (((M) & 0x04) ? 10 : 2), \
- (((M) & 0x08) ? 11 : 3), \
- (((M) & 0x10) ? 12 : 4), \
- (((M) & 0x20) ? 13 : 5), \
- (((M) & 0x40) ? 14 : 6), \
- (((M) & 0x80) ? 15 : 7)); })
-
-/// \brief Merges 64-bit double-precision data values stored in either of the
+#define _mm256_blend_ps(V1, V2, M) \
+ (__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (int)(M))
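Each mask bit selects one element position: 0 keeps V1's element, 1 takes V2's. A sketch (assumes -mavx) splicing the low half of one vector with the high half of another:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 a = _mm256_set1_ps(1.0f);
      __m256 b = _mm256_set1_ps(2.0f);
      /* 0xF0: elements 0-3 from a, elements 4-7 from b. */
      __m256 r = _mm256_blend_ps(a, b, 0xF0);
      float out[8];
      _mm256_storeu_ps(out, r);
      for (int i = 0; i < 8; ++i)
        printf("%g ", out[i]);              /* 1 1 1 1 2 2 2 2 */
      printf("\n");
      return 0;
    }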
+
+/// Merges 64-bit double-precision data values stored in either of the
/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector
/// operand.
///
@@ -1426,7 +1396,7 @@ _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
(__v4df)__a, (__v4df)__b, (__v4df)__c);
}
-/// \brief Merges 32-bit single-precision data values stored in either of the
+/// Merges 32-bit single-precision data values stored in either of the
/// two 256-bit vectors of [8 x float], as specified by the 256-bit vector
/// operand.
///
@@ -1455,7 +1425,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
}
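Unlike the immediate blends above, blendv takes its selector at run time, and only the sign bit of each mask element is consulted. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
      __m256d b = _mm256_setr_pd(-1.0, -2.0, -3.0, -4.0);
      /* -0.0 has its sign bit set, so positions 0 and 2 come from b. */
      __m256d m = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
      __m256d r = _mm256_blendv_pd(a, b, m);
      double out[4];
      _mm256_storeu_pd(out, r);
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* -1 2 -3 4 */
      return 0;
    }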
/* Vector Dot Product */
-/// \brief Computes two dot products in parallel, using the lower and upper
+/// Computes two dot products in parallel, using the lower and upper
/// halves of two [8 x float] vectors as input to the two computations, and
/// returning the two dot products in the lower and upper halves of the
/// [8 x float] result.
@@ -1492,12 +1462,12 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// is set to zero. The bitmask is applied in the same way to each of the
/// two parallel dot product computations.
/// \returns A 256-bit vector of [8 x float] containing the two dot products.
-#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+#define _mm256_dp_ps(V1, V2, M) \
(__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
- (__v8sf)(__m256)(V2), (M)); })
+ (__v8sf)(__m256)(V2), (M))
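Bits [7:4] of the mask gate which inputs enter each dot product and bits [3:0] choose which result slots receive it; the same mask applies to both 128-bit halves. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 a = _mm256_setr_ps(1, 2, 3, 4, 5, 6, 7, 8);
      __m256 b = _mm256_set1_ps(1.0f);
      /* 0xFF: use all four inputs, broadcast each half's dot product
         to all four of that half's result slots. */
      __m256 r = _mm256_dp_ps(a, b, 0xFF);
      float out[8];
      _mm256_storeu_ps(out, r);
      printf("low %g, high %g\n", out[0], out[4]);  /* low 10, high 26 */
      return 0;
    }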
/* Vector shuffle */
-/// \brief Selects 8 float values from the 256-bit operands of [8 x float], as
+/// Selects 8 float values from the 256-bit operands of [8 x float], as
/// specified by the immediate value operand.
///
/// The four selected elements in each operand are copied to the destination
@@ -1546,19 +1516,11 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n
/// 11: Bits [127:96] and [255:224] are copied from the selected operand.
/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
-#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
- (__m256)__builtin_shufflevector((__v8sf)(__m256)(a), \
- (__v8sf)(__m256)(b), \
- 0 + (((mask) >> 0) & 0x3), \
- 0 + (((mask) >> 2) & 0x3), \
- 8 + (((mask) >> 4) & 0x3), \
- 8 + (((mask) >> 6) & 0x3), \
- 4 + (((mask) >> 0) & 0x3), \
- 4 + (((mask) >> 2) & 0x3), \
- 12 + (((mask) >> 4) & 0x3), \
- 12 + (((mask) >> 6) & 0x3)); })
-
-/// \brief Selects four double-precision values from the 256-bit operands of
+#define _mm256_shuffle_ps(a, b, mask) \
+ (__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (int)(mask))
+
+/// Selects four double-precision values from the 256-bit operands of
/// [4 x double], as specified by the immediate value operand.
///
/// The selected elements from the first 256-bit operand are copied to bits
@@ -1600,13 +1562,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the
/// destination.
/// \returns A 256-bit vector of [4 x double] containing the shuffled values.
-#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(a), \
- (__v4df)(__m256d)(b), \
- 0 + (((mask) >> 0) & 0x1), \
- 4 + (((mask) >> 1) & 0x1), \
- 2 + (((mask) >> 2) & 0x1), \
- 6 + (((mask) >> 3) & 0x1)); })
+#define _mm256_shuffle_pd(a, b, mask) \
+ (__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (int)(mask))
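The shuffle immediates apply per 128-bit half: for _mm256_shuffle_ps, fields [3:0] pick two elements of a and fields [7:4] pick two elements of b, in each half independently. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 a = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
      __m256 b = _mm256_setr_ps(10, 11, 12, 13, 14, 15, 16, 17);
      /* _MM_SHUFFLE(1,0,1,0) == 0x44: per half, a's elements 0-1
         followed by b's elements 0-1. */
      __m256 r = _mm256_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 1, 0));
      float out[8];
      _mm256_storeu_ps(out, r);
      for (int i = 0; i < 8; ++i)
        printf("%g ", out[i]);         /* 0 1 10 11 4 5 14 15 */
      printf("\n");
      return 0;
    }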
/* Compare */
#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
@@ -1642,7 +1600,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
-/// \brief Compares each of the corresponding double-precision values of two
+/// Compares each of the corresponding double-precision values of two
/// 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
@@ -1665,44 +1623,44 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
/// operation to use: \n
-/// 0x00 : Equal (ordered, non-signaling)
-/// 0x01 : Less-than (ordered, signaling)
-/// 0x02 : Less-than-or-equal (ordered, signaling)
-/// 0x03 : Unordered (non-signaling)
-/// 0x04 : Not-equal (unordered, non-signaling)
-/// 0x05 : Not-less-than (unordered, signaling)
-/// 0x06 : Not-less-than-or-equal (unordered, signaling)
-/// 0x07 : Ordered (non-signaling)
-/// 0x08 : Equal (unordered, non-signaling)
-/// 0x09 : Not-greater-than-or-equal (unordered, signaling)
-/// 0x0a : Not-greater-than (unordered, signaling)
-/// 0x0b : False (ordered, non-signaling)
-/// 0x0c : Not-equal (ordered, non-signaling)
-/// 0x0d : Greater-than-or-equal (ordered, signaling)
-/// 0x0e : Greater-than (ordered, signaling)
-/// 0x0f : True (unordered, non-signaling)
-/// 0x10 : Equal (ordered, signaling)
-/// 0x11 : Less-than (ordered, non-signaling)
-/// 0x12 : Less-than-or-equal (ordered, non-signaling)
-/// 0x13 : Unordered (signaling)
-/// 0x14 : Not-equal (unordered, signaling)
-/// 0x15 : Not-less-than (unordered, non-signaling)
-/// 0x16 : Not-less-than-or-equal (unordered, non-signaling)
-/// 0x17 : Ordered (signaling)
-/// 0x18 : Equal (unordered, signaling)
-/// 0x19 : Not-greater-than-or-equal (unordered, non-signaling)
-/// 0x1a : Not-greater-than (unordered, non-signaling)
-/// 0x1b : False (ordered, signaling)
-/// 0x1c : Not-equal (ordered, signaling)
-/// 0x1d : Greater-than-or-equal (ordered, non-signaling)
-/// 0x1e : Greater-than (ordered, non-signaling)
-/// 0x1f : True (unordered, signaling)
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// 0x08: Equal (unordered, non-signaling) \n
+/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n
+/// 0x0A: Not-greater-than (unordered, signaling) \n
+/// 0x0B: False (ordered, non-signaling) \n
+/// 0x0C: Not-equal (ordered, non-signaling) \n
+/// 0x0D: Greater-than-or-equal (ordered, signaling) \n
+/// 0x0E: Greater-than (ordered, signaling) \n
+/// 0x0F: True (unordered, non-signaling) \n
+/// 0x10: Equal (ordered, signaling) \n
+/// 0x11: Less-than (ordered, non-signaling) \n
+/// 0x12: Less-than-or-equal (ordered, non-signaling) \n
+/// 0x13: Unordered (signaling) \n
+/// 0x14: Not-equal (unordered, signaling) \n
+/// 0x15: Not-less-than (unordered, non-signaling) \n
+/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n
+/// 0x17: Ordered (signaling) \n
+/// 0x18: Equal (unordered, signaling) \n
+/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
+/// 0x1A: Not-greater-than (unordered, non-signaling) \n
+/// 0x1B: False (ordered, signaling) \n
+/// 0x1C: Not-equal (ordered, signaling) \n
+/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n
+/// 0x1E: Greater-than (ordered, non-signaling) \n
+/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
-#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+#define _mm_cmp_pd(a, b, c) \
(__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (c)); })
+ (__v2df)(__m128d)(b), (c))
-/// \brief Compares each of the corresponding values of two 128-bit vectors of
+/// Compares each of the corresponding values of two 128-bit vectors of
/// [4 x float], using the operation specified by the immediate integer
/// operand.
///
@@ -1725,44 +1683,44 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
/// operation to use: \n
-/// 0x00 : Equal (ordered, non-signaling)
-/// 0x01 : Less-than (ordered, signaling)
-/// 0x02 : Less-than-or-equal (ordered, signaling)
-/// 0x03 : Unordered (non-signaling)
-/// 0x04 : Not-equal (unordered, non-signaling)
-/// 0x05 : Not-less-than (unordered, signaling)
-/// 0x06 : Not-less-than-or-equal (unordered, signaling)
-/// 0x07 : Ordered (non-signaling)
-/// 0x08 : Equal (unordered, non-signaling)
-/// 0x09 : Not-greater-than-or-equal (unordered, signaling)
-/// 0x0a : Not-greater-than (unordered, signaling)
-/// 0x0b : False (ordered, non-signaling)
-/// 0x0c : Not-equal (ordered, non-signaling)
-/// 0x0d : Greater-than-or-equal (ordered, signaling)
-/// 0x0e : Greater-than (ordered, signaling)
-/// 0x0f : True (unordered, non-signaling)
-/// 0x10 : Equal (ordered, signaling)
-/// 0x11 : Less-than (ordered, non-signaling)
-/// 0x12 : Less-than-or-equal (ordered, non-signaling)
-/// 0x13 : Unordered (signaling)
-/// 0x14 : Not-equal (unordered, signaling)
-/// 0x15 : Not-less-than (unordered, non-signaling)
-/// 0x16 : Not-less-than-or-equal (unordered, non-signaling)
-/// 0x17 : Ordered (signaling)
-/// 0x18 : Equal (unordered, signaling)
-/// 0x19 : Not-greater-than-or-equal (unordered, non-signaling)
-/// 0x1a : Not-greater-than (unordered, non-signaling)
-/// 0x1b : False (ordered, signaling)
-/// 0x1c : Not-equal (ordered, signaling)
-/// 0x1d : Greater-than-or-equal (ordered, non-signaling)
-/// 0x1e : Greater-than (ordered, non-signaling)
-/// 0x1f : True (unordered, signaling)
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// 0x08: Equal (unordered, non-signaling) \n
+/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n
+/// 0x0A: Not-greater-than (unordered, signaling) \n
+/// 0x0B: False (ordered, non-signaling) \n
+/// 0x0C: Not-equal (ordered, non-signaling) \n
+/// 0x0D: Greater-than-or-equal (ordered, signaling) \n
+/// 0x0E: Greater-than (ordered, signaling) \n
+/// 0x0F: True (unordered, non-signaling) \n
+/// 0x10: Equal (ordered, signaling) \n
+/// 0x11: Less-than (ordered, non-signaling) \n
+/// 0x12: Less-than-or-equal (ordered, non-signaling) \n
+/// 0x13: Unordered (signaling) \n
+/// 0x14: Not-equal (unordered, signaling) \n
+/// 0x15: Not-less-than (unordered, non-signaling) \n
+/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n
+/// 0x17: Ordered (signaling) \n
+/// 0x18: Equal (unordered, signaling) \n
+/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
+/// 0x1A: Not-greater-than (unordered, non-signaling) \n
+/// 0x1B: False (ordered, signaling) \n
+/// 0x1C: Not-equal (ordered, signaling) \n
+/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n
+/// 0x1E: Greater-than (ordered, non-signaling) \n
+/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+#define _mm_cmp_ps(a, b, c) \
(__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (c)); })
+ (__v4sf)(__m128)(b), (c))
-/// \brief Compares each of the corresponding double-precision values of two
+/// Compares each of the corresponding double-precision values of two
/// 256-bit vectors of [4 x double], using the operation specified by the
/// immediate integer operand.
///
@@ -1785,44 +1743,44 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
/// operation to use: \n
-/// 0x00 : Equal (ordered, non-signaling)
-/// 0x01 : Less-than (ordered, signaling)
-/// 0x02 : Less-than-or-equal (ordered, signaling)
-/// 0x03 : Unordered (non-signaling)
-/// 0x04 : Not-equal (unordered, non-signaling)
-/// 0x05 : Not-less-than (unordered, signaling)
-/// 0x06 : Not-less-than-or-equal (unordered, signaling)
-/// 0x07 : Ordered (non-signaling)
-/// 0x08 : Equal (unordered, non-signaling)
-/// 0x09 : Not-greater-than-or-equal (unordered, signaling)
-/// 0x0a : Not-greater-than (unordered, signaling)
-/// 0x0b : False (ordered, non-signaling)
-/// 0x0c : Not-equal (ordered, non-signaling)
-/// 0x0d : Greater-than-or-equal (ordered, signaling)
-/// 0x0e : Greater-than (ordered, signaling)
-/// 0x0f : True (unordered, non-signaling)
-/// 0x10 : Equal (ordered, signaling)
-/// 0x11 : Less-than (ordered, non-signaling)
-/// 0x12 : Less-than-or-equal (ordered, non-signaling)
-/// 0x13 : Unordered (signaling)
-/// 0x14 : Not-equal (unordered, signaling)
-/// 0x15 : Not-less-than (unordered, non-signaling)
-/// 0x16 : Not-less-than-or-equal (unordered, non-signaling)
-/// 0x17 : Ordered (signaling)
-/// 0x18 : Equal (unordered, signaling)
-/// 0x19 : Not-greater-than-or-equal (unordered, non-signaling)
-/// 0x1a : Not-greater-than (unordered, non-signaling)
-/// 0x1b : False (ordered, signaling)
-/// 0x1c : Not-equal (ordered, signaling)
-/// 0x1d : Greater-than-or-equal (ordered, non-signaling)
-/// 0x1e : Greater-than (ordered, non-signaling)
-/// 0x1f : True (unordered, signaling)
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// 0x08: Equal (unordered, non-signaling) \n
+/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n
+/// 0x0A: Not-greater-than (unordered, signaling) \n
+/// 0x0B: False (ordered, non-signaling) \n
+/// 0x0C: Not-equal (ordered, non-signaling) \n
+/// 0x0D: Greater-than-or-equal (ordered, signaling) \n
+/// 0x0E: Greater-than (ordered, signaling) \n
+/// 0x0F: True (unordered, non-signaling) \n
+/// 0x10: Equal (ordered, signaling) \n
+/// 0x11: Less-than (ordered, non-signaling) \n
+/// 0x12: Less-than-or-equal (ordered, non-signaling) \n
+/// 0x13: Unordered (signaling) \n
+/// 0x14: Not-equal (unordered, signaling) \n
+/// 0x15: Not-less-than (unordered, non-signaling) \n
+/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n
+/// 0x17: Ordered (signaling) \n
+/// 0x18: Equal (unordered, signaling) \n
+/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
+/// 0x1A: Not-greater-than (unordered, non-signaling) \n
+/// 0x1B: False (ordered, signaling) \n
+/// 0x1C: Not-equal (ordered, signaling) \n
+/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n
+/// 0x1E: Greater-than (ordered, non-signaling) \n
+/// 0x1F: True (unordered, signaling)
/// \returns A 256-bit vector of [4 x double] containing the comparison results.
-#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+#define _mm256_cmp_pd(a, b, c) \
(__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
- (__v4df)(__m256d)(b), (c)); })
+ (__v4df)(__m256d)(b), (c))
-/// \brief Compares each of the corresponding values of two 256-bit vectors of
+/// Compares each of the corresponding values of two 256-bit vectors of
/// [8 x float], using the operation specified by the immediate integer
/// operand.
///
@@ -1845,44 +1803,44 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
/// operation to use: \n
-/// 0x00 : Equal (ordered, non-signaling)
-/// 0x01 : Less-than (ordered, signaling)
-/// 0x02 : Less-than-or-equal (ordered, signaling)
-/// 0x03 : Unordered (non-signaling)
-/// 0x04 : Not-equal (unordered, non-signaling)
-/// 0x05 : Not-less-than (unordered, signaling)
-/// 0x06 : Not-less-than-or-equal (unordered, signaling)
-/// 0x07 : Ordered (non-signaling)
-/// 0x08 : Equal (unordered, non-signaling)
-/// 0x09 : Not-greater-than-or-equal (unordered, signaling)
-/// 0x0a : Not-greater-than (unordered, signaling)
-/// 0x0b : False (ordered, non-signaling)
-/// 0x0c : Not-equal (ordered, non-signaling)
-/// 0x0d : Greater-than-or-equal (ordered, signaling)
-/// 0x0e : Greater-than (ordered, signaling)
-/// 0x0f : True (unordered, non-signaling)
-/// 0x10 : Equal (ordered, signaling)
-/// 0x11 : Less-than (ordered, non-signaling)
-/// 0x12 : Less-than-or-equal (ordered, non-signaling)
-/// 0x13 : Unordered (signaling)
-/// 0x14 : Not-equal (unordered, signaling)
-/// 0x15 : Not-less-than (unordered, non-signaling)
-/// 0x16 : Not-less-than-or-equal (unordered, non-signaling)
-/// 0x17 : Ordered (signaling)
-/// 0x18 : Equal (unordered, signaling)
-/// 0x19 : Not-greater-than-or-equal (unordered, non-signaling)
-/// 0x1a : Not-greater-than (unordered, non-signaling)
-/// 0x1b : False (ordered, signaling)
-/// 0x1c : Not-equal (ordered, signaling)
-/// 0x1d : Greater-than-or-equal (ordered, non-signaling)
-/// 0x1e : Greater-than (ordered, non-signaling)
-/// 0x1f : True (unordered, signaling)
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// 0x08: Equal (unordered, non-signaling) \n
+/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n
+/// 0x0A: Not-greater-than (unordered, signaling) \n
+/// 0x0B: False (ordered, non-signaling) \n
+/// 0x0C: Not-equal (ordered, non-signaling) \n
+/// 0x0D: Greater-than-or-equal (ordered, signaling) \n
+/// 0x0E: Greater-than (ordered, signaling) \n
+/// 0x0F: True (unordered, non-signaling) \n
+/// 0x10: Equal (ordered, signaling) \n
+/// 0x11: Less-than (ordered, non-signaling) \n
+/// 0x12: Less-than-or-equal (ordered, non-signaling) \n
+/// 0x13: Unordered (signaling) \n
+/// 0x14: Not-equal (unordered, signaling) \n
+/// 0x15: Not-less-than (unordered, non-signaling) \n
+/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n
+/// 0x17: Ordered (signaling) \n
+/// 0x18: Equal (unordered, signaling) \n
+/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
+/// 0x1A: Not-greater-than (unordered, non-signaling) \n
+/// 0x1B: False (ordered, signaling) \n
+/// 0x1C: Not-equal (ordered, signaling) \n
+/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n
+/// 0x1E: Greater-than (ordered, non-signaling) \n
+/// 0x1F: True (unordered, signaling)
/// \returns A 256-bit vector of [8 x float] containing the comparison results.
-#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+#define _mm256_cmp_ps(a, b, c) \
(__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
- (__v8sf)(__m256)(b), (c)); })
+ (__v8sf)(__m256)(b), (c))
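In practice the _CMP_* names above are passed rather than raw hex values, and the all-ones/all-zeros lanes the comparison produces are usually consumed by a movemask or a blend. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 a = _mm256_setr_ps(1, 5, 2, 6, 3, 7, 4, 8);
      __m256 b = _mm256_set1_ps(4.5f);
      /* Lanes where a < b become all-ones; movemask collects their
         sign bits. Elements 0, 2, 4, 6 qualify -> 0x55. */
      __m256 lt = _mm256_cmp_ps(a, b, _CMP_LT_OQ);
      printf("mask = 0x%02x\n", _mm256_movemask_ps(lt));
      return 0;
    }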
-/// \brief Compares each of the corresponding scalar double-precision values of
+/// Compares each of the corresponding scalar double-precision values of
/// two 128-bit vectors of [2 x double], using the operation specified by the
/// immediate integer operand.
///
@@ -1904,44 +1862,44 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
/// operation to use: \n
-/// 0x00 : Equal (ordered, non-signaling)
-/// 0x01 : Less-than (ordered, signaling)
-/// 0x02 : Less-than-or-equal (ordered, signaling)
-/// 0x03 : Unordered (non-signaling)
-/// 0x04 : Not-equal (unordered, non-signaling)
-/// 0x05 : Not-less-than (unordered, signaling)
-/// 0x06 : Not-less-than-or-equal (unordered, signaling)
-/// 0x07 : Ordered (non-signaling)
-/// 0x08 : Equal (unordered, non-signaling)
-/// 0x09 : Not-greater-than-or-equal (unordered, signaling)
-/// 0x0a : Not-greater-than (unordered, signaling)
-/// 0x0b : False (ordered, non-signaling)
-/// 0x0c : Not-equal (ordered, non-signaling)
-/// 0x0d : Greater-than-or-equal (ordered, signaling)
-/// 0x0e : Greater-than (ordered, signaling)
-/// 0x0f : True (unordered, non-signaling)
-/// 0x10 : Equal (ordered, signaling)
-/// 0x11 : Less-than (ordered, non-signaling)
-/// 0x12 : Less-than-or-equal (ordered, non-signaling)
-/// 0x13 : Unordered (signaling)
-/// 0x14 : Not-equal (unordered, signaling)
-/// 0x15 : Not-less-than (unordered, non-signaling)
-/// 0x16 : Not-less-than-or-equal (unordered, non-signaling)
-/// 0x17 : Ordered (signaling)
-/// 0x18 : Equal (unordered, signaling)
-/// 0x19 : Not-greater-than-or-equal (unordered, non-signaling)
-/// 0x1a : Not-greater-than (unordered, non-signaling)
-/// 0x1b : False (ordered, signaling)
-/// 0x1c : Not-equal (ordered, signaling)
-/// 0x1d : Greater-than-or-equal (ordered, non-signaling)
-/// 0x1e : Greater-than (ordered, non-signaling)
-/// 0x1f : True (unordered, signaling)
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// 0x08: Equal (unordered, non-signaling) \n
+/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n
+/// 0x0A: Not-greater-than (unordered, signaling) \n
+/// 0x0B: False (ordered, non-signaling) \n
+/// 0x0C: Not-equal (ordered, non-signaling) \n
+/// 0x0D: Greater-than-or-equal (ordered, signaling) \n
+/// 0x0E: Greater-than (ordered, signaling) \n
+/// 0x0F: True (unordered, non-signaling) \n
+/// 0x10: Equal (ordered, signaling) \n
+/// 0x11: Less-than (ordered, non-signaling) \n
+/// 0x12: Less-than-or-equal (ordered, non-signaling) \n
+/// 0x13: Unordered (signaling) \n
+/// 0x14: Not-equal (unordered, signaling) \n
+/// 0x15: Not-less-than (unordered, non-signaling) \n
+/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n
+/// 0x17: Ordered (signaling) \n
+/// 0x18: Equal (unordered, signaling) \n
+/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
+/// 0x1A: Not-greater-than (unordered, non-signaling) \n
+/// 0x1B: False (ordered, signaling) \n
+/// 0x1C: Not-equal (ordered, signaling) \n
+/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n
+/// 0x1E: Greater-than (ordered, non-signaling) \n
+/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
-#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+#define _mm_cmp_sd(a, b, c) \
(__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (c)); })
+ (__v2df)(__m128d)(b), (c))
-/// \brief Compares each of the corresponding scalar values of two 128-bit
+/// Compares each of the corresponding scalar values of two 128-bit
/// vectors of [4 x float], using the operation specified by the immediate
/// integer operand.
///
@@ -1963,44 +1921,44 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
/// operation to use: \n
-/// 0x00 : Equal (ordered, non-signaling)
-/// 0x01 : Less-than (ordered, signaling)
-/// 0x02 : Less-than-or-equal (ordered, signaling)
-/// 0x03 : Unordered (non-signaling)
-/// 0x04 : Not-equal (unordered, non-signaling)
-/// 0x05 : Not-less-than (unordered, signaling)
-/// 0x06 : Not-less-than-or-equal (unordered, signaling)
-/// 0x07 : Ordered (non-signaling)
-/// 0x08 : Equal (unordered, non-signaling)
-/// 0x09 : Not-greater-than-or-equal (unordered, signaling)
-/// 0x0a : Not-greater-than (unordered, signaling)
-/// 0x0b : False (ordered, non-signaling)
-/// 0x0c : Not-equal (ordered, non-signaling)
-/// 0x0d : Greater-than-or-equal (ordered, signaling)
-/// 0x0e : Greater-than (ordered, signaling)
-/// 0x0f : True (unordered, non-signaling)
-/// 0x10 : Equal (ordered, signaling)
-/// 0x11 : Less-than (ordered, non-signaling)
-/// 0x12 : Less-than-or-equal (ordered, non-signaling)
-/// 0x13 : Unordered (signaling)
-/// 0x14 : Not-equal (unordered, signaling)
-/// 0x15 : Not-less-than (unordered, non-signaling)
-/// 0x16 : Not-less-than-or-equal (unordered, non-signaling)
-/// 0x17 : Ordered (signaling)
-/// 0x18 : Equal (unordered, signaling)
-/// 0x19 : Not-greater-than-or-equal (unordered, non-signaling)
-/// 0x1a : Not-greater-than (unordered, non-signaling)
-/// 0x1b : False (ordered, signaling)
-/// 0x1c : Not-equal (ordered, signaling)
-/// 0x1d : Greater-than-or-equal (ordered, non-signaling)
-/// 0x1e : Greater-than (ordered, non-signaling)
-/// 0x1f : True (unordered, signaling)
+/// 0x00: Equal (ordered, non-signaling) \n
+/// 0x01: Less-than (ordered, signaling) \n
+/// 0x02: Less-than-or-equal (ordered, signaling) \n
+/// 0x03: Unordered (non-signaling) \n
+/// 0x04: Not-equal (unordered, non-signaling) \n
+/// 0x05: Not-less-than (unordered, signaling) \n
+/// 0x06: Not-less-than-or-equal (unordered, signaling) \n
+/// 0x07: Ordered (non-signaling) \n
+/// 0x08: Equal (unordered, non-signaling) \n
+/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n
+/// 0x0A: Not-greater-than (unordered, signaling) \n
+/// 0x0B: False (ordered, non-signaling) \n
+/// 0x0C: Not-equal (ordered, non-signaling) \n
+/// 0x0D: Greater-than-or-equal (ordered, signaling) \n
+/// 0x0E: Greater-than (ordered, signaling) \n
+/// 0x0F: True (unordered, non-signaling) \n
+/// 0x10: Equal (ordered, signaling) \n
+/// 0x11: Less-than (ordered, non-signaling) \n
+/// 0x12: Less-than-or-equal (ordered, non-signaling) \n
+/// 0x13: Unordered (signaling) \n
+/// 0x14: Not-equal (unordered, signaling) \n
+/// 0x15: Not-less-than (unordered, non-signaling) \n
+/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n
+/// 0x17: Ordered (signaling) \n
+/// 0x18: Equal (unordered, signaling) \n
+/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n
+/// 0x1A: Not-greater-than (unordered, non-signaling) \n
+/// 0x1B: False (ordered, signaling) \n
+/// 0x1C: Not-equal (ordered, signaling) \n
+/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n
+/// 0x1E: Greater-than (ordered, non-signaling) \n
+/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
-#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+#define _mm_cmp_ss(a, b, c) \
(__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (c)); })
+ (__v4sf)(__m128)(b), (c))
-/// \brief Takes a [8 x i32] vector and returns the vector element value
+/// Takes an [8 x i32] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
@@ -2015,14 +1973,10 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 32 bits of extended
/// packed data.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_extract_epi32(__m256i __a, const int __imm)
-{
- __v8si __b = (__v8si)__a;
- return __b[__imm & 7];
-}
+#define _mm256_extract_epi32(X, N) \
+ (int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N))
-/// \brief Takes a [16 x i16] vector and returns the vector element value
+/// Takes a [16 x i16] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
@@ -2037,14 +1991,11 @@ _mm256_extract_epi32(__m256i __a, const int __imm)
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 16 bits of zero extended
/// packed data.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_extract_epi16(__m256i __a, const int __imm)
-{
- __v16hi __b = (__v16hi)__a;
- return (unsigned short)__b[__imm & 15];
-}
+#define _mm256_extract_epi16(X, N) \
+ (int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \
+ (int)(N))
-/// \brief Takes a [32 x i8] vector and returns the vector element value
+/// Takes a [32 x i8] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
@@ -2059,15 +2010,12 @@ _mm256_extract_epi16(__m256i __a, const int __imm)
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 8 bits of zero extended
/// packed data.
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_extract_epi8(__m256i __a, const int __imm)
-{
- __v32qi __b = (__v32qi)__a;
- return (unsigned char)__b[__imm & 31];
-}
+#define _mm256_extract_epi8(X, N) \
+ (int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \
+ (int)(N))
#ifdef __x86_64__
-/// \brief Takes a [4 x i64] vector and returns the vector element value
+/// Takes a [4 x i64] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
@@ -2082,15 +2030,11 @@ _mm256_extract_epi8(__m256i __a, const int __imm)
/// element is extracted and returned.
/// \returns A 64-bit integer containing the extracted 64 bits of extended
/// packed data.
-static __inline long long __DEFAULT_FN_ATTRS
-_mm256_extract_epi64(__m256i __a, const int __imm)
-{
- __v4di __b = (__v4di)__a;
- return __b[__imm & 3];
-}
+#define _mm256_extract_epi64(X, N) \
+ (long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N))
#endif
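Converting these extractors from inline functions to macros makes the constraint explicit: the index must now be an integer constant expression, since it reaches the builtin as an immediate. A caller-side sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256i v = _mm256_setr_epi32(10, 11, 12, 13, 14, 15, 16, 17);
      /* The index must be a compile-time constant. */
      printf("%d\n", _mm256_extract_epi32(v, 5));   /* 15 */
      return 0;
    }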
-/// \brief Takes a [8 x i32] vector and replaces the vector element value
+/// Takes an [8 x i32] vector and replaces the vector element value
/// indexed by the immediate constant operand by a new value. Returns the
/// modified vector.
///
@@ -2108,16 +2052,12 @@ _mm256_extract_epi64(__m256i __a, const int __imm)
/// replaced.
/// \returns A copy of vector \a __a, after replacing its element indexed by
/// \a __imm with \a __b.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
-{
- __v8si __c = (__v8si)__a;
- __c[__imm & 7] = __b;
- return (__m256i)__c;
-}
+#define _mm256_insert_epi32(X, I, N) \
+ (__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
+ (int)(I), (int)(N))
-/// \brief Takes a [16 x i16] vector and replaces the vector element value
+/// Takes a [16 x i16] vector and replaces the vector element value
/// indexed by the immediate constant operand with a new value. Returns the
/// modified vector.
///
@@ -2135,15 +2075,11 @@ _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
/// replaced.
/// \returns A copy of vector \a __a, after replacing its element indexed by
/// \a __imm with \a __b.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
-{
- __v16hi __c = (__v16hi)__a;
- __c[__imm & 15] = __b;
- return (__m256i)__c;
-}
+#define _mm256_insert_epi16(X, I, N) \
+ (__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
+ (int)(I), (int)(N))
-/// \brief Takes a [32 x i8] vector and replaces the vector element value
+/// Takes a [32 x i8] vector and replaces the vector element value
/// indexed by the immediate constant operand with a new value. Returns the
/// modified vector.
///
@@ -2161,16 +2097,12 @@ _mm256_insert_epi16(__m256i __a, int __b, int const __imm)
/// replaced.
/// \returns A copy of vector \a __a, after replacing its element indexed by
/// \a __imm with \a __b.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
-{
- __v32qi __c = (__v32qi)__a;
- __c[__imm & 31] = __b;
- return (__m256i)__c;
-}
+#define _mm256_insert_epi8(X, I, N) \
+ (__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
+ (int)(I), (int)(N))
#ifdef __x86_64__
-/// \brief Takes a [4 x i64] vector and replaces the vector element value
+/// Takes a [4 x i64] vector and replaces the vector element value
/// indexed by the immediate constant operand with a new value. Returns the
/// modified vector.
///
@@ -2188,17 +2120,13 @@ _mm256_insert_epi8(__m256i __a, int __b, int const __imm)
/// replaced.
/// \returns A copy of vector \a __a, after replacing its element indexed by
/// \a __imm with \a __b.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
-{
- __v4di __c = (__v4di)__a;
- __c[__imm & 3] = __b;
- return (__m256i)__c;
-}
+#define _mm256_insert_epi64(X, I, N) \
+ (__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
+ (long long)(I), (int)(N))
#endif
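The insert macros follow the same pattern: a compile-time index and a returned copy, with the source vector left untouched. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256i v = _mm256_setzero_si256();
      v = _mm256_insert_epi32(v, 42, 3);            /* element 3 = 42 */
      printf("%d\n", _mm256_extract_epi32(v, 3));   /* 42 */
      return 0;
    }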
/* Conversion */
-/// \brief Converts a vector of [4 x i32] into a vector of [4 x double].
+/// Converts a vector of [4 x i32] into a vector of [4 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -2213,7 +2141,7 @@ _mm256_cvtepi32_pd(__m128i __a)
return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);
}
-/// \brief Converts a vector of [8 x i32] into a vector of [8 x float].
+/// Converts a vector of [8 x i32] into a vector of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -2225,10 +2153,10 @@ _mm256_cvtepi32_pd(__m128i __a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_cvtepi32_ps(__m256i __a)
{
- return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
+ return (__m256)__builtin_convertvector((__v8si)__a, __v8sf);
}
-/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of
+/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of
/// [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2244,7 +2172,7 @@ _mm256_cvtpd_ps(__m256d __a)
return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}
-/// \brief Converts a vector of [8 x float] into a vector of [8 x i32].
+/// Converts a vector of [8 x float] into a vector of [8 x i32].
///
/// \headerfile <x86intrin.h>
///
@@ -2259,7 +2187,7 @@ _mm256_cvtps_epi32(__m256 __a)
return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}
-/// \brief Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4
+/// Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4
/// x double].
///
/// \headerfile <x86intrin.h>
@@ -2275,7 +2203,7 @@ _mm256_cvtps_pd(__m128 __a)
return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
}
-/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
+/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
/// x i32], truncating the result by rounding towards zero when it is
/// inexact.
///
@@ -2292,7 +2220,7 @@ _mm256_cvttpd_epi32(__m256d __a)
return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}
-/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
+/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4
/// x i32]. When a conversion is inexact, the value returned is rounded
/// according to the rounding control bits in the MXCSR register.
///
@@ -2309,7 +2237,7 @@ _mm256_cvtpd_epi32(__m256d __a)
return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}
-/// \brief Converts a vector of [8 x float] into a vector of [8 x i32],
+/// Converts a vector of [8 x float] into a vector of [8 x i32],
/// truncating the result by rounding towards zero when it is inexact.
///
/// \headerfile <x86intrin.h>
@@ -2325,7 +2253,7 @@ _mm256_cvttps_epi32(__m256 __a)
return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}
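The difference between the cvt and cvtt forms only shows on inexact inputs: cvt rounds under the current MXCSR mode (round-to-nearest-even by default), while cvtt always truncates toward zero. A sketch (assumes -mavx and default MXCSR):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 v = _mm256_set1_ps(2.7f);
      __m256i rounded   = _mm256_cvtps_epi32(v);    /* -> 3 (nearest) */
      __m256i truncated = _mm256_cvttps_epi32(v);   /* -> 2 (toward 0) */
      printf("%d %d\n", _mm256_extract_epi32(rounded, 0),
                        _mm256_extract_epi32(truncated, 0));
      return 0;
    }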
-/// \brief Returns the first element of the input vector of [4 x double].
+/// Returns the first element of the input vector of [4 x double].
///
/// \headerfile <avxintrin.h>
///
@@ -2341,7 +2269,7 @@ _mm256_cvtsd_f64(__m256d __a)
return __a[0];
}
-/// \brief Returns the first element of the input vector of [8 x i32].
+/// Returns the first element of the input vector of [8 x i32].
///
/// \headerfile <avxintrin.h>
///
@@ -2358,7 +2286,7 @@ _mm256_cvtsi256_si32(__m256i __a)
return __b[0];
}
-/// \brief Returns the first element of the input vector of [8 x float].
+/// Returns the first element of the input vector of [8 x float].
///
/// \headerfile <avxintrin.h>
///
@@ -2375,9 +2303,8 @@ _mm256_cvtss_f32(__m256 __a)
}
/* Vector replicate */
-/// \brief Moves and duplicates high-order (odd-indexed) values from a 256-bit
-/// vector of [8 x float] to float values in a 256-bit vector of
-/// [8 x float].
+/// Moves and duplicates odd-indexed values from a 256-bit vector of
+/// [8 x float] to float values in a 256-bit vector of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -2401,8 +2328,8 @@ _mm256_movehdup_ps(__m256 __a)
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
}
-/// \brief Moves and duplicates low-order (even-indexed) values from a 256-bit
-/// vector of [8 x float] to float values in a 256-bit vector of [8 x float].
+/// Moves and duplicates even-indexed values from a 256-bit vector of
+/// [8 x float] to float values in a 256-bit vector of [8 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -2426,7 +2353,7 @@ _mm256_moveldup_ps(__m256 __a)
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
}
-/// \brief Moves and duplicates double-precision floating point values from a
+/// Moves and duplicates double-precision floating point values from a
/// 256-bit vector of [4 x double] to double-precision values in a 256-bit
/// vector of [4 x double].
///
@@ -2449,7 +2376,7 @@ _mm256_movedup_pd(__m256d __a)
}
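Seen side by side, the two single-precision duplications are mirror images: one replicates the odd-indexed elements downward, the other the even-indexed elements upward. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 v = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
      float hi[8], lo[8];
      _mm256_storeu_ps(hi, _mm256_movehdup_ps(v)); /* 1 1 3 3 5 5 7 7 */
      _mm256_storeu_ps(lo, _mm256_moveldup_ps(v)); /* 0 0 2 2 4 4 6 6 */
      printf("%g %g %g %g\n", hi[0], hi[1], lo[0], lo[1]);
      return 0;
    }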
/* Unpack and Interleave */
-/// \brief Unpacks the odd-indexed vector elements from two 256-bit vectors of
+/// Unpacks the odd-indexed vector elements from two 256-bit vectors of
/// [4 x double] and interleaves them into a 256-bit vector of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -2471,7 +2398,7 @@ _mm256_unpackhi_pd(__m256d __a, __m256d __b)
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
}
-/// \brief Unpacks the even-indexed vector elements from two 256-bit vectors of
+/// Unpacks the even-indexed vector elements from two 256-bit vectors of
/// [4 x double] and interleaves them into a 256-bit vector of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -2493,7 +2420,7 @@ _mm256_unpacklo_pd(__m256d __a, __m256d __b)
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
}
-/// \brief Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the
+/// Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the
/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit
/// vector of [8 x float].
///
@@ -2520,7 +2447,7 @@ _mm256_unpackhi_ps(__m256 __a, __m256 __b)
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}
-/// \brief Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the
+/// Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the
/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit
/// vector of [8 x float].
///
@@ -2548,7 +2475,7 @@ _mm256_unpacklo_ps(__m256 __a, __m256 __b)
}
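As the shufflevector indices above show, the AVX unpacks interleave within each 128-bit lane; no element crosses halves. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d a = _mm256_setr_pd(0, 1, 2, 3);
      __m256d b = _mm256_setr_pd(10, 11, 12, 13);
      double hi[4], lo[4];
      _mm256_storeu_pd(hi, _mm256_unpackhi_pd(a, b)); /* 1 11 3 13 */
      _mm256_storeu_pd(lo, _mm256_unpacklo_pd(a, b)); /* 0 10 2 12 */
      printf("%g %g %g %g\n", lo[0], lo[1], lo[2], lo[3]);
      return 0;
    }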
/* Bit Test */
-/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an
+/// Given two 128-bit floating-point vectors of [2 x double], perform an
/// element-by-element comparison of the double-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2571,13 +2498,13 @@ _mm256_unpacklo_ps(__m256 __a, __m256 __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns the ZF flag in the EFLAGS register.
-static __inline int __DEFAULT_FN_ATTRS
+static __inline int __DEFAULT_FN_ATTRS128
_mm_testz_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an
+/// Given two 128-bit floating-point vectors of [2 x double], perform an
/// element-by-element comparison of the double-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2600,13 +2527,13 @@ _mm_testz_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns the CF flag in the EFLAGS register.
-static __inline int __DEFAULT_FN_ATTRS
+static __inline int __DEFAULT_FN_ATTRS128
_mm_testc_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an
+/// Given two 128-bit floating-point vectors of [2 x double], perform an
/// element-by-element comparison of the double-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2630,13 +2557,13 @@ _mm_testc_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS
+static __inline int __DEFAULT_FN_ATTRS128
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an
+/// Given two 128-bit floating-point vectors of [4 x float], perform an
/// element-by-element comparison of the single-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2659,13 +2586,13 @@ _mm_testnzc_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [4 x float].
/// \returns the ZF flag.
-static __inline int __DEFAULT_FN_ATTRS
+static __inline int __DEFAULT_FN_ATTRS128
_mm_testz_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an
+/// Given two 128-bit floating-point vectors of [4 x float], perform an
/// element-by-element comparison of the single-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2688,13 +2615,13 @@ _mm_testz_ps(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float].
/// \returns the CF flag.
-static __inline int __DEFAULT_FN_ATTRS
+static __inline int __DEFAULT_FN_ATTRS128
_mm_testc_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an
+/// Given two 128-bit floating-point vectors of [4 x float], perform an
/// element-by-element comparison of the single-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2718,13 +2645,13 @@ _mm_testc_ps(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float].
/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
-static __inline int __DEFAULT_FN_ATTRS
+static __inline int __DEFAULT_FN_ATTRS128
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an
+/// Given two 256-bit floating-point vectors of [4 x double], perform an
/// element-by-element comparison of the double-precision elements in the
/// first source vector and the corresponding elements in the second source
/// vector.
@@ -2753,7 +2680,7 @@ _mm256_testz_pd(__m256d __a, __m256d __b)
return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an
+/// Given two 256-bit floating-point vectors of [4 x double], perform an
/// element-by-element comparison of the double-precision elements in the
/// first source vector and the corresponding elements in the second source
/// vector.
@@ -2782,7 +2709,7 @@ _mm256_testc_pd(__m256d __a, __m256d __b)
return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an
+/// Given two 256-bit floating-point vectors of [4 x double], perform an
/// element-by-element comparison of the double-precision elements in the
/// first source vector and the corresponding elements in the second source
/// vector.
@@ -2812,7 +2739,7 @@ _mm256_testnzc_pd(__m256d __a, __m256d __b)
return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}
-/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an
+/// Given two 256-bit floating-point vectors of [8 x float], perform an
/// element-by-element comparison of the single-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2841,7 +2768,7 @@ _mm256_testz_ps(__m256 __a, __m256 __b)
return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an
+/// Given two 256-bit floating-point vectors of [8 x float], perform an
/// element-by-element comparison of the single-precision element in the
/// first source vector and the corresponding element in the second source
/// vector.
@@ -2870,7 +2797,7 @@ _mm256_testc_ps(__m256 __a, __m256 __b)
return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an
+/// Given two 256-bit floating-point vectors of [8 x float], perform an
/// element-by-element comparison of the single-precision elements in the
/// first source vector and the corresponding elements in the second source
/// vector.
@@ -2900,7 +2827,7 @@ _mm256_testnzc_ps(__m256 __a, __m256 __b)
return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}
-/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison
+/// Given two 256-bit integer vectors, perform a bit-by-bit comparison
/// of the two source vectors.
///
/// The EFLAGS register is updated as follows: \n
@@ -2926,7 +2853,7 @@ _mm256_testz_si256(__m256i __a, __m256i __b)
return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}
-/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison
+/// Given two 256-bit integer vectors, perform a bit-by-bit comparison
/// of the two source vectors.
///
/// The EFLAGS register is updated as follows: \n
@@ -2952,7 +2879,7 @@ _mm256_testc_si256(__m256i __a, __m256i __b)
return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}
-/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison
+/// Given two 256-bit integer vectors, perform a bit-by-bit comparison
/// of the two source vectors.
///
/// The EFLAGS register is updated as follows: \n
@@ -2980,7 +2907,7 @@ _mm256_testnzc_si256(__m256i __a, __m256i __b)
}
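The three test intrinsics are different reads of one VPTEST: testz reports ZF (a AND b all zero), testc reports CF (NOT a AND b all zero), and testnzc reports that both flags are clear. A sketch (assumes -mavx):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256i zero = _mm256_setzero_si256();
      __m256i ones = _mm256_set1_epi32(-1);
      printf("%d %d\n",
             _mm256_testz_si256(zero, ones),   /* 1: zero & ones == 0 */
             _mm256_testc_si256(ones, ones));  /* 1: ~ones & ones == 0 */
      return 0;
    }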
/* Vector extract sign mask */
-/// \brief Extracts the sign bits of double-precision floating point elements
+/// Extracts the sign bits of double-precision floating point elements
/// in a 256-bit vector of [4 x double] and writes them to the lower order
/// bits of the return value.
///
@@ -2998,7 +2925,7 @@ _mm256_movemask_pd(__m256d __a)
return __builtin_ia32_movmskpd256((__v4df)__a);
}
-/// \brief Extracts the sign bits of double-precision floating point elements
+/// Extracts the sign bits of single-precision floating point elements
/// in a 256-bit vector of [8 x float] and writes them to the lower order
/// bits of the return value.
///
@@ -3007,7 +2934,7 @@ _mm256_movemask_pd(__m256d __a)
/// This intrinsic corresponds to the <c> VMOVMSKPS </c> instruction.
///
/// \param __a
-/// A 256-bit vector of [8 x float] containing the double-precision floating
+/// A 256-bit vector of [8 x float] containing the single-precision floating
/// point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [7:0].
static __inline int __DEFAULT_FN_ATTRS
@@ -3017,30 +2944,30 @@ _mm256_movemask_ps(__m256 __a)
}
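A movemask sketch (assumes -mavx); the result packs one sign bit per element into the low bits of an int:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d v = _mm256_setr_pd(-1.0, 2.0, -3.0, 4.0);
      /* Sign bits of elements 0 and 2 are set -> 0b0101. */
      printf("0x%x\n", _mm256_movemask_pd(v));   /* 0x5 */
      return 0;
    }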
/* Vector __zero */
-/// \brief Zeroes the contents of all XMM or YMM registers.
+/// Zeroes the contents of all XMM or YMM registers.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VZEROALL </c> instruction.
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
_mm256_zeroall(void)
{
__builtin_ia32_vzeroall();
}
-/// \brief Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
+/// Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VZEROUPPER </c> instruction.
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
_mm256_zeroupper(void)
{
__builtin_ia32_vzeroupper();
}
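These two intrinsics are about the AVX-to-SSE transition penalty rather than data movement: clearing the dirty upper YMM state before entering code compiled without AVX avoids the stall. A sketch of the calling pattern (the legacy callee is a hypothetical stand-in):

    #include <immintrin.h>

    /* Stand-in for a routine built without -mavx (hypothetical). */
    static void legacy_sse_kernel(void) {}

    int main(void) {
      __m256 acc = _mm256_setzero_ps();
      (void)acc;               /* ... AVX computation here ... */
      _mm256_zeroupper();      /* clear dirty upper YMM state first */
      legacy_sse_kernel();
      return 0;
    }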
/* Vector load with broadcast */
-/// \brief Loads a scalar single-precision floating point value from the
+/// Loads a scalar single-precision floating point value from the
/// specified address pointed to by \a __a and broadcasts it to the elements
/// of a [4 x float] vector.
///
@@ -3052,14 +2979,14 @@ _mm256_zeroupper(void)
/// The single-precision floating point value to be broadcast.
/// \returns A 128-bit vector of [4 x float] whose 32-bit elements are set
/// equal to the broadcast value.
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_broadcast_ss(float const *__a)
{
float __f = *__a;
- return (__m128)(__v4sf){ __f, __f, __f, __f };
+ return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
}
-/// \brief Loads a scalar double-precision floating point value from the
+/// Loads a scalar double-precision floating point value from the
/// specified address pointed to by \a __a and broadcasts it to the elements
/// of a [4 x double] vector.
///
@@ -3075,10 +3002,10 @@ static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
double __d = *__a;
- return (__m256d)(__v4df){ __d, __d, __d, __d };
+ return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
}
-/// \brief Loads a scalar single-precision floating point value from the
+/// Loads a scalar single-precision floating point value from the
/// specified address pointed to by \a __a and broadcasts it to the elements
/// of a [8 x float] vector.
///
@@ -3094,10 +3021,10 @@ static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
float __f = *__a;
- return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
+ return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
-/// \brief Loads the data from a 128-bit vector of [2 x double] from the
+/// Loads the data from a 128-bit vector of [2 x double] from the
/// specified address pointed to by \a __a and broadcasts it to 128-bit
/// elements in a 256-bit vector of [4 x double].
///
@@ -3112,10 +3039,12 @@ _mm256_broadcast_ss(float const *__a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_pd(__m128d const *__a)
{
- return (__m256d)__builtin_ia32_vbroadcastf128_pd256((__v2df const *)__a);
+ __m128d __b = _mm_loadu_pd((const double *)__a);
+ return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b,
+ 0, 1, 0, 1);
}
-/// \brief Loads the data from a 128-bit vector of [4 x float] from the
+/// Loads the data from a 128-bit vector of [4 x float] from the
/// specified address pointed to by \a __a and broadcasts it to 128-bit
/// elements in a 256-bit vector of [8 x float].
///
@@ -3130,11 +3059,13 @@ _mm256_broadcast_pd(__m128d const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ps(__m128 const *__a)
{
- return (__m256)__builtin_ia32_vbroadcastf128_ps256((__v4sf const *)__a);
+ __m128 __b = _mm_loadu_ps((const float *)__a);
+ return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b,
+ 0, 1, 2, 3, 0, 1, 2, 3);
}
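A broadcast sketch (assumes -mavx); one scalar load feeds all eight lanes:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      float f = 3.5f;
      __m256 v = _mm256_broadcast_ss(&f);   /* eight copies of 3.5 */
      float out[8];
      _mm256_storeu_ps(out, v);
      printf("%g ... %g\n", out[0], out[7]);
      return 0;
    }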
/* SIMD load ops */
-/// \brief Loads 4 double-precision floating point values from a 32-byte aligned
+/// Loads 4 double-precision floating point values from a 32-byte aligned
/// memory location pointed to by \a __p into a vector of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -3151,7 +3082,7 @@ _mm256_load_pd(double const *__p)
return *(__m256d *)__p;
}
-/// \brief Loads 8 single-precision floating point values from a 32-byte aligned
+/// Loads 8 single-precision floating point values from a 32-byte aligned
/// memory location pointed to by \a __p into a vector of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -3167,7 +3098,7 @@ _mm256_load_ps(float const *__p)
return *(__m256 *)__p;
}
-/// \brief Loads 4 double-precision floating point values from an unaligned
+/// Loads 4 double-precision floating point values from an unaligned
/// memory location pointed to by \a __p into a vector of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -3187,7 +3118,7 @@ _mm256_loadu_pd(double const *__p)
return ((struct __loadu_pd*)__p)->__v;
}
-/// \brief Loads 8 single-precision floating point values from an unaligned
+/// Loads 8 single-precision floating point values from an unaligned
/// memory location pointed to by \a __p into a vector of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -3207,7 +3138,7 @@ _mm256_loadu_ps(float const *__p)
return ((struct __loadu_ps*)__p)->__v;
}
-/// \brief Loads 256 bits of integer data from a 32-byte aligned memory
+/// Loads 256 bits of integer data from a 32-byte aligned memory
/// location pointed to by \a __p into elements of a 256-bit integer vector.
///
/// \headerfile <x86intrin.h>
@@ -3224,7 +3155,7 @@ _mm256_load_si256(__m256i const *__p)
return *__p;
}
-/// \brief Loads 256 bits of integer data from an unaligned memory location
+/// Loads 256 bits of integer data from an unaligned memory location
/// pointed to by \a __p into a 256-bit integer vector.
///
/// \headerfile <x86intrin.h>
@@ -3243,7 +3174,7 @@ _mm256_loadu_si256(__m256i const *__p)
return ((struct __loadu_si256*)__p)->__v;
}
-/// \brief Loads 256 bits of integer data from an unaligned memory location
+/// Loads 256 bits of integer data from an unaligned memory location
/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may
/// perform better than \c _mm256_loadu_si256 when the data crosses a cache
/// line boundary.
@@ -3262,7 +3193,7 @@ _mm256_lddqu_si256(__m256i const *__p)
}
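The aligned and unaligned load variants differ only in their alignment contract, and _mm256_lddqu_si256 has the same contract as _mm256_loadu_si256 plus a cache-line-split hint. A short sketch of the distinction (names illustrative):

    #include <immintrin.h>

    /* p_aligned must be 32-byte aligned; p_any may have any alignment. */
    static __m256d sum_quads(const double *p_aligned, const double *p_any) {
      __m256d a = _mm256_load_pd(p_aligned);
      __m256d b = _mm256_loadu_pd(p_any);
      return _mm256_add_pd(a, b);
    }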
/* SIMD store ops */
-/// \brief Stores double-precision floating point values from a 256-bit vector
+/// Stores double-precision floating point values from a 256-bit vector
/// of [4 x double] to a 32-byte aligned memory location pointed to by
/// \a __p.
///
@@ -3281,7 +3212,7 @@ _mm256_store_pd(double *__p, __m256d __a)
*(__m256d *)__p = __a;
}
-/// \brief Stores single-precision floating point values from a 256-bit vector
+/// Stores single-precision floating point values from a 256-bit vector
/// of [8 x float] to a 32-byte aligned memory location pointed to by \a __p.
///
/// \headerfile <x86intrin.h>
@@ -3299,7 +3230,7 @@ _mm256_store_ps(float *__p, __m256 __a)
*(__m256 *)__p = __a;
}
-/// \brief Stores double-precision floating point values from a 256-bit vector
+/// Stores double-precision floating point values from a 256-bit vector
/// of [4 x double] to an unaligned memory location pointed to by \a __p.
///
/// \headerfile <x86intrin.h>
@@ -3320,7 +3251,7 @@ _mm256_storeu_pd(double *__p, __m256d __a)
((struct __storeu_pd*)__p)->__v = __a;
}
-/// \brief Stores single-precision floating point values from a 256-bit vector
+/// Stores single-precision floating point values from a 256-bit vector
/// of [8 x float] to an unaligned memory location pointed to by \a __p.
///
/// \headerfile <x86intrin.h>
@@ -3340,7 +3271,7 @@ _mm256_storeu_ps(float *__p, __m256 __a)
((struct __storeu_ps*)__p)->__v = __a;
}
-/// \brief Stores integer values from a 256-bit integer vector to a 32-byte
+/// Stores integer values from a 256-bit integer vector to a 32-byte
/// aligned memory location pointed to by \a __p.
///
/// \headerfile <x86intrin.h>
@@ -3358,7 +3289,7 @@ _mm256_store_si256(__m256i *__p, __m256i __a)
*__p = __a;
}
-/// \brief Stores integer values from a 256-bit integer vector to an unaligned
+/// Stores integer values from a 256-bit integer vector to an unaligned
/// memory location pointed to by \a __p.
///
/// \headerfile <x86intrin.h>
@@ -3379,7 +3310,7 @@ _mm256_storeu_si256(__m256i *__p, __m256i __a)
}
/* Conditional load ops */
-/// \brief Conditionally loads double-precision floating point elements from a
+/// Conditionally loads double-precision floating point elements from a
/// memory location pointed to by \a __p into a 128-bit vector of
/// [2 x double], depending on the mask bits associated with each data
/// element.
@@ -3397,13 +3328,13 @@ _mm256_storeu_si256(__m256i *__p, __m256i __a)
/// corresponding value in the memory location is not loaded and the
/// corresponding field in the return value is set to zero.
/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline __m128d __DEFAULT_FN_ATTRS
+static __inline __m128d __DEFAULT_FN_ATTRS128
_mm_maskload_pd(double const *__p, __m128i __m)
{
return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
}
-/// \brief Conditionally loads double-precision floating point elements from a
+/// Conditionally loads double-precision floating point elements from a
/// memory location pointed to by \a __p into a 256-bit vector of
/// [4 x double], depending on the mask bits associated with each data
/// element.
@@ -3428,7 +3359,7 @@ _mm256_maskload_pd(double const *__p, __m256i __m)
(__v4di)__m);
}
-/// \brief Conditionally loads single-precision floating point elements from a
+/// Conditionally loads single-precision floating point elements from a
/// memory location pointed to by \a __p into a 128-bit vector of
/// [4 x float], depending on the mask bits associated with each data
/// element.
@@ -3446,13 +3377,13 @@ _mm256_maskload_pd(double const *__p, __m256i __m)
/// corresponding value in the memory location is not loaded and the
/// corresponding field in the return value is set to zero.
/// \returns A 128-bit vector of [4 x float] containing the loaded values.
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_maskload_ps(float const *__p, __m128i __m)
{
return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
}
-/// \brief Conditionally loads single-precision floating point elements from a
+/// Conditionally loads single-precision floating point elements from a
/// memory location pointed to by \a __p into a 256-bit vector of
/// [8 x float], depending on the mask bits associated with each data
/// element.
@@ -3477,7 +3408,7 @@ _mm256_maskload_ps(float const *__p, __m256i __m)
}
/* Conditional store ops */
-/// \brief Moves single-precision floating point values from a 256-bit vector
+/// Moves single-precision floating point values from a 256-bit vector
/// of [8 x float] to a memory location pointed to by \a __p, according to
/// the specified mask.
///
@@ -3501,7 +3432,7 @@ _mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
__builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
}
-/// \brief Moves double-precision values from a 128-bit vector of [2 x double]
+/// Moves double-precision values from a 128-bit vector of [2 x double]
/// to a memory location pointed to by \a __p, according to the specified
/// mask.
///
@@ -3519,13 +3450,13 @@ _mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
/// changed.
/// \param __a
/// A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS128
_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
{
__builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
}
-/// \brief Moves double-precision values from a 256-bit vector of [4 x double]
+/// Moves double-precision values from a 256-bit vector of [4 x double]
/// to a memory location pointed to by \a __p, according to the specified
/// mask.
///
@@ -3549,7 +3480,7 @@ _mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
__builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
}
-/// \brief Moves single-precision floating point values from a 128-bit vector
+/// Moves single-precision floating point values from a 128-bit vector
/// of [4 x float] to a memory location pointed to by \a __p, according to
/// the specified mask.
///
@@ -3567,14 +3498,14 @@ _mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
/// changed.
/// \param __a
/// A 128-bit vector of [4 x float] containing the values to be stored.
-static __inline void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS128
_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
{
__builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);
}
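Throughout the conditional load/store family, the sign bit of each mask element selects whether that element is transferred. A minimal sketch (values illustrative):

    #include <immintrin.h>

    static void mask_demo(void) {
      double data[2] = {3.0, 4.0};
      __m128i mask = _mm_set_epi64x(0, -1);           /* select element 0 only */
      __m128d v = _mm_maskload_pd(data, mask);        /* v = {3.0, 0.0} */
      _mm_maskstore_pd(data, mask, _mm_set1_pd(9.0)); /* data = {9.0, 4.0} */
      (void)v;
    }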
/* Cacheability support ops */
-/// \brief Moves integer data from a 256-bit integer vector to a 32-byte
+/// Moves integer data from a 256-bit integer vector to a 32-byte
/// aligned memory location. To minimize caching, the data is flagged as
/// non-temporal (unlikely to be used again soon).
///
@@ -3594,7 +3525,7 @@ _mm256_stream_si256(__m256i *__a, __m256i __b)
__builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a);
}
-/// \brief Moves double-precision values from a 256-bit vector of [4 x double]
+/// Moves double-precision values from a 256-bit vector of [4 x double]
/// to a 32-byte aligned memory location. To minimize caching, the data is
/// flagged as non-temporal (unlikely to be used again soon).
///
@@ -3614,7 +3545,7 @@ _mm256_stream_pd(double *__a, __m256d __b)
__builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a);
}
-/// \brief Moves single-precision floating point values from a 256-bit vector
+/// Moves single-precision floating point values from a 256-bit vector
/// of [8 x float] to a 32-byte aligned memory location. To minimize
/// caching, the data is flagged as non-temporal (unlikely to be used again
/// soon).
@@ -3636,7 +3567,7 @@ _mm256_stream_ps(float *__p, __m256 __a)
}
/* Create vectors */
-/// \brief Create a 256-bit vector of [4 x double] with undefined values.
+/// Create a 256-bit vector of [4 x double] with undefined values.
///
/// \headerfile <x86intrin.h>
///
@@ -3649,7 +3580,7 @@ _mm256_undefined_pd(void)
return (__m256d)__builtin_ia32_undef256();
}
-/// \brief Create a 256-bit vector of [8 x float] with undefined values.
+/// Create a 256-bit vector of [8 x float] with undefined values.
///
/// \headerfile <x86intrin.h>
///
@@ -3662,7 +3593,7 @@ _mm256_undefined_ps(void)
return (__m256)__builtin_ia32_undef256();
}
-/// \brief Create a 256-bit integer vector with undefined values.
+/// Create a 256-bit integer vector with undefined values.
///
/// \headerfile <x86intrin.h>
///
@@ -3675,7 +3606,7 @@ _mm256_undefined_si256(void)
return (__m256i)__builtin_ia32_undef256();
}
-/// \brief Constructs a 256-bit floating-point vector of [4 x double]
+/// Constructs a 256-bit floating-point vector of [4 x double]
/// initialized with the specified double-precision floating-point values.
///
/// \headerfile <x86intrin.h>
@@ -3699,10 +3630,10 @@ _mm256_undefined_si256(void)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
- return (__m256d){ __d, __c, __b, __a };
+ return __extension__ (__m256d){ __d, __c, __b, __a };
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float] initialized
+/// Constructs a 256-bit floating-point vector of [8 x float] initialized
/// with the specified single-precision floating-point values.
///
/// \headerfile <x86intrin.h>
@@ -3739,10 +3670,10 @@ static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_ps(float __a, float __b, float __c, float __d,
float __e, float __f, float __g, float __h)
{
- return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
+ return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}
-/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// Constructs a 256-bit integer vector initialized with the specified
/// 32-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3771,10 +3702,10 @@ static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
int __i4, int __i5, int __i6, int __i7)
{
- return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
+ return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}
-/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// Constructs a 256-bit integer vector initialized with the specified
/// 16-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3821,11 +3752,11 @@ _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
short __w07, short __w06, short __w05, short __w04,
short __w03, short __w02, short __w01, short __w00)
{
- return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
+ return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
__w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}
-/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// Constructs a 256-bit integer vector initialized with the specified
/// 8-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3908,7 +3839,7 @@ _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
char __b07, char __b06, char __b05, char __b04,
char __b03, char __b02, char __b01, char __b00)
{
- return (__m256i)(__v32qi){
+ return __extension__ (__m256i)(__v32qi){
__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
__b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
__b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
@@ -3916,7 +3847,7 @@ _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
};
}
-/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// Constructs a 256-bit integer vector initialized with the specified
/// 64-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3936,11 +3867,11 @@ _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
- return (__m256i)(__v4di){ __d, __c, __b, __a };
+ return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a };
}
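The __extension__ keyword added to these initializers marks the vector compound literals as intentional GNU extensions, so strict modes such as -std=c89 -pedantic do not warn when the always-inline bodies are instantiated. A sketch of the effect:

    #include <immintrin.h>

    __m256d make_pd(double a, double b, double c, double d) {
      /* Without __extension__, -pedantic may diagnose the vector literal. */
      return __extension__ (__m256d){ d, c, b, a };
    }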
/* Create vectors with elements in reverse order */
-/// \brief Constructs a 256-bit floating-point vector of [4 x double],
+/// Constructs a 256-bit floating-point vector of [4 x double],
/// initialized in reverse order with the specified double-precision
/// floating-point values.
///
@@ -3965,10 +3896,10 @@ _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
- return (__m256d){ __a, __b, __c, __d };
+ return _mm256_set_pd(__d, __c, __b, __a);
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float],
+/// Constructs a 256-bit floating-point vector of [8 x float],
/// initialized in reverse order with the specified single-precision
/// floating-point values.
///
@@ -4006,10 +3937,10 @@ static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_ps(float __a, float __b, float __c, float __d,
float __e, float __f, float __g, float __h)
{
- return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
+ return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a);
}
-/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// Constructs a 256-bit integer vector, initialized in reverse order
/// with the specified 32-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -4038,10 +3969,10 @@ static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
int __i4, int __i5, int __i6, int __i7)
{
- return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
+ return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0);
}
-/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// Constructs a 256-bit integer vector, initialized in reverse order
/// with the specified 16-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -4088,11 +4019,13 @@ _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
short __w07, short __w06, short __w05, short __w04,
short __w03, short __w02, short __w01, short __w00)
{
- return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
- __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
+ return _mm256_set_epi16(__w00, __w01, __w02, __w03,
+ __w04, __w05, __w06, __w07,
+ __w08, __w09, __w10, __w11,
+ __w12, __w13, __w14, __w15);
}
-/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// Constructs a 256-bit integer vector, initialized in reverse order
/// with the specified 8-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -4175,14 +4108,13 @@ _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
char __b07, char __b06, char __b05, char __b04,
char __b03, char __b02, char __b01, char __b00)
{
- return (__m256i)(__v32qi){
- __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
- __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
- __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
- __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
+ return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
+ __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
+ __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
+ __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31);
}
-/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// Constructs a 256-bit integer vector, initialized in reverse order
/// with the specified 64-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -4202,11 +4134,11 @@ _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
- return (__m256i)(__v4di){ __a, __b, __c, __d };
+ return _mm256_set_epi64x(__d, __c, __b, __a);
}
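Routing the setr forms through their set counterparts leaves a single canonical initializer per element type. The two calls below build identical vectors (a small sketch):

    #include <immintrin.h>

    static void setr_demo(void) {
      __m256i x = _mm256_setr_epi64x(0, 1, 2, 3); /* element 0 = 0, ..., 3 = 3 */
      __m256i y = _mm256_set_epi64x(3, 2, 1, 0);  /* same vector */
      (void)x; (void)y;
    }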
/* Create vectors with repeated elements */
-/// \brief Constructs a 256-bit floating-point vector of [4 x double], with each
+/// Constructs a 256-bit floating-point vector of [4 x double], with each
/// of the four double-precision floating-point vector elements set to the
/// specified double-precision floating-point value.
///
@@ -4221,10 +4153,10 @@ _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set1_pd(double __w)
{
- return (__m256d){ __w, __w, __w, __w };
+ return _mm256_set_pd(__w, __w, __w, __w);
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float], with each
+/// Constructs a 256-bit floating-point vector of [8 x float], with each
/// of the eight single-precision floating-point vector elements set to the
/// specified single-precision floating-point value.
///
@@ -4240,10 +4172,10 @@ _mm256_set1_pd(double __w)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set1_ps(float __w)
{
- return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
+ return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w);
}
-/// \brief Constructs a 256-bit integer vector of [8 x i32], with each of the
+/// Constructs a 256-bit integer vector of [8 x i32], with each of the
/// 32-bit integral vector elements set to the specified 32-bit integral
/// value.
///
@@ -4259,10 +4191,10 @@ _mm256_set1_ps(float __w)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi32(int __i)
{
- return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
+ return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i);
}
-/// \brief Constructs a 256-bit integer vector of [16 x i16], with each of the
+/// Constructs a 256-bit integer vector of [16 x i16], with each of the
/// 16-bit integral vector elements set to the specified 16-bit integral
/// value.
///
@@ -4277,11 +4209,11 @@ _mm256_set1_epi32(int __i)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi16(short __w)
{
- return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
- __w, __w, __w, __w, __w, __w };
+ return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w,
+ __w, __w, __w, __w, __w, __w, __w, __w);
}
-/// \brief Constructs a 256-bit integer vector of [32 x i8], with each of the
+/// Constructs a 256-bit integer vector of [32 x i8], with each of the
/// 8-bit integral vector elements set to the specified 8-bit integral value.
///
/// \headerfile <x86intrin.h>
@@ -4295,12 +4227,13 @@ _mm256_set1_epi16(short __w)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi8(char __b)
{
- return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
- __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
- __b, __b, __b, __b, __b, __b, __b };
+ return _mm256_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b, __b, __b, __b);
}
-/// \brief Constructs a 256-bit integer vector of [4 x i64], with each of the
+/// Constructs a 256-bit integer vector of [4 x i64], with each of the
/// 64-bit integral vector elements set to the specified 64-bit integral
/// value.
///
@@ -4315,11 +4248,11 @@ _mm256_set1_epi8(char __b)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi64x(long long __q)
{
- return (__m256i)(__v4di){ __q, __q, __q, __q };
+ return _mm256_set_epi64x(__q, __q, __q, __q);
}
/* Create zeroed vectors */
-/// \brief Constructs a 256-bit floating-point vector of [4 x double] with all
+/// Constructs a 256-bit floating-point vector of [4 x double] with all
/// vector elements initialized to zero.
///
/// \headerfile <x86intrin.h>
@@ -4330,10 +4263,10 @@ _mm256_set1_epi64x(long long __q)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setzero_pd(void)
{
- return (__m256d){ 0, 0, 0, 0 };
+ return __extension__ (__m256d){ 0, 0, 0, 0 };
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float] with all
+/// Constructs a 256-bit floating-point vector of [8 x float] with all
/// vector elements initialized to zero.
///
/// \headerfile <x86intrin.h>
@@ -4344,10 +4277,10 @@ _mm256_setzero_pd(void)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setzero_ps(void)
{
- return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+ return __extension__ (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}
-/// \brief Constructs a 256-bit integer vector initialized to zero.
+/// Constructs a 256-bit integer vector initialized to zero.
///
/// \headerfile <x86intrin.h>
///
@@ -4357,11 +4290,11 @@ _mm256_setzero_ps(void)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setzero_si256(void)
{
- return (__m256i){ 0LL, 0LL, 0LL, 0LL };
+ return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 };
}
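The splat and zero constructors delegate the same way. A short sketch:

    #include <immintrin.h>

    static void splat_demo(void) {
      __m256i ones = _mm256_set1_epi32(1);   /* eight lanes of 1 */
      __m256i zero = _mm256_setzero_si256(); /* all lanes 0 */
      (void)ones; (void)zero;
    }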
/* Cast between vector types */
-/// \brief Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
+/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
/// floating-point vector of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -4378,7 +4311,7 @@ _mm256_castpd_ps(__m256d __a)
return (__m256)__a;
}
-/// \brief Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
+/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
/// integer vector.
///
/// \headerfile <x86intrin.h>
@@ -4395,7 +4328,7 @@ _mm256_castpd_si256(__m256d __a)
return (__m256i)__a;
}
-/// \brief Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
+/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
/// floating-point vector of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -4412,7 +4345,7 @@ _mm256_castps_pd(__m256 __a)
return (__m256d)__a;
}
-/// \brief Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
+/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
/// integer vector.
///
/// \headerfile <x86intrin.h>
@@ -4429,7 +4362,7 @@ _mm256_castps_si256(__m256 __a)
return (__m256i)__a;
}
-/// \brief Casts a 256-bit integer vector into a 256-bit floating-point vector
+/// Casts a 256-bit integer vector into a 256-bit floating-point vector
/// of [8 x float].
///
/// \headerfile <x86intrin.h>
@@ -4446,7 +4379,7 @@ _mm256_castsi256_ps(__m256i __a)
return (__m256)__a;
}
-/// \brief Casts a 256-bit integer vector into a 256-bit floating-point vector
+/// Casts a 256-bit integer vector into a 256-bit floating-point vector
/// of [4 x double].
///
/// \headerfile <x86intrin.h>
@@ -4463,7 +4396,7 @@ _mm256_castsi256_pd(__m256i __a)
return (__m256d)__a;
}
-/// \brief Returns the lower 128 bits of a 256-bit floating-point vector of
+/// Returns the lower 128 bits of a 256-bit floating-point vector of
/// [4 x double] as a 128-bit floating-point vector of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -4480,7 +4413,7 @@ _mm256_castpd256_pd128(__m256d __a)
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
}
-/// \brief Returns the lower 128 bits of a 256-bit floating-point vector of
+/// Returns the lower 128 bits of a 256-bit floating-point vector of
/// [8 x float] as a 128-bit floating-point vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -4497,7 +4430,7 @@ _mm256_castps256_ps128(__m256 __a)
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
}
-/// \brief Truncates a 256-bit integer vector into a 128-bit integer vector.
+/// Truncates a 256-bit integer vector into a 128-bit integer vector.
///
/// \headerfile <x86intrin.h>
///
@@ -4513,7 +4446,7 @@ _mm256_castsi256_si128(__m256i __a)
return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
}
-/// \brief Constructs a 256-bit floating-point vector of [4 x double] from a
+/// Constructs a 256-bit floating-point vector of [4 x double] from a
/// 128-bit floating-point vector of [2 x double].
///
/// The lower 128 bits contain the value of the source vector. The contents
@@ -4534,7 +4467,7 @@ _mm256_castpd128_pd256(__m128d __a)
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float] from a
+/// Constructs a 256-bit floating-point vector of [8 x float] from a
/// 128-bit floating-point vector of [4 x float].
///
/// The lower 128 bits contain the value of the source vector. The contents
@@ -4555,7 +4488,7 @@ _mm256_castps128_ps256(__m128 __a)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
}
-/// \brief Constructs a 256-bit integer vector from a 128-bit integer vector.
+/// Constructs a 256-bit integer vector from a 128-bit integer vector.
///
/// The lower 128 bits contain the value of the source vector. The contents
/// of the upper 128 bits are undefined.
@@ -4574,7 +4507,7 @@ _mm256_castsi128_si256(__m128i __a)
return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
}
-/// \brief Constructs a 256-bit floating-point vector of [4 x double] from a
+/// Constructs a 256-bit floating-point vector of [4 x double] from a
/// 128-bit floating-point vector of [2 x double]. The lower 128 bits
/// contain the value of the source vector. The upper 128 bits are set
/// to zero.
@@ -4593,7 +4526,7 @@ _mm256_zextpd128_pd256(__m128d __a)
return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3);
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float] from a
+/// Constructs a 256-bit floating-point vector of [8 x float] from a
/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain
/// the value of the source vector. The upper 128 bits are set to zero.
///
@@ -4611,7 +4544,7 @@ _mm256_zextps128_ps256(__m128 __a)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7);
}
-/// \brief Constructs a 256-bit integer vector from a 128-bit integer vector.
+/// Constructs a 256-bit integer vector from a 128-bit integer vector.
/// The lower 128 bits contain the value of the source vector. The upper
/// 128 bits are set to zero.
///
@@ -4634,7 +4567,7 @@ _mm256_zextsi128_si256(__m128i __a)
We use macros rather than inlines because we only want to accept
invocations where the immediate M is a constant expression.
*/
-/// \brief Constructs a new 256-bit vector of [8 x float] by first duplicating
+/// Constructs a new 256-bit vector of [8 x float] by first duplicating
/// a 256-bit vector of [8 x float] given in the first parameter, and then
/// replacing either the upper or the lower 128 bits with the contents of a
/// 128-bit vector of [4 x float] in the second parameter.
@@ -4668,20 +4601,11 @@ _mm256_zextsi128_si256(__m128i __a)
/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
/// result.
/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
-#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
- (__m256)__builtin_shufflevector( \
- (__v8sf)(__m256)(V1), \
- (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
- (((M) & 1) ? 0 : 8), \
- (((M) & 1) ? 1 : 9), \
- (((M) & 1) ? 2 : 10), \
- (((M) & 1) ? 3 : 11), \
- (((M) & 1) ? 8 : 4), \
- (((M) & 1) ? 9 : 5), \
- (((M) & 1) ? 10 : 6), \
- (((M) & 1) ? 11 : 7) );})
-
-/// \brief Constructs a new 256-bit vector of [4 x double] by first duplicating
+#define _mm256_insertf128_ps(V1, V2, M) \
+ (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \
+ (__v4sf)(__m128)(V2), (int)(M))
+
+/// Constructs a new 256-bit vector of [4 x double] by first duplicating
/// a 256-bit vector of [4 x double] given in the first parameter, and then
/// replacing either the upper or the lower 128 bits with the contents of a
/// 128-bit vector of [2 x double] in the second parameter.
@@ -4715,16 +4639,11 @@ _mm256_zextsi128_si256(__m128i __a)
/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
/// result.
/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
-#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
- (__m256d)__builtin_shufflevector( \
- (__v4df)(__m256d)(V1), \
- (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
- (((M) & 1) ? 0 : 4), \
- (((M) & 1) ? 1 : 5), \
- (((M) & 1) ? 4 : 2), \
- (((M) & 1) ? 5 : 3) );})
-
-/// \brief Constructs a new 256-bit integer vector by first duplicating a
+#define _mm256_insertf128_pd(V1, V2, M) \
+ (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \
+ (__v2df)(__m128d)(V2), (int)(M))
+
+/// Constructs a new 256-bit integer vector by first duplicating a
/// 256-bit integer vector given in the first parameter, and then replacing
/// either the upper or the lower 128 bits with the contents of a 128-bit
/// integer vector in the second parameter.
@@ -4758,21 +4677,16 @@ _mm256_zextsi128_si256(__m128i __a)
/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
/// result.
/// \returns A 256-bit integer vector containing the interleaved values.
-#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
- (__m256i)__builtin_shufflevector( \
- (__v4di)(__m256i)(V1), \
- (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
- (((M) & 1) ? 0 : 4), \
- (((M) & 1) ? 1 : 5), \
- (((M) & 1) ? 4 : 2), \
- (((M) & 1) ? 5 : 3) );})
+#define _mm256_insertf128_si256(V1, V2, M) \
+ (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \
+ (__v4si)(__m128i)(V2), (int)(M))
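Replacing the statement-expression shuffles with dedicated builtins removes the GNU ({ ... }) construct from these macros while still requiring M to be an integer constant expression. A usage sketch:

    #include <immintrin.h>

    static __m256 fill_upper(__m256 v) {
      __m128 hi = _mm_set1_ps(1.0f);
      return _mm256_insertf128_ps(v, hi, 1); /* replace bits [255:128] */
    }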
/*
Vector extract.
We use macros rather than inlines because we only want to accept
invocations where the immediate M is a constant expression.
*/
-/// \brief Extracts either the upper or the lower 128 bits from a 256-bit vector
+/// Extracts either the upper or the lower 128 bits from a 256-bit vector
/// of [8 x float], as determined by the immediate integer parameter, and
/// returns the extracted bits as a 128-bit vector of [4 x float].
///
@@ -4793,16 +4707,10 @@ _mm256_zextsi128_si256(__m128i __a)
/// result. \n
/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
/// \returns A 128-bit vector of [4 x float] containing the extracted bits.
-#define _mm256_extractf128_ps(V, M) __extension__ ({ \
- (__m128)__builtin_shufflevector( \
- (__v8sf)(__m256)(V), \
- (__v8sf)(_mm256_undefined_ps()), \
- (((M) & 1) ? 4 : 0), \
- (((M) & 1) ? 5 : 1), \
- (((M) & 1) ? 6 : 2), \
- (((M) & 1) ? 7 : 3) );})
-
-/// \brief Extracts either the upper or the lower 128 bits from a 256-bit vector
+#define _mm256_extractf128_ps(V, M) \
+ (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M))
+
+/// Extracts either the upper or the lower 128 bits from a 256-bit vector
/// of [4 x double], as determined by the immediate integer parameter, and
/// returns the extracted bits as a 128-bit vector of [2 x double].
///
@@ -4823,14 +4731,10 @@ _mm256_zextsi128_si256(__m128i __a)
/// result. \n
/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
/// \returns A 128-bit vector of [2 x double] containing the extracted bits.
-#define _mm256_extractf128_pd(V, M) __extension__ ({ \
- (__m128d)__builtin_shufflevector( \
- (__v4df)(__m256d)(V), \
- (__v4df)(_mm256_undefined_pd()), \
- (((M) & 1) ? 2 : 0), \
- (((M) & 1) ? 3 : 1) );})
-
-/// \brief Extracts either the upper or the lower 128 bits from a 256-bit
+#define _mm256_extractf128_pd(V, M) \
+ (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M))
+
+/// Extracts either the upper or the lower 128 bits from a 256-bit
/// integer vector, as determined by the immediate integer parameter, and
/// returns the extracted bits as a 128-bit integer vector.
///
@@ -4851,15 +4755,11 @@ _mm256_zextsi128_si256(__m128i __a)
/// result. \n
/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
/// \returns A 128-bit integer vector containing the extracted bits.
-#define _mm256_extractf128_si256(V, M) __extension__ ({ \
- (__m128i)__builtin_shufflevector( \
- (__v4di)(__m256i)(V), \
- (__v4di)(_mm256_undefined_si256()), \
- (((M) & 1) ? 2 : 0), \
- (((M) & 1) ? 3 : 1) );})
+#define _mm256_extractf128_si256(V, M) \
+ (__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M))
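Extraction mirrors insertion; a round-trip sketch (values illustrative):

    #include <immintrin.h>

    static void halves_demo(void) {
      __m256d v = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); /* elements 0..3 = 1..4 */
      __m128d lo = _mm256_extractf128_pd(v, 0);      /* {1.0, 2.0} */
      __m128d hi = _mm256_extractf128_pd(v, 1);      /* {3.0, 4.0} */
      (void)lo; (void)hi;
    }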
/* SIMD load ops (unaligned) */
-/// \brief Loads two 128-bit floating-point vectors of [4 x float] from
+/// Loads two 128-bit floating-point vectors of [4 x float] from
/// unaligned memory locations and constructs a 256-bit floating-point vector
/// of [8 x float] by concatenating the two 128-bit vectors.
///
@@ -4887,7 +4787,7 @@ _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1);
}
-/// \brief Loads two 128-bit floating-point vectors of [2 x double] from
+/// Loads two 128-bit floating-point vectors of [2 x double] from
/// unaligned memory locations and constructs a 256-bit floating-point vector
/// of [4 x double] by concatenating the two 128-bit vectors.
///
@@ -4915,7 +4815,7 @@ _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1);
}
-/// \brief Loads two 128-bit integer vectors from unaligned memory locations and
+/// Loads two 128-bit integer vectors from unaligned memory locations and
/// constructs a 256-bit integer vector by concatenating the two 128-bit
/// vectors.
///
@@ -4941,7 +4841,7 @@ _mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
}
/* SIMD store ops (unaligned) */
-/// \brief Stores the upper and lower 128 bits of a 256-bit floating-point
+/// Stores the upper and lower 128 bits of a 256-bit floating-point
/// vector of [8 x float] into two different unaligned memory locations.
///
/// \headerfile <x86intrin.h>
@@ -4970,7 +4870,7 @@ _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
_mm_storeu_ps(__addr_hi, __v128);
}
-/// \brief Stores the upper and lower 128 bits of a 256-bit floating-point
+/// Stores the upper and lower 128 bits of a 256-bit floating-point
/// vector of [4 x double] into two different unaligned memory locations.
///
/// \headerfile <x86intrin.h>
@@ -4999,7 +4899,7 @@ _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
_mm_storeu_pd(__addr_hi, __v128);
}
-/// \brief Stores the upper and lower 128 bits of a 256-bit integer vector into
+/// Stores the upper and lower 128 bits of a 256-bit integer vector into
/// two different unaligned memory locations.
///
/// \headerfile <x86intrin.h>
@@ -5028,7 +4928,7 @@ _mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
_mm_storeu_si128(__addr_hi, __v128);
}
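The paired unaligned ops move each 128-bit half to or from an independent address. A sketch (pointer names illustrative):

    #include <immintrin.h>

    static void split_copy(double *dst_hi, double *dst_lo,
                           const double *src_hi, const double *src_lo) {
      __m256d v = _mm256_loadu2_m128d(src_hi, src_lo);
      _mm256_storeu2_m128d(dst_hi, dst_lo, v);
    }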
-/// \brief Constructs a 256-bit floating-point vector of [8 x float] by
+/// Constructs a 256-bit floating-point vector of [8 x float] by
/// concatenating two 128-bit floating-point vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -5049,7 +4949,7 @@ _mm256_set_m128 (__m128 __hi, __m128 __lo)
return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
}
-/// \brief Constructs a 256-bit floating-point vector of [4 x double] by
+/// Constructs a 256-bit floating-point vector of [4 x double] by
/// concatenating two 128-bit floating-point vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -5067,10 +4967,10 @@ _mm256_set_m128 (__m128 __hi, __m128 __lo)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_m128d (__m128d __hi, __m128d __lo)
{
- return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+ return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
}
-/// \brief Constructs a 256-bit integer vector by concatenating two 128-bit
+/// Constructs a 256-bit integer vector by concatenating two 128-bit
/// integer vectors.
///
/// \headerfile <x86intrin.h>
@@ -5087,10 +4987,10 @@ _mm256_set_m128d (__m128d __hi, __m128d __lo)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_m128i (__m128i __hi, __m128i __lo)
{
- return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+ return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
}
-/// \brief Constructs a 256-bit floating-point vector of [8 x float] by
+/// Constructs a 256-bit floating-point vector of [8 x float] by
/// concatenating two 128-bit floating-point vectors of [4 x float]. This is
/// similar to _mm256_set_m128, but the order of the input parameters is
/// swapped.
@@ -5113,7 +5013,7 @@ _mm256_setr_m128 (__m128 __lo, __m128 __hi)
return _mm256_set_m128(__hi, __lo);
}
-/// \brief Constructs a 256-bit floating-point vector of [4 x double] by
+/// Constructs a 256-bit floating-point vector of [4 x double] by
/// concatenating two 128-bit floating-point vectors of [2 x double]. This is
/// similar to _mm256_set_m128d, but the order of the input parameters is
/// swapped.
@@ -5133,10 +5033,10 @@ _mm256_setr_m128 (__m128 __lo, __m128 __hi)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_m128d (__m128d __lo, __m128d __hi)
{
- return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+ return (__m256d)_mm256_set_m128d(__hi, __lo);
}
-/// \brief Constructs a 256-bit integer vector by concatenating two 128-bit
+/// Constructs a 256-bit integer vector by concatenating two 128-bit
/// integer vectors. This is similar to _mm256_set_m128i, but the order of
/// the input parameters is swapped.
///
@@ -5154,9 +5054,10 @@ _mm256_setr_m128d (__m128d __lo, __m128d __hi)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_m128i (__m128i __lo, __m128i __hi)
{
- return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
+ return (__m256i)_mm256_set_m128i(__hi, __lo);
}
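With a dedicated shufflevector body per element type, the setr_m128* forms reduce to operand swaps. A small sketch:

    #include <immintrin.h>

    static void concat_demo(void) {
      __m128 lo = _mm_set1_ps(1.0f), hi = _mm_set1_ps(2.0f);
      __m256 a = _mm256_set_m128(hi, lo);  /* low half from lo */
      __m256 b = _mm256_setr_m128(lo, hi); /* identical result */
      (void)a; (void)b;
    }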
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
#endif /* __AVXINTRIN_H */
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index e812a1632b91..d03bef442a28 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -49,7 +49,7 @@
to use it as a potentially faster version of BSF. */
#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-/// \brief Counts the number of trailing zero bits in the operand.
+/// Counts the number of trailing zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -65,7 +65,7 @@ __tzcnt_u16(unsigned short __X)
return __X ? __builtin_ctzs(__X) : 16;
}
-/// \brief Performs a bitwise AND of the second operand with the one's
+/// Performs a bitwise AND of the second operand with the one's
/// complement of the first operand.
///
/// \headerfile <x86intrin.h>
@@ -85,7 +85,7 @@ __andn_u32(unsigned int __X, unsigned int __Y)
}
/* AMD-specified, double-leading-underscore version of BEXTR */
-/// \brief Extracts the specified bits from the first operand and returns them
+/// Extracts the specified bits from the first operand and returns them
/// in the least significant bits of the result.
///
/// \headerfile <x86intrin.h>
@@ -100,6 +100,7 @@ __andn_u32(unsigned int __X, unsigned int __Y)
/// number of bits to be extracted.
/// \returns An unsigned integer whose least significant bits contain the
/// extracted bits.
+/// \see _bextr_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__bextr_u32(unsigned int __X, unsigned int __Y)
{
@@ -107,7 +108,7 @@ __bextr_u32(unsigned int __X, unsigned int __Y)
}
/* Intel-specified, single-leading-underscore version of BEXTR */
-/// \brief Extracts the specified bits from the first operand and returns them
+/// Extracts the specified bits from the first operand and returns them
/// in the least significant bits of the result.
///
/// \headerfile <x86intrin.h>
@@ -124,13 +125,14 @@ __bextr_u32(unsigned int __X, unsigned int __Y)
/// Bits [7:0] specify the number of bits.
/// \returns An unsigned integer whose least significant bits contain the
/// extracted bits.
+/// \see __bextr_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
{
return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
}
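The two BEXTR spellings encode the same control word: the start index lives in bits [7:0] and the field length in bits [15:8]. A worked sketch (requires BMI; compile with -mbmi):

    #include <x86intrin.h>

    static unsigned bextr_demo(void) {
      unsigned x = 0x1234;
      unsigned amd   = __bextr_u32(x, 8u | (4u << 8)); /* start 8, length 4 */
      unsigned intel = _bextr_u32(x, 8, 4);            /* same extraction */
      return amd == intel ? amd : 0;  /* (0x1234 >> 8) & 0xF == 0x2 */
    }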
-/// \brief Clears all bits in the source except for the least significant bit
+/// Clears all bits in the source except for the least significant bit
/// containing a value of 1 and returns the result.
///
/// \headerfile <x86intrin.h>
@@ -147,7 +149,7 @@ __blsi_u32(unsigned int __X)
return __X & -__X;
}
-/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+/// Creates a mask whose bits are set to 1, using bit 0 up to and
/// including the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
@@ -164,7 +166,7 @@ __blsmsk_u32(unsigned int __X)
return __X ^ (__X - 1);
}
-/// \brief Clears the least significant bit that is set to 1 in the source
+/// Clears the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
/// \headerfile <x86intrin.h>
@@ -181,7 +183,7 @@ __blsr_u32(unsigned int __X)
return __X & (__X - 1);
}
-/// \brief Counts the number of trailing zero bits in the operand.
+/// Counts the number of trailing zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -197,7 +199,7 @@ __tzcnt_u32(unsigned int __X)
return __X ? __builtin_ctz(__X) : 32;
}
-/// \brief Counts the number of trailing zero bits in the operand.
+/// Counts the number of trailing zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -226,7 +228,7 @@ _mm_tzcnt_32(unsigned int __X)
#define _tzcnt_u64(a) (__tzcnt_u64((a)))
-/// \brief Performs a bitwise AND of the second operand with the one's
+/// Performs a bitwise AND of the second operand with the one's
/// complement of the first operand.
///
/// \headerfile <x86intrin.h>
@@ -246,7 +248,7 @@ __andn_u64 (unsigned long long __X, unsigned long long __Y)
}
/* AMD-specified, double-leading-underscore version of BEXTR */
-/// \brief Extracts the specified bits from the first operand and returns them
+/// Extracts the specified bits from the first operand and returns them
/// in the least significant bits of the result.
///
/// \headerfile <x86intrin.h>
@@ -261,6 +263,7 @@ __andn_u64 (unsigned long long __X, unsigned long long __Y)
/// the number of bits to be extracted.
/// \returns An unsigned 64-bit integer whose least significant bits contain the
/// extracted bits.
+/// \see _bextr_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__bextr_u64(unsigned long long __X, unsigned long long __Y)
{
@@ -268,7 +271,7 @@ __bextr_u64(unsigned long long __X, unsigned long long __Y)
}
/* Intel-specified, single-leading-underscore version of BEXTR */
-/// \brief Extracts the specified bits from the first operand and returns them
+/// Extracts the specified bits from the first operand and returns them
/// in the least significant bits of the result.
///
/// \headerfile <x86intrin.h>
@@ -285,13 +288,14 @@ __bextr_u64(unsigned long long __X, unsigned long long __Y)
/// Bits [7:0] specify the number of bits.
/// \returns An unsigned 64-bit integer whose least significant bits contain the
/// extracted bits.
+/// \see __bextr_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
{
return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
}
-/// \brief Clears all bits in the source except for the least significant bit
+/// Clears all bits in the source except for the least significant bit
/// containing a value of 1 and returns the result.
///
/// \headerfile <x86intrin.h>
@@ -308,7 +312,7 @@ __blsi_u64(unsigned long long __X)
return __X & -__X;
}
-/// \brief Creates a mask whose bits are set to 1, using bit 0 up to and
+/// Creates a mask whose bits are set to 1, using bit 0 up to and
/// including the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
@@ -325,7 +329,7 @@ __blsmsk_u64(unsigned long long __X)
return __X ^ (__X - 1);
}
-/// \brief Clears the least significant bit that is set to 1 in the source
+/// Clears the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
/// \headerfile <x86intrin.h>
@@ -342,7 +346,7 @@ __blsr_u64(unsigned long long __X)
return __X & (__X - 1);
}
-/// \brief Counts the number of trailing zero bits in the operand.
+/// Counts the number of trailing zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -358,7 +362,7 @@ __tzcnt_u64(unsigned long long __X)
return __X ? __builtin_ctzll(__X) : 64;
}
-/// \brief Counts the number of trailing zero bits in the operand.
+/// Counts the number of trailing zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
diff --git a/lib/Headers/cetintrin.h b/lib/Headers/cetintrin.h
index 1256a3f63a16..120c95424da1 100644
--- a/lib/Headers/cetintrin.h
+++ b/lib/Headers/cetintrin.h
@@ -1,4 +1,4 @@
-/*===---- cetintrin.h - CET intrinsic ------------------------------------===
+/*===---- cetintrin.h - CET intrinsic --------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -42,6 +42,16 @@ static __inline__ void __DEFAULT_FN_ATTRS _incsspq(unsigned long long __a) {
}
#endif /* __x86_64__ */
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
+ __builtin_ia32_incsspq(__a);
+}
+#else /* __x86_64__ */
+static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
+ __builtin_ia32_incsspd((int)__a);
+}
+#endif /* __x86_64__ */
+
static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
return __builtin_ia32_rdsspd(__a);
}
@@ -52,6 +62,16 @@ static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long lo
}
#endif /* __x86_64__ */
+#ifdef __x86_64__
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _get_ssp(void) {
+ return __builtin_ia32_rdsspq(0);
+}
+#else /* __x86_64__ */
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {
+ return __builtin_ia32_rdsspd(0);
+}
+#endif /* __x86_64__ */
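The new _inc_ssp/_get_ssp wrappers select the D or Q builtin per target, so callers need no #ifdef of their own. A hedged sketch (compile with -mshstk); the zero seed passed to RDSSP is what comes back unchanged when shadow stacks are not enabled:

    #include <immintrin.h>

    static int shadow_stack_active(void) {
    #ifdef __x86_64__
      unsigned long long ssp = _get_ssp();
    #else
      unsigned int ssp = _get_ssp();
    #endif
      return ssp != 0; /* 0 when CET shadow stacks are inactive */
    }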
+
static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp() {
__builtin_ia32_saveprevssp();
}
diff --git a/lib/Headers/cldemoteintrin.h b/lib/Headers/cldemoteintrin.h
new file mode 100644
index 000000000000..fa78148ebf02
--- /dev/null
+++ b/lib/Headers/cldemoteintrin.h
@@ -0,0 +1,42 @@
+/*===---- cldemoteintrin.h - CLDEMOTE intrinsic ----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <cldemoteintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __CLDEMOTEINTRIN_H
+#define __CLDEMOTEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("cldemote")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_cldemote(const void * __P) {
+ __builtin_ia32_cldemote(__P);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
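A minimal sketch of the new intrinsic: CLDEMOTE is purely a performance hint, demoting the line containing the address toward more distant cache levels once this core is done with it (compile with -mcldemote; names illustrative):

    #include <x86intrin.h>

    static void publish(char *buf) {
      buf[0] = 1;     /* produce data for another core */
      _cldemote(buf); /* hint: demote the containing cache line */
    }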
diff --git a/lib/Headers/clflushoptintrin.h b/lib/Headers/clflushoptintrin.h
index f1f133023441..79bb4589fc75 100644
--- a/lib/Headers/clflushoptintrin.h
+++ b/lib/Headers/clflushoptintrin.h
@@ -1,4 +1,4 @@
-/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------------------===
+/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/lib/Headers/clwbintrin.h b/lib/Headers/clwbintrin.h
index 2594a6c38756..c09286ba6748 100644
--- a/lib/Headers/clwbintrin.h
+++ b/lib/Headers/clwbintrin.h
@@ -31,7 +31,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clwb")))
-/// \brief Writes back to memory the cache line (if modified) that contains the
+/// Writes back to memory the cache line (if modified) that contains the
/// linear address specified in \a __p from any level of the cache hierarchy in
/// the cache coherence domain.
///
diff --git a/lib/Headers/clzerointrin.h b/lib/Headers/clzerointrin.h
index ed7478ff87ea..07628acd8005 100644
--- a/lib/Headers/clzerointrin.h
+++ b/lib/Headers/clzerointrin.h
@@ -20,18 +20,18 @@
*
*===-----------------------------------------------------------------------===
*/
-#ifndef __X86INTRIN_H
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <clzerointrin.h> directly; include <x86intrin.h> instead."
#endif
-#ifndef _CLZEROINTRIN_H
-#define _CLZEROINTRIN_H
+#ifndef __CLZEROINTRIN_H
+#define __CLZEROINTRIN_H
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("clzero")))
-/// \brief Loads the cache line address and zero's out the cacheline
+/// Zeroes out the cache line that contains the specified address.
///
/// \headerfile <clzerointrin.h>
///
@@ -47,4 +47,4 @@ _mm_clzero (void * __line)
#undef __DEFAULT_FN_ATTRS
-#endif /* _CLZEROINTRIN_H */
+#endif /* __CLZEROINTRIN_H */
diff --git a/lib/Headers/cpuid.h b/lib/Headers/cpuid.h
index 3ae90de0b91f..fce6af52dd3f 100644
--- a/lib/Headers/cpuid.h
+++ b/lib/Headers/cpuid.h
@@ -156,6 +156,7 @@
#define bit_SMEP 0x00000080
#define bit_BMI2 0x00000100
#define bit_ENH_MOVSB 0x00000200
+#define bit_INVPCID 0x00000400
#define bit_RTM 0x00000800
#define bit_MPX 0x00004000
#define bit_AVX512F 0x00010000
@@ -166,7 +167,7 @@
#define bit_CLFLUSHOPT 0x00800000
#define bit_CLWB 0x01000000
#define bit_AVX512PF 0x04000000
-#define bit_AVX51SER 0x08000000
+#define bit_AVX512ER 0x08000000
#define bit_AVX512CD 0x10000000
#define bit_SHA 0x20000000
#define bit_AVX512BW 0x40000000
@@ -177,6 +178,7 @@
#define bit_AVX512VBMI 0x00000002
#define bit_PKU 0x00000004
#define bit_OSPKE 0x00000010
+#define bit_WAITPKG 0x00000020
#define bit_AVX512VBMI2 0x00000040
#define bit_SHSTK 0x00000080
#define bit_GFNI 0x00000100
@@ -186,10 +188,14 @@
#define bit_AVX512BITALG 0x00001000
#define bit_AVX512VPOPCNTDQ 0x00004000
#define bit_RDPID 0x00400000
+#define bit_CLDEMOTE 0x02000000
+#define bit_MOVDIRI 0x08000000
+#define bit_MOVDIR64B 0x10000000
/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
#define bit_AVX5124FMAPS 0x00000008
+#define bit_PCONFIG 0x00040000
#define bit_IBT 0x00100000
/* Features in %eax for leaf 13 sub-leaf 1 */
@@ -197,6 +203,9 @@
#define bit_XSAVEC 0x00000002
#define bit_XSAVES 0x00000008
+/* Features in %eax for leaf 0x14 sub-leaf 0 */
+#define bit_PTWRITE 0x00000010
+
/* Features in %ecx for leaf 0x80000001 */
#define bit_LAHF_LM 0x00000001
#define bit_ABM 0x00000020
@@ -215,8 +224,9 @@
#define bit_3DNOWP 0x40000000
#define bit_3DNOW 0x80000000
-/* Features in %ebx for leaf 0x80000001 */
+/* Features in %ebx for leaf 0x80000008 */
#define bit_CLZERO 0x00000001
+#define bit_WBNOINVD 0x00000200
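These bit masks pair with the query helpers defined later in the header; a sketch testing the newly added INVPCID bit (leaf 7, subleaf 0, %ebx), assuming the __get_cpuid_count helper this header provides:

    #include <cpuid.h>

    static int has_invpcid(void) {
      unsigned int eax, ebx, ecx, edx;
      if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
        return 0;
      return (ebx & bit_INVPCID) != 0;
    }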
#if __i386__
diff --git a/lib/Headers/cuda_wrappers/algorithm b/lib/Headers/cuda_wrappers/algorithm
index cedd70762c48..01af18360d8d 100644
--- a/lib/Headers/cuda_wrappers/algorithm
+++ b/lib/Headers/cuda_wrappers/algorithm
@@ -24,28 +24,36 @@
#ifndef __CLANG_CUDA_WRAPPERS_ALGORITHM
#define __CLANG_CUDA_WRAPPERS_ALGORITHM
-// This header defines __device__ overloads of std::min/max, but only if we're
-// <= C++11. In C++14, these functions are constexpr, and so are implicitly
-// __host__ __device__.
+// This header defines __device__ overloads of std::min/max.
//
-// We don't support the initializer_list overloads because
-// initializer_list::begin() and end() are not __host__ __device__ functions.
+// Ideally we'd declare these functions only if we're <= C++11. In C++14,
+// these functions are constexpr, and so are implicitly __host__ __device__.
//
-// When compiling in C++14 mode, we could force std::min/max to have different
-// implementations for host and device, by declaring the device overloads
-// before the constexpr overloads appear. We choose not to do this because
-
-// a) why write our own implementation when we can use one from the standard
-// library? and
-// b) libstdc++ is evil and declares min/max inside a header that is included
-// *before* we include <algorithm>. So we'd have to unconditionally
-// declare our __device__ overloads of min/max, but that would pollute
-// things for people who choose not to include <algorithm>.
+// However, the compiler being in C++14 mode does not imply that the standard
+// library supports C++14. There is no macro we can test to check that the
+// stdlib has constexpr std::min/max. Thus we have to unconditionally define
+// our device overloads.
+//
+// A host+device function cannot be overloaded, and a constexpr function
+// implicitly becomes host+device if there's no explicit host or device
+// overload preceding it. So the simple thing to do would be to declare our
+// device min/max overloads, and then #include_next <algorithm>. This way our
+// device overloads would come first, and so if we have a C++14 stdlib, its
+// min/max won't become host+device and conflict with our device overloads.
+//
+// But that also doesn't work. libstdc++ is evil and declares std::min/max in
+// an internal header that is included *before* <algorithm>. Thus by the time
+// we're inside of this file, std::min/max may already have been declared, and
+// thus we can't prevent them from becoming host+device if they're constexpr.
+//
+// Therefore we perpetrate the following hack: We mark our __device__ overloads
+// with __attribute__((enable_if(true, ""))). This causes the signature of the
+// function to change without changing anything else about it. (Except that
+// overload resolution will prefer it over the __host__ __device__ version
+// rather than considering them equally good).
#include_next <algorithm>
-#if __cplusplus <= 201103L
-
// We need to define these overloads in exactly the namespace our standard
// library uses (including the right inline namespace), otherwise they won't be
// picked up by other functions in the standard library (e.g. functions in
@@ -59,30 +67,43 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
#endif
+#pragma push_macro("_CPP14_CONSTEXPR")
+#if __cplusplus >= 201402L
+#define _CPP14_CONSTEXPR constexpr
+#else
+#define _CPP14_CONSTEXPR
+#endif
+
template <class __T, class __Cmp>
-inline __device__ const __T &
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
max(const __T &__a, const __T &__b, __Cmp __cmp) {
return __cmp(__a, __b) ? __b : __a;
}
template <class __T>
-inline __device__ const __T &
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
max(const __T &__a, const __T &__b) {
return __a < __b ? __b : __a;
}
template <class __T, class __Cmp>
-inline __device__ const __T &
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
min(const __T &__a, const __T &__b, __Cmp __cmp) {
return __cmp(__b, __a) ? __b : __a;
}
template <class __T>
-inline __device__ const __T &
+__attribute__((enable_if(true, "")))
+inline _CPP14_CONSTEXPR __host__ __device__ const __T &
min(const __T &__a, const __T &__b) {
return __a < __b ? __a : __b;
}
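The enable_if(true, "") marker exists only to perturb the signature; Clang then ranks the marked overload above an otherwise identical one, which is exactly the preference the comment block above relies on. A hypothetical stand-alone sketch of that ranking, using Clang's overloadable extension in plain C (names illustrative):

    /* Clang only: both attributes are Clang extensions. */
    __attribute__((overloadable))
    static int which(int x) { (void)x; return 1; }

    __attribute__((overloadable, enable_if(true, "")))
    static int which(int x) { (void)x; return 2; }

    /* which(0) evaluates to 2: the enable_if overload wins. */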
+#pragma pop_macro("_CPP14_CONSTEXPR")
+
#ifdef _LIBCPP_END_NAMESPACE_STD
_LIBCPP_END_NAMESPACE_STD
#else
@@ -92,5 +113,4 @@ _GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
-#endif // __cplusplus <= 201103L
#endif // __CLANG_CUDA_WRAPPERS_ALGORITHM
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index 3372508a7f81..f0ea7cd05c63 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -44,12 +44,11 @@ typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
* appear in the interface though. */
typedef signed char __v16qs __attribute__((__vector_size__(16)));
-#include <f16cintrin.h>
-
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
-/// \brief Adds lower double-precision values in both operands and returns the
+/// Adds lower double-precision values in both operands and returns the
/// sum in the lower 64 bits of the result. The upper 64 bits of the result
/// are copied from the upper double-precision value of the first operand.
///
@@ -71,7 +70,7 @@ _mm_add_sd(__m128d __a, __m128d __b)
return __a;
}
-/// \brief Adds two 128-bit vectors of [2 x double].
+/// Adds two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -89,7 +88,7 @@ _mm_add_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2df)__a + (__v2df)__b);
}
-/// \brief Subtracts the lower double-precision value of the second operand
+/// Subtracts the lower double-precision value of the second operand
/// from the lower double-precision value of the first operand and returns
/// the difference in the lower 64 bits of the result. The upper 64 bits of
/// the result are copied from the upper double-precision value of the first
@@ -113,7 +112,7 @@ _mm_sub_sd(__m128d __a, __m128d __b)
return __a;
}
-/// \brief Subtracts two 128-bit vectors of [2 x double].
+/// Subtracts two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -131,7 +130,7 @@ _mm_sub_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2df)__a - (__v2df)__b);
}
-/// \brief Multiplies lower double-precision values in both operands and returns
+/// Multiplies lower double-precision values in both operands and returns
/// the product in the lower 64 bits of the result. The upper 64 bits of the
/// result are copied from the upper double-precision value of the first
/// operand.
@@ -154,7 +153,7 @@ _mm_mul_sd(__m128d __a, __m128d __b)
return __a;
}
-/// \brief Multiplies two 128-bit vectors of [2 x double].
+/// Multiplies two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -172,7 +171,7 @@ _mm_mul_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2df)__a * (__v2df)__b);
}
-/// \brief Divides the lower double-precision value of the first operand by the
+/// Divides the lower double-precision value of the first operand by the
/// lower double-precision value of the second operand and returns the
/// quotient in the lower 64 bits of the result. The upper 64 bits of the
/// result are copied from the upper double-precision value of the first
@@ -196,7 +195,7 @@ _mm_div_sd(__m128d __a, __m128d __b)
return __a;
}
-/// \brief Performs an element-by-element division of two 128-bit vectors of
+/// Performs an element-by-element division of two 128-bit vectors of
/// [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -215,10 +214,10 @@ _mm_div_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2df)__a / (__v2df)__b);
}
-/// \brief Calculates the square root of the lower double-precision value of
+/// Calculates the square root of the lower double-precision value of
/// the second operand and returns it in the lower 64 bits of the result.
-/// The upper 64 bits of the result are copied from the upper double-
-/// precision value of the first operand.
+/// The upper 64 bits of the result are copied from the upper
+/// double-precision value of the first operand.
///
/// \headerfile <x86intrin.h>
///
@@ -238,10 +237,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_sqrt_sd(__m128d __a, __m128d __b)
{
__m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
- return (__m128d) { __c[0], __a[1] };
+ return __extension__ (__m128d) { __c[0], __a[1] };
}
-/// \brief Calculates the square root of the each of two values stored in a
+/// Calculates the square root of each of the two values stored in a
/// 128-bit vector of [2 x double].
///
/// \headerfile <x86intrin.h>
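
The __extension__ markers added throughout this file appear to serve one
purpose: a compound literal such as (__m128d){ __c[0], __a[1] } is a C99
feature that Clang diagnoses in C++ under -pedantic, and __extension__
suppresses that diagnostic so the header stays warning-clean. A minimal
sketch, with an illustrative typedef in place of __m128d:

typedef double v2df_demo __attribute__((__vector_size__(16)));

// Without __extension__, clang++ -pedantic warns that compound literals are
// a C99-specific feature; with it, the same expression compiles silently.
v2df_demo make_v2df(double lo, double hi) {
  return __extension__ (v2df_demo){lo, hi};
}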
@@ -258,10 +257,10 @@ _mm_sqrt_pd(__m128d __a)
return __builtin_ia32_sqrtpd((__v2df)__a);
}
-/// \brief Compares lower 64-bit double-precision values of both operands, and
+/// Compares lower 64-bit double-precision values of both operands, and
/// returns the lesser of the pair of values in the lower 64-bits of the
-/// result. The upper 64 bits of the result are copied from the upper double-
-/// precision value of the first operand.
+/// result. The upper 64 bits of the result are copied from the upper
+/// double-precision value of the first operand.
///
/// \headerfile <x86intrin.h>
///
@@ -282,7 +281,7 @@ _mm_min_sd(__m128d __a, __m128d __b)
return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Performs element-by-element comparison of the two 128-bit vectors of
+/// Performs element-by-element comparison of the two 128-bit vectors of
/// [2 x double] and returns the vector containing the lesser of each pair of
/// values.
///
@@ -302,10 +301,10 @@ _mm_min_pd(__m128d __a, __m128d __b)
return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares lower 64-bit double-precision values of both operands, and
+/// Compares lower 64-bit double-precision values of both operands, and
/// returns the greater of the pair of values in the lower 64-bits of the
-/// result. The upper 64 bits of the result are copied from the upper double-
-/// precision value of the first operand.
+/// result. The upper 64 bits of the result are copied from the upper
+/// double-precision value of the first operand.
///
/// \headerfile <x86intrin.h>
///
@@ -326,7 +325,7 @@ _mm_max_sd(__m128d __a, __m128d __b)
return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Performs element-by-element comparison of the two 128-bit vectors of
+/// Performs element-by-element comparison of the two 128-bit vectors of
/// [2 x double] and returns the vector containing the greater of each pair
/// of values.
///
@@ -346,7 +345,7 @@ _mm_max_pd(__m128d __a, __m128d __b)
return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Performs a bitwise AND of two 128-bit vectors of [2 x double].
+/// Performs a bitwise AND of two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -364,7 +363,7 @@ _mm_and_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2du)__a & (__v2du)__b);
}
-/// \brief Performs a bitwise AND of two 128-bit vectors of [2 x double], using
+/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using
/// the one's complement of the values contained in the first source operand.
///
/// \headerfile <x86intrin.h>
@@ -385,7 +384,7 @@ _mm_andnot_pd(__m128d __a, __m128d __b)
return (__m128d)(~(__v2du)__a & (__v2du)__b);
}
-/// \brief Performs a bitwise OR of two 128-bit vectors of [2 x double].
+/// Performs a bitwise OR of two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -403,7 +402,7 @@ _mm_or_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2du)__a | (__v2du)__b);
}
-/// \brief Performs a bitwise XOR of two 128-bit vectors of [2 x double].
+/// Performs a bitwise XOR of two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
///
@@ -421,9 +420,9 @@ _mm_xor_pd(__m128d __a, __m128d __b)
return (__m128d)((__v2du)__a ^ (__v2du)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
-/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0h
-/// for false, FFFFFFFFFFFFFFFFh for true.
+/// Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0x0
+/// for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -440,10 +439,10 @@ _mm_cmpeq_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are less than those in the second operand. Each comparison
-/// yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -460,11 +459,11 @@ _mm_cmplt_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are less than or equal to those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -481,11 +480,11 @@ _mm_cmple_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are greater than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -502,11 +501,11 @@ _mm_cmpgt_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are greater than or equal to those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -523,13 +522,13 @@ _mm_cmpge_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are ordered with respect to those in the second operand.
///
/// A pair of double-precision values are "ordered" with respect to each
-/// other if neither value is a NaN. Each comparison yields 0h for false,
-/// FFFFFFFFFFFFFFFFh for true.
+/// other if neither value is a NaN. Each comparison yields 0x0 for false,
+/// 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -546,13 +545,13 @@ _mm_cmpord_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are unordered with respect to those in the second operand.
///
/// A pair of double-precision values are "unordered" with respect to each
-/// other if one or both values are NaN. Each comparison yields 0h for false,
-/// FFFFFFFFFFFFFFFFh for true.
+/// other if one or both values are NaN. Each comparison yields 0x0 for
+/// false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -570,11 +569,11 @@ _mm_cmpunord_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are unequal to those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -591,11 +590,11 @@ _mm_cmpneq_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not less than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -612,11 +611,11 @@ _mm_cmpnlt_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not less than or equal to those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -633,11 +632,11 @@ _mm_cmpnle_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not greater than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -654,11 +653,11 @@ _mm_cmpngt_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
}
-/// \brief Compares each of the corresponding double-precision values of the
+/// Compares each of the corresponding double-precision values of the
/// 128-bit vectors of [2 x double] to determine if the values in the first
/// operand are not greater than or equal to those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -675,10 +674,10 @@ _mm_cmpnge_pd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -698,12 +697,12 @@ _mm_cmpeq_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -723,12 +722,12 @@ _mm_cmplt_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -748,12 +747,12 @@ _mm_cmple_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -771,15 +770,15 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpgt_sd(__m128d __a, __m128d __b)
{
__m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
- return (__m128d) { __c[0], __a[1] };
+ return __extension__ (__m128d) { __c[0], __a[1] };
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -797,16 +796,16 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpge_sd(__m128d __a, __m128d __b)
{
__m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
- return (__m128d) { __c[0], __a[1] };
+ return __extension__ (__m128d) { __c[0], __a[1] };
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is "ordered" with respect to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. A pair of
-/// double-precision values are "ordered" with respect to each other if
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
+/// of double-precision values are "ordered" with respect to each other if
/// neither value is a NaN.
///
/// \headerfile <x86intrin.h>
@@ -827,14 +826,14 @@ _mm_cmpord_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is "unordered" with respect to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true. A pair of
-/// double-precision values are "unordered" with respect to each other if one
-/// or both values are NaN.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
+/// of double-precision values are "unordered" with respect to each other if
+/// one or both values are NaN.
///
/// \headerfile <x86intrin.h>
///
@@ -855,12 +854,12 @@ _mm_cmpunord_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -880,12 +879,12 @@ _mm_cmpneq_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is not less than the corresponding
/// value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -905,12 +904,12 @@ _mm_cmpnlt_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is not less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -930,12 +929,12 @@ _mm_cmpnle_sd(__m128d __a, __m128d __b)
return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is not greater than the corresponding
/// value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -953,15 +952,15 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpngt_sd(__m128d __a, __m128d __b)
{
__m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
- return (__m128d) { __c[0], __a[1] };
+ return __extension__ (__m128d) { __c[0], __a[1] };
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is not greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -979,12 +978,14 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnge_sd(__m128d __a, __m128d __b)
{
__m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
- return (__m128d) { __c[0], __a[1] };
+ return __extension__ (__m128d) { __c[0], __a[1] };
}
-/// \brief Compares the lower double-precision floating-point values in each of
-/// the two 128-bit floating-point vectors of [2 x double] for equality. The
-/// comparison yields 0 for false, 1 for true.
+/// Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] for equality.
+///
+/// The comparison yields 0 for false, 1 for true. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -996,19 +997,21 @@ _mm_cmpnge_sd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comieq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0 for false, 1 for true.
+/// The comparison yields 0 for false, 1 for true. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1020,19 +1023,21 @@ _mm_comieq_sd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comilt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true.
+/// The comparison yields 0 for false, 1 for true. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1044,19 +1049,21 @@ _mm_comilt_sd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comile_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true.
+/// The comparison yields 0 for false, 1 for true. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1068,19 +1075,21 @@ _mm_comile_sd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comigt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
///
-/// The comparison yields 0 for false, 1 for true.
+/// The comparison yields 0 for false, 1 for true. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1092,19 +1101,21 @@ _mm_comigt_sd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comige_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
-/// The comparison yields 0 for false, 1 for true.
+/// The comparison yields 0 for false, 1 for true. If either of the two
+/// lower double-precision values is NaN, 1 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1116,18 +1127,19 @@ _mm_comige_sd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower double-precision values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comineq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] for equality. The
/// comparison yields 0 for false, 1 for true.
///
-/// If either of the two lower double-precision values is NaN, 1 is returned.
+/// If either of the two lower double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1140,20 +1152,20 @@ _mm_comineq_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomieq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is less than the corresponding value in
/// the second parameter.
///
/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 1 is returned.
+/// double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1166,20 +1178,20 @@ _mm_ucomieq_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomilt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is less than or equal to the
/// corresponding value in the second parameter.
///
/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 1 is returned.
+/// double-precision values is NaN, 0 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1192,14 +1204,14 @@ _mm_ucomilt_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
-/// lower double-precision values is NaN, 1 is returned.
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomile_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is greater than the corresponding value
/// in the second parameter.
@@ -1225,7 +1237,7 @@ _mm_ucomigt_sd(__m128d __a, __m128d __b)
return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is greater than or equal to the
/// corresponding value in the second parameter.
@@ -1251,13 +1263,13 @@ _mm_ucomige_sd(__m128d __a, __m128d __b)
return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
}
-/// \brief Compares the lower double-precision floating-point values in each of
+/// Compares the lower double-precision floating-point values in each of
/// the two 128-bit floating-point vectors of [2 x double] to determine if
/// the value in the first parameter is unequal to the corresponding value in
/// the second parameter.
///
/// The comparison yields 0 for false, 1 for true. If either of the two lower
-/// double-precision values is NaN, 0 is returned.
+/// double-precision values is NaN, 1 is returned.
///
/// \headerfile <x86intrin.h>
///
@@ -1270,14 +1282,14 @@ _mm_ucomige_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double]. The lower double-precision value is
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
-/// lower double-precision values is NaN, 0 is returned.
+/// lower double-precision values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomineq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
}
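
The corrected NaN notes above are easy to check directly; a short sketch
assuming an SSE2 target (the test values and output format are ours):

#include <emmintrin.h>
#include <cmath>
#include <cstdio>

int main() {
  __m128d a = _mm_set_sd(NAN);
  __m128d b = _mm_set_sd(1.0);
  // Per the notes above, a NaN operand makes the equality-style predicates
  // yield 0 while the not-equal predicates yield 1.
  std::printf("comieq=%d comineq=%d\n",
              _mm_comieq_sd(a, b), _mm_comineq_sd(a, b));   // 0 and 1
  std::printf("ucomieq=%d ucomineq=%d\n",
              _mm_ucomieq_sd(a, b), _mm_ucomineq_sd(a, b)); // 0 and 1
  return 0;
}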
-/// \brief Converts the two double-precision floating-point elements of a
+/// Converts the two double-precision floating-point elements of a
/// 128-bit vector of [2 x double] into two single-precision floating-point
/// values, returned in the lower 64 bits of a 128-bit vector of [4 x float].
/// The upper 64 bits of the result vector are set to zero.
@@ -1296,7 +1308,7 @@ _mm_cvtpd_ps(__m128d __a)
return __builtin_ia32_cvtpd2ps((__v2df)__a);
}
-/// \brief Converts the lower two single-precision floating-point elements of a
+/// Converts the lower two single-precision floating-point elements of a
/// 128-bit vector of [4 x float] into two double-precision floating-point
/// values, returned in a 128-bit vector of [2 x double]. The upper two
/// elements of the input vector are unused.
@@ -1317,7 +1329,7 @@ _mm_cvtps_pd(__m128 __a)
__builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
}
-/// \brief Converts the lower two integer elements of a 128-bit vector of
+/// Converts the lower two integer elements of a 128-bit vector of
/// [4 x i32] into two double-precision floating-point values, returned in a
/// 128-bit vector of [2 x double].
///
@@ -1340,7 +1352,7 @@ _mm_cvtepi32_pd(__m128i __a)
__builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
}
-/// \brief Converts the two double-precision floating-point elements of a
+/// Converts the two double-precision floating-point elements of a
/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
/// 64 bits of the result vector are set to zero.
@@ -1359,7 +1371,7 @@ _mm_cvtpd_epi32(__m128d __a)
return __builtin_ia32_cvtpd2dq((__v2df)__a);
}
-/// \brief Converts the low-order element of a 128-bit vector of [2 x double]
+/// Converts the low-order element of a 128-bit vector of [2 x double]
/// into a 32-bit signed integer value.
///
/// \headerfile <x86intrin.h>
@@ -1376,7 +1388,7 @@ _mm_cvtsd_si32(__m128d __a)
return __builtin_ia32_cvtsd2si((__v2df)__a);
}
-/// \brief Converts the lower double-precision floating-point element of a
+/// Converts the lower double-precision floating-point element of a
/// 128-bit vector of [2 x double], in the second parameter, into a
/// single-precision floating-point value, returned in the lower 32 bits of a
/// 128-bit vector of [4 x float]. The upper 96 bits of the result vector are
@@ -1401,7 +1413,7 @@ _mm_cvtsd_ss(__m128 __a, __m128d __b)
return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
}
-/// \brief Converts a 32-bit signed integer value, in the second parameter, into
+/// Converts a 32-bit signed integer value, in the second parameter, into
/// a double-precision floating-point value, returned in the lower 64 bits of
/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
/// are copied from the upper 64 bits of the first parameter.
@@ -1425,7 +1437,7 @@ _mm_cvtsi32_sd(__m128d __a, int __b)
return __a;
}
-/// \brief Converts the lower single-precision floating-point element of a
+/// Converts the lower single-precision floating-point element of a
/// 128-bit vector of [4 x float], in the second parameter, into a
/// double-precision floating-point value, returned in the lower 64 bits of
/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
@@ -1451,7 +1463,7 @@ _mm_cvtss_sd(__m128d __a, __m128 __b)
return __a;
}
-/// \brief Converts the two double-precision floating-point elements of a
+/// Converts the two double-precision floating-point elements of a
/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
/// returned in the lower 64 bits of a 128-bit vector of [4 x i32].
///
@@ -1474,7 +1486,7 @@ _mm_cvttpd_epi32(__m128d __a)
return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
}
-/// \brief Converts the low-order element of a [2 x double] vector into a 32-bit
+/// Converts the low-order element of a [2 x double] vector into a 32-bit
/// signed integer value, truncating the result when it is inexact.
///
/// \headerfile <x86intrin.h>
@@ -1492,7 +1504,7 @@ _mm_cvttsd_si32(__m128d __a)
return __builtin_ia32_cvttsd2si((__v2df)__a);
}
-/// \brief Converts the two double-precision floating-point elements of a
+/// Converts the two double-precision floating-point elements of a
/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
/// returned in a 64-bit vector of [2 x i32].
///
@@ -1503,13 +1515,13 @@ _mm_cvttsd_si32(__m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double].
/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpd_pi32(__m128d __a)
{
return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
}
-/// \brief Converts the two double-precision floating-point elements of a
+/// Converts the two double-precision floating-point elements of a
/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
/// returned in a 64-bit vector of [2 x i32].
///
@@ -1523,13 +1535,13 @@ _mm_cvtpd_pi32(__m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double].
/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvttpd_pi32(__m128d __a)
{
return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
}
-/// \brief Converts the two signed 32-bit integer elements of a 64-bit vector of
+/// Converts the two signed 32-bit integer elements of a 64-bit vector of
/// [2 x i32] into two double-precision floating-point values, returned in a
/// 128-bit vector of [2 x double].
///
@@ -1540,13 +1552,13 @@ _mm_cvttpd_pi32(__m128d __a)
/// \param __a
/// A 64-bit vector of [2 x i32].
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
_mm_cvtpi32_pd(__m64 __a)
{
return __builtin_ia32_cvtpi2pd((__v2si)__a);
}
-/// \brief Returns the low-order element of a 128-bit vector of [2 x double] as
+/// Returns the low-order element of a 128-bit vector of [2 x double] as
/// a double-precision floating-point value.
///
/// \headerfile <x86intrin.h>
@@ -1563,7 +1575,7 @@ _mm_cvtsd_f64(__m128d __a)
return __a[0];
}
-/// \brief Loads a 128-bit floating-point vector of [2 x double] from an aligned
+/// Loads a 128-bit floating-point vector of [2 x double] from an aligned
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -1580,7 +1592,7 @@ _mm_load_pd(double const *__dp)
return *(__m128d*)__dp;
}
-/// \brief Loads a double-precision floating-point value from a specified memory
+/// Loads a double-precision floating-point value from a specified memory
/// location and duplicates it to both vector elements of a 128-bit vector of
/// [2 x double].
///
@@ -1599,12 +1611,12 @@ _mm_load1_pd(double const *__dp)
double __u;
} __attribute__((__packed__, __may_alias__));
double __u = ((struct __mm_load1_pd_struct*)__dp)->__u;
- return (__m128d){ __u, __u };
+ return __extension__ (__m128d){ __u, __u };
}
#define _mm_load_pd1(dp) _mm_load1_pd(dp)
-/// \brief Loads two double-precision values, in reverse order, from an aligned
+/// Loads two double-precision values, in reverse order, from an aligned
/// memory location into a 128-bit vector of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -1625,7 +1637,7 @@ _mm_loadr_pd(double const *__dp)
return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
}
-/// \brief Loads a 128-bit floating-point vector of [2 x double] from an
+/// Loads a 128-bit floating-point vector of [2 x double] from an
/// unaligned memory location.
///
/// \headerfile <x86intrin.h>
@@ -1645,7 +1657,7 @@ _mm_loadu_pd(double const *__dp)
return ((struct __loadu_pd*)__dp)->__v;
}
-/// \brief Loads a 64-bit integer value to the low element of a 128-bit integer
+/// Loads a 64-bit integer value to the low element of a 128-bit integer
/// vector and clears the upper element.
///
/// \headerfile <x86intrin.h>
@@ -1663,10 +1675,10 @@ _mm_loadu_si64(void const *__a)
long long __v;
} __attribute__((__packed__, __may_alias__));
long long __u = ((struct __loadu_si64*)__a)->__v;
- return (__m128i){__u, 0L};
+ return __extension__ (__m128i)(__v2di){__u, 0L};
}
-/// \brief Loads a 64-bit double-precision value to the low element of a
+/// Loads a 64-bit double-precision value to the low element of a
/// 128-bit integer vector and clears the upper element.
///
/// \headerfile <x86intrin.h>
@@ -1684,10 +1696,10 @@ _mm_load_sd(double const *__dp)
double __u;
} __attribute__((__packed__, __may_alias__));
double __u = ((struct __mm_load_sd_struct*)__dp)->__u;
- return (__m128d){ __u, 0 };
+ return __extension__ (__m128d){ __u, 0 };
}
-/// \brief Loads a double-precision value into the high-order bits of a 128-bit
+/// Loads a double-precision value into the high-order bits of a 128-bit
/// vector of [2 x double]. The low-order bits are copied from the low-order
/// bits of the first operand.
///
@@ -1711,10 +1723,10 @@ _mm_loadh_pd(__m128d __a, double const *__dp)
double __u;
} __attribute__((__packed__, __may_alias__));
double __u = ((struct __mm_loadh_pd_struct*)__dp)->__u;
- return (__m128d){ __a[0], __u };
+ return __extension__ (__m128d){ __a[0], __u };
}
-/// \brief Loads a double-precision value into the low-order bits of a 128-bit
+/// Loads a double-precision value into the low-order bits of a 128-bit
/// vector of [2 x double]. The high-order bits are copied from the
/// high-order bits of the first operand.
///
@@ -1738,10 +1750,10 @@ _mm_loadl_pd(__m128d __a, double const *__dp)
double __u;
} __attribute__((__packed__, __may_alias__));
double __u = ((struct __mm_loadl_pd_struct*)__dp)->__u;
- return (__m128d){ __u, __a[1] };
+ return __extension__ (__m128d){ __u, __a[1] };
}
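
The load helpers above share one idiom: a single-member struct marked
__packed__ (so its alignment is 1) and __may_alias__ (so the access is exempt
from strict-aliasing assumptions), giving a well-defined unaligned load. A
standalone sketch of the idiom, with illustrative names:

#include <cstring>

static inline double load_unaligned_double(const void *p) {
  struct wrapper {
    double v;
  } __attribute__((__packed__, __may_alias__));
  // Because the struct is packed, this dereference must not assume the usual
  // 8-byte alignment of double; may_alias keeps it aliasing-safe as well.
  return ((const struct wrapper *)p)->v;
}

int main() {
  unsigned char buf[1 + sizeof(double)] = {0};
  double d = 2.5;
  std::memcpy(buf + 1, &d, sizeof d);           // deliberately misaligned
  return load_unaligned_double(buf + 1) == 2.5 ? 0 : 1;
}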
-/// \brief Constructs a 128-bit floating-point vector of [2 x double] with
+/// Constructs a 128-bit floating-point vector of [2 x double] with
/// unspecified content. This could be used as an argument to another
/// intrinsic function where the argument is required but the value is not
/// actually used.
@@ -1758,7 +1770,7 @@ _mm_undefined_pd(void)
return (__m128d)__builtin_ia32_undef128();
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double]. The lower
+/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
/// 64 bits of the vector are initialized with the specified double-precision
/// floating-point value. The upper 64 bits are set to zero.
///
@@ -1775,10 +1787,10 @@ _mm_undefined_pd(void)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set_sd(double __w)
{
- return (__m128d){ __w, 0 };
+ return __extension__ (__m128d){ __w, 0 };
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double], with each
+/// Constructs a 128-bit floating-point vector of [2 x double], with each
/// of the two double-precision floating-point vector elements set to the
/// specified double-precision floating-point value.
///
@@ -1793,10 +1805,10 @@ _mm_set_sd(double __w)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set1_pd(double __w)
{
- return (__m128d){ __w, __w };
+ return __extension__ (__m128d){ __w, __w };
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double], with each
+/// Constructs a 128-bit floating-point vector of [2 x double], with each
/// of the two double-precision floating-point vector elements set to the
/// specified double-precision floating-point value.
///
@@ -1814,7 +1826,7 @@ _mm_set_pd1(double __w)
return _mm_set1_pd(__w);
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double]
+/// Constructs a 128-bit floating-point vector of [2 x double]
/// initialized with the specified double-precision floating-point values.
///
/// \headerfile <x86intrin.h>
@@ -1831,10 +1843,10 @@ _mm_set_pd1(double __w)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set_pd(double __w, double __x)
{
- return (__m128d){ __x, __w };
+ return __extension__ (__m128d){ __x, __w };
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double],
+/// Constructs a 128-bit floating-point vector of [2 x double],
/// initialized in reverse order with the specified double-precision
/// floating-point values.
///
@@ -1852,10 +1864,10 @@ _mm_set_pd(double __w, double __x)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_setr_pd(double __w, double __x)
{
- return (__m128d){ __w, __x };
+ return __extension__ (__m128d){ __w, __x };
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double]
+/// Constructs a 128-bit floating-point vector of [2 x double]
/// initialized to zero.
///
/// \headerfile <x86intrin.h>
@@ -1867,10 +1879,10 @@ _mm_setr_pd(double __w, double __x)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_setzero_pd(void)
{
- return (__m128d){ 0, 0 };
+ return __extension__ (__m128d){ 0, 0 };
}
-/// \brief Constructs a 128-bit floating-point vector of [2 x double]. The lower
+/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
/// 64 bits are set to the lower 64 bits of the second parameter. The upper
/// 64 bits are set to the upper 64 bits of the first parameter.
///
@@ -1888,10 +1900,11 @@ _mm_setzero_pd(void)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_move_sd(__m128d __a, __m128d __b)
{
- return (__m128d){ __b[0], __a[1] };
+ __a[0] = __b[0];
+ return __a;
}
-/// \brief Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
+/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -1911,7 +1924,7 @@ _mm_store_sd(double *__dp, __m128d __a)
((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
}
-/// \brief Moves packed double-precision values from a 128-bit vector of
+/// Moves packed double-precision values from a 128-bit vector of
/// [2 x double] to a memory location.
///
/// \headerfile <x86intrin.h>
@@ -1930,19 +1943,20 @@ _mm_store_pd(double *__dp, __m128d __a)
*(__m128d*)__dp = __a;
}
-/// \brief Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
+/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
/// the upper and lower 64 bits of a memory location.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c>VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
+/// This intrinsic corresponds to the
+/// <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
///
/// \param __dp
/// A pointer to a memory location that can store two double-precision
/// values.
/// \param __a
/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-/// of the values in \a dp.
+/// of the values in \a __dp.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store1_pd(double *__dp, __m128d __a)
{
@@ -1950,25 +1964,27 @@ _mm_store1_pd(double *__dp, __m128d __a)
_mm_store_pd(__dp, __a);
}
-/// \brief Stores a 128-bit vector of [2 x double] into an aligned memory
-/// location.
+/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
+/// the upper and lower 64 bits of a memory location.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
+/// This intrinsic corresponds to the
+/// <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
///
/// \param __dp
-/// A pointer to a 128-bit memory location. The address of the memory
-/// location has to be 16-byte aligned.
+/// A pointer to a memory location that can store two double-precision
+/// values.
/// \param __a
-/// A 128-bit vector of [2 x double] containing the values to be stored.
+/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
+/// of the values in \a __dp.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_pd1(double *__dp, __m128d __a)
{
- return _mm_store1_pd(__dp, __a);
+ _mm_store1_pd(__dp, __a);
}
-/// \brief Stores a 128-bit vector of [2 x double] into an unaligned memory
+/// Stores a 128-bit vector of [2 x double] into an unaligned memory
/// location.
///
/// \headerfile <x86intrin.h>
@@ -1989,7 +2005,7 @@ _mm_storeu_pd(double *__dp, __m128d __a)
((struct __storeu_pd*)__dp)->__v = __a;
}
-/// \brief Stores two double-precision values, in reverse order, from a 128-bit
+/// Stores two double-precision values, in reverse order, from a 128-bit
/// vector of [2 x double] to a 16-byte aligned memory location.
///
/// \headerfile <x86intrin.h>
@@ -2010,7 +2026,7 @@ _mm_storer_pd(double *__dp, __m128d __a)
*(__m128d *)__dp = __a;
}
-/// \brief Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
+/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -2030,7 +2046,7 @@ _mm_storeh_pd(double *__dp, __m128d __a)
((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
}
-/// \brief Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
+/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -2050,7 +2066,7 @@ _mm_storel_pd(double *__dp, __m128d __a)
((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
}
-/// \brief Adds the corresponding elements of two 128-bit vectors of [16 x i8],
+/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
/// saving the lower 8 bits of each sum in the corresponding element of a
/// 128-bit result vector of [16 x i8].
///
@@ -2072,7 +2088,7 @@ _mm_add_epi8(__m128i __a, __m128i __b)
return (__m128i)((__v16qu)__a + (__v16qu)__b);
}
-/// \brief Adds the corresponding elements of two 128-bit vectors of [8 x i16],
+/// Adds the corresponding elements of two 128-bit vectors of [8 x i16],
/// saving the lower 16 bits of each sum in the corresponding element of a
/// 128-bit result vector of [8 x i16].
///
@@ -2094,7 +2110,7 @@ _mm_add_epi16(__m128i __a, __m128i __b)
return (__m128i)((__v8hu)__a + (__v8hu)__b);
}
-/// \brief Adds the corresponding elements of two 128-bit vectors of [4 x i32],
+/// Adds the corresponding elements of two 128-bit vectors of [4 x i32],
/// saving the lower 32 bits of each sum in the corresponding element of a
/// 128-bit result vector of [4 x i32].
///
@@ -2116,7 +2132,7 @@ _mm_add_epi32(__m128i __a, __m128i __b)
return (__m128i)((__v4su)__a + (__v4su)__b);
}
-/// \brief Adds two signed or unsigned 64-bit integer values, returning the
+/// Adds two signed or unsigned 64-bit integer values, returning the
/// lower 64 bits of the sum.
///
/// \headerfile <x86intrin.h>
@@ -2128,13 +2144,13 @@ _mm_add_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer.
/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_add_si64(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
}
-/// \brief Adds the corresponding elements of two 128-bit vectors of [2 x i64],
+/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
/// saving the lower 64 bits of each sum in the corresponding element of a
/// 128-bit result vector of [2 x i64].
///
@@ -2156,10 +2172,10 @@ _mm_add_epi64(__m128i __a, __m128i __b)
return (__m128i)((__v2du)__a + (__v2du)__b);
}
-/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// Adds, with saturation, the corresponding elements of two 128-bit
/// signed [16 x i8] vectors, saving each sum in the corresponding element of
-/// a 128-bit result vector of [16 x i8]. Positive sums greater than 7Fh are
-/// saturated to 7Fh. Negative sums less than 80h are saturated to 80h.
+/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are
+/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
@@ -2177,11 +2193,11 @@ _mm_adds_epi8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// Adds, with saturation, the corresponding elements of two 128-bit
/// signed [8 x i16] vectors, saving each sum in the corresponding element of
-/// a 128-bit result vector of [8 x i16]. Positive sums greater than 7FFFh
-/// are saturated to 7FFFh. Negative sums less than 8000h are saturated to
-/// 8000h.
+/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
+/// are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
+/// 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -2199,10 +2215,10 @@ _mm_adds_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// Adds, with saturation, the corresponding elements of two 128-bit
/// unsigned [16 x i8] vectors, saving each sum in the corresponding element
-/// of a 128-bit result vector of [16 x i8]. Positive sums greater than FFh
-/// are saturated to FFh. Negative sums are saturated to 00h.
+/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF
+/// are saturated to 0xFF. Negative sums are saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
@@ -2220,10 +2236,10 @@ _mm_adds_epu8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// Adds, with saturation, the corresponding elements of two 128-bit
/// unsigned [8 x i16] vectors, saving each sum in the corresponding element
-/// of a 128-bit result vector of [8 x i16]. Positive sums greater than FFFFh
-/// are saturated to FFFFh. Negative sums are saturated to 0000h.
+/// of a 128-bit result vector of [8 x i16]. Positive sums greater than
+/// 0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000.
///
/// \headerfile <x86intrin.h>
///
@@ -2241,7 +2257,7 @@ _mm_adds_epu16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
}
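And the unsigned flavor, sketched with 16-bit lanes:

    /* 0xFFF0 + 0x0100 exceeds 0xFFFF, so every lane saturates to 0xFFFF. */
    __m128i usat = _mm_adds_epu16(_mm_set1_epi16((short)0xFFF0),
                                  _mm_set1_epi16(0x0100));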
-/// \brief Computes the rounded avarages of corresponding elements of two
+/// Computes the rounded averages of corresponding elements of two
/// 128-bit unsigned [16 x i8] vectors, saving each result in the
/// corresponding element of a 128-bit result vector of [16 x i8].
///
@@ -2265,7 +2281,7 @@ _mm_avg_epu8(__m128i __a, __m128i __b)
>> 1, __v16qu);
}
-/// \brief Computes the rounded avarages of corresponding elements of two
+/// Computes the rounded averages of corresponding elements of two
/// 128-bit unsigned [8 x i16] vectors, saving each result in the
/// corresponding element of a 128-bit result vector of [8 x i16].
///
@@ -2289,7 +2305,7 @@ _mm_avg_epu16(__m128i __a, __m128i __b)
>> 1, __v8hu);
}
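The rounded average is (a + b + 1) >> 1, so ties round upward; a sketch:

    /* (1 + 2 + 1) >> 1 == 2 in every lane. */
    __m128i avg = _mm_avg_epu16(_mm_set1_epi16(1), _mm_set1_epi16(2));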
-/// \brief Multiplies the corresponding elements of two 128-bit signed [8 x i16]
+/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]
/// vectors, producing eight intermediate 32-bit signed integer products, and
/// adds the consecutive pairs of 32-bit products to form a 128-bit signed
/// [4 x i32] vector.
@@ -2315,7 +2331,7 @@ _mm_madd_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
}
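A sketch of the pairwise multiply-add:

    /* Each 32-bit lane is the sum of two adjacent products: 3*4 + 3*4 == 24. */
    __m128i dots = _mm_madd_epi16(_mm_set1_epi16(3), _mm_set1_epi16(4));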
-/// \brief Compares corresponding elements of two 128-bit signed [8 x i16]
+/// Compares corresponding elements of two 128-bit signed [8 x i16]
/// vectors, saving the greater value from each comparison in the
/// corresponding element of a 128-bit result vector of [8 x i16].
///
@@ -2335,7 +2351,7 @@ _mm_max_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Compares corresponding elements of two 128-bit unsigned [16 x i8]
+/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
/// vectors, saving the greater value from each comparison in the
/// corresponding element of a 128-bit result vector of [16 x i8].
///
@@ -2355,7 +2371,7 @@ _mm_max_epu8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Compares corresponding elements of two 128-bit signed [8 x i16]
+/// Compares corresponding elements of two 128-bit signed [8 x i16]
/// vectors, saving the smaller value from each comparison in the
/// corresponding element of a 128-bit result vector of [8 x i16].
///
@@ -2375,7 +2391,7 @@ _mm_min_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Compares corresponding elements of two 128-bit unsigned [16 x i8]
+/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
/// vectors, saving the smaller value from each comparison in the
/// corresponding element of a 128-bit result vector of [16 x i8].
///
@@ -2395,7 +2411,7 @@ _mm_min_epu8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Multiplies the corresponding elements of two signed [8 x i16]
+/// Multiplies the corresponding elements of two signed [8 x i16]
/// vectors, saving the upper 16 bits of each 32-bit product in the
/// corresponding element of a 128-bit signed [8 x i16] result vector.
///
@@ -2415,7 +2431,7 @@ _mm_mulhi_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Multiplies the corresponding elements of two unsigned [8 x i16]
+/// Multiplies the corresponding elements of two unsigned [8 x i16]
/// vectors, saving the upper 16 bits of each 32-bit product in the
/// corresponding element of a 128-bit unsigned [8 x i16] result vector.
///
@@ -2435,7 +2451,7 @@ _mm_mulhi_epu16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Multiplies the corresponding elements of two signed [8 x i16]
+/// Multiplies the corresponding elements of two signed [8 x i16]
/// vectors, saving the lower 16 bits of each 32-bit product in the
/// corresponding element of a 128-bit signed [8 x i16] result vector.
///
@@ -2455,7 +2471,7 @@ _mm_mullo_epi16(__m128i __a, __m128i __b)
return (__m128i)((__v8hu)__a * (__v8hu)__b);
}
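The high and low halves can be recombined into full 32-bit products; a sketch
using the unpack intrinsics defined later in this header:

    __m128i a  = _mm_set1_epi16(1000), b = _mm_set1_epi16(70);
    __m128i lo = _mm_mullo_epi16(a, b);      /* low 16 bits of 70000 */
    __m128i hi = _mm_mulhi_epi16(a, b);      /* high 16 bits of 70000 */
    __m128i p0 = _mm_unpacklo_epi16(lo, hi); /* products 0..3 as [4 x i32] */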
-/// \brief Multiplies 32-bit unsigned integer values contained in the lower bits
+/// Multiplies 32-bit unsigned integer values contained in the lower bits
/// of the two 64-bit integer vectors and returns the 64-bit unsigned
/// product.
///
@@ -2468,13 +2484,13 @@ _mm_mullo_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer containing one of the source operands.
/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_mul_su32(__m64 __a, __m64 __b)
{
return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
}
-/// \brief Multiplies 32-bit unsigned integer values contained in the lower
+/// Multiplies 32-bit unsigned integer values contained in the lower
/// bits of the corresponding elements of two [2 x i64] vectors, and returns
/// the 64-bit products in the corresponding elements of a [2 x i64] vector.
///
@@ -2493,7 +2509,7 @@ _mm_mul_epu32(__m128i __a, __m128i __b)
return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
}
-/// \brief Computes the absolute differences of corresponding 8-bit integer
+/// Computes the absolute differences of corresponding 8-bit integer
/// values in two 128-bit vectors. Sums the first 8 absolute differences, and
/// separately sums the second 8 absolute differences. Packs these two
/// unsigned 16-bit integer sums into the upper and lower elements of a
@@ -2515,7 +2531,7 @@ _mm_sad_epu8(__m128i __a, __m128i __b)
return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
}
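A sketch of the sum-of-absolute-differences result layout:

    /* |0 - 1| summed over each group of 8 bytes yields 8 in each 64-bit
       half of the result. */
    __m128i sad = _mm_sad_epu8(_mm_setzero_si128(), _mm_set1_epi8(1));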
-/// \brief Subtracts the corresponding 8-bit integer values in the operands.
+/// Subtracts the corresponding 8-bit integer values in the operands.
///
/// \headerfile <x86intrin.h>
///
@@ -2533,7 +2549,7 @@ _mm_sub_epi8(__m128i __a, __m128i __b)
return (__m128i)((__v16qu)__a - (__v16qu)__b);
}
-/// \brief Subtracts the corresponding 16-bit integer values in the operands.
+/// Subtracts the corresponding 16-bit integer values in the operands.
///
/// \headerfile <x86intrin.h>
///
@@ -2551,7 +2567,7 @@ _mm_sub_epi16(__m128i __a, __m128i __b)
return (__m128i)((__v8hu)__a - (__v8hu)__b);
}
-/// \brief Subtracts the corresponding 32-bit integer values in the operands.
+/// Subtracts the corresponding 32-bit integer values in the operands.
///
/// \headerfile <x86intrin.h>
///
@@ -2569,7 +2585,7 @@ _mm_sub_epi32(__m128i __a, __m128i __b)
return (__m128i)((__v4su)__a - (__v4su)__b);
}
-/// \brief Subtracts signed or unsigned 64-bit integer values and writes the
+/// Subtracts signed or unsigned 64-bit integer values and writes the
/// difference to the corresponding bits in the destination.
///
/// \headerfile <x86intrin.h>
@@ -2582,13 +2598,13 @@ _mm_sub_epi32(__m128i __a, __m128i __b)
/// A 64-bit integer vector containing the subtrahend.
/// \returns A 64-bit integer vector containing the difference of the values in
/// the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_sub_si64(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
}
-/// \brief Subtracts the corresponding elements of two [2 x i64] vectors.
+/// Subtracts the corresponding elements of two [2 x i64] vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -2606,10 +2622,10 @@ _mm_sub_epi64(__m128i __a, __m128i __b)
return (__m128i)((__v2du)__a - (__v2du)__b);
}
-/// \brief Subtracts corresponding 8-bit signed integer values in the input and
+/// Subtracts corresponding 8-bit signed integer values in the input and
/// returns the differences in the corresponding bytes in the destination.
-/// Differences greater than 7Fh are saturated to 7Fh, and differences less
-/// than 80h are saturated to 80h.
+/// Differences greater than 0x7F are saturated to 0x7F, and differences less
+/// than 0x80 are saturated to 0x80.
///
/// \headerfile <x86intrin.h>
///
@@ -2627,10 +2643,10 @@ _mm_subs_epi8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Subtracts corresponding 16-bit signed integer values in the input and
+/// Subtracts corresponding 16-bit signed integer values in the input and
/// returns the differences in the corresponding bytes in the destination.
-/// Differences greater than 7FFFh are saturated to 7FFFh, and values less
-/// than 8000h are saturated to 8000h.
+/// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less
+/// than 0x8000 are saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -2648,9 +2664,9 @@ _mm_subs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Subtracts corresponding 8-bit unsigned integer values in the input
+/// Subtracts corresponding 8-bit unsigned integer values in the input
/// and returns the differences in the corresponding bytes in the
-/// destination. Differences less than 00h are saturated to 00h.
+/// destination. Differences less than 0x00 are saturated to 0x00.
///
/// \headerfile <x86intrin.h>
///
@@ -2668,9 +2684,9 @@ _mm_subs_epu8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Subtracts corresponding 16-bit unsigned integer values in the input
+/// Subtracts corresponding 16-bit unsigned integer values in the input
/// and returns the differences in the corresponding bytes in the
-/// destination. Differences less than 0000h are saturated to 0000h.
+/// destination. Differences less than 0x0000 are saturated to 0x0000.
///
/// \headerfile <x86intrin.h>
///
@@ -2688,7 +2704,7 @@ _mm_subs_epu16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Performs a bitwise AND of two 128-bit integer vectors.
+/// Performs a bitwise AND of two 128-bit integer vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -2706,7 +2722,7 @@ _mm_and_si128(__m128i __a, __m128i __b)
return (__m128i)((__v2du)__a & (__v2du)__b);
}
-/// \brief Performs a bitwise AND of two 128-bit integer vectors, using the
+/// Performs a bitwise AND of two 128-bit integer vectors, using the
/// one's complement of the values contained in the first source operand.
///
/// \headerfile <x86intrin.h>
@@ -2725,7 +2741,7 @@ _mm_andnot_si128(__m128i __a, __m128i __b)
{
return (__m128i)(~(__v2du)__a & (__v2du)__b);
}
-/// \brief Performs a bitwise OR of two 128-bit integer vectors.
+/// Performs a bitwise OR of two 128-bit integer vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -2743,7 +2759,7 @@ _mm_or_si128(__m128i __a, __m128i __b)
return (__m128i)((__v2du)__a | (__v2du)__b);
}
-/// \brief Performs a bitwise exclusive OR of two 128-bit integer vectors.
+/// Performs a bitwise exclusive OR of two 128-bit integer vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -2761,7 +2777,7 @@ _mm_xor_si128(__m128i __a, __m128i __b)
return (__m128i)((__v2du)__a ^ (__v2du)__b);
}
-/// \brief Left-shifts the 128-bit integer vector operand by the specified
+/// Left-shifts the 128-bit integer vector operand by the specified
/// number of bytes. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2778,31 +2794,13 @@ _mm_xor_si128(__m128i __a, __m128i __b)
/// An immediate value specifying the number of bytes to left-shift operand
/// \a a.
/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector( \
- (__v16qi)_mm_setzero_si128(), \
- (__v16qi)(__m128i)(a), \
- ((char)(imm)&0xF0) ? 0 : 16 - (char)(imm), \
- ((char)(imm)&0xF0) ? 1 : 17 - (char)(imm), \
- ((char)(imm)&0xF0) ? 2 : 18 - (char)(imm), \
- ((char)(imm)&0xF0) ? 3 : 19 - (char)(imm), \
- ((char)(imm)&0xF0) ? 4 : 20 - (char)(imm), \
- ((char)(imm)&0xF0) ? 5 : 21 - (char)(imm), \
- ((char)(imm)&0xF0) ? 6 : 22 - (char)(imm), \
- ((char)(imm)&0xF0) ? 7 : 23 - (char)(imm), \
- ((char)(imm)&0xF0) ? 8 : 24 - (char)(imm), \
- ((char)(imm)&0xF0) ? 9 : 25 - (char)(imm), \
- ((char)(imm)&0xF0) ? 10 : 26 - (char)(imm), \
- ((char)(imm)&0xF0) ? 11 : 27 - (char)(imm), \
- ((char)(imm)&0xF0) ? 12 : 28 - (char)(imm), \
- ((char)(imm)&0xF0) ? 13 : 29 - (char)(imm), \
- ((char)(imm)&0xF0) ? 14 : 30 - (char)(imm), \
- ((char)(imm)&0xF0) ? 15 : 31 - (char)(imm)); })
+#define _mm_slli_si128(a, imm) \
+ (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
#define _mm_bslli_si128(a, imm) \
- _mm_slli_si128((a), (imm))
+ (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
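A byte-granularity shift sketch (the count is an immediate, as the docs above
note):

    __m128i v  = _mm_set1_epi32(0x11223344);
    __m128i v4 = _mm_slli_si128(v, 4); /* everything moves up 4 bytes;
                                          the low 4 bytes become zero */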
-/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand
+/// Left-shifts each 16-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2821,7 +2819,7 @@ _mm_slli_epi16(__m128i __a, int __count)
return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
}
-/// \brief Left-shifts each 16-bit value in the 128-bit integer vector operand
+/// Left-shifts each 16-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2840,7 +2838,7 @@ _mm_sll_epi16(__m128i __a, __m128i __count)
return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
}
-/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand
+/// Left-shifts each 32-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2859,7 +2857,7 @@ _mm_slli_epi32(__m128i __a, int __count)
return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
}
-/// \brief Left-shifts each 32-bit value in the 128-bit integer vector operand
+/// Left-shifts each 32-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2878,7 +2876,7 @@ _mm_sll_epi32(__m128i __a, __m128i __count)
return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
}
-/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand
+/// Left-shifts each 64-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2897,7 +2895,7 @@ _mm_slli_epi64(__m128i __a, int __count)
return __builtin_ia32_psllqi128((__v2di)__a, __count);
}
-/// \brief Left-shifts each 64-bit value in the 128-bit integer vector operand
+/// Left-shifts each 64-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -2916,7 +2914,7 @@ _mm_sll_epi64(__m128i __a, __m128i __count)
return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
}
-/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand
+/// Right-shifts each 16-bit value in the 128-bit integer vector operand
/// by the specified number of bits. High-order bits are filled with the sign
/// bit of the initial value.
///
@@ -2936,7 +2934,7 @@ _mm_srai_epi16(__m128i __a, int __count)
return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
}
-/// \brief Right-shifts each 16-bit value in the 128-bit integer vector operand
+/// Right-shifts each 16-bit value in the 128-bit integer vector operand
/// by the specified number of bits. High-order bits are filled with the sign
/// bit of the initial value.
///
@@ -2956,7 +2954,7 @@ _mm_sra_epi16(__m128i __a, __m128i __count)
return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
}
-/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand
+/// Right-shifts each 32-bit value in the 128-bit integer vector operand
/// by the specified number of bits. High-order bits are filled with the sign
/// bit of the initial value.
///
@@ -2976,7 +2974,7 @@ _mm_srai_epi32(__m128i __a, int __count)
return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
}
-/// \brief Right-shifts each 32-bit value in the 128-bit integer vector operand
+/// Right-shifts each 32-bit value in the 128-bit integer vector operand
/// by the specified number of bits. High-order bits are filled with the sign
/// bit of the initial value.
///
@@ -2996,7 +2994,7 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
}
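The sign fill is what separates these arithmetic shifts from the logical
shifts defined below; a sketch:

    __m128i sra = _mm_srai_epi32(_mm_set1_epi32(-4), 1); /* -2: sign bit copied */
    __m128i srl = _mm_srli_epi32(_mm_set1_epi32(-4), 1); /* 0x7FFFFFFE: zero fill */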
-/// \brief Right-shifts the 128-bit integer vector operand by the specified
+/// Right-shifts the 128-bit integer vector operand by the specified
/// number of bytes. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3013,31 +3011,13 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
/// An immediate value specifying the number of bytes to right-shift operand
/// \a a.
/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector( \
- (__v16qi)(__m128i)(a), \
- (__v16qi)_mm_setzero_si128(), \
- ((char)(imm)&0xF0) ? 16 : (char)(imm) + 0, \
- ((char)(imm)&0xF0) ? 17 : (char)(imm) + 1, \
- ((char)(imm)&0xF0) ? 18 : (char)(imm) + 2, \
- ((char)(imm)&0xF0) ? 19 : (char)(imm) + 3, \
- ((char)(imm)&0xF0) ? 20 : (char)(imm) + 4, \
- ((char)(imm)&0xF0) ? 21 : (char)(imm) + 5, \
- ((char)(imm)&0xF0) ? 22 : (char)(imm) + 6, \
- ((char)(imm)&0xF0) ? 23 : (char)(imm) + 7, \
- ((char)(imm)&0xF0) ? 24 : (char)(imm) + 8, \
- ((char)(imm)&0xF0) ? 25 : (char)(imm) + 9, \
- ((char)(imm)&0xF0) ? 26 : (char)(imm) + 10, \
- ((char)(imm)&0xF0) ? 27 : (char)(imm) + 11, \
- ((char)(imm)&0xF0) ? 28 : (char)(imm) + 12, \
- ((char)(imm)&0xF0) ? 29 : (char)(imm) + 13, \
- ((char)(imm)&0xF0) ? 30 : (char)(imm) + 14, \
- ((char)(imm)&0xF0) ? 31 : (char)(imm) + 15); })
+#define _mm_srli_si128(a, imm) \
+ (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
#define _mm_bsrli_si128(a, imm) \
- _mm_srli_si128((a), (imm))
+ (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-/// \brief Right-shifts each of 16-bit values in the 128-bit integer vector
+/// Right-shifts each 16-bit value in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3056,7 +3036,7 @@ _mm_srli_epi16(__m128i __a, int __count)
return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
}
-/// \brief Right-shifts each of 16-bit values in the 128-bit integer vector
+/// Right-shifts each 16-bit value in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3075,7 +3055,7 @@ _mm_srl_epi16(__m128i __a, __m128i __count)
return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
}
-/// \brief Right-shifts each of 32-bit values in the 128-bit integer vector
+/// Right-shifts each 32-bit value in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3094,7 +3074,7 @@ _mm_srli_epi32(__m128i __a, int __count)
return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
}
-/// \brief Right-shifts each of 32-bit values in the 128-bit integer vector
+/// Right-shifts each 32-bit value in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3113,7 +3093,7 @@ _mm_srl_epi32(__m128i __a, __m128i __count)
return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
}
-/// \brief Right-shifts each of 64-bit values in the 128-bit integer vector
+/// Right-shifts each 64-bit value in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3132,7 +3112,7 @@ _mm_srli_epi64(__m128i __a, int __count)
return __builtin_ia32_psrlqi128((__v2di)__a, __count);
}
-/// \brief Right-shifts each of 64-bit values in the 128-bit integer vector
+/// Right-shifts each 64-bit value in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
///
/// \headerfile <x86intrin.h>
@@ -3151,8 +3131,8 @@ _mm_srl_epi64(__m128i __a, __m128i __count)
return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
}
-/// \brief Compares each of the corresponding 8-bit values of the 128-bit
-/// integer vectors for equality. Each comparison yields 0h for false, FFh
+/// Compares each of the corresponding 8-bit values of the 128-bit
+/// integer vectors for equality. Each comparison yields 0x0 for false, 0xFF
/// for true.
///
/// \headerfile <x86intrin.h>
@@ -3170,9 +3150,9 @@ _mm_cmpeq_epi8(__m128i __a, __m128i __b)
return (__m128i)((__v16qi)__a == (__v16qi)__b);
}
-/// \brief Compares each of the corresponding 16-bit values of the 128-bit
-/// integer vectors for equality. Each comparison yields 0h for false, FFFFh
-/// for true.
+/// Compares each of the corresponding 16-bit values of the 128-bit
+/// integer vectors for equality. Each comparison yields 0x0 for false,
+/// 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3189,9 +3169,9 @@ _mm_cmpeq_epi16(__m128i __a, __m128i __b)
return (__m128i)((__v8hi)__a == (__v8hi)__b);
}
-/// \brief Compares each of the corresponding 32-bit values of the 128-bit
-/// integer vectors for equality. Each comparison yields 0h for false,
-/// FFFFFFFFh for true.
+/// Compares each of the corresponding 32-bit values of the 128-bit
+/// integer vectors for equality. Each comparison yields 0x0 for false,
+/// 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3208,10 +3188,10 @@ _mm_cmpeq_epi32(__m128i __a, __m128i __b)
return (__m128i)((__v4si)__a == (__v4si)__b);
}
-/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit
+/// Compares each of the corresponding signed 8-bit values of the 128-bit
/// integer vectors to determine if the values in the first operand are
-/// greater than those in the second operand. Each comparison yields 0h for
-/// false, FFh for true.
+/// greater than those in the second operand. Each comparison yields 0x0 for
+/// false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3230,11 +3210,11 @@ _mm_cmpgt_epi8(__m128i __a, __m128i __b)
return (__m128i)((__v16qs)__a > (__v16qs)__b);
}
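Because true lanes are all-ones, comparison results compose directly with the
bitwise operations above; a branchless-select sketch with illustrative values:

    __m128i a = _mm_set1_epi32(1), b = _mm_set1_epi32(1);
    __m128i x = _mm_set1_epi32(7), y = _mm_set1_epi32(9);
    __m128i m   = _mm_cmpeq_epi32(a, b);                 /* all-ones here */
    __m128i sel = _mm_or_si128(_mm_and_si128(m, x),
                               _mm_andnot_si128(m, y));  /* picks x */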
-/// \brief Compares each of the corresponding signed 16-bit values of the
+/// Compares each of the corresponding signed 16-bit values of the
/// 128-bit integer vectors to determine if the values in the first operand
/// are greater than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3251,11 +3231,11 @@ _mm_cmpgt_epi16(__m128i __a, __m128i __b)
return (__m128i)((__v8hi)__a > (__v8hi)__b);
}
-/// \brief Compares each of the corresponding signed 32-bit values of the
+/// Compares each of the corresponding signed 32-bit values of the
/// 128-bit integer vectors to determine if the values in the first operand
/// are greater than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3272,11 +3252,11 @@ _mm_cmpgt_epi32(__m128i __a, __m128i __b)
return (__m128i)((__v4si)__a > (__v4si)__b);
}
-/// \brief Compares each of the corresponding signed 8-bit values of the 128-bit
+/// Compares each of the corresponding signed 8-bit values of the 128-bit
/// integer vectors to determine if the values in the first operand are less
/// than those in the second operand.
///
-/// Each comparison yields 0h for false, FFh for true.
+/// Each comparison yields 0x0 for false, 0xFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3293,11 +3273,11 @@ _mm_cmplt_epi8(__m128i __a, __m128i __b)
return _mm_cmpgt_epi8(__b, __a);
}
-/// \brief Compares each of the corresponding signed 16-bit values of the
+/// Compares each of the corresponding signed 16-bit values of the
/// 128-bit integer vectors to determine if the values in the first operand
/// are less than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3314,11 +3294,11 @@ _mm_cmplt_epi16(__m128i __a, __m128i __b)
return _mm_cmpgt_epi16(__b, __a);
}
-/// \brief Compares each of the corresponding signed 32-bit values of the
+/// Compares each of the corresponding signed 32-bit values of the
/// 128-bit integer vectors to determine if the values in the first operand
/// are less than those in the second operand.
///
-/// Each comparison yields 0h for false, FFFFFFFFh for true.
+/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
///
/// \headerfile <x86intrin.h>
///
@@ -3336,7 +3316,7 @@ _mm_cmplt_epi32(__m128i __a, __m128i __b)
}
#ifdef __x86_64__
-/// \brief Converts a 64-bit signed integer value from the second operand into a
+/// Converts a 64-bit signed integer value from the second operand into a
/// double-precision value and returns it in the lower element of a [2 x
/// double] vector; the upper element of the returned vector is copied from
/// the upper element of the first operand.
@@ -3360,7 +3340,7 @@ _mm_cvtsi64_sd(__m128d __a, long long __b)
return __a;
}
-/// \brief Converts the first (lower) element of a vector of [2 x double] into a
+/// Converts the first (lower) element of a vector of [2 x double] into a
/// 64-bit signed integer value, according to the current rounding mode.
///
/// \headerfile <x86intrin.h>
@@ -3377,7 +3357,7 @@ _mm_cvtsd_si64(__m128d __a)
return __builtin_ia32_cvtsd2si64((__v2df)__a);
}
-/// \brief Converts the first (lower) element of a vector of [2 x double] into a
+/// Converts the first (lower) element of a vector of [2 x double] into a
/// 64-bit signed integer value, truncating the result when it is inexact.
///
/// \headerfile <x86intrin.h>
@@ -3396,7 +3376,7 @@ _mm_cvttsd_si64(__m128d __a)
}
#endif
-/// \brief Converts a vector of [4 x i32] into a vector of [4 x float].
+/// Converts a vector of [4 x i32] into a vector of [4 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -3408,10 +3388,10 @@ _mm_cvttsd_si64(__m128d __a)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_cvtepi32_ps(__m128i __a)
{
- return __builtin_ia32_cvtdq2ps((__v4si)__a);
+ return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
}
-/// \brief Converts a vector of [4 x float] into a vector of [4 x i32].
+/// Converts a vector of [4 x float] into a vector of [4 x i32].
///
/// \headerfile <x86intrin.h>
///
@@ -3427,7 +3407,7 @@ _mm_cvtps_epi32(__m128 __a)
return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
}
-/// \brief Converts a vector of [4 x float] into a vector of [4 x i32],
+/// Converts a vector of [4 x float] into a vector of [4 x i32],
/// truncating the result when it is inexact.
///
/// \headerfile <x86intrin.h>
@@ -3444,7 +3424,7 @@ _mm_cvttps_epi32(__m128 __a)
return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
}
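The rounding/truncating pair in one sketch (the default MXCSR mode is round to
nearest even):

    __m128 f = _mm_set1_ps(1.5f);
    __m128i nearest = _mm_cvtps_epi32(f);  /* 2 under the default mode */
    __m128i trunc   = _mm_cvttps_epi32(f); /* always 1: fraction discarded */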
-/// \brief Returns a vector of [4 x i32] where the lowest element is the input
+/// Returns a vector of [4 x i32] where the lowest element is the input
/// operand and the remaining elements are zero.
///
/// \headerfile <x86intrin.h>
@@ -3457,11 +3437,11 @@ _mm_cvttps_epi32(__m128 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtsi32_si128(int __a)
{
- return (__m128i)(__v4si){ __a, 0, 0, 0 };
+ return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
}
#ifdef __x86_64__
-/// \brief Returns a vector of [2 x i64] where the lower element is the input
+/// Returns a vector of [2 x i64] where the lower element is the input
/// operand and the upper element is zero.
///
/// \headerfile <x86intrin.h>
@@ -3474,11 +3454,11 @@ _mm_cvtsi32_si128(int __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtsi64_si128(long long __a)
{
- return (__m128i){ __a, 0 };
+ return __extension__ (__m128i)(__v2di){ __a, 0 };
}
#endif
-/// \brief Moves the least significant 32 bits of a vector of [4 x i32] to a
+/// Moves the least significant 32 bits of a vector of [4 x i32] to a
/// 32-bit signed integer value.
///
/// \headerfile <x86intrin.h>
@@ -3497,7 +3477,7 @@ _mm_cvtsi128_si32(__m128i __a)
}
#ifdef __x86_64__
-/// \brief Moves the least significant 64 bits of a vector of [2 x i64] to a
+/// Moves the least significant 64 bits of a vector of [2 x i64] to a
/// 64-bit signed integer value.
///
/// \headerfile <x86intrin.h>
@@ -3515,7 +3495,7 @@ _mm_cvtsi128_si64(__m128i __a)
}
#endif
-/// \brief Moves packed integer values from an aligned 128-bit memory location
+/// Moves packed integer values from an aligned 128-bit memory location
/// to elements in a 128-bit integer vector.
///
/// \headerfile <x86intrin.h>
@@ -3531,7 +3511,7 @@ _mm_load_si128(__m128i const *__p)
return *__p;
}
-/// \brief Moves packed integer values from an unaligned 128-bit memory location
+/// Moves packed integer values from an unaligned 128-bit memory location
/// to elements in a 128-bit integer vector.
///
/// \headerfile <x86intrin.h>
@@ -3550,7 +3530,7 @@ _mm_loadu_si128(__m128i const *__p)
return ((struct __loadu_si128*)__p)->__v;
}
-/// \brief Returns a vector of [2 x i64] where the lower element is taken from
+/// Returns a vector of [2 x i64] where the lower element is taken from
/// the lower element of the operand, and the upper element is zero.
///
/// \headerfile <x86intrin.h>
@@ -3568,10 +3548,10 @@ _mm_loadl_epi64(__m128i const *__p)
struct __mm_loadl_epi64_struct {
long long __u;
} __attribute__((__packed__, __may_alias__));
- return (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+ return __extension__ (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
}
-/// \brief Generates a 128-bit vector of [4 x i32] with unspecified content.
+/// Generates a 128-bit vector of [4 x i32] with unspecified content.
/// This could be used as an argument to another intrinsic function where the
/// argument is required but the value is not actually used.
///
@@ -3586,7 +3566,7 @@ _mm_undefined_si128(void)
return (__m128i)__builtin_ia32_undef128();
}
-/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
+/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
/// the specified 64-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -3605,10 +3585,10 @@ _mm_undefined_si128(void)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi64x(long long __q1, long long __q0)
{
- return (__m128i){ __q0, __q1 };
+ return __extension__ (__m128i)(__v2di){ __q0, __q1 };
}
-/// \brief Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
+/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
/// the specified 64-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -3627,10 +3607,10 @@ _mm_set_epi64x(long long __q1, long long __q0)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi64(__m64 __q1, __m64 __q0)
{
- return (__m128i){ (long long)__q0, (long long)__q1 };
+ return _mm_set_epi64x((long long)__q1, (long long)__q0);
}
-/// \brief Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
+/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
/// the specified 32-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -3655,10 +3635,10 @@ _mm_set_epi64(__m64 __q1, __m64 __q0)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
{
- return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+ return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
}
-/// \brief Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
+/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
/// the specified 16-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -3695,10 +3675,10 @@ _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
{
- return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+ return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
}
-/// \brief Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
+/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
/// the specified 8-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -3743,10 +3723,10 @@ _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
{
- return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+ return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
}
-/// \brief Initializes both values in a 128-bit integer vector with the
+/// Initializes both values in a 128-bit integer vector with the
/// specified 64-bit integer value.
///
/// \headerfile <x86intrin.h>
@@ -3762,10 +3742,10 @@ _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set1_epi64x(long long __q)
{
- return (__m128i){ __q, __q };
+ return _mm_set_epi64x(__q, __q);
}
-/// \brief Initializes both values in a 128-bit vector of [2 x i64] with the
+/// Initializes both values in a 128-bit vector of [2 x i64] with the
/// specified 64-bit value.
///
/// \headerfile <x86intrin.h>
@@ -3781,10 +3761,10 @@ _mm_set1_epi64x(long long __q)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set1_epi64(__m64 __q)
{
- return (__m128i){ (long long)__q, (long long)__q };
+ return _mm_set_epi64(__q, __q);
}
-/// \brief Initializes all values in a 128-bit vector of [4 x i32] with the
+/// Initializes all values in a 128-bit vector of [4 x i32] with the
/// specified 32-bit value.
///
/// \headerfile <x86intrin.h>
@@ -3800,10 +3780,10 @@ _mm_set1_epi64(__m64 __q)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set1_epi32(int __i)
{
- return (__m128i)(__v4si){ __i, __i, __i, __i };
+ return _mm_set_epi32(__i, __i, __i, __i);
}
-/// \brief Initializes all values in a 128-bit vector of [8 x i16] with the
+/// Initializes all values in a 128-bit vector of [8 x i16] with the
/// specified 16-bit value.
///
/// \headerfile <x86intrin.h>
@@ -3819,10 +3799,10 @@ _mm_set1_epi32(int __i)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set1_epi16(short __w)
{
- return (__m128i)(__v8hi){ __w, __w, __w, __w, __w, __w, __w, __w };
+ return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
}
-/// \brief Initializes all values in a 128-bit vector of [16 x i8] with the
+/// Initializes all values in a 128-bit vector of [16 x i8] with the
/// specified 8-bit value.
///
/// \headerfile <x86intrin.h>
@@ -3838,16 +3818,15 @@ _mm_set1_epi16(short __w)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set1_epi8(char __b)
{
- return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b };
+ return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
}
-/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// Constructs a 128-bit integer vector, initialized in reverse order
/// with the specified 64-bit integral values.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
-/// instruction.
+/// This intrinsic does not correspond to a specific instruction.
///
/// \param __q0
/// A 64-bit integral value used to initialize the lower 64 bits of the
@@ -3859,10 +3838,10 @@ _mm_set1_epi8(char __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi64(__m64 __q0, __m64 __q1)
{
- return (__m128i){ (long long)__q0, (long long)__q1 };
+ return _mm_set_epi64(__q1, __q0);
}
-/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// Constructs a 128-bit integer vector, initialized in reverse order
/// with the specified 32-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3882,10 +3861,10 @@ _mm_setr_epi64(__m64 __q0, __m64 __q1)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
{
- return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+ return _mm_set_epi32(__i3, __i2, __i1, __i0);
}
-/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// Constructs a 128-bit integer vector, initialized in reverse order
/// with the specified 16-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3913,10 +3892,10 @@ _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
{
- return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+ return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
}
-/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// Constructs a 128-bit integer vector, initialized in reverse order
/// with the specified 8-bit integral values.
///
/// \headerfile <x86intrin.h>
@@ -3960,10 +3939,10 @@ _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
{
- return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+ return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
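The set/setr argument orders in one sketch:

    /* _mm_set_* takes arguments highest element first; _mm_setr_* is reversed. */
    __m128i hi_first = _mm_set_epi32(3, 2, 1, 0);  /* element 0 == 0 */
    __m128i lo_first = _mm_setr_epi32(0, 1, 2, 3); /* the same vector */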
-/// \brief Creates a 128-bit integer vector initialized to zero.
+/// Creates a 128-bit integer vector initialized to zero.
///
/// \headerfile <x86intrin.h>
///
@@ -3974,10 +3953,10 @@ _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setzero_si128(void)
{
- return (__m128i){ 0LL, 0LL };
+ return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
}
-/// \brief Stores a 128-bit integer vector to a memory location aligned on a
+/// Stores a 128-bit integer vector to a memory location aligned on a
/// 128-bit boundary.
///
/// \headerfile <x86intrin.h>
@@ -3995,7 +3974,7 @@ _mm_store_si128(__m128i *__p, __m128i __b)
*__p = __b;
}
-/// \brief Stores a 128-bit integer vector to an unaligned memory location.
+/// Stores a 128-bit integer vector to an unaligned memory location.
///
/// \headerfile <x86intrin.h>
///
@@ -4014,11 +3993,11 @@ _mm_storeu_si128(__m128i *__p, __m128i __b)
((struct __storeu_si128*)__p)->__v = __b;
}
-/// \brief Moves bytes selected by the mask from the first operand to the
+/// Moves bytes selected by the mask from the first operand to the
/// specified unaligned memory location. When a mask bit is 1, the
/// corresponding byte is written, otherwise it is not written.
///
-/// To minimize caching, the date is flagged as non-temporal (unlikely to be
+/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon). Exception and trap behavior for elements not selected
/// for storage to memory are implementation dependent.
///
@@ -4041,7 +4020,7 @@ _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
__builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
}
-/// \brief Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
+/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
/// a memory location.
///
/// \headerfile <x86intrin.h>
@@ -4063,7 +4042,7 @@ _mm_storel_epi64(__m128i *__p, __m128i __a)
((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
}
-/// \brief Stores a 128-bit floating point vector of [2 x double] to a 128-bit
+/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
/// aligned memory location.
///
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
@@ -4083,7 +4062,7 @@ _mm_stream_pd(double *__p, __m128d __a)
__builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
}
-/// \brief Stores a 128-bit integer vector to a 128-bit aligned memory location.
+/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
///
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
@@ -4102,7 +4081,7 @@ _mm_stream_si128(__m128i *__p, __m128i __a)
__builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
}
-/// \brief Stores a 32-bit integer value in the specified memory location.
+/// Stores a 32-bit integer value in the specified memory location.
///
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
@@ -4115,14 +4094,14 @@ _mm_stream_si128(__m128i *__p, __m128i __a)
/// A pointer to the 32-bit memory location used to store the value.
/// \param __a
/// A 32-bit integer containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
_mm_stream_si32(int *__p, int __a)
{
__builtin_ia32_movnti(__p, __a);
}
#ifdef __x86_64__
-/// \brief Stores a 64-bit integer value in the specified memory location.
+/// Stores a 64-bit integer value in the specified memory location.
///
/// To minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
@@ -4135,7 +4114,7 @@ _mm_stream_si32(int *__p, int __a)
/// A pointer to the 64-bit memory location used to store the value.
/// \param __a
/// A 64-bit integer containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
_mm_stream_si64(long long *__p, long long __a)
{
__builtin_ia32_movnti64(__p, __a);
@@ -4146,7 +4125,7 @@ _mm_stream_si64(long long *__p, long long __a)
extern "C" {
#endif
-/// \brief The cache line containing \a __p is flushed and invalidated from all
+/// The cache line containing \a __p is flushed and invalidated from all
/// caches in the coherency domain.
///
/// \headerfile <x86intrin.h>
@@ -4158,7 +4137,7 @@ extern "C" {
/// flushed.
void _mm_clflush(void const * __p);
-/// \brief Forces strong memory ordering (serialization) between load
+/// Forces strong memory ordering (serialization) between load
/// instructions preceding this instruction and load instructions following
/// this instruction, ensuring the system completes all previous loads before
/// executing subsequent loads.
@@ -4169,7 +4148,7 @@ void _mm_clflush(void const * __p);
///
void _mm_lfence(void);
-/// \brief Forces strong memory ordering (serialization) between load and store
+/// Forces strong memory ordering (serialization) between load and store
/// instructions preceding this instruction and load and store instructions
/// following this instruction, ensuring that the system completes all
/// previous memory accesses before executing subsequent memory accesses.
@@ -4184,7 +4163,7 @@ void _mm_mfence(void);
} // extern "C"
#endif
-/// \brief Converts 16-bit signed integers from both 128-bit integer vector
+/// Converts 16-bit signed integers from both 128-bit integer vector
/// operands into 8-bit signed integers, and packs the results into the
/// destination. Positive values greater than 0x7F are saturated to 0x7F.
/// Negative values less than 0x80 are saturated to 0x80.
@@ -4212,7 +4191,7 @@ _mm_packs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Converts 32-bit signed integers from both 128-bit integer vector
+/// Converts 32-bit signed integers from both 128-bit integer vector
/// operands into 16-bit signed integers, and packs the results into the
/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
/// Negative values less than 0x8000 are saturated to 0x8000.
@@ -4240,7 +4219,7 @@ _mm_packs_epi32(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}
-/// \brief Converts 16-bit signed integers from both 128-bit integer vector
+/// Converts 16-bit signed integers from both 128-bit integer vector
/// operands into 8-bit unsigned integers, and packs the results into the
/// destination. Values greater than 0xFF are saturated to 0xFF. Values less
/// than 0x00 are saturated to 0x00.
@@ -4268,7 +4247,7 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
+/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
/// the immediate-value parameter as a selector.
///
/// \headerfile <x86intrin.h>
@@ -4290,14 +4269,11 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
/// 111: assign values from bits [127:112] of \a __a.
/// \returns An integer, whose lower 16 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_extract_epi16(__m128i __a, int __imm)
-{
- __v8hi __b = (__v8hi)__a;
- return (unsigned short)__b[__imm & 7];
-}
+#define _mm_extract_epi16(a, imm) \
+ (int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
+ (int)(imm))
-/// \brief Constructs a 128-bit integer vector by first making a copy of the
+/// Constructs a 128-bit integer vector by first making a copy of the
/// 128-bit integer vector parameter, and then inserting the lower 16 bits
/// of an integer parameter into an offset specified by the immediate-value
/// parameter.
@@ -4317,15 +4293,11 @@ _mm_extract_epi16(__m128i __a, int __imm)
/// An immediate value specifying the bit offset in the result at which the
/// lower 16 bits of \a __b are written.
/// \returns A 128-bit integer vector containing the constructed values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_insert_epi16(__m128i __a, int __b, int __imm)
-{
- __v8hi __c = (__v8hi)__a;
- __c[__imm & 7] = __b;
- return (__m128i)__c;
-}
+#define _mm_insert_epi16(a, b, imm) \
+ (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
+ (int)(imm))
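A round-trip sketch for the extract/insert pair (both selectors must be
immediates; values are illustrative):

    __m128i v  = _mm_set1_epi16(5);
    int     w  = _mm_extract_epi16(v, 3);       /* reads lane 3 */
    __m128i v2 = _mm_insert_epi16(v, w + 1, 3); /* writes 6 back to lane 3 */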
-/// \brief Copies the values of the most significant bits from each 8-bit
+/// Copies the values of the most significant bits from each 8-bit
/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
/// value, zero-extends the value, and writes it to the destination.
///
@@ -4343,7 +4315,7 @@ _mm_movemask_epi8(__m128i __a)
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
-/// \brief Constructs a 128-bit integer vector by shuffling four 32-bit
+/// Constructs a 128-bit integer vector by shuffling four 32-bit
/// elements of a 128-bit integer vector parameter, using the immediate-value
/// parameter as a specifier.
///
@@ -4371,13 +4343,10 @@ _mm_movemask_epi8(__m128i __a)
/// 10: assign values from bits [95:64] of \a a. \n
/// 11: assign values from bits [127:96] of \a a.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
- (__v4si)_mm_undefined_si128(), \
- ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
- ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); })
+#define _mm_shuffle_epi32(a, imm) \
+ (__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm))
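A sketch using the _MM_SHUFFLE helper from <xmmintrin.h>:

    /* _MM_SHUFFLE(3,2,1,0) is the identity; reversing the selectors
       reverses the 32-bit lanes. */
    __m128i fwd = _mm_setr_epi32(0, 1, 2, 3);
    __m128i rev = _mm_shuffle_epi32(fwd, _MM_SHUFFLE(0, 1, 2, 3)); /* 3,2,1,0 */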
-/// \brief Constructs a 128-bit integer vector by shuffling four lower 16-bit
+/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
/// elements of a 128-bit integer vector of [8 x i16], using the immediate
/// value parameter as a specifier.
///
@@ -4404,14 +4373,10 @@ _mm_movemask_epi8(__m128i __a)
/// 10: assign values from bits [47:32] of \a a. \n
/// 11: assign values from bits [63:48] of \a a. \n
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
- (__v8hi)_mm_undefined_si128(), \
- ((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
- ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3, \
- 4, 5, 6, 7); })
-
-/// \brief Constructs a 128-bit integer vector by shuffling four upper 16-bit
+#define _mm_shufflelo_epi16(a, imm) \
+ (__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm))
+
+/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
/// elements of a 128-bit integer vector of [8 x i16], using the immediate
/// value parameter as a specifier.
///
@@ -4438,16 +4403,10 @@ _mm_movemask_epi8(__m128i __a)
/// 10: assign values from bits [111:96] of \a a. \n
/// 11: assign values from bits [127:112] of \a a. \n
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
- (__v8hi)_mm_undefined_si128(), \
- 0, 1, 2, 3, \
- 4 + (((imm) >> 0) & 0x3), \
- 4 + (((imm) >> 2) & 0x3), \
- 4 + (((imm) >> 4) & 0x3), \
- 4 + (((imm) >> 6) & 0x3)); })
-
-/// \brief Unpacks the high-order (index 8-15) values from two 128-bit vectors
+#define _mm_shufflehi_epi16(a, imm) \
+ (__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm))
+
+/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
///
/// \headerfile <x86intrin.h>
@@ -4482,7 +4441,7 @@ _mm_unpackhi_epi8(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
-/// \brief Unpacks the high-order (index 4-7) values from two 128-bit vectors of
+/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
/// [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
///
/// \headerfile <x86intrin.h>
@@ -4509,7 +4468,7 @@ _mm_unpackhi_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
}
-/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of
+/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
///
/// \headerfile <x86intrin.h>
@@ -4532,8 +4491,8 @@ _mm_unpackhi_epi32(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
}
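The interleave pattern in one sketch:

    /* High halves interleave as {a2, b2, a3, b3}. */
    __m128i a  = _mm_setr_epi32(0, 1, 2, 3);
    __m128i b  = _mm_setr_epi32(10, 11, 12, 13);
    __m128i hi = _mm_unpackhi_epi32(a, b);   /* {2, 12, 3, 13} */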
-/// \brief Unpacks the high-order (odd-indexed) values from two 128-bit vectors
-/// of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
+/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
+/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
///
/// \headerfile <x86intrin.h>
///
@@ -4553,7 +4512,7 @@ _mm_unpackhi_epi64(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
}
-/// \brief Unpacks the low-order (index 0-7) values from two 128-bit vectors of
+/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
/// [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
///
/// \headerfile <x86intrin.h>
@@ -4588,7 +4547,7 @@ _mm_unpacklo_epi8(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
}
-/// \brief Unpacks the low-order (index 0-3) values from each of the two 128-bit
+/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
/// vectors of [8 x i16] and interleaves them into a 128-bit vector of
/// [8 x i16].
///
@@ -4616,7 +4575,7 @@ _mm_unpacklo_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
}
-/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of
+/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
///
/// \headerfile <x86intrin.h>
@@ -4639,7 +4598,7 @@ _mm_unpacklo_epi32(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
}
-/// \brief Unpacks the low-order 64-bit elements from two 128-bit vectors of
+/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
///
/// \headerfile <x86intrin.h>
@@ -4660,12 +4619,12 @@ _mm_unpacklo_epi64(__m128i __a, __m128i __b)
return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
}
-/// \brief Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
+/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
/// integer.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic has no corresponding instruction.
+/// This intrinsic corresponds to the <c> MOVDQ2Q </c> instruction.
///
/// \param __a
/// A 128-bit integer vector operand. The lower 64 bits are moved to the
@@ -4677,12 +4636,12 @@ _mm_movepi64_pi64(__m128i __a)
return (__m64)__a[0];
}
-/// \brief Moves the 64-bit operand to a 128-bit integer vector, zeroing the
+/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the
/// upper bits.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ / MOVD </c> instruction.
+/// This intrinsic corresponds to the <c> MOVD+VMOVQ </c> instruction.
///
/// \param __a
/// A 64-bit value.
@@ -4691,10 +4650,10 @@ _mm_movepi64_pi64(__m128i __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_movpi64_epi64(__m64 __a)
{
- return (__m128i){ (long long)__a, 0 };
+ return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
}
-/// \brief Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
+/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
/// integer vector, zeroing the upper bits.
///
/// \headerfile <x86intrin.h>
@@ -4709,11 +4668,11 @@ _mm_movpi64_epi64(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_move_epi64(__m128i __a)
{
- return __builtin_shufflevector((__v2di)__a, (__m128i){ 0 }, 0, 2);
+ return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
}
-/// \brief Unpacks the high-order (odd-indexed) values from two 128-bit vectors
-/// of [2 x double] and interleaves them into a 128-bit vector of [2 x
+/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
+/// [2 x double] and interleaves them into a 128-bit vector of [2 x
/// double].
///
/// \headerfile <x86intrin.h>
@@ -4733,7 +4692,7 @@ _mm_unpackhi_pd(__m128d __a, __m128d __b)
return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
}
-/// \brief Unpacks the low-order (even-indexed) values from two 128-bit vectors
+/// Unpacks the low-order 64-bit elements from two 128-bit vectors
/// of [2 x double] and interleaves them into a 128-bit vector of [2 x
/// double].
///
@@ -4754,7 +4713,7 @@ _mm_unpacklo_pd(__m128d __a, __m128d __b)
return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
}
-/// \brief Extracts the sign bits of the double-precision values in the 128-bit
+/// Extracts the sign bits of the double-precision values in the 128-bit
/// vector of [2 x double], zero-extends the value, and writes it to the
/// low-order bits of the destination.
///
@@ -4774,7 +4733,7 @@ _mm_movemask_pd(__m128d __a)
}
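
A short sketch of the usual movemask pattern, assuming SSE2; any_less is an illustrative helper:

#include <emmintrin.h>

/* Bit k of the mask is the sign bit of element k, and _mm_cmplt_pd sets
   matching lanes to all-ones, so a nonzero mask means some lane compared
   true. */
int any_less(__m128d a, __m128d b)
{
  return _mm_movemask_pd(_mm_cmplt_pd(a, b)) != 0;
}
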
-/// \brief Constructs a 128-bit floating-point vector of [2 x double] from two
+/// Constructs a 128-bit floating-point vector of [2 x double] from two
/// 128-bit vector parameters of [2 x double], using the immediate-value
/// parameter as a specifier.
///
@@ -4792,18 +4751,17 @@ _mm_movemask_pd(__m128d __a)
/// A 128-bit vector of [2 x double].
/// \param i
/// An 8-bit immediate value. The least significant two bits specify which
-/// elements to copy from a and b: \n
-/// Bit[0] = 0: lower element of a copied to lower element of result. \n
-/// Bit[0] = 1: upper element of a copied to lower element of result. \n
+/// elements to copy from \a a and \a b: \n
+/// Bit[0] = 0: lower element of \a a copied to lower element of result. \n
+/// Bit[0] = 1: upper element of \a a copied to lower element of result. \n
/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n
/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n
/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
- (__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
- 0 + (((i) >> 0) & 0x1), \
- 2 + (((i) >> 1) & 0x1)); })
+#define _mm_shuffle_pd(a, b, i) \
+ (__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (int)(i))
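
A worked example of the immediate encoding described above, assuming SSE2; demo_shuffle is an illustrative name:

#include <emmintrin.h>

__m128d demo_shuffle(void)
{
  __m128d a = _mm_set_pd(2.0, 1.0);  /* a = { 1.0, 2.0 } */
  __m128d b = _mm_set_pd(4.0, 3.0);  /* b = { 3.0, 4.0 } */
  /* i = 1: Bit[0] = 1 picks the upper element of a for the lower result,
     Bit[1] = 0 picks the lower element of b, giving { 2.0, 3.0 }. */
  return _mm_shuffle_pd(a, b, 1);
}
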
-/// \brief Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
+/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
/// floating-point vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -4820,7 +4778,7 @@ _mm_castpd_ps(__m128d __a)
return (__m128)__a;
}
-/// \brief Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
+/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
/// integer vector.
///
/// \headerfile <x86intrin.h>
@@ -4837,7 +4795,7 @@ _mm_castpd_si128(__m128d __a)
return (__m128i)__a;
}
-/// \brief Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
+/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
/// floating-point vector of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -4854,7 +4812,7 @@ _mm_castps_pd(__m128 __a)
return (__m128d)__a;
}
-/// \brief Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
+/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
/// integer vector.
///
/// \headerfile <x86intrin.h>
@@ -4871,7 +4829,7 @@ _mm_castps_si128(__m128 __a)
return (__m128i)__a;
}
-/// \brief Casts a 128-bit integer vector into a 128-bit floating-point vector
+/// Casts a 128-bit integer vector into a 128-bit floating-point vector
/// of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -4888,7 +4846,7 @@ _mm_castsi128_ps(__m128i __a)
return (__m128)__a;
}
-/// \brief Casts a 128-bit integer vector into a 128-bit floating-point vector
+/// Casts a 128-bit integer vector into a 128-bit floating-point vector
/// of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -4909,7 +4867,7 @@ _mm_castsi128_pd(__m128i __a)
extern "C" {
#endif
-/// \brief Indicates that a spin loop is being executed for the purposes of
+/// Indicates that a spin loop is being executed for the purposes of
/// optimizing power consumption during the loop.
///
/// \headerfile <x86intrin.h>
@@ -4922,6 +4880,7 @@ void _mm_pause(void);
} // extern "C"
#endif
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_MMX
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
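
A sketch of the intended _mm_pause use in a spin loop; spin_until_set is an illustrative helper and the flag is assumed to be set by another thread:

#include <emmintrin.h>

static void spin_until_set(volatile int *flag)
{
  while (!*flag)
    _mm_pause();  /* hint: reduce power and contention while spinning */
}
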
diff --git a/lib/Headers/f16cintrin.h b/lib/Headers/f16cintrin.h
index b796cc84316f..3d35f28eb356 100644
--- a/lib/Headers/f16cintrin.h
+++ b/lib/Headers/f16cintrin.h
@@ -21,18 +21,25 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __EMMINTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <f16cintrin.h> directly; include <emmintrin.h> instead."
+#if !defined __IMMINTRIN_H
+#error "Never use <f16cintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __F16CINTRIN_H
#define __F16CINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(256)))
-/// \brief Converts a 16-bit half-precision float value into a 32-bit float
+/* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h,
+ * but that's because icc can emulate these without f16c using a library call.
+ * Since we don't do that, let's leave these in f16cintrin.h.
+ */
+
+/// Converts a 16-bit half-precision float value into a 32-bit float
/// value.
///
/// \headerfile <x86intrin.h>
@@ -42,7 +49,7 @@
/// \param __a
/// A 16-bit half-precision float value.
/// \returns The converted 32-bit float value.
-static __inline float __DEFAULT_FN_ATTRS
+static __inline float __DEFAULT_FN_ATTRS128
_cvtsh_ss(unsigned short __a)
{
__v8hi v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
@@ -50,7 +57,7 @@ _cvtsh_ss(unsigned short __a)
return r[0];
}
-/// \brief Converts a 32-bit single-precision float value to a 16-bit
+/// Converts a 32-bit single-precision float value to a 16-bit
/// half-precision float value.
///
/// \headerfile <x86intrin.h>
@@ -72,11 +79,11 @@ _cvtsh_ss(unsigned short __a)
/// 011: Truncate \n
/// 1XX: Use MXCSR.RC for rounding
/// \returns The converted 16-bit half-precision float value.
-#define _cvtss_sh(a, imm) __extension__ ({ \
+#define _cvtss_sh(a, imm) \
(unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
- (imm)))[0]); })
+ (imm)))[0])
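
A scalar round-trip sketch, assuming the target enables F16C (e.g. -mf16c); roundtrip is an illustrative name:

#include <immintrin.h>

float roundtrip(float x)
{
  unsigned short h = _cvtss_sh(x, 0);  /* imm 000: round to nearest */
  return _cvtsh_ss(h);                 /* nearest half-precision value */
}
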
-/// \brief Converts a 128-bit vector containing 32-bit float values into a
+/// Converts a 128-bit vector containing 32-bit float values into a
/// 128-bit vector containing 16-bit half-precision float values.
///
/// \headerfile <x86intrin.h>
@@ -99,10 +106,10 @@ _cvtsh_ss(unsigned short __a)
/// \returns A 128-bit vector containing converted 16-bit half-precision float
/// values. The lower 64 bits are used to store the converted 16-bit
/// half-precision floating-point values.
-#define _mm_cvtps_ph(a, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)); })
+#define _mm_cvtps_ph(a, imm) \
+ (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm))
-/// \brief Converts a 128-bit vector containing 16-bit half-precision float
+/// Converts a 128-bit vector containing 16-bit half-precision float
/// values into a 128-bit vector containing 32-bit float values.
///
/// \headerfile <x86intrin.h>
@@ -113,12 +120,57 @@ _cvtsh_ss(unsigned short __a)
/// A 128-bit vector containing 16-bit half-precision float values. The lower
/// 64 bits are used in the conversion.
/// \returns A 128-bit vector of [4 x float] containing converted float values.
-static __inline __m128 __DEFAULT_FN_ATTRS
+static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_cvtph_ps(__m128i __a)
{
return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
}
-#undef __DEFAULT_FN_ATTRS
+/// Converts a 256-bit vector of [8 x float] into a 128-bit vector
+/// containing 16-bit half-precision float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
+///
+/// \param a
+/// A 256-bit vector containing 32-bit single-precision float values to be
+/// converted to 16-bit half-precision float values.
+/// \param imm
+/// An immediate value controlling rounding using bits [2:0]: \n
+/// 000: Nearest \n
+/// 001: Down \n
+/// 010: Up \n
+/// 011: Truncate \n
+/// 1XX: Use MXCSR.RC for rounding
+/// \returns A 128-bit vector containing the converted 16-bit half-precision
+/// float values.
+#define _mm256_cvtps_ph(a, imm) \
+ (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm))
+
+/// Converts a 128-bit vector containing 16-bit half-precision float
+/// values into a 256-bit vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector containing 16-bit half-precision float values to be
+/// converted to 32-bit single-precision float values.
+/// \returns A vector of [8 x float] containing the converted 32-bit
+/// single-precision float values.
+static __inline __m256 __DEFAULT_FN_ATTRS256
+_mm256_cvtph_ps(__m128i __a)
+{
+ return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
+}
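
The packed forms pair naturally for compact storage, halving 32 bytes of floats to 16; a sketch assuming F16C, with pack8/unpack8 as illustrative names:

#include <immintrin.h>

__m128i pack8(__m256 v)    { return _mm256_cvtps_ph(v, 3); } /* 011: truncate */
__m256  unpack8(__m128i h) { return _mm256_cvtph_ps(h); }
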
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif /* __F16CINTRIN_H */
diff --git a/lib/Headers/fma4intrin.h b/lib/Headers/fma4intrin.h
index 962b1a60a258..7bae2f4a3155 100644
--- a/lib/Headers/fma4intrin.h
+++ b/lib/Headers/fma4intrin.h
@@ -31,200 +31,202 @@
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma4")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(256)))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
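
The FMA4 forms in this file differ only in operand signs (macc = a*b + c, msub = a*b - c, the nm* variants negate the product), which is why they all lower to the same vfmaddps/vfmaddpd builtins with sign flips. A minimal sketch, assuming FMA4 (-mfma4); axpy4 is an illustrative name:

#include <x86intrin.h>

__m128 axpy4(__m128 a, __m128 x, __m128 y)
{
  return _mm_macc_ps(a, x, y);  /* per element: a*x + y, single rounding */
}
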
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif /* __FMA4INTRIN_H */
diff --git a/lib/Headers/fmaintrin.h b/lib/Headers/fmaintrin.h
index 478a0ac81cf2..094d13afea09 100644
--- a/lib/Headers/fmaintrin.h
+++ b/lib/Headers/fmaintrin.h
@@ -1,4 +1,4 @@
-/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -29,200 +29,202 @@
#define __FMAINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fma")))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
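
The FMA3 forms compute a*b + c per element with a single rounding step, unlike a separate multiply followed by an add. A minimal sketch, assuming FMA (-mfma); fmadd4f is an illustrative name:

#include <immintrin.h>

__m128 fmadd4f(__m128 a, __m128 b, __m128 c)
{
  return _mm_fmadd_ps(a, b, c);
}
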
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
-static __inline__ __m128d __DEFAULT_FN_ATTRS
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
-#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
#endif /* __FMAINTRIN_H */
diff --git a/lib/Headers/fxsrintrin.h b/lib/Headers/fxsrintrin.h
index 786081ca8eab..704b5ad60aa5 100644
--- a/lib/Headers/fxsrintrin.h
+++ b/lib/Headers/fxsrintrin.h
@@ -30,7 +30,7 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
-/// \brief Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
+/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
/// memory region pointed to by the input parameter \a __p.
///
/// \headerfile <x86intrin.h>
@@ -43,10 +43,10 @@
static __inline__ void __DEFAULT_FN_ATTRS
_fxsave(void *__p)
{
- return __builtin_ia32_fxsave(__p);
+ __builtin_ia32_fxsave(__p);
}
-/// \brief Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
+/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
/// memory region pointed to by the input parameter \a __p. The contents of
/// this memory region should have been written to by a previous \c _fxsave
/// or \c _fxsave64 intrinsic.
@@ -61,11 +61,11 @@ _fxsave(void *__p)
static __inline__ void __DEFAULT_FN_ATTRS
_fxrstor(void *__p)
{
- return __builtin_ia32_fxrstor(__p);
+ __builtin_ia32_fxrstor(__p);
}
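
A save/restore sketch: the 512-byte area must be 16-byte aligned (FXSAVE faults otherwise); checkpoint/restore are illustrative names:

#include <immintrin.h>

static unsigned char fpu_state[512] __attribute__((aligned(16)));

void checkpoint(void) { _fxsave(fpu_state); }
void restore(void)    { _fxrstor(fpu_state); }
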
#ifdef __x86_64__
-/// \brief Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
+/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
/// memory region pointed to by the input parameter \a __p.
///
/// \headerfile <x86intrin.h>
@@ -78,10 +78,10 @@ _fxrstor(void *__p)
static __inline__ void __DEFAULT_FN_ATTRS
_fxsave64(void *__p)
{
- return __builtin_ia32_fxsave64(__p);
+ __builtin_ia32_fxsave64(__p);
}
-/// \brief Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
+/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
/// memory region pointed to by the input parameter \a __p. The contents of
/// this memory region should have been written to by a previous \c _fxsave
/// or \c _fxsave64 intrinsic.
@@ -96,7 +96,7 @@ _fxsave64(void *__p)
static __inline__ void __DEFAULT_FN_ATTRS
_fxrstor64(void *__p)
{
- return __builtin_ia32_fxrstor64(__p);
+ __builtin_ia32_fxrstor64(__p);
}
#endif
diff --git a/lib/Headers/gfniintrin.h b/lib/Headers/gfniintrin.h
index 20fadccfaaed..804d4f3d068c 100644
--- a/lib/Headers/gfniintrin.h
+++ b/lib/Headers/gfniintrin.h
@@ -29,104 +29,108 @@
#define __GFNIINTRIN_H
-#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) __extension__ ({ \
+#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
(__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v16qi)(__m128i)(S)); })
+ (__v16qi)(__m128i)(S))
-#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) __extension__ ({ \
+#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
(__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
- U, A, B, I); })
+ U, A, B, I)
-#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) __extension__ ({ \
+#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
(__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) __extension__ ({ \
+#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v32qi)(__m256i)(S)); })
+ (__v32qi)(__m256i)(S))
-#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) __extension__ ({ \
+#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
(__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
- U, A, B, I); })
+ U, A, B, I)
-#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) __extension__ ({ \
+#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
(__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
(__v64qi)(__m512i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) __extension__ ({ \
+#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
(__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
(__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v64qi)(__m512i)(S)); })
+ (__v64qi)(__m512i)(S))
-#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) __extension__ ({ \
- (__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_qi(), \
- U, A, B, I); })
+#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
+ (__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+ U, A, B, I)
-#define _mm_gf2p8affine_epi64_epi8(A, B, I) __extension__ ({ \
+#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
(__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
(__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
- (__v16qi)(__m128i)(S)); })
+ (__v16qi)(__m128i)(S))
-#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) __extension__ ({ \
+#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
(__m128i)_mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), \
- U, A, B, I); })
+ U, A, B, I)
-#define _mm256_gf2p8affine_epi64_epi8(A, B, I) __extension__ ({ \
+#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
(__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) __extension__ ({ \
+#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
(__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
- (__v32qi)(__m256i)(S)); })
+ (__v32qi)(__m256i)(S))
-#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) __extension__ ({ \
+#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
(__m256i)_mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
- U, A, B, I); })
+ U, A, B, I)
-#define _mm512_gf2p8affine_epi64_epi8(A, B, I) __extension__ ({ \
+#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
(__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
(__v64qi)(__m512i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) __extension__ ({ \
+#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
(__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
(__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I), \
- (__v64qi)(__m512i)(S)); })
+ (__v64qi)(__m512i)(S))
-#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) __extension__ ({ \
- (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_qi(), \
- U, A, B, I); })
+#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
+ (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+ U, A, B, I)
/* Default attributes for simple form (no masking). */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni"), __min_vector_width__(128)))
+
+/* Default attributes for YMM unmasked form. */
+#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256)))
/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni")))
+#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512)))
/* Default attributes for VLX forms. */
-#define __DEFAULT_FN_ATTRS_VL __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni")))
+#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256)))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
@@ -135,7 +139,7 @@ _mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
(__v16qi) __B);
}
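
_mm_gf2p8mul_epi8 multiplies each byte pair in GF(2^8) reduced by the AES polynomial x^8 + x^4 + x^3 + x + 1, so 0x01 is the multiplicative identity. A minimal sketch, assuming GFNI (-mgfni); gf_identity is an illustrative name:

#include <immintrin.h>

__m128i gf_identity(__m128i a)
{
  return _mm_gf2p8mul_epi8(a, _mm_set1_epi8(1));  /* returns a unchanged */
}
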
-static __inline__ __m128i __DEFAULT_FN_ATTRS_VL
+static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_selectb_128(__U,
@@ -143,21 +147,21 @@ _mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
(__v16qi) __S);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS_VL
+static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
__U, __A, __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
(__v32qi) __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS_VL
+static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
return (__m256i) __builtin_ia32_selectb_256(__U,
@@ -165,21 +169,21 @@ _mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
(__v32qi) __S);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS_VL
+static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
__U, __A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
(__v64qi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_selectb_512(__U,
@@ -187,16 +191,18 @@ _mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
(__v64qi) __S);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
- return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_qi(),
+ return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
__U, __A, __B);
}
#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_F
-#undef __DEFAULT_FN_ATTRS_VL
+#undef __DEFAULT_FN_ATTRS_Y
+#undef __DEFAULT_FN_ATTRS_Z
+#undef __DEFAULT_FN_ATTRS_VL128
+#undef __DEFAULT_FN_ATTRS_VL256
-#endif // __GFNIINTRIN_H
+#endif /* __GFNIINTRIN_H */
diff --git a/lib/Headers/htmxlintrin.h b/lib/Headers/htmxlintrin.h
index 28f7d025bb30..049dbd61df75 100644
--- a/lib/Headers/htmxlintrin.h
+++ b/lib/Headers/htmxlintrin.h
@@ -214,7 +214,7 @@ __TM_failure_code(void* const __TM_buff)
/* These intrinsics are being made available for compatibility with
the IBM XL compiler. For documentation please see the "z/OS XL
- C/C++ Programming Guide" publically available on the web. */
+ C/C++ Programming Guide" publicly available on the web. */
static __inline long __attribute__((__always_inline__, __nodebug__))
__TM_simple_begin ()
diff --git a/lib/Headers/ia32intrin.h b/lib/Headers/ia32intrin.h
index 4928300103ad..f8972e3053a3 100644
--- a/lib/Headers/ia32intrin.h
+++ b/lib/Headers/ia32intrin.h
@@ -70,4 +70,9 @@ __rdtscp(unsigned int *__A) {
#define _rdpmc(A) __rdpmc(A)
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_wbinvd(void) {
+ __builtin_ia32_wbinvd();
+}
+
#endif /* __IA32INTRIN_H */
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index d3421dc86c99..e7bfbf964d56 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -68,55 +68,11 @@
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
#include <avx2intrin.h>
+#endif
-/* The 256-bit versions of functions in f16cintrin.h.
- Intel documents these as being in immintrin.h, and
- they depend on typedefs from avxintrin.h. */
-
-/// \brief Converts a 256-bit vector of [8 x float] into a 128-bit vector
-/// containing 16-bit half-precision float values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
-///
-/// \param a
-/// A 256-bit vector containing 32-bit single-precision float values to be
-/// converted to 16-bit half-precision float values.
-/// \param imm
-/// An immediate value controlling rounding using bits [2:0]: \n
-/// 000: Nearest \n
-/// 001: Down \n
-/// 010: Up \n
-/// 011: Truncate \n
-/// 1XX: Use MXCSR.RC for rounding
-/// \returns A 128-bit vector containing the converted 16-bit half-precision
-/// float values.
-#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); })
-
-/// \brief Converts a 128-bit vector containing 16-bit half-precision float
-/// values into a 256-bit vector of [8 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
-///
-/// \param __a
-/// A 128-bit vector containing 16-bit half-precision float values to be
-/// converted to 32-bit single-precision float values.
-/// \returns A vector of [8 x float] containing the converted 32-bit
-/// single-precision float values.
-static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
-_mm256_cvtph_ps(__m128i __a)
-{
- return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
-}
-#endif /* __AVX2__ */
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
+#include <f16cintrin.h>
+#endif
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
#include <vpclmulqdqintrin.h>
@@ -134,6 +90,10 @@ _mm256_cvtph_ps(__m128i __a)
#include <lzcntintrin.h>
#endif
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
+#include <popcntintrin.h>
+#endif
+
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
#include <fmaintrin.h>
#endif
@@ -247,6 +207,18 @@ _mm256_cvtph_ps(__m128i __a)
#include <gfniintrin.h>
#endif
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
+/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDPID </c> instruction.
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid")))
+_rdpid_u32(void) {
+ return __builtin_ia32_rdpid();
+}
+#endif // __RDPID__
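
A usage sketch under a common OS convention (an assumption, not part of the instruction): Linux loads IA32_TSC_AUX with (node << 12) | cpu, so the low 12 bits identify the current CPU without a syscall; current_cpu is an illustrative name:

#include <immintrin.h>

unsigned int current_cpu(void)
{
  return _rdpid_u32() & 0xfff;
}
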
+
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
@@ -310,25 +282,25 @@ _readgsbase_u64(void)
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u32(unsigned int __V)
{
- return __builtin_ia32_wrfsbase32(__V);
+ __builtin_ia32_wrfsbase32(__V);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u64(unsigned long long __V)
{
- return __builtin_ia32_wrfsbase64(__V);
+ __builtin_ia32_wrfsbase64(__V);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u32(unsigned int __V)
{
- return __builtin_ia32_wrgsbase32(__V);
+ __builtin_ia32_wrgsbase32(__V);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u64(unsigned long long __V)
{
- return __builtin_ia32_wrgsbase64(__V);
+ __builtin_ia32_wrgsbase64(__V);
}
#endif
@@ -371,4 +343,125 @@ _writegsbase_u64(unsigned long long __V)
* whereas others are also available at all times. */
#include <adxintrin.h>
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
+#include <rdseedintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
+#include <wbnoinvdintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
+#include <cldemoteintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
+#include <waitpkgintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || \
+ defined(__MOVDIRI__) || defined(__MOVDIR64B__)
+#include <movdirintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
+#include <pconfigintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
+#include <sgxintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
+#include <ptwriteintrin.h>
+#endif
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
+#include <invpcidintrin.h>
+#endif
+
+#ifdef _MSC_VER
+/* Define the default attributes for these intrinsics. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange HLE
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
+ __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
+ : "+r" (_Value), "+m" (*_Target) :: "memory");
+ return _Value;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
+ __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
+ : "+r" (_Value), "+m" (*_Target) :: "memory");
+ return _Value;
+}
+#endif
+#if defined(__x86_64__)
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
+ __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
+ : "+r" (_Value), "+m" (*_Target) :: "memory");
+ return _Value;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
+ __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
+ : "+r" (_Value), "+m" (*_Target) :: "memory");
+ return _Value;
+}
+#endif
+/*----------------------------------------------------------------------------*\
+|* Interlocked Compare Exchange HLE
+\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
+ long _Exchange, long _Comparand) {
+ __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
+ : "+a" (_Comparand), "+m" (*_Destination)
+ : "r" (_Exchange) : "memory");
+ return _Comparand;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
+ long _Exchange, long _Comparand) {
+ __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
+ : "+a" (_Comparand), "+m" (*_Destination)
+ : "r" (_Exchange) : "memory");
+ return _Comparand;
+}
+#endif
+#if defined(__x86_64__)
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
+ : "+a" (_Comparand), "+m" (*_Destination)
+ : "r" (_Exchange) : "memory");
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
+ : "+a" (_Comparand), "+m" (*_Destination)
+ : "r" (_Exchange) : "memory");
+ return _Comparand;
+}
+#endif
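
The intended use of these MSVC-mode HLE forms is an elided lock: the 0xf2/0xf3 prefixes are the XACQUIRE/XRELEASE hints, which lock-elision hardware honors and older CPUs ignore, so the fallback is an ordinary locked xchg. A sketch assuming an MSVC-style target; acquire/release and the lock variable are illustrative:

#include <intrin.h>

static long lock;

void acquire(void)
{
  while (_InterlockedExchange_HLEAcquire(&lock, 1))
    _mm_pause();                          /* spin until the lock is free */
}

void release(void)
{
  _InterlockedExchange_HLERelease(&lock, 0);
}
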
+#ifdef __cplusplus
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* _MSC_VER */
+
#endif /* __IMMINTRIN_H */
diff --git a/lib/Headers/intrin.h b/lib/Headers/intrin.h
index b30aa215a452..91914214e299 100644
--- a/lib/Headers/intrin.h
+++ b/lib/Headers/intrin.h
@@ -38,7 +38,7 @@
#include <armintr.h>
#endif
-#if defined(_M_ARM64)
+#if defined(__aarch64__)
#include <arm64intr.h>
#endif
@@ -83,6 +83,7 @@ void __incfsdword(unsigned long);
void __incfsword(unsigned long);
unsigned long __indword(unsigned short);
void __indwordstring(unsigned short, unsigned long *, unsigned long);
+void __int2c(void);
void __invlpg(void *);
unsigned short __inword(unsigned short);
void __inwordstring(unsigned short, unsigned short *, unsigned long);
@@ -140,6 +141,7 @@ void __svm_stgi(void);
void __svm_vmload(size_t);
void __svm_vmrun(size_t);
void __svm_vmsave(size_t);
+void __ud2(void);
unsigned __int64 __ull_rshift(unsigned __int64, int);
void __vmx_off(void);
void __vmx_vmptrst(unsigned __int64 *);
@@ -161,25 +163,15 @@ static __inline__
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
static __inline__
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
-static __inline__
unsigned char _bittest(long const *, long);
-static __inline__
unsigned char _bittestandcomplement(long *, long);
-static __inline__
unsigned char _bittestandreset(long *, long);
-static __inline__
unsigned char _bittestandset(long *, long);
void __cdecl _disable(void);
void __cdecl _enable(void);
long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
unsigned char _interlockedbittestandreset(long volatile *, long);
unsigned char _interlockedbittestandset(long volatile *, long);
-long _InterlockedCompareExchange_HLEAcquire(long volatile *, long, long);
-long _InterlockedCompareExchange_HLERelease(long volatile *, long, long);
-__int64 _InterlockedcompareExchange64_HLEAcquire(__int64 volatile *, __int64,
- __int64);
-__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
- __int64);
void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
void *);
void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
@@ -256,24 +248,15 @@ void __writegsbyte(unsigned long, unsigned char);
void __writegsdword(unsigned long, unsigned long);
void __writegsqword(unsigned long, unsigned __int64);
void __writegsword(unsigned long, unsigned short);
-static __inline__
-unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
-static __inline__
-unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
-static __inline__
unsigned char _bittest64(__int64 const *, __int64);
-static __inline__
unsigned char _bittestandcomplement64(__int64 *, __int64);
-static __inline__
unsigned char _bittestandreset64(__int64 *, __int64);
-static __inline__
unsigned char _bittestandset64(__int64 *, __int64);
long _InterlockedAnd_np(long volatile *_Value, long _Mask);
short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
-static __inline__
unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
long _Comparand);
@@ -287,10 +270,6 @@ unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
__int64 *_ComparandResult);
short _InterlockedCompareExchange16_np(short volatile *_Destination,
short _Exchange, short _Comparand);
-__int64 _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *, __int64,
- __int64);
-__int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
- __int64);
__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand);
void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
@@ -320,7 +299,12 @@ unsigned __int64 _umul128(unsigned __int64,
#endif /* __x86_64__ */
-#if defined(__x86_64__) || defined(__arm__)
+#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
+
+static __inline__
+unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
+static __inline__
+unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
static __inline__
__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
@@ -342,78 +326,6 @@ __int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
-|* Bit Counting and Testing
-\*----------------------------------------------------------------------------*/
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittest(long const *_BitBase, long _BitPos) {
- return (*_BitBase >> _BitPos) & 1;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittestandcomplement(long *_BitBase, long _BitPos) {
- unsigned char _Res = (*_BitBase >> _BitPos) & 1;
- *_BitBase = *_BitBase ^ (1 << _BitPos);
- return _Res;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittestandreset(long *_BitBase, long _BitPos) {
- unsigned char _Res = (*_BitBase >> _BitPos) & 1;
- *_BitBase = *_BitBase & ~(1 << _BitPos);
- return _Res;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittestandset(long *_BitBase, long _BitPos) {
- unsigned char _Res = (*_BitBase >> _BitPos) & 1;
- *_BitBase = *_BitBase | (1 << _BitPos);
- return _Res;
-}
-#if defined(__arm__) || defined(__aarch64__)
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_interlockedbittestandset_acq(long volatile *_BitBase, long _BitPos) {
- long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_ACQUIRE);
- return (_PrevVal >> _BitPos) & 1;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_interlockedbittestandset_nf(long volatile *_BitBase, long _BitPos) {
- long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_RELAXED);
- return (_PrevVal >> _BitPos) & 1;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_interlockedbittestandset_rel(long volatile *_BitBase, long _BitPos) {
- long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_RELEASE);
- return (_PrevVal >> _BitPos) & 1;
-}
-#endif
-#ifdef __x86_64__
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
- return (*_BitBase >> _BitPos) & 1;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittestandcomplement64(__int64 *_BitBase, __int64 _BitPos) {
- unsigned char _Res = (*_BitBase >> _BitPos) & 1;
- *_BitBase = *_BitBase ^ (1ll << _BitPos);
- return _Res;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittestandreset64(__int64 *_BitBase, __int64 _BitPos) {
- unsigned char _Res = (*_BitBase >> _BitPos) & 1;
- *_BitBase = *_BitBase & ~(1ll << _BitPos);
- return _Res;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_bittestandset64(__int64 *_BitBase, __int64 _BitPos) {
- unsigned char _Res = (*_BitBase >> _BitPos) & 1;
- *_BitBase = *_BitBase | (1ll << _BitPos);
- return _Res;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
- long long _PrevVal =
- __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
- return (_PrevVal >> _BitPos) & 1;
-}
-#endif
-/*----------------------------------------------------------------------------*\
|* Interlocked Exchange Add
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
@@ -602,6 +514,23 @@ _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask) {
}
#endif
/*----------------------------------------------------------------------------*\
+|* Bit Counting and Testing
+\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
+unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
+ long _BitPos);
+unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
+ long _BitPos);
+#endif
+/*----------------------------------------------------------------------------*\
|* Interlocked Or
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
@@ -868,33 +797,40 @@ _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __DEFAULT_FN_ATTRS
__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
- __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n));
+ __asm__ __volatile__("rep movsb" : "+D"(__dst), "+S"(__src), "+c"(__n)
+ : : "memory");
}
static __inline__ void __DEFAULT_FN_ATTRS
__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
- __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n));
+ __asm__ __volatile__("rep movsl" : "+D"(__dst), "+S"(__src), "+c"(__n)
+ : : "memory");
}
static __inline__ void __DEFAULT_FN_ATTRS
__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
- __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n));
+ __asm__ __volatile__("rep movsw" : "+D"(__dst), "+S"(__src), "+c"(__n)
+ : : "memory");
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
- __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n));
+ __asm__ __volatile__("rep stosl" : "+D"(__dst), "+c"(__n) : "a"(__x)
+ : "memory");
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
- __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n));
+ __asm__ __volatile__("rep stosw" : "+D"(__dst), "+c"(__n) : "a"(__x)
+ : "memory");
}
#endif
#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
- __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n));
+ __asm__ __volatile__("rep movsq" : "+D"(__dst), "+S"(__src), "+c"(__n)
+ : : "memory");
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
- __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n));
+ __asm__ __volatile__("rep stosq" : "+D"(__dst), "+c"(__n) : "a"(__x)
+ : "memory");
}
#endif
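
A minimal caller of the rep-string intrinsics rewritten above, as a hedged
sketch assuming a Windows-targeting clang build where <intrin.h> declares
them. The asm is now __volatile__ with read-write "+D"/"+S"/"+c" constraints
and a "memory" clobber, so the compiler can neither elide the operation nor
keep stale values across it:

#include <stddef.h>
#include <intrin.h>

void copy_bytes(unsigned char *dst, const unsigned char *src, size_t n) {
  /* rep movsb copies n bytes; the "memory" clobber tells the compiler
     *dst changed, so later reads of dst are reloaded, not cached. */
  __movsb(dst, src, n);
}

void zero_dwords(unsigned long *dst, size_t n) {
  __stosd(dst, 0UL, n);   /* rep stosl stores n zero doublewords */
}
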
@@ -927,6 +863,20 @@ __nop(void) {
__asm__ volatile ("nop");
}
#endif
+#if defined(__x86_64__)
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__shiftleft128(unsigned __int64 __l, unsigned __int64 __h, unsigned char __d) {
+ unsigned __int128 __val = ((unsigned __int128)__h << 64) | __l;
+ unsigned __int128 __res = __val << (__d & 63);
+ return (unsigned __int64)(__res >> 64);
+}
+static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
+__shiftright128(unsigned __int64 __l, unsigned __int64 __h, unsigned char __d) {
+ unsigned __int128 __val = ((unsigned __int128)__h << 64) | __l;
+ unsigned __int128 __res = __val >> (__d & 63);
+ return (unsigned __int64)__res;
+}
+#endif
/*----------------------------------------------------------------------------*\
|* Privileged intrinsics
\*----------------------------------------------------------------------------*/
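
The __shiftleft128/__shiftright128 additions above widen the two halves into
an unsigned __int128, shift by __d & 63, and return the high or low 64 bits
respectively. A hedged sketch with arbitrary test constants, assuming an
x86_64 clang build with <intrin.h>:

#include <stdio.h>
#include <intrin.h>

int main(void) {
  unsigned long long l = 0xFEDCBA9876543210ULL;  /* low  half */
  unsigned long long h = 0x0123456789ABCDEFULL;  /* high half */
  /* high 64 bits of (h:l) << 8  ->  0x23456789ABCDEFFE */
  printf("%llx\n", (unsigned long long)__shiftleft128(l, h, 8));
  /* low 64 bits of (h:l) >> 8   ->  0xEFFEDCBA98765432 */
  printf("%llx\n", (unsigned long long)__shiftright128(l, h, 8));
  return 0;
}
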
diff --git a/lib/Headers/invpcidintrin.h b/lib/Headers/invpcidintrin.h
new file mode 100644
index 000000000000..c30a19fa3d22
--- /dev/null
+++ b/lib/Headers/invpcidintrin.h
@@ -0,0 +1,37 @@
+/*===------------- invpcidintrin.h - INVPCID intrinsic ---------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <invpcidintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __INVPCIDINTRIN_H
+#define __INVPCIDINTRIN_H
+
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("invpcid")))
+_invpcid(unsigned int __type, void *__descriptor) {
+ __builtin_ia32_invpcid(__type, __descriptor);
+}
+
+#endif /* __INVPCIDINTRIN_H */
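
For context, a hedged sketch of calling the new _invpcid intrinsic. INVPCID
is a privileged (ring-0) operation; the 16-byte descriptor layout (qword 0 =
PCID, qword 1 = linear address, used only by type 0) and the type value are
assumptions taken from the Intel SDM, not defined by this header:

#include <immintrin.h>   /* assumes -minvpcid; executes only at ring 0 */

static void flush_single_context(unsigned long long pcid) {
  /* type 1 = single-context invalidation: drop non-global TLB
     entries tagged with this PCID; the address qword is ignored */
  unsigned long long desc[2] = { pcid, 0 };
  _invpcid(1, desc);
}
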
diff --git a/lib/Headers/lwpintrin.h b/lib/Headers/lwpintrin.h
index c95fdd9a201a..3455575cced6 100644
--- a/lib/Headers/lwpintrin.h
+++ b/lib/Headers/lwpintrin.h
@@ -31,7 +31,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lwp")))
-/// \brief Parses the LWPCB at the specified address and enables
+/// Parses the LWPCB at the specified address and enables
/// profiling if valid.
///
/// \headerfile <x86intrin.h>
@@ -48,7 +48,7 @@ __llwpcb (void *__addr)
__builtin_ia32_llwpcb(__addr);
}
-/// \brief Flushes the LWP state to memory and returns the address of the LWPCB.
+/// Flushes the LWP state to memory and returns the address of the LWPCB.
///
/// \headerfile <x86intrin.h>
///
@@ -58,12 +58,12 @@ __llwpcb (void *__addr)
/// Address of the current Lightweight Profiling Control Block (LWPCB).
/// If LWP is not currently enabled, returns NULL.
static __inline__ void* __DEFAULT_FN_ATTRS
-__slwpcb ()
+__slwpcb (void)
{
return __builtin_ia32_slwpcb();
}
-/// \brief Inserts programmed event record into the LWP event ring buffer
+/// Inserts a programmed event record into the LWP event ring buffer
/// and advances the ring buffer pointer.
///
/// \headerfile <x86intrin.h>
@@ -84,7 +84,7 @@ __slwpcb ()
(__builtin_ia32_lwpins32((unsigned int) (DATA2), (unsigned int) (DATA1), \
(unsigned int) (FLAGS)))
-/// \brief Decrements the LWP programmed value sample event counter. If the result is
+/// Decrements the LWP programmed value sample event counter. If the result is
/// negative, inserts an event record into the LWP event ring buffer in memory
/// and advances the ring buffer pointer.
///
@@ -104,7 +104,7 @@ __slwpcb ()
#ifdef __x86_64__
-/// \brief Inserts programmed event record into the LWP event ring buffer
+/// Inserts a programmed event record into the LWP event ring buffer
/// and advances the ring buffer pointer.
///
/// \headerfile <x86intrin.h>
@@ -125,7 +125,7 @@ __slwpcb ()
(__builtin_ia32_lwpins64((unsigned long long) (DATA2), (unsigned int) (DATA1), \
(unsigned int) (FLAGS)))
-/// \brief Decrements the LWP programmed value sample event counter. If the result is
+/// Decrements the LWP programmed value sample event counter. If the result is
/// negative, inserts an event record into the LWP event ring buffer in memory
/// and advances the ring buffer pointer.
///
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 3d2769da3bae..558f1828f0e7 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -31,7 +31,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
-/// \brief Counts the number of leading zero bits in the operand.
+/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -47,7 +47,7 @@ __lzcnt16(unsigned short __X)
return __X ? __builtin_clzs(__X) : 16;
}
-/// \brief Counts the number of leading zero bits in the operand.
+/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -57,13 +57,14 @@ __lzcnt16(unsigned short __X)
/// An unsigned 32-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 32-bit integer containing the number of leading zero
/// bits in the operand.
+/// \see _lzcnt_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__lzcnt32(unsigned int __X)
{
return __X ? __builtin_clz(__X) : 32;
}
-/// \brief Counts the number of leading zero bits in the operand.
+/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -73,6 +74,7 @@ __lzcnt32(unsigned int __X)
/// An unsigned 32-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 32-bit integer containing the number of leading zero
/// bits in the operand.
+/// \see __lzcnt32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_lzcnt_u32(unsigned int __X)
{
@@ -80,7 +82,7 @@ _lzcnt_u32(unsigned int __X)
}
#ifdef __x86_64__
-/// \brief Counts the number of leading zero bits in the operand.
+/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -90,13 +92,14 @@ _lzcnt_u32(unsigned int __X)
/// An unsigned 64-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 64-bit integer containing the number of leading zero
/// bits in the operand.
+/// \see _lzcnt_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__lzcnt64(unsigned long long __X)
{
return __X ? __builtin_clzll(__X) : 64;
}
-/// \brief Counts the number of leading zero bits in the operand.
+/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
@@ -106,6 +109,7 @@ __lzcnt64(unsigned long long __X)
/// An unsigned 64-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 64-bit integer containing the number of leading zero
/// bits in the operand.
+/// \see __lzcnt64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_lzcnt_u64(unsigned long long __X)
{
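
Worked values for the zero-input convention visible above, where a zero
operand falls back to the operand width instead of being undefined as with
BSR; a hedged sketch assuming <x86intrin.h> and -mlzcnt:

#include <x86intrin.h>

unsigned int lzcnt_demo(void) {
  return __lzcnt32(1u)             /* 31: only bit 0 is set            */
       + __lzcnt32(0u)             /* 32: the __X ? ... : 32 fallback  */
       + _lzcnt_u32(0x00800000u);  /*  8: highest set bit is bit 23    */
}
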
diff --git a/lib/Headers/mm3dnow.h b/lib/Headers/mm3dnow.h
index 294866c1dc0d..b0288757a396 100644
--- a/lib/Headers/mm3dnow.h
+++ b/lib/Headers/mm3dnow.h
@@ -30,9 +30,9 @@
typedef float __v2sf __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow"), __min_vector_width__(64)))
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("3dnow")))
_m_femms(void) {
__builtin_ia32_femms();
}
@@ -134,7 +134,7 @@ _m_pmulhrw(__m64 __m1, __m64 __m2) {
/* Handle the 3dnowa instructions here. */
#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa"), __min_vector_width__(64)))
static __inline__ __m64 __DEFAULT_FN_ATTRS
_m_pf2iw(__m64 __m) {
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index 4b38d51713d8..a73539942a92 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -32,27 +32,27 @@ typedef short __v4hi __attribute__((__vector_size__(8)));
typedef char __v8qi __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx"), __min_vector_width__(64)))
-/// \brief Clears the MMX state by setting the state of the x87 stack registers
+/// Clears the MMX state by setting the state of the x87 stack registers
/// to empty.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> EMMS </c> instruction.
///
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
_mm_empty(void)
{
__builtin_ia32_emms();
}
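
A complete MMX round trip, as a hedged sketch with illustrative constants.
Because MMX state aliases the x87 register stack, _mm_empty() (EMMS) must run
before subsequent floating-point code, which is presumably why the patch
leaves it callable without the __min_vector_width__(64) attribute:

#include <mmintrin.h>

long long saturating_demo(void) {
  __m64 a = _mm_set_pi16(30000, 2, 3, 4);
  __m64 b = _mm_set_pi16(30000, 20, 30, 40);
  __m64 s = _mm_adds_pi16(a, b);   /* 30000+30000 saturates to 0x7FFF */
  long long r = _mm_cvtm64_si64(s);
  _mm_empty();                     /* clear MMX state before any x87 use */
  return r;
}
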
-/// \brief Constructs a 64-bit integer vector, setting the lower 32 bits to the
+/// Constructs a 64-bit integer vector, setting the lower 32 bits to the
/// value of the 32-bit integer parameter and setting the upper 32 bits to 0.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
+/// This intrinsic corresponds to the <c> MOVD </c> instruction.
///
/// \param __i
/// A 32-bit integer value.
@@ -64,12 +64,12 @@ _mm_cvtsi32_si64(int __i)
return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
}
-/// \brief Returns the lower 32 bits of a 64-bit integer vector as a 32-bit
+/// Returns the lower 32 bits of a 64-bit integer vector as a 32-bit
/// signed integer.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
+/// This intrinsic corresponds to the <c> MOVD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector.
@@ -81,11 +81,11 @@ _mm_cvtsi64_si32(__m64 __m)
return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
}
-/// \brief Casts a 64-bit signed integer value into a 64-bit integer vector.
+/// Casts a 64-bit signed integer value into a 64-bit integer vector.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVD </c> instruction.
+/// This intrinsic corresponds to the <c> MOVQ </c> instruction.
///
/// \param __i
/// A 64-bit signed integer.
@@ -97,11 +97,11 @@ _mm_cvtsi64_m64(long long __i)
return (__m64)__i;
}
-/// \brief Casts a 64-bit integer vector into a 64-bit signed integer value.
+/// Casts a 64-bit integer vector into a 64-bit signed integer value.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVD </c> instruction.
+/// This intrinsic corresponds to the <c> MOVQ </c> instruction.
///
/// \param __m
/// A 64-bit integer vector.
@@ -113,7 +113,7 @@ _mm_cvtm64_si64(__m64 __m)
return (long long)__m;
}
-/// \brief Converts 16-bit signed integers from both 64-bit integer vector
+/// Converts 16-bit signed integers from both 64-bit integer vector
/// parameters of [4 x i16] into 8-bit signed integer values, and constructs
/// a 64-bit integer vector of [8 x i8] as the result. Positive values
/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80
@@ -143,7 +143,7 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Converts 32-bit signed integers from both 64-bit integer vector
+/// Converts 32-bit signed integers from both 64-bit integer vector
/// parameters of [2 x i32] into 16-bit signed integer values, and constructs
/// a 64-bit integer vector of [4 x i16] as the result. Positive values
/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than
@@ -173,7 +173,7 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Converts 16-bit signed integers from both 64-bit integer vector
+/// Converts 16-bit signed integers from both 64-bit integer vector
/// parameters of [4 x i16] into 8-bit unsigned integer values, and
/// constructs a 64-bit integer vector of [8 x i8] as the result. Values
/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated
@@ -203,7 +203,7 @@ _mm_packs_pu16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8]
+/// Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8]
/// and interleaves them into a 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
@@ -230,7 +230,7 @@ _mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of
+/// Unpacks the upper 32 bits from two 64-bit integer vectors of
/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
@@ -253,7 +253,7 @@ _mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Unpacks the upper 32 bits from two 64-bit integer vectors of
+/// Unpacks the upper 32 bits from two 64-bit integer vectors of
/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].
///
/// \headerfile <x86intrin.h>
@@ -274,7 +274,7 @@ _mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8]
+/// Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8]
/// and interleaves them into a 64-bit integer vector of [8 x i8].
///
/// \headerfile <x86intrin.h>
@@ -301,7 +301,7 @@ _mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of
+/// Unpacks the lower 32 bits from two 64-bit integer vectors of
/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16].
///
/// \headerfile <x86intrin.h>
@@ -324,7 +324,7 @@ _mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Unpacks the lower 32 bits from two 64-bit integer vectors of
+/// Unpacks the lower 32 bits from two 64-bit integer vectors of
/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32].
///
/// \headerfile <x86intrin.h>
@@ -345,7 +345,7 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Adds each 8-bit integer element of the first 64-bit integer vector
+/// Adds each 8-bit integer element of the first 64-bit integer vector
/// of [8 x i8] to the corresponding 8-bit integer element of the second
/// 64-bit integer vector of [8 x i8]. The lower 8 bits of the results are
/// packed into a 64-bit integer vector of [8 x i8].
@@ -366,7 +366,7 @@ _mm_add_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Adds each 16-bit integer element of the first 64-bit integer vector
+/// Adds each 16-bit integer element of the first 64-bit integer vector
/// of [4 x i16] to the corresponding 16-bit integer element of the second
/// 64-bit integer vector of [4 x i16]. The lower 16 bits of the results are
/// packed into a 64-bit integer vector of [4 x i16].
@@ -387,7 +387,7 @@ _mm_add_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Adds each 32-bit integer element of the first 64-bit integer vector
+/// Adds each 32-bit integer element of the first 64-bit integer vector
/// of [2 x i32] to the corresponding 32-bit integer element of the second
/// 64-bit integer vector of [2 x i32]. The lower 32 bits of the results are
/// packed into a 64-bit integer vector of [2 x i32].
@@ -408,7 +408,7 @@ _mm_add_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Adds each 8-bit signed integer element of the first 64-bit integer
+/// Adds each 8-bit signed integer element of the first 64-bit integer
/// vector of [8 x i8] to the corresponding 8-bit signed integer element of
/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than
/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to
@@ -430,7 +430,7 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Adds each 16-bit signed integer element of the first 64-bit integer
+/// Adds each 16-bit signed integer element of the first 64-bit integer
/// vector of [4 x i16] to the corresponding 16-bit signed integer element of
/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than
/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are
@@ -453,7 +453,7 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Adds each 8-bit unsigned integer element of the first 64-bit integer
+/// Adds each 8-bit unsigned integer element of the first 64-bit integer
/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of
/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are
/// saturated to 0xFF. The results are packed into a 64-bit integer vector of
@@ -475,7 +475,7 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Adds each 16-bit unsigned integer element of the first 64-bit integer
+/// Adds each 16-bit unsigned integer element of the first 64-bit integer
/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element
/// of the second 64-bit integer vector of [4 x i16]. Sums greater than
/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit
@@ -497,7 +497,7 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Subtracts each 8-bit integer element of the second 64-bit integer
+/// Subtracts each 8-bit integer element of the second 64-bit integer
/// vector of [8 x i8] from the corresponding 8-bit integer element of the
/// first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results
/// are packed into a 64-bit integer vector of [8 x i8].
@@ -518,7 +518,7 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Subtracts each 16-bit integer element of the second 64-bit integer
+/// Subtracts each 16-bit integer element of the second 64-bit integer
/// vector of [4 x i16] from the corresponding 16-bit integer element of the
/// first 64-bit integer vector of [4 x i16]. The lower 16 bits of the
/// results are packed into a 64-bit integer vector of [4 x i16].
@@ -539,7 +539,7 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Subtracts each 32-bit integer element of the second 64-bit integer
+/// Subtracts each 32-bit integer element of the second 64-bit integer
/// vector of [2 x i32] from the corresponding 32-bit integer element of the
/// first 64-bit integer vector of [2 x i32]. The lower 32 bits of the
/// results are packed into a 64-bit integer vector of [2 x i32].
@@ -560,7 +560,7 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Subtracts each 8-bit signed integer element of the second 64-bit
+/// Subtracts each 8-bit signed integer element of the second 64-bit
/// integer vector of [8 x i8] from the corresponding 8-bit signed integer
/// element of the first 64-bit integer vector of [8 x i8]. Positive results
/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80
@@ -583,7 +583,7 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Subtracts each 16-bit signed integer element of the second 64-bit
+/// Subtracts each 16-bit signed integer element of the second 64-bit
/// integer vector of [4 x i16] from the corresponding 16-bit signed integer
/// element of the first 64-bit integer vector of [4 x i16]. Positive results
/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than
@@ -606,7 +606,7 @@ _mm_subs_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Subtracts each 8-bit unsigned integer element of the second 64-bit
+/// Subtracts each 8-bit unsigned integer element of the second 64-bit
/// integer vector of [8 x i8] from the corresponding 8-bit unsigned integer
/// element of the first 64-bit integer vector of [8 x i8].
///
@@ -630,7 +630,7 @@ _mm_subs_pu8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Subtracts each 16-bit unsigned integer element of the second 64-bit
+/// Subtracts each 16-bit unsigned integer element of the second 64-bit
/// integer vector of [4 x i16] from the corresponding 16-bit unsigned
/// integer element of the first 64-bit integer vector of [4 x i16].
///
@@ -654,7 +654,7 @@ _mm_subs_pu16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+/// Multiplies each 16-bit signed integer element of the first 64-bit
/// integer vector of [4 x i16] by the corresponding 16-bit signed integer
/// element of the second 64-bit integer vector of [4 x i16] and get four
/// 32-bit products. Adds adjacent pairs of products to get two 32-bit sums.
@@ -681,7 +681,7 @@ _mm_madd_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+/// Multiplies each 16-bit signed integer element of the first 64-bit
/// integer vector of [4 x i16] by the corresponding 16-bit signed integer
/// element of the second 64-bit integer vector of [4 x i16]. Packs the upper
/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].
@@ -702,7 +702,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Multiplies each 16-bit signed integer element of the first 64-bit
+/// Multiplies each 16-bit signed integer element of the first 64-bit
/// integer vector of [4 x i16] by the corresponding 16-bit signed integer
/// element of the second 64-bit integer vector of [4 x i16]. Packs the lower
/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16].
@@ -723,7 +723,7 @@ _mm_mullo_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Left-shifts each 16-bit signed integer element of the first
+/// Left-shifts each 16-bit signed integer element of the first
/// parameter, which is a 64-bit integer vector of [4 x i16], by the number
/// of bits specified by the second parameter, which is a 64-bit integer. The
/// lower 16 bits of the results are packed into a 64-bit integer vector of
@@ -746,7 +746,7 @@ _mm_sll_pi16(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
}
-/// \brief Left-shifts each 16-bit signed integer element of a 64-bit integer
+/// Left-shifts each 16-bit signed integer element of a 64-bit integer
/// vector of [4 x i16] by the number of bits specified by a 32-bit integer.
/// The lower 16 bits of the results are packed into a 64-bit integer vector
/// of [4 x i16].
@@ -768,7 +768,7 @@ _mm_slli_pi16(__m64 __m, int __count)
return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
}
-/// \brief Left-shifts each 32-bit signed integer element of the first
+/// Left-shifts each 32-bit signed integer element of the first
/// parameter, which is a 64-bit integer vector of [2 x i32], by the number
/// of bits specified by the second parameter, which is a 64-bit integer. The
/// lower 32 bits of the results are packed into a 64-bit integer vector of
@@ -791,7 +791,7 @@ _mm_sll_pi32(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
}
-/// \brief Left-shifts each 32-bit signed integer element of a 64-bit integer
+/// Left-shifts each 32-bit signed integer element of a 64-bit integer
/// vector of [2 x i32] by the number of bits specified by a 32-bit integer.
/// The lower 32 bits of the results are packed into a 64-bit integer vector
/// of [2 x i32].
@@ -813,7 +813,7 @@ _mm_slli_pi32(__m64 __m, int __count)
return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
}
-/// \brief Left-shifts the first 64-bit integer parameter by the number of bits
+/// Left-shifts the first 64-bit integer parameter by the number of bits
/// specified by the second 64-bit integer parameter. The lower 64 bits of
/// result are returned.
///
@@ -833,7 +833,7 @@ _mm_sll_si64(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psllq((__v1di)__m, __count);
}
-/// \brief Left-shifts the first parameter, which is a 64-bit integer, by the
+/// Left-shifts the first parameter, which is a 64-bit integer, by the
/// number of bits specified by the second parameter, which is a 32-bit
/// integer. The lower 64 bits of result are returned.
///
@@ -853,7 +853,7 @@ _mm_slli_si64(__m64 __m, int __count)
return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count);
}
-/// \brief Right-shifts each 16-bit integer element of the first parameter,
+/// Right-shifts each 16-bit integer element of the first parameter,
/// which is a 64-bit integer vector of [4 x i16], by the number of bits
/// specified by the second parameter, which is a 64-bit integer.
///
@@ -877,7 +877,7 @@ _mm_sra_pi16(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
}
-/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector
+/// Right-shifts each 16-bit integer element of a 64-bit integer vector
/// of [4 x i16] by the number of bits specified by a 32-bit integer.
///
/// High-order bits are filled with the sign bit of the initial value of each
@@ -900,7 +900,7 @@ _mm_srai_pi16(__m64 __m, int __count)
return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
}
-/// \brief Right-shifts each 32-bit integer element of the first parameter,
+/// Right-shifts each 32-bit integer element of the first parameter,
/// which is a 64-bit integer vector of [2 x i32], by the number of bits
/// specified by the second parameter, which is a 64-bit integer.
///
@@ -924,7 +924,7 @@ _mm_sra_pi32(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
}
-/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector
+/// Right-shifts each 32-bit integer element of a 64-bit integer vector
/// of [2 x i32] by the number of bits specified by a 32-bit integer.
///
/// High-order bits are filled with the sign bit of the initial value of each
@@ -947,7 +947,7 @@ _mm_srai_pi32(__m64 __m, int __count)
return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
}
-/// \brief Right-shifts each 16-bit integer element of the first parameter,
+/// Right-shifts each 16-bit integer element of the first parameter,
/// which is a 64-bit integer vector of [4 x i16], by the number of bits
/// specified by the second parameter, which is a 64-bit integer.
///
@@ -970,7 +970,7 @@ _mm_srl_pi16(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
}
-/// \brief Right-shifts each 16-bit integer element of a 64-bit integer vector
+/// Right-shifts each 16-bit integer element of a 64-bit integer vector
/// of [4 x i16] by the number of bits specified by a 32-bit integer.
///
/// High-order bits are cleared. The 16-bit results are packed into a 64-bit
@@ -992,7 +992,7 @@ _mm_srli_pi16(__m64 __m, int __count)
return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
}
-/// \brief Right-shifts each 32-bit integer element of the first parameter,
+/// Right-shifts each 32-bit integer element of the first parameter,
/// which is a 64-bit integer vector of [2 x i32], by the number of bits
/// specified by the second parameter, which is a 64-bit integer.
///
@@ -1015,7 +1015,7 @@ _mm_srl_pi32(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
}
-/// \brief Right-shifts each 32-bit integer element of a 64-bit integer vector
+/// Right-shifts each 32-bit integer element of a 64-bit integer vector
/// of [2 x i32] by the number of bits specified by a 32-bit integer.
///
/// High-order bits are cleared. The 32-bit results are packed into a 64-bit
@@ -1037,7 +1037,7 @@ _mm_srli_pi32(__m64 __m, int __count)
return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
}
-/// \brief Right-shifts the first 64-bit integer parameter by the number of bits
+/// Right-shifts the first 64-bit integer parameter by the number of bits
/// specified by the second 64-bit integer parameter.
///
/// High-order bits are cleared.
@@ -1057,7 +1057,7 @@ _mm_srl_si64(__m64 __m, __m64 __count)
return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count);
}
-/// \brief Right-shifts the first parameter, which is a 64-bit integer, by the
+/// Right-shifts the first parameter, which is a 64-bit integer, by the
/// number of bits specified by the second parameter, which is a 32-bit
/// integer.
///
@@ -1078,7 +1078,7 @@ _mm_srli_si64(__m64 __m, int __count)
return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count);
}
-/// \brief Performs a bitwise AND of two 64-bit integer vectors.
+/// Performs a bitwise AND of two 64-bit integer vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -1096,7 +1096,7 @@ _mm_and_si64(__m64 __m1, __m64 __m2)
return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2);
}
-/// \brief Performs a bitwise NOT of the first 64-bit integer vector, and then
+/// Performs a bitwise NOT of the first 64-bit integer vector, and then
/// performs a bitwise AND of the intermediate result and the second 64-bit
/// integer vector.
///
@@ -1117,7 +1117,7 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2)
return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2);
}
-/// \brief Performs a bitwise OR of two 64-bit integer vectors.
+/// Performs a bitwise OR of two 64-bit integer vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -1135,7 +1135,7 @@ _mm_or_si64(__m64 __m1, __m64 __m2)
return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2);
}
-/// \brief Performs a bitwise exclusive OR of two 64-bit integer vectors.
+/// Performs a bitwise exclusive OR of two 64-bit integer vectors.
///
/// \headerfile <x86intrin.h>
///
@@ -1153,7 +1153,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2);
}
-/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of
+/// Compares the 8-bit integer elements of two 64-bit integer vectors of
/// [8 x i8] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
@@ -1175,7 +1175,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of
+/// Compares the 16-bit integer elements of two 64-bit integer vectors of
/// [4 x i16] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
@@ -1197,7 +1197,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of
+/// Compares the 32-bit integer elements of two 64-bit integer vectors of
/// [2 x i32] to determine if the element of the first vector is equal to the
/// corresponding element of the second vector.
///
@@ -1219,7 +1219,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Compares the 8-bit integer elements of two 64-bit integer vectors of
+/// Compares the 8-bit integer elements of two 64-bit integer vectors of
/// [8 x i8] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
@@ -1241,7 +1241,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
}
-/// \brief Compares the 16-bit integer elements of two 64-bit integer vectors of
+/// Compares the 16-bit integer elements of two 64-bit integer vectors of
/// [4 x i16] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
@@ -1263,7 +1263,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
}
-/// \brief Compares the 32-bit integer elements of two 64-bit integer vectors of
+/// Compares the 32-bit integer elements of two 64-bit integer vectors of
/// [2 x i32] to determine if the element of the first vector is greater than
/// the corresponding element of the second vector.
///
@@ -1285,20 +1285,20 @@ _mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
}
-/// \brief Constructs a 64-bit integer vector initialized to zero.
+/// Constructs a 64-bit integer vector initialized to zero.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
+/// This intrinsic corresponds to the <c> PXOR </c> instruction.
///
/// \returns An initialized 64-bit integer vector with all elements set to zero.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_setzero_si64(void)
{
- return (__m64){ 0LL };
+ return __extension__ (__m64){ 0LL };
}
-/// \brief Constructs a 64-bit integer vector initialized with the specified
+/// Constructs a 64-bit integer vector initialized with the specified
/// 32-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -1319,7 +1319,7 @@ _mm_set_pi32(int __i1, int __i0)
return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
}
-/// \brief Constructs a 64-bit integer vector initialized with the specified
+/// Constructs a 64-bit integer vector initialized with the specified
/// 16-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -1342,7 +1342,7 @@ _mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
}
-/// \brief Constructs a 64-bit integer vector initialized with the specified
+/// Constructs a 64-bit integer vector initialized with the specified
/// 8-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -1375,13 +1375,14 @@ _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
__b4, __b5, __b6, __b7);
}
-/// \brief Constructs a 64-bit integer vector of [2 x i32], with each of the
+/// Constructs a 64-bit integer vector of [2 x i32], with each of the
/// 32-bit integer vector elements set to the specified 32-bit integer
/// value.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
///
/// \param __i
/// A 32-bit integer value used to initialize each vector element of the
@@ -1393,13 +1394,14 @@ _mm_set1_pi32(int __i)
return _mm_set_pi32(__i, __i);
}
-/// \brief Constructs a 64-bit integer vector of [4 x i16], with each of the
+/// Constructs a 64-bit integer vector of [4 x i16], with each of the
/// 16-bit integer vector elements set to the specified 16-bit integer
/// value.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
///
/// \param __w
/// A 16-bit integer value used to initialize each vector element of the
@@ -1411,13 +1413,13 @@ _mm_set1_pi16(short __w)
return _mm_set_pi16(__w, __w, __w, __w);
}
-/// \brief Constructs a 64-bit integer vector of [8 x i8], with each of the
+/// Constructs a 64-bit integer vector of [8 x i8], with each of the
/// 8-bit integer vector elements set to the specified 8-bit integer value.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VPUNPCKLBW + VPSHUFLW / PUNPCKLBW +
-/// PSHUFLW </c> instruction.
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
///
/// \param __b
/// An 8-bit integer value used to initialize each vector element of the
@@ -1429,7 +1431,7 @@ _mm_set1_pi8(char __b)
return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
}
-/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+/// Constructs a 64-bit integer vector, initialized in reverse order with
/// the specified 32-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -1450,7 +1452,7 @@ _mm_setr_pi32(int __i0, int __i1)
return _mm_set_pi32(__i1, __i0);
}
-/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+/// Constructs a 64-bit integer vector, initialized in reverse order with
/// the specified 16-bit integer values.
///
/// \headerfile <x86intrin.h>
@@ -1473,7 +1475,7 @@ _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
return _mm_set_pi16(__w3, __w2, __w1, __w0);
}
-/// \brief Constructs a 64-bit integer vector, initialized in reverse order with
+/// Constructs a 64-bit integer vector, initialized in reverse order with
/// the specified 8-bit integer values.
///
/// \headerfile <x86intrin.h>
diff --git a/lib/Headers/module.modulemap b/lib/Headers/module.modulemap
index 95d26cefa6f7..1d1af57fd030 100644
--- a/lib/Headers/module.modulemap
+++ b/lib/Headers/module.modulemap
@@ -38,6 +38,7 @@ module _Builtin_intrinsics [system] [extern_c] {
explicit module neon {
requires neon
header "arm_neon.h"
+ header "arm_fp16.h"
export *
}
}
@@ -62,6 +63,17 @@ module _Builtin_intrinsics [system] [extern_c] {
textual header "fma4intrin.h"
textual header "mwaitxintrin.h"
textual header "clzerointrin.h"
+ textual header "wbnoinvdintrin.h"
+ textual header "cldemoteintrin.h"
+ textual header "waitpkgintrin.h"
+ textual header "movdirintrin.h"
+ textual header "pconfigintrin.h"
+ textual header "sgxintrin.h"
+ textual header "ptwriteintrin.h"
+ textual header "invpcidintrin.h"
+
+ textual header "__wmmintrin_aes.h"
+ textual header "__wmmintrin_pclmul.h"
explicit module mm_malloc {
requires !freestanding
@@ -128,14 +140,6 @@ module _Builtin_intrinsics [system] [extern_c] {
export aes
export pclmul
}
-
- explicit module aes {
- header "__wmmintrin_aes.h"
- }
-
- explicit module pclmul {
- header "__wmmintrin_pclmul.h"
- }
}
explicit module systemz {
diff --git a/lib/Headers/movdirintrin.h b/lib/Headers/movdirintrin.h
new file mode 100644
index 000000000000..ec20c53709bc
--- /dev/null
+++ b/lib/Headers/movdirintrin.h
@@ -0,0 +1,63 @@
+/*===------------------------- movdirintrin.h ------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <movdirintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef _MOVDIRINTRIN_H
+#define _MOVDIRINTRIN_H
+
+/* Move doubleword as direct store */
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("movdiri")))
+_directstoreu_u32 (void *__dst, unsigned int __value)
+{
+ __builtin_ia32_directstore_u32((unsigned int *)__dst, (unsigned int)__value);
+}
+
+#ifdef __x86_64__
+
+/* Move quadword as direct store */
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("movdiri")))
+_directstoreu_u64 (void *__dst, unsigned long __value)
+{
+ __builtin_ia32_directstore_u64((unsigned long *)__dst, __value);
+}
+
+#endif /* __x86_64__ */
+
+/*
+ * movdir64b - Move 64 bytes as direct store.
+ * The destination must be 64-byte aligned, and the store is atomic.
+ * The source address has no alignment requirement, and the load from
+ * the source address is not atomic.
+ */
+static __inline__ void
+__attribute__((__always_inline__, __nodebug__, __target__("movdir64b")))
+_movdir64b (void *__dst __attribute__((align_value(64))), const void *__src)
+{
+ __builtin_ia32_movdir64b(__dst, __src);
+}
+
+#endif /* _MOVDIRINTRIN_H */
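
A hedged sketch of the new direct-store intrinsics. The 64-byte-aligned
destination requirement comes from the comment above; the device-doorbell
framing and names here are hypothetical:

#include <stdint.h>
#include <immintrin.h>   /* assumes -mmovdiri -mmovdir64b */

void post_descriptor(void *mmio_dst_64B_aligned, const void *desc) {
  /* one atomic 64-byte direct store; the source may be unaligned */
  _movdir64b(mmio_dst_64B_aligned, desc);
}

void ring_doorbell(uint32_t *reg, uint32_t v) {
  _directstoreu_u32(reg, v);   /* doubleword direct store */
}
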
diff --git a/lib/Headers/mwaitxintrin.h b/lib/Headers/mwaitxintrin.h
index 635f2ac6cab5..2921eadfa540 100644
--- a/lib/Headers/mwaitxintrin.h
+++ b/lib/Headers/mwaitxintrin.h
@@ -25,8 +25,8 @@
#error "Never use <mwaitxintrin.h> directly; include <x86intrin.h> instead."
#endif
-#ifndef _MWAITXINTRIN_H
-#define _MWAITXINTRIN_H
+#ifndef __MWAITXINTRIN_H
+#define __MWAITXINTRIN_H
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
@@ -44,4 +44,4 @@ _mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
#undef __DEFAULT_FN_ATTRS
-#endif /* _MWAITXINTRIN_H */
+#endif /* __MWAITXINTRIN_H */
diff --git a/lib/Headers/nmmintrin.h b/lib/Headers/nmmintrin.h
index 57fec15963d1..348fb8c7c18f 100644
--- a/lib/Headers/nmmintrin.h
+++ b/lib/Headers/nmmintrin.h
@@ -21,10 +21,10 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef _NMMINTRIN_H
-#define _NMMINTRIN_H
+#ifndef __NMMINTRIN_H
+#define __NMMINTRIN_H
/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h,
just include it now then. */
#include <smmintrin.h>
-#endif /* _NMMINTRIN_H */
+#endif /* __NMMINTRIN_H */
diff --git a/lib/Headers/opencl-c.h b/lib/Headers/opencl-c.h
index ce204b04c030..e648b0f2f370 100644
--- a/lib/Headers/opencl-c.h
+++ b/lib/Headers/opencl-c.h
@@ -12862,7 +12862,7 @@ void __ovld mem_fence(cl_mem_fence_flags flags);
* Read memory barrier that orders only
* loads.
* The flags argument specifies the memory
- * address space and can be set to to a
+ * address space and can be set to a
* combination of the following literal
* values:
* CLK_LOCAL_MEM_FENCE
@@ -12874,7 +12874,7 @@ void __ovld read_mem_fence(cl_mem_fence_flags flags);
* Write memory barrier that orders only
* stores.
* The flags argument specifies the memory
- * address space and can be set to to a
+ * address space and can be set to a
* combination of the following literal
* values:
* CLK_LOCAL_MEM_FENCE
@@ -15421,8 +15421,8 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_dept
#define CLK_DEPTH_STENCIL 0x10BE
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
#define CLK_sRGB 0x10BF
-#define CLK_sRGBA 0x10C1
#define CLK_sRGBx 0x10C0
+#define CLK_sRGBA 0x10C1
#define CLK_sBGRA 0x10C2
#define CLK_ABGR 0x10C3
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
diff --git a/lib/Headers/pconfigintrin.h b/lib/Headers/pconfigintrin.h
new file mode 100644
index 000000000000..fee3cad38854
--- /dev/null
+++ b/lib/Headers/pconfigintrin.h
@@ -0,0 +1,50 @@
+/*===---- pconfigintrin.h - X86 platform configuration ---------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <pconfigintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __PCONFIGINTRIN_H
+#define __PCONFIGINTRIN_H
+
+#define __PCONFIG_KEY_PROGRAM 0x00000001
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("pconfig")))
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_pconfig_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+ unsigned int __result;
+ __asm__ ("pconfig"
+ : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+ : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+ : "cc");
+ return __result;
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
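
A hedged sketch of driving _pconfig_u32. PCONFIG is privileged, and the
meaning of the __d[] words for the key-program leaf (d[0] carrying the
address of a key-programming structure) is an assumption from the Intel SDM
rather than something this header defines:

#include <immintrin.h>   /* assumes -mpconfig; ring-0 only */

static unsigned int program_key(void *key_program_struct) {
  __SIZE_TYPE__ d[3] = { (__SIZE_TYPE__)key_program_struct, 0, 0 };
  /* EAX selects the leaf; EBX/ECX/EDX are passed in and out via d[] */
  return _pconfig_u32(__PCONFIG_KEY_PROGRAM, d);
}
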
diff --git a/lib/Headers/pkuintrin.h b/lib/Headers/pkuintrin.h
index 9e5459450b62..6976924d829e 100644
--- a/lib/Headers/pkuintrin.h
+++ b/lib/Headers/pkuintrin.h
@@ -1,4 +1,4 @@
-/*===------------- pkuintrin.h - PKU intrinsics ------------------===
+/*===---- pkuintrin.h - PKU intrinsics -------------------------------------===
*
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -40,7 +40,7 @@ _rdpkru_u32(void)
static __inline__ void __DEFAULT_FN_ATTRS
_wrpkru(unsigned int __val)
{
- return __builtin_ia32_wrpkru(__val);
+ __builtin_ia32_wrpkru(__val);
}
#undef __DEFAULT_FN_ATTRS
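
The _wrpkru fix above stops returning the result of a void builtin. A hedged
usage sketch, with the PKRU bit layout (AD = bit 2k, WD = bit 2k+1 for
protection key k) taken from the Intel SDM:

#include <immintrin.h>   /* assumes -mpku */

static void deny_writes_for_key1(void) {
  unsigned int pkru = _rdpkru_u32();
  pkru |= 1u << (2 * 1 + 1);   /* set the write-disable bit for key 1 */
  _wrpkru(pkru);
}
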
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
index 559ece2e3974..7e1a9eae59f6 100644
--- a/lib/Headers/pmmintrin.h
+++ b/lib/Headers/pmmintrin.h
@@ -28,9 +28,9 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("sse3")))
+ __attribute__((__always_inline__, __nodebug__, __target__("sse3"), __min_vector_width__(128)))
-/// \brief Loads data from an unaligned memory location to elements in a 128-bit
+/// Loads data from an unaligned memory location to elements in a 128-bit
/// vector.
///
/// If the address of the data is not 16-byte aligned, the instruction may
@@ -50,7 +50,7 @@ _mm_lddqu_si128(__m128i const *__p)
return (__m128i)__builtin_ia32_lddqu((char const *)__p);
}
-/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// Adds the even-indexed values and subtracts the odd-indexed values of
/// two 128-bit vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -69,7 +69,7 @@ _mm_addsub_ps(__m128 __a, __m128 __b)
return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in two
+/// Horizontally adds the adjacent pairs of values contained in two
/// 128-bit vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -92,7 +92,7 @@ _mm_hadd_ps(__m128 __a, __m128 __b)
return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in two
+/// Horizontally subtracts the adjacent pairs of values contained in two
/// 128-bit vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -115,8 +115,8 @@ _mm_hsub_ps(__m128 __a, __m128 __b)
return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Moves and duplicates high-order (odd-indexed) values from a 128-bit
-/// vector of [4 x float] to float values stored in a 128-bit vector of
+/// Moves and duplicates odd-indexed values from a 128-bit vector
+/// of [4 x float] to float values stored in a 128-bit vector of
/// [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -137,7 +137,7 @@ _mm_movehdup_ps(__m128 __a)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);
}
-/// \brief Duplicates low-order (even-indexed) values from a 128-bit vector of
+/// Duplicates even-indexed values from a 128-bit vector of
/// [4 x float] to float values stored in a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -158,7 +158,7 @@ _mm_moveldup_ps(__m128 __a)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2);
}
-/// \brief Adds the even-indexed values and subtracts the odd-indexed values of
+/// Adds the even-indexed values and subtracts the odd-indexed values of
/// two 128-bit vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -177,7 +177,7 @@ _mm_addsub_pd(__m128d __a, __m128d __b)
return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Horizontally adds the pairs of values contained in two 128-bit
+/// Horizontally adds the pairs of values contained in two 128-bit
/// vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -200,7 +200,7 @@ _mm_hadd_pd(__m128d __a, __m128d __b)
return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Horizontally subtracts the pairs of values contained in two 128-bit
+/// Horizontally subtracts the pairs of values contained in two 128-bit
/// vectors of [2 x double].
///
/// \headerfile <x86intrin.h>
@@ -223,13 +223,13 @@ _mm_hsub_pd(__m128d __a, __m128d __b)
return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);
}
-/// \brief Moves and duplicates one double-precision value to double-precision
+/// Moves and duplicates one double-precision value to double-precision
/// values stored in a 128-bit vector of [2 x double].
///
/// \headerfile <x86intrin.h>
///
/// \code
-/// __m128d _mm_loaddup_pd(double const * dp);
+/// __m128d _mm_loaddup_pd(double const *dp);
/// \endcode
///
/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
@@ -240,7 +240,7 @@ _mm_hsub_pd(__m128d __a, __m128d __b)
/// duplicated values.
#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
-/// \brief Moves and duplicates the double-precision value in the lower bits of
+/// Moves and duplicates the double-precision value in the lower bits of
/// a 128-bit vector of [2 x double] to double-precision values stored in a
/// 128-bit vector of [2 x double].
///
@@ -259,7 +259,7 @@ _mm_movedup_pd(__m128d __a)
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
}
-/// \brief Establishes a linear address memory range to be monitored and puts
+/// Establishes a linear address memory range to be monitored and puts
/// the processor in the monitor event pending state. Data stored in the
/// monitored address range causes the processor to exit the pending state.
///
@@ -280,7 +280,7 @@ _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
__builtin_ia32_monitor((void *)__p, __extensions, __hints);
}
-/// \brief Used with the MONITOR instruction to wait while the processor is in
+/// Used with the MONITOR instruction to wait while the processor is in
/// the monitor event pending state. Data stored in the monitored address
/// range causes the processor to exit the pending state.
///
diff --git a/lib/Headers/popcntintrin.h b/lib/Headers/popcntintrin.h
index 0b4793e58bcb..75ceab9e150b 100644
--- a/lib/Headers/popcntintrin.h
+++ b/lib/Headers/popcntintrin.h
@@ -21,13 +21,13 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef _POPCNTINTRIN_H
-#define _POPCNTINTRIN_H
+#ifndef __POPCNTINTRIN_H
+#define __POPCNTINTRIN_H
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
-/// \brief Counts the number of bits in the source operand having a value of 1.
+/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
///
@@ -43,7 +43,7 @@ _mm_popcnt_u32(unsigned int __A)
return __builtin_popcount(__A);
}
-/// \brief Counts the number of bits in the source operand having a value of 1.
+/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
///
@@ -60,7 +60,7 @@ _popcnt32(int __A)
}
#ifdef __x86_64__
-/// \brief Counts the number of bits in the source operand having a value of 1.
+/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
///
@@ -76,7 +76,7 @@ _mm_popcnt_u64(unsigned long long __A)
return __builtin_popcountll(__A);
}
-/// \brief Counts the number of bits in the source operand having a value of 1.
+/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
///
@@ -95,4 +95,4 @@ _popcnt64(long long __A)
#undef __DEFAULT_FN_ATTRS
-#endif /* _POPCNTINTRIN_H */
+#endif /* __POPCNTINTRIN_H */
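
Worked values for the popcount wrappers, as a hedged sketch assuming
-mpopcnt:

#include <popcntintrin.h>

int popcnt_demo(void) {
  return _mm_popcnt_u32(0xF0F0u)   /* 8 set bits */
       + _popcnt32(7);             /* 3 set bits */
}
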
diff --git a/lib/Headers/prfchwintrin.h b/lib/Headers/prfchwintrin.h
index b52f31da2706..70851396f48e 100644
--- a/lib/Headers/prfchwintrin.h
+++ b/lib/Headers/prfchwintrin.h
@@ -28,8 +28,7 @@
#ifndef __PRFCHWINTRIN_H
#define __PRFCHWINTRIN_H
-#if defined(__PRFCHW__) || defined(__3dNOW__)
-/// \brief Loads a memory sequence containing the specified memory address into
+/// Loads a memory sequence containing the specified memory address into
/// all data cache levels. The cache-coherency state is set to exclusive.
/// Data can be read from and written to the cache line without additional
/// delay.
@@ -46,7 +45,7 @@ _m_prefetch(void *__P)
__builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */);
}
-/// \brief Loads a memory sequence containing the specified memory address into
+/// Loads a memory sequence containing the specified memory address into
/// the L1 data cache and sets the cache-coherency to modified. This
/// provides a hint to the processor that the cache line will be modified.
/// It is intended for use when the cache line will be written to shortly
@@ -66,6 +65,5 @@ _m_prefetchw(void *__P)
{
__builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
}
-#endif
#endif /* __PRFCHWINTRIN_H */
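
With the feature guard removed above, _m_prefetch and _m_prefetchw are always
visible. A hedged sketch of the intended write-prefetch pattern; the list
type and traversal are hypothetical:

#include <x86intrin.h>

struct node { struct node *next; long val; };

void bump_all(struct node *n) {
  while (n) {
    if (n->next)
      _m_prefetchw(n->next);   /* hint: the next node will be written */
    n->val += 1;
    n = n->next;
  }
}
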
diff --git a/lib/Headers/ptwriteintrin.h b/lib/Headers/ptwriteintrin.h
new file mode 100644
index 000000000000..1bb1df0a2edf
--- /dev/null
+++ b/lib/Headers/ptwriteintrin.h
@@ -0,0 +1,51 @@
+/*===------------ ptwriteintrin.h - PTWRITE intrinsic --------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <ptwriteintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __PTWRITEINTRIN_H
+#define __PTWRITEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("ptwrite")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_ptwrite32(unsigned int __value) {
+ __builtin_ia32_ptwrite32(__value);
+}
+
+#ifdef __x86_64__
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_ptwrite64(unsigned long long __value) {
+ __builtin_ia32_ptwrite64(__value);
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __PTWRITEINTRIN_H */
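A hedged sketch of the new intrinsic: _ptwrite32 injects its operand into the Intel Processor Trace stream, so it only has an observable effect while PT tracing with PTWRITE packets enabled is active (build flag -mptwrite assumed):

#include <x86intrin.h>

/* Record a software-defined marker in the PT trace at this point. */
void trace_point(unsigned int marker) {
  _ptwrite32(marker);
}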
diff --git a/lib/Headers/rdseedintrin.h b/lib/Headers/rdseedintrin.h
index 421f4ea48702..419466932cf5 100644
--- a/lib/Headers/rdseedintrin.h
+++ b/lib/Headers/rdseedintrin.h
@@ -21,7 +21,7 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __X86INTRIN_H
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
#endif
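The relaxed guard lets <immintrin.h> users reach the RDSEED intrinsics as well; a minimal retry loop, assuming -mrdseed and that _rdseed32_step returns 0 when no entropy is available yet:

#include <immintrin.h>

unsigned int hw_seed(void) {
  unsigned int s;
  while (!_rdseed32_step(&s))
    ;  /* hardware had no seed ready; retry */
  return s;
}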
diff --git a/lib/Headers/sgxintrin.h b/lib/Headers/sgxintrin.h
new file mode 100644
index 000000000000..20aee766103c
--- /dev/null
+++ b/lib/Headers/sgxintrin.h
@@ -0,0 +1,70 @@
+/*===---- sgxintrin.h - X86 SGX intrinsics configuration -------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <sgxintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __SGXINTRIN_H
+#define __SGXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sgx")))
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_enclu_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+ unsigned int __result;
+ __asm__ ("enclu"
+ : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+ : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+ : "cc");
+ return __result;
+}
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_encls_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+ unsigned int __result;
+ __asm__ ("encls"
+ : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+ : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+ : "cc");
+ return __result;
+}
+
+static __inline unsigned int __DEFAULT_FN_ATTRS
+_enclv_u32(unsigned int __leaf, __SIZE_TYPE__ __d[])
+{
+ unsigned int __result;
+ __asm__ ("enclv"
+ : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2])
+ : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2])
+ : "cc");
+ return __result;
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
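The three wrappers share one convention: __d[0], __d[1] and __d[2] are passed in and returned through RBX, RCX and RDX, with the leaf number in EAX. A sketch under stated assumptions -- the leaf value (2 = EENTER for ENCLU) and the register meanings are cited from memory of the Intel SDM, and ENCLS/ENCLV additionally require ring 0:

#include <x86intrin.h>

/* Hypothetical enclave entry: __d[0] = TCS address, __d[1] = AEP. */
unsigned int enter_enclave(__SIZE_TYPE__ tcs, __SIZE_TYPE__ aep) {
  __SIZE_TYPE__ regs[3] = { tcs, aep, 0 };
  return _enclu_u32(2 /* EENTER (assumed leaf number) */, regs);
}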
diff --git a/lib/Headers/shaintrin.h b/lib/Headers/shaintrin.h
index 9b5d21800819..3df4718ced4d 100644
--- a/lib/Headers/shaintrin.h
+++ b/lib/Headers/shaintrin.h
@@ -29,10 +29,10 @@
#define __SHAINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128)))
-#define _mm_sha1rnds4_epu32(V1, V2, M) __extension__ ({ \
- __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)); })
+#define _mm_sha1rnds4_epu32(V1, V2, M) \
+ __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
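Note the rewritten _mm_sha1rnds4_epu32 passes the rounds selector straight to the builtin, so it must be an immediate in the range 0-3. A minimal sketch, built with -msha:

#include <immintrin.h>

/* Four SHA-1 rounds; selector 0 picks the f/K pair for rounds 0-19. */
__m128i sha1_quad(__m128i abcd, __m128i wk) {
  return _mm_sha1rnds4_epu32(abcd, wk, 0);
}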
diff --git a/lib/Headers/smmintrin.h b/lib/Headers/smmintrin.h
index c2fa5a452bce..4806b3e4e150 100644
--- a/lib/Headers/smmintrin.h
+++ b/lib/Headers/smmintrin.h
@@ -21,13 +21,13 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef _SMMINTRIN_H
-#define _SMMINTRIN_H
+#ifndef __SMMINTRIN_H
+#define __SMMINTRIN_H
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT 0x00
@@ -46,7 +46,7 @@
#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
-/// \brief Rounds up each element of the 128-bit vector of [4 x float] to an
+/// Rounds up each element of the 128-bit vector of [4 x float] to an
/// integer and returns the rounded values in a 128-bit vector of
/// [4 x float].
///
@@ -63,7 +63,7 @@
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
-/// \brief Rounds up each element of the 128-bit vector of [2 x double] to an
+/// Rounds up each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
/// [2 x double].
///
@@ -80,7 +80,7 @@
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
-/// \brief Copies three upper elements of the first 128-bit vector operand to
+/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
/// [4 x float]. Rounds up the lowest element of the second 128-bit vector
/// operand to an integer and copies it to the lowest element of the 128-bit
@@ -105,7 +105,7 @@
/// values.
#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
-/// \brief Copies the upper element of the first 128-bit vector operand to the
+/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
/// Rounds up the lower element of the second 128-bit vector operand to an
/// integer and copies it to the lower element of the 128-bit result vector
@@ -130,7 +130,7 @@
/// values.
#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
-/// \brief Rounds down each element of the 128-bit vector of [4 x float] to an
+/// Rounds down each element of the 128-bit vector of [4 x float] to an
/// integer and returns the rounded values in a 128-bit vector of
/// [4 x float].
///
@@ -147,7 +147,7 @@
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
-/// \brief Rounds down each element of the 128-bit vector of [2 x double] to an
+/// Rounds down each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
/// [2 x double].
///
@@ -164,7 +164,7 @@
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
-/// \brief Copies three upper elements of the first 128-bit vector operand to
+/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
/// [4 x float]. Rounds down the lowest element of the second 128-bit vector
/// operand to an integer and copies it to the lowest element of the 128-bit
@@ -189,7 +189,7 @@
/// values.
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
-/// \brief Copies the upper element of the first 128-bit vector operand to the
+/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
/// Rounds down the lower element of the second 128-bit vector operand to an
/// integer and copies it to the lower element of the 128-bit result vector
@@ -214,7 +214,7 @@
/// values.
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
-/// \brief Rounds each element of the 128-bit vector of [4 x float] to an
+/// Rounds each element of the 128-bit vector of [4 x float] to an
/// integer value according to the rounding control specified by the second
/// argument and returns the rounded values in a 128-bit vector of
/// [4 x float].
@@ -244,10 +244,10 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) __extension__ ({ \
- (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)); })
+#define _mm_round_ps(X, M) \
+ (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M))
-/// \brief Copies three upper elements of the first 128-bit vector operand to
+/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
/// [4 x float]. Rounds the lowest element of the second 128-bit vector
/// operand to an integer value according to the rounding control specified
@@ -285,11 +285,11 @@
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_round_ss(X, Y, M) __extension__ ({ \
+#define _mm_round_ss(X, Y, M) \
(__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M)); })
+ (__v4sf)(__m128)(Y), (M))
-/// \brief Rounds each element of the 128-bit vector of [2 x double] to an
+/// Rounds each element of the 128-bit vector of [2 x double] to an
/// integer value according to the rounding control specified by the second
/// argument and returns the rounded values in a 128-bit vector of
/// [2 x double].
@@ -319,10 +319,10 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) __extension__ ({ \
- (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)); })
+#define _mm_round_pd(X, M) \
+ (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M))
-/// \brief Copies the upper element of the first 128-bit vector operand to the
+/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
/// Rounds the lower element of the second 128-bit vector operand to an
/// integer value according to the rounding control specified by the third
@@ -360,12 +360,12 @@
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_round_sd(X, Y, M) __extension__ ({ \
+#define _mm_round_sd(X, Y, M) \
(__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M)); })
+ (__v2df)(__m128d)(Y), (M))
/* SSE4 Packed Blending Intrinsics. */
-/// \brief Returns a 128-bit vector of [2 x double] where the values are
+/// Returns a 128-bit vector of [2 x double] where the values are
/// selected from either the first or second operand as specified by the
/// third operand, the control mask.
///
@@ -389,13 +389,11 @@
/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
- (__m128d)__builtin_shufflevector((__v2df)(__m128d)(V1), \
- (__v2df)(__m128d)(V2), \
- (((M) & 0x01) ? 2 : 0), \
- (((M) & 0x02) ? 3 : 1)); })
+#define _mm_blend_pd(V1, V2, M) \
+ (__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), (int)(M))
-/// \brief Returns a 128-bit vector of [4 x float] where the values are selected
+/// Returns a 128-bit vector of [4 x float] where the values are selected
/// from either the first or second operand as specified by the third
/// operand, the control mask.
///
@@ -419,14 +417,11 @@
/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
- (__m128)__builtin_shufflevector((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
- (((M) & 0x01) ? 4 : 0), \
- (((M) & 0x02) ? 5 : 1), \
- (((M) & 0x04) ? 6 : 2), \
- (((M) & 0x08) ? 7 : 3)); })
-
-/// \brief Returns a 128-bit vector of [2 x double] where the values are
+#define _mm_blend_ps(V1, V2, M) \
+ (__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
+ (__v4sf)(__m128)(V2), (int)(M))
+
+/// Returns a 128-bit vector of [2 x double] where the values are
/// selected from either the first or second operand as specified by the
/// third operand, the control mask.
///
@@ -453,7 +448,7 @@ _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
(__v2df)__M);
}
-/// \brief Returns a 128-bit vector of [4 x float] where the values are
+/// Returns a 128-bit vector of [4 x float] where the values are
/// selected from either the first or second operand as specified by the
/// third operand, the control mask.
///
@@ -480,7 +475,7 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
(__v4sf)__M);
}
-/// \brief Returns a 128-bit vector of [16 x i8] where the values are selected
+/// Returns a 128-bit vector of [16 x i8] where the values are selected
/// from either of the first or second operand as specified by the third
/// operand, the control mask.
///
@@ -493,7 +488,7 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
/// \param __V2
/// A 128-bit vector of [16 x i8].
/// \param __M
-/// A 128-bit vector operand, with mask bits 127, 119, 111 ... 7 specifying
+/// A 128-bit vector operand, with mask bits 127, 119, 111...7 specifying
/// how the values are to be copied. The position of the mask bit corresponds
/// to the most significant bit of a copied value. When a mask bit is 0, the
/// corresponding 8-bit element in operand \a __V1 is copied to the same
@@ -507,7 +502,7 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
(__v16qi)__M);
}
-/// \brief Returns a 128-bit vector of [8 x i16] where the values are selected
+/// Returns a 128-bit vector of [8 x i16] where the values are selected
/// from either of the first or second operand as specified by the third
/// operand, the control mask.
///
@@ -531,20 +526,12 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
- (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(V1), \
- (__v8hi)(__m128i)(V2), \
- (((M) & 0x01) ? 8 : 0), \
- (((M) & 0x02) ? 9 : 1), \
- (((M) & 0x04) ? 10 : 2), \
- (((M) & 0x08) ? 11 : 3), \
- (((M) & 0x10) ? 12 : 4), \
- (((M) & 0x20) ? 13 : 5), \
- (((M) & 0x40) ? 14 : 6), \
- (((M) & 0x80) ? 15 : 7)); })
+#define _mm_blend_epi16(V1, V2, M) \
+ (__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), (int)(M))
/* SSE4 Dword Multiply Instructions. */
-/// \brief Multiples corresponding elements of two 128-bit vectors of [4 x i32]
+/// Multiplies corresponding elements of two 128-bit vectors of [4 x i32]
/// and returns the lower 32 bits of each product in a 128-bit vector of
/// [4 x i32].
///
@@ -563,7 +550,7 @@ _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
}
-/// \brief Multiplies corresponding even-indexed elements of two 128-bit
+/// Multiplies corresponding even-indexed elements of two 128-bit
/// vectors of [4 x i32] and returns a 128-bit vector of [2 x i64]
/// containing the products.
///
@@ -584,7 +571,7 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
}
/* SSE4 Floating Point Dot Product Instructions. */
-/// \brief Computes the dot product of the two 128-bit vectors of [4 x float]
+/// Computes the dot product of the two 128-bit vectors of [4 x float]
/// and returns it in the elements of the 128-bit result vector of
/// [4 x float].
///
@@ -616,11 +603,11 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// each [4 x float] subvector. If a bit is set, the dot product is returned
/// in the corresponding element; otherwise that element is set to zero.
/// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+#define _mm_dp_ps(X, Y, M) \
(__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M)); })
+ (__v4sf)(__m128)(Y), (M))
-/// \brief Computes the dot product of the two 128-bit vectors of [2 x double]
+/// Computes the dot product of the two 128-bit vectors of [2 x double]
/// and returns it in the elements of the 128-bit result vector of
/// [2 x double].
///
@@ -648,15 +635,15 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// input vectors are used as an input for dot product; otherwise that input
/// is treated as zero. Bits [1:0] determine which elements of the result
/// will receive a copy of the final dot product, with bit [0] corresponding
-/// to the lowest element and bit [3] corresponding to the highest element of
+/// to the lowest element and bit [1] corresponding to the highest element of
/// each [2 x double] vector. If a bit is set, the dot product is returned in
/// the corresponding element; otherwise that element is set to zero.
-#define _mm_dp_pd(X, Y, M) __extension__ ({\
+#define _mm_dp_pd(X, Y, M) \
(__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M)); })
+ (__v2df)(__m128d)(Y), (M))
/* SSE4 Streaming Load Hint Instruction. */
-/// \brief Loads integer values from a 128-bit aligned memory location to a
+/// Loads integer values from a 128-bit aligned memory location to a
/// 128-bit integer vector.
///
/// \headerfile <x86intrin.h>
@@ -675,7 +662,7 @@ _mm_stream_load_si128 (__m128i const *__V)
}
/* SSE4 Packed Integer Min/Max Instructions. */
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the lesser
/// of the two values.
///
@@ -694,7 +681,7 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the
/// greater value of the two.
///
@@ -713,7 +700,7 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the lesser
/// value of the two.
///
@@ -732,7 +719,7 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the
/// greater value of the two.
///
@@ -751,7 +738,7 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser
/// value of the two.
///
@@ -770,7 +757,7 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the
/// greater value of the two.
///
@@ -789,7 +776,7 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser
/// value of the two.
///
@@ -808,7 +795,7 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
}
-/// \brief Compares the corresponding elements of two 128-bit vectors of
+/// Compares the corresponding elements of two 128-bit vectors of
/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the
/// greater value of the two.
///
@@ -828,7 +815,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
-/// \brief Takes the first argument \a X and inserts an element from the second
+/// Takes the first argument \a X and inserts an element from the second
/// argument \a Y as selected by the third argument \a N. That result then
/// has elements zeroed out also as selected by the third argument \a N. The
/// resulting 128-bit vector of [4 x float] is then returned.
@@ -866,11 +853,11 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Copies the selected bits from \a Y to result bits [127:96]. \n
/// Bits[3:0]: If any of these bits are set, the corresponding result
/// element is cleared.
-/// \returns A 128-bit vector of [4 x float] containing the copied single-
-/// precision floating point elements from the operands.
+/// \returns A 128-bit vector of [4 x float] containing the copied
+/// single-precision floating point elements from the operands.
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
-/// \brief Extracts a 32-bit integer from a 128-bit vector of [4 x float] and
+/// Extracts a 32-bit integer from a 128-bit vector of [4 x float] and
/// returns it, using the immediate value parameter \a N as a selector.
///
/// \headerfile <x86intrin.h>
@@ -893,15 +880,14 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Bits [127:96] of parameter \a X are returned.
/// \returns A 32-bit integer containing the extracted 32 bits of float data.
#define _mm_extract_ps(X, N) (__extension__ \
- ({ union { int __i; float __f; } __t; \
- __v4sf __a = (__v4sf)(__m128)(X); \
- __t.__f = __a[(N) & 3]; \
- __t.__i;}))
+ ({ union { int __i; float __f; } __t; \
+ __t.__f = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \
+ __t.__i;}))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
-#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
- (D) = __a[N]; }))
+#define _MM_EXTRACT_FLOAT(D, X, N) \
+ { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); }
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
@@ -912,7 +898,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
_MM_MK_INSERTPS_NDX((N), 0, 0x0e))
/* Insert int into packed integer array at index. */
-/// \brief Constructs a 128-bit vector of [16 x i8] by first making a copy of
+/// Constructs a 128-bit vector of [16 x i8] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the lower 8 bits
/// of an integer parameter \a I into an offset specified by the immediate
/// value parameter \a N.
@@ -952,12 +938,11 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 1110: Bits [119:112] of the result are used for insertion. \n
/// 1111: Bits [127:120] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) (__extension__ \
- ({ __v16qi __a = (__v16qi)(__m128i)(X); \
- __a[(N) & 15] = (I); \
- (__m128i)__a;}))
+#define _mm_insert_epi8(X, I, N) \
+ (__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
+ (int)(I), (int)(N))
-/// \brief Constructs a 128-bit vector of [4 x i32] by first making a copy of
+/// Constructs a 128-bit vector of [4 x i32] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 32-bit
/// integer parameter \a I at the offset specified by the immediate value
/// parameter \a N.
@@ -985,13 +970,12 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of the result are used for insertion. \n
/// 11: Bits [127:96] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) (__extension__ \
- ({ __v4si __a = (__v4si)(__m128i)(X); \
- __a[(N) & 3] = (I); \
- (__m128i)__a;}))
+#define _mm_insert_epi32(X, I, N) \
+ (__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
+ (int)(I), (int)(N))
#ifdef __x86_64__
-/// \brief Constructs a 128-bit vector of [2 x i64] by first making a copy of
+/// Constructs a 128-bit vector of [2 x i64] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 64-bit
/// integer parameter \a I, using the immediate value parameter \a N as an
/// insertion location selector.
@@ -1017,16 +1001,15 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] of the result are used for insertion. \n
/// 1: Bits [127:64] of the result are used for insertion. \n
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) (__extension__ \
- ({ __v2di __a = (__v2di)(__m128i)(X); \
- __a[(N) & 1] = (I); \
- (__m128i)__a;}))
+#define _mm_insert_epi64(X, I, N) \
+ (__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
+ (long long)(I), (int)(N))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
* as a zero extended value, so it is unsigned.
*/
-/// \brief Extracts an 8-bit element from the 128-bit integer vector of
+/// Extracts an 8-bit element from the 128-bit integer vector of
/// [16 x i8], using the immediate value parameter \a N as a selector.
///
/// \headerfile <x86intrin.h>
@@ -1061,11 +1044,11 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \returns An unsigned integer, whose lower 8 bits are selected from the
/// 128-bit integer vector parameter and the remaining bits are assigned
/// zeros.
-#define _mm_extract_epi8(X, N) (__extension__ \
- ({ __v16qi __a = (__v16qi)(__m128i)(X); \
- (int)(unsigned char) __a[(N) & 15];}))
+#define _mm_extract_epi8(X, N) \
+ (int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+ (int)(N))
-/// \brief Extracts a 32-bit element from the 128-bit integer vector of
+/// Extracts a 32-bit element from the 128-bit integer vector of
/// [4 x i32], using the immediate value parameter \a N as a selector.
///
/// \headerfile <x86intrin.h>
@@ -1087,12 +1070,11 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Bits [127:96] of the parameter \a X are extracted.
/// \returns An integer, whose lower 32 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) (__extension__ \
- ({ __v4si __a = (__v4si)(__m128i)(X); \
- (int)__a[(N) & 3];}))
+#define _mm_extract_epi32(X, N) \
+ (int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N))
#ifdef __x86_64__
-/// \brief Extracts a 64-bit element from the 128-bit integer vector of
+/// Extracts a 64-bit element from the 128-bit integer vector of
/// [2 x i64], using the immediate value parameter \a N as a selector.
///
/// \headerfile <x86intrin.h>
@@ -1111,13 +1093,12 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] are returned. \n
/// 1: Bits [127:64] are returned. \n
/// \returns A 64-bit integer.
-#define _mm_extract_epi64(X, N) (__extension__ \
- ({ __v2di __a = (__v2di)(__m128i)(X); \
- (long long)__a[(N) & 1];}))
+#define _mm_extract_epi64(X, N) \
+ (long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N))
#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
-/// \brief Tests whether the specified bits in a 128-bit integer vector are all
+/// Tests whether the specified bits in a 128-bit integer vector are all
/// zeros.
///
/// \headerfile <x86intrin.h>
@@ -1135,7 +1116,7 @@ _mm_testz_si128(__m128i __M, __m128i __V)
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
-/// \brief Tests whether the specified bits in a 128-bit integer vector are all
+/// Tests whether the specified bits in a 128-bit integer vector are all
/// ones.
///
/// \headerfile <x86intrin.h>
@@ -1153,7 +1134,7 @@ _mm_testc_si128(__m128i __M, __m128i __V)
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
-/// \brief Tests whether the specified bits in a 128-bit integer vector are
+/// Tests whether the specified bits in a 128-bit integer vector are
/// neither all zeros nor all ones.
///
/// \headerfile <x86intrin.h>
@@ -1172,7 +1153,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}
-/// \brief Tests whether the specified bits in a 128-bit integer vector are all
+/// Tests whether the specified bits in a 128-bit integer vector are all
/// ones.
///
/// \headerfile <x86intrin.h>
@@ -1189,7 +1170,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// otherwise.
#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
-/// \brief Tests whether the specified bits in a 128-bit integer vector are
+/// Tests whether the specified bits in a 128-bit integer vector are
/// neither all zeros nor all ones.
///
/// \headerfile <x86intrin.h>
@@ -1208,7 +1189,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// FALSE otherwise.
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
-/// \brief Tests whether the specified bits in a 128-bit integer vector are all
+/// Tests whether the specified bits in a 128-bit integer vector are all
/// zeros.
///
/// \headerfile <x86intrin.h>
@@ -1227,7 +1208,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
-/// \brief Compares each of the corresponding 64-bit values of the 128-bit
+/// Compares each of the corresponding 64-bit values of the 128-bit
/// integer vectors for equality.
///
/// \headerfile <x86intrin.h>
@@ -1246,7 +1227,7 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
}
/* SSE4 Packed Integer Sign-Extension. */
-/// \brief Sign-extends each of the lower eight 8-bit integer elements of a
+/// Sign-extends each of the lower eight 8-bit integer elements of a
/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a
/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector
/// are unused.
@@ -1267,7 +1248,7 @@ _mm_cvtepi8_epi16(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
}
-/// \brief Sign-extends each of the lower four 8-bit integer elements of a
+/// Sign-extends each of the lower four 8-bit integer elements of a
/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a
/// 128-bit vector of [4 x i32]. The upper twelve elements of the input
/// vector are unused.
@@ -1277,8 +1258,8 @@ _mm_cvtepi8_epi16(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVSXBD / PMOVSXBD </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are sign-
-/// extended to 32-bit values.
+/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
+/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi8_epi32(__m128i __V)
@@ -1288,7 +1269,7 @@ _mm_cvtepi8_epi32(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}
-/// \brief Sign-extends each of the lower two 8-bit integer elements of a
+/// Sign-extends each of the lower two 8-bit integer elements of a
/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in
/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input
/// vector are unused.
@@ -1298,8 +1279,8 @@ _mm_cvtepi8_epi32(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVSXBQ / PMOVSXBQ </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are sign-
-/// extended to 64-bit values.
+/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
+/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi8_epi64(__m128i __V)
@@ -1309,7 +1290,7 @@ _mm_cvtepi8_epi64(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}
-/// \brief Sign-extends each of the lower four 16-bit integer elements of a
+/// Sign-extends each of the lower four 16-bit integer elements of a
/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in
/// a 128-bit vector of [4 x i32]. The upper four elements of the input
/// vector are unused.
@@ -1319,8 +1300,8 @@ _mm_cvtepi8_epi64(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVSXWD / PMOVSXWD </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are sign-
-/// extended to 32-bit values.
+/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
+/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi16_epi32(__m128i __V)
@@ -1328,7 +1309,7 @@ _mm_cvtepi16_epi32(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
-/// \brief Sign-extends each of the lower two 16-bit integer elements of a
+/// Sign-extends each of the lower two 16-bit integer elements of a
/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in
/// a 128-bit vector of [2 x i64]. The upper six elements of the input
/// vector are unused.
@@ -1338,8 +1319,8 @@ _mm_cvtepi16_epi32(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVSXWQ / PMOVSXWQ </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are sign-
-/// extended to 64-bit values.
+/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
+/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi16_epi64(__m128i __V)
@@ -1347,7 +1328,7 @@ _mm_cvtepi16_epi64(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
-/// \brief Sign-extends each of the lower two 32-bit integer elements of a
+/// Sign-extends each of the lower two 32-bit integer elements of a
/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in
/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector
/// are unused.
@@ -1357,8 +1338,8 @@ _mm_cvtepi16_epi64(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVSXDQ / PMOVSXDQ </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are sign-
-/// extended to 64-bit values.
+/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
+/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepi32_epi64(__m128i __V)
@@ -1367,7 +1348,7 @@ _mm_cvtepi32_epi64(__m128i __V)
}
/* SSE4 Packed Integer Zero-Extension. */
-/// \brief Zero-extends each of the lower eight 8-bit integer elements of a
+/// Zero-extends each of the lower eight 8-bit integer elements of a
/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a
/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector
/// are unused.
@@ -1377,8 +1358,8 @@ _mm_cvtepi32_epi64(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVZXBW / PMOVZXBW </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are zero-
-/// extended to 16-bit values.
+/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
+/// zero-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepu8_epi16(__m128i __V)
@@ -1386,7 +1367,7 @@ _mm_cvtepu8_epi16(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
}
-/// \brief Zero-extends each of the lower four 8-bit integer elements of a
+/// Zero-extends each of the lower four 8-bit integer elements of a
/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a
/// 128-bit vector of [4 x i32]. The upper twelve elements of the input
/// vector are unused.
@@ -1396,8 +1377,8 @@ _mm_cvtepu8_epi16(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVZXBD / PMOVZXBD </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are zero-
-/// extended to 32-bit values.
+/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
+/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepu8_epi32(__m128i __V)
@@ -1405,7 +1386,7 @@ _mm_cvtepu8_epi32(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}
-/// \brief Zero-extends each of the lower two 8-bit integer elements of a
+/// Zero-extends each of the lower two 8-bit integer elements of a
/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in
/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input
/// vector are unused.
@@ -1415,8 +1396,8 @@ _mm_cvtepu8_epi32(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVZXBQ / PMOVZXBQ </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are zero-
-/// extended to 64-bit values.
+/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
+/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepu8_epi64(__m128i __V)
@@ -1424,7 +1405,7 @@ _mm_cvtepu8_epi64(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}
-/// \brief Zero-extends each of the lower four 16-bit integer elements of a
+/// Zero-extends each of the lower four 16-bit integer elements of a
/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in
/// a 128-bit vector of [4 x i32]. The upper four elements of the input
/// vector are unused.
@@ -1434,8 +1415,8 @@ _mm_cvtepu8_epi64(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVZXWD / PMOVZXWD </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are zero-
-/// extended to 32-bit values.
+/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
+/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepu16_epi32(__m128i __V)
@@ -1443,7 +1424,7 @@ _mm_cvtepu16_epi32(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}
-/// \brief Zero-extends each of the lower two 16-bit integer elements of a
+/// Zero-extends each of the lower two 16-bit integer elements of a
/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in
/// a 128-bit vector of [2 x i64]. The upper six elements of the input vector
/// are unused.
@@ -1453,8 +1434,8 @@ _mm_cvtepu16_epi32(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVZXWQ / PMOVZXWQ </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are zero-
-/// extended to 64-bit values.
+/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
+/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepu16_epi64(__m128i __V)
@@ -1462,7 +1443,7 @@ _mm_cvtepu16_epi64(__m128i __V)
return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}
-/// \brief Zero-extends each of the lower two 32-bit integer elements of a
+/// Zero-extends each of the lower two 32-bit integer elements of a
/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in
/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector
/// are unused.
@@ -1472,8 +1453,8 @@ _mm_cvtepu16_epi64(__m128i __V)
/// This intrinsic corresponds to the <c> VPMOVZXDQ / PMOVZXDQ </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are zero-
-/// extended to 64-bit values.
+/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
+/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtepu32_epi64(__m128i __V)
@@ -1482,7 +1463,7 @@ _mm_cvtepu32_epi64(__m128i __V)
}
/* SSE4 Pack with Unsigned Saturation. */
-/// \brief Converts 32-bit signed integers from both 128-bit integer vector
+/// Converts 32-bit signed integers from both 128-bit integer vector
/// operands into 16-bit unsigned integers, and returns the packed result.
/// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than
/// 0x0000 are saturated to 0x0000.
@@ -1511,7 +1492,7 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
-/// \brief Subtracts 8-bit unsigned integer values and computes the absolute
+/// Subtracts 8-bit unsigned integer values and computes the absolute
/// values of the differences to the corresponding bits in the destination.
/// Then sums of the absolute differences are returned according to the bit
/// fields in the immediate operand.
@@ -1534,23 +1515,23 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \code
/// // M2 represents bit 2 of the immediate operand
/// // M10 represents bits [1:0] of the immediate operand
-/// i = M2 * 4
-/// j = M10 * 4
+/// i = M2 * 4;
+/// j = M10 * 4;
/// for (k = 0; k < 8; k = k + 1) {
-/// d0 = abs(X[i + k + 0] - Y[j + 0])
-/// d1 = abs(X[i + k + 1] - Y[j + 1])
-/// d2 = abs(X[i + k + 2] - Y[j + 2])
-/// d3 = abs(X[i + k + 3] - Y[j + 3])
-/// r[k] = d0 + d1 + d2 + d3
+/// d0 = abs(X[i + k + 0] - Y[j + 0]);
+/// d1 = abs(X[i + k + 1] - Y[j + 1]);
+/// d2 = abs(X[i + k + 2] - Y[j + 2]);
+/// d3 = abs(X[i + k + 3] - Y[j + 3]);
+/// r[k] = d0 + d1 + d2 + d3;
/// }
/// \endcode
/// \returns A 128-bit integer vector containing the sums of the sets of
/// absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+#define _mm_mpsadbw_epu8(X, Y, M) \
(__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
- (__v16qi)(__m128i)(Y), (M)); })
+ (__v16qi)(__m128i)(Y), (M))
-/// \brief Finds the minimum unsigned 16-bit element in the input 128-bit
+/// Finds the minimum unsigned 16-bit element in the input 128-bit
/// vector of [8 x u16] and returns it along with its index.
///
/// \headerfile <x86intrin.h>
@@ -1604,7 +1585,7 @@ _mm_minpos_epu16(__m128i __V)
#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns a 128-bit integer vector representing the result
/// mask of the comparison.
@@ -1660,7 +1641,7 @@ _mm_minpos_epu16(__m128i __V)
(__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns an integer representing the result index of the
/// comparison.
@@ -1714,7 +1695,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns a 128-bit integer vector representing the result
/// mask of the comparison.
@@ -1775,7 +1756,7 @@ _mm_minpos_epu16(__m128i __V)
(__v16qi)(__m128i)(B), (int)(LB), \
(int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns an integer representing the result index of the
/// comparison.
@@ -1835,7 +1816,7 @@ _mm_minpos_epu16(__m128i __V)
(int)(M))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the bit mask is zero and the length of the
/// string in \a B is the maximum, otherwise, returns 0.
@@ -1885,7 +1866,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the bit mask is non-zero, otherwise, returns
/// 0.
@@ -1934,7 +1915,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns bit 0 of the resulting bit mask.
///
@@ -1982,7 +1963,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the length of the string in \a A is less than
/// the maximum, otherwise, returns 0.
@@ -2032,7 +2013,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the length of the string in \a B is less than
/// the maximum, otherwise, returns 0.
@@ -2082,7 +2063,7 @@ _mm_minpos_epu16(__m128i __V)
(int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the bit mask is zero and the length of the
/// string in \a B is the maximum, otherwise, returns 0.
@@ -2137,7 +2118,7 @@ _mm_minpos_epu16(__m128i __V)
(__v16qi)(__m128i)(B), (int)(LB), \
(int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the resulting mask is non-zero, otherwise,
/// returns 0.
@@ -2191,7 +2172,7 @@ _mm_minpos_epu16(__m128i __V)
(__v16qi)(__m128i)(B), (int)(LB), \
(int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns bit 0 of the resulting bit mask.
///
@@ -2244,7 +2225,7 @@ _mm_minpos_epu16(__m128i __V)
(__v16qi)(__m128i)(B), (int)(LB), \
(int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the length of the string in \a A is less than
/// the maximum, otherwise, returns 0.
@@ -2299,7 +2280,7 @@ _mm_minpos_epu16(__m128i __V)
(__v16qi)(__m128i)(B), (int)(LB), \
(int)(M))
-/// \brief Uses the immediate operand \a M to perform a comparison of string
+/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
/// \a A and \a B. Returns 1 if the length of the string in \a B is less than
/// the maximum, otherwise, returns 0.
@@ -2354,7 +2335,7 @@ _mm_minpos_epu16(__m128i __V)
(int)(M))
/* SSE4.2 Compare Packed Data -- Greater Than. */
-/// \brief Compares each of the corresponding 64-bit values of the 128-bit
+/// Compares each of the corresponding 64-bit values of the 128-bit
/// integer vectors to determine if the values in the first operand are
/// greater than those in the second operand.
///
@@ -2374,7 +2355,7 @@ _mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
}
/* SSE4.2 Accumulate CRC32. */
-/// \brief Adds the unsigned integer operand to the CRC-32C checksum of the
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// unsigned char operand.
///
/// \headerfile <x86intrin.h>
@@ -2394,7 +2375,7 @@ _mm_crc32_u8(unsigned int __C, unsigned char __D)
return __builtin_ia32_crc32qi(__C, __D);
}
-/// \brief Adds the unsigned integer operand to the CRC-32C checksum of the
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// unsigned short operand.
///
/// \headerfile <x86intrin.h>
@@ -2414,7 +2395,7 @@ _mm_crc32_u16(unsigned int __C, unsigned short __D)
return __builtin_ia32_crc32hi(__C, __D);
}
-/// \brief Adds the first unsigned integer operand to the CRC-32C checksum of
+/// Adds the first unsigned integer operand to the CRC-32C checksum of
/// the second unsigned integer operand.
///
/// \headerfile <x86intrin.h>
@@ -2435,7 +2416,7 @@ _mm_crc32_u32(unsigned int __C, unsigned int __D)
}
#ifdef __x86_64__
-/// \brief Adds the unsigned integer operand to the CRC-32C checksum of the
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// unsigned 64-bit integer operand.
///
/// \headerfile <x86intrin.h>
@@ -2458,8 +2439,6 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
#undef __DEFAULT_FN_ATTRS
-#ifdef __POPCNT__
#include <popcntintrin.h>
-#endif
-#endif /* _SMMINTRIN_H */
+#endif /* __SMMINTRIN_H */
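The common thread in this file: the statement-expression macros became plain builtin calls, and the blend/insert/extract operations now map to dedicated builtins whose last argument is the instruction's immediate, so the mask or index must be a compile-time constant. A short sketch, assuming -msse4.1:

#include <smmintrin.h>

__m128 mix(__m128 a, __m128 b) {
  return _mm_blend_ps(a, b, 0x5);  /* mask bits 0 and 2 set: elements 0 and 2 from b */
}

int low_byte(__m128i v) {
  return _mm_extract_epi8(v, 0);   /* element 0, zero-extended */
}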
diff --git a/lib/Headers/stdint.h b/lib/Headers/stdint.h
index c48815314b51..0afcca3a9daa 100644
--- a/lib/Headers/stdint.h
+++ b/lib/Headers/stdint.h
@@ -88,7 +88,7 @@
*
* To accommodate targets that are missing types that are exactly 8, 16, 32, or
* 64 bits wide, this implementation takes an approach of cascading
- * redefintions, redefining __int_leastN_t to successively smaller exact-width
+ * redefinitions, redefining __int_leastN_t to successively smaller exact-width
* types. It is therefore important that the types are defined in order of
* descending widths.
*
@@ -461,7 +461,7 @@ typedef __UINTMAX_TYPE__ uintmax_t;
* As in the type definitions, this section takes an approach of
* successive-shrinking to determine which limits to use for the standard (8,
* 16, 32, 64) bit widths when they don't have exact representations. It is
- * therefore important that the defintions be kept in order of decending
+ * therefore important that the definitions be kept in order of descending
* widths.
*
* Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
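A simplified sketch of the cascade the corrected comment describes; the real header drives it with successively redefined macros rather than typedef fallbacks, so treat this as illustration only (the my_-prefixed name is hypothetical):

/* Descending widths: if the target lacks an exact 16-bit type, the
 * "least" alias falls through to the narrowest wider type that exists. */
#ifdef __INT16_TYPE__
typedef __INT16_TYPE__ my_int_least16_t;
#elif defined(__INT32_TYPE__)
typedef __INT32_TYPE__ my_int_least16_t;
#elif defined(__INT64_TYPE__)
typedef __INT64_TYPE__ my_int_least16_t;
#endif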
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 042bfc7e3b0d..734cd391be60 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -27,9 +27,10 @@
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,ssse3"), __min_vector_width__(64)))
-/// \brief Computes the absolute value of each of the packed 8-bit signed
+/// Computes the absolute value of each of the packed 8-bit signed
/// integers in the source operand and stores the 8-bit unsigned integer
/// results in the destination.
///
@@ -41,13 +42,13 @@
/// A 64-bit vector of [8 x i8].
/// \returns A 64-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_abs_pi8(__m64 __a)
{
return (__m64)__builtin_ia32_pabsb((__v8qi)__a);
}
-/// \brief Computes the absolute value of each of the packed 8-bit signed
+/// Computes the absolute value of each of the packed 8-bit signed
/// integers in the source operand and stores the 8-bit unsigned integer
/// results in the destination.
///
@@ -65,7 +66,7 @@ _mm_abs_epi8(__m128i __a)
return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
}
-/// \brief Computes the absolute value of each of the packed 16-bit signed
+/// Computes the absolute value of each of the packed 16-bit signed
/// integers in the source operand and stores the 16-bit unsigned integer
/// results in the destination.
///
@@ -77,13 +78,13 @@ _mm_abs_epi8(__m128i __a)
/// A 64-bit vector of [4 x i16].
/// \returns A 64-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_abs_pi16(__m64 __a)
{
return (__m64)__builtin_ia32_pabsw((__v4hi)__a);
}
-/// \brief Computes the absolute value of each of the packed 16-bit signed
+/// Computes the absolute value of each of the packed 16-bit signed
/// integers in the source operand and stores the 16-bit unsigned integer
/// results in the destination.
///
@@ -101,7 +102,7 @@ _mm_abs_epi16(__m128i __a)
return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
}
-/// \brief Computes the absolute value of each of the packed 32-bit signed
+/// Computes the absolute value of each of the packed 32-bit signed
/// integers in the source operand and stores the 32-bit unsigned integer
/// results in the destination.
///
@@ -113,13 +114,13 @@ _mm_abs_epi16(__m128i __a)
/// A 64-bit vector of [2 x i32].
/// \returns A 64-bit integer vector containing the absolute values of the
/// elements in the operand.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_abs_pi32(__m64 __a)
{
return (__m64)__builtin_ia32_pabsd((__v2si)__a);
}
-/// \brief Computes the absolute value of each of the packed 32-bit signed
+/// Computes the absolute value of each of the packed 32-bit signed
/// integers in the source operand and stores the 32-bit unsigned integer
/// results in the destination.
///
@@ -137,7 +138,7 @@ _mm_abs_epi32(__m128i __a)
return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
}
-/// \brief Concatenates the two 128-bit integer vector operands, and
+/// Concatenates the two 128-bit integer vector operands, and
/// right-shifts the result by the number of bytes specified in the immediate
/// operand.
///
@@ -157,11 +158,11 @@ _mm_abs_epi32(__m128i __a)
/// An immediate operand specifying how many bytes to right-shift the result.
/// \returns A 128-bit integer vector containing the concatenated right-shifted
/// value.
-#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+#define _mm_alignr_epi8(a, b, n) \
(__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (n)); })
+ (__v16qi)(__m128i)(b), (n))
-/// \brief Concatenates the two 64-bit integer vector operands, and right-shifts
+/// Concatenates the two 64-bit integer vector operands, and right-shifts
/// the result by the number of bytes specified in the immediate operand.
///
/// \headerfile <x86intrin.h>
@@ -180,10 +181,10 @@ _mm_abs_epi32(__m128i __a)
/// An immediate operand specifying how many bytes to right-shift the result.
/// \returns A 64-bit integer vector containing the concatenated right-shifted
/// value.
-#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
- (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)); })
+#define _mm_alignr_pi8(a, b, n) \
+ (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n))
-/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 128-bit vectors of [8 x i16].
///
/// \headerfile <x86intrin.h>
@@ -206,7 +207,7 @@ _mm_hadd_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 128-bit vectors of [4 x i32].
///
/// \headerfile <x86intrin.h>
@@ -229,7 +230,7 @@ _mm_hadd_epi32(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 64-bit vectors of [4 x i16].
///
/// \headerfile <x86intrin.h>
@@ -246,13 +247,13 @@ _mm_hadd_epi32(__m128i __a, __m128i __b)
/// destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both
/// operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_hadd_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
+/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 64-bit vectors of [2 x i32].
///
/// \headerfile <x86intrin.h>
@@ -269,15 +270,16 @@ _mm_hadd_pi16(__m64 __a, __m64 __b)
/// destination.
/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both
/// operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_hadd_pi32(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
-/// 128-bit vectors of [8 x i16]. Positive sums greater than 7FFFh are
-/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
+/// Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are
+/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
+/// 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -299,9 +301,10 @@ _mm_hadds_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Horizontally adds the adjacent pairs of values contained in 2 packed
-/// 64-bit vectors of [4 x i16]. Positive sums greater than 7FFFh are
-/// saturated to 7FFFh. Negative sums less than 8000h are saturated to 8000h.
+/// Horizontally adds the adjacent pairs of values contained in 2 packed
+/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are
+/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
+/// 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -317,13 +320,13 @@ _mm_hadds_epi16(__m128i __a, __m128i __b)
/// destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
/// sums of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_hadds_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 128-bit vectors of [8 x i16].
///
/// \headerfile <x86intrin.h>
@@ -346,7 +349,7 @@ _mm_hsub_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 128-bit vectors of [4 x i32].
///
/// \headerfile <x86intrin.h>
@@ -369,7 +372,7 @@ _mm_hsub_epi32(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 64-bit vectors of [4 x i16].
///
/// \headerfile <x86intrin.h>
@@ -386,13 +389,13 @@ _mm_hsub_epi32(__m128i __a, __m128i __b)
/// the destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences
/// of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_hsub_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 64-bit vectors of [2 x i32].
///
/// \headerfile <x86intrin.h>
@@ -409,16 +412,16 @@ _mm_hsub_pi16(__m64 __a, __m64 __b)
/// the destination.
/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences
/// of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_hsub_pi32(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 128-bit vectors of [8 x i16]. Positive differences greater than
-/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
-/// saturated to 8000h.
+/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are
+/// saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -440,10 +443,10 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Horizontally subtracts the adjacent pairs of values contained in 2
+/// Horizontally subtracts the adjacent pairs of values contained in 2
/// packed 64-bit vectors of [4 x i16]. Positive differences greater than
-/// 7FFFh are saturated to 7FFFh. Negative differences less than 8000h are
-/// saturated to 8000h.
+/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are
+/// saturated to 0x8000.
///
/// \headerfile <x86intrin.h>
///
@@ -459,13 +462,13 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b)
/// the destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
/// differences of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_hsubs_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer
+/// Multiplies corresponding pairs of packed 8-bit unsigned integer
/// values contained in the first source operand and packed 8-bit signed
/// integer values contained in the second source operand, adds pairs of
/// contiguous products with signed saturation, and writes the 16-bit sums to
@@ -499,7 +502,7 @@ _mm_maddubs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Multiplies corresponding pairs of packed 8-bit unsigned integer
+/// Multiplies corresponding pairs of packed 8-bit unsigned integer
/// values contained in the first source operand and packed 8-bit signed
/// integer values contained in the second source operand, adds pairs of
/// contiguous products with signed saturation, and writes the 16-bit sums to
@@ -523,13 +526,13 @@ _mm_maddubs_epi16(__m128i __a, __m128i __b)
/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n
/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n
/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7)
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_maddubs_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b);
}
-/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit
+/// Multiplies packed 16-bit signed integer values, truncates the 32-bit
/// products to the 18 most significant bits by right-shifting, rounds the
/// truncated value by adding 1, and writes bits [16:1] to the destination.
///
@@ -549,7 +552,7 @@ _mm_mulhrs_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief Multiplies packed 16-bit signed integer values, truncates the 32-bit
+/// Multiplies packed 16-bit signed integer values, truncates the 32-bit
/// products to the 18 most significant bits by right-shifting, rounds the
/// truncated value by adding 1, and writes bits [16:1] to the destination.
///
@@ -563,13 +566,13 @@ _mm_mulhrs_epi16(__m128i __a, __m128i __b)
/// A 64-bit vector of [4 x i16] containing one of the source operands.
/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_mulhrs_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Copies the 8-bit integers from a 128-bit integer vector to the
+/// Copies the 8-bit integers from a 128-bit integer vector to the
/// destination or clears 8-bit values in the destination, as specified by
/// the second source operand.
///
@@ -595,7 +598,7 @@ _mm_shuffle_epi8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief Copies the 8-bit integers from a 64-bit integer vector to the
+/// Copies the 8-bit integers from a 64-bit integer vector to the
/// destination or clears 8-bit values in the destination, as specified by
/// the second source operand.
///
@@ -614,13 +617,13 @@ _mm_shuffle_epi8(__m128i __a, __m128i __b)
/// destination. \n
/// Bits [3:0] select the source byte to be copied.
/// \returns A 64-bit integer vector containing the copied or cleared values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_shuffle_pi8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b);
}
-/// \brief For each 8-bit integer in the first source operand, perform one of
+/// For each 8-bit integer in the first source operand, perform one of
/// the following actions as specified by the second source operand.
///
/// If the byte in the second source is negative, calculate the two's
@@ -646,7 +649,7 @@ _mm_sign_epi8(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b);
}
-/// \brief For each 16-bit integer in the first source operand, perform one of
+/// For each 16-bit integer in the first source operand, perform one of
/// the following actions as specified by the second source operand.
///
/// If the word in the second source is negative, calculate the two's
@@ -672,7 +675,7 @@ _mm_sign_epi16(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b);
}
-/// \brief For each 32-bit integer in the first source operand, perform one of
+/// For each 32-bit integer in the first source operand, perform one of
/// the following actions as specified by the second source operand.
///
/// If the doubleword in the second source is negative, calculate the two's
@@ -698,7 +701,7 @@ _mm_sign_epi32(__m128i __a, __m128i __b)
return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b);
}
-/// \brief For each 8-bit integer in the first source operand, perform one of
+/// For each 8-bit integer in the first source operand, perform one of
/// the following actions as specified by the second source operand.
///
/// If the byte in the second source is negative, calculate the two's
@@ -718,13 +721,13 @@ _mm_sign_epi32(__m128i __a, __m128i __b)
/// A 64-bit integer vector containing control bytes corresponding to
/// positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_sign_pi8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b);
}
-/// \brief For each 16-bit integer in the first source operand, perform one of
+/// For each 16-bit integer in the first source operand, perform one of
/// the following actions as specified by the second source operand.
///
/// If the word in the second source is negative, calculate the two's
@@ -744,13 +747,13 @@ _mm_sign_pi8(__m64 __a, __m64 __b)
/// A 64-bit integer vector containing control words corresponding to
/// positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_sign_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief For each 32-bit integer in the first source operand, perform one of
+/// For each 32-bit integer in the first source operand, perform one of
/// the following actions as specified by the second source operand.
///
/// If the doubleword in the second source is negative, calculate the two's
@@ -770,12 +773,13 @@ _mm_sign_pi16(__m64 __a, __m64 __b)
/// A 64-bit integer vector containing two control doublewords corresponding
/// to positions in the destination.
/// \returns A 64-bit integer vector containing the resultant values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_sign_pi32(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b);
}
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_MMX
#endif /* __TMMINTRIN_H */
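
The saturation wording introduced above ("0x7FFF"/"0x8000" rather than "7FFFh"/"8000h") is easy to check empirically. A small sketch, assuming an SSSE3 target (-mssse3):

#include <tmmintrin.h>
#include <stdio.h>

int main(void)
{
  /* lanes 0 and 1 are both 0x7000, so their horizontal sum overflows */
  __m128i a = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0x7000, 0x7000);
  __m128i r = _mm_hadds_epi16(a, a);
  int lane0 = _mm_extract_epi16(r, 0);   /* zero-extended 16-bit lane */
  printf("%#x\n", lane0);                /* prints 0x7fff: saturated */
  return 0;
}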
diff --git a/lib/Headers/vaesintrin.h b/lib/Headers/vaesintrin.h
index efbb8a565292..e4174bb82ff0 100644
--- a/lib/Headers/vaesintrin.h
+++ b/lib/Headers/vaesintrin.h
@@ -29,10 +29,10 @@
#define __VAESINTRIN_H
/* Default attributes for YMM forms. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes"), __min_vector_width__(256)))
/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512f,vaes")))
+#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512f,vaes"), __min_vector_width__(512)))
static __inline__ __m256i __DEFAULT_FN_ATTRS
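
The recurring change in these header hunks is the addition of __min_vector_width__ to the default attribute sets. As I read it (an interpretation, not stated in the patch), the attribute records the narrowest vector register width a function's body requires, so the x86 backend can honor options such as -mprefer-vector-width=128 elsewhere without miscompiling these always-inline wrappers. The same pattern applied to a hypothetical wrapper:

#include <immintrin.h>

/* MY_AVX2_ATTRS is a made-up name mirroring the __DEFAULT_FN_ATTRS edits */
#define MY_AVX2_ATTRS \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx2"), __min_vector_width__(256)))

static __inline__ __m256i MY_AVX2_ATTRS
my_add_epi32(__m256i __a, __m256i __b)
{
  return (__m256i)((__v8si)__a + (__v8si)__b);
}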
diff --git a/lib/Headers/vpclmulqdqintrin.h b/lib/Headers/vpclmulqdqintrin.h
index 21cda2221007..86174a457e11 100644
--- a/lib/Headers/vpclmulqdqintrin.h
+++ b/lib/Headers/vpclmulqdqintrin.h
@@ -28,15 +28,15 @@
#ifndef __VPCLMULQDQINTRIN_H
#define __VPCLMULQDQINTRIN_H
-#define _mm256_clmulepi64_epi128(A, B, I) __extension__ ({ \
+#define _mm256_clmulepi64_epi128(A, B, I) \
(__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A), \
(__v4di)(__m256i)(B), \
- (char)(I)); })
+ (char)(I))
-#define _mm512_clmulepi64_epi128(A, B, I) __extension__ ({ \
+#define _mm512_clmulepi64_epi128(A, B, I) \
(__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \
(__v8di)(__m512i)(B), \
- (char)(I)); })
+ (char)(I))
-#endif // __VPCLMULQDQINTRIN_H
+#endif /* __VPCLMULQDQINTRIN_H */
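
These macro rewrites drop the GNU statement-expression wrapper (__extension__ ({ ... })) in favor of a plain parenthesized expression, which, presumably the motivation, also works in contexts where statement expressions are rejected, such as strict-conformance builds. The shape of the change, shown with hypothetical macro names around a builtin that appears earlier in this patch:

#define OLD_ALIGNR(a, b, n) __extension__ ({ \
  (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
                                     (__v16qi)(__m128i)(b), (n)); })

#define NEW_ALIGNR(a, b, n) \
  ((__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
                                      (__v16qi)(__m128i)(b), (n)))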
diff --git a/lib/Headers/waitpkgintrin.h b/lib/Headers/waitpkgintrin.h
new file mode 100644
index 000000000000..e29d6cfa5a51
--- /dev/null
+++ b/lib/Headers/waitpkgintrin.h
@@ -0,0 +1,56 @@
+/*===----------------------- waitpkgintrin.h - WAITPKG --------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <waitpkgintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __WAITPKGINTRIN_H
+#define __WAITPKGINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("waitpkg")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_umonitor (void * __address)
+{
+ __builtin_ia32_umonitor (__address);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_umwait (unsigned int __control, unsigned long long __counter)
+{
+ return __builtin_ia32_umwait (__control,
+ (unsigned int)(__counter >> 32), (unsigned int)__counter);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_tpause (unsigned int __control, unsigned long long __counter)
+{
+ return __builtin_ia32_tpause (__control,
+ (unsigned int)(__counter >> 32), (unsigned int)__counter);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __WAITPKGINTRIN_H */
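
The three new WAITPKG intrinsics are meant to compose: _umonitor arms an address-range monitor, and _umwait parks the core until a store to that range, a TSC deadline, or an OS-imposed cap wakes it, with the return value reporting the instruction's carry flag. A usage sketch under those assumptions, for a -mwaitpkg target; wait_for_store is a made-up helper:

#include <x86intrin.h>

static unsigned char
wait_for_store(volatile unsigned int *flag, unsigned long long tsc_deadline)
{
  _umonitor((void *)flag);     /* arm the monitor on the flag's cache line */
  if (*flag)                   /* re-check to close the lost-wakeup window */
    return 0;
  return _umwait(0, tsc_deadline);   /* 0 selects the deeper C0.2 state */
}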
diff --git a/lib/Headers/wbnoinvdintrin.h b/lib/Headers/wbnoinvdintrin.h
new file mode 100644
index 000000000000..cad83368dbe7
--- /dev/null
+++ b/lib/Headers/wbnoinvdintrin.h
@@ -0,0 +1,38 @@
+/*===-------------- wbnoinvdintrin.h - wbnoinvd intrinsic ------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <wbnoinvdintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __WBNOINVDINTRIN_H
+#define __WBNOINVDINTRIN_H
+
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("wbnoinvd")))
+_wbnoinvd (void)
+{
+ __builtin_ia32_wbnoinvd ();
+}
+
+#endif /* __WBNOINVDINTRIN_H */
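
_wbnoinvd wraps the WBNOINVD instruction, which writes dirty cache lines back without invalidating them; like WBINVD it is privileged, so a sketch like the following only makes sense in ring-0 code built with -mwbnoinvd:

#include <x86intrin.h>

static void flush_caches_keep_lines(void)
{
  _wbnoinvd();   /* write back all caches, keep the lines valid */
}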
diff --git a/lib/Headers/wmmintrin.h b/lib/Headers/wmmintrin.h
index a2d931010aea..569a8d838dad 100644
--- a/lib/Headers/wmmintrin.h
+++ b/lib/Headers/wmmintrin.h
@@ -21,8 +21,8 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef _WMMINTRIN_H
-#define _WMMINTRIN_H
+#ifndef __WMMINTRIN_H
+#define __WMMINTRIN_H
#include <emmintrin.h>
@@ -30,4 +30,4 @@
#include <__wmmintrin_pclmul.h>
-#endif /* _WMMINTRIN_H */
+#endif /* __WMMINTRIN_H */
diff --git a/lib/Headers/x86intrin.h b/lib/Headers/x86intrin.h
index 31ee7b82dd53..728c58c3ebbc 100644
--- a/lib/Headers/x86intrin.h
+++ b/lib/Headers/x86intrin.h
@@ -32,26 +32,6 @@
#include <mm3dnow.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
-#include <bmiintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
-#include <bmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
-#include <lzcntintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
-#include <popcntintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
-#include <rdseedintrin.h>
-#endif
-
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PRFCHW__)
#include <prfchwintrin.h>
#endif
@@ -76,10 +56,6 @@
#include <lwpintrin.h>
#endif
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
-#include <f16cintrin.h>
-#endif
-
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MWAITX__)
#include <mwaitxintrin.h>
#endif
@@ -88,4 +64,5 @@
#include <clzerointrin.h>
#endif
+
#endif /* __X86INTRIN_H */
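
The conditional includes deleted from x86intrin.h (BMI, BMI2, LZCNT, POPCNT, RDSEED, F16C) are presumably reached through <immintrin.h> after this change, so existing code that includes either umbrella header should keep compiling. A sketch of the assumed usage:

#include <immintrin.h>

static int count_bits(unsigned int x)
{
  return _mm_popcnt_u32(x);   /* needs POPCNT enabled, e.g. -mpopcnt */
}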
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index 279c0275d93f..17af17267c83 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -40,9 +40,10 @@ typedef unsigned int __v4su __attribute__((__vector_size__(16)));
#endif
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse"), __min_vector_width__(64)))
-/// \brief Adds the 32-bit float values in the low-order bits of the operands.
+/// Adds the 32-bit float values in the low-order bits of the operands.
///
/// \headerfile <x86intrin.h>
///
@@ -64,7 +65,7 @@ _mm_add_ss(__m128 __a, __m128 __b)
return __a;
}
-/// \brief Adds two 128-bit vectors of [4 x float], and returns the results of
+/// Adds two 128-bit vectors of [4 x float], and returns the results of
/// the addition.
///
/// \headerfile <x86intrin.h>
@@ -83,7 +84,7 @@ _mm_add_ps(__m128 __a, __m128 __b)
return (__m128)((__v4sf)__a + (__v4sf)__b);
}
-/// \brief Subtracts the 32-bit float value in the low-order bits of the second
+/// Subtracts the 32-bit float value in the low-order bits of the second
/// operand from the corresponding value in the first operand.
///
/// \headerfile <x86intrin.h>
@@ -106,7 +107,7 @@ _mm_sub_ss(__m128 __a, __m128 __b)
return __a;
}
-/// \brief Subtracts each of the values of the second operand from the first
+/// Subtracts each of the values of the second operand from the first
/// operand, both of which are 128-bit vectors of [4 x float] and returns
/// the results of the subtraction.
///
@@ -126,7 +127,7 @@ _mm_sub_ps(__m128 __a, __m128 __b)
return (__m128)((__v4sf)__a - (__v4sf)__b);
}
-/// \brief Multiplies two 32-bit float values in the low-order bits of the
+/// Multiplies two 32-bit float values in the low-order bits of the
/// operands.
///
/// \headerfile <x86intrin.h>
@@ -149,7 +150,7 @@ _mm_mul_ss(__m128 __a, __m128 __b)
return __a;
}
-/// \brief Multiplies two 128-bit vectors of [4 x float] and returns the
+/// Multiplies two 128-bit vectors of [4 x float] and returns the
/// results of the multiplication.
///
/// \headerfile <x86intrin.h>
@@ -168,7 +169,7 @@ _mm_mul_ps(__m128 __a, __m128 __b)
return (__m128)((__v4sf)__a * (__v4sf)__b);
}
-/// \brief Divides the value in the low-order 32 bits of the first operand by
+/// Divides the value in the low-order 32 bits of the first operand by
/// the corresponding value in the second operand.
///
/// \headerfile <x86intrin.h>
@@ -191,7 +192,7 @@ _mm_div_ss(__m128 __a, __m128 __b)
return __a;
}
-/// \brief Divides two 128-bit vectors of [4 x float].
+/// Divides two 128-bit vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -209,7 +210,7 @@ _mm_div_ps(__m128 __a, __m128 __b)
return (__m128)((__v4sf)__a / (__v4sf)__b);
}
-/// \brief Calculates the square root of the value stored in the low-order bits
+/// Calculates the square root of the value stored in the low-order bits
/// of a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -224,11 +225,10 @@ _mm_div_ps(__m128 __a, __m128 __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_sqrt_ss(__m128 __a)
{
- __m128 __c = __builtin_ia32_sqrtss((__v4sf)__a);
- return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+ return (__m128)__builtin_ia32_sqrtss((__v4sf)__a);
}
-/// \brief Calculates the square roots of the values stored in a 128-bit vector
+/// Calculates the square roots of the values stored in a 128-bit vector
/// of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -245,7 +245,7 @@ _mm_sqrt_ps(__m128 __a)
return __builtin_ia32_sqrtps((__v4sf)__a);
}
-/// \brief Calculates the approximate reciprocal of the value stored in the
+/// Calculates the approximate reciprocal of the value stored in the
/// low-order bits of a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -260,11 +260,10 @@ _mm_sqrt_ps(__m128 __a)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_rcp_ss(__m128 __a)
{
- __m128 __c = __builtin_ia32_rcpss((__v4sf)__a);
- return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+ return (__m128)__builtin_ia32_rcpss((__v4sf)__a);
}
-/// \brief Calculates the approximate reciprocals of the values stored in a
+/// Calculates the approximate reciprocals of the values stored in a
/// 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -278,10 +277,10 @@ _mm_rcp_ss(__m128 __a)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_rcp_ps(__m128 __a)
{
- return __builtin_ia32_rcpps((__v4sf)__a);
+ return (__m128)__builtin_ia32_rcpps((__v4sf)__a);
}
-/// \brief Calculates the approximate reciprocal of the square root of the value
+/// Calculates the approximate reciprocal of the square root of the value
/// stored in the low-order bits of a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -297,11 +296,10 @@ _mm_rcp_ps(__m128 __a)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_rsqrt_ss(__m128 __a)
{
- __m128 __c = __builtin_ia32_rsqrtss((__v4sf)__a);
- return (__m128) { __c[0], __a[1], __a[2], __a[3] };
+ return __builtin_ia32_rsqrtss((__v4sf)__a);
}
-/// \brief Calculates the approximate reciprocals of the square roots of the
+/// Calculates the approximate reciprocals of the square roots of the
/// values stored in a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -318,7 +316,7 @@ _mm_rsqrt_ps(__m128 __a)
return __builtin_ia32_rsqrtps((__v4sf)__a);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands and returns the lesser value in the low-order bits of the
/// vector of [4 x float].
///
@@ -341,7 +339,7 @@ _mm_min_ss(__m128 __a, __m128 __b)
return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 128-bit vectors of [4 x float] and returns the lesser
+/// Compares two 128-bit vectors of [4 x float] and returns the lesser
/// of each pair of values.
///
/// \headerfile <x86intrin.h>
@@ -360,7 +358,7 @@ _mm_min_ps(__m128 __a, __m128 __b)
return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands and returns the greater value in the low-order bits of a 128-bit
/// vector of [4 x float].
///
@@ -383,7 +381,7 @@ _mm_max_ss(__m128 __a, __m128 __b)
return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 128-bit vectors of [4 x float] and returns the greater
+/// Compares two 128-bit vectors of [4 x float] and returns the greater
/// of each pair of values.
///
/// \headerfile <x86intrin.h>
@@ -402,7 +400,7 @@ _mm_max_ps(__m128 __a, __m128 __b)
return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float].
+/// Performs a bitwise AND of two 128-bit vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -420,7 +418,7 @@ _mm_and_ps(__m128 __a, __m128 __b)
return (__m128)((__v4su)__a & (__v4su)__b);
}
-/// \brief Performs a bitwise AND of two 128-bit vectors of [4 x float], using
+/// Performs a bitwise AND of two 128-bit vectors of [4 x float], using
/// the one's complement of the values contained in the first source
/// operand.
///
@@ -442,7 +440,7 @@ _mm_andnot_ps(__m128 __a, __m128 __b)
return (__m128)(~(__v4su)__a & (__v4su)__b);
}
-/// \brief Performs a bitwise OR of two 128-bit vectors of [4 x float].
+/// Performs a bitwise OR of two 128-bit vectors of [4 x float].
///
/// \headerfile <x86intrin.h>
///
@@ -460,7 +458,7 @@ _mm_or_ps(__m128 __a, __m128 __b)
return (__m128)((__v4su)__a | (__v4su)__b);
}
-/// \brief Performs a bitwise exclusive OR of two 128-bit vectors of
+/// Performs a bitwise exclusive OR of two 128-bit vectors of
/// [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -479,7 +477,7 @@ _mm_xor_ps(__m128 __a, __m128 __b)
return (__m128)((__v4su)__a ^ (__v4su)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands for equality and returns the result of the comparison in the
/// low-order bits of a vector [4 x float].
///
@@ -501,7 +499,7 @@ _mm_cmpeq_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] for equality.
///
/// \headerfile <x86intrin.h>
@@ -519,7 +517,7 @@ _mm_cmpeq_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is less than the
/// corresponding value in the second operand and returns the result of the
/// comparison in the low-order bits of a vector of [4 x float].
@@ -542,7 +540,7 @@ _mm_cmplt_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are less than those in the second operand.
///
@@ -561,7 +559,7 @@ _mm_cmplt_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is less than or
/// equal to the corresponding value in the second operand and returns the
/// result of the comparison in the low-order bits of a vector of
@@ -585,7 +583,7 @@ _mm_cmple_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are less than or equal to those in the second operand.
///
@@ -604,7 +602,7 @@ _mm_cmple_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is greater than
/// the corresponding value in the second operand and returns the result of
/// the comparison in the low-order bits of a vector of [4 x float].
@@ -629,7 +627,7 @@ _mm_cmpgt_ss(__m128 __a, __m128 __b)
4, 1, 2, 3);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are greater than those in the second operand.
///
@@ -648,7 +646,7 @@ _mm_cmpgt_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is greater than
/// or equal to the corresponding value in the second operand and returns
/// the result of the comparison in the low-order bits of a vector of
@@ -674,7 +672,7 @@ _mm_cmpge_ss(__m128 __a, __m128 __b)
4, 1, 2, 3);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are greater than or equal to those in the second operand.
///
@@ -693,7 +691,7 @@ _mm_cmpge_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands for inequality and returns the result of the comparison in the
/// low-order bits of a vector of [4 x float].
///
@@ -716,7 +714,7 @@ _mm_cmpneq_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] for inequality.
///
/// \headerfile <x86intrin.h>
@@ -735,7 +733,7 @@ _mm_cmpneq_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not less than
/// the corresponding value in the second operand and returns the result of
/// the comparison in the low-order bits of a vector of [4 x float].
@@ -759,7 +757,7 @@ _mm_cmpnlt_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not less than those in the second operand.
///
@@ -779,7 +777,7 @@ _mm_cmpnlt_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not less than
/// or equal to the corresponding value in the second operand and returns
/// the result of the comparison in the low-order bits of a vector of
@@ -804,7 +802,7 @@ _mm_cmpnle_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not less than or equal to those in the second operand.
///
@@ -824,7 +822,7 @@ _mm_cmpnle_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not greater
/// than the corresponding value in the second operand and returns the
/// result of the comparison in the low-order bits of a vector of
@@ -851,7 +849,7 @@ _mm_cmpngt_ss(__m128 __a, __m128 __b)
4, 1, 2, 3);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not greater than those in the second operand.
///
@@ -871,7 +869,7 @@ _mm_cmpngt_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is not greater
/// than or equal to the corresponding value in the second operand and
/// returns the result of the comparison in the low-order bits of a vector
@@ -898,7 +896,7 @@ _mm_cmpnge_ss(__m128 __a, __m128 __b)
4, 1, 2, 3);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are not greater than or equal to those in the second operand.
///
@@ -918,7 +916,7 @@ _mm_cmpnge_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is ordered with
/// respect to the corresponding value in the second operand and returns the
/// result of the comparison in the low-order bits of a vector of
@@ -943,7 +941,7 @@ _mm_cmpord_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are ordered with respect to those in the second operand.
///
@@ -963,7 +961,7 @@ _mm_cmpord_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the value in the first operand is unordered
/// with respect to the corresponding value in the second operand and
/// returns the result of the comparison in the low-order bits of a vector
@@ -988,7 +986,7 @@ _mm_cmpunord_ss(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares each of the corresponding 32-bit float values of the
+/// Compares each of the corresponding 32-bit float values of the
/// 128-bit vectors of [4 x float] to determine if the values in the first
/// operand are unordered with respect to those in the second operand.
///
@@ -1008,9 +1006,11 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b)
return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands for equality and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c>
@@ -1022,17 +1022,20 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the
+/// two lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comieq_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is less than the second
/// operand and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c>
@@ -1044,17 +1047,20 @@ _mm_comieq_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comilt_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is less than or equal to the
/// second operand and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
@@ -1065,17 +1071,20 @@ _mm_comilt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comile_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is greater than the second
/// operand and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
@@ -1086,17 +1095,20 @@ _mm_comile_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the
+/// two lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comigt_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is greater than or equal to
/// the second operand and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
@@ -1107,17 +1119,20 @@ _mm_comigt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comige_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Compares two 32-bit float values in the low-order bits of both
+/// Compares two 32-bit float values in the low-order bits of both
/// operands to determine if the first operand is not equal to the second
/// operand and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 1 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
@@ -1128,17 +1143,20 @@ _mm_comige_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the
+/// two lower 32-bit values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comineq_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs an unordered comparison of two 32-bit float values using
+/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine equality and returns
/// the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
@@ -1149,17 +1167,20 @@ _mm_comineq_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomieq_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs an unordered comparison of two 32-bit float values using
+/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
/// less than the second operand and returns the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
@@ -1170,18 +1191,21 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomilt_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs an unordered comparison of two 32-bit float values using
+/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
/// less than or equal to the second operand and returns the result of the
/// comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
@@ -1192,18 +1216,21 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomile_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs an unordered comparison of two 32-bit float values using
+/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
/// greater than the second operand and returns the result of the
/// comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
@@ -1214,18 +1241,21 @@ _mm_ucomile_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomigt_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs an unordered comparison of two 32-bit float values using
+/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine if the first operand is
/// greater than or equal to the second operand and returns the result of
/// the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 0 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
@@ -1236,17 +1266,20 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomige_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b);
}
-/// \brief Performs an unordered comparison of two 32-bit float values using
+/// Performs an unordered comparison of two 32-bit float values using
/// the low-order bits of both operands to determine inequality and returns
/// the result of the comparison.
///
+/// If either of the two lower 32-bit values is NaN, 1 is returned.
+///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
@@ -1257,14 +1290,15 @@ _mm_ucomige_ss(__m128 __a, __m128 __b)
/// \param __b
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
/// used in the comparison.
-/// \returns An integer containing the comparison results.
+/// \returns An integer containing the comparison results. If either of the two
+/// lower 32-bit values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomineq_ss(__m128 __a, __m128 __b)
{
return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b);
}
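
The NaN notes threaded through the COMISS/UCOMISS documentation above are observable directly: with a NaN in either low lane, the equality-style predicates return 0 and the inequality ones return 1. A minimal check, assuming an SSE target:

#include <xmmintrin.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
  __m128 a = _mm_set_ss(NAN);
  __m128 b = _mm_set_ss(1.0f);
  /* prints "0 1": unordered eq is 0, unordered neq is 1 */
  printf("%d %d\n", _mm_ucomieq_ss(a, b), _mm_ucomineq_ss(a, b));
  return 0;
}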
-/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 32-bit integer.
///
/// \headerfile <x86intrin.h>
@@ -1282,7 +1316,7 @@ _mm_cvtss_si32(__m128 __a)
return __builtin_ia32_cvtss2si((__v4sf)__a);
}
-/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 32-bit integer.
///
/// \headerfile <x86intrin.h>
@@ -1302,7 +1336,7 @@ _mm_cvt_ss2si(__m128 __a)
#ifdef __x86_64__
-/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 64-bit integer.
///
/// \headerfile <x86intrin.h>
@@ -1322,7 +1356,7 @@ _mm_cvtss_si64(__m128 __a)
#endif
-/// \brief Converts two low-order float values in a 128-bit vector of
+/// Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32].
///
/// \headerfile <x86intrin.h>
@@ -1332,13 +1366,13 @@ _mm_cvtss_si64(__m128 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvtps_pi32(__m128 __a)
{
return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a);
}
-/// \brief Converts two low-order float values in a 128-bit vector of
+/// Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32].
///
/// \headerfile <x86intrin.h>
@@ -1348,13 +1382,13 @@ _mm_cvtps_pi32(__m128 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvt_ps2pi(__m128 __a)
{
return _mm_cvtps_pi32(__a);
}
-/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 32-bit integer, truncating the result when it is
/// inexact.
///
@@ -1373,7 +1407,7 @@ _mm_cvttss_si32(__m128 __a)
return __builtin_ia32_cvttss2si((__v4sf)__a);
}
-/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 32-bit integer, truncating the result when it is
/// inexact.
///
@@ -1393,7 +1427,7 @@ _mm_cvtt_ss2si(__m128 __a)
}
#ifdef __x86_64__
-/// \brief Converts a float value contained in the lower 32 bits of a vector of
+/// Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 64-bit integer, truncating the result when it is
/// inexact.
///
@@ -1413,7 +1447,7 @@ _mm_cvttss_si64(__m128 __a)
}
#endif
-/// \brief Converts two low-order float values in a 128-bit vector of
+/// Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result
/// when it is inexact.
///
@@ -1425,13 +1459,13 @@ _mm_cvttss_si64(__m128 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvttps_pi32(__m128 __a)
{
return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a);
}
-/// \brief Converts two low-order float values in a 128-bit vector of [4 x
+/// Converts two low-order float values in a 128-bit vector of [4 x
/// float] into a 64-bit vector of [2 x i32], truncating the result when it
/// is inexact.
///
@@ -1442,13 +1476,13 @@ _mm_cvttps_pi32(__m128 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 64-bit integer vector containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvtt_ps2pi(__m128 __a)
{
return _mm_cvttps_pi32(__a);
}
-/// \brief Converts a 32-bit signed integer value into a floating point value
+/// Converts a 32-bit signed integer value into a floating point value
/// and writes it to the lower 32 bits of the destination. The remaining
/// higher order elements of the destination vector are copied from the
/// corresponding elements in the first operand.
@@ -1471,7 +1505,7 @@ _mm_cvtsi32_ss(__m128 __a, int __b)
return __a;
}
-/// \brief Converts a 32-bit signed integer value into a floating point value
+/// Converts a 32-bit signed integer value into a floating point value
/// and writes it to the lower 32 bits of the destination. The remaining
/// higher order elements of the destination are copied from the
/// corresponding elements in the first operand.
@@ -1495,7 +1529,7 @@ _mm_cvt_si2ss(__m128 __a, int __b)
#ifdef __x86_64__
-/// \brief Converts a 64-bit signed integer value into a floating point value
+/// Converts a 64-bit signed integer value into a floating point value
/// and writes it to the lower 32 bits of the destination. The remaining
/// higher order elements of the destination are copied from the
/// corresponding elements in the first operand.
@@ -1520,7 +1554,7 @@ _mm_cvtsi64_ss(__m128 __a, long long __b)
#endif
-/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two
+/// Converts two elements of a 64-bit vector of [2 x i32] into two
/// floating point values and writes them to the lower 64-bits of the
/// destination. The remaining higher order elements of the destination are
/// copied from the corresponding elements in the first operand.
@@ -1537,13 +1571,13 @@ _mm_cvtsi64_ss(__m128 __a, long long __b)
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// converted value of the second operand. The upper 64 bits are copied from
/// the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpi32_ps(__m128 __a, __m64 __b)
{
return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b);
}
-/// \brief Converts two elements of a 64-bit vector of [2 x i32] into two
+/// Converts two elements of a 64-bit vector of [2 x i32] into two
/// floating point values and writes them to the lower 64-bits of the
/// destination. The remaining higher order elements of the destination are
/// copied from the corresponding elements in the first operand.
@@ -1560,18 +1594,18 @@ _mm_cvtpi32_ps(__m128 __a, __m64 __b)
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// converted value from the second operand. The upper 64 bits are copied
/// from the upper 64 bits of the first operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvt_pi2ps(__m128 __a, __m64 __b)
{
return _mm_cvtpi32_ps(__a, __b);
}
-/// \brief Extracts a float value contained in the lower 32 bits of a vector of
+/// Extracts a float value contained in the lower 32 bits of a vector of
/// [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
+/// This intrinsic has no corresponding instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1583,7 +1617,7 @@ _mm_cvtss_f32(__m128 __a)
return __a[0];
}
-/// \brief Loads two packed float values from the address \a __p into the
+/// Loads two packed float values from the address \a __p into the
/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits
/// are copied from the low-order bits of the first operand.
///
@@ -1610,7 +1644,7 @@ _mm_loadh_pi(__m128 __a, const __m64 *__p)
return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
}
-/// \brief Loads two packed float values from the address \a __p into the
+/// Loads two packed float values from the address \a __p into the
/// low-order bits of a 128-bit vector of [4 x float]. The high-order bits
/// are copied from the high-order bits of the first operand.
///
@@ -1637,7 +1671,7 @@ _mm_loadl_pi(__m128 __a, const __m64 *__p)
return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3);
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
/// 32 bits of the vector are initialized with the single-precision
/// floating-point value loaded from a specified memory location. The upper
/// 96 bits are set to zero.
@@ -1659,15 +1693,15 @@ _mm_load_ss(const float *__p)
float __u;
} __attribute__((__packed__, __may_alias__));
float __u = ((struct __mm_load_ss_struct*)__p)->__u;
- return (__m128){ __u, 0, 0, 0 };
+ return __extension__ (__m128){ __u, 0, 0, 0 };
}
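The __extension__ marker here presumably silences -pedantic diagnostics about compound literals of vector type; callers are unaffected. A minimal caller-side sketch (not from the patch; assumes an SSE target, e.g. -msse):

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
      float f = 3.5f;
      __m128 v = _mm_load_ss(&f);       /* lane 0 = 3.5f, lanes 1-3 = 0.0f */
      printf("%f\n", _mm_cvtss_f32(v)); /* prints 3.500000 */
      return 0;
    }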
-/// \brief Loads a 32-bit float value and duplicates it to all four vector
+/// Loads a 32-bit float value and duplicates it to all four vector
/// elements of a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS + shuffling </c>
+/// This intrinsic corresponds to the <c> VBROADCASTSS / MOVSS + shuffling </c>
/// instruction.
///
/// \param __p
@@ -1681,12 +1715,12 @@ _mm_load1_ps(const float *__p)
float __u;
} __attribute__((__packed__, __may_alias__));
float __u = ((struct __mm_load1_ps_struct*)__p)->__u;
- return (__m128){ __u, __u, __u, __u };
+ return __extension__ (__m128){ __u, __u, __u, __u };
}
#define _mm_load_ps1(p) _mm_load1_ps(p)
-/// \brief Loads a 128-bit floating-point vector of [4 x float] from an aligned
+/// Loads a 128-bit floating-point vector of [4 x float] from an aligned
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -1696,14 +1730,14 @@ _mm_load1_ps(const float *__p)
/// \param __p
/// A pointer to a 128-bit memory location. The address of the memory
/// location has to be 128-bit aligned.
-/// \returns A 128-bit vector of [4 x float] containing the loaded valus.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_load_ps(const float *__p)
{
return *(__m128*)__p;
}
-/// \brief Loads a 128-bit floating-point vector of [4 x float] from an
+/// Loads a 128-bit floating-point vector of [4 x float] from an
/// unaligned memory location.
///
/// \headerfile <x86intrin.h>
@@ -1723,7 +1757,7 @@ _mm_loadu_ps(const float *__p)
return ((struct __loadu_ps*)__p)->__v;
}
-/// \brief Loads four packed float values, in reverse order, from an aligned
+/// Loads four packed float values, in reverse order, from an aligned
/// memory location to 32-bit elements in a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -1743,7 +1777,7 @@ _mm_loadr_ps(const float *__p)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0);
}
-/// \brief Create a 128-bit vector of [4 x float] with undefined values.
+/// Create a 128-bit vector of [4 x float] with undefined values.
///
/// \headerfile <x86intrin.h>
///
@@ -1756,7 +1790,7 @@ _mm_undefined_ps(void)
return (__m128)__builtin_ia32_undef128();
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
/// 32 bits of the vector are initialized with the specified single-precision
/// floating-point value. The upper 96 bits are set to zero.
///
@@ -1773,10 +1807,10 @@ _mm_undefined_ps(void)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_set_ss(float __w)
{
- return (__m128){ __w, 0, 0, 0 };
+ return __extension__ (__m128){ __w, 0, 0, 0 };
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+/// Constructs a 128-bit floating-point vector of [4 x float], with each
/// of the four single-precision floating-point vector elements set to the
/// specified single-precision floating-point value.
///
@@ -1791,11 +1825,11 @@ _mm_set_ss(float __w)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_set1_ps(float __w)
{
- return (__m128){ __w, __w, __w, __w };
+ return __extension__ (__m128){ __w, __w, __w, __w };
}
/* Microsoft specific. */
-/// \brief Constructs a 128-bit floating-point vector of [4 x float], with each
+/// Constructs a 128-bit floating-point vector of [4 x float], with each
/// of the four single-precision floating-point vector elements set to the
/// specified single-precision floating-point value.
///
@@ -1813,7 +1847,7 @@ _mm_set_ps1(float __w)
return _mm_set1_ps(__w);
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float]
+/// Constructs a 128-bit floating-point vector of [4 x float]
/// initialized with the specified single-precision floating-point values.
///
/// \headerfile <x86intrin.h>
@@ -1837,10 +1871,10 @@ _mm_set_ps1(float __w)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_set_ps(float __z, float __y, float __x, float __w)
{
- return (__m128){ __w, __x, __y, __z };
+ return __extension__ (__m128){ __w, __x, __y, __z };
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float],
+/// Constructs a 128-bit floating-point vector of [4 x float],
/// initialized in reverse order with the specified 32-bit single-precision
/// floating-point values.

///
@@ -1865,10 +1899,10 @@ _mm_set_ps(float __z, float __y, float __x, float __w)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_setr_ps(float __z, float __y, float __x, float __w)
{
- return (__m128){ __z, __y, __x, __w };
+ return __extension__ (__m128){ __z, __y, __x, __w };
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float] initialized
+/// Constructs a 128-bit floating-point vector of [4 x float] initialized
/// to zero.
///
/// \headerfile <x86intrin.h>
@@ -1880,15 +1914,15 @@ _mm_setr_ps(float __z, float __y, float __x, float __w)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_setzero_ps(void)
{
- return (__m128){ 0, 0, 0, 0 };
+ return __extension__ (__m128){ 0, 0, 0, 0 };
}
-/// \brief Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
+/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
/// memory location.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VPEXTRQ / MOVQ </c> instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
///
/// \param __p
/// A pointer to a 64-bit memory location.
@@ -1900,7 +1934,7 @@ _mm_storeh_pi(__m64 *__p, __m128 __a)
__builtin_ia32_storehps((__v2si *)__p, (__v4sf)__a);
}
-/// \brief Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
+/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -1917,7 +1951,7 @@ _mm_storel_pi(__m64 *__p, __m128 __a)
__builtin_ia32_storelps((__v2si *)__p, (__v4sf)__a);
}
-/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
+/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a
/// memory location.
///
/// \headerfile <x86intrin.h>
@@ -1937,7 +1971,7 @@ _mm_store_ss(float *__p, __m128 __a)
((struct __mm_store_ss_struct*)__p)->__u = __a[0];
}
-/// \brief Stores a 128-bit vector of [4 x float] to an unaligned memory
+/// Stores a 128-bit vector of [4 x float] to an unaligned memory
/// location.
///
/// \headerfile <x86intrin.h>
@@ -1958,7 +1992,7 @@ _mm_storeu_ps(float *__p, __m128 __a)
((struct __storeu_ps*)__p)->__v = __a;
}
-/// \brief Stores a 128-bit vector of [4 x float] into an aligned memory
+/// Stores a 128-bit vector of [4 x float] into an aligned memory
/// location.
///
/// \headerfile <x86intrin.h>
@@ -1976,7 +2010,7 @@ _mm_store_ps(float *__p, __m128 __a)
*(__m128*)__p = __a;
}
-/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
+/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into
/// four contiguous elements in an aligned memory location.
///
/// \headerfile <x86intrin.h>
@@ -1996,7 +2030,7 @@ _mm_store1_ps(float *__p, __m128 __a)
_mm_store_ps(__p, __a);
}
-/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
+/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into
/// four contiguous elements in an aligned memory location.
///
/// \headerfile <x86intrin.h>
@@ -2012,10 +2046,10 @@ _mm_store1_ps(float *__p, __m128 __a)
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_ps1(float *__p, __m128 __a)
{
- return _mm_store1_ps(__p, __a);
+ _mm_store1_ps(__p, __a);
}
-/// \brief Stores float values from a 128-bit vector of [4 x float] to an
+/// Stores float values from a 128-bit vector of [4 x float] to an
/// aligned memory location in reverse order.
///
/// \headerfile <x86intrin.h>
@@ -2046,7 +2080,7 @@ _mm_storer_ps(float *__p, __m128 __a)
/* FIXME: We have to #define this because "sel" must be a constant integer, and
Sema doesn't do any form of constant propagation yet. */
-/// \brief Loads one cache line of data from the specified address to a location
+/// Loads one cache line of data from the specified address to a location
/// closer to the processor.
///
/// \headerfile <x86intrin.h>
@@ -2074,7 +2108,7 @@ _mm_storer_ps(float *__p, __m128 __a)
((sel) >> 2) & 1, (sel) & 0x3))
#endif
-/// \brief Stores a 64-bit integer in the specified aligned memory location. To
+/// Stores a 64-bit integer in the specified aligned memory location. To
/// minimize caching, the data is flagged as non-temporal (unlikely to be
/// used again soon).
///
@@ -2086,13 +2120,13 @@ _mm_storer_ps(float *__p, __m128 __a)
/// A pointer to an aligned memory location used to store the register value.
/// \param __a
/// A 64-bit integer containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS_MMX
_mm_stream_pi(__m64 *__p, __m64 __a)
{
__builtin_ia32_movntq(__p, __a);
}
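A hedged sketch of the non-temporal store (not part of this patch; assumes an MMX+SSE target, and the caller-supplied destination is 8-byte aligned):

    #include <xmmintrin.h>

    void stream_one(__m64 *buf) {
      __m64 v = _mm_set_pi32(0x12345678, (int)0x9abcdef0);
      _mm_stream_pi(buf, v); /* store bypasses the cache hierarchy */
      _mm_empty();           /* leave MMX state before x87 code runs */
    }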
-/// \brief Moves packed float values from a 128-bit vector of [4 x float] to a
+/// Moves packed float values from a 128-bit vector of [4 x float] to a
/// 128-bit aligned memory location. To minimize caching, the data is flagged
/// as non-temporal (unlikely to be used again soon).
///
@@ -2115,7 +2149,7 @@ _mm_stream_ps(float *__p, __m128 __a)
extern "C" {
#endif
-/// \brief Forces strong memory ordering (serialization) between store
+/// Forces strong memory ordering (serialization) between store
/// instructions preceding this instruction and store instructions following
/// this instruction, ensuring the system completes all previous stores
/// before executing subsequent stores.
@@ -2130,7 +2164,7 @@ void _mm_sfence(void);
} // extern "C"
#endif
-/// \brief Extracts 16-bit element from a 64-bit vector of [4 x i16] and
+/// Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
/// returns it, as specified by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
@@ -2150,10 +2184,10 @@ void _mm_sfence(void);
/// 2: Bits [47:32] are copied to the destination. \n
/// 3: Bits [63:48] are copied to the destination.
/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
-#define _mm_extract_pi16(a, n) __extension__ ({ \
- (int)__builtin_ia32_vec_ext_v4hi((__m64)a, (int)n); })
+#define _mm_extract_pi16(a, n) \
+ (int)__builtin_ia32_vec_ext_v4hi((__m64)a, (int)n)
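With the statement expression gone, the macro expands to a plain integer expression. A small usage sketch (not from the patch; MMX+SSE target assumed):

    #include <xmmintrin.h>

    int extract_example(void) {
      __m64 v = _mm_set_pi16(4, 3, 2, 1); /* lanes, low to high: 1, 2, 3, 4 */
      int e = _mm_extract_pi16(v, 2);     /* bits [47:32] -> 3 */
      _mm_empty();
      return e;
    }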
-/// \brief Copies data from the 64-bit vector of [4 x i16] to the destination,
+/// Copies data from the 64-bit vector of [4 x i16] to the destination,
/// and inserts the lower 16 bits of an integer operand at the 16-bit offset
/// specified by the immediate operand \a n.
///
@@ -2163,7 +2197,7 @@ void _mm_sfence(void);
/// __m64 _mm_insert_pi16(__m64 a, int d, int n);
/// \endcode
///
-/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
+/// This intrinsic corresponds to the <c> PINSRW </c> instruction.
///
/// \param a
/// A 64-bit vector of [4 x i16].
@@ -2181,10 +2215,10 @@ void _mm_sfence(void);
/// bits in operand \a a.
/// \returns A 64-bit integer vector containing the copied packed data from the
/// operands.
-#define _mm_insert_pi16(a, d, n) __extension__ ({ \
- (__m64)__builtin_ia32_vec_set_v4hi((__m64)a, (int)d, (int)n); })
+#define _mm_insert_pi16(a, d, n) \
+ (__m64)__builtin_ia32_vec_set_v4hi((__m64)a, (int)d, (int)n)
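A companion sketch for the insert form (not from the patch; the function wrapper is hypothetical):

    #include <xmmintrin.h>

    __m64 insert_example(void) {
      __m64 v = _mm_set_pi16(4, 3, 2, 1); /* lanes: 1, 2, 3, 4 */
      return _mm_insert_pi16(v, 99, 1);   /* lanes become 1, 99, 3, 4 */
    }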
-/// \brief Compares each of the corresponding packed 16-bit integer values of
+/// Compares each of the corresponding packed 16-bit integer values of
/// the 64-bit integer vectors, and writes the greater value to the
/// corresponding bits in the destination.
///
@@ -2197,13 +2231,13 @@ void _mm_sfence(void);
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_max_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Compares each of the corresponding packed 8-bit unsigned integer
+/// Compares each of the corresponding packed 8-bit unsigned integer
/// values of the 64-bit integer vectors, and writes the greater value to the
/// corresponding bits in the destination.
///
@@ -2216,13 +2250,13 @@ _mm_max_pi16(__m64 __a, __m64 __b)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_max_pu8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b);
}
-/// \brief Compares each of the corresponding packed 16-bit integer values of
+/// Compares each of the corresponding packed 16-bit integer values of
/// the 64-bit integer vectors, and writes the lesser value to the
/// corresponding bits in the destination.
///
@@ -2235,13 +2269,13 @@ _mm_max_pu8(__m64 __a, __m64 __b)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_min_pi16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Compares each of the corresponding packed 8-bit unsigned integer
+/// Compares each of the corresponding packed 8-bit unsigned integer
/// values of the 64-bit integer vectors, and writes the lesser value to the
/// corresponding bits in the destination.
///
@@ -2254,14 +2288,14 @@ _mm_min_pi16(__m64 __a, __m64 __b)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_min_pu8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b);
}
-/// \brief Takes the most significant bit from each 8-bit element in a 64-bit
-/// integer vector to create a 16-bit mask value. Zero-extends the value to
+/// Takes the most significant bit from each 8-bit element in a 64-bit
+/// integer vector to create an 8-bit mask value. Zero-extends the value to
/// a 32-bit integer and writes it to the destination.
///
/// \headerfile <x86intrin.h>
@@ -2270,15 +2304,15 @@ _mm_min_pu8(__m64 __a, __m64 __b)
///
/// \param __a
/// A 64-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bit from each 8-bit element in the operand,
-/// written to bits [15:0].
-static __inline__ int __DEFAULT_FN_ATTRS
+/// \returns The most significant bit from each 8-bit element in \a __a,
+/// written to bits [7:0].
+static __inline__ int __DEFAULT_FN_ATTRS_MMX
_mm_movemask_pi8(__m64 __a)
{
return __builtin_ia32_pmovmskb((__v8qi)__a);
}
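A sketch illustrating the corrected wording (an 8-bit mask in bits [7:0]); not part of the patch, MMX+SSE target assumed:

    #include <xmmintrin.h>

    int movemask_example(void) {
      __m64 v = _mm_set_pi8(-1, 1, -1, 1, -1, 1, -1, 1);
      int m = _mm_movemask_pi8(v); /* 0xAA: one bit per negative byte */
      _mm_empty();
      return m;
    }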
-/// \brief Multiplies packed 16-bit unsigned integer values and writes the
+/// Multiplies packed 16-bit unsigned integer values and writes the
/// high-order 16 bits of each 32-bit product to the corresponding bits in
/// the destination.
///
@@ -2291,13 +2325,13 @@ _mm_movemask_pi8(__m64 __a)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_mulhi_pu16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Shuffles the 4 16-bit integers from a 64-bit integer vector to the
+/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the
/// destination, as specified by the immediate value operand.
///
/// \headerfile <x86intrin.h>
@@ -2328,10 +2362,10 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
/// 10: assigned from bits [47:32] of \a a. \n
/// 11: assigned from bits [63:48] of \a a.
/// \returns A 64-bit integer vector containing the shuffled values.
-#define _mm_shuffle_pi16(a, n) __extension__ ({ \
- (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
+#define _mm_shuffle_pi16(a, n) \
+ (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n))
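The 2-bit selector encoding is easiest to see with a concrete immediate; a hedged sketch (not from the patch):

    #include <xmmintrin.h>

    __m64 reverse_lanes(__m64 v) {
      /* immediate 0x1B = 00 01 10 11b: dest lane i takes source lane 3-i */
      return _mm_shuffle_pi16(v, 0x1B);
    }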
-/// \brief Conditionally copies the values from each 8-bit element in the first
+/// Conditionally copies the values from each 8-bit element in the first
/// 64-bit integer vector operand to the specified memory location, as
/// specified by the most significant bit in the corresponding element in the
/// second 64-bit integer vector operand.
@@ -2354,13 +2388,13 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
/// A pointer to a 64-bit memory location that will receive the conditionally
/// copied integer values. The address of the memory location does not have
/// to be aligned.
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS_MMX
_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
{
__builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p);
}
-/// \brief Computes the rounded averages of the packed unsigned 8-bit integer
+/// Computes the rounded averages of the packed unsigned 8-bit integer
/// values and writes the averages to the corresponding bits in the
/// destination.
///
@@ -2373,13 +2407,13 @@ _mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the averages of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_avg_pu8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b);
}
-/// \brief Computes the rounded averages of the packed unsigned 16-bit integer
+/// Computes the rounded averages of the packed unsigned 16-bit integer
/// values and writes the averages to the corresponding bits in the
/// destination.
///
@@ -2392,13 +2426,13 @@ _mm_avg_pu8(__m64 __a, __m64 __b)
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the averages of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_avg_pu16(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b);
}
-/// \brief Subtracts the corresponding 8-bit unsigned integer values of the two
+/// Subtracts the corresponding 8-bit unsigned integer values of the two
/// 64-bit vector operands and computes the absolute value of each
/// difference. The sum of the 8 absolute differences is then written to
/// bits [15:0] of the destination; the remaining bits [63:16] are cleared.
@@ -2414,7 +2448,7 @@ _mm_avg_pu16(__m64 __a, __m64 __b)
/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the
/// sets of absolute differences between both operands. The upper bits are
/// cleared.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_sad_pu8(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
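}

A worked sketch of the sum-of-absolute-differences behavior (not from the patch; MMX+SSE target assumed):

    #include <xmmintrin.h>

    int sad_example(void) {
      __m64 a = _mm_set_pi8(0, 0, 0, 0, 10, 20, 30, 40);
      __m64 b = _mm_set_pi8(0, 0, 0, 0, 12, 18, 33, 36);
      int s = _mm_cvtsi64_si32(_mm_sad_pu8(a, b)); /* 2+2+3+4 = 11 */
      _mm_empty();
      return s;
    }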
@@ -2424,7 +2458,7 @@ _mm_sad_pu8(__m64 __a, __m64 __b)
extern "C" {
#endif
-/// \brief Returns the contents of the MXCSR register as a 32-bit unsigned
+/// Returns the contents of the MXCSR register as a 32-bit unsigned
/// integer value.
///
/// There are several groups of macros associated with this
@@ -2444,7 +2478,7 @@ extern "C" {
/// <li>
/// For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
-/// _MM_GET_ROUNDING_MODE(x) where x is one of these macros.
+/// _MM_GET_ROUNDING_MODE().
/// </li>
/// <li>
/// For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
@@ -2457,12 +2491,16 @@ extern "C" {
/// </li>
/// </ul>
///
-/// For example, the expression below checks if an overflow exception has
+/// For example, the following expression checks if an overflow exception has
/// occurred:
+/// \code
/// ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW )
+/// \endcode
///
-/// The following example gets the current rounding mode:
+/// The following expression gets the current rounding mode:
+/// \code
/// _MM_GET_ROUNDING_MODE()
+/// \endcode
///
/// \headerfile <x86intrin.h>
///
@@ -2472,7 +2510,7 @@ extern "C" {
/// register.
unsigned int _mm_getcsr(void);
-/// \brief Sets the MXCSR register with the 32-bit unsigned integer value.
+/// Sets the MXCSR register with the 32-bit unsigned integer value.
///
/// There are several groups of macros associated with this intrinsic,
/// including:
@@ -2511,10 +2549,12 @@ unsigned int _mm_getcsr(void);
/// _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP)
///
/// The following example sets the DAZ and FTZ flags:
-/// void setFlags() {
-/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON)
-/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON)
-/// }
+/// \code
+/// void setFlags() {
+/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+/// }
+/// \endcode
///
/// \headerfile <x86intrin.h>
///
@@ -2528,7 +2568,7 @@ void _mm_setcsr(unsigned int __i);
} // extern "C"
#endif
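Since _mm_getcsr() and _mm_setcsr() pair naturally, here is a hedged save/restore sketch (not part of this patch; assumes an SSE target):

    #include <xmmintrin.h>

    void with_truncation(void) {
      unsigned int saved = _mm_getcsr();
      _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
      /* ... SSE arithmetic that should round toward zero ... */
      _mm_setcsr(saved); /* restore the caller's MXCSR */
    }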
-/// \brief Selects 4 float values from the 128-bit operands of [4 x float], as
+/// Selects 4 float values from the 128-bit operands of [4 x float], as
/// specified by the immediate value operand.
///
/// \headerfile <x86intrin.h>
@@ -2564,14 +2604,11 @@ void _mm_setcsr(unsigned int __i);
/// 10: Bits [95:64] copied from the specified operand. \n
/// 11: Bits [127:96] copied from the specified operand.
/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
-#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
- (__m128)__builtin_shufflevector((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
- 0 + (((mask) >> 0) & 0x3), \
- 0 + (((mask) >> 2) & 0x3), \
- 4 + (((mask) >> 4) & 0x3), \
- 4 + (((mask) >> 6) & 0x3)); })
-
-/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of
+#define _mm_shuffle_ps(a, b, mask) \
+ (__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
+ (int)(mask))
+
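Switching to __builtin_ia32_shufps leaves the observable semantics unchanged; a usage sketch with the _MM_SHUFFLE helper (not from the patch):

    #include <xmmintrin.h>

    __m128 shuffle_example(void) {
      __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
      __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
      /* _MM_SHUFFLE(z, y, x, w) packs the four 2-bit selectors; the
         result is { a[w], a[x], b[y], b[z] } = { 1, 2, 7, 8 } here. */
      return _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
    }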
+/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2593,7 +2630,7 @@ _mm_unpackhi_ps(__m128 __a, __m128 __b)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7);
}
-/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of
+/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2615,13 +2652,14 @@ _mm_unpacklo_ps(__m128 __a, __m128 __b)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5);
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
/// 32 bits are set to the lower 32 bits of the second parameter. The upper
/// 96 bits are set to the upper 96 bits of the first parameter.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
+/// This intrinsic corresponds to the <c> VBLENDPS / BLENDPS / MOVSS </c>
+/// instruction.
///
/// \param __a
/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are
@@ -2633,10 +2671,11 @@ _mm_unpacklo_ps(__m128 __a, __m128 __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_move_ss(__m128 __a, __m128 __b)
{
- return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 4, 1, 2, 3);
+ __a[0] = __b[0];
+ return __a;
}
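The element-assignment form is semantically identical to the old shufflevector call; a sketch of the observable behavior (not from the patch):

    #include <xmmintrin.h>

    __m128 move_example(void) {
      __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
      __m128 b = _mm_setr_ps(9.0f, 8.0f, 7.0f, 6.0f);
      return _mm_move_ss(a, b); /* { 9, 2, 3, 4 } */
    }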
-/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
/// 64 bits are set to the upper 64 bits of the second parameter. The upper
/// 64 bits are set to the upper 64 bits of the first parameter.
///
@@ -2657,7 +2696,7 @@ _mm_movehl_ps(__m128 __a, __m128 __b)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3);
}
-/// \brief Constructs a 128-bit floating-point vector of [4 x float]. The lower
+/// Constructs a 128-bit floating-point vector of [4 x float]. The lower
/// 64 bits are set to the lower 64 bits of the first parameter. The upper
/// 64 bits are set to the lower 64 bits of the second parameter.
///
@@ -2678,7 +2717,7 @@ _mm_movelh_ps(__m128 __a, __m128 __b)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5);
}
-/// \brief Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x
+/// Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x
/// float].
///
/// \headerfile <x86intrin.h>
@@ -2690,7 +2729,7 @@ _mm_movelh_ps(__m128 __a, __m128 __b)
/// from the corresponding elements in this operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpi16_ps(__m64 __a)
{
__m64 __b, __c;
@@ -2708,7 +2747,7 @@ _mm_cvtpi16_ps(__m64 __a)
return __r;
}
-/// \brief Converts a 64-bit vector of 16-bit unsigned integer values into a
+/// Converts a 64-bit vector of 16-bit unsigned integer values into a
/// 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2720,7 +2759,7 @@ _mm_cvtpi16_ps(__m64 __a)
/// destination are copied from the corresponding elements in this operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpu16_ps(__m64 __a)
{
__m64 __b, __c;
@@ -2737,7 +2776,7 @@ _mm_cvtpu16_ps(__m64 __a)
return __r;
}
-/// \brief Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]
+/// Converts the lower four 8-bit values from a 64-bit vector of [8 x i8]
/// into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2749,7 +2788,7 @@ _mm_cvtpu16_ps(__m64 __a)
/// from the corresponding lower 4 elements in this operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpi8_ps(__m64 __a)
{
__m64 __b;
@@ -2761,7 +2800,7 @@ _mm_cvtpi8_ps(__m64 __a)
return _mm_cvtpi16_ps(__b);
}
-/// \brief Converts the lower four unsigned 8-bit integer values from a 64-bit
+/// Converts the lower four unsigned 8-bit integer values from a 64-bit
/// vector of [8 x u8] into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2774,7 +2813,7 @@ _mm_cvtpi8_ps(__m64 __a)
/// operand.
/// \returns A 128-bit vector of [4 x float] containing the copied and converted
/// values from the source operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpu8_ps(__m64 __a)
{
__m64 __b;
@@ -2785,7 +2824,7 @@ _mm_cvtpu8_ps(__m64 __a)
return _mm_cvtpi16_ps(__b);
}
-/// \brief Converts the two 32-bit signed integer values from each 64-bit vector
+/// Converts the two 32-bit signed integer values from each 64-bit vector
/// operand of [2 x i32] into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
@@ -2801,7 +2840,7 @@ _mm_cvtpu8_ps(__m64 __a)
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// copied and converted values from the first operand. The upper 64 bits
/// contain the copied and converted values from the second operand.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX
_mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
{
__m128 __c;
@@ -2813,7 +2852,7 @@ _mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
return _mm_cvtpi32_ps(__c, __a);
}
-/// \brief Converts each single-precision floating-point element of a 128-bit
+/// Converts each single-precision floating-point element of a 128-bit
/// floating-point vector of [4 x float] into a 16-bit signed integer, and
/// packs the results into a 64-bit integer vector of [4 x i16].
///
@@ -2830,7 +2869,7 @@ _mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
/// A 128-bit floating-point vector of [4 x float].
/// \returns A 64-bit integer vector of [4 x i16] containing the converted
/// values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvtps_pi16(__m128 __a)
{
__m64 __b, __c;
@@ -2842,7 +2881,7 @@ _mm_cvtps_pi16(__m128 __a)
return _mm_packs_pi32(__b, __c);
}
-/// \brief Converts each single-precision floating-point element of a 128-bit
+/// Converts each single-precision floating-point element of a 128-bit
/// floating-point vector of [4 x float] into an 8-bit signed integer, and
/// packs the results into the lower 32 bits of a 64-bit integer vector of
/// [8 x i8]. The upper 32 bits of the vector are set to 0.
@@ -2860,7 +2899,7 @@ _mm_cvtps_pi16(__m128 __a)
/// 128-bit floating-point vector of [4 x float].
/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the
/// converted values and the upper 32 bits are set to zero.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
_mm_cvtps_pi8(__m128 __a)
{
__m64 __b, __c;
@@ -2871,7 +2910,7 @@ _mm_cvtps_pi8(__m128 __a)
return _mm_packs_pi16(__b, __c);
}
-/// \brief Extracts the sign bits from each single-precision floating-point
+/// Extracts the sign bits from each single-precision floating-point
/// element of a 128-bit floating-point vector of [4 x float] and returns the
/// sign bits in bits [0:3] of the result. Bits [31:4] of the result are set
/// to zero.
@@ -2963,6 +3002,7 @@ do { \
#define _m_ _mm_
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_MMX
/* Ugly hack for backwards-compatibility (compatible with gcc) */
#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
diff --git a/lib/Headers/xopintrin.h b/lib/Headers/xopintrin.h
index 4a34f770d58d..9d540a2abdbe 100644
--- a/lib/Headers/xopintrin.h
+++ b/lib/Headers/xopintrin.h
@@ -31,7 +31,8 @@
#include <fma4intrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop")))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(256)))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
@@ -201,7 +202,7 @@ _mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C)
return (__m128i)(((__v2du)__A & (__v2du)__C) | ((__v2du)__B & ~(__v2du)__C));
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C)
{
return (__m256i)(((__v4du)__A & (__v4du)__C) | ((__v4du)__B & ~(__v4du)__C));
@@ -237,17 +238,17 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B);
}
-#define _mm_roti_epi8(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)); })
+#define _mm_roti_epi8(A, N) \
+ (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N))
-#define _mm_roti_epi16(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)); })
+#define _mm_roti_epi16(A, N) \
+ (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N))
-#define _mm_roti_epi32(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)); })
+#define _mm_roti_epi32(A, N) \
+ (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N))
-#define _mm_roti_epi64(A, N) __extension__ ({ \
- (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)); })
+#define _mm_roti_epi64(A, N) \
+ (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N))
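Without the statement expressions, the immediate-rotate forms expand to plain casts over the builtins. A hedged sketch for one of them (not from the patch; requires an XOP target, e.g. -mxop):

    #include <xopintrin.h>

    __m128i rot_example(__m128i v) {
      return _mm_roti_epi32(v, 8); /* rotate each 32-bit lane left by 8 */
    }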
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi8(__m128i __A, __m128i __B)
@@ -297,37 +298,37 @@ _mm_sha_epi64(__m128i __A, __m128i __B)
return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B);
}
-#define _mm_com_epu8(A, B, N) __extension__ ({ \
+#define _mm_com_epu8(A, B, N) \
(__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (N)); })
+ (__v16qi)(__m128i)(B), (N))
-#define _mm_com_epu16(A, B, N) __extension__ ({ \
+#define _mm_com_epu16(A, B, N) \
(__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (N)); })
+ (__v8hi)(__m128i)(B), (N))
-#define _mm_com_epu32(A, B, N) __extension__ ({ \
+#define _mm_com_epu32(A, B, N) \
(__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (N)); })
+ (__v4si)(__m128i)(B), (N))
-#define _mm_com_epu64(A, B, N) __extension__ ({ \
+#define _mm_com_epu64(A, B, N) \
(__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (N)); })
+ (__v2di)(__m128i)(B), (N))
-#define _mm_com_epi8(A, B, N) __extension__ ({ \
+#define _mm_com_epi8(A, B, N) \
(__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (N)); })
+ (__v16qi)(__m128i)(B), (N))
-#define _mm_com_epi16(A, B, N) __extension__ ({ \
+#define _mm_com_epi16(A, B, N) \
(__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (N)); })
+ (__v8hi)(__m128i)(B), (N))
-#define _mm_com_epi32(A, B, N) __extension__ ({ \
+#define _mm_com_epi32(A, B, N) \
(__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (N)); })
+ (__v4si)(__m128i)(B), (N))
-#define _mm_com_epi64(A, B, N) __extension__ ({ \
+#define _mm_com_epi64(A, B, N) \
(__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (N)); })
+ (__v2di)(__m128i)(B), (N))
#define _MM_PCOMCTRL_LT 0
#define _MM_PCOMCTRL_LE 1
@@ -722,24 +723,24 @@ _mm_comtrue_epi64(__m128i __A, __m128i __B)
return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE);
}
-#define _mm_permute2_pd(X, Y, C, I) __extension__ ({ \
+#define _mm_permute2_pd(X, Y, C, I) \
(__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
(__v2df)(__m128d)(Y), \
- (__v2di)(__m128i)(C), (I)); })
+ (__v2di)(__m128i)(C), (I))
-#define _mm256_permute2_pd(X, Y, C, I) __extension__ ({ \
+#define _mm256_permute2_pd(X, Y, C, I) \
(__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
(__v4df)(__m256d)(Y), \
- (__v4di)(__m256i)(C), (I)); })
+ (__v4di)(__m256i)(C), (I))
-#define _mm_permute2_ps(X, Y, C, I) __extension__ ({ \
+#define _mm_permute2_ps(X, Y, C, I) \
(__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
- (__v4si)(__m128i)(C), (I)); })
+ (__v4si)(__m128i)(C), (I))
-#define _mm256_permute2_ps(X, Y, C, I) __extension__ ({ \
+#define _mm256_permute2_ps(X, Y, C, I) \
(__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
(__v8sf)(__m256)(Y), \
- (__v8si)(__m256i)(C), (I)); })
+ (__v8si)(__m256i)(C), (I))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_frcz_ss(__m128 __A)
@@ -765,18 +766,19 @@ _mm_frcz_pd(__m128d __A)
return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_frcz_ps(__m256 __A)
{
return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A);
}
-static __inline__ __m256d __DEFAULT_FN_ATTRS
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_frcz_pd(__m256d __A)
{
return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A);
}
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS256
#endif /* __XOPINTRIN_H */
diff --git a/lib/Headers/xsavecintrin.h b/lib/Headers/xsavecintrin.h
index 598470a682e2..25577a95fc9a 100644
--- a/lib/Headers/xsavecintrin.h
+++ b/lib/Headers/xsavecintrin.h
@@ -1,4 +1,4 @@
-/*===---- xsavecintrin.h - XSAVEC intrinsic ------------------------------------===
+/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/lib/Headers/xsaveintrin.h b/lib/Headers/xsaveintrin.h
index a2e6b2e742ff..16f3a78d3f5b 100644
--- a/lib/Headers/xsaveintrin.h
+++ b/lib/Headers/xsaveintrin.h
@@ -1,4 +1,4 @@
-/*===---- xsaveintrin.h - XSAVE intrinsic ------------------------------------===
+/*===---- xsaveintrin.h - XSAVE intrinsic ----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -33,23 +33,23 @@
static __inline__ void __DEFAULT_FN_ATTRS
_xsave(void *__p, unsigned long long __m) {
- return __builtin_ia32_xsave(__p, __m);
+ __builtin_ia32_xsave(__p, __m);
}
static __inline__ void __DEFAULT_FN_ATTRS
_xrstor(void *__p, unsigned long long __m) {
- return __builtin_ia32_xrstor(__p, __m);
+ __builtin_ia32_xrstor(__p, __m);
}
#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
_xsave64(void *__p, unsigned long long __m) {
- return __builtin_ia32_xsave64(__p, __m);
+ __builtin_ia32_xsave64(__p, __m);
}
static __inline__ void __DEFAULT_FN_ATTRS
_xrstor64(void *__p, unsigned long long __m) {
- return __builtin_ia32_xrstor64(__p, __m);
+ __builtin_ia32_xrstor64(__p, __m);
}
#endif
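The dropped return statements are cosmetic: the __builtin_ia32_xsave* builtins are void. A heavily hedged usage sketch (not from the patch; assumes -mxsave, an XSAVE-enabled OS, and that 4096 bytes covers the save area CPUID reports):

    #include <immintrin.h>

    alignas(64) static char area[4096]; /* XSAVE area must be 64-byte aligned */

    void save_x87_sse(void) {
      _xsave(area, 0x3ULL); /* component mask: x87 (bit 0) | SSE (bit 1) */
    }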
diff --git a/lib/Headers/xsaveoptintrin.h b/lib/Headers/xsaveoptintrin.h
index d3faae78be4f..792cf92d46e8 100644
--- a/lib/Headers/xsaveoptintrin.h
+++ b/lib/Headers/xsaveoptintrin.h
@@ -1,4 +1,4 @@
-/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ------------------------------------===
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -33,13 +33,13 @@
static __inline__ void __DEFAULT_FN_ATTRS
_xsaveopt(void *__p, unsigned long long __m) {
- return __builtin_ia32_xsaveopt(__p, __m);
+ __builtin_ia32_xsaveopt(__p, __m);
}
#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
_xsaveopt64(void *__p, unsigned long long __m) {
- return __builtin_ia32_xsaveopt64(__p, __m);
+ __builtin_ia32_xsaveopt64(__p, __m);
}
#endif
diff --git a/lib/Headers/xsavesintrin.h b/lib/Headers/xsavesintrin.h
index c5e540a86edb..fe2bc4b93b22 100644
--- a/lib/Headers/xsavesintrin.h
+++ b/lib/Headers/xsavesintrin.h
@@ -1,4 +1,4 @@
-/*===---- xsavesintrin.h - XSAVES intrinsic ------------------------------------===
+/*===---- xsavesintrin.h - XSAVES intrinsic --------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/lib/Headers/xtestintrin.h b/lib/Headers/xtestintrin.h
index 9d3378fd1eea..924424386b94 100644
--- a/lib/Headers/xtestintrin.h
+++ b/lib/Headers/xtestintrin.h
@@ -1,4 +1,4 @@
-/*===---- xtestintrin.h - XTEST intrinsic ---------------------------------===
+/*===---- xtestintrin.h - XTEST intrinsic ----------------------------------===
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
diff --git a/lib/Index/CMakeLists.txt b/lib/Index/CMakeLists.txt
index c9fbfafcf946..1362143fb0d4 100644
--- a/lib/Index/CMakeLists.txt
+++ b/lib/Index/CMakeLists.txt
@@ -23,6 +23,7 @@ add_clang_library(clangIndex
clangBasic
clangFormat
clangFrontend
+ clangLex
clangRewrite
clangSerialization
clangToolingCore
diff --git a/lib/Index/IndexDecl.cpp b/lib/Index/IndexDecl.cpp
index e14750e046eb..01ad3a277216 100644
--- a/lib/Index/IndexDecl.cpp
+++ b/lib/Index/IndexDecl.cpp
@@ -43,7 +43,7 @@ public:
return true;
}
- /// \brief Returns true if the given method has been defined explicitly by the
+ /// Returns true if the given method has been defined explicitly by the
/// user.
static bool hasUserDefined(const ObjCMethodDecl *D,
const ObjCImplDecl *Container) {
@@ -664,8 +664,11 @@ public:
bool VisitTemplateDecl(const TemplateDecl *D) {
- // Index the default values for the template parameters.
const NamedDecl *Parent = D->getTemplatedDecl();
+ if (!Parent)
+ return true;
+
+ // Index the default values for the template parameters.
if (D->getTemplateParameters() &&
shouldIndexTemplateParameterDefaultValue(Parent)) {
const TemplateParameterList *Params = D->getTemplateParameters();
@@ -684,7 +687,7 @@ public:
}
}
- return Visit(D->getTemplatedDecl());
+ return Visit(Parent);
}
bool VisitFriendDecl(const FriendDecl *D) {
@@ -723,7 +726,7 @@ bool IndexingContext::indexDecl(const Decl *D) {
if (D->isImplicit() && shouldIgnoreIfImplicit(D))
return true;
- if (isTemplateImplicitInstantiation(D))
+ if (isTemplateImplicitInstantiation(D) && !shouldIndexImplicitInstantiation())
return true;
IndexingDeclVisitor Visitor(*this);
diff --git a/lib/Index/IndexSymbol.cpp b/lib/Index/IndexSymbol.cpp
index 733d4dbc2f94..03b55ffe8a4e 100644
--- a/lib/Index/IndexSymbol.cpp
+++ b/lib/Index/IndexSymbol.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/Lex/MacroInfo.h"
using namespace clang;
using namespace clang::index;
@@ -348,6 +349,15 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
return Info;
}
+SymbolInfo index::getSymbolInfoForMacro(const MacroInfo &) {
+ SymbolInfo Info;
+ Info.Kind = SymbolKind::Macro;
+ Info.SubKind = SymbolSubKind::None;
+ Info.Properties = SymbolPropertySet();
+ Info.Lang = SymbolLanguage::C;
+ return Info;
+}
+
bool index::applyForEachSymbolRoleInterruptible(SymbolRoleSet Roles,
llvm::function_ref<bool(SymbolRole)> Fn) {
#define APPLY_FOR_ROLE(Role) \
@@ -364,6 +374,7 @@ bool index::applyForEachSymbolRoleInterruptible(SymbolRoleSet Roles,
APPLY_FOR_ROLE(Dynamic);
APPLY_FOR_ROLE(AddressOf);
APPLY_FOR_ROLE(Implicit);
+ APPLY_FOR_ROLE(Undefinition);
APPLY_FOR_ROLE(RelationChildOf);
APPLY_FOR_ROLE(RelationBaseOf);
APPLY_FOR_ROLE(RelationOverrideOf);
@@ -405,6 +416,7 @@ void index::printSymbolRoles(SymbolRoleSet Roles, raw_ostream &OS) {
case SymbolRole::Dynamic: OS << "Dyn"; break;
case SymbolRole::AddressOf: OS << "Addr"; break;
case SymbolRole::Implicit: OS << "Impl"; break;
+ case SymbolRole::Undefinition: OS << "Undef"; break;
case SymbolRole::RelationChildOf: OS << "RelChild"; break;
case SymbolRole::RelationBaseOf: OS << "RelBase"; break;
case SymbolRole::RelationOverrideOf: OS << "RelOver"; break;
diff --git a/lib/Index/IndexTypeSourceInfo.cpp b/lib/Index/IndexTypeSourceInfo.cpp
index c8ff3d72d4be..7a7a156478f8 100644
--- a/lib/Index/IndexTypeSourceInfo.cpp
+++ b/lib/Index/IndexTypeSourceInfo.cpp
@@ -129,7 +129,7 @@ public:
template<typename TypeLocType>
bool HandleTemplateSpecializationTypeLoc(TypeLocType TL) {
if (const auto *T = TL.getTypePtr()) {
- if (IndexCtx.shouldIndexImplicitTemplateInsts()) {
+ if (IndexCtx.shouldIndexImplicitInstantiation()) {
if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
IndexCtx.handleReference(RD, TL.getTemplateNameLoc(),
Parent, ParentDC, SymbolRoleSet(), Relations);
diff --git a/lib/Index/IndexingAction.cpp b/lib/Index/IndexingAction.cpp
index 411657bf3dcd..16f6c21745ef 100644
--- a/lib/Index/IndexingAction.cpp
+++ b/lib/Index/IndexingAction.cpp
@@ -13,30 +13,32 @@
#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Index/IndexDataConsumer.h"
+#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
using namespace clang;
using namespace clang::index;
-void IndexDataConsumer::_anchor() {}
-
bool IndexDataConsumer::handleDeclOccurence(const Decl *D, SymbolRoleSet Roles,
ArrayRef<SymbolRelation> Relations,
- FileID FID, unsigned Offset,
+ SourceLocation Loc,
ASTNodeInfo ASTNode) {
return true;
}
bool IndexDataConsumer::handleMacroOccurence(const IdentifierInfo *Name,
- const MacroInfo *MI, SymbolRoleSet Roles,
- FileID FID, unsigned Offset) {
+ const MacroInfo *MI,
+ SymbolRoleSet Roles,
+ SourceLocation Loc) {
return true;
}
bool IndexDataConsumer::handleModuleOccurence(const ImportDecl *ImportD,
SymbolRoleSet Roles,
- FileID FID, unsigned Offset) {
+ SourceLocation Loc) {
return true;
}
@@ -44,21 +46,22 @@ namespace {
class IndexASTConsumer : public ASTConsumer {
std::shared_ptr<Preprocessor> PP;
- IndexingContext &IndexCtx;
+ std::shared_ptr<IndexingContext> IndexCtx;
public:
- IndexASTConsumer(std::shared_ptr<Preprocessor> PP, IndexingContext &IndexCtx)
- : PP(std::move(PP)), IndexCtx(IndexCtx) {}
+ IndexASTConsumer(std::shared_ptr<Preprocessor> PP,
+ std::shared_ptr<IndexingContext> IndexCtx)
+ : PP(std::move(PP)), IndexCtx(std::move(IndexCtx)) {}
protected:
void Initialize(ASTContext &Context) override {
- IndexCtx.setASTContext(Context);
- IndexCtx.getDataConsumer().initialize(Context);
- IndexCtx.getDataConsumer().setPreprocessor(PP);
+ IndexCtx->setASTContext(Context);
+ IndexCtx->getDataConsumer().initialize(Context);
+ IndexCtx->getDataConsumer().setPreprocessor(PP);
}
bool HandleTopLevelDecl(DeclGroupRef DG) override {
- return IndexCtx.indexDeclGroupRef(DG);
+ return IndexCtx->indexDeclGroupRef(DG);
}
void HandleInterestingDecl(DeclGroupRef DG) override {
@@ -66,22 +69,52 @@ protected:
}
void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) override {
- IndexCtx.indexDeclGroupRef(DG);
+ IndexCtx->indexDeclGroupRef(DG);
}
void HandleTranslationUnit(ASTContext &Ctx) override {
}
};
+class IndexPPCallbacks : public PPCallbacks {
+ std::shared_ptr<IndexingContext> IndexCtx;
+
+public:
+ IndexPPCallbacks(std::shared_ptr<IndexingContext> IndexCtx)
+ : IndexCtx(std::move(IndexCtx)) {}
+
+ void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
+ SourceRange Range, const MacroArgs *Args) override {
+ IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
+ Range.getBegin(), *MD.getMacroInfo());
+ }
+
+ void MacroDefined(const Token &MacroNameTok,
+ const MacroDirective *MD) override {
+ IndexCtx->handleMacroDefined(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD->getMacroInfo());
+ }
+
+ void MacroUndefined(const Token &MacroNameTok, const MacroDefinition &MD,
+ const MacroDirective *Undef) override {
+ if (!MD.getMacroInfo()) // Ignore noop #undef.
+ return;
+ IndexCtx->handleMacroUndefined(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD.getMacroInfo());
+ }
+};
+
class IndexActionBase {
protected:
std::shared_ptr<IndexDataConsumer> DataConsumer;
- IndexingContext IndexCtx;
+ std::shared_ptr<IndexingContext> IndexCtx;
IndexActionBase(std::shared_ptr<IndexDataConsumer> dataConsumer,
IndexingOptions Opts)
- : DataConsumer(std::move(dataConsumer)),
- IndexCtx(Opts, *DataConsumer) {}
+ : DataConsumer(std::move(dataConsumer)),
+ IndexCtx(new IndexingContext(Opts, *DataConsumer)) {}
std::unique_ptr<IndexASTConsumer>
createIndexASTConsumer(CompilerInstance &CI) {
@@ -89,6 +122,10 @@ protected:
IndexCtx);
}
+ std::unique_ptr<PPCallbacks> createIndexPPCallbacks() {
+ return llvm::make_unique<IndexPPCallbacks>(IndexCtx);
+ }
+
void finish() {
DataConsumer->finish();
}
@@ -106,6 +143,11 @@ protected:
return createIndexASTConsumer(CI);
}
+ bool BeginSourceFileAction(clang::CompilerInstance &CI) override {
+ CI.getPreprocessor().addPPCallbacks(createIndexPPCallbacks());
+ return true;
+ }
+
void EndSourceFileAction() override {
FrontendAction::EndSourceFileAction();
finish();
@@ -124,32 +166,34 @@ public:
protected:
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) override;
- void EndSourceFileAction() override;
-};
-
-} // anonymous namespace
+ StringRef InFile) override {
+ auto OtherConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
+ if (!OtherConsumer) {
+ IndexActionFailed = true;
+ return nullptr;
+ }
+
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(std::move(OtherConsumer));
+ Consumers.push_back(createIndexASTConsumer(CI));
+ return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
+ }
-void WrappingIndexAction::EndSourceFileAction() {
- // Invoke wrapped action's method.
- WrapperFrontendAction::EndSourceFileAction();
- if (!IndexActionFailed)
- finish();
-}
+ bool BeginSourceFileAction(clang::CompilerInstance &CI) override {
+ WrapperFrontendAction::BeginSourceFileAction(CI);
+ CI.getPreprocessor().addPPCallbacks(createIndexPPCallbacks());
+ return true;
+ }
-std::unique_ptr<ASTConsumer>
-WrappingIndexAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
- auto OtherConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
- if (!OtherConsumer) {
- IndexActionFailed = true;
- return nullptr;
+ void EndSourceFileAction() override {
+ // Invoke wrapped action's method.
+ WrapperFrontendAction::EndSourceFileAction();
+ if (!IndexActionFailed)
+ finish();
}
+};
- std::vector<std::unique_ptr<ASTConsumer>> Consumers;
- Consumers.push_back(std::move(OtherConsumer));
- Consumers.push_back(createIndexASTConsumer(CI));
- return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
-}
+} // anonymous namespace
std::unique_ptr<FrontendAction>
index::createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
@@ -162,7 +206,6 @@ index::createIndexingAction(std::shared_ptr<IndexDataConsumer> DataConsumer,
return llvm::make_unique<IndexAction>(std::move(DataConsumer), Opts);
}
-
static bool topLevelDeclVisitor(void *context, const Decl *D) {
IndexingContext &IndexCtx = *static_cast<IndexingContext*>(context);
return IndexCtx.indexTopLevelDecl(D);
@@ -172,40 +215,44 @@ static void indexTranslationUnit(ASTUnit &Unit, IndexingContext &IndexCtx) {
Unit.visitLocalTopLevelDecls(&IndexCtx, topLevelDeclVisitor);
}
-void index::indexASTUnit(ASTUnit &Unit,
- std::shared_ptr<IndexDataConsumer> DataConsumer,
+void index::indexASTUnit(ASTUnit &Unit, IndexDataConsumer &DataConsumer,
IndexingOptions Opts) {
- IndexingContext IndexCtx(Opts, *DataConsumer);
+ IndexingContext IndexCtx(Opts, DataConsumer);
IndexCtx.setASTContext(Unit.getASTContext());
- DataConsumer->initialize(Unit.getASTContext());
- DataConsumer->setPreprocessor(Unit.getPreprocessorPtr());
+ DataConsumer.initialize(Unit.getASTContext());
+ DataConsumer.setPreprocessor(Unit.getPreprocessorPtr());
indexTranslationUnit(Unit, IndexCtx);
- DataConsumer->finish();
+ DataConsumer.finish();
}
void index::indexTopLevelDecls(ASTContext &Ctx, ArrayRef<const Decl *> Decls,
- std::shared_ptr<IndexDataConsumer> DataConsumer,
+ IndexDataConsumer &DataConsumer,
IndexingOptions Opts) {
- IndexingContext IndexCtx(Opts, *DataConsumer);
+ IndexingContext IndexCtx(Opts, DataConsumer);
IndexCtx.setASTContext(Ctx);
- DataConsumer->initialize(Ctx);
+ DataConsumer.initialize(Ctx);
for (const Decl *D : Decls)
IndexCtx.indexTopLevelDecl(D);
- DataConsumer->finish();
+ DataConsumer.finish();
+}
+
+std::unique_ptr<PPCallbacks>
+index::indexMacrosCallback(IndexDataConsumer &Consumer, IndexingOptions Opts) {
+ return llvm::make_unique<IndexPPCallbacks>(
+ std::make_shared<IndexingContext>(Opts, Consumer));
}
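These entry points now take the IndexDataConsumer by reference instead of by shared_ptr, so a stack-allocated consumer is enough. A minimal usage sketch, assuming a hypothetical IndexDataConsumer subclass MyConsumer (only the signatures visible in this diff are relied on):

void indexUnit(clang::ASTUnit &Unit) {
  MyConsumer Consumer;                // hypothetical subclass; no shared_ptr
  clang::index::IndexingOptions Opts;
  clang::index::indexASTUnit(Unit, Consumer, Opts);
  // A preprocessor-only client could instead attach just the macro callbacks:
  //   PP.addPPCallbacks(clang::index::indexMacrosCallback(Consumer, Opts));
}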
-void index::indexModuleFile(serialization::ModuleFile &Mod,
- ASTReader &Reader,
- std::shared_ptr<IndexDataConsumer> DataConsumer,
+void index::indexModuleFile(serialization::ModuleFile &Mod, ASTReader &Reader,
+ IndexDataConsumer &DataConsumer,
IndexingOptions Opts) {
ASTContext &Ctx = Reader.getContext();
- IndexingContext IndexCtx(Opts, *DataConsumer);
+ IndexingContext IndexCtx(Opts, DataConsumer);
IndexCtx.setASTContext(Ctx);
- DataConsumer->initialize(Ctx);
+ DataConsumer.initialize(Ctx);
for (const Decl *D : Reader.getModuleFileLevelDecls(Mod)) {
IndexCtx.indexTopLevelDecl(D);
}
- DataConsumer->finish();
+ DataConsumer.finish();
}
diff --git a/lib/Index/IndexingContext.cpp b/lib/Index/IndexingContext.cpp
index de9fe39df031..80d851b43d73 100644
--- a/lib/Index/IndexingContext.cpp
+++ b/lib/Index/IndexingContext.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Index/IndexDataConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
@@ -36,6 +37,10 @@ bool IndexingContext::shouldIndexFunctionLocalSymbols() const {
return IndexOpts.IndexFunctionLocals;
}
+bool IndexingContext::shouldIndexImplicitInstantiation() const {
+ return IndexOpts.IndexImplicitInstantiation;
+}
+
bool IndexingContext::handleDecl(const Decl *D,
SymbolRoleSet Roles,
ArrayRef<SymbolRelation> Relations) {
@@ -82,14 +87,9 @@ bool IndexingContext::importedModule(const ImportDecl *ImportD) {
Loc = IdLocs.front();
else
Loc = ImportD->getLocation();
- SourceManager &SM = Ctx->getSourceManager();
- Loc = SM.getFileLoc(Loc);
- if (Loc.isInvalid())
- return true;
- FileID FID;
- unsigned Offset;
- std::tie(FID, Offset) = SM.getDecomposedLoc(Loc);
+ SourceManager &SM = Ctx->getSourceManager();
+ FileID FID = SM.getFileID(SM.getFileLoc(Loc));
if (FID.isInvalid())
return true;
@@ -112,7 +112,7 @@ bool IndexingContext::importedModule(const ImportDecl *ImportD) {
if (ImportD->isImplicit())
Roles |= (unsigned)SymbolRole::Implicit;
- return DataConsumer.handleModuleOccurence(ImportD, Roles, FID, Offset);
+ return DataConsumer.handleModuleOccurence(ImportD, Roles, Loc);
}
bool IndexingContext::isTemplateImplicitInstantiation(const Decl *D) {
@@ -295,6 +295,7 @@ static bool shouldReportOccurrenceForSystemDeclOnlyMode(
case SymbolRole::Dynamic:
case SymbolRole::AddressOf:
case SymbolRole::Implicit:
+ case SymbolRole::Undefinition:
case SymbolRole::RelationReceivedBy:
case SymbolRole::RelationCalledBy:
case SymbolRole::RelationContainedBy:
@@ -327,13 +328,7 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
return true;
SourceManager &SM = Ctx->getSourceManager();
- Loc = SM.getFileLoc(Loc);
- if (Loc.isInvalid())
- return true;
-
- FileID FID;
- unsigned Offset;
- std::tie(FID, Offset) = SM.getDecomposedLoc(Loc);
+ FileID FID = SM.getFileID(SM.getFileLoc(Loc));
if (FID.isInvalid())
return true;
@@ -355,6 +350,9 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
}
}
+ if (!OrigD)
+ OrigD = D;
+
if (isTemplateImplicitInstantiation(D)) {
if (!IsRef)
return true;
@@ -364,9 +362,6 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
assert(!isTemplateImplicitInstantiation(D));
}
- if (!OrigD)
- OrigD = D;
-
if (IsRef)
Roles |= (unsigned)SymbolRole::Reference;
else if (isDeclADefinition(OrigD, ContainerDC, *Ctx))
@@ -414,7 +409,27 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
Rel.RelatedSymbol->getCanonicalDecl()));
}
- IndexDataConsumer::ASTNodeInfo Node{ OrigE, OrigD, Parent, ContainerDC };
- return DataConsumer.handleDeclOccurence(D, Roles, FinalRelations, FID, Offset,
- Node);
+ IndexDataConsumer::ASTNodeInfo Node{OrigE, OrigD, Parent, ContainerDC};
+ return DataConsumer.handleDeclOccurence(D, Roles, FinalRelations, Loc, Node);
+}
+
+void IndexingContext::handleMacroDefined(const IdentifierInfo &Name,
+ SourceLocation Loc,
+ const MacroInfo &MI) {
+ SymbolRoleSet Roles = (unsigned)SymbolRole::Definition;
+ DataConsumer.handleMacroOccurence(&Name, &MI, Roles, Loc);
+}
+
+void IndexingContext::handleMacroUndefined(const IdentifierInfo &Name,
+ SourceLocation Loc,
+ const MacroInfo &MI) {
+ SymbolRoleSet Roles = (unsigned)SymbolRole::Undefinition;
+ DataConsumer.handleMacroOccurence(&Name, &MI, Roles, Loc);
+}
+
+void IndexingContext::handleMacroReference(const IdentifierInfo &Name,
+ SourceLocation Loc,
+ const MacroInfo &MI) {
+ SymbolRoleSet Roles = (unsigned)SymbolRole::Reference;
+ DataConsumer.handleMacroOccurence(&Name, &MI, Roles, Loc);
}
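All three helpers feed the same consumer callback and differ only in the SymbolRole they pass. A sketch of a consumer telling them apart; the override's signature is inferred from the handleMacroOccurence call sites above and may not match the actual IndexDataConsumer declaration:

class MacroLogger : public clang::index::IndexDataConsumer {
  void handleMacroOccurence(const clang::IdentifierInfo *Name,
                            const clang::MacroInfo *MI,
                            clang::index::SymbolRoleSet Roles,
                            clang::SourceLocation Loc) override {
    using clang::index::SymbolRole;
    if (Roles & (unsigned)SymbolRole::Definition)
      llvm::errs() << "#define " << Name->getName() << "\n";
    else if (Roles & (unsigned)SymbolRole::Undefinition)
      llvm::errs() << "#undef " << Name->getName() << "\n";
    else // Reference
      llvm::errs() << "macro use: " << Name->getName() << "\n";
  }
};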
diff --git a/lib/Index/IndexingContext.h b/lib/Index/IndexingContext.h
index 566651c83a75..04960086d092 100644
--- a/lib/Index/IndexingContext.h
+++ b/lib/Index/IndexingContext.h
@@ -10,9 +10,11 @@
#ifndef LLVM_CLANG_LIB_INDEX_INDEXINGCONTEXT_H
#define LLVM_CLANG_LIB_INDEX_INDEXINGCONTEXT_H
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Index/IndexSymbol.h"
#include "clang/Index/IndexingAction.h"
+#include "clang/Lex/MacroInfo.h"
#include "llvm/ADT/ArrayRef.h"
namespace clang {
@@ -58,9 +60,7 @@ public:
bool shouldIndexFunctionLocalSymbols() const;
- bool shouldIndexImplicitTemplateInsts() const {
- return false;
- }
+ bool shouldIndexImplicitInstantiation() const;
static bool isTemplateImplicitInstantiation(const Decl *D);
@@ -80,6 +80,15 @@ public:
const Expr *RefE = nullptr,
const Decl *RefD = nullptr);
+ void handleMacroDefined(const IdentifierInfo &Name, SourceLocation Loc,
+ const MacroInfo &MI);
+
+ void handleMacroUndefined(const IdentifierInfo &Name, SourceLocation Loc,
+ const MacroInfo &MI);
+
+ void handleMacroReference(const IdentifierInfo &Name, SourceLocation Loc,
+ const MacroInfo &MI);
+
bool importedModule(const ImportDecl *ImportD);
bool indexDecl(const Decl *D);
diff --git a/lib/Index/SimpleFormatContext.h b/lib/Index/SimpleFormatContext.h
index 2c26e4d82e08..9c6d29bec329 100644
--- a/lib/Index/SimpleFormatContext.h
+++ b/lib/Index/SimpleFormatContext.h
@@ -9,7 +9,7 @@
//
/// \file
///
-/// \brief Defines a utility class for use of clang-format in libclang
+/// Defines a utility class for use of clang-format in libclang
//
//===----------------------------------------------------------------------===//
@@ -29,7 +29,7 @@
namespace clang {
namespace index {
-/// \brief A small class to be used by libclang clients to format
+/// A small class to be used by libclang clients to format
/// a declaration string in memory. This object is instantiated once
/// and used each time formatting is needed.
class SimpleFormatContext {
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index 3a06554b256c..e69fa749b45f 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -103,7 +103,7 @@ public:
void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
void VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
- IgnoreResults = true;
+ IgnoreResults = true; // No USRs for linkage specs themselves.
}
void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
@@ -192,6 +192,8 @@ bool USRGenerator::ShouldGenerateLocation(const NamedDecl *D) {
void USRGenerator::VisitDeclContext(const DeclContext *DC) {
if (const NamedDecl *D = dyn_cast<NamedDecl>(DC))
Visit(D);
+ else if (isa<LinkageSpecDecl>(DC)) // Linkage specs are transparent in USRs.
+ VisitDeclContext(DC->getParent());
}
void USRGenerator::VisitFieldDecl(const FieldDecl *D) {
@@ -648,6 +650,8 @@ void USRGenerator::VisitType(QualType T) {
c = 'b'; break;
case BuiltinType::UChar:
c = 'c'; break;
+ case BuiltinType::Char8:
+ c = 'u'; break; // FIXME: Check this doesn't collide
case BuiltinType::Char16:
c = 'q'; break;
case BuiltinType::Char32:
@@ -705,6 +709,30 @@ void USRGenerator::VisitType(QualType T) {
case BuiltinType::OCLQueue:
case BuiltinType::OCLReserveID:
case BuiltinType::OCLSampler:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
IgnoreResults = true;
return;
case BuiltinType::ObjCId:
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index 6976294a2eaf..b1a2ef121288 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -124,7 +124,7 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
return nullptr;
}
-/// \brief Get filenames for all registered header maps.
+/// Get filenames for all registered header maps.
void HeaderSearch::getHeaderMapFileNames(
SmallVectorImpl<std::string> &Names) const {
for (auto &HM : HeaderMaps)
@@ -198,31 +198,33 @@ std::string HeaderSearch::getCachedModuleFileName(StringRef ModuleName,
return Result.str().str();
}
-Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
+Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch,
+ bool AllowExtraModuleMapSearch) {
// Look in the module map to determine if there is a module by this name.
Module *Module = ModMap.findModule(ModuleName);
if (Module || !AllowSearch || !HSOpts->ImplicitModuleMaps)
return Module;
StringRef SearchName = ModuleName;
- Module = lookupModule(ModuleName, SearchName);
+ Module = lookupModule(ModuleName, SearchName, AllowExtraModuleMapSearch);
// The facility for "private modules" -- adjacent, optional module maps named
// module.private.modulemap that are supposed to define private submodules --
// may have different flavors of names: FooPrivate, Foo_Private and Foo.Private.
//
- // Foo.Private is now depracated in favor of Foo_Private. Users of FooPrivate
+ // Foo.Private is now deprecated in favor of Foo_Private. Users of FooPrivate
// should also rename to Foo_Private. Representing private as submodules
// could force building unwanted dependencies into the parent module and cause
// dependency cycles.
if (!Module && SearchName.consume_back("_Private"))
- Module = lookupModule(ModuleName, SearchName);
+ Module = lookupModule(ModuleName, SearchName, AllowExtraModuleMapSearch);
if (!Module && SearchName.consume_back("Private"))
- Module = lookupModule(ModuleName, SearchName);
+ Module = lookupModule(ModuleName, SearchName, AllowExtraModuleMapSearch);
return Module;
}
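The fallback only rewrites the name used to locate module maps on disk; findModule is always re-queried with the original ModuleName. Roughly, for hypothetical module names:

//   lookupModule("Foo_Private") : load maps for "Foo_Private", then "Foo",
//                                 retrying findModule("Foo_Private") each time.
//   lookupModule("FooPrivate")  : load maps for "FooPrivate", then "Foo".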
-Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName) {
+Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
+ bool AllowExtraModuleMapSearch) {
Module *Module = nullptr;
// Look through the various header search paths to load any available module
@@ -281,8 +283,9 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName) {
continue;
// Load all module maps in the immediate subdirectories of this search
- // directory.
- loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+ // directory if ModuleName was from @import.
+ if (AllowExtraModuleMapSearch)
+ loadSubdirectoryModuleMaps(SearchDirs[Idx]);
// Look again for the module.
Module = ModMap.findModule(ModuleName);
@@ -404,7 +407,7 @@ const FileEntry *DirectoryLookup::LookupFile(
return Result;
}
-/// \brief Given a framework directory, find the top-most framework directory.
+/// Given a framework directory, find the top-most framework directory.
///
/// \param FileMgr The file manager to use for directory lookups.
/// \param DirName The name of the framework directory.
@@ -600,7 +603,7 @@ void HeaderSearch::setTarget(const TargetInfo &Target) {
// Header File Location.
//===----------------------------------------------------------------------===//
-/// \brief Return true with a diagnostic if the file that MSVC would have found
+/// Return true with a diagnostic if the file that MSVC would have found
/// fails to match the one that Clang would have found with MSVC header search
/// disabled.
static bool checkMSVCHeaderSearch(DiagnosticsEngine &Diags,
@@ -621,6 +624,74 @@ static const char *copyString(StringRef Str, llvm::BumpPtrAllocator &Alloc) {
return CopyStr;
}
+static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
+ SmallVectorImpl<char> &FrameworkName) {
+ using namespace llvm::sys;
+ path::const_iterator I = path::begin(Path);
+ path::const_iterator E = path::end(Path);
+ IsPrivateHeader = false;
+
+ // Detect different types of framework style paths:
+ //
+ // ...Foo.framework/{Headers,PrivateHeaders}
+ // ...Foo.framework/Versions/{A,Current}/{Headers,PrivateHeaders}
+ // ...Foo.framework/Frameworks/Nested.framework/{Headers,PrivateHeaders}
+ // ...<other variations with 'Versions' like in the above path>
+ //
+ // and other variations along these lines.
+ int FoundComp = 0;
+ while (I != E) {
+ if (*I == "Headers")
+ ++FoundComp;
+ if (I->endswith(".framework")) {
+ FrameworkName.append(I->begin(), I->end());
+ ++FoundComp;
+ }
+ if (*I == "PrivateHeaders") {
+ ++FoundComp;
+ IsPrivateHeader = true;
+ }
+ ++I;
+ }
+
+ return FoundComp >= 2;
+}
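The scan is purely component-based, so 'Versions/A' and nested-framework segments can appear anywhere in the path. Expected results on a few sample paths (illustrative; not taken from this patch's tests):

//   /S/L/F/Foo.framework/Headers/Foo.h
//     -> true,  IsPrivateHeader = false, FrameworkName = "Foo.framework"
//   /S/L/F/Foo.framework/Versions/A/PrivateHeaders/FooInternal.h
//     -> true,  IsPrivateHeader = true,  FrameworkName = "Foo.framework"
//   /usr/include/stdio.h
//     -> false (no ".framework" or Headers/PrivateHeaders components)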
+
+static void
+diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
+ StringRef Includer, StringRef IncludeFilename,
+ const FileEntry *IncludeFE, bool isAngled = false,
+ bool FoundByHeaderMap = false) {
+ bool IsIncluderPrivateHeader = false;
+ SmallString<128> FromFramework, ToFramework;
+ if (!isFrameworkStylePath(Includer, IsIncluderPrivateHeader, FromFramework))
+ return;
+ bool IsIncludeePrivateHeader = false;
+ bool IsIncludeeInFramework = isFrameworkStylePath(
+ IncludeFE->getName(), IsIncludeePrivateHeader, ToFramework);
+
+ if (!isAngled && !FoundByHeaderMap) {
+ SmallString<128> NewInclude("<");
+ if (IsIncludeeInFramework) {
+ NewInclude += StringRef(ToFramework).drop_back(10); // drop .framework
+ NewInclude += "/";
+ }
+ NewInclude += IncludeFilename;
+ NewInclude += ">";
+ Diags.Report(IncludeLoc, diag::warn_quoted_include_in_framework_header)
+ << IncludeFilename
+ << FixItHint::CreateReplacement(IncludeLoc, NewInclude);
+ }
+
+ // Headers in Foo.framework/Headers should not include headers
+ // from Foo.framework/PrivateHeaders, since this violates public/private
+ // API boundaries and can cause modular dependency cycles.
+ if (!IsIncluderPrivateHeader && IsIncludeeInFramework &&
+ IsIncludeePrivateHeader && FromFramework == ToFramework)
+ Diags.Report(IncludeLoc, diag::warn_framework_include_private_from_public)
+ << IncludeFilename;
+}
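Net effect: a public framework header using a quoted include gets a fix-it to the angled framework-style spelling, and a public header pulling in a private header of its own framework draws a second warning. A hedged illustration, for a hypothetical framework Foo laid out as in the comment above:

// Inside Foo.framework/Headers/Foo.h:
#include "FooBar.h"       // warn_quoted_include_in_framework_header;
                          // fix-it suggests: #include <Foo/FooBar.h>
#include "FooInternal.h"  // if this resolves into Foo.framework/PrivateHeaders,
                          // warn_framework_include_private_from_public fires too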
+
/// LookupFile - Given a "foo" or \<foo> reference, look up the indicated file,
/// return null on failure. isAngled indicates whether the file reference is
/// for system \#include's or not (i.e. using <> instead of ""). Includers, if
@@ -722,8 +793,12 @@ const FileEntry *HeaderSearch::LookupFile(
RelativePath->clear();
RelativePath->append(Filename.begin(), Filename.end());
}
- if (First)
+ if (First) {
+ diagnoseFrameworkInclude(Diags, IncludeLoc,
+ IncluderAndDir.second->getName(), Filename,
+ FE);
return FE;
+ }
// Otherwise, we found the path via MSVC header search rules. If
// -Wmsvc-include is enabled, we have to keep searching to see if we
@@ -834,6 +909,12 @@ const FileEntry *HeaderSearch::LookupFile(
return MSFE;
}
+ bool FoundByHeaderMap = !IsMapped ? false : *IsMapped;
+ if (!Includers.empty())
+ diagnoseFrameworkInclude(Diags, IncludeLoc,
+ Includers.front().second->getName(), Filename,
+ FE, isAngled, FoundByHeaderMap);
+
// Remember this location for the next lookup we do.
CacheLookup.HitIdx = i;
return FE;
@@ -996,7 +1077,7 @@ LookupSubframeworkHeader(StringRef Filename,
// File Info Management.
//===----------------------------------------------------------------------===//
-/// \brief Merge the header file info provided by \p OtherHFI into the current
+/// Merge the header file info provided by \p OtherHFI into the current
/// header file info (\p HFI)
static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
const HeaderFileInfo &OtherHFI) {
@@ -1580,9 +1661,15 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
bool *IsSystem) {
// FIXME: We assume that the path name currently cached in the FileEntry is
- // the most appropriate one for this analysis (and that it's spelled the same
- // way as the corresponding header search path).
- StringRef Name = File->getName();
+ // the most appropriate one for this analysis (and that it's spelled the
+ // same way as the corresponding header search path).
+ return suggestPathToFileForDiagnostics(File->getName(), /*BuildDir=*/"",
+ IsSystem);
+}
+
+std::string HeaderSearch::suggestPathToFileForDiagnostics(
+ llvm::StringRef File, llvm::StringRef WorkingDir, bool *IsSystem) {
+ using namespace llvm::sys;
unsigned BestPrefixLength = 0;
unsigned BestSearchDir;
@@ -1593,12 +1680,17 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
continue;
StringRef Dir = SearchDirs[I].getDir()->getName();
- for (auto NI = llvm::sys::path::begin(Name),
- NE = llvm::sys::path::end(Name),
- DI = llvm::sys::path::begin(Dir),
- DE = llvm::sys::path::end(Dir);
+ llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
+ if (!WorkingDir.empty() && !path::is_absolute(Dir)) {
+ auto err = fs::make_absolute(WorkingDir, DirPath);
+ if (!err)
+ path::remove_dots(DirPath, /*remove_dot_dot=*/true);
+ Dir = DirPath;
+ }
+ for (auto NI = path::begin(File), NE = path::end(File),
+ DI = path::begin(Dir), DE = path::end(Dir);
/*termination condition in loop*/; ++NI, ++DI) {
- // '.' components in Name are ignored.
+ // '.' components in File are ignored.
while (NI != NE && *NI == ".")
++NI;
if (NI == NE)
@@ -1608,9 +1700,9 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
while (DI != DE && *DI == ".")
++DI;
if (DI == DE) {
- // Dir is a prefix of Name, up to '.' components and choice of path
+ // Dir is a prefix of File, up to '.' components and choice of path
// separators.
- unsigned PrefixLength = NI - llvm::sys::path::begin(Name);
+ unsigned PrefixLength = NI - path::begin(File);
if (PrefixLength > BestPrefixLength) {
BestPrefixLength = PrefixLength;
BestSearchDir = I;
@@ -1625,5 +1717,5 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
if (IsSystem)
*IsSystem = BestPrefixLength ? BestSearchDir >= SystemDirIdx : false;
- return Name.drop_front(BestPrefixLength);
+ return File.drop_front(BestPrefixLength);
}
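The new overload lets callers supply a raw path plus a build directory, so relative search dirs still take part in the prefix match. A sketch with hypothetical paths:

//   Search dir recorded as "include", WorkingDir = "/proj/build":
//   DirPath becomes "/proj/build/include", so
//   suggestPathToFileForDiagnostics("/proj/build/include/foo/bar.h",
//                                   "/proj/build")  returns "foo/bar.h"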
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 830354ab23f0..e8588a771a43 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -257,7 +257,7 @@ void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }
// Token Spelling
//===----------------------------------------------------------------------===//
-/// \brief Slow case of getSpelling. Extract the characters comprising the
+/// Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
const LangOptions &LangOpts, char *Spelling) {
@@ -442,7 +442,7 @@ unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
return TheTok.getLength();
}
-/// \brief Relex the token at the specified location.
+/// Relex the token at the specified location.
/// \returns true if there was a failure, false on success.
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
const SourceManager &SM,
@@ -708,12 +708,9 @@ PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
TheTok.isAtStartOfLine());
}
-/// AdvanceToTokenCharacter - Given a location that specifies the start of a
-/// token, return a new location that specifies a character within the token.
-SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
- unsigned CharNo,
- const SourceManager &SM,
- const LangOptions &LangOpts) {
+unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
// Figure out how many physical characters away the specified expansion
// character is. This needs to take into consideration newlines and
// trigraphs.
@@ -722,7 +719,7 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
// If they request the first char of the token, we're trivially done.
if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
- return TokStart;
+ return 0;
unsigned PhysOffset = 0;
@@ -731,7 +728,7 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
// chars, this method is extremely fast.
while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
if (CharNo == 0)
- return TokStart.getLocWithOffset(PhysOffset);
+ return PhysOffset;
++TokPtr;
--CharNo;
++PhysOffset;
@@ -753,10 +750,10 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
- return TokStart.getLocWithOffset(PhysOffset);
+ return PhysOffset;
}
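The relocated logic now returns a bare offset; the old location-returning behavior is a one-liner on top of it. A minimal sketch (the real Lexer.h may well provide exactly such an inline wrapper):

static clang::SourceLocation
advanceToTokenCharacter(clang::SourceLocation TokStart, unsigned CharNo,
                        const clang::SourceManager &SM,
                        const clang::LangOptions &LangOpts) {
  return TokStart.getLocWithOffset(
      clang::Lexer::getTokenPrefixLength(TokStart, CharNo, SM, LangOpts));
}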
-/// \brief Computes the source location just past the end of the
+/// Computes the source location just past the end of the
/// token at this source location.
///
/// This routine can be used to produce a source location that
@@ -791,7 +788,7 @@ SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
return Loc.getLocWithOffset(Len);
}
-/// \brief Returns true if the given MacroID location points at the first
+/// Returns true if the given MacroID location points at the first
/// token of the macro expansion.
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
const SourceManager &SM,
@@ -813,7 +810,7 @@ bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}
-/// \brief Returns true if the given MacroID location points at the last
+/// Returns true if the given MacroID location points at the last
/// token of the macro expansion.
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
const SourceManager &SM,
@@ -971,7 +968,7 @@ StringRef Lexer::getSourceText(CharSourceRange Range,
StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
- assert(Loc.isMacroID() && "Only reasonble to call this on macros");
+ assert(Loc.isMacroID() && "Only reasonable to call this on macros");
// Find the location of the immediate macro expansion.
while (true) {
@@ -987,7 +984,7 @@ StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
// Loc points to the argument id of the macro definition, move to the
// macro expansion.
- Loc = SM.getImmediateExpansionRange(Loc).first;
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
SourceLocation SpellLoc = Expansion.getSpellingLoc();
if (SpellLoc.isFileID())
break; // No inner macro.
@@ -1017,10 +1014,10 @@ StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
StringRef Lexer::getImmediateMacroNameForDiagnostics(
SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
- assert(Loc.isMacroID() && "Only reasonble to call this on macros");
+ assert(Loc.isMacroID() && "Only reasonable to call this on macros");
// Walk past macro argument expansions.
while (SM.isMacroArgExpansion(Loc))
- Loc = SM.getImmediateExpansionRange(Loc).first;
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
// If the macro's spelling has no FileID, then it's actually a token paste
// or stringization (or similar) and not a macro at all.
@@ -1030,7 +1027,7 @@ StringRef Lexer::getImmediateMacroNameForDiagnostics(
// Find the spelling location of the start of the non-argument expansion
// range. This is where the macro name was spelled in order to begin
// expanding this macro.
- Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).first);
+ Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin());
// Dig out the buffer where the macro name was spelled and the extents of the
// name so that we can render it into the expansion note.
@@ -1112,10 +1109,9 @@ static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
// Figure out the expansion loc range, which is the range covered by the
// original _Pragma(...) sequence.
- std::pair<SourceLocation,SourceLocation> II =
- SM.getImmediateExpansionRange(FileLoc);
+ CharSourceRange II = SM.getImmediateExpansionRange(FileLoc);
- return SM.createExpansionLoc(SpellingLoc, II.first, II.second, TokLen);
+ return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen);
}
/// getSourceLocation - Return a source location identifier for the specified
@@ -1260,7 +1256,7 @@ Optional<Token> Lexer::findNextToken(SourceLocation Loc,
return Tok;
}
-/// \brief Checks that the given token is the first token that occurs after the
+/// Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
@@ -1413,7 +1409,7 @@ Slash:
// Helper methods for lexing.
//===----------------------------------------------------------------------===//
-/// \brief Routine that indiscriminately sets the offset into the source file.
+/// Routine that indiscriminately sets the offset into the source file.
void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
BufferPtr = BufferStart + Offset;
if (BufferPtr > BufferEnd)
@@ -1645,20 +1641,38 @@ FinishIdentifier:
// Fill in Result.IdentifierInfo and update the token kind,
// looking up the identifier in the identifier table.
IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
+ // Note that we have to call PP->LookUpIdentifierInfo() even for code
+ // completion; it writes IdentifierInfo into Result, and callers rely on it.
+
+ // If the completion point is at the end of an identifier, we want to treat
+ // the identifier as incomplete even if it resolves to a macro or a keyword.
+ // This allows e.g. 'class^' to complete to 'classifier'.
+ if (isCodeCompletionPoint(CurPtr)) {
+ // Return the code-completion token.
+ Result.setKind(tok::code_completion);
+ // Skip the code-completion char and all immediate identifier characters.
+ // This ensures we get consistent behavior when completing at any point in
+ // an identifier (i.e. at the start, in the middle, at the end). Note that
+ // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
+ // simpler.
+ assert(*CurPtr == 0 && "Completion character must be 0");
+ ++CurPtr;
+ // Note that the code-completion token is not added as a separate character
+ // when the completion point is at the end of the buffer. Therefore, we need
+ // to check whether the buffer has ended.
+ if (CurPtr < BufferEnd) {
+ while (isIdentifierBody(*CurPtr))
+ ++CurPtr;
+ }
+ BufferPtr = CurPtr;
+ return true;
+ }
// Finally, now that we know we have an identifier, pass this off to the
// preprocessor, which may macro expand it or something.
if (II->isHandleIdentifierCase())
return PP->HandleIdentifier(Result);
- if (II->getTokenID() == tok::identifier && isCodeCompletionPoint(CurPtr)
- && II->getPPKeywordID() == tok::pp_not_keyword
- && II->getObjCKeywordID() == tok::objc_not_keyword) {
- // Return the code-completion token.
- Result.setKind(tok::code_completion);
- cutOffLexing();
- return true;
- }
return true;
}
@@ -2009,18 +2023,21 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
const char *AfterLessPos = CurPtr;
char C = getAndAdvanceChar(CurPtr, Result);
while (C != '>') {
- // Skip escaped characters.
- if (C == '\\' && CurPtr < BufferEnd) {
- // Skip the escaped character.
- getAndAdvanceChar(CurPtr, Result);
- } else if (C == '\n' || C == '\r' || // Newline.
- (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
- isCodeCompletionPoint(CurPtr-1)))) {
+ // Skip escaped characters. Escaped newlines will already be processed by
+ // getAndAdvanceChar.
+ if (C == '\\')
+ C = getAndAdvanceChar(CurPtr, Result);
+
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
+ isCodeCompletionPoint(CurPtr-1)))) {
// If the filename is unterminated, then it must just be a lone <
// character. Return this as such.
FormTokenWithChars(Result, AfterLessPos, tok::less);
return true;
- } else if (C == 0) {
+ }
+
+ if (C == 0) {
NulCharacter = CurPtr-1;
}
C = getAndAdvanceChar(CurPtr, Result);
@@ -2160,7 +2177,7 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
}
/// We have just read the // characters from input. Skip until we find the
-/// newline character thats terminate the comment. Then update BufferPtr and
+/// newline character that terminates the comment. Then update BufferPtr and
/// return.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
@@ -2738,7 +2755,7 @@ unsigned Lexer::isNextPPTokenLParen() {
return Tok.is(tok::l_paren);
}
-/// \brief Find the end of a version control conflict marker.
+/// Find the end of a version control conflict marker.
static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
ConflictMarkerKind CMK) {
const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
@@ -3509,7 +3526,7 @@ LexNextToken:
// want to lex this as a comment. There is one problem with this though,
// that in one particular corner case, this can change the behavior of the
// resultant program. For example, In "foo //**/ bar", C89 would lex
- // this as "foo / bar" and langauges with Line comments would lex it as
+ // this as "foo / bar" and languages with Line comments would lex it as
// "foo". Check to see if the character after the second slash is a '*'.
// If so, we will lex that as a "/" instead of the start of a comment.
// However, we never do this if we are just preprocessing.
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index cbec5e6b6385..966dafca2719 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -70,7 +70,7 @@ static CharSourceRange MakeCharSourceRange(const LangOptions &Features,
return CharSourceRange::getCharRange(Begin, End);
}
-/// \brief Produce a diagnostic highlighting some portion of a literal.
+/// Produce a diagnostic highlighting some portion of a literal.
///
/// Emits the diagnostic \p DiagID, highlighting the range of characters from
/// \p TokRangeBegin (inclusive) to \p TokRangeEnd (exclusive), which must be
@@ -538,6 +538,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
saw_exponent = false;
saw_period = false;
saw_ud_suffix = false;
+ saw_fixed_point_suffix = false;
isLong = false;
isUnsigned = false;
isLongLong = false;
@@ -547,6 +548,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isFloat16 = false;
isFloat128 = false;
MicrosoftInteger = 0;
+ isFract = false;
+ isAccum = false;
hadError = false;
if (*s == '0') { // parse radix
@@ -568,6 +571,16 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
SuffixBegin = s;
checkSeparator(TokLoc, s, CSK_AfterDigits);
+ // Initial scan to lookahead for fixed point suffix.
+ if (PP.getLangOpts().FixedPoint) {
+ for (const char *c = s; c != ThisTokEnd; ++c) {
+ if (*c == 'r' || *c == 'k' || *c == 'R' || *c == 'K') {
+ saw_fixed_point_suffix = true;
+ break;
+ }
+ }
+ }
+
// Parse the suffix. At this point we can classify whether we have an FP or
// integer constant.
bool isFPConstant = isFloatingLiteral();
@@ -576,11 +589,25 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// we break out of the loop.
for (; s != ThisTokEnd; ++s) {
switch (*s) {
+ case 'R':
+ case 'r':
+ if (!PP.getLangOpts().FixedPoint) break;
+ if (isFract || isAccum) break;
+ if (!(saw_period || saw_exponent)) break;
+ isFract = true;
+ continue;
+ case 'K':
+ case 'k':
+ if (!PP.getLangOpts().FixedPoint) break;
+ if (isFract || isAccum) break;
+ if (!(saw_period || saw_exponent)) break;
+ isAccum = true;
+ continue;
case 'h': // FP Suffix for "half".
case 'H':
// OpenCL Extension v1.2 s9.5 - h or H suffix for half type.
- if (!PP.getLangOpts().Half) break;
- if (!isFPConstant) break; // Error for integer constant.
+ if (!(PP.getLangOpts().Half || PP.getLangOpts().FixedPoint)) break;
+ if (isIntegerLiteral()) break; // Error for integer constant.
if (isHalf || isFloat || isLong) break; // HH, FH, LH invalid.
isHalf = true;
continue; // Success.
@@ -693,6 +720,9 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isHalf = false;
isImaginary = false;
MicrosoftInteger = 0;
+ saw_fixed_point_suffix = false;
+ isFract = false;
+ isAccum = false;
}
saw_ud_suffix = true;
@@ -707,6 +737,10 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
hadError = true;
}
}
+
+ if (!hadError && saw_fixed_point_suffix) {
+ assert(isFract || isAccum);
+ }
}
/// ParseDecimalOrOctalCommon - This method is called for decimal or octal
@@ -717,7 +751,8 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
// If we have a hex digit other than 'e' (which denotes a FP exponent) then
// the code is using an incorrect base.
- if (isHexDigit(*s) && *s != 'e' && *s != 'E') {
+ if (isHexDigit(*s) && *s != 'e' && *s != 'E' &&
+ !isValidUDSuffix(PP.getLangOpts(), StringRef(s, ThisTokEnd - s))) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
diag::err_invalid_digit) << StringRef(s, 1) << (radix == 8 ? 1 : 0);
hadError = true;
@@ -738,15 +773,17 @@ void NumericLiteralParser::ParseDecimalOrOctalCommon(SourceLocation TokLoc){
s++;
radix = 10;
saw_exponent = true;
- if (*s == '+' || *s == '-') s++; // sign
+ if (s != ThisTokEnd && (*s == '+' || *s == '-')) s++; // sign
const char *first_non_digit = SkipDigits(s);
if (containsDigits(s, first_non_digit)) {
checkSeparator(TokLoc, s, CSK_BeforeDigits);
s = first_non_digit;
} else {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
- diag::err_exponent_has_no_digits);
- hadError = true;
+ if (!hadError) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ }
return;
}
}
@@ -768,12 +805,14 @@ bool NumericLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
if (!LangOpts.CPlusPlus14)
return false;
- // In C++1y, "s", "h", "min", "ms", "us", and "ns" are used in the library.
+ // In C++14, "s", "h", "min", "ms", "us", and "ns" are used in the library.
// Per tweaked N3660, "il", "i", and "if" are also used in the library.
+ // In C++2a "d" and "y" are used in the library.
return llvm::StringSwitch<bool>(Suffix)
.Cases("h", "min", "s", true)
.Cases("ms", "us", "ns", true)
.Cases("il", "i", "if", true)
+ .Cases("d", "y", LangOpts.CPlusPlus2a)
.Default(false);
}
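The new entries matter in ParseDecimalOrOctalCommon and the binary-literal path below: 'd' is itself a hex digit, so without the ud-suffix check a literal such as 14d would be rejected as containing an invalid digit. A small C++2a illustration (assuming <chrono> calendar support):

#include <chrono>
void chronoSuffixes() {
  using namespace std::chrono;
  auto dd = 14d;    // previously diagnosed as "invalid digit 'd'" in a
                    // decimal constant; now lexes as a ud-suffix literal
  auto yy = 2018y;  // 'y' is whitelisted alongside 'd' for the calendar types
}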
@@ -787,10 +826,12 @@ void NumericLiteralParser::checkSeparator(SourceLocation TokLoc,
} else if (Pos == ThisTokEnd)
return;
- if (isDigitSeparator(*Pos))
+ if (isDigitSeparator(*Pos)) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Pos - ThisTokBegin),
diag::err_digit_separator_not_between_digits)
<< IsAfterDigits;
+ hadError = true;
+ }
}
/// ParseNumberStartingWithZero - This method is called when the first character
@@ -840,12 +881,14 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
const char *Exponent = s;
s++;
saw_exponent = true;
- if (*s == '+' || *s == '-') s++; // sign
+ if (s != ThisTokEnd && (*s == '+' || *s == '-')) s++; // sign
const char *first_non_digit = SkipDigits(s);
if (!containsDigits(s, first_non_digit)) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
- diag::err_exponent_has_no_digits);
- hadError = true;
+ if (!hadError) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ }
return;
}
checkSeparator(TokLoc, s, CSK_BeforeDigits);
@@ -882,7 +925,9 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
s = SkipBinaryDigits(s);
if (s == ThisTokEnd) {
// Done.
- } else if (isHexDigit(*s)) {
+ } else if (isHexDigit(*s) &&
+ !isValidUDSuffix(PP.getLangOpts(),
+ StringRef(s, ThisTokEnd - s))) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
diag::err_invalid_digit) << StringRef(s, 1) << 2;
hadError = true;
@@ -1006,6 +1051,126 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
return Result.convertFromString(Str, APFloat::rmNearestTiesToEven);
}
+static inline bool IsExponentPart(char c) {
+ return c == 'p' || c == 'P' || c == 'e' || c == 'E';
+}
+
+bool NumericLiteralParser::GetFixedPointValue(llvm::APInt &StoreVal, unsigned Scale) {
+ assert(radix == 16 || radix == 10);
+
+ // Find how many digits are needed to store the whole literal.
+ unsigned NumDigits = SuffixBegin - DigitsBegin;
+ if (saw_period) --NumDigits;
+
+ // Initial scan of the exponent if it exists
+ bool ExpOverflowOccurred = false;
+ bool NegativeExponent = false;
+ const char *ExponentBegin;
+ uint64_t Exponent = 0;
+ int64_t BaseShift = 0;
+ if (saw_exponent) {
+ const char *Ptr = DigitsBegin;
+
+ while (!IsExponentPart(*Ptr)) ++Ptr;
+ ExponentBegin = Ptr;
+ ++Ptr;
+ NegativeExponent = *Ptr == '-';
+ if (NegativeExponent) ++Ptr;
+
+ unsigned NumExpDigits = SuffixBegin - Ptr;
+ if (alwaysFitsInto64Bits(radix, NumExpDigits)) {
+ llvm::StringRef ExpStr(Ptr, NumExpDigits);
+ llvm::APInt ExpInt(/*numBits=*/64, ExpStr, /*radix=*/10);
+ Exponent = ExpInt.getZExtValue();
+ } else {
+ ExpOverflowOccurred = true;
+ }
+
+ if (NegativeExponent) BaseShift -= Exponent;
+ else BaseShift += Exponent;
+ }
+
+ // Number of bits needed for decimal literal is
+ // ceil(NumDigits * log2(10)) Integral part
+ // + Scale Fractional part
+ // + ceil(Exponent * log2(10)) Exponent
+ // --------------------------------------------------
+ // ceil((NumDigits + Exponent) * log2(10)) + Scale
+ //
+ // But for simplicity in handling integers, we can round up log2(10) to 4,
+ // making:
+ // 4 * (NumDigits + Exponent) + Scale
+ //
+ // Number of digits needed for hexadecimal literal is
+ // 4 * NumDigits Integral part
+ // + Scale Fractional part
+ // + Exponent Exponent
+ // --------------------------------------------------
+ // (4 * NumDigits) + Scale + Exponent
+ uint64_t NumBitsNeeded;
+ if (radix == 10)
+ NumBitsNeeded = 4 * (NumDigits + Exponent) + Scale;
+ else
+ NumBitsNeeded = 4 * NumDigits + Exponent + Scale;
+
+ if (NumBitsNeeded > std::numeric_limits<unsigned>::max())
+ ExpOverflowOccurred = true;
+ llvm::APInt Val(static_cast<unsigned>(NumBitsNeeded), 0, /*isSigned=*/false);
+
+ bool FoundDecimal = false;
+
+ int64_t FractBaseShift = 0;
+ const char *End = saw_exponent ? ExponentBegin : SuffixBegin;
+ for (const char *Ptr = DigitsBegin; Ptr < End; ++Ptr) {
+ if (*Ptr == '.') {
+ FoundDecimal = true;
+ continue;
+ }
+
+ // Normal reading of an integer
+ unsigned C = llvm::hexDigitValue(*Ptr);
+ assert(C < radix && "NumericLiteralParser ctor should have rejected this");
+
+ Val *= radix;
+ Val += C;
+
+ if (FoundDecimal)
+ // Keep track of how much we will need to adjust this value by from the
+ // number of digits past the radix point.
+ --FractBaseShift;
+ }
+
+ // For a radix of 16, we will be multiplying by 2 instead of 16.
+ if (radix == 16) FractBaseShift *= 4;
+ BaseShift += FractBaseShift;
+
+ Val <<= Scale;
+
+ uint64_t Base = (radix == 16) ? 2 : 10;
+ if (BaseShift > 0) {
+ for (int64_t i = 0; i < BaseShift; ++i) {
+ Val *= Base;
+ }
+ } else if (BaseShift < 0) {
+ for (int64_t i = BaseShift; i < 0 && !Val.isNullValue(); ++i)
+ Val = Val.udiv(Base);
+ }
+
+ bool IntOverflowOccurred = false;
+ auto MaxVal = llvm::APInt::getMaxValue(StoreVal.getBitWidth());
+ if (Val.getBitWidth() > StoreVal.getBitWidth()) {
+ IntOverflowOccurred |= Val.ugt(MaxVal.zext(Val.getBitWidth()));
+ StoreVal = Val.trunc(StoreVal.getBitWidth());
+ } else if (Val.getBitWidth() < StoreVal.getBitWidth()) {
+ IntOverflowOccurred |= Val.zext(MaxVal.getBitWidth()).ugt(MaxVal);
+ StoreVal = Val.zext(StoreVal.getBitWidth());
+ } else {
+ StoreVal = Val;
+ }
+
+ return IntOverflowOccurred || ExpOverflowOccurred;
+}
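A worked example of the estimate and accumulation, for a hypothetical literal "1.25e2k" stored at Scale = 7:

//   NumDigits = 5  ("1.25e2" minus the period; the count is a safe
//                   overestimate since it still includes the exponent chars)
//   Exponent = 2, radix = 10
//   NumBitsNeeded = 4 * (5 + 2) + 7 = 35 bits for the working APInt
//   Digit loop over "1.25": Val = 125, FractBaseShift = -2
//   Val <<= Scale -> 16000;  BaseShift = +2 + (-2) = 0, no base adjustment
//   StoreVal = 16000 = 125 * 2^7, i.e. 125.0 at scale 7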
+
/// \verbatim
/// user-defined-character-literal: [C++11 lex.ext]
/// character-literal ud-suffix
@@ -1585,7 +1750,7 @@ static const char *resyncUTF8(const char *Err, const char *End) {
return Err;
}
-/// \brief This function copies from Fragment, which is a sequence of bytes
+/// This function copies from Fragment, which is a sequence of bytes
/// within Tok's contents (which begin at TokBegin) into ResultPtr.
/// Performs widening for multi-byte characters.
bool StringLiteralParser::CopyStringFragment(const Token &Tok,
diff --git a/lib/Lex/MacroArgs.cpp b/lib/Lex/MacroArgs.cpp
index 5c0f0623c3e1..3b6e2dc2411a 100644
--- a/lib/Lex/MacroArgs.cpp
+++ b/lib/Lex/MacroArgs.cpp
@@ -49,7 +49,8 @@ MacroArgs *MacroArgs::create(const MacroInfo *MI,
if (!ResultEnt) {
// Allocate memory for a MacroArgs object with the lexer tokens at the end,
// and construct the MacroArgs object.
- Result = new (std::malloc(totalSizeToAlloc<Token>(UnexpArgTokens.size())))
+ Result = new (
+ llvm::safe_malloc(totalSizeToAlloc<Token>(UnexpArgTokens.size())))
MacroArgs(UnexpArgTokens.size(), VarargsElided, MI->getNumParams());
} else {
Result = *ResultEnt;
@@ -272,7 +273,7 @@ Token MacroArgs::StringifyArgument(const Token *ArgToks,
// If the last character of the string is a \, and if it isn't escaped, this
// is an invalid string literal, diagnose it as specified in C99.
if (Result.back() == '\\') {
- // Count the number of consequtive \ characters. If even, then they are
+ // Count the number of consecutive \ characters. If even, then they are
// just escaped backslashes, otherwise it's an error.
unsigned FirstNonSlash = Result.size()-2;
// Guaranteed to find the starting " if nothing else.
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index b13767aa1d67..4ed69ecc465d 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -65,7 +65,7 @@ unsigned MacroInfo::getDefinitionLengthSlow(const SourceManager &SM) const {
return DefinitionLength;
}
-/// \brief Return true if the specified macro definition is equal to
+/// Return true if the specified macro definition is equal to
/// this macro in spelling, arguments, and whitespace.
///
/// \param Syntactically if true, the macro definitions can be identical even
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index b3ac10c5c5ae..f048a73a8ccc 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -54,6 +54,24 @@
using namespace clang;
+void ModuleMap::resolveLinkAsDependencies(Module *Mod) {
+ auto PendingLinkAs = PendingLinkAsModule.find(Mod->Name);
+ if (PendingLinkAs != PendingLinkAsModule.end()) {
+ for (auto &Name : PendingLinkAs->second) {
+ auto *M = findModule(Name.getKey());
+ if (M)
+ M->UseExportAsModuleLinkName = true;
+ }
+ }
+}
+
+void ModuleMap::addLinkAsDependency(Module *Mod) {
+ if (findModule(Mod->ExportAsModule))
+ Mod->UseExportAsModuleLinkName = true;
+ else
+ PendingLinkAsModule[Mod->ExportAsModule].insert(Mod->Name);
+}
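Together these cover both parse orders for 'export_as' (hypothetical modules A and B, with A declaring 'export_as B'):

//   B parsed first : addLinkAsDependency(A) finds B via findModule and sets
//                    A->UseExportAsModuleLinkName immediately.
//   A parsed first : A's name is queued under "B" in PendingLinkAsModule;
//                    once B's definition exists, resolveLinkAsDependencies(B)
//                    processes that entry and sets the flag on A.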
+
Module::HeaderKind ModuleMap::headerRoleToKind(ModuleHeaderRole Role) {
switch ((int)Role) {
default: llvm_unreachable("unknown header role");
@@ -133,7 +151,7 @@ Module *ModuleMap::resolveModuleId(const ModuleId &Id, Module *Mod,
return Context;
}
-/// \brief Append to \p Paths the set of paths needed to get to the
+/// Append to \p Paths the set of paths needed to get to the
/// subframework in which the given module lives.
static void appendSubframeworkPaths(Module *Mod,
SmallVectorImpl<char> &Path) {
@@ -152,10 +170,13 @@ static void appendSubframeworkPaths(Module *Mod,
llvm::sys::path::append(Path, "Frameworks", Paths[I-1] + ".framework");
}
-const FileEntry *
-ModuleMap::findHeader(Module *M,
- const Module::UnresolvedHeaderDirective &Header,
- SmallVectorImpl<char> &RelativePathName) {
+const FileEntry *ModuleMap::findHeader(
+ Module *M, const Module::UnresolvedHeaderDirective &Header,
+ SmallVectorImpl<char> &RelativePathName, bool &NeedsFramework) {
+ // Search for the header file within the module's home directory.
+ auto *Directory = M->Directory;
+ SmallString<128> FullPathName(Directory->getName());
+
auto GetFile = [&](StringRef Filename) -> const FileEntry * {
auto *File = SourceMgr.getFileManager().getFile(Filename);
if (!File ||
@@ -165,18 +186,8 @@ ModuleMap::findHeader(Module *M,
return File;
};
- if (llvm::sys::path::is_absolute(Header.FileName)) {
- RelativePathName.clear();
- RelativePathName.append(Header.FileName.begin(), Header.FileName.end());
- return GetFile(Header.FileName);
- }
-
- // Search for the header file within the module's home directory.
- auto *Directory = M->Directory;
- SmallString<128> FullPathName(Directory->getName());
- unsigned FullPathLength = FullPathName.size();
-
- if (M->isPartOfFramework()) {
+ auto GetFrameworkFile = [&]() -> const FileEntry * {
+ unsigned FullPathLength = FullPathName.size();
appendSubframeworkPaths(M, RelativePathName);
unsigned RelativePathLength = RelativePathName.size();
@@ -201,18 +212,46 @@ ModuleMap::findHeader(Module *M,
Header.FileName);
llvm::sys::path::append(FullPathName, RelativePathName);
return GetFile(FullPathName);
+ };
+
+ if (llvm::sys::path::is_absolute(Header.FileName)) {
+ RelativePathName.clear();
+ RelativePathName.append(Header.FileName.begin(), Header.FileName.end());
+ return GetFile(Header.FileName);
}
+ if (M->isPartOfFramework())
+ return GetFrameworkFile();
+
// Lookup for normal headers.
llvm::sys::path::append(RelativePathName, Header.FileName);
llvm::sys::path::append(FullPathName, RelativePathName);
- return GetFile(FullPathName);
+ auto *NormalHdrFile = GetFile(FullPathName);
+
+ if (M && !NormalHdrFile && Directory->getName().endswith(".framework")) {
+ // The lack of a 'framework' keyword in a module declaration is a simple
+ // mistake we can diagnose when the header exists within the proper
+ // framework-style path.
+ FullPathName.assign(Directory->getName());
+ RelativePathName.clear();
+ if (GetFrameworkFile()) {
+ Diags.Report(Header.FileNameLoc,
+ diag::warn_mmap_incomplete_framework_module_declaration)
+ << Header.FileName << M->getFullModuleName();
+ NeedsFramework = true;
+ }
+ return nullptr;
+ }
+
+ return NormalHdrFile;
}
void ModuleMap::resolveHeader(Module *Mod,
- const Module::UnresolvedHeaderDirective &Header) {
+ const Module::UnresolvedHeaderDirective &Header,
+ bool &NeedsFramework) {
SmallString<128> RelativePathName;
- if (const FileEntry *File = findHeader(Mod, Header, RelativePathName)) {
+ if (const FileEntry *File =
+ findHeader(Mod, Header, RelativePathName, NeedsFramework)) {
if (Header.IsUmbrella) {
const DirectoryEntry *UmbrellaDir = File->getDir();
if (Module *UmbrellaMod = UmbrellaDirs[UmbrellaDir])
@@ -281,6 +320,8 @@ ModuleMap::ModuleMap(SourceManager &SourceMgr, DiagnosticsEngine &Diags,
ModuleMap::~ModuleMap() {
for (auto &M : Modules)
delete M.getValue();
+ for (auto *M : ShadowModules)
+ delete M;
}
void ModuleMap::setTarget(const TargetInfo &Target) {
@@ -289,7 +330,7 @@ void ModuleMap::setTarget(const TargetInfo &Target) {
this->Target = &Target;
}
-/// \brief "Sanitize" a filename so that it can be used as an identifier.
+/// "Sanitize" a filename so that it can be used as an identifier.
static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
SmallVectorImpl<char> &Buffer) {
if (Name.empty())
@@ -326,7 +367,7 @@ static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
return Name;
}
-/// \brief Determine whether the given file name is the name of a builtin
+/// Determine whether the given file name is the name of a builtin
/// header, supplied by Clang to replace, override, or augment existing system
/// headers.
bool ModuleMap::isBuiltinHeader(StringRef FileName) {
@@ -473,7 +514,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// We have found a module, but we don't use it.
if (NotUsed) {
Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
- << RequestingModule->getFullModuleName() << Filename;
+ << RequestingModule->getTopLevelModule()->Name << Filename;
return;
}
@@ -484,7 +525,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
if (LangOpts.ModulesStrictDeclUse) {
Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
- << RequestingModule->getFullModuleName() << Filename;
+ << RequestingModule->getTopLevelModule()->Name << Filename;
} else if (RequestingModule && RequestingModuleIsModuleInterface &&
LangOpts.isCompilingModule()) {
// Do not diagnose when we are not compiling a module.
@@ -751,7 +792,7 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
// Try to find an existing module with this name.
if (Module *Sub = lookupModuleQualified(Name, Parent))
return std::make_pair(Sub, false);
-
+
// Create a new module with this name.
Module *Result = new Module(Name, SourceLocation(), Parent, IsFramework,
IsExplicit, NumCreatedModules++);
@@ -759,6 +800,7 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
if (LangOpts.CurrentModule == Name)
SourceModule = Result;
Modules[Name] = Result;
+ ModuleScopeIDs[Result] = CurrentModuleScopeID;
}
return std::make_pair(Result, true);
}
@@ -799,7 +841,7 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
return Result;
}
-/// \brief For a framework module, infer the framework against which we
+/// For a framework module, infer the framework against which we
/// should link.
static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
FileManager &FileMgr) {
@@ -927,6 +969,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
if (LangOpts.CurrentModule == ModuleName)
SourceModule = Result;
Modules[ModuleName] = Result;
+ ModuleScopeIDs[Result] = CurrentModuleScopeID;
}
Result->IsSystem |= Attrs.IsSystem;
@@ -999,6 +1042,21 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
return Result;
}
+Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
+ Module *ShadowingModule) {
+
+ // Create a new module with this name.
+ Module *Result =
+ new Module(Name, SourceLocation(), /*Parent=*/nullptr, IsFramework,
+ /*IsExplicit=*/false, NumCreatedModules++);
+ Result->ShadowingModule = ShadowingModule;
+ Result->IsAvailable = false;
+ ModuleScopeIDs[Result] = CurrentModuleScopeID;
+ ShadowModules.push_back(Result);
+
+ return Result;
+}
+
void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
Twine NameAsWritten) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
@@ -1019,7 +1077,8 @@ void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
}
void ModuleMap::addUnresolvedHeader(Module *Mod,
- Module::UnresolvedHeaderDirective Header) {
+ Module::UnresolvedHeaderDirective Header,
+ bool &NeedsFramework) {
// If there is a builtin counterpart to this file, add it now so it can
// wrap the system header.
if (resolveAsBuiltinHeader(Mod, Header)) {
@@ -1050,7 +1109,7 @@ void ModuleMap::addUnresolvedHeader(Module *Mod,
// We don't have stat information or can't defer looking this file up.
// Perform the lookup now.
- resolveHeader(Mod, Header);
+ resolveHeader(Mod, Header, NeedsFramework);
}
void ModuleMap::resolveHeaderDirectives(const FileEntry *File) const {
@@ -1070,10 +1129,11 @@ void ModuleMap::resolveHeaderDirectives(const FileEntry *File) const {
}
void ModuleMap::resolveHeaderDirectives(Module *Mod) const {
+ bool NeedsFramework = false;
for (auto &Header : Mod->UnresolvedHeaders)
// This operation is logically const; we're just changing how we represent
// the header information for this file.
- const_cast<ModuleMap*>(this)->resolveHeader(Mod, Header);
+ const_cast<ModuleMap*>(this)->resolveHeader(Mod, Header, NeedsFramework);
Mod->UnresolvedHeaders.clear();
}
@@ -1207,7 +1267,7 @@ bool ModuleMap::resolveConflicts(Module *Mod, bool Complain) {
namespace clang {
- /// \brief A token in a module map file.
+ /// A token in a module map file.
struct MMToken {
enum TokenKind {
Comma,
@@ -1277,37 +1337,40 @@ namespace clang {
Lexer &L;
SourceManager &SourceMgr;
- /// \brief Default target information, used only for string literal
+ /// Default target information, used only for string literal
/// parsing.
const TargetInfo *Target;
DiagnosticsEngine &Diags;
ModuleMap &Map;
- /// \brief The current module map file.
+ /// The current module map file.
const FileEntry *ModuleMapFile;
-
- /// \brief The directory that file names in this module map file should
+
+ /// Source location of the most recently parsed module declaration.
+ SourceLocation CurrModuleDeclLoc;
+
+ /// The directory that file names in this module map file should
/// be resolved relative to.
const DirectoryEntry *Directory;
- /// \brief Whether this module map is in a system header directory.
+ /// Whether this module map is in a system header directory.
bool IsSystem;
- /// \brief Whether an error occurred.
+ /// Whether an error occurred.
bool HadError = false;
- /// \brief Stores string data for the various string literals referenced
+ /// Stores string data for the various string literals referenced
/// during parsing.
llvm::BumpPtrAllocator StringData;
- /// \brief The current token.
+ /// The current token.
MMToken Tok;
- /// \brief The active module.
+ /// The active module.
Module *ActiveModule = nullptr;
- /// \brief Whether a module uses the 'requires excluded' hack to mark its
+ /// Whether a module uses the 'requires excluded' hack to mark its
/// contents as 'textual'.
///
/// On older Darwin SDK versions, 'requires excluded' is used to mark the
@@ -1317,10 +1380,10 @@ namespace clang {
/// 'textual' to match the original intent.
llvm::SmallPtrSet<Module *, 2> UsesRequiresExcludedHack;
- /// \brief Consume the current token and return its location.
+ /// Consume the current token and return its location.
SourceLocation consumeToken();
-
- /// \brief Skip tokens until we reach the a token with the given kind
+
+ /// Skip tokens until we reach a token with the given kind
/// (or the end of the file).
void skipUntil(MMToken::TokenKind K);
@@ -1340,25 +1403,29 @@ namespace clang {
void parseConflict();
void parseInferredModuleDecl(bool Framework, bool Explicit);
+ /// Private modules are canonicalized as Foo_Private. Clang provides extra
+ /// module map search logic to find the appropriate private module when PCH
+ /// is used with implicit module maps. Warn when private modules are written
+ /// in other ways (FooPrivate and Foo.Private), providing notes and fixits.
+ void diagnosePrivateModules(SourceLocation ExplicitLoc,
+ SourceLocation FrameworkLoc);
+
using Attributes = ModuleMap::Attributes;
bool parseOptionalAttributes(Attributes &Attrs);
public:
- explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
- const TargetInfo *Target,
- DiagnosticsEngine &Diags,
- ModuleMap &Map,
- const FileEntry *ModuleMapFile,
- const DirectoryEntry *Directory,
- bool IsSystem)
+ explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ const TargetInfo *Target, DiagnosticsEngine &Diags,
+ ModuleMap &Map, const FileEntry *ModuleMapFile,
+ const DirectoryEntry *Directory, bool IsSystem)
: L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
ModuleMapFile(ModuleMapFile), Directory(Directory),
IsSystem(IsSystem) {
Tok.clear();
consumeToken();
}
-
+
bool parseModuleMapFile();
bool terminatedByDirective() { return false; }
@@ -1559,7 +1626,7 @@ void ModuleMapParser::skipUntil(MMToken::TokenKind K) {
} while (true);
}
-/// \brief Parse a module-id.
+/// Parse a module-id.
///
/// module-id:
/// identifier
@@ -1588,21 +1655,21 @@ bool ModuleMapParser::parseModuleId(ModuleId &Id) {
namespace {
- /// \brief Enumerates the known attributes.
+ /// Enumerates the known attributes.
enum AttributeKind {
- /// \brief An unknown attribute.
+ /// An unknown attribute.
AT_unknown,
- /// \brief The 'system' attribute.
+ /// The 'system' attribute.
AT_system,
- /// \brief The 'extern_c' attribute.
+ /// The 'extern_c' attribute.
AT_extern_c,
- /// \brief The 'exhaustive' attribute.
+ /// The 'exhaustive' attribute.
AT_exhaustive,
- /// \brief The 'no_undeclared_includes' attribute.
+ /// The 'no_undeclared_includes' attribute.
AT_no_undeclared_includes
};
@@ -1612,16 +1679,14 @@ namespace {
/// module map search logic to find the appropriate private module when PCH
/// is used with implicit module maps. Warn when private modules are written
/// in other ways (FooPrivate and Foo.Private), providing notes and fixits.
-static void diagnosePrivateModules(const ModuleMap &Map,
- DiagnosticsEngine &Diags,
- const Module *ActiveModule) {
-
+void ModuleMapParser::diagnosePrivateModules(SourceLocation ExplicitLoc,
+ SourceLocation FrameworkLoc) {
auto GenNoteAndFixIt = [&](StringRef BadName, StringRef Canonical,
- const Module *M) {
+ const Module *M, SourceRange ReplLoc) {
auto D = Diags.Report(ActiveModule->DefinitionLoc,
diag::note_mmap_rename_top_level_private_module);
D << BadName << M->Name;
- D << FixItHint::CreateReplacement(ActiveModule->DefinitionLoc, Canonical);
+ D << FixItHint::CreateReplacement(ReplLoc, Canonical);
};
for (auto E = Map.module_begin(); E != Map.module_end(); ++E) {
@@ -1632,6 +1697,7 @@ static void diagnosePrivateModules(const ModuleMap &Map,
SmallString<128> FullName(ActiveModule->getFullModuleName());
if (!FullName.startswith(M->Name) && !FullName.endswith("Private"))
continue;
+ SmallString<128> FixedPrivModDecl;
SmallString<128> Canonical(M->Name);
Canonical.append("_Private");
@@ -1641,7 +1707,20 @@ static void diagnosePrivateModules(const ModuleMap &Map,
Diags.Report(ActiveModule->DefinitionLoc,
diag::warn_mmap_mismatched_private_submodule)
<< FullName;
- GenNoteAndFixIt(FullName, Canonical, M);
+
+ SourceLocation FixItInitBegin = CurrModuleDeclLoc;
+ if (FrameworkLoc.isValid())
+ FixItInitBegin = FrameworkLoc;
+ if (ExplicitLoc.isValid())
+ FixItInitBegin = ExplicitLoc;
+
+ if (FrameworkLoc.isValid() || ActiveModule->Parent->IsFramework)
+ FixedPrivModDecl.append("framework ");
+ FixedPrivModDecl.append("module ");
+ FixedPrivModDecl.append(Canonical);
+
+ GenNoteAndFixIt(FullName, FixedPrivModDecl, M,
+ SourceRange(FixItInitBegin, ActiveModule->DefinitionLoc));
continue;
}
@@ -1651,12 +1730,13 @@ static void diagnosePrivateModules(const ModuleMap &Map,
Diags.Report(ActiveModule->DefinitionLoc,
diag::warn_mmap_mismatched_private_module_name)
<< ActiveModule->Name;
- GenNoteAndFixIt(ActiveModule->Name, Canonical, M);
+ GenNoteAndFixIt(ActiveModule->Name, Canonical, M,
+ SourceRange(ActiveModule->DefinitionLoc));
}
}
}
-/// \brief Parse a module declaration.
+/// Parse a module declaration.
///
/// module-declaration:
/// 'extern' 'module' module-id string-literal
@@ -1684,6 +1764,7 @@ void ModuleMapParser::parseModuleDecl() {
// Parse 'explicit' or 'framework' keyword, if present.
SourceLocation ExplicitLoc;
+ SourceLocation FrameworkLoc;
bool Explicit = false;
bool Framework = false;
@@ -1695,7 +1776,7 @@ void ModuleMapParser::parseModuleDecl() {
// Parse 'framework' keyword, if present.
if (Tok.is(MMToken::FrameworkKeyword)) {
- consumeToken();
+ FrameworkLoc = consumeToken();
Framework = true;
}
@@ -1706,7 +1787,7 @@ void ModuleMapParser::parseModuleDecl() {
HadError = true;
return;
}
- consumeToken(); // 'module' keyword
+ CurrModuleDeclLoc = consumeToken(); // 'module' keyword
// If we have a wildcard for the module name, this is an inferred submodule.
// Parse it.
@@ -1787,6 +1868,7 @@ void ModuleMapParser::parseModuleDecl() {
SourceLocation LBraceLoc = consumeToken();
// Determine whether this (sub)module has already been defined.
+ Module *ShadowingModule = nullptr;
if (Module *Existing = Map.lookupModuleQualified(ModuleName, ActiveModule)) {
// We might see a (re)definition of a module that we already have a
// definition for in two cases:
@@ -1812,23 +1894,35 @@ void ModuleMapParser::parseModuleDecl() {
}
return;
}
-
- Diags.Report(ModuleNameLoc, diag::err_mmap_module_redefinition)
- << ModuleName;
- Diags.Report(Existing->DefinitionLoc, diag::note_mmap_prev_definition);
-
- // Skip the module definition.
- skipUntil(MMToken::RBrace);
- if (Tok.is(MMToken::RBrace))
- consumeToken();
-
- HadError = true;
- return;
+
+ if (!Existing->Parent && Map.mayShadowNewModule(Existing)) {
+ ShadowingModule = Existing;
+ } else {
+ // This is not a shadowed module decl; it is an illegal redefinition.
+ Diags.Report(ModuleNameLoc, diag::err_mmap_module_redefinition)
+ << ModuleName;
+ Diags.Report(Existing->DefinitionLoc, diag::note_mmap_prev_definition);
+
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+
+ HadError = true;
+ return;
+ }
}
// Start defining this module.
- ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, Framework,
- Explicit).first;
+ if (ShadowingModule) {
+ ActiveModule =
+ Map.createShadowedModule(ModuleName, Framework, ShadowingModule);
+ } else {
+ ActiveModule =
+ Map.findOrCreateModule(ModuleName, ActiveModule, Framework, Explicit)
+ .first;
+ }
+
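Roughly, the case being distinguished here (paths are illustrative, and the exact criteria live in Map.mayShadowNewModule, which is not shown in this hunk): a top-level module already loaded from an explicitly given module map may shadow a later, implicitly discovered definition of the same name, which is then recorded via createShadowedModule instead of being rejected as a redefinition:

  // Loaded first, e.g. via -fmodule-map-file=:
  //   /overrides/module.modulemap:    module Foo { header "foo.h" }
  // Found later by implicit module map search; created as a shadowed module:
  //   /sdk/include/module.modulemap:  module Foo { header "foo.h" }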
ActiveModule->DefinitionLoc = ModuleNameLoc;
if (Attrs.IsSystem || IsSystem)
ActiveModule->IsSystem = true;
@@ -1839,21 +1933,24 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule->NoUndeclaredIncludes = true;
ActiveModule->Directory = Directory;
+ StringRef MapFileName(ModuleMapFile->getName());
+ if (MapFileName.endswith("module.private.modulemap") ||
+ MapFileName.endswith("module_private.map")) {
+ ActiveModule->ModuleMapIsPrivate = true;
+ }
// Private modules named as FooPrivate, Foo.Private or similar are likely a
// user error; provide warnings, notes and fixits to direct users to use
// Foo_Private instead.
SourceLocation StartLoc =
SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
- StringRef MapFileName(ModuleMapFile->getName());
if (Map.HeaderInfo.getHeaderSearchOpts().ImplicitModuleMaps &&
!Diags.isIgnored(diag::warn_mmap_mismatched_private_submodule,
StartLoc) &&
!Diags.isIgnored(diag::warn_mmap_mismatched_private_module_name,
StartLoc) &&
- (MapFileName.endswith("module.private.modulemap") ||
- MapFileName.endswith("module_private.map")))
- diagnosePrivateModules(Map, Diags, ActiveModule);
+ ActiveModule->ModuleMapIsPrivate)
+ diagnosePrivateModules(ExplicitLoc, FrameworkLoc);
bool Done = false;
do {
@@ -1958,7 +2055,7 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule = PreviousActiveModule;
}
-/// \brief Parse an extern module declaration.
+/// Parse an extern module declaration.
///
/// extern module-declaration:
/// 'extern' 'module' module-id string-literal
@@ -2036,7 +2133,7 @@ static bool shouldAddRequirement(Module *M, StringRef Feature,
return true;
}
-/// \brief Parse a requires declaration.
+/// Parse a requires declaration.
///
/// requires-declaration:
/// 'requires' feature-list
@@ -2092,7 +2189,7 @@ void ModuleMapParser::parseRequiresDecl() {
} while (true);
}
-/// \brief Parse a header declaration.
+/// Parse a header declaration.
///
/// header-declaration:
/// 'textual'[opt] 'header' string-literal
@@ -2212,7 +2309,13 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
}
}
- Map.addUnresolvedHeader(ActiveModule, std::move(Header));
+ bool NeedsFramework = false;
+ Map.addUnresolvedHeader(ActiveModule, std::move(Header), NeedsFramework);
+
+ if (NeedsFramework && ActiveModule)
+ Diags.Report(CurrModuleDeclLoc, diag::note_mmap_add_framework_keyword)
+ << ActiveModule->getFullModuleName()
+ << FixItHint::CreateReplacement(CurrModuleDeclLoc, "framework module");
}
static int compareModuleHeaders(const Module::Header *A,
@@ -2220,7 +2323,7 @@ static int compareModuleHeaders(const Module::Header *A,
return A->NameAsWritten.compare(B->NameAsWritten);
}
-/// \brief Parse an umbrella directory declaration.
+/// Parse an umbrella directory declaration.
///
/// umbrella-dir-declaration:
/// umbrella string-literal
@@ -2298,7 +2401,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
Map.setUmbrellaDir(ActiveModule, Dir, DirName);
}
-/// \brief Parse a module export declaration.
+/// Parse a module export declaration.
///
/// export-declaration:
/// 'export' wildcard-module-id
@@ -2346,7 +2449,7 @@ void ModuleMapParser::parseExportDecl() {
ActiveModule->UnresolvedExports.push_back(Unresolved);
}
-/// \brief Parse a module export_as declaration.
+/// Parse a module export_as declaration.
///
/// export-as-declaration:
/// 'export_as' identifier
@@ -2378,10 +2481,12 @@ void ModuleMapParser::parseExportAsDecl() {
}
ActiveModule->ExportAsModule = Tok.getString();
+ Map.addLinkAsDependency(ActiveModule);
+
consumeToken();
}
-/// \brief Parse a module use declaration.
+/// Parse a module use declaration.
///
/// use-declaration:
/// 'use' wildcard-module-id
@@ -2398,7 +2503,7 @@ void ModuleMapParser::parseUseDecl() {
ActiveModule->UnresolvedDirectUses.push_back(ParsedModuleId);
}
-/// \brief Parse a link declaration.
+/// Parse a link declaration.
///
/// module-declaration:
/// 'link' 'framework'[opt] string-literal
@@ -2427,7 +2532,7 @@ void ModuleMapParser::parseLinkDecl() {
IsFramework));
}
-/// \brief Parse a configuration macro declaration.
+/// Parse a configuration macro declaration.
///
/// module-declaration:
/// 'config_macros' attributes[opt] config-macro-list?
@@ -2484,7 +2589,7 @@ void ModuleMapParser::parseConfigMacros() {
} while (true);
}
-/// \brief Format a module-id into a string.
+/// Format a module-id into a string.
static std::string formatModuleId(const ModuleId &Id) {
std::string result;
{
@@ -2500,7 +2605,7 @@ static std::string formatModuleId(const ModuleId &Id) {
return result;
}
-/// \brief Parse a conflict declaration.
+/// Parse a conflict declaration.
///
/// module-declaration:
/// 'conflict' module-id ',' string-literal
@@ -2534,7 +2639,7 @@ void ModuleMapParser::parseConflict() {
ActiveModule->UnresolvedConflicts.push_back(Conflict);
}
-/// \brief Parse an inferred module declaration (wildcard modules).
+/// Parse an inferred module declaration (wildcard modules).
///
/// module-declaration:
/// 'explicit'[opt] 'framework'[opt] 'module' * attributes[opt]
@@ -2687,7 +2792,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
}
}
-/// \brief Parse optional attributes.
+/// Parse optional attributes.
///
/// attributes:
/// attribute attributes
@@ -2762,7 +2867,7 @@ bool ModuleMapParser::parseOptionalAttributes(Attributes &Attrs) {
return HadError;
}
-/// \brief Parse a module map file.
+/// Parse a module map file.
///
/// module-map-file:
/// module-declaration*
@@ -2854,5 +2959,6 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
// Notify callbacks that we parsed it.
for (const auto &Cb : Callbacks)
Cb->moduleMapFileRead(Start, *File, IsSystem);
+
return Result;
}
diff --git a/lib/Lex/PPCaching.cpp b/lib/Lex/PPCaching.cpp
index f5e8cdc25d38..9758557d7b44 100644
--- a/lib/Lex/PPCaching.cpp
+++ b/lib/Lex/PPCaching.cpp
@@ -105,8 +105,10 @@ void Preprocessor::CachingLex(Token &Result) {
}
void Preprocessor::EnterCachingLexMode() {
- if (InCachingLexMode())
+ if (InCachingLexMode()) {
+ assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
return;
+ }
PushIncludeMacroStack();
CurLexerKind = CLK_CachingLexer;
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index ca3e70fd1060..d8dae73037a8 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Implements # directive processing for the Preprocessor.
+/// Implements # directive processing for the Preprocessor.
///
//===----------------------------------------------------------------------===//
@@ -78,7 +78,7 @@ Preprocessor::AllocateVisibilityMacroDirective(SourceLocation Loc,
return new (BP) VisibilityMacroDirective(Loc, isPublic);
}
-/// \brief Read and discard all tokens remaining on the current line until
+/// Read and discard all tokens remaining on the current line until
/// the tok::eod token is found.
void Preprocessor::DiscardUntilEndOfDirective() {
Token Tmp;
@@ -88,14 +88,14 @@ void Preprocessor::DiscardUntilEndOfDirective() {
} while (Tmp.isNot(tok::eod));
}
-/// \brief Enumerates possible cases of #define/#undef a reserved identifier.
+/// Enumerates possible cases of #define/#undef a reserved identifier.
enum MacroDiag {
MD_NoWarn, //> Not a reserved identifier
MD_KeywordDef, //> Macro hides keyword, enabled by default
MD_ReservedMacro //> #define or #undef reserved id, disabled by default
};
-/// \brief Checks if the specified identifier is reserved in the specified
+/// Checks if the specified identifier is reserved in the specified
/// language.
/// This function does not check if the identifier is a keyword.
static bool isReservedId(StringRef Text, const LangOptions &Lang) {
@@ -115,6 +115,25 @@ static bool isReservedId(StringRef Text, const LangOptions &Lang) {
return false;
}
+// The -fmodule-name option tells the compiler to textually include headers in
+// the specified module, meaning clang won't build the specified module. This is
+// useful in a number of situations; for instance, when building a library that
+// vends a module map, one might want to avoid hitting intermediate build
+// products containing the module map or avoid finding the system-installed
+// module map for that library.
+static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
+ StringRef ModuleName) {
+ StringRef TopLevelName = M->getTopLevelModuleName();
+
+ // When building framework Foo, we want to make sure that Foo *and* Foo_Private
+ // are textually included and that no module is built for either.
+ if (M->getTopLevelModule()->IsFramework && CurrentModule == ModuleName &&
+ !CurrentModule.endswith("_Private") && TopLevelName.endswith("_Private"))
+ TopLevelName = TopLevelName.drop_back(8);
+
+ return TopLevelName == CurrentModule;
+}
+
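A short walk-through of the helper with illustrative arguments; the second line assumes M's top-level module is a framework, which is what enables the _Private special case:

  // Building module Foo itself:
  //   isForModuleBuilding(Foo, /*CurrentModule=*/"Foo", /*ModuleName=*/"Foo") -> true
  // Foo_Private is textually included too while building framework Foo:
  //   isForModuleBuilding(Foo_Private, "Foo", "Foo")                          -> true
  // An unrelated module is still built as a module:
  //   isForModuleBuilding(Bar, "Foo", "Foo")                                  -> false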
static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
const LangOptions &Lang = PP.getLangOpts();
StringRef Text = II->getName();
@@ -277,7 +296,7 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
return false;
}
-/// \brief Lex and validate a macro name, which occurs after a
+/// Lex and validate a macro name, which occurs after a
/// \#define or \#undef.
///
/// This sets the token kind to eod and discards the rest of the macro line if
@@ -309,7 +328,7 @@ void Preprocessor::ReadMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
}
}
-/// \brief Ensure that the next token is a tok::eod token.
+/// Ensure that the next token is a tok::eod token.
///
/// If not, emit a diagnostic and consume up until the eod. If EnableMacros is
/// true, then we consider macros that expand to zero tokens as being ok.
@@ -558,7 +577,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// the #if block.
CurPPLexer->LexingRawMode = false;
- if (Callbacks)
+ // The last skipped range isn't actually skipped yet if it's truncated
+ // by the end of the preamble; we'll resume parsing after the preamble.
+ if (Callbacks && (Tok.isNot(tok::eof) || !isRecordingPreamble()))
Callbacks->SourceRangeSkipped(
SourceRange(HashTokenLoc, CurPPLexer->getSourceLocation()),
Tok.getLocation());
@@ -866,6 +887,22 @@ private:
bool save;
};
+/// Process a directive while looking for the through header.
+/// Only #include (to check if it is the through header) and #define (to warn
+/// about macros that don't match the PCH) are handled. All other directives
+/// are completely discarded.
+void Preprocessor::HandleSkippedThroughHeaderDirective(Token &Result,
+ SourceLocation HashLoc) {
+ if (const IdentifierInfo *II = Result.getIdentifierInfo()) {
+ if (II->getPPKeywordID() == tok::pp_include)
+ return HandleIncludeDirective(HashLoc, Result);
+ if (II->getPPKeywordID() == tok::pp_define)
+ return HandleDefineDirective(Result,
+ /*ImmediatelyAfterHeaderGuard=*/false);
+ }
+ DiscardUntilEndOfDirective();
+}
+
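For context, the through-header flow this serves spans two cc1 invocations; a hedged sketch, assuming the -pch-through-header= spelling that pairs with the PPOpts->PCHThroughHeader field used here (-include-pch populates ImplicitPCHInclude):

  clang -cc1 -emit-pch -pch-through-header=through.h -o t.pch t.cpp
  clang -cc1 -include-pch t.pch -pch-through-header=through.h t.cpp

In the second invocation, everything up to and including the #include of through.h is skipped, with only the directive handling above applied to the skipped region.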
/// HandleDirective - This callback is invoked when the lexer sees a # token
/// at the start of a line. This consumes the directive, modifies the
/// lexer/preprocessor state, and advances the lexer(s) so that the next token
@@ -927,6 +964,9 @@ void Preprocessor::HandleDirective(Token &Result) {
// and reset to previous state when returning from this function.
ResetMacroExpansionHelper helper(this);
+ if (SkippingUntilPCHThroughHeader)
+ return HandleSkippedThroughHeaderDirective(Result, SavedHash.getLocation());
+
switch (Result.getKind()) {
case tok::eod:
return; // null directive.
@@ -1107,7 +1147,7 @@ static bool GetLineValue(Token &DigitTok, unsigned &Val,
return false;
}
-/// \brief Handle a \#line directive: C99 6.10.4.
+/// Handle a \#line directive: C99 6.10.4.
///
/// The two acceptable forms are:
/// \verbatim
@@ -1343,7 +1383,7 @@ void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
// Read the rest of the line raw. We do this because we don't want macros
// to be expanded and we don't require that the tokens be valid preprocessing
// tokens. For example, this is allowed: "#warning ` 'foo". GCC does
- // collapse multiple consequtive white space between tokens, but this isn't
+ // collapse multiple consecutive white space between tokens, but this isn't
// specified by the standard.
SmallString<128> Message;
CurLexer->ReadToEndOfLine(&Message);
@@ -1393,7 +1433,7 @@ void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
}
}
-/// \brief Handle a #public directive.
+/// Handle a #public directive.
void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
Token MacroNameTok;
ReadMacroName(MacroNameTok, MU_Undef);
@@ -1420,7 +1460,7 @@ void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
MacroNameTok.getLocation(), /*IsPublic=*/true));
}
-/// \brief Handle a #private directive.
+/// Handle a #private directive.
void Preprocessor::HandleMacroPrivateDirective() {
Token MacroNameTok;
ReadMacroName(MacroNameTok, MU_Undef);
@@ -1496,7 +1536,7 @@ bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
return isAngled;
}
-// \brief Handle cases where the \#include name is expanded from a macro
+// Handle cases where the \#include name is expanded from a macro
// as multiple tokens, which need to be glued together.
//
// This occurs for code like:
@@ -1557,7 +1597,7 @@ bool Preprocessor::ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
return true;
}
-/// \brief Push a token onto the token stream containing an annotation.
+/// Push a token onto the token stream containing an annotation.
void Preprocessor::EnterAnnotationToken(SourceRange Range,
tok::TokenKind Kind,
void *AnnotationVal) {
@@ -1572,7 +1612,7 @@ void Preprocessor::EnterAnnotationToken(SourceRange Range,
EnterTokenStream(std::move(Tok), 1, true);
}
-/// \brief Produce a diagnostic informing the user that a #include or similar
+/// Produce a diagnostic informing the user that a #include or similar
/// was implicitly treated as a module import.
static void diagnoseAutoModuleImport(
Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
@@ -1655,12 +1695,18 @@ bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
DiagnosticsEngine &Diags, Module *M) {
Module::Requirement Requirement;
Module::UnresolvedHeaderDirective MissingHeader;
- if (M->isAvailable(LangOpts, TargetInfo, Requirement, MissingHeader))
+ Module *ShadowingModule = nullptr;
+ if (M->isAvailable(LangOpts, TargetInfo, Requirement, MissingHeader,
+ ShadowingModule))
return false;
if (MissingHeader.FileNameLoc.isValid()) {
Diags.Report(MissingHeader.FileNameLoc, diag::err_module_header_missing)
<< MissingHeader.IsUmbrella << MissingHeader.FileName;
+ } else if (ShadowingModule) {
+ Diags.Report(M->DefinitionLoc, diag::err_module_shadowed) << M->Name;
+ Diags.Report(ShadowingModule->DefinitionLoc,
+ diag::note_previous_definition);
} else {
// FIXME: Track the location at which the requirement was specified, and
// use it here.
@@ -1779,7 +1825,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
SmallString<128> NormalizedPath;
if (LangOpts.MSVCCompat) {
NormalizedPath = Filename.str();
-#ifndef LLVM_ON_WIN32
+#ifndef _WIN32
llvm::sys::path::native(NormalizedPath);
#endif
}
@@ -1835,6 +1881,12 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
}
+ if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) {
+ if (isPCHThroughHeader(File))
+ SkippingUntilPCHThroughHeader = false;
+ return;
+ }
+
// Should we enter the source file? Set to false if either the source file is
// known to have no effect beyond its effect on module visibility -- that is,
// if it's got an include guard that is already defined or is a modular header
@@ -1844,12 +1896,19 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (PPOpts->SingleFileParseMode)
ShouldEnter = false;
+ // Any diagnostics after the fatal error will not be visible. Since the
+ // compilation has already failed and errors in subsequently included files
+ // would not be visible anyway, avoid preprocessing those files.
+ if (ShouldEnter && Diags->hasFatalErrorOccurred())
+ ShouldEnter = false;
+
// Determine whether we should try to import the module for this #include, if
// there is one. Don't do so if precompiled module support is disabled or we
// are processing this module textually (because we're building the module).
if (ShouldEnter && File && SuggestedModule && getLangOpts().Modules &&
- SuggestedModule.getModule()->getTopLevelModuleName() !=
- getLangOpts().CurrentModule) {
+ !isForModuleBuilding(SuggestedModule.getModule(),
+ getLangOpts().CurrentModule,
+ getLangOpts().ModuleName)) {
// If this include corresponds to a module but that module is
// unavailable, diagnose the situation and bail out.
// FIXME: Remove this; loadModule does the same check (but produces
@@ -1940,7 +1999,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
HashLoc, IncludeTok,
LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled,
FilenameRange, File, SearchPath, RelativePath,
- ShouldEnter ? nullptr : SuggestedModule.getModule());
+ ShouldEnter ? nullptr : SuggestedModule.getModule(), FileCharacter);
if (SkipHeader && !SuggestedModule.getModule())
Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
}
@@ -1996,7 +2055,8 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// ShouldEnter is false because we are skipping the header. In that
// case, We are not importing the specified module.
if (SkipHeader && getLangOpts().CompilingPCH &&
- M->getTopLevelModuleName() == getLangOpts().CurrentModule)
+ isForModuleBuilding(M, getLangOpts().CurrentModule,
+ getLangOpts().ModuleName))
return;
makeModuleVisible(M, HashLoc);
@@ -2014,7 +2074,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// If the filename string was the result of macro expansions, set the include
// position on the file where it will be included and after the expansions.
if (IncludePos.isMacroID())
- IncludePos = SourceMgr.getExpansionRange(IncludePos).second;
+ IncludePos = SourceMgr.getExpansionRange(IncludePos).getEnd();
FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter);
assert(FID.isValid() && "Expected valid file ID");
@@ -2024,11 +2084,21 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Determine if we're switching to building a new submodule, and which one.
if (auto *M = SuggestedModule.getModule()) {
+ if (M->getTopLevelModule()->ShadowingModule) {
+ // We are building a submodule that belongs to a shadowed module. This
+ // means we find header files in the shadowed module.
+ Diag(M->DefinitionLoc, diag::err_module_build_shadowed_submodule)
+ << M->getFullModuleName();
+ Diag(M->getTopLevelModule()->ShadowingModule->DefinitionLoc,
+ diag::note_previous_definition);
+ return;
+ }
// When building a pch, -fmodule-name tells the compiler to textually
// include headers in the specified module. We are not building the
// specified module.
if (getLangOpts().CompilingPCH &&
- M->getTopLevelModuleName() == getLangOpts().CurrentModule)
+ isForModuleBuilding(M, getLangOpts().CurrentModule,
+ getLangOpts().ModuleName))
return;
assert(!CurLexerSubmodule && "should not have marked this as a module yet");
@@ -2548,7 +2618,15 @@ void Preprocessor::HandleDefineDirective(
}
}
-
+ // When skipping, just warn about macros that do not match.
+ if (SkippingUntilPCHThroughHeader) {
+ const MacroInfo *OtherMI = getMacroInfo(MacroNameTok.getIdentifierInfo());
+ if (!OtherMI || !MI->isIdenticalTo(*OtherMI, *this,
+ /*Syntactic=*/LangOpts.MicrosoftExt))
+ Diag(MI->getDefinitionLoc(), diag::warn_pp_macro_def_mismatch_with_pch)
+ << MacroNameTok.getIdentifierInfo();
+ return;
+ }
// Finally, if this identifier already had a macro defined for it, verify that
// the macro bodies are identical, and issue diagnostics if they are not.
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index d8431827e9cd..b1ed0e10c6fc 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -363,7 +363,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
NumBits = TI.getChar16Width();
else if (Literal.isUTF32())
NumBits = TI.getChar32Width();
- else
+ else // char or char8_t
NumBits = TI.getCharWidth();
// Set the width.
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index e484e9c4c3a3..352814d715fa 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/HeaderSearch.h"
@@ -226,7 +227,7 @@ void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
CurLexerKind = CLK_TokenLexer;
}
-/// \brief Compute the relative path that names the given file relative to
+/// Compute the relative path that names the given file relative to
/// the given directory.
static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
const FileEntry *File,
@@ -264,7 +265,7 @@ void Preprocessor::PropagateLineStartLeadingSpaceInfo(Token &Result) {
// but it might if they're empty?
}
-/// \brief Determine the location to use as the end of the buffer for a lexer.
+/// Determine the location to use as the end of the buffer for a lexer.
///
/// If the file ends with a newline, form the EOF token on the newline itself,
/// rather than "on the line following it", which doesn't exist. This makes
@@ -425,6 +426,8 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
PragmaAssumeNonNullLoc = SourceLocation();
}
+ bool LeavingPCHThroughHeader = false;
+
// If this is a #include'd file, pop it off the include stack and continue
// lexing the #includer file.
if (!IncludeMacroStack.empty()) {
@@ -444,6 +447,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
CurPPLexer = nullptr;
+ recomputeCurLexerKind();
return true;
}
@@ -480,6 +484,12 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
Result.setAnnotationValue(M);
}
+ bool FoundPCHThroughHeader = false;
+ if (CurPPLexer && creatingPCHWithThroughHeader() &&
+ isPCHThroughHeader(
+ SourceMgr.getFileEntryForID(CurPPLexer->getFileID())))
+ FoundPCHThroughHeader = true;
+
// We're done with the #included file.
RemoveTopOfLexerStack();
@@ -499,8 +509,16 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (ExitedFromPredefinesFile)
replayPreambleConditionalStack();
- // Client should lex another token unless we generated an EOM.
- return LeavingSubmodule;
+ if (!isEndOfMacro && CurPPLexer && FoundPCHThroughHeader &&
+ (isInPrimaryFile() ||
+ CurPPLexer->getFileID() == getPredefinesFileID())) {
+ // Leaving the through header. Continue directly to end of main file
+ // processing.
+ LeavingPCHThroughHeader = true;
+ } else {
+ // Client should lex another token unless we generated an EOM.
+ return LeavingSubmodule;
+ }
}
// If this is the end of the main file, form an EOF token.
@@ -521,6 +539,12 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
Result.setLocation(Result.getLocation().getLocWithOffset(-1));
}
+ if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
+ // Reached the end of the compilation without finding the through header.
+ Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 0;
+ }
+
if (!isIncrementalProcessingEnabled())
// We're done with lexing.
CurLexer.reset();
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 41633f90c34d..d9992e00f8c0 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -41,7 +41,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
@@ -1016,7 +1015,7 @@ MacroArgs *Preprocessor::ReadMacroCallArgumentList(Token &MacroName,
return MacroArgs::create(MI, ArgTokens, isVarargsElided, *this);
}
-/// \brief Keeps macro expanded tokens for TokenLexers.
+/// Keeps macro expanded tokens for TokenLexers.
//
/// Works like a stack; a TokenLexer adds the macro-expanded tokens that it is
/// going to lex to the cache, and when it finishes, the tokens are removed
@@ -1100,183 +1099,11 @@ static bool HasFeature(const Preprocessor &PP, StringRef Feature) {
if (Feature.startswith("__") && Feature.endswith("__") && Feature.size() >= 4)
Feature = Feature.substr(2, Feature.size() - 4);
+#define FEATURE(Name, Predicate) .Case(#Name, Predicate)
return llvm::StringSwitch<bool>(Feature)
- .Case("address_sanitizer",
- LangOpts.Sanitize.hasOneOf(SanitizerKind::Address |
- SanitizerKind::KernelAddress))
- .Case("hwaddress_sanitizer",
- LangOpts.Sanitize.hasOneOf(SanitizerKind::HWAddress))
- .Case("assume_nonnull", true)
- .Case("attribute_analyzer_noreturn", true)
- .Case("attribute_availability", true)
- .Case("attribute_availability_with_message", true)
- .Case("attribute_availability_app_extension", true)
- .Case("attribute_availability_with_version_underscores", true)
- .Case("attribute_availability_tvos", true)
- .Case("attribute_availability_watchos", true)
- .Case("attribute_availability_with_strict", true)
- .Case("attribute_availability_with_replacement", true)
- .Case("attribute_availability_in_templates", true)
- .Case("attribute_cf_returns_not_retained", true)
- .Case("attribute_cf_returns_retained", true)
- .Case("attribute_cf_returns_on_parameters", true)
- .Case("attribute_deprecated_with_message", true)
- .Case("attribute_deprecated_with_replacement", true)
- .Case("attribute_ext_vector_type", true)
- .Case("attribute_ns_returns_not_retained", true)
- .Case("attribute_ns_returns_retained", true)
- .Case("attribute_ns_consumes_self", true)
- .Case("attribute_ns_consumed", true)
- .Case("attribute_cf_consumed", true)
- .Case("attribute_objc_ivar_unused", true)
- .Case("attribute_objc_method_family", true)
- .Case("attribute_overloadable", true)
- .Case("attribute_unavailable_with_message", true)
- .Case("attribute_unused_on_fields", true)
- .Case("attribute_diagnose_if_objc", true)
- .Case("blocks", LangOpts.Blocks)
- .Case("c_thread_safety_attributes", true)
- .Case("cxx_exceptions", LangOpts.CXXExceptions)
- .Case("cxx_rtti", LangOpts.RTTI && LangOpts.RTTIData)
- .Case("enumerator_attributes", true)
- .Case("nullability", true)
- .Case("nullability_on_arrays", true)
- .Case("memory_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Memory))
- .Case("thread_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Thread))
- .Case("dataflow_sanitizer",
- LangOpts.Sanitize.has(SanitizerKind::DataFlow))
- .Case("efficiency_sanitizer",
- LangOpts.Sanitize.hasOneOf(SanitizerKind::Efficiency))
- .Case("scudo", LangOpts.Sanitize.hasOneOf(SanitizerKind::Scudo))
- // Objective-C features
- .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
- .Case("objc_arc", LangOpts.ObjCAutoRefCount)
- .Case("objc_arc_weak", LangOpts.ObjCWeak)
- .Case("objc_default_synthesize_properties", LangOpts.ObjC2)
- .Case("objc_fixed_enum", LangOpts.ObjC2)
- .Case("objc_instancetype", LangOpts.ObjC2)
- .Case("objc_kindof", LangOpts.ObjC2)
- .Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
- .Case("objc_nonfragile_abi", LangOpts.ObjCRuntime.isNonFragile())
- .Case("objc_property_explicit_atomic",
- true) // Does clang support explicit "atomic" keyword?
- .Case("objc_protocol_qualifier_mangling", true)
- .Case("objc_weak_class", LangOpts.ObjCRuntime.hasWeakClassImport())
- .Case("ownership_holds", true)
- .Case("ownership_returns", true)
- .Case("ownership_takes", true)
- .Case("objc_bool", true)
- .Case("objc_subscripting", LangOpts.ObjCRuntime.isNonFragile())
- .Case("objc_array_literals", LangOpts.ObjC2)
- .Case("objc_dictionary_literals", LangOpts.ObjC2)
- .Case("objc_boxed_expressions", LangOpts.ObjC2)
- .Case("objc_boxed_nsvalue_expressions", LangOpts.ObjC2)
- .Case("arc_cf_code_audited", true)
- .Case("objc_bridge_id", true)
- .Case("objc_bridge_id_on_typedefs", true)
- .Case("objc_generics", LangOpts.ObjC2)
- .Case("objc_generics_variance", LangOpts.ObjC2)
- .Case("objc_class_property", LangOpts.ObjC2)
- // C11 features
- .Case("c_alignas", LangOpts.C11)
- .Case("c_alignof", LangOpts.C11)
- .Case("c_atomic", LangOpts.C11)
- .Case("c_generic_selections", LangOpts.C11)
- .Case("c_static_assert", LangOpts.C11)
- .Case("c_thread_local",
- LangOpts.C11 && PP.getTargetInfo().isTLSSupported())
- // C++11 features
- .Case("cxx_access_control_sfinae", LangOpts.CPlusPlus11)
- .Case("cxx_alias_templates", LangOpts.CPlusPlus11)
- .Case("cxx_alignas", LangOpts.CPlusPlus11)
- .Case("cxx_alignof", LangOpts.CPlusPlus11)
- .Case("cxx_atomic", LangOpts.CPlusPlus11)
- .Case("cxx_attributes", LangOpts.CPlusPlus11)
- .Case("cxx_auto_type", LangOpts.CPlusPlus11)
- .Case("cxx_constexpr", LangOpts.CPlusPlus11)
- .Case("cxx_constexpr_string_builtins", LangOpts.CPlusPlus11)
- .Case("cxx_decltype", LangOpts.CPlusPlus11)
- .Case("cxx_decltype_incomplete_return_types", LangOpts.CPlusPlus11)
- .Case("cxx_default_function_template_args", LangOpts.CPlusPlus11)
- .Case("cxx_defaulted_functions", LangOpts.CPlusPlus11)
- .Case("cxx_delegating_constructors", LangOpts.CPlusPlus11)
- .Case("cxx_deleted_functions", LangOpts.CPlusPlus11)
- .Case("cxx_explicit_conversions", LangOpts.CPlusPlus11)
- .Case("cxx_generalized_initializers", LangOpts.CPlusPlus11)
- .Case("cxx_implicit_moves", LangOpts.CPlusPlus11)
- .Case("cxx_inheriting_constructors", LangOpts.CPlusPlus11)
- .Case("cxx_inline_namespaces", LangOpts.CPlusPlus11)
- .Case("cxx_lambdas", LangOpts.CPlusPlus11)
- .Case("cxx_local_type_template_args", LangOpts.CPlusPlus11)
- .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus11)
- .Case("cxx_noexcept", LangOpts.CPlusPlus11)
- .Case("cxx_nullptr", LangOpts.CPlusPlus11)
- .Case("cxx_override_control", LangOpts.CPlusPlus11)
- .Case("cxx_range_for", LangOpts.CPlusPlus11)
- .Case("cxx_raw_string_literals", LangOpts.CPlusPlus11)
- .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus11)
- .Case("cxx_rvalue_references", LangOpts.CPlusPlus11)
- .Case("cxx_strong_enums", LangOpts.CPlusPlus11)
- .Case("cxx_static_assert", LangOpts.CPlusPlus11)
- .Case("cxx_thread_local",
- LangOpts.CPlusPlus11 && PP.getTargetInfo().isTLSSupported())
- .Case("cxx_trailing_return", LangOpts.CPlusPlus11)
- .Case("cxx_unicode_literals", LangOpts.CPlusPlus11)
- .Case("cxx_unrestricted_unions", LangOpts.CPlusPlus11)
- .Case("cxx_user_literals", LangOpts.CPlusPlus11)
- .Case("cxx_variadic_templates", LangOpts.CPlusPlus11)
- // C++14 features
- .Case("cxx_aggregate_nsdmi", LangOpts.CPlusPlus14)
- .Case("cxx_binary_literals", LangOpts.CPlusPlus14)
- .Case("cxx_contextual_conversions", LangOpts.CPlusPlus14)
- .Case("cxx_decltype_auto", LangOpts.CPlusPlus14)
- .Case("cxx_generic_lambdas", LangOpts.CPlusPlus14)
- .Case("cxx_init_captures", LangOpts.CPlusPlus14)
- .Case("cxx_relaxed_constexpr", LangOpts.CPlusPlus14)
- .Case("cxx_return_type_deduction", LangOpts.CPlusPlus14)
- .Case("cxx_variable_templates", LangOpts.CPlusPlus14)
- // NOTE: For features covered by SD-6, it is preferable to provide *only*
- // the SD-6 macro and not a __has_feature check.
-
- // C++ TSes
- //.Case("cxx_runtime_arrays", LangOpts.CPlusPlusTSArrays)
- //.Case("cxx_concepts", LangOpts.CPlusPlusTSConcepts)
- // FIXME: Should this be __has_feature or __has_extension?
- //.Case("raw_invocation_type", LangOpts.CPlusPlus)
- // Type traits
- // N.B. Additional type traits should not be added to the following list.
- // Instead, they should be detected by has_extension.
- .Case("has_nothrow_assign", LangOpts.CPlusPlus)
- .Case("has_nothrow_copy", LangOpts.CPlusPlus)
- .Case("has_nothrow_constructor", LangOpts.CPlusPlus)
- .Case("has_trivial_assign", LangOpts.CPlusPlus)
- .Case("has_trivial_copy", LangOpts.CPlusPlus)
- .Case("has_trivial_constructor", LangOpts.CPlusPlus)
- .Case("has_trivial_destructor", LangOpts.CPlusPlus)
- .Case("has_virtual_destructor", LangOpts.CPlusPlus)
- .Case("is_abstract", LangOpts.CPlusPlus)
- .Case("is_base_of", LangOpts.CPlusPlus)
- .Case("is_class", LangOpts.CPlusPlus)
- .Case("is_constructible", LangOpts.CPlusPlus)
- .Case("is_convertible_to", LangOpts.CPlusPlus)
- .Case("is_empty", LangOpts.CPlusPlus)
- .Case("is_enum", LangOpts.CPlusPlus)
- .Case("is_final", LangOpts.CPlusPlus)
- .Case("is_literal", LangOpts.CPlusPlus)
- .Case("is_standard_layout", LangOpts.CPlusPlus)
- .Case("is_pod", LangOpts.CPlusPlus)
- .Case("is_polymorphic", LangOpts.CPlusPlus)
- .Case("is_sealed", LangOpts.CPlusPlus && LangOpts.MicrosoftExt)
- .Case("is_trivial", LangOpts.CPlusPlus)
- .Case("is_trivially_assignable", LangOpts.CPlusPlus)
- .Case("is_trivially_constructible", LangOpts.CPlusPlus)
- .Case("is_trivially_copyable", LangOpts.CPlusPlus)
- .Case("is_union", LangOpts.CPlusPlus)
- .Case("modules", LangOpts.Modules)
- .Case("safe_stack", LangOpts.Sanitize.has(SanitizerKind::SafeStack))
- .Case("tls", PP.getTargetInfo().isTLSSupported())
- .Case("underlying_type", LangOpts.CPlusPlus)
+#include "clang/Basic/Features.def"
.Default(false);
+#undef FEATURE
}
/// HasExtension - Return true if we recognize and implement the feature
@@ -1299,35 +1126,13 @@ static bool HasExtension(const Preprocessor &PP, StringRef Extension) {
Extension.size() >= 4)
Extension = Extension.substr(2, Extension.size() - 4);
- // Because we inherit the feature list from HasFeature, this string switch
- // must be less restrictive than HasFeature's.
+ // Because we inherit the feature list from HasFeature, this string switch
+ // must be less restrictive than HasFeature's.
+#define EXTENSION(Name, Predicate) .Case(#Name, Predicate)
return llvm::StringSwitch<bool>(Extension)
- // C11 features supported by other languages as extensions.
- .Case("c_alignas", true)
- .Case("c_alignof", true)
- .Case("c_atomic", true)
- .Case("c_generic_selections", true)
- .Case("c_static_assert", true)
- .Case("c_thread_local", PP.getTargetInfo().isTLSSupported())
- // C++11 features supported by other languages as extensions.
- .Case("cxx_atomic", LangOpts.CPlusPlus)
- .Case("cxx_deleted_functions", LangOpts.CPlusPlus)
- .Case("cxx_explicit_conversions", LangOpts.CPlusPlus)
- .Case("cxx_inline_namespaces", LangOpts.CPlusPlus)
- .Case("cxx_local_type_template_args", LangOpts.CPlusPlus)
- .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus)
- .Case("cxx_override_control", LangOpts.CPlusPlus)
- .Case("cxx_range_for", LangOpts.CPlusPlus)
- .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus)
- .Case("cxx_rvalue_references", LangOpts.CPlusPlus)
- .Case("cxx_variadic_templates", LangOpts.CPlusPlus)
- // C++14 features supported by other languages as extensions.
- .Case("cxx_binary_literals", true)
- .Case("cxx_init_captures", LangOpts.CPlusPlus11)
- .Case("cxx_variable_templates", LangOpts.CPlusPlus)
- // Miscellaneous language extensions
- .Case("overloadable_unmarked", true)
- .Default(false);
+#include "clang/Basic/Features.def"
+ .Default(false);
+#undef EXTENSION
}
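The deleted .Case entries now live in clang/Basic/Features.def and expand through the FEATURE/EXTENSION macros defined just before each StringSwitch. A representative pair of entries, with predicates copied from the removed lines (the full contents of Features.def are not shown in this patch):

  FEATURE(cxx_rtti, LangOpts.RTTI && LangOpts.RTTIData)
  EXTENSION(cxx_binary_literals, true)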
/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
@@ -1343,7 +1148,7 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
// These expressions are only allowed within a preprocessor directive.
if (!PP.isParsingIfOrElifDirective()) {
- PP.Diag(LParenLoc, diag::err_pp_directive_required) << II->getName();
+ PP.Diag(LParenLoc, diag::err_pp_directive_required) << II;
// Return a valid identifier token.
assert(Tok.is(tok::identifier));
Tok.setIdentifierInfo(II);
@@ -1482,7 +1287,7 @@ static bool EvaluateHasIncludeNext(Token &Tok,
return EvaluateHasIncludeCommon(Tok, II, PP, Lookup, LookupFromFile);
}
-/// \brief Process single-argument builtin feature-like macros that return
+/// Process single-argument builtin feature-like macros that return
/// integer values.
static void EvaluateFeatureLikeBuiltinMacro(llvm::raw_svector_ostream& OS,
Token &Tok, IdentifierInfo *II,
@@ -1585,7 +1390,7 @@ already_lexed:
}
}
-/// \brief Helper function to return the IdentifierInfo structure of a Token
+/// Helper function to return the IdentifierInfo structure of a Token
/// or generate a diagnostic if none available.
static IdentifierInfo *ExpectFeatureIdentifierInfo(Token &Tok,
Preprocessor &PP,
@@ -1686,7 +1491,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// can matter for a function-like macro that expands to contain __LINE__.
// Skip down through expansion points until we find a file loc for the
// end of the expansion history.
- Loc = SourceMgr.getExpansionRange(Loc).second;
+ Loc = SourceMgr.getExpansionRange(Loc).getEnd();
PresumedLoc PLoc = SourceMgr.getPresumedLoc(Loc);
// __LINE__ expands to a simple numeric value.
@@ -1800,12 +1605,21 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
+ const LangOptions &LangOpts = getLangOpts();
if (!II)
return false;
- else if (II->getBuiltinID() != 0)
+ else if (II->getBuiltinID() != 0) {
+ switch (II->getBuiltinID()) {
+ case Builtin::BI__builtin_operator_new:
+ case Builtin::BI__builtin_operator_delete:
+ // Denotes the date of the behavior change that added support for calling
+ // arbitrary usual allocation and deallocation functions. Required by libc++.
+ return 201802;
+ default:
+ return true;
+ }
return true;
- else {
- const LangOptions &LangOpts = getLangOpts();
+ } else {
return llvm::StringSwitch<bool>(II->getName())
.Case("__make_integer_seq", LangOpts.CPlusPlus)
.Case("__type_pack_element", LangOpts.CPlusPlus)
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index d6c20a13d27b..45cff56dcaa1 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -23,8 +23,8 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DJB.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
@@ -145,7 +145,7 @@ bool PTHLexer::LexEndOfFile(Token &Result) {
ParsingPreprocessorDirective = false; // Done parsing the "line".
return true; // Have a token.
}
-
+
assert(!LexingRawMode);
// If we are in a #if directive, emit an error.
@@ -215,7 +215,7 @@ bool PTHLexer::SkipBlock() {
// Compute the actual memory address of the '#' token data for this entry.
HashEntryI = TokBuf + Offset;
- // Optmization: "Sibling jumping". #if...#else...#endif blocks can
+ // Optimization: "Sibling jumping". #if...#else...#endif blocks can
// contain nested blocks. In the side-table we can jump over these
// nested blocks instead of doing a linear search if the next "sibling"
// entry is not at a location greater than LastHashTokPtr.
@@ -336,7 +336,7 @@ public:
using offset_type = unsigned;
static hash_value_type ComputeHash(internal_key_type x) {
- return llvm::HashString(x.second);
+ return llvm::djbHash(x.second);
}
static std::pair<unsigned, unsigned>
@@ -396,7 +396,7 @@ public:
}
static hash_value_type ComputeHash(const internal_key_type& a) {
- return llvm::HashString(StringRef(a.first, a.second));
+ return llvm::djbHash(StringRef(a.first, a.second));
}
// This hopefully will just get inlined and removed by the optimizer.
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index b8acd92521fb..930c5f6b069c 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -148,7 +148,7 @@ void Preprocessor::HandlePragmaDirective(SourceLocation IntroducerLoc,
namespace {
-/// \brief Helper class for \see Preprocessor::Handle_Pragma.
+/// Helper class for \see Preprocessor::Handle_Pragma.
class LexingFor_PragmaRAII {
Preprocessor &PP;
bool InMacroArgPreExpansion;
@@ -588,7 +588,7 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
return LookUpIdentifierInfo(MacroTok);
}
-/// \brief Handle \#pragma push_macro.
+/// Handle \#pragma push_macro.
///
/// The syntax is:
/// \code
@@ -611,7 +611,7 @@ void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
PragmaPushMacroInfo[IdentInfo].push_back(MI);
}
-/// \brief Handle \#pragma pop_macro.
+/// Handle \#pragma pop_macro.
///
/// The syntax is:
/// \code
@@ -1053,6 +1053,20 @@ struct PragmaDebugHandler : public PragmaHandler {
PP.Diag(Identifier, diag::warn_pragma_debug_missing_argument)
<< II->getName();
}
+ } else if (II->isStr("diag_mapping")) {
+ Token DiagName;
+ PP.LexUnexpandedToken(DiagName);
+ if (DiagName.is(tok::eod))
+ PP.getDiagnostics().dump();
+ else if (DiagName.is(tok::string_literal) && !DiagName.hasUDSuffix()) {
+ StringLiteralParser Literal(DiagName, PP);
+ if (Literal.hadError)
+ return;
+ PP.getDiagnostics().dump(Literal.GetString());
+ } else {
+ PP.Diag(DiagName, diag::warn_pragma_debug_missing_argument)
+ << II->getName();
+ }
} else if (II->isStr("llvm_fatal_error")) {
llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
} else if (II->isStr("llvm_unreachable")) {
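The new diag_mapping key can be exercised from source; a minimal sketch, where the quoted argument is illustrative and is simply forwarded to DiagnosticsEngine::dump():

  #pragma clang __debug diag_mapping            // dump the mapping state of all diagnostics
  #pragma clang __debug diag_mapping "-Wundef"  // restrict the dump to one name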
@@ -1601,44 +1615,6 @@ struct PragmaPopMacroHandler : public PragmaHandler {
}
};
-// Pragma STDC implementations.
-
-/// PragmaSTDC_FENV_ACCESSHandler - "\#pragma STDC FENV_ACCESS ...".
-struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
- PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
-
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
- Token &Tok) override {
- tok::OnOffSwitch OOS;
- if (PP.LexOnOffSwitch(OOS))
- return;
- if (OOS == tok::OOS_ON)
- PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
- }
-};
-
-/// PragmaSTDC_CX_LIMITED_RANGEHandler - "\#pragma STDC CX_LIMITED_RANGE ...".
-struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
- PragmaSTDC_CX_LIMITED_RANGEHandler() : PragmaHandler("CX_LIMITED_RANGE") {}
-
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
- Token &Tok) override {
- tok::OnOffSwitch OOS;
- PP.LexOnOffSwitch(OOS);
- }
-};
-
-/// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
-struct PragmaSTDC_UnknownHandler : public PragmaHandler {
- PragmaSTDC_UnknownHandler() = default;
-
- void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
- Token &UnknownTok) override {
- // C99 6.10.6p2, unknown forms are not allowed.
- PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
- }
-};
-
/// PragmaARCCFCodeAuditedHandler -
/// \#pragma clang arc_cf_code_audited begin/end
struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
@@ -1754,7 +1730,7 @@ struct PragmaAssumeNonNullHandler : public PragmaHandler {
}
};
-/// \brief Handle "\#pragma region [...]"
+/// Handle "\#pragma region [...]"
///
/// The syntax is
/// \code
@@ -1814,17 +1790,15 @@ void Preprocessor::RegisterBuiltinPragmas() {
ModuleHandler->AddPragma(new PragmaModuleEndHandler());
ModuleHandler->AddPragma(new PragmaModuleBuildHandler());
ModuleHandler->AddPragma(new PragmaModuleLoadHandler());
-
- AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
- AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
- AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
+
+ // Add region pragmas.
+ AddPragmaHandler(new PragmaRegionHandler("region"));
+ AddPragmaHandler(new PragmaRegionHandler("endregion"));
// MS extensions.
if (LangOpts.MicrosoftExt) {
AddPragmaHandler(new PragmaWarningHandler());
AddPragmaHandler(new PragmaIncludeAliasHandler());
- AddPragmaHandler(new PragmaRegionHandler("region"));
- AddPragmaHandler(new PragmaRegionHandler("endregion"));
}
// Pragmas added by plugins
@@ -1843,17 +1817,4 @@ void Preprocessor::IgnorePragmas() {
// in Preprocessor::RegisterBuiltinPragmas().
AddPragmaHandler("GCC", new EmptyPragmaHandler());
AddPragmaHandler("clang", new EmptyPragmaHandler());
- if (PragmaHandler *NS = PragmaHandlers->FindHandler("STDC")) {
- // Preprocessor::RegisterBuiltinPragmas() already registers
- // PragmaSTDC_UnknownHandler as the empty handler, so remove it first,
- // otherwise there will be an assert about a duplicate handler.
- PragmaNamespace *STDCNamespace = NS->getIfNamespace();
- assert(STDCNamespace &&
- "Invalid namespace, registered as a regular pragma handler!");
- if (PragmaHandler *Existing = STDCNamespace->FindHandler("", false)) {
- RemovePragmaHandler("STDC", Existing);
- delete Existing;
- }
- }
- AddPragmaHandler("STDC", new EmptyPragmaHandler());
}
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index af439dbfa584..b59820003b56 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -54,7 +54,7 @@ InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
PreprocessingRecord::PreprocessingRecord(SourceManager &SM) : SourceMgr(SM) {}
-/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// Returns a pair of [Begin, End) iterators of preprocessed entities
/// that source range \p Range encompasses.
llvm::iterator_range<PreprocessingRecord::iterator>
PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
@@ -88,7 +88,7 @@ static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID,
return SM.isInFileID(SM.getFileLoc(Loc), FID);
}
-/// \brief Returns true if the preprocessed entity that \arg PPEI iterator
+/// Returns true if the preprocessed entity that the \arg PPEI iterator
/// points to comes from the file \arg FID.
///
/// Can be used to avoid implicit deserializations of preallocated
@@ -132,7 +132,7 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
FID, SourceMgr);
}
-/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// Returns a pair of [Begin, End) iterators of preprocessed entities
/// that source range \arg R encompasses.
std::pair<int, int>
PreprocessingRecord::getPreprocessedEntitiesInRangeSlow(SourceRange Range) {
@@ -329,12 +329,29 @@ unsigned PreprocessingRecord::allocateLoadedEntities(unsigned NumEntities) {
return Result;
}
+unsigned PreprocessingRecord::allocateSkippedRanges(unsigned NumRanges) {
+ unsigned Result = SkippedRanges.size();
+ SkippedRanges.resize(SkippedRanges.size() + NumRanges);
+ SkippedRangesAllLoaded = false;
+ return Result;
+}
+
+void PreprocessingRecord::ensureSkippedRangesLoaded() {
+ if (SkippedRangesAllLoaded || !ExternalSource)
+ return;
+ for (unsigned Index = 0; Index != SkippedRanges.size(); ++Index) {
+ if (SkippedRanges[Index].isInvalid())
+ SkippedRanges[Index] = ExternalSource->ReadSkippedRange(Index);
+ }
+ SkippedRangesAllLoaded = true;
+}
+
void PreprocessingRecord::RegisterMacroDefinition(MacroInfo *Macro,
MacroDefinitionRecord *Def) {
MacroDefinitions[Macro] = Def;
}
-/// \brief Retrieve the preprocessed entity at the given ID.
+/// Retrieve the preprocessed entity at the given ID.
PreprocessedEntity *PreprocessingRecord::getPreprocessedEntity(PPEntityID PPID){
if (PPID.ID < 0) {
unsigned Index = -PPID.ID - 1;
@@ -351,7 +368,7 @@ PreprocessedEntity *PreprocessingRecord::getPreprocessedEntity(PPEntityID PPID){
return PreprocessedEntities[Index];
}
-/// \brief Retrieve the loaded preprocessed entity at the given index.
+/// Retrieve the loaded preprocessed entity at the given index.
PreprocessedEntity *
PreprocessingRecord::getLoadedPreprocessedEntity(unsigned Index) {
assert(Index < LoadedPreprocessedEntities.size() &&
@@ -418,6 +435,7 @@ void PreprocessingRecord::Defined(const Token &MacroNameTok,
void PreprocessingRecord::SourceRangeSkipped(SourceRange Range,
SourceLocation EndifLoc) {
+ assert(Range.isValid());
SkippedRanges.emplace_back(Range.getBegin(), EndifLoc);
}
@@ -453,7 +471,8 @@ void PreprocessingRecord::InclusionDirective(
const FileEntry *File,
StringRef SearchPath,
StringRef RelativePath,
- const Module *Imported) {
+ const Module *Imported,
+ SrcMgr::CharacteristicKind FileType) {
InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
@@ -497,5 +516,6 @@ size_t PreprocessingRecord::getTotalMemory() const {
return BumpAlloc.getTotalMemory()
+ llvm::capacity_in_bytes(MacroDefinitions)
+ llvm::capacity_in_bytes(PreprocessedEntities)
- + llvm::capacity_in_bytes(LoadedPreprocessedEntities);
+ + llvm::capacity_in_bytes(LoadedPreprocessedEntities)
+ + llvm::capacity_in_bytes(SkippedRanges);
}
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 7d789e780113..0217a2e60ede 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -85,12 +85,14 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
IdentifierInfoLookup *IILookup, bool OwnsHeaders,
TranslationUnitKind TUKind)
: PPOpts(std::move(PPOpts)), Diags(&diags), LangOpts(opts),
- FileMgr(Headers.getFileMgr()), SourceMgr(SM),
- PCMCache(PCMCache), ScratchBuf(new ScratchBuffer(SourceMgr)),
- HeaderInfo(Headers), TheModuleLoader(TheModuleLoader),
- ExternalSource(nullptr), Identifiers(opts, IILookup),
- PragmaHandlers(new PragmaNamespace(StringRef())), TUKind(TUKind),
- SkipMainFilePreamble(0, true),
+ FileMgr(Headers.getFileMgr()), SourceMgr(SM), PCMCache(PCMCache),
+ ScratchBuf(new ScratchBuffer(SourceMgr)), HeaderInfo(Headers),
+ TheModuleLoader(TheModuleLoader), ExternalSource(nullptr),
+ // As the language options may not have been loaded yet (when
+ // deserializing an ASTUnit), adding keywords to the identifier table is
+ // deferred to Preprocessor::Initialize().
+ Identifiers(IILookup), PragmaHandlers(new PragmaNamespace(StringRef())),
+ TUKind(TUKind), SkipMainFilePreamble(0, true),
CurSubmoduleState(&NullSubmoduleState) {
OwnsHeaderSearch = OwnsHeaders;
@@ -147,6 +149,11 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
Ident_AbnormalTermination = nullptr;
}
+ // If using a PCH with a through header, start skipping tokens.
+ if (!this->PPOpts->PCHThroughHeader.empty() &&
+ !this->PPOpts->ImplicitPCHInclude.empty())
+ SkippingUntilPCHThroughHeader = true;
+
if (this->PPOpts->GeneratePreamble)
PreambleConditionalStack.startRecording();
}
@@ -190,6 +197,9 @@ void Preprocessor::Initialize(const TargetInfo &Target,
// Initialize information about built-ins.
BuiltinInfo.InitializeTarget(Target, AuxTarget);
HeaderInfo.setTarget(Target);
+
+ // Populate the identifier table with info about keywords for the current language.
+ Identifiers.AddKeywords(LangOpts);
}
void Preprocessor::InitializeForModelFile() {
@@ -328,7 +338,7 @@ Preprocessor::macro_end(bool IncludeExternalMacros) const {
return CurSubmoduleState->Macros.end();
}
-/// \brief Compares macro tokens with a specified token value sequence.
+/// Compares macro tokens with a specified token value sequence.
static bool MacroDefinitionEquals(const MacroInfo *MI,
ArrayRef<TokenValue> Tokens) {
return Tokens.size() == MI->getNumTokens() &&
@@ -482,6 +492,22 @@ void Preprocessor::CreateString(StringRef Str, Token &Tok,
Tok.setLiteralData(DestPtr);
}
+SourceLocation Preprocessor::SplitToken(SourceLocation Loc, unsigned Length) {
+ auto &SM = getSourceManager();
+ SourceLocation SpellingLoc = SM.getSpellingLoc(Loc);
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(SpellingLoc);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return SourceLocation();
+
+ // FIXME: We could consider re-using spelling for tokens we see repeatedly.
+ const char *DestPtr;
+ SourceLocation Spelling =
+ ScratchBuf->getToken(Buffer.data() + LocInfo.second, Length, DestPtr);
+ return SM.createTokenSplitLoc(Spelling, Loc, Loc.getLocWithOffset(Length));
+}
+
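A hedged usage sketch for the new SplitToken helper; the classic client is splitting a '>>' token during template-argument parsing (PP, RAngleLoc, and the surrounding parser state are assumptions):

// Assumed context: PP is a Preprocessor and RAngleLoc the location of a '>>'
// token that must be treated as two '>' tokens.
clang::SourceLocation FirstGtLoc = PP.SplitToken(RAngleLoc, /*Length=*/1);
// FirstGtLoc points at a scratch-buffer copy of the first character, wired to
// the original token via createTokenSplitLoc, as implemented above.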
Module *Preprocessor::getCurrentModule() {
if (!getLangOpts().isCompilingModule())
return nullptr;
@@ -530,6 +556,72 @@ void Preprocessor::EnterMainSourceFile() {
// Start parsing the predefines.
EnterSourceFile(FID, nullptr, SourceLocation());
+
+ if (!PPOpts->PCHThroughHeader.empty()) {
+ // Lookup and save the FileID for the through header. If it isn't found
+ // in the search path, it's a fatal error.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File = LookupFile(
+ SourceLocation(), PPOpts->PCHThroughHeader,
+ /*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr, CurDir,
+ /*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
+ /*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr);
+ if (!File) {
+ Diag(SourceLocation(), diag::err_pp_through_header_not_found)
+ << PPOpts->PCHThroughHeader;
+ return;
+ }
+ setPCHThroughHeaderFileID(
+ SourceMgr.createFileID(File, SourceLocation(), SrcMgr::C_User));
+ }
+
+ // Skip tokens from the Predefines and if needed the main file.
+ if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader)
+ SkipTokensUntilPCHThroughHeader();
+}
+
+void Preprocessor::setPCHThroughHeaderFileID(FileID FID) {
+ assert(PCHThroughHeaderFileID.isInvalid() &&
+ "PCHThroughHeaderFileID already set!");
+ PCHThroughHeaderFileID = FID;
+}
+
+bool Preprocessor::isPCHThroughHeader(const FileEntry *FE) {
+ assert(PCHThroughHeaderFileID.isValid() &&
+ "Invalid PCH through header FileID");
+ return FE == SourceMgr.getFileEntryForID(PCHThroughHeaderFileID);
+}
+
+bool Preprocessor::creatingPCHWithThroughHeader() {
+ return TUKind == TU_Prefix && !PPOpts->PCHThroughHeader.empty() &&
+ PCHThroughHeaderFileID.isValid();
+}
+
+bool Preprocessor::usingPCHWithThroughHeader() {
+ return TUKind != TU_Prefix && !PPOpts->PCHThroughHeader.empty() &&
+ PCHThroughHeaderFileID.isValid();
+}
+
+/// Skip tokens until after the #include of the through header.
+/// Tokens in the predefines file and the main file may be skipped. If the end
+/// of the predefines file is reached, skipping continues into the main file.
+/// If the end of the main file is reached, it's a fatal error.
+void Preprocessor::SkipTokensUntilPCHThroughHeader() {
+ bool ReachedMainFileEOF = false;
+ Token Tok;
+ while (true) {
+ bool InPredefines = (CurLexer->getFileID() == getPredefinesFileID());
+ CurLexer->Lex(Tok);
+ if (Tok.is(tok::eof) && !InPredefines) {
+ ReachedMainFileEOF = true;
+ break;
+ }
+ if (!SkippingUntilPCHThroughHeader)
+ break;
+ }
+ if (ReachedMainFileEOF)
+ Diag(SourceLocation(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 1;
}
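To make the skip logic concrete, an illustrative source layout for the through-header feature (file names are invented; the driver option that selects the through header is not shown in this diff):

// through.h — the designated "through header" the PCH was built from.
// main.cpp — when compiled against that PCH, every token up to and including
// this #include is skipped, per SkipTokensUntilPCHThroughHeader:
#include "through.h"

int main() { return 0; } // real parsing effectively begins here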
void Preprocessor::replayPreambleConditionalStack() {
@@ -624,7 +716,7 @@ void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) {
Diag(Identifier,it->second) << Identifier.getIdentifierInfo();
}
-/// \brief Returns a diagnostic message kind for reporting a future keyword as
+/// Returns a diagnostic message kind for reporting a future keyword as
/// appropriate for the identifier and specified language.
static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
const LangOptions &LangOpts) {
@@ -773,13 +865,18 @@ void Preprocessor::Lex(Token &Result) {
}
} while (!ReturnedToken);
- if (Result.is(tok::code_completion))
+ if (Result.is(tok::code_completion) && Result.getIdentifierInfo()) {
+    // Remember the identifier preceding the code completion token.
setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
+    // Set IdentifierInfo to null to avoid confusing code that handles both
+ // identifiers and completion tokens.
+ Result.setIdentifierInfo(nullptr);
+ }
LastTokenWasAt = Result.is(tok::at);
}
-/// \brief Lex a token following the 'import' contextual keyword.
+/// Lex a token following the 'import' contextual keyword.
///
void Preprocessor::LexAfterModuleImport(Token &Result) {
// Figure out what kind of lexer we actually have.
diff --git a/lib/Lex/PreprocessorLexer.cpp b/lib/Lex/PreprocessorLexer.cpp
index 2e85f46f52c5..9f930c3a3c6a 100644
--- a/lib/Lex/PreprocessorLexer.cpp
+++ b/lib/Lex/PreprocessorLexer.cpp
@@ -28,7 +28,7 @@ PreprocessorLexer::PreprocessorLexer(Preprocessor *pp, FileID fid)
InitialNumSLocEntries = pp->getSourceManager().local_sloc_entry_size();
}
-/// \brief After the preprocessor has parsed a \#include, lex and
+/// After the preprocessor has parsed a \#include, lex and
/// (potentially) macro expand the filename.
void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
assert(ParsingPreprocessorDirective &&
diff --git a/lib/Lex/ScratchBuffer.cpp b/lib/Lex/ScratchBuffer.cpp
index e0f3966fce48..dc03e16daa8b 100644
--- a/lib/Lex/ScratchBuffer.cpp
+++ b/lib/Lex/ScratchBuffer.cpp
@@ -74,11 +74,11 @@ void ScratchBuffer::AllocScratchBuffer(unsigned RequestLen) {
// Get scratch buffer. Zero-initialize it so it can be dumped into a PCH file
// deterministically.
- std::unique_ptr<llvm::MemoryBuffer> OwnBuf =
- llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>");
- llvm::MemoryBuffer &Buf = *OwnBuf;
+ std::unique_ptr<llvm::WritableMemoryBuffer> OwnBuf =
+ llvm::WritableMemoryBuffer::getNewMemBuffer(RequestLen,
+ "<scratch space>");
+ CurBuffer = OwnBuf->getBufferStart();
FileID FID = SourceMgr.createFileID(std::move(OwnBuf));
BufferStartLoc = SourceMgr.getLocForStartOfFile(FID);
- CurBuffer = const_cast<char*>(Buf.getBufferStart());
BytesUsed = 0;
}
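The WritableMemoryBuffer switch removes the const_cast; the same idiom in isolation (uses only the llvm::WritableMemoryBuffer API already referenced in this hunk):

#include "llvm/Support/MemoryBuffer.h"

void demoWritableBuffer() {
  auto Buf = llvm::WritableMemoryBuffer::getNewMemBuffer(64, "<demo>");
  char *P = Buf->getBufferStart(); // mutable; no const_cast required
  P[0] = 'x';
}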
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index d7f1c7a93fda..184b1b390287 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -483,7 +483,7 @@ void TokenLexer::ExpandFunctionArguments() {
bool VaArgsPseudoPaste = false;
// If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
// that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when
- // the expander trys to paste ',' with the first token of the __VA_ARGS__
+ // the expander tries to paste ',' with the first token of the __VA_ARGS__
// expansion.
if (NonEmptyPasteBefore && ResultToks.size() >= 2 &&
ResultToks[ResultToks.size()-2].is(tok::comma) &&
@@ -574,7 +574,7 @@ void TokenLexer::ExpandFunctionArguments() {
}
}
-/// \brief Checks if two tokens form wide string literal.
+/// Checks if two tokens form wide string literal.
static bool isWideStringLiteralFromMacro(const Token &FirstTok,
const Token &SecondTok) {
return FirstTok.is(tok::identifier) &&
@@ -865,9 +865,9 @@ bool TokenLexer::pasteTokens(Token &LHSTok, ArrayRef<Token> TokenStream,
EndLoc = getExpansionLocForMacroDefLoc(EndLoc);
FileID MacroFID = SM.getFileID(MacroExpansionStart);
while (SM.getFileID(StartLoc) != MacroFID)
- StartLoc = SM.getImmediateExpansionRange(StartLoc).first;
+ StartLoc = SM.getImmediateExpansionRange(StartLoc).getBegin();
while (SM.getFileID(EndLoc) != MacroFID)
- EndLoc = SM.getImmediateExpansionRange(EndLoc).second;
+ EndLoc = SM.getImmediateExpansionRange(EndLoc).getEnd();
LHSTok.setLocation(SM.createExpansionLoc(LHSTok.getLocation(), StartLoc, EndLoc,
LHSTok.getLength()));
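These two hunks track SourceManager::getImmediateExpansionRange now returning a CharSourceRange rather than a std::pair of SourceLocations; the new call shape, sketched:

// Post-change shape (sketch; SM and Loc assumed in scope):
clang::CharSourceRange R = SM.getImmediateExpansionRange(Loc);
clang::SourceLocation Begin = R.getBegin(), End = R.getEnd();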
@@ -918,7 +918,7 @@ void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok, SourceLocation OpLoc) {
PP.HandleMicrosoftCommentPaste(Tok);
}
-/// \brief If \arg loc is a file ID and points inside the current macro
+/// If \arg loc is a file ID and points inside the current macro
/// definition, returns the appropriate source location pointing at the
/// macro expansion source location entry, otherwise it returns an invalid
/// SourceLocation.
@@ -937,7 +937,7 @@ TokenLexer::getExpansionLocForMacroDefLoc(SourceLocation loc) const {
return MacroExpansionStart.getLocWithOffset(relativeOffset);
}
-/// \brief Finds the tokens that are consecutive (from the same FileID)
+/// Finds the tokens that are consecutive (from the same FileID)
/// creates a single SLocEntry, and assigns SourceLocations to each token that
/// point to that SLocEntry. e.g for
/// assert(foo == bar);
@@ -1007,7 +1007,7 @@ static void updateConsecutiveMacroArgTokens(SourceManager &SM,
}
}
-/// \brief Creates SLocEntries and updates the locations of macro argument
+/// Creates SLocEntries and updates the locations of macro argument
/// tokens to their new expanded locations.
///
/// \param ArgIdSpellLoc the location of the macro argument id inside the macro
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index d018d4c08ed9..696506099e58 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -21,6 +21,7 @@
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaConsumer.h"
+#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include <cstdio>
#include <memory>
@@ -121,6 +122,10 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
bool OldCollectStats = PrintStats;
std::swap(OldCollectStats, S.CollectStats);
+ // Initialize the template instantiation observer chain.
+ // FIXME: See note on "finalize" below.
+ initialize(S.TemplateInstCallbacks, S);
+
ASTConsumer *Consumer = &S.getASTConsumer();
std::unique_ptr<Parser> ParseOP(
@@ -136,6 +141,12 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
CleanupParser(ParseOP.get());
S.getPreprocessor().EnterMainSourceFile();
+ if (!S.getPreprocessor().getCurrentLexer()) {
+    // If a PCH through header is specified but the source never includes it,
+    // there will be no tokens to parse and no current lexer.
+ return;
+ }
+
P.Initialize();
Parser::DeclGroupPtrTy ADecl;
@@ -158,6 +169,13 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
Consumer->HandleTranslationUnit(S.getASTContext());
+ // Finalize the template instantiation observer chain.
+  // FIXME: This (and the initialization above) should be done in the Sema
+  // class, but Sema does not have a reliable "finalize" hook (it has a
+  // destructor, which is not guaranteed to run under "-disable-free").
+  // So we do the initialization above and the finalization here:
+ finalize(S.TemplateInstCallbacks, S);
+
std::swap(OldCollectStats, S.CollectStats);
if (PrintStats) {
llvm::errs() << "\nSTATISTICS:\n";
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index 2b3d4ba85bd8..27d48be0e3eb 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -22,12 +22,10 @@ using namespace clang;
/// ParseCXXInlineMethodDef - We parsed and verified that the specified
/// Declarator is a well formed C++ inline method definition. Now lex its body
/// and store its tokens for parsing after the C++ class is complete.
-NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
- AttributeList *AccessAttrs,
- ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo,
- const VirtSpecifiers& VS,
- SourceLocation PureSpecLoc) {
+NamedDecl *Parser::ParseCXXInlineMethodDef(
+ AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS,
+ SourceLocation PureSpecLoc) {
assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
assert(Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try, tok::equal) &&
"Current token not a '{', ':', '=', or 'try'!");
@@ -312,6 +310,8 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
Actions.ActOnDelayedCXXMethodParameter(getCurScope(), Param);
std::unique_ptr<CachedTokens> Toks = std::move(LM.DefaultArgs[I].Toks);
if (Toks) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
// Mark the end of the default argument so that we know when to stop when
// we parse it later on.
Token LastDefaultArgToken = Toks->back();
@@ -384,6 +384,8 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// Parse a delayed exception-specification, if there is one.
if (CachedTokens *Toks = LM.ExceptionSpecTokens) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
// Add the 'stop' token.
Token LastExceptionSpecToken = Toks->back();
Token ExceptionSpecEnd;
@@ -489,6 +491,8 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) {
++CurTemplateDepthTracker;
}
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
assert(!LM.Toks.empty() && "Empty body!");
Token LastBodyToken = LM.Toks.back();
Token BodyEnd;
@@ -609,6 +613,8 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
if (!MI.Field || MI.Field->isInvalidDecl())
return;
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
MI.Toks.push_back(Tok);
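Each of the late-parsing paths above now installs a ParenBraceBracketBalancer; the underlying RAII save/restore idea, sketched with a simplified stand-in class (BalancerSketch is hypothetical; the real helper lives in the parser's RAII utilities):

// BalancerSketch: save the three delimiter depths on entry, restore on exit,
// so an unbalanced replayed token stream cannot corrupt the outer parser.
class BalancerSketch {
  unsigned &Paren, &Brace, &Bracket;
  unsigned P, B, K;

public:
  BalancerSketch(unsigned &Paren, unsigned &Brace, unsigned &Bracket)
      : Paren(Paren), Brace(Brace), Bracket(Bracket), P(Paren), B(Brace),
        K(Bracket) {}
  ~BalancerSketch() { Paren = P; Brace = B; Bracket = K; }
};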
@@ -733,7 +739,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
}
}
-/// \brief Consume tokens and store them in the passed token container until
+/// Consume tokens and store them in the passed token container until
/// we've passed the try keyword and constructor initializers and have consumed
/// the opening brace of the function body. The opening brace will be consumed
/// if and only if there was no error.
@@ -937,7 +943,7 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
}
}
-/// \brief Consume and store tokens from the '?' to the ':' in a conditional
+/// Consume and store tokens from the '?' to the ':' in a conditional
/// expression.
bool Parser::ConsumeAndStoreConditional(CachedTokens &Toks) {
// Consume '?'.
@@ -962,7 +968,7 @@ bool Parser::ConsumeAndStoreConditional(CachedTokens &Toks) {
return true;
}
-/// \brief A tentative parsing action that can also revert token annotations.
+/// A tentative parsing action that can also revert token annotations.
class Parser::UnannotatedTentativeParsingAction : public TentativeParsingAction {
public:
explicit UnannotatedTentativeParsingAction(Parser &Self,
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 2a999399fb50..4f6bb08bdc64 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -15,6 +15,7 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
@@ -22,14 +23,12 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/ScopedPrinter.h"
using namespace clang;
@@ -43,18 +42,18 @@ using namespace clang;
///
/// Called type-id in C++.
TypeResult Parser::ParseTypeName(SourceRange *Range,
- Declarator::TheContext Context,
+ DeclaratorContext Context,
AccessSpecifier AS,
Decl **OwnedType,
ParsedAttributes *Attrs) {
DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
- if (DSC == DSC_normal)
- DSC = DSC_type_specifier;
+ if (DSC == DeclSpecContext::DSC_normal)
+ DSC = DeclSpecContext::DSC_type_specifier;
// Parse the common declaration-specifiers piece.
DeclSpec DS(AttrFactory);
if (Attrs)
- DS.addAttributes(Attrs->getList());
+ DS.addAttributes(*Attrs);
ParseSpecifierQualifierList(DS, AS, DSC);
if (OwnedType)
*OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : nullptr;
@@ -71,7 +70,7 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
-/// \brief Normalizes an attribute name by dropping prefixed and suffixed __.
+/// Normalizes an attribute name by dropping prefixed and suffixed __.
static StringRef normalizeAttrName(StringRef Name) {
if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
return Name.drop_front(2).drop_back(2);
@@ -163,14 +162,14 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
if (Tok.isNot(tok::l_paren)) {
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_GNU);
+ ParsedAttr::AS_GNU);
continue;
}
// Handle "parameterized" attributes
if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc, nullptr,
- SourceLocation(), AttributeList::AS_GNU, D);
+ SourceLocation(), ParsedAttr::AS_GNU, D);
continue;
}
@@ -207,7 +206,7 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
}
}
-/// \brief Determine whether the given attribute has an identifier argument.
+/// Determine whether the given attribute has an identifier argument.
static bool attributeHasIdentifierArg(const IdentifierInfo &II) {
#define CLANG_ATTR_IDENTIFIER_ARG_LIST
return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
@@ -216,7 +215,16 @@ static bool attributeHasIdentifierArg(const IdentifierInfo &II) {
#undef CLANG_ATTR_IDENTIFIER_ARG_LIST
}
-/// \brief Determine whether the given attribute parses a type argument.
+/// Determine whether the given attribute has a variadic identifier argument.
+static bool attributeHasVariadicIdentifierArg(const IdentifierInfo &II) {
+#define CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
+}
+
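Between each #define/#undef pair, the generated AttrParserStringSwitches.inc expands to a run of .Case entries; the pattern in isolation, with invented attribute names:

#include "llvm/ADT/StringSwitch.h"

static bool hasVariadicIdentifierArgDemo(llvm::StringRef Name) {
  return llvm::StringSwitch<bool>(Name)
      .Case("example_attr", true) // stand-ins for the generated entries
      .Case("another_attr", true)
      .Default(false);
}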
+/// Determine whether the given attribute parses a type argument.
static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
#define CLANG_ATTR_TYPE_ARG_LIST
return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
@@ -225,7 +233,7 @@ static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
#undef CLANG_ATTR_TYPE_ARG_LIST
}
-/// \brief Determine whether the given attribute requires parsing its arguments
+/// Determine whether the given attribute requires parsing its arguments
/// in an unevaluated context or not.
static bool attributeParsedArgsUnevaluated(const IdentifierInfo &II) {
#define CLANG_ATTR_ARG_CONTEXT_LIST
@@ -250,7 +258,7 @@ void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- AttributeList::Syntax Syntax) {
+ ParsedAttr::Syntax Syntax) {
BalancedDelimiterTracker Parens(*this, tok::l_paren);
Parens.consumeOpen();
@@ -276,21 +284,22 @@ void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
unsigned Parser::ParseAttributeArgsCommon(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, AttributeList::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
// Ignore the left paren location for now.
ConsumeParen();
ArgsVector ArgExprs;
if (Tok.is(tok::identifier)) {
// If this attribute wants an 'identifier' argument, make it so.
- bool IsIdentifierArg = attributeHasIdentifierArg(*AttrName);
- AttributeList::Kind AttrKind =
- AttributeList::getKind(AttrName, ScopeName, Syntax);
+ bool IsIdentifierArg = attributeHasIdentifierArg(*AttrName) ||
+ attributeHasVariadicIdentifierArg(*AttrName);
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getKind(AttrName, ScopeName, Syntax);
// If we don't know how to parse this attribute, but this is the only
// token in this argument, assume it's meant to be an identifier.
- if (AttrKind == AttributeList::UnknownAttribute ||
- AttrKind == AttributeList::IgnoredAttribute) {
+ if (AttrKind == ParsedAttr::UnknownAttribute ||
+ AttrKind == ParsedAttr::IgnoredAttribute) {
const Token &Next = NextToken();
IsIdentifierArg = Next.isOneOf(tok::r_paren, tok::comma);
}
@@ -306,21 +315,25 @@ unsigned Parser::ParseAttributeArgsCommon(
// Parse the non-empty comma-separated list of expressions.
do {
- bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
- EnterExpressionEvaluationContext Unevaluated(
- Actions,
- Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
- : Sema::ExpressionEvaluationContext::ConstantEvaluated,
- /*LambdaContextDecl=*/nullptr,
- /*IsDecltype=*/false);
-
- ExprResult ArgExpr(
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
- if (ArgExpr.isInvalid()) {
- SkipUntil(tok::r_paren, StopAtSemi);
- return 0;
+ ExprResult ArgExpr;
+ if (Tok.is(tok::identifier) &&
+ attributeHasVariadicIdentifierArg(*AttrName)) {
+ ArgExprs.push_back(ParseIdentifierLoc());
+ } else {
+ bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions,
+ Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExprResult ArgExpr(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+ ArgExprs.push_back(ArgExpr.get());
}
- ArgExprs.push_back(ArgExpr.get());
// Eat the comma, move to the next argument
} while (TryConsumeToken(tok::comma));
}
@@ -346,27 +359,27 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- AttributeList::Syntax Syntax,
+ ParsedAttr::Syntax Syntax,
Declarator *D) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
- AttributeList::Kind AttrKind =
- AttributeList::getKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getKind(AttrName, ScopeName, Syntax);
- if (AttrKind == AttributeList::AT_Availability) {
+ if (AttrKind == ParsedAttr::AT_Availability) {
ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
ScopeLoc, Syntax);
return;
- } else if (AttrKind == AttributeList::AT_ExternalSourceSymbol) {
+ } else if (AttrKind == ParsedAttr::AT_ExternalSourceSymbol) {
ParseExternalSourceSymbolAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
return;
- } else if (AttrKind == AttributeList::AT_ObjCBridgeRelated) {
+ } else if (AttrKind == ParsedAttr::AT_ObjCBridgeRelated) {
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
return;
- } else if (AttrKind == AttributeList::AT_TypeTagForDatatype) {
+ } else if (AttrKind == ParsedAttr::AT_TypeTagForDatatype) {
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
return;
@@ -398,20 +411,34 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
unsigned Parser::ParseClangAttributeArgs(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, AttributeList::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
- AttributeList::Kind AttrKind =
- AttributeList::getKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getKind(AttrName, ScopeName, Syntax);
- if (AttrKind == AttributeList::AT_ExternalSourceSymbol) {
+ switch (AttrKind) {
+ default:
+ return ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
+ case ParsedAttr::AT_ExternalSourceSymbol:
ParseExternalSourceSymbolAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
- return Attrs.getList() ? Attrs.getList()->getNumArgs() : 0;
+ break;
+ case ParsedAttr::AT_Availability:
+ ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ScopeLoc, Syntax);
+ break;
+ case ParsedAttr::AT_ObjCBridgeRelated:
+ ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
+ break;
+ case ParsedAttr::AT_TypeTagForDatatype:
+ ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
+ break;
}
-
- return ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ return !Attrs.empty() ? Attrs.begin()->getNumArgs() : 0;
}
bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
@@ -538,19 +565,18 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
if (!HasInvalidAccessor)
Attrs.addNewPropertyAttr(AttrName, AttrNameLoc, nullptr, SourceLocation(),
AccessorNames[AK_Get], AccessorNames[AK_Put],
- AttributeList::AS_Declspec);
+ ParsedAttr::AS_Declspec);
T.skipToEnd();
return !HasInvalidAccessor;
}
unsigned NumArgs =
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, nullptr, nullptr,
- SourceLocation(), AttributeList::AS_Declspec);
+ SourceLocation(), ParsedAttr::AS_Declspec);
// If this attribute's args were parsed, and it was expected to have
// arguments but none were provided, emit a diagnostic.
- const AttributeList *Attr = Attrs.getList();
- if (Attr && Attr->getMaxArgs() && !NumArgs) {
+ if (!Attrs.empty() && Attrs.begin()->getMaxArgs() && !NumArgs) {
Diag(OpenParenLoc, diag::err_attribute_requires_arguments) << AttrName;
return false;
}
@@ -621,7 +647,7 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
if (!AttrHandled)
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Declspec);
+ ParsedAttr::AS_Declspec);
}
T.consumeClose();
if (End)
@@ -647,7 +673,7 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ ParsedAttr::AS_Keyword);
break;
}
default:
@@ -698,7 +724,7 @@ void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ ParsedAttr::AS_Keyword);
}
}
@@ -708,7 +734,7 @@ void Parser::ParseOpenCLKernelAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ ParsedAttr::AS_Keyword);
}
}
@@ -716,7 +742,7 @@ void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ ParsedAttr::AS_Keyword);
}
void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
@@ -732,7 +758,7 @@ void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
Diag(AttrNameLoc, diag::ext_nullability)
<< AttrName;
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ ParsedAttr::AS_Keyword);
break;
}
default:
@@ -745,12 +771,14 @@ static bool VersionNumberSeparator(const char Separator) {
return (Separator == '.' || Separator == '_');
}
-/// \brief Parse a version number.
+/// Parse a version number.
///
/// version:
/// simple-integer
-/// simple-integer ',' simple-integer
-/// simple-integer ',' simple-integer ',' simple-integer
+/// simple-integer '.' simple-integer
+/// simple-integer '_' simple-integer
+/// simple-integer '.' simple-integer '.' simple-integer
+/// simple-integer '_' simple-integer '_' simple-integer
VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
Range = SourceRange(Tok.getLocation(), Tok.getEndLoc());
@@ -828,7 +856,7 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
return VersionTuple();
}
- return VersionTuple(Major, Minor, (AfterMajorSeparator == '_'));
+ return VersionTuple(Major, Minor);
}
const char AfterMinorSeparator = ThisTokBegin[AfterMinor];
@@ -859,10 +887,10 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
return VersionTuple();
}
ConsumeToken();
- return VersionTuple(Major, Minor, Subminor, (AfterMajorSeparator == '_'));
+ return VersionTuple(Major, Minor, Subminor);
}
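With the uses-underscores flag dropped from the constructor, dot and underscore spellings now yield identical values; a sketch assuming the clang VersionTuple header of this era:

#include "clang/Basic/VersionTuple.h"
#include <cassert>

void demoVersionTuple() {
  clang::VersionTuple A(10, 12, 1); // as parsed from "10.12.1"
  clang::VersionTuple B(10, 12, 1); // as parsed from "10_12_1"
  assert(A == B); // the separator spelling no longer affects the value
}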
-/// \brief Parse the contents of the "availability" attribute.
+/// Parse the contents of the "availability" attribute.
///
/// availability-attribute:
/// 'availability' '(' platform ',' opt-strict version-arg-list,
@@ -893,7 +921,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- AttributeList::Syntax Syntax) {
+ ParsedAttr::Syntax Syntax) {
enum { Introduced, Deprecated, Obsoleted, Unknown };
AvailabilityChange Changes[Unknown];
ExprResult MessageExpr, ReplacementExpr;
@@ -1094,7 +1122,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
Syntax, StrictLoc, ReplacementExpr.get());
}
-/// \brief Parse the contents of the "external_source_symbol" attribute.
+/// Parse the contents of the "external_source_symbol" attribute.
///
/// external-source-symbol-attribute:
/// 'external_source_symbol' '(' keyword-arg-list ')'
@@ -1110,7 +1138,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
void Parser::ParseExternalSourceSymbolAttribute(
IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, AttributeList::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume())
@@ -1207,7 +1235,7 @@ void Parser::ParseExternalSourceSymbolAttribute(
ScopeName, ScopeLoc, Args, llvm::array_lengthof(Args), Syntax);
}
-/// \brief Parse the contents of the "objc_bridge_related" attribute.
+/// Parse the contents of the "objc_bridge_related" attribute.
/// objc_bridge_related '(' related_class ',' opt-class_method ',' opt-instance_method ')'
/// related_class:
/// Identifier
@@ -1224,7 +1252,7 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- AttributeList::Syntax Syntax) {
+ ParsedAttr::Syntax Syntax) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen()) {
@@ -1244,7 +1272,9 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
return;
}
- // Parse optional class method name.
+  // Parse the class method name. It is non-optional in the sense that a
+  // trailing comma is required, but it may be the empty string, in which
+  // case we record a nullptr.
IdentifierLoc *ClassMethod = nullptr;
if (Tok.is(tok::identifier)) {
ClassMethod = ParseIdentifierLoc();
@@ -1263,7 +1293,8 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
return;
}
- // Parse optional instance method name.
+  // Parse the instance method name. Likewise non-optional, but an empty
+  // string is permitted.
IdentifierLoc *InstanceMethod = nullptr;
if (Tok.is(tok::identifier))
InstanceMethod = ParseIdentifierLoc();
@@ -1335,7 +1366,7 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
Class.TagOrTemplate);
}
-/// \brief Parse all attributes in LAs, and attach them to Decl D.
+/// Parse all attributes in LAs, and attach them to Decl D.
void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition) {
assert(LAs.parseSoon() &&
@@ -1349,7 +1380,7 @@ void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
LAs.clear();
}
-/// \brief Finish parsing an attribute for which parsing was delayed.
+/// Finish parsing an attribute for which parsing was delayed.
/// This will be called at the end of parsing a class declaration
/// for each LateParsedAttribute. We consume the saved tokens and
/// create an attribute with the arguments filled in. We add this
@@ -1400,7 +1431,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), AttributeList::AS_GNU,
+ nullptr, SourceLocation(), ParsedAttr::AS_GNU,
nullptr);
if (HasFunScope) {
@@ -1414,16 +1445,15 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
// If there are multiple decls, then the decl cannot be within the
// function scope.
ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), AttributeList::AS_GNU,
+ nullptr, SourceLocation(), ParsedAttr::AS_GNU,
nullptr);
}
} else {
Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
}
- const AttributeList *AL = Attrs.getList();
- if (OnDefinition && AL && !AL->isCXX11Attribute() &&
- AL->isKnownToGCC())
+ if (OnDefinition && !Attrs.empty() && !Attrs.begin()->isCXX11Attribute() &&
+ Attrs.begin()->isKnownToGCC())
Diag(Tok, diag::warn_attribute_on_function_definition)
<< &LA.AttrName;
@@ -1445,7 +1475,7 @@ void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- AttributeList::Syntax Syntax) {
+ ParsedAttr::Syntax Syntax) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -1535,7 +1565,7 @@ bool Parser::DiagnoseProhibitedCXX11Attribute() {
llvm_unreachable("All cases handled above.");
}
-/// \brief We have found the opening square brackets of a C++11
+/// We have found the opening square brackets of a C++11
/// attribute-specifier in a location where an attribute is not permitted, but
/// we know where the attributes ought to be written. Parse them anyway, and
/// provide a fixit moving them to the right place.
@@ -1554,29 +1584,27 @@ void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
<< FixItHint::CreateRemoval(AttrRange);
}
-void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs,
- const SourceLocation CorrectLocation) {
+void Parser::DiagnoseProhibitedAttributes(
+ const SourceRange &Range, const SourceLocation CorrectLocation) {
if (CorrectLocation.isValid()) {
- CharSourceRange AttrRange(attrs.Range, true);
+ CharSourceRange AttrRange(Range, true);
Diag(CorrectLocation, diag::err_attributes_misplaced)
<< FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
<< FixItHint::CreateRemoval(AttrRange);
} else
- Diag(attrs.Range.getBegin(), diag::err_attributes_not_allowed) << attrs.Range;
+ Diag(Range.getBegin(), diag::err_attributes_not_allowed) << Range;
}
void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID) {
- for (AttributeList *Attr = Attrs.getList(); Attr; Attr = Attr->getNext()) {
- if (!Attr->isCXX11Attribute() && !Attr->isC2xAttribute())
+ for (const ParsedAttr &AL : Attrs) {
+ if (!AL.isCXX11Attribute() && !AL.isC2xAttribute())
continue;
- if (Attr->getKind() == AttributeList::UnknownAttribute)
- Diag(Attr->getLoc(), diag::warn_unknown_attribute_ignored)
- << Attr->getName();
+ if (AL.getKind() == ParsedAttr::UnknownAttribute)
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored) << AL.getName();
else {
- Diag(Attr->getLoc(), DiagID)
- << Attr->getName();
- Attr->setInvalid();
+ Diag(AL.getLoc(), DiagID) << AL.getName();
+ AL.setInvalid();
}
}
}
@@ -1594,52 +1622,24 @@ void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
if (TUK == Sema::TUK_Reference)
return;
- ParsedAttributes &PA = DS.getAttributes();
- AttributeList *AL = PA.getList();
- AttributeList *Prev = nullptr;
- AttributeList *TypeAttrHead = nullptr;
- AttributeList *TypeAttrTail = nullptr;
- while (AL) {
- AttributeList *Next = AL->getNext();
-
- if ((AL->getKind() == AttributeList::AT_Aligned &&
- AL->isDeclspecAttribute()) ||
- AL->isMicrosoftAttribute()) {
- // Stitch the attribute into the tag's attribute list.
- if (TypeAttrTail)
- TypeAttrTail->setNext(AL);
- else
- TypeAttrHead = AL;
- TypeAttrTail = AL;
- TypeAttrTail->setNext(nullptr);
-
- // Remove the attribute from the variable's attribute list.
- if (Prev) {
- // Set the last variable attribute's next attribute to be the attribute
- // after the current one.
- Prev->setNext(Next);
- } else {
- // Removing the head of the list requires us to reset the head to the
- // next attribute.
- PA.set(Next);
- }
- } else {
- Prev = AL;
- }
+ llvm::SmallVector<ParsedAttr *, 1> ToBeMoved;
- AL = Next;
+ for (ParsedAttr &AL : DS.getAttributes()) {
+ if ((AL.getKind() == ParsedAttr::AT_Aligned &&
+ AL.isDeclspecAttribute()) ||
+ AL.isMicrosoftAttribute())
+ ToBeMoved.push_back(&AL);
}
- // Find end of type attributes Attrs and add NewTypeAttributes in the same
- // order they were in originally. (Remember, in AttributeList things earlier
- // in source order are later in the list, since new attributes are added to
- // the front of the list.)
- Attrs.addAllAtEnd(TypeAttrHead);
+ for (ParsedAttr *AL : ToBeMoved) {
+ DS.getAttributes().remove(AL);
+ Attrs.addAtEnd(AL);
+ }
}
/// ParseDeclaration - Parse a full 'declaration', which consists of
/// declaration-specifiers, some number of declarators, and a semicolon.
-/// 'Context' should be a Declarator::TheContext value. This returns the
+/// 'Context' should be a DeclaratorContext value. This returns the
/// location of the semicolon in DeclEnd.
///
/// declaration: [C99 6.7]
@@ -1653,7 +1653,7 @@ void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
/// [C++11/C11] static_assert-declaration
/// others... [FIXME]
///
-Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -1666,7 +1666,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
case tok::kw_template:
case tok::kw_export:
ProhibitAttributes(attrs);
- SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd);
+ SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd, attrs);
break;
case tok::kw_inline:
// Could be the start of an inline namespace. Allowed as an ext in C++03.
@@ -1714,7 +1714,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
/// of a simple-declaration. If we find that we are, we also parse the
/// for-range-initializer, and place it here.
Parser::DeclGroupPtrTy
-Parser::ParseSimpleDeclaration(unsigned Context,
+Parser::ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
bool RequireSemi, ForRangeInit *FRI) {
@@ -1753,7 +1753,7 @@ Parser::ParseSimpleDeclaration(unsigned Context,
/// Returns true if this might be the start of a declarator, or a common typo
/// for a declarator.
-bool Parser::MightBeDeclarator(unsigned Context) {
+bool Parser::MightBeDeclarator(DeclaratorContext Context) {
switch (Tok.getKind()) {
case tok::annot_cxxscope:
case tok::annot_template_id:
@@ -1772,11 +1772,12 @@ bool Parser::MightBeDeclarator(unsigned Context) {
return getLangOpts().CPlusPlus;
case tok::l_square: // Might be an attribute on an unnamed bit-field.
- return Context == Declarator::MemberContext && getLangOpts().CPlusPlus11 &&
- NextToken().is(tok::l_square);
+ return Context == DeclaratorContext::MemberContext &&
+ getLangOpts().CPlusPlus11 && NextToken().is(tok::l_square);
case tok::colon: // Might be a typo for '::' or an unnamed bit-field.
- return Context == Declarator::MemberContext || getLangOpts().CPlusPlus;
+ return Context == DeclaratorContext::MemberContext ||
+ getLangOpts().CPlusPlus;
case tok::identifier:
switch (NextToken().getKind()) {
@@ -1802,8 +1803,9 @@ bool Parser::MightBeDeclarator(unsigned Context) {
// At namespace scope, 'identifier:' is probably a typo for 'identifier::'
// and in block scope it's probably a label. Inside a class definition,
// this is a bit-field.
- return Context == Declarator::MemberContext ||
- (getLangOpts().CPlusPlus && Context == Declarator::FileContext);
+ return Context == DeclaratorContext::MemberContext ||
+ (getLangOpts().CPlusPlus &&
+ Context == DeclaratorContext::FileContext);
case tok::identifier: // Possible virt-specifier.
return getLangOpts().CPlusPlus11 && isCXX11VirtSpecifier(NextToken());
@@ -1902,11 +1904,11 @@ void Parser::SkipMalformedDecl() {
/// definition or a group of object declarations, actually parse the
/// result.
Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
- unsigned Context,
+ DeclaratorContext Context,
SourceLocation *DeclEnd,
ForRangeInit *FRI) {
// Parse the first declarator.
- ParsingDeclarator D(*this, DS, static_cast<Declarator::TheContext>(Context));
+ ParsingDeclarator D(*this, DS, Context);
ParseDeclarator(D);
// Bail out if the first declarator didn't seem well-formed.
@@ -1953,7 +1955,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Function definitions are only allowed at file scope and in C++ classes.
// The C++ inline method definition case is handled elsewhere, so we only
// need to handle the file scope definition case.
- if (Context == Declarator::FileContext) {
+ if (Context == DeclaratorContext::FileContext) {
if (isStartOfFunctionDefinition(D)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(Tok, diag::err_function_declared_typedef);
@@ -2008,8 +2010,13 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
}
Decl *ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
- if (IsForRangeLoop)
+ if (IsForRangeLoop) {
Actions.ActOnCXXForRangeDecl(ThisDecl);
+ } else {
+    // Obj-C for..in loop: mark the declaration accordingly.
+ if (auto *VD = dyn_cast_or_null<VarDecl>(ThisDecl))
+ VD->setObjCForDecl(true);
+ }
Actions.FinalizeDeclaration(ThisDecl);
D.complete(ThisDecl);
return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, ThisDecl);
@@ -2024,7 +2031,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
if (FirstDecl)
DeclsInGroup.push_back(FirstDecl);
- bool ExpectSemi = Context != Declarator::ForContext;
+ bool ExpectSemi = Context != DeclaratorContext::ForContext;
// If we don't have a comma, it is either the end of the list (a ';') or an
// error, bail out.
@@ -2070,7 +2077,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
*DeclEnd = Tok.getLocation();
if (ExpectSemi &&
- ExpectAndConsumeSemi(Context == Declarator::FileContext
+ ExpectAndConsumeSemi(Context == DeclaratorContext::FileContext
? diag::err_invalid_token_after_toplevel_declarator
: diag::err_expected_semi_declaration)) {
// Okay, there was no semicolon and one was expected. If we see a
@@ -2105,7 +2112,7 @@ bool Parser::ParseAsmAttributesAfterDeclarator(Declarator &D) {
return false;
}
-/// \brief Parse 'declaration' after parsing 'declaration-specifiers
+/// Parse 'declaration' after parsing 'declaration-specifiers
/// declarator'. This method parses the remainder of the declaration
/// (including any attributes or initializer, among other things) and
/// finalizes the declaration.
@@ -2199,7 +2206,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
// FIXME: This check should be for a variable template instantiation only.
// Check that this is a valid instantiation
- if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
Diag(Tok, diag::err_template_defn_explicit_instantiation)
@@ -2273,8 +2280,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
if (Init.isInvalid()) {
SmallVector<tok::TokenKind, 2> StopTokens;
StopTokens.push_back(tok::comma);
- if (D.getContext() == Declarator::ForContext ||
- D.getContext() == Declarator::InitStmtContext)
+ if (D.getContext() == DeclaratorContext::ForContext ||
+ D.getContext() == DeclaratorContext::InitStmtContext)
StopTokens.push_back(tok::r_paren);
SkipUntil(StopTokens, StopAtSemi | StopBeforeMatch);
Actions.ActOnInitializerError(ThisDecl);
@@ -2397,7 +2404,7 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
}
// Issue diagnostic and remove constexpr specfier if present.
- if (DS.isConstexprSpecified() && DSC != DSC_condition) {
+ if (DS.isConstexprSpecified() && DSC != DeclSpecContext::DSC_condition) {
Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr);
DS.ClearConstexprSpec();
}
@@ -2444,7 +2451,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
SourceLocation Loc = Tok.getLocation();
// If we see an identifier that is not a type name, we normally would
- // parse it as the identifer being declared. However, when a typename
+ // parse it as the identifier being declared. However, when a typename
// is typo'd or the definition is not included, this will incorrectly
// parse the typename as the identifier name and fall over misparsing
// later parts of the diagnostic.
@@ -2486,7 +2493,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// classes.
if (ParsedType T = Actions.ActOnMSVCUnknownTypeName(
*Tok.getIdentifierInfo(), Tok.getLocation(),
- DSC == DSC_template_type_arg)) {
+ DSC == DeclSpecContext::DSC_template_type_arg)) {
const char *PrevSpec;
unsigned DiagID;
DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID, T,
@@ -2540,18 +2547,20 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// Parse this as a tag as if the missing tag were present.
if (TagKind == tok::kw_enum)
- ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSC_normal);
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS,
+ DeclSpecContext::DSC_normal);
else
ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS,
- /*EnteringContext*/ false, DSC_normal, Attrs);
+ /*EnteringContext*/ false,
+ DeclSpecContext::DSC_normal, Attrs);
return true;
}
}
// Determine whether this identifier could plausibly be the name of something
// being declared (with a missing type).
- if (!isTypeSpecifier(DSC) &&
- (!SS || DSC == DSC_top_level || DSC == DSC_class)) {
+ if (!isTypeSpecifier(DSC) && (!SS || DSC == DeclSpecContext::DSC_top_level ||
+ DSC == DeclSpecContext::DSC_class)) {
// Look ahead to the next token to try to figure out what this declaration
// was supposed to be.
switch (NextToken().getKind()) {
@@ -2575,7 +2584,8 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// If we're in a context where we could be declaring a constructor,
// check whether this is a constructor declaration with a bogus name.
- if (DSC == DSC_class || (DSC == DSC_top_level && SS)) {
+ if (DSC == DeclSpecContext::DSC_class ||
+ (DSC == DeclSpecContext::DSC_top_level && SS)) {
IdentifierInfo *II = Tok.getIdentifierInfo();
if (Actions.isCurrentClassNameTypo(II, SS)) {
Diag(Loc, diag::err_constructor_bad_name)
@@ -2651,27 +2661,29 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
return false;
}
-/// \brief Determine the declaration specifier context from the declarator
+/// Determine the declaration specifier context from the declarator
/// context.
///
/// \param Context the declarator context, which is one of the
-/// Declarator::TheContext enumerator values.
+/// DeclaratorContext enumerator values.
Parser::DeclSpecContext
-Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
- if (Context == Declarator::MemberContext)
- return DSC_class;
- if (Context == Declarator::FileContext)
- return DSC_top_level;
- if (Context == Declarator::TemplateParamContext)
- return DSC_template_param;
- if (Context == Declarator::TemplateTypeArgContext)
- return DSC_template_type_arg;
- if (Context == Declarator::TrailingReturnContext)
- return DSC_trailing;
- if (Context == Declarator::AliasDeclContext ||
- Context == Declarator::AliasTemplateContext)
- return DSC_alias_declaration;
- return DSC_normal;
+Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
+ if (Context == DeclaratorContext::MemberContext)
+ return DeclSpecContext::DSC_class;
+ if (Context == DeclaratorContext::FileContext)
+ return DeclSpecContext::DSC_top_level;
+ if (Context == DeclaratorContext::TemplateParamContext)
+ return DeclSpecContext::DSC_template_param;
+ if (Context == DeclaratorContext::TemplateArgContext ||
+ Context == DeclaratorContext::TemplateTypeArgContext)
+ return DeclSpecContext::DSC_template_type_arg;
+ if (Context == DeclaratorContext::TrailingReturnContext ||
+ Context == DeclaratorContext::TrailingReturnVarContext)
+ return DeclSpecContext::DSC_trailing;
+ if (Context == DeclaratorContext::AliasDeclContext ||
+ Context == DeclaratorContext::AliasTemplateContext)
+ return DeclSpecContext::DSC_alias_declaration;
+ return DeclSpecContext::DSC_normal;
}
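The pervasive unsigned-to-scoped-enum migration in this file is about type safety; a toy illustration of what the scoped enums rule out (all names local to this sketch):

enum class DeclaratorCtxDemo { File, Member };
enum class DeclSpecCtxDemo { Normal, Class };

void demo(DeclaratorCtxDemo Context) {
  // DeclSpecCtxDemo DSC = 0;                 // error: no implicit conversion
  // if (Context == DeclSpecCtxDemo::Class);  // error: mismatched enum types
  if (Context == DeclaratorCtxDemo::Member) { /* handled */ }
}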
/// ParseAlignArgument - Parse the argument to an alignment-specifier.
@@ -2735,7 +2747,7 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
ArgsVector ArgExprs;
ArgExprs.push_back(ArgExpr.get());
Attrs.addNew(KWName, KWLoc, nullptr, KWLoc, ArgExprs.data(), 1,
- AttributeList::AS_Keyword, EllipsisLoc);
+ ParsedAttr::AS_Keyword, EllipsisLoc);
}
/// Determine whether we're looking at something that might be a declarator
@@ -2751,7 +2763,8 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
LateParsedAttrList *LateAttrs) {
assert(DS.hasTagDefinition() && "shouldn't call this");
- bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
+ bool EnteringContext = (DSContext == DeclSpecContext::DSC_class ||
+ DSContext == DeclSpecContext::DSC_top_level);
if (getLangOpts().CPlusPlus &&
Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
@@ -2845,6 +2858,17 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
return false;
}
+// Choose the appropriate diagnostic error for why fixed-point types are
+// disabled, set the previous specifier, and mark as invalid.
+static void SetupFixedPointError(const LangOptions &LangOpts,
+ const char *&PrevSpec, unsigned &DiagID,
+ bool &isInvalid) {
+ assert(!LangOpts.FixedPoint);
+ DiagID = diag::err_fixed_point_not_enabled;
+ PrevSpec = ""; // Not used by diagnostic
+ isInvalid = true;
+}
+
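What the helper gates, in source form: the Embedded-C fixed-point keywords handled below parse only when LangOpts.FixedPoint is set (the driver flag enabling it is outside this diff):

// Accepted only when fixed-point support is enabled; otherwise each keyword
// is diagnosed with err_fixed_point_not_enabled via SetupFixedPointError.
_Accum a;        // maps to TST_accum
_Fract f;        // maps to TST_fract
_Sat _Accum sa;  // TST_accum plus the saturation bit via SetTypeSpecSat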
/// ParseDeclarationSpecifiers
/// declaration-specifiers: [C99 6.7]
/// storage-class-specifier declaration-specifiers[opt]
@@ -2885,7 +2909,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DS.SetRangeEnd(SourceLocation());
}
- bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
+ bool EnteringContext = (DSContext == DeclSpecContext::DSC_class ||
+ DSContext == DeclSpecContext::DSC_top_level);
bool AttrsLastTime = false;
ParsedAttributesWithRange attrs(AttrFactory);
// We use Sema's policy to get bool macros right.
@@ -2955,8 +2980,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Scope::FunctionPrototypeScope |
Scope::AtCatchScope)) == 0;
bool AllowNestedNameSpecifiers
- = DSContext == DSC_top_level ||
- (DSContext == DSC_class && DS.isFriendSpecified());
+ = DSContext == DeclSpecContext::DSC_top_level ||
+ (DSContext == DeclSpecContext::DSC_class && DS.isFriendSpecified());
Actions.CodeCompleteDeclSpec(getCurScope(), DS,
AllowNonIdentifiers,
@@ -2967,9 +2992,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
CCC = Sema::PCC_LocalDeclarationSpecifiers;
else if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
- CCC = DSContext == DSC_class? Sema::PCC_MemberTemplate
- : Sema::PCC_Template;
- else if (DSContext == DSC_class)
+ CCC = DSContext == DeclSpecContext::DSC_class ? Sema::PCC_MemberTemplate
+ : Sema::PCC_Template;
+ else if (DSContext == DeclSpecContext::DSC_class)
CCC = Sema::PCC_Class;
else if (CurParsedObjCImpl)
CCC = Sema::PCC_ObjCImplementation;
@@ -3013,10 +3038,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// To improve diagnostics for this case, parse the declaration as a
// constructor (and reject the extra template arguments later).
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
- if ((DSContext == DSC_top_level || DSContext == DSC_class) &&
+ if ((DSContext == DeclSpecContext::DSC_top_level ||
+ DSContext == DeclSpecContext::DSC_class) &&
TemplateId->Name &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS) &&
- isConstructorDeclarator(/*Unqualified*/false)) {
+ isConstructorDeclarator(/*Unqualified*/ false)) {
// The user meant this to be an out-of-line constructor
// definition, but template arguments are not allowed
// there. Just allow this as a constructor; we'll
@@ -3055,7 +3081,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Check whether this is a constructor declaration. If we're in a
// context where the identifier could be a class name, and it has the
// shape of a constructor declaration, process it as one.
- if ((DSContext == DSC_top_level || DSContext == DSC_class) &&
+ if ((DSContext == DeclSpecContext::DSC_top_level ||
+ DSContext == DeclSpecContext::DSC_class) &&
Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
&SS) &&
isConstructorDeclarator(/*Unqualified*/ false))
@@ -3193,7 +3220,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (DS.isTypeAltiVecVector())
goto DoneWithDeclSpec;
- if (DSContext == DSC_objc_method_result && isObjCInstancetype()) {
+ if (DSContext == DeclSpecContext::DSC_objc_method_result &&
+ isObjCInstancetype()) {
ParsedType TypeRep = Actions.ActOnObjCInstanceType(Loc);
assert(TypeRep);
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
@@ -3206,6 +3234,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
}
+ // If we're in a context where the identifier could be a class name,
+ // check whether this is a constructor declaration.
+ if (getLangOpts().CPlusPlus && DSContext == DeclSpecContext::DSC_class &&
+ Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
+ isConstructorDeclarator(/*Unqualified*/true))
+ goto DoneWithDeclSpec;
+
ParsedType TypeRep = Actions.getTypeName(
*Tok.getIdentifierInfo(), Tok.getLocation(), getCurScope(), nullptr,
false, false, nullptr, false, false,
@@ -3225,17 +3260,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
goto DoneWithDeclSpec;
}
- // If we're in a context where the identifier could be a class name,
- // check whether this is a constructor declaration.
- if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
- Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
- isConstructorDeclarator(/*Unqualified*/true))
- goto DoneWithDeclSpec;
-
// Likewise, if this is a context where the identifier could be a template
// name, check whether this is a deduction guide declaration.
if (getLangOpts().CPlusPlus17 &&
- (DSContext == DSC_class || DSContext == DSC_top_level) &&
+ (DSContext == DeclSpecContext::DSC_class ||
+ DSContext == DeclSpecContext::DSC_top_level) &&
Actions.isDeductionGuideName(getCurScope(), *Tok.getIdentifierInfo(),
Tok.getLocation()) &&
isConstructorDeclarator(/*Unqualified*/ true,
@@ -3281,7 +3310,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If we're in a context where the template-id could be a
// constructor name or specialization, check whether this is a
// constructor declaration.
- if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
+ if (getLangOpts().CPlusPlus && DSContext == DeclSpecContext::DSC_class &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
isConstructorDeclarator(TemplateId->SS.isEmpty()))
goto DoneWithDeclSpec;
@@ -3308,7 +3337,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
DS.getAttributes().addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc,
- nullptr, 0, AttributeList::AS_Keyword);
+ nullptr, 0, ParsedAttr::AS_Keyword);
break;
}
@@ -3351,7 +3380,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C 'kindof' types.
case tok::kw___kindof:
DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
- nullptr, 0, AttributeList::AS_Keyword);
+ nullptr, 0, ParsedAttr::AS_Keyword);
(void)ConsumeToken();
continue;
@@ -3419,6 +3448,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw_thread_local:
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS_thread_local, Loc,
PrevSpec, DiagID);
+ isStorageClass = true;
break;
case tok::kw__Thread_local:
isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS__Thread_local,
@@ -3431,7 +3461,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.setFunctionSpecInline(Loc, PrevSpec, DiagID);
break;
case tok::kw_virtual:
- isInvalid = DS.setFunctionSpecVirtual(Loc, PrevSpec, DiagID);
+ // OpenCL C++ v1.0 s2.9: the virtual function qualifier is not supported.
+ if (getLangOpts().OpenCLCPlusPlus) {
+ DiagID = diag::err_openclcxx_virtual_function;
+ PrevSpec = Tok.getIdentifierInfo()->getNameStart();
+ isInvalid = true;
+ }
+ else {
+ isInvalid = DS.setFunctionSpecVirtual(Loc, PrevSpec, DiagID);
+ }
break;
case tok::kw_explicit:
isInvalid = DS.setFunctionSpecExplicit(Loc, PrevSpec, DiagID);
@@ -3451,7 +3489,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// friend
case tok::kw_friend:
- if (DSContext == DSC_class)
+ if (DSContext == DeclSpecContext::DSC_class)
isInvalid = DS.SetFriendSpec(Loc, PrevSpec, DiagID);
else {
PrevSpec = ""; // not actually used by the diagnostic
@@ -3535,6 +3573,29 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float16, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw__Accum:
+ if (!getLangOpts().FixedPoint) {
+ SetupFixedPointError(getLangOpts(), PrevSpec, DiagID, isInvalid);
+ } else {
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_accum, Loc, PrevSpec,
+ DiagID, Policy);
+ }
+ break;
+ case tok::kw__Fract:
+ if (!getLangOpts().FixedPoint) {
+ SetupFixedPointError(getLangOpts(), PrevSpec, DiagID, isInvalid);
+ } else {
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_fract, Loc, PrevSpec,
+ DiagID, Policy);
+ }
+ break;
+ case tok::kw__Sat:
+ if (!getLangOpts().FixedPoint) {
+ SetupFixedPointError(getLangOpts(), PrevSpec, DiagID, isInvalid);
+ } else {
+ isInvalid = DS.SetTypeSpecSat(Loc, PrevSpec, DiagID);
+ }
+ break;
case tok::kw___float128:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float128, Loc, PrevSpec,
DiagID, Policy);
@@ -3543,6 +3604,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw_char8_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char8, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
case tok::kw_char16_t:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec,
DiagID, Policy);
@@ -3703,11 +3768,25 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
getLangOpts());
break;
- // OpenCL qualifiers:
+ // OpenCL access qualifiers:
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
+ // OpenCL C++ 1.0 s2.2: access qualifiers are reserved keywords.
+ if (Actions.getLangOpts().OpenCLCPlusPlus) {
+ DiagID = diag::err_openclcxx_reserved;
+ PrevSpec = Tok.getIdentifierInfo()->getNameStart();
+ isInvalid = true;
+ }
+ ParseOpenCLQualifiers(DS.getAttributes());
+ break;
+
+ // OpenCL address space qualifiers:
case tok::kw___generic:
// generic address space is introduced only in OpenCL v2.0
// see OpenCL C Spec v2.0 s6.5.5
- if (Actions.getLangOpts().OpenCLVersion < 200) {
+ if (Actions.getLangOpts().OpenCLVersion < 200 &&
+ !Actions.getLangOpts().OpenCLCPlusPlus) {
DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = Tok.getIdentifierInfo()->getNameStart();
isInvalid = true;
@@ -3718,9 +3797,6 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___global:
case tok::kw___local:
case tok::kw___constant:
- case tok::kw___read_only:
- case tok::kw___write_only:
- case tok::kw___read_write:
ParseOpenCLQualifiers(DS.getAttributes());
break;
@@ -3758,18 +3834,17 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Diag(Tok, DiagID)
<< PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
else if (DiagID == diag::err_opencl_unknown_type_specifier) {
- const int OpenCLVer = getLangOpts().OpenCLVersion;
- std::string VerSpec = llvm::to_string(OpenCLVer / 100) +
- std::string (".") +
- llvm::to_string((OpenCLVer % 100) / 10);
- Diag(Tok, DiagID) << VerSpec << PrevSpec << isStorageClass;
+ Diag(Tok, DiagID) << getLangOpts().OpenCLCPlusPlus
+ << getLangOpts().getOpenCLVersionTuple().getAsString()
+ << PrevSpec << isStorageClass;
} else
Diag(Tok, DiagID) << PrevSpec;
}
DS.SetRangeEnd(Tok.getLocation());
if (DiagID != diag::err_bool_redeclaration)
- ConsumeToken();
+ // After an error the next token can be an annotation token.
+ ConsumeAnyToken();
AttrsLastTime = false;
}
@@ -3878,7 +3953,7 @@ void Parser::ParseStructDeclaration(
///
void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
unsigned TagType, Decl *TagDecl) {
- PrettyDeclStackTraceEntry CrashInfo(Actions, TagDecl, RecordLoc,
+ PrettyDeclStackTraceEntry CrashInfo(Actions.Context, TagDecl, RecordLoc,
"parsing struct/union body");
assert(!getLangOpts().CPlusPlus && "C++ declarations not supported");
@@ -3984,10 +4059,8 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// If attributes exist after struct contents, parse them.
MaybeParseGNUAttributes(attrs);
- Actions.ActOnFields(getCurScope(),
- RecordLoc, TagDecl, FieldDecls,
- T.getOpenLocation(), T.getCloseLocation(),
- attrs.getList());
+ Actions.ActOnFields(getCurScope(), RecordLoc, TagDecl, FieldDecls,
+ T.getOpenLocation(), T.getCloseLocation(), attrs);
StructScope.Exit();
Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl, T.getRange());
}
@@ -4070,7 +4143,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
// Enum definitions should not be parsed in a trailing-return-type.
- bool AllowDeclaration = DSC != DSC_trailing;
+ bool AllowDeclaration = DSC != DeclSpecContext::DSC_trailing;
bool AllowFixedUnderlyingType = AllowDeclaration &&
(getLangOpts().CPlusPlus11 || getLangOpts().MicrosoftExt ||
@@ -4296,14 +4369,14 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
bool IsDependent = false;
const char *PrevSpec = nullptr;
unsigned DiagID;
- Decl *TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK,
- StartLoc, SS, Name, NameLoc, attrs.getList(),
- AS, DS.getModulePrivateSpecLoc(), TParams,
- Owned, IsDependent, ScopedEnumKWLoc,
- IsScopedUsingClassTag, BaseType,
- DSC == DSC_type_specifier,
- DSC == DSC_template_param ||
- DSC == DSC_template_type_arg, &SkipBody);
+ Decl *TagDecl = Actions.ActOnTag(
+ getCurScope(), DeclSpec::TST_enum, TUK, StartLoc, SS, Name, NameLoc,
+ attrs, AS, DS.getModulePrivateSpecLoc(), TParams, Owned, IsDependent,
+ ScopedEnumKWLoc, IsScopedUsingClassTag, BaseType,
+ DSC == DeclSpecContext::DSC_type_specifier,
+ DSC == DeclSpecContext::DSC_template_param ||
+ DSC == DeclSpecContext::DSC_template_type_arg,
+ &SkipBody);
if (SkipBody.ShouldSkip) {
assert(TUK == Sema::TUK_Definition && "can only skip a definition");
@@ -4440,8 +4513,8 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// Install the enumerator constant into EnumDecl.
Decl *EnumConstDecl = Actions.ActOnEnumConstant(
- getCurScope(), EnumDecl, LastEnumConstDecl, IdentLoc, Ident,
- attrs.getList(), EqualLoc, AssignedVal.get());
+ getCurScope(), EnumDecl, LastEnumConstDecl, IdentLoc, Ident, attrs,
+ EqualLoc, AssignedVal.get());
EnumAvailabilityDiags.back().done();
EnumConstantDecls.push_back(EnumConstDecl);
@@ -4493,10 +4566,8 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
ParsedAttributes attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
- Actions.ActOnEnumBody(StartLoc, T.getRange(),
- EnumDecl, EnumConstantDecls,
- getCurScope(),
- attrs.getList());
+ Actions.ActOnEnumBody(StartLoc, T.getRange(), EnumDecl, EnumConstantDecls,
+ getCurScope(), attrs);
// Now handle enum constant availability diagnostics.
assert(EnumConstantDecls.size() == EnumAvailabilityDiags.size());
@@ -4540,12 +4611,15 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw_void:
case tok::kw_char:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_int:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_bool:
@@ -4616,12 +4690,15 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_void:
case tok::kw_char:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_int:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_bool:
@@ -4645,6 +4722,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_const:
case tok::kw_volatile:
case tok::kw_restrict:
+ case tok::kw__Sat:
// Debugger support.
case tok::kw___unknown_anytype:
@@ -4772,6 +4850,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_void:
case tok::kw_char:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
@@ -4779,6 +4858,8 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_bool:
@@ -4800,6 +4881,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_const:
case tok::kw_volatile:
case tok::kw_restrict:
+ case tok::kw__Sat:
// function-specifier
case tok::kw_inline:
@@ -5089,7 +5171,7 @@ void Parser::ParseTypeQualifierListOpt(
getLangOpts());
break;
case tok::kw___uptr:
- // GNU libc headers in C mode use '__uptr' as an identifer which conflicts
+ // GNU libc headers in C mode use '__uptr' as an identifier which conflicts
// with the MS modifier keyword.
if ((AttrReqs & AR_DeclspecAttributesParsed) && !getLangOpts().CPlusPlus &&
IdentifierRequired && DS.isEmpty() && NextToken().is(tok::semi)) {
@@ -5129,7 +5211,7 @@ void Parser::ParseTypeQualifierListOpt(
// Objective-C 'kindof' types.
case tok::kw___kindof:
DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
- nullptr, 0, AttributeList::AS_Keyword);
+ nullptr, 0, ParsedAttr::AS_Keyword);
(void)ConsumeToken();
continue;
@@ -5175,7 +5257,7 @@ void Parser::ParseDeclarator(Declarator &D) {
}
static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
- unsigned TheContext) {
+ DeclaratorContext TheContext) {
if (Kind == tok::star || Kind == tok::caret)
return true;
@@ -5194,8 +5276,9 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
// (The same thing can in theory happen after a trailing-return-type, but
// since those are a C++11 feature, there is no rejects-valid issue there.)
if (Kind == tok::ampamp)
- return Lang.CPlusPlus11 || (TheContext != Declarator::ConversionIdContext &&
- TheContext != Declarator::CXXNewContext);
+ return Lang.CPlusPlus11 ||
+ (TheContext != DeclaratorContext::ConversionIdContext &&
+ TheContext != DeclaratorContext::CXXNewContext);
return false;
}
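The '&&' ambiguity the comment above describes shows up in a conversion-function-id, where C++11 must read the token as part of the conversion-type-id (illustrative example; X and r are invented):

    struct X {
      operator int&&();  // C++11: '&&' belongs to the conversion-type-id
    };
    int &&r = 1 + 2;     // C++11: '&&' begins an rvalue-reference declarator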
@@ -5249,8 +5332,9 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
(Tok.is(tok::identifier) &&
(NextToken().is(tok::coloncolon) || NextToken().is(tok::less))) ||
Tok.is(tok::annot_cxxscope))) {
- bool EnteringContext = D.getContext() == Declarator::FileContext ||
- D.getContext() == Declarator::MemberContext;
+ bool EnteringContext =
+ D.getContext() == DeclaratorContext::FileContext ||
+ D.getContext() == DeclaratorContext::MemberContext;
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, nullptr, EnteringContext);
@@ -5278,10 +5362,10 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
- D.AddTypeInfo(DeclaratorChunk::getMemberPointer(SS,DS.getTypeQualifiers(),
- DS.getLocEnd()),
- DS.getAttributes(),
- /* Don't replace range end. */SourceLocation());
+ D.AddTypeInfo(DeclaratorChunk::getMemberPointer(
+ SS, DS.getTypeQualifiers(), DS.getLocEnd()),
+ std::move(DS.getAttributes()),
+ /* Don't replace range end. */ SourceLocation());
return;
}
}
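The member-pointer chunk built here corresponds to source like this (illustrative; names invented):

    struct S { int m; };
    int S::*pm = &S::m;            // nested-name-specifier then '*': pointer to member
    int get(S s) { return s.*pm; } // read through the member pointer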
@@ -5294,7 +5378,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.AddTypeInfo(
DeclaratorChunk::getPipe(DS.getTypeQualifiers(), DS.getPipeLoc()),
- DS.getAttributes(), SourceLocation());
+ std::move(DS.getAttributes()), SourceLocation());
}
// Not a pointer, C++ reference, or block.
@@ -5316,9 +5400,9 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// GNU attributes are not allowed here in a new-type-id, but Declspec and
// C++11 attributes are allowed.
unsigned Reqs = AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed |
- ((D.getContext() != Declarator::CXXNewContext)
- ? AR_GNUAttributesParsed
- : AR_GNUAttributesParsedAndRejected);
+ ((D.getContext() != DeclaratorContext::CXXNewContext)
+ ? AR_GNUAttributesParsed
+ : AR_GNUAttributesParsedAndRejected);
ParseTypeQualifierListOpt(DS, Reqs, true, !D.mayOmitIdentifier());
D.ExtendWithDeclSpec(DS);
@@ -5326,20 +5410,16 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
ParseDeclaratorInternal(D, DirectDeclParser);
if (Kind == tok::star)
// Remember that we parsed a pointer type, and remember the type-quals.
- D.AddTypeInfo(DeclaratorChunk::getPointer(DS.getTypeQualifiers(), Loc,
- DS.getConstSpecLoc(),
- DS.getVolatileSpecLoc(),
- DS.getRestrictSpecLoc(),
- DS.getAtomicSpecLoc(),
- DS.getUnalignedSpecLoc()),
- DS.getAttributes(),
- SourceLocation());
+ D.AddTypeInfo(DeclaratorChunk::getPointer(
+ DS.getTypeQualifiers(), Loc, DS.getConstSpecLoc(),
+ DS.getVolatileSpecLoc(), DS.getRestrictSpecLoc(),
+ DS.getAtomicSpecLoc(), DS.getUnalignedSpecLoc()),
+ std::move(DS.getAttributes()), SourceLocation());
else
// Remember that we parsed a Block type, and remember the type-quals.
- D.AddTypeInfo(DeclaratorChunk::getBlockPointer(DS.getTypeQualifiers(),
- Loc),
- DS.getAttributes(),
- SourceLocation());
+ D.AddTypeInfo(
+ DeclaratorChunk::getBlockPointer(DS.getTypeQualifiers(), Loc),
+ std::move(DS.getAttributes()), SourceLocation());
} else {
// Is a reference
DeclSpec DS(AttrFactory);
@@ -5394,8 +5474,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Remember that we parsed a reference type.
D.AddTypeInfo(DeclaratorChunk::getReference(DS.getTypeQualifiers(), Loc,
Kind == tok::amp),
- DS.getAttributes(),
- SourceLocation());
+ std::move(DS.getAttributes()), SourceLocation());
}
}
@@ -5470,15 +5549,16 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// Don't parse FOO:BAR as if it were a typo for FOO::BAR inside a class, in
// this context it is a bitfield. Also in range-based for statement colon
// may delimit for-range-declaration.
- ColonProtectionRAIIObject X(*this,
- D.getContext() == Declarator::MemberContext ||
- (D.getContext() == Declarator::ForContext &&
- getLangOpts().CPlusPlus11));
+ ColonProtectionRAIIObject X(
+ *this, D.getContext() == DeclaratorContext::MemberContext ||
+ (D.getContext() == DeclaratorContext::ForContext &&
+ getLangOpts().CPlusPlus11));
// ParseDeclaratorInternal might already have parsed the scope.
if (D.getCXXScopeSpec().isEmpty()) {
- bool EnteringContext = D.getContext() == Declarator::FileContext ||
- D.getContext() == Declarator::MemberContext;
+ bool EnteringContext =
+ D.getContext() == DeclaratorContext::FileContext ||
+ D.getContext() == DeclaratorContext::MemberContext;
ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), nullptr,
EnteringContext);
}
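The two colon uses that ColonProtectionRAIIObject guards against read like this (illustrative; names invented):

    struct Flags {
      int mode : 4;      // member context: ':' introduces a bit-field width
    };
    void visit(int (&xs)[3]) {
      for (int x : xs)   // C++11 for context: ':' delimits the for-range-declaration
        (void)x;
    }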
@@ -5507,9 +5587,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// been expanded or contains auto; otherwise, it is parsed as part of the
// parameter-declaration-clause.
if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
- !((D.getContext() == Declarator::PrototypeContext ||
- D.getContext() == Declarator::LambdaExprParameterContext ||
- D.getContext() == Declarator::BlockLiteralContext) &&
+ !((D.getContext() == DeclaratorContext::PrototypeContext ||
+ D.getContext() == DeclaratorContext::LambdaExprParameterContext ||
+ D.getContext() == DeclaratorContext::BlockLiteralContext) &&
NextToken().is(tok::r_paren) &&
!D.hasGroupingParens() &&
!Actions.containsUnexpandedParameterPacks(D) &&
@@ -5541,22 +5621,22 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
AllowDeductionGuide = false;
} else if (D.getCXXScopeSpec().isSet()) {
AllowConstructorName =
- (D.getContext() == Declarator::FileContext ||
- D.getContext() == Declarator::MemberContext);
+ (D.getContext() == DeclaratorContext::FileContext ||
+ D.getContext() == DeclaratorContext::MemberContext);
AllowDeductionGuide = false;
} else {
- AllowConstructorName = (D.getContext() == Declarator::MemberContext);
+ AllowConstructorName =
+ (D.getContext() == DeclaratorContext::MemberContext);
AllowDeductionGuide =
- (D.getContext() == Declarator::FileContext ||
- D.getContext() == Declarator::MemberContext);
+ (D.getContext() == DeclaratorContext::FileContext ||
+ D.getContext() == DeclaratorContext::MemberContext);
}
- SourceLocation TemplateKWLoc;
bool HadScope = D.getCXXScopeSpec().isValid();
if (ParseUnqualifiedId(D.getCXXScopeSpec(),
/*EnteringContext=*/true,
/*AllowDestructorName=*/true, AllowConstructorName,
- AllowDeductionGuide, nullptr, TemplateKWLoc,
+ AllowDeductionGuide, nullptr, nullptr,
D.getName()) ||
// Once we're past the identifier, if the scope was bad, mark the
// whole declarator bad.
@@ -5604,15 +5684,16 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// An identifier within parens is unlikely to be intended to be anything
// other than a name being "declared".
DiagnoseIdentifier = true;
- else if (D.getContext() == Declarator::TemplateTypeArgContext)
+ else if (D.getContext() == DeclaratorContext::TemplateArgContext)
// T<int N> is an accidental identifier; T<int N indicates a missing '>'.
DiagnoseIdentifier =
NextToken().isOneOf(tok::comma, tok::greater, tok::greatergreater);
- else if (D.getContext() == Declarator::AliasDeclContext ||
- D.getContext() == Declarator::AliasTemplateContext)
+ else if (D.getContext() == DeclaratorContext::AliasDeclContext ||
+ D.getContext() == DeclaratorContext::AliasTemplateContext)
// The most likely error is that the ';' was forgotten.
DiagnoseIdentifier = NextToken().isOneOf(tok::comma, tok::semi);
- else if (D.getContext() == Declarator::TrailingReturnContext &&
+ else if ((D.getContext() == DeclaratorContext::TrailingReturnContext ||
+ D.getContext() == DeclaratorContext::TrailingReturnVarContext) &&
!isCXX11VirtSpecifier(Tok))
DiagnoseIdentifier = NextToken().isOneOf(
tok::comma, tok::semi, tok::equal, tok::l_brace, tok::kw_try);
@@ -5626,6 +5707,18 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
}
if (Tok.is(tok::l_paren)) {
+ // If this might be an abstract-declarator followed by a direct-initializer,
+ // check whether this is a valid declarator chunk. If it can't be, assume
+ // that it's an initializer instead.
+ if (D.mayOmitIdentifier() && D.mayBeFollowedByCXXDirectInit()) {
+ RevertingTentativeParsingAction PA(*this);
+ if (TryParseDeclarator(true, D.mayHaveIdentifier(), true) ==
+ TPResult::False) {
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ goto PastIdentifier;
+ }
+ }
+
// direct-declarator: '(' declarator ')'
// direct-declarator: '(' attributes declarator ')'
// Example: 'char (*X)' or 'int (*XX)(void)'
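A guess at the flavor of declarator-versus-initializer ambiguity the new tentative parse above is probing, purely as an illustration (names invented; this is an inference from the code, not taken from the patch):

    struct T { T(int) {} };
    int i(0);     // '(' opens a direct-initializer
    int (j) = 0;  // '(' merely parenthesizes the declarator-id
    T (k)(1);     // declarator parens, then a direct-initializer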
@@ -5659,7 +5752,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
LLVM_BUILTIN_TRAP;
if (Tok.is(tok::l_square))
return ParseMisplacedBracketDeclarator(D);
- if (D.getContext() == Declarator::MemberContext) {
+ if (D.getContext() == DeclaratorContext::MemberContext) {
// Objective-C++: Detect C++ keywords and try to prevent further errors by
// treating these keyword as valid member names.
if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus &&
@@ -5889,9 +5982,9 @@ void Parser::ParseParenDeclarator(Declarator &D) {
ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
// Match the ')'.
T.consumeClose();
- D.AddTypeInfo(DeclaratorChunk::getParen(T.getOpenLocation(),
- T.getCloseLocation()),
- attrs, T.getCloseLocation());
+ D.AddTypeInfo(
+ DeclaratorChunk::getParen(T.getOpenLocation(), T.getCloseLocation()),
+ std::move(attrs), T.getCloseLocation());
D.setGroupingParens(hadGroupingParens);
@@ -6042,9 +6135,9 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
bool IsCXX11MemberFunction =
getLangOpts().CPlusPlus11 &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
- (D.getContext() == Declarator::MemberContext
+ (D.getContext() == DeclaratorContext::MemberContext
? !D.getDeclSpec().isFriendSpecified()
- : D.getContext() == Declarator::FileContext &&
+ : D.getContext() == DeclaratorContext::FileContext &&
D.getCXXScopeSpec().isValid() &&
Actions.CurContext->isRecord());
Sema::CXXThisScopeRAII ThisScope(Actions,
@@ -6096,7 +6189,8 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
StartLoc = D.getDeclSpec().getTypeSpecTypeLoc();
LocalEndLoc = Tok.getLocation();
SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(Range);
+ TrailingReturnType =
+ ParseTrailingReturnType(Range, D.mayBeFollowedByCXXDirectInit());
EndLoc = Range.getEnd();
}
} else if (standardAttributesAllowed()) {
@@ -6120,28 +6214,19 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
}
// Remember that we parsed a function type, and remember the attributes.
- D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
- IsAmbiguous,
- LParenLoc,
- ParamInfo.data(), ParamInfo.size(),
- EllipsisLoc, RParenLoc,
- DS.getTypeQualifiers(),
- RefQualifierIsLValueRef,
- RefQualifierLoc, ConstQualifierLoc,
- VolatileQualifierLoc,
- RestrictQualifierLoc,
- /*MutableLoc=*/SourceLocation(),
- ESpecType, ESpecRange,
- DynamicExceptions.data(),
- DynamicExceptionRanges.data(),
- DynamicExceptions.size(),
- NoexceptExpr.isUsable() ?
- NoexceptExpr.get() : nullptr,
- ExceptionSpecTokens,
- DeclsInPrototype,
- StartLoc, LocalEndLoc, D,
- TrailingReturnType),
- FnAttrs, EndLoc);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(
+ HasProto, IsAmbiguous, LParenLoc, ParamInfo.data(),
+ ParamInfo.size(), EllipsisLoc, RParenLoc,
+ DS.getTypeQualifiers(), RefQualifierIsLValueRef,
+ RefQualifierLoc, ConstQualifierLoc, VolatileQualifierLoc,
+ RestrictQualifierLoc,
+ /*MutableLoc=*/SourceLocation(), ESpecType, ESpecRange,
+ DynamicExceptions.data(), DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
+ ExceptionSpecTokens, DeclsInPrototype, StartLoc,
+ LocalEndLoc, D, TrailingReturnType),
+ std::move(FnAttrs), EndLoc);
}
/// ParseRefQualifier - Parses a member function ref-qualifier. Returns
@@ -6309,10 +6394,10 @@ void Parser::ParseParameterDeclarationClause(
// Parse the declarator. This is "PrototypeContext" or
// "LambdaExprParameterContext", because we must accept either
// 'declarator' or 'abstract-declarator' here.
- Declarator ParmDeclarator(DS,
- D.getContext() == Declarator::LambdaExprContext ?
- Declarator::LambdaExprParameterContext :
- Declarator::PrototypeContext);
+ Declarator ParmDeclarator(
+ DS, D.getContext() == DeclaratorContext::LambdaExprContext
+ ? DeclaratorContext::LambdaExprParameterContext
+ : DeclaratorContext::PrototypeContext);
ParseDeclarator(ParmDeclarator);
// Parse GNU attributes, if present.
@@ -6355,7 +6440,7 @@ void Parser::ParseParameterDeclarationClause(
SourceLocation EqualLoc = Tok.getLocation();
// Parse the default argument
- if (D.getContext() == Declarator::MemberContext) {
+ if (D.getContext() == DeclaratorContext::MemberContext) {
// If we're inside a class definition, cache the tokens
// corresponding to the default argument. We'll actually parse
// them when we see the end of the class definition.
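Caching those tokens until the class is complete is what lets a default argument refer forward, as in this sketch (illustrative; names invented):

    struct S {
      void f(int x = S::limit);   // cached now, parsed once the class is complete
      static const int limit = 8; // declared after the use above
    };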
@@ -6463,7 +6548,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, nullptr,
T.getOpenLocation(),
T.getCloseLocation()),
- attrs, T.getCloseLocation());
+ std::move(attrs), T.getCloseLocation());
return;
} else if (Tok.getKind() == tok::numeric_constant &&
GetLookAheadToken(1).is(tok::r_square)) {
@@ -6476,11 +6561,10 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
MaybeParseCXX11Attributes(attrs);
// Remember that we parsed an array type, and remember its features.
- D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false,
- ExprRes.get(),
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, ExprRes.get(),
T.getOpenLocation(),
T.getCloseLocation()),
- attrs, T.getCloseLocation());
+ std::move(attrs), T.getCloseLocation());
return;
} else if (Tok.getKind() == tok::code_completion) {
Actions.CodeCompleteBracketDeclarator(getCurScope());
@@ -6553,12 +6637,11 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
MaybeParseCXX11Attributes(DS.getAttributes());
// Remember that we parsed an array type, and remember its features.
- D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(),
- StaticLoc.isValid(), isStar,
- NumElements.get(),
- T.getOpenLocation(),
- T.getCloseLocation()),
- DS.getAttributes(), T.getCloseLocation());
+ D.AddTypeInfo(
+ DeclaratorChunk::getArray(DS.getTypeQualifiers(), StaticLoc.isValid(),
+ isStar, NumElements.get(), T.getOpenLocation(),
+ T.getCloseLocation()),
+ std::move(DS.getAttributes()), T.getCloseLocation());
}
/// Diagnose brackets before an identifier.
@@ -6610,18 +6693,15 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
if (NeedParens) {
// Create a DeclaratorChunk for the inserted parens.
- ParsedAttributes attrs(AttrFactory);
SourceLocation EndLoc = PP.getLocForEndOfToken(D.getLocEnd());
- D.AddTypeInfo(DeclaratorChunk::getParen(SuggestParenLoc, EndLoc), attrs,
+ D.AddTypeInfo(DeclaratorChunk::getParen(SuggestParenLoc, EndLoc),
SourceLocation());
}
// Adding back the bracket info to the end of the Declarator.
for (unsigned i = 0, e = TempDeclarator.getNumTypeObjects(); i < e; ++i) {
const DeclaratorChunk &Chunk = TempDeclarator.getTypeObject(i);
- ParsedAttributes attrs(AttrFactory);
- attrs.set(Chunk.Common.AttrList);
- D.AddTypeInfo(Chunk, attrs, SourceLocation());
+ D.AddTypeInfo(Chunk, SourceLocation());
}
// The missing identifier would have been diagnosed in ParseDirectDeclarator.
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 44e7a3512098..7c4c83d032b6 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -14,6 +14,7 @@
#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/OperatorKinds.h"
@@ -22,7 +23,6 @@
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
@@ -55,7 +55,7 @@ using namespace clang;
/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
/// 'namespace' identifier '=' qualified-namespace-specifier ';'
///
-Parser::DeclGroupPtrTy Parser::ParseNamespace(unsigned Context,
+Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc) {
assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
@@ -183,13 +183,12 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(unsigned Context,
ParseScope NamespaceScope(this, Scope::DeclScope);
UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
- Decl *NamespcDecl =
- Actions.ActOnStartNamespaceDef(getCurScope(), InlineLoc, NamespaceLoc,
- IdentLoc, Ident, T.getOpenLocation(),
- attrs.getList(), ImplicitUsingDirectiveDecl);
+ Decl *NamespcDecl = Actions.ActOnStartNamespaceDef(
+ getCurScope(), InlineLoc, NamespaceLoc, IdentLoc, Ident,
+ T.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl);
- PrettyDeclStackTraceEntry CrashInfo(Actions, NamespcDecl, NamespaceLoc,
- "parsing namespace");
+ PrettyDeclStackTraceEntry CrashInfo(Actions.Context, NamespcDecl,
+ NamespaceLoc, "parsing namespace");
// Parse the contents of the namespace. This includes parsing recovery on
// any improperly nested namespaces.
@@ -233,11 +232,10 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
// desugaring it here.
ParseScope NamespaceScope(this, Scope::DeclScope);
UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
- Decl *NamespcDecl =
- Actions.ActOnStartNamespaceDef(getCurScope(), SourceLocation(),
- NamespaceLoc[index], IdentLoc[index],
- Ident[index], Tracker.getOpenLocation(),
- attrs.getList(), ImplicitUsingDirectiveDecl);
+ Decl *NamespcDecl = Actions.ActOnStartNamespaceDef(
+ getCurScope(), SourceLocation(), NamespaceLoc[index], IdentLoc[index],
+ Ident[index], Tracker.getOpenLocation(), attrs,
+ ImplicitUsingDirectiveDecl);
assert(!ImplicitUsingDirectiveDecl &&
"nested namespace definition cannot define anonymous namespace");
@@ -307,7 +305,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
/// 'extern' string-literal '{' declaration-seq[opt] '}'
/// 'extern' string-literal declaration
///
-Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
+Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
assert(isTokenStringLiteral() && "Not a string literal!");
ExprResult Lang = ParseStringLiteralExpression(false);
@@ -434,7 +432,7 @@ Decl *Parser::ParseExportDeclaration() {
/// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
/// using-directive. Assumes that current token is 'using'.
Parser::DeclGroupPtrTy
-Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
+Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs) {
@@ -482,7 +480,7 @@ Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
/// namespace-name attributes[opt] ;
///
-Decl *Parser::ParseUsingDirective(unsigned Context,
+Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs) {
@@ -543,7 +541,7 @@ Decl *Parser::ParseUsingDirective(unsigned Context,
SkipUntil(tok::semi);
return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS,
- IdentLoc, NamespcName, attrs.getList());
+ IdentLoc, NamespcName, attrs);
}
/// Parse a using-declarator (or the identifier in a C++11 alias-declaration).
@@ -551,7 +549,8 @@ Decl *Parser::ParseUsingDirective(unsigned Context,
/// using-declarator:
/// 'typename'[opt] nested-name-specifier unqualified-id
///
-bool Parser::ParseUsingDeclarator(unsigned Context, UsingDeclarator &D) {
+bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
+ UsingDeclarator &D) {
D.clear();
// Ignore optional 'typename'.
@@ -582,7 +581,8 @@ bool Parser::ParseUsingDeclarator(unsigned Context, UsingDeclarator &D) {
// or the simple-template-id's template-name in the last component of the
// nested-name-specifier, the name is [...] considered to name the
// constructor.
- if (getLangOpts().CPlusPlus11 && Context == Declarator::MemberContext &&
+ if (getLangOpts().CPlusPlus11 &&
+ Context == DeclaratorContext::MemberContext &&
Tok.is(tok::identifier) &&
(NextToken().is(tok::semi) || NextToken().is(tok::comma) ||
NextToken().is(tok::ellipsis)) &&
@@ -600,7 +600,7 @@ bool Parser::ParseUsingDeclarator(unsigned Context, UsingDeclarator &D) {
/*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
NextToken().is(tok::equal)),
/*AllowDeductionGuide=*/false,
- nullptr, D.TemplateKWLoc, D.Name))
+ nullptr, nullptr, D.Name))
return true;
}
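The constructor-naming rule quoted in the hunk above (C++11 [namespace.udecl]) is what makes inheriting-constructor declarations parse (illustrative; Base/Derived invented):

    struct Base {
      Base(int);
    };
    struct Derived : Base {
      using Base::Base;  // last component names the constructor: inherits Base(int)
    };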
@@ -629,7 +629,7 @@ bool Parser::ParseUsingDeclarator(unsigned Context, UsingDeclarator &D) {
/// 'using' identifier attribute-specifier-seq[opt] = type-id ;
///
Parser::DeclGroupPtrTy
-Parser::ParseUsingDeclaration(unsigned Context,
+Parser::ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc, SourceLocation &DeclEnd,
AccessSpecifier AS) {
@@ -699,7 +699,7 @@ Parser::ParseUsingDeclaration(unsigned Context,
// "typename" keyword is allowed for identifiers only,
// because it may be a type definition.
if (D.TypenameLoc.isValid() &&
- D.Name.getKind() != UnqualifiedId::IK_Identifier) {
+ D.Name.getKind() != UnqualifiedIdKind::IK_Identifier) {
Diag(D.Name.getSourceRange().getBegin(),
diag::err_typename_identifiers_only)
<< FixItHint::CreateRemoval(SourceRange(D.TypenameLoc));
@@ -709,7 +709,7 @@ Parser::ParseUsingDeclaration(unsigned Context,
Decl *UD = Actions.ActOnUsingDeclaration(getCurScope(), AS, UsingLoc,
D.TypenameLoc, D.SS, D.Name,
- D.EllipsisLoc, Attrs.getList());
+ D.EllipsisLoc, Attrs);
if (UD)
DeclsInGroup.push_back(UD);
}
@@ -753,7 +753,7 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
// Type alias templates cannot be specialized.
int SpecKind = -1;
if (TemplateInfo.Kind == ParsedTemplateInfo::Template &&
- D.Name.getKind() == UnqualifiedId::IK_TemplateId)
+ D.Name.getKind() == UnqualifiedIdKind::IK_TemplateId)
SpecKind = 0;
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization)
SpecKind = 1;
@@ -773,7 +773,7 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
}
// Name must be an identifier.
- if (D.Name.getKind() != UnqualifiedId::IK_Identifier) {
+ if (D.Name.getKind() != UnqualifiedIdKind::IK_Identifier) {
Diag(D.Name.StartLocation, diag::err_alias_declaration_not_identifier);
// No removal fixit: can't recover from this.
SkipUntil(tok::semi);
@@ -791,11 +791,11 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
<< FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
Decl *DeclFromDeclSpec = nullptr;
- TypeResult TypeAlias =
- ParseTypeName(nullptr,
- TemplateInfo.Kind ? Declarator::AliasTemplateContext
- : Declarator::AliasDeclContext,
- AS, &DeclFromDeclSpec, &Attrs);
+ TypeResult TypeAlias = ParseTypeName(
+ nullptr,
+ TemplateInfo.Kind ? DeclaratorContext::AliasTemplateContext
+ : DeclaratorContext::AliasDeclContext,
+ AS, &DeclFromDeclSpec, &Attrs);
if (OwnedType)
*OwnedType = DeclFromDeclSpec;
@@ -811,8 +811,8 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
TemplateParams ? TemplateParams->data() : nullptr,
TemplateParams ? TemplateParams->size() : 0);
return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
- UsingLoc, D.Name, Attrs.getList(),
- TypeAlias, DeclFromDeclSpec);
+ UsingLoc, D.Name, Attrs, TypeAlias,
+ DeclFromDeclSpec);
}
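The alias forms this code parses look like the following (illustrative; names invented):

    #include <vector>
    using IntVec = std::vector<int>;   // alias-declaration: the name must be an identifier
    template <typename T>
    using Vec = std::vector<T>;        // alias template: cannot be explicitly specialized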
/// ParseStaticAssertDeclaration - Parse C++0x or C11 static_assert-declaration.
@@ -940,7 +940,7 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// The operand of the decltype specifier is an unevaluated operand.
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated, nullptr,
- /*IsDecltype=*/true);
+ Sema::ExpressionEvaluationContextRecord::EK_Decltype);
Result =
Actions.CorrectDelayedTyposInExpr(ParseExpression(), [](Expr *E) {
return E->hasPlaceholderType() ? ExprError() : E;
@@ -1094,7 +1094,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
EndLocation = ParseDecltypeSpecifier(DS);
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
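A decltype-specifier in base-specifier position, which this path handles, looks like this (illustrative; names invented):

    struct Base { int n = 0; };
    Base b;
    struct Derived : decltype(b) {};  // C++11: decltype(b) is the base-type-specifier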
@@ -1195,7 +1195,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
@@ -1206,7 +1206,7 @@ void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_Keyword);
+ ParsedAttr::AS_Keyword);
}
}
@@ -1612,14 +1612,15 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// new struct s;
// or
// &T::operator struct s;
- // For these, DSC is DSC_type_specifier or DSC_alias_declaration.
+ // For these, DSC is DeclSpecContext::DSC_type_specifier or
+ // DeclSpecContext::DSC_alias_declaration.
// If there are attributes after class name, parse them.
MaybeParseCXX11Attributes(Attributes);
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
Sema::TagUseKind TUK;
- if (DSC == DSC_trailing)
+ if (DSC == DeclSpecContext::DSC_trailing)
TUK = Sema::TUK_Reference;
else if (Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
@@ -1749,24 +1750,16 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// This is an explicit instantiation of a class template.
ProhibitAttributes(attrs);
- TagOrTempResult
- = Actions.ActOnExplicitInstantiation(getCurScope(),
- TemplateInfo.ExternLoc,
- TemplateInfo.TemplateLoc,
- TagType,
- StartLoc,
- SS,
- TemplateId->Template,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc,
- attrs.getList());
-
- // Friend template-ids are treated as references unless
- // they have template headers, in which case they're ill-formed
- // (FIXME: "template <class T> friend class A<T>::B<int>;").
- // We diagnose this error in ActOnClassTemplateSpecialization.
+ TagOrTempResult = Actions.ActOnExplicitInstantiation(
+ getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc,
+ TagType, StartLoc, SS, TemplateId->Template,
+ TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr,
+ TemplateId->RAngleLoc, attrs);
+
+ // Friend template-ids are treated as references unless
+ // they have template headers, in which case they're ill-formed
+ // (FIXME: "template <class T> friend class A<T>::B<int>;").
+ // We diagnose this error in ActOnClassTemplateSpecialization.
} else if (TUK == Sema::TUK_Reference ||
(TUK == Sema::TUK_Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
@@ -1822,7 +1815,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Build the class template specialization.
TagOrTempResult = Actions.ActOnClassTemplateSpecialization(
getCurScope(), TagType, TUK, StartLoc, DS.getModulePrivateSpecLoc(),
- *TemplateId, attrs.getList(),
+ *TemplateId, attrs,
MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0]
: nullptr,
TemplateParams ? TemplateParams->size() : 0),
@@ -1837,24 +1830,18 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
//
ProhibitAttributes(attrs);
- TagOrTempResult
- = Actions.ActOnExplicitInstantiation(getCurScope(),
- TemplateInfo.ExternLoc,
- TemplateInfo.TemplateLoc,
- TagType, StartLoc, SS, Name,
- NameLoc, attrs.getList());
+ TagOrTempResult = Actions.ActOnExplicitInstantiation(
+ getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc,
+ TagType, StartLoc, SS, Name, NameLoc, attrs);
} else if (TUK == Sema::TUK_Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
ProhibitAttributes(attrs);
- TagOrTempResult =
- Actions.ActOnTemplatedFriendTag(getCurScope(), DS.getFriendSpecLoc(),
- TagType, StartLoc, SS,
- Name, NameLoc, attrs.getList(),
- MultiTemplateParamsArg(
- TemplateParams? &(*TemplateParams)[0]
- : nullptr,
- TemplateParams? TemplateParams->size() : 0));
+ TagOrTempResult = Actions.ActOnTemplatedFriendTag(
+ getCurScope(), DS.getFriendSpecLoc(), TagType, StartLoc, SS, Name,
+ NameLoc, attrs,
+ MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0] : nullptr,
+ TemplateParams ? TemplateParams->size() : 0));
} else {
if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
ProhibitAttributes(attrs);
@@ -1881,15 +1868,14 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
// Declaration or definition of a class type
- TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc,
- SS, Name, NameLoc, attrs.getList(), AS,
- DS.getModulePrivateSpecLoc(),
- TParams, Owned, IsDependent,
- SourceLocation(), false,
- clang::TypeResult(),
- DSC == DSC_type_specifier,
- DSC == DSC_template_param ||
- DSC == DSC_template_type_arg, &SkipBody);
+ TagOrTempResult = Actions.ActOnTag(
+ getCurScope(), TagType, TUK, StartLoc, SS, Name, NameLoc, attrs, AS,
+ DS.getModulePrivateSpecLoc(), TParams, Owned, IsDependent,
+ SourceLocation(), false, clang::TypeResult(),
+ DSC == DeclSpecContext::DSC_type_specifier,
+ DSC == DeclSpecContext::DSC_template_param ||
+ DSC == DeclSpecContext::DSC_template_type_arg,
+ &SkipBody);
// If ActOnTag said the type was dependent, try again with the
// less common call.
@@ -1927,7 +1913,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (!TagOrTempResult.isInvalid())
// Delayed processing of attributes.
- Actions.ProcessDeclAttributeDelayed(TagOrTempResult.get(), attrs.getList());
+ Actions.ProcessDeclAttributeDelayed(TagOrTempResult.get(), attrs);
const char *PrevSpec = nullptr;
unsigned DiagID;
@@ -2107,7 +2093,7 @@ AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
}
}
-/// \brief If the given declarator has any parts for which parsing has to be
+/// If the given declarator has any parts for which parsing has to be
/// delayed, e.g., default arguments or an exception-specification, create a
/// late-parsed method declaration record to handle the parsing at the end of
/// the class definition.
@@ -2247,7 +2233,7 @@ bool Parser::isCXX11FinalKeyword() const {
Specifier == VirtSpecifiers::VS_Sealed;
}
-/// \brief Parse a C++ member-declarator up to, but not including, the optional
+/// Parse a C++ member-declarator up to, but not including, the optional
/// brace-or-equal-initializer or pure-specifier.
bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize,
@@ -2298,12 +2284,10 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
if (!VS.isUnset()) {
// If we saw any GNU-style attributes that are known to GCC followed by a
// virt-specifier, issue a GCC-compat warning.
- const AttributeList *Attr = DeclaratorInfo.getAttributes();
- while (Attr) {
- if (Attr->isKnownToGCC() && !Attr->isCXX11Attribute())
- Diag(Attr->getLoc(), diag::warn_gcc_attribute_location);
- Attr = Attr->getNext();
- }
+ for (const ParsedAttr &AL : DeclaratorInfo.getAttributes())
+ if (AL.isKnownToGCC() && !AL.isCXX11Attribute())
+ Diag(AL.getLoc(), diag::warn_gcc_attribute_location);
+
MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
}
}
@@ -2318,7 +2302,7 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
return false;
}
-/// \brief Look for declaration specifiers possibly occurring after C++11
+/// Look for declaration specifiers possibly occurring after C++11
/// virt-specifier-seq and diagnose them.
void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
Declarator &D,
@@ -2422,7 +2406,7 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
///
Parser::DeclGroupPtrTy
Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
- AttributeList *AccessAttrs,
+ ParsedAttributes &AccessAttrs,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject *TemplateDiags) {
if (Tok.is(tok::at)) {
@@ -2474,7 +2458,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
if (ParseUnqualifiedId(SS, false, true, true, false, nullptr,
- TemplateKWLoc, Name)) {
+ &TemplateKWLoc, Name)) {
SkipUntil(tok::semi);
return nullptr;
}
@@ -2486,10 +2470,12 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return nullptr;
}
+ // FIXME: We should do something with the 'template' keyword here.
return DeclGroupPtrTy::make(DeclGroupRef(Actions.ActOnUsingDeclaration(
getCurScope(), AS, /*UsingLoc*/ SourceLocation(),
/*TypenameLoc*/ SourceLocation(), SS, Name,
- /*EllipsisLoc*/ SourceLocation(), /*AttrList*/ nullptr)));
+ /*EllipsisLoc*/ SourceLocation(),
+ /*AttrList*/ ParsedAttributesView())));
}
}
@@ -2509,7 +2495,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation DeclEnd;
return DeclGroupPtrTy::make(
DeclGroupRef(ParseTemplateDeclarationOrSpecialization(
- Declarator::MemberContext, DeclEnd, AS, AccessAttrs)));
+ DeclaratorContext::MemberContext, DeclEnd, AccessAttrs, AS)));
}
// Handle: member-declaration ::= '__extension__' member-declaration
@@ -2522,12 +2508,12 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
ParsedAttributesWithRange attrs(AttrFactory);
- ParsedAttributesWithRange FnAttrs(AttrFactory);
+ ParsedAttributesViewWithRange FnAttrs;
// Optional C++11 attribute-specifier
MaybeParseCXX11Attributes(attrs);
// We need to keep these attributes for future diagnostic
// before they are taken over by declaration specifier.
- FnAttrs.addAll(attrs.getList());
+ FnAttrs.addAll(attrs.begin(), attrs.end());
FnAttrs.Range = attrs.Range;
MaybeParseMicrosoftAttributes(attrs);
@@ -2545,7 +2531,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
SourceLocation DeclEnd;
// Otherwise, it must be a using-declaration or an alias-declaration.
- return ParseUsingDeclaration(Declarator::MemberContext, TemplateInfo,
+ return ParseUsingDeclaration(DeclaratorContext::MemberContext, TemplateInfo,
UsingLoc, DeclEnd, AS);
}
@@ -2559,7 +2545,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (MalformedTypeSpec)
DS.SetTypeSpecError();
- ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DeclSpecContext::DSC_class,
&CommonLateParsedAttrs);
// Turn off colon protection that was set for declspec.
@@ -2569,7 +2555,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// may get this far before the problem becomes obvious.
if (DS.hasTagDefinition() &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate &&
- DiagnoseMissingSemiAfterTagDefinition(DS, AS, DSC_class,
+ DiagnoseMissingSemiAfterTagDefinition(DS, AS, DeclSpecContext::DSC_class,
&CommonLateParsedAttrs))
return nullptr;
@@ -2593,7 +2579,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- ParsingDeclarator DeclaratorInfo(*this, DS, Declarator::MemberContext);
+ ParsingDeclarator DeclaratorInfo(*this, DS, DeclaratorContext::MemberContext);
VirtSpecifiers VS;
// Hold late-parsed attributes so we can attach a Decl to them later.
@@ -2774,7 +2760,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// initialize it.
ThisDecl = VT->getTemplatedDecl();
- if (ThisDecl && AccessAttrs)
+ if (ThisDecl)
Actions.ProcessDeclAttributeList(getCurScope(), ThisDecl, AccessAttrs);
}
@@ -2852,7 +2838,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
break;
if (Tok.isAtStartOfLine() &&
- !MightBeDeclarator(Declarator::MemberContext)) {
+ !MightBeDeclarator(DeclaratorContext::MemberContext)) {
// This comma was followed by a line-break and something which can't be
// the start of a declarator. The comma was probably a typo for a
// semicolon.
@@ -3006,10 +2992,12 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *TagDecl) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
switch (Tok.getKind()) {
case tok::kw___if_exists:
case tok::kw___if_not_exists:
- ParseMicrosoftIfExistsClassDeclaration(TagType, AS);
+ ParseMicrosoftIfExistsClassDeclaration(TagType, AccessAttrs, AS);
return nullptr;
case tok::semi:
@@ -3075,8 +3063,7 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
Diag(ASLoc, diag::err_access_specifier_interface) << (AS == AS_protected);
}
- if (Actions.ActOnAccessSpecifier(NewAS, ASLoc, EndLoc,
- AccessAttrs.getList())) {
+ if (Actions.ActOnAccessSpecifier(NewAS, ASLoc, EndLoc, AccessAttrs)) {
// found another attribute than only annotations
AccessAttrs.clear();
}
@@ -3089,7 +3076,7 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
TagDecl);
default:
- return ParseCXXClassMemberDeclaration(AS, AccessAttrs.getList());
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs);
}
}
@@ -3108,7 +3095,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
TagType == DeclSpec::TST_union ||
TagType == DeclSpec::TST_class) && "Invalid TagType!");
- PrettyDeclStackTraceEntry CrashInfo(Actions, TagDecl, RecordLoc,
+ PrettyDeclStackTraceEntry CrashInfo(Actions.Context, TagDecl, RecordLoc,
"parsing struct/union/class body");
// Determine whether this is a non-nested class. Note that local
@@ -3277,9 +3264,8 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (TagDecl)
Actions.ActOnFinishCXXMemberSpecification(getCurScope(), RecordLoc, TagDecl,
- T.getOpenLocation(),
- T.getCloseLocation(),
- attrs.getList());
+ T.getOpenLocation(),
+ T.getCloseLocation(), attrs);
// C++11 [class.mem]p2:
// Within the class member-specification, the class is regarded as complete
@@ -3502,7 +3488,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
return Diag(Tok, diag::err_expected) << tok::l_paren;
}
-/// \brief Parse a C++ exception-specification if present (C++0x [except.spec]).
+/// Parse a C++ exception-specification if present (C++0x [except.spec]).
///
/// exception-specification:
/// dynamic-exception-specification
@@ -3583,15 +3569,11 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// There is an argument.
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- NoexceptType = EST_ComputedNoexcept;
NoexceptExpr = ParseConstantExpression();
T.consumeClose();
- // The argument must be contextually convertible to bool. We use
- // CheckBooleanCondition for this purpose.
- // FIXME: Add a proper Sema entry point for this.
if (!NoexceptExpr.isInvalid()) {
- NoexceptExpr =
- Actions.CheckBooleanCondition(KeywordLoc, NoexceptExpr.get());
+ NoexceptExpr = Actions.ActOnNoexceptSpec(KeywordLoc, NoexceptExpr.get(),
+ NoexceptType);
NoexceptRange = SourceRange(KeywordLoc, T.getCloseLocation());
} else {
NoexceptType = EST_BasicNoexcept;
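The two noexcept forms distinguished here come from declarations like these (illustrative; names invented):

    void f() noexcept;                       // basic form, no argument
    void g() noexcept(sizeof(void *) == 8);  // computed form: a constant expression
                                             // contextually converted to bool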
@@ -3704,15 +3686,18 @@ ExceptionSpecificationType Parser::ParseDynamicExceptionSpecification(
/// ParseTrailingReturnType - Parse a trailing return type on a new-style
/// function declaration.
-TypeResult Parser::ParseTrailingReturnType(SourceRange &Range) {
+TypeResult Parser::ParseTrailingReturnType(SourceRange &Range,
+ bool MayBeFollowedByDirectInit) {
assert(Tok.is(tok::arrow) && "expected arrow");
ConsumeToken();
- return ParseTypeName(&Range, Declarator::TrailingReturnContext);
+ return ParseTypeName(&Range, MayBeFollowedByDirectInit
+ ? DeclaratorContext::TrailingReturnVarContext
+ : DeclaratorContext::TrailingReturnContext);
}
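A trailing return type in its plain form, for reference (illustrative; the name is invented):

    auto add(int a, int b) -> int { return a + b; }

Reading the hunk above, the new MayBeFollowedByDirectInit flag appears to select the variant context for declarators that a direct-initializer may follow; that reading is an inference from the code, not stated in the patch.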
-/// \brief We have just started parsing the definition of a new class,
+/// We have just started parsing the definition of a new class,
/// so push that class onto our stack of classes that is currently
/// being parsed.
Sema::ParsingClassState
@@ -3724,7 +3709,7 @@ Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass,
return Actions.PushParsingClass();
}
-/// \brief Deallocate the given parsed class and all of its nested
+/// Deallocate the given parsed class and all of its nested
/// classes.
void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
for (unsigned I = 0, N = Class->LateParsedDeclarations.size(); I != N; ++I)
@@ -3732,7 +3717,7 @@ void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
delete Class;
}
-/// \brief Pop the top class of the stack of classes that are
+/// Pop the top class of the stack of classes that are
/// currently being parsed.
///
/// This routine should be called when we have finished parsing the
@@ -3770,7 +3755,7 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
}
-/// \brief Try to parse an 'identifier' which appears within an attribute-token.
+/// Try to parse an 'identifier' which appears within an attribute-token.
///
/// \return the parsed identifier on success, and 0 if the next token is not an
/// attribute-token.
@@ -3818,16 +3803,15 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
IdentifierInfo *ScopeName) {
- switch (AttributeList::getKind(AttrName, ScopeName,
- AttributeList::AS_CXX11)) {
- case AttributeList::AT_CarriesDependency:
- case AttributeList::AT_Deprecated:
- case AttributeList::AT_FallThrough:
- case AttributeList::AT_CXX11NoReturn:
+ switch (ParsedAttr::getKind(AttrName, ScopeName, ParsedAttr::AS_CXX11)) {
+ case ParsedAttr::AT_CarriesDependency:
+ case ParsedAttr::AT_Deprecated:
+ case ParsedAttr::AT_FallThrough:
+ case ParsedAttr::AT_CXX11NoReturn:
return true;
- case AttributeList::AT_WarnUnusedResult:
+ case ParsedAttr::AT_WarnUnusedResult:
return !ScopeName && AttrName->getName().equals("nodiscard");
- case AttributeList::AT_Unused:
+ case ParsedAttr::AT_Unused:
return !ScopeName && AttrName->getName().equals("maybe_unused");
default:
return false;
@@ -3857,8 +3841,8 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
SourceLocation LParenLoc = Tok.getLocation();
const LangOptions &LO = getLangOpts();
- AttributeList::Syntax Syntax =
- LO.CPlusPlus ? AttributeList::AS_CXX11 : AttributeList::AS_C2x;
+ ParsedAttr::Syntax Syntax =
+ LO.CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x;
// If the attribute isn't known, we will not attempt to parse any
// arguments.
@@ -3889,25 +3873,26 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
- const AttributeList *Attr = Attrs.getList();
- if (Attr && IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
+ if (!Attrs.empty() &&
+ IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
+ ParsedAttr &Attr = *Attrs.begin();
// If the attribute is a standard or built-in attribute and we are
// parsing an argument list, we need to determine whether this attribute
// was allowed to have an argument list (such as [[deprecated]]), and how
// many arguments were parsed (so we can diagnose on [[deprecated()]]).
- if (Attr->getMaxArgs() && !NumArgs) {
+ if (Attr.getMaxArgs() && !NumArgs) {
// The attribute was allowed to have arguments, but none were provided
// even though the attribute parsed successfully. This is an error.
Diag(LParenLoc, diag::err_attribute_requires_arguments) << AttrName;
- Attr->setInvalid(true);
- } else if (!Attr->getMaxArgs()) {
+ Attr.setInvalid(true);
+ } else if (!Attr.getMaxArgs()) {
// The attribute parsed successfully, but was not allowed to have any
// arguments. It doesn't matter whether any were provided -- the
// presence of the argument list (even if empty) is diagnosed.
Diag(LParenLoc, diag::err_cxx11_attribute_forbids_arguments)
<< AttrName
<< FixItHint::CreateRemoval(SourceRange(LParenLoc, *EndLoc));
- Attr->setInvalid(true);
+ Attr.setInvalid(true);
}
}
return true;
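The argument-count diagnostics above correspond to these standard attribute spellings (illustrative; function names invented):

    [[deprecated]] void old_api();                  // argument clause omitted: OK
    [[deprecated("use new_api()")]] void mid_api(); // one argument: OK
    // [[deprecated()]] void bad_api();             // empty argument list: diagnosed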
@@ -4026,12 +4011,11 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
AttrName,
SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc, AttrLoc),
ScopeName, ScopeLoc, nullptr, 0,
- getLangOpts().CPlusPlus ? AttributeList::AS_CXX11
- : AttributeList::AS_C2x);
+ getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x);
if (TryConsumeToken(tok::ellipsis))
Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
- << AttrName->getName();
+ << AttrName;
}
if (ExpectAndConsume(tok::r_square))
@@ -4179,7 +4163,7 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
if (!T.consumeClose()) {
Attrs.addNew(UuidIdent, SourceRange(UuidLoc, T.getCloseLocation()), nullptr,
SourceLocation(), ArgExprs.data(), ArgExprs.size(),
- AttributeList::AS_Microsoft);
+ ParsedAttr::AS_Microsoft);
}
}
@@ -4217,8 +4201,9 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
} while (Tok.is(tok::l_square));
}
-void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
- AccessSpecifier& CurAS) {
+void Parser::ParseMicrosoftIfExistsClassDeclaration(
+ DeclSpec::TST TagType, ParsedAttributes &AccessAttrs,
+ AccessSpecifier &CurAS) {
IfExistsCondition Result;
if (ParseMicrosoftIfExistsCondition(Result))
return;
@@ -4248,7 +4233,8 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
// __if_exists, __if_not_exists can nest.
if (Tok.isOneOf(tok::kw___if_exists, tok::kw___if_not_exists)) {
- ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
+ ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType,
+ AccessAttrs, CurAS);
continue;
}
@@ -4265,7 +4251,8 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
SourceLocation ASLoc = Tok.getLocation();
ConsumeToken();
if (Tok.is(tok::colon))
- Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation());
+ Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation(),
+ ParsedAttributesView{});
else
Diag(Tok, diag::err_expected) << tok::colon;
ConsumeToken();
@@ -4273,7 +4260,7 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
}
// Parse all the comma separated declarators.
- ParseCXXClassMemberDeclaration(CurAS, nullptr);
+ ParseCXXClassMemberDeclaration(CurAS, AccessAttrs);
}
Braces.consumeClose();
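
For context, a rough sketch of the Microsoft extension being parsed here (requires -fms-extensions; names invented). The new AccessAttrs parameter threads any attributes attached to the current access-specifier down to members declared inside the block:

    struct Base { void hook(); };
    struct Derived : Base {
    public:
      __if_exists(Base::hook) {
        void callHook();      // parsed as a public member of Derived
      }
      __if_not_exists(Base::missing) {
        int fallback = 0;     // only parsed because Base::missing is absent
      }
    };
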
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index bc587628c954..4a0e1c5e3413 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Provides the Expression parsing implementation.
+/// Provides the Expression parsing implementation.
///
/// Expressions in C99 basically consist of a bunch of binary operators with
/// unary operators and other random stuff at the leaves.
@@ -32,7 +32,7 @@
#include "llvm/ADT/SmallVector.h"
using namespace clang;
-/// \brief Simple precedence-based parser for binary/ternary operators.
+/// Simple precedence-based parser for binary/ternary operators.
///
/// Note: we diverge from the C99 grammar when parsing the assignment-expression
/// production. C99 specifies that the LHS of an assignment operator should be
@@ -156,7 +156,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
return ParseRHSOfBinaryExpression(LHS, prec::Comma);
}
-/// \brief Parse an expr that doesn't include (top-level) commas.
+/// Parse an expr that doesn't include (top-level) commas.
ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
@@ -175,7 +175,7 @@ ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
return ParseRHSOfBinaryExpression(LHS, prec::Assignment);
}
-/// \brief Parse an assignment expression where part of an Objective-C message
+/// Parse an assignment expression where part of an Objective-C message
/// send has already been parsed.
///
/// In this case \p LBracLoc indicates the location of the '[' of the message
@@ -217,7 +217,15 @@ ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
return ParseConstantExpressionInExprEvalContext(isTypeCast);
}
-/// \brief Parse a constraint-expression.
+ExprResult Parser::ParseCaseExpression(SourceLocation CaseLoc) {
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
+ ExprResult Res(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ return Actions.ActOnCaseExpr(CaseLoc, Res);
+}
+
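
A small illustration of what ParseCaseExpression accepts (invented example): the label expression is parsed in a constant-evaluated context and only up to conditional precedence, so the ':' that terminates the case label is never consumed as part of the expression.

    constexpr int base() { return 10; }
    void dispatch(int n) {
      switch (n) {
      case base() + 1:  break;   // constant expression, via ActOnCaseExpr
      case base() << 2: break;   // any binary operator above prec::Conditional
      default:          break;
      }
    }
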
+/// Parse a constraint-expression.
///
/// \verbatim
/// constraint-expression: [Concepts TS temp.constr.decl p1]
@@ -246,30 +254,6 @@ bool Parser::isNotExpressionStart() {
return isKnownToBeDeclarationSpecifier();
}
-/// We've parsed something that could plausibly be intended to be a template
-/// name (\p LHS) followed by a '<' token, and the following code can't possibly
-/// be an expression. Determine if this is likely to be a template-id and if so,
-/// diagnose it.
-bool Parser::diagnoseUnknownTemplateId(ExprResult LHS, SourceLocation Less) {
- TentativeParsingAction TPA(*this);
- // FIXME: We could look at the token sequence in a lot more detail here.
- if (SkipUntil(tok::greater, tok::greatergreater, tok::greatergreatergreater,
- StopAtSemi | StopBeforeMatch)) {
- TPA.Commit();
-
- SourceLocation Greater;
- ParseGreaterThanInTemplateList(Greater, true, false);
- Actions.diagnoseExprIntendedAsTemplateName(getCurScope(), LHS,
- Less, Greater);
- return true;
- }
-
- // There's no matching '>' token, this probably isn't supposed to be
- // interpreted as a template-id. Parse it as an (ill-formed) comparison.
- TPA.Revert();
- return false;
-}
-
bool Parser::isFoldOperator(prec::Level Level) const {
return Level > prec::Unknown && Level != prec::Conditional &&
Level != prec::Spaceship;
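
The deleted diagnoseUnknownTemplateId was an eager, tentative-parse heuristic; the replacement (checkPotentialAngleBracket / checkPotentialAngleBracketDelimiter, used throughout the hunks below) records a suspicious '<' and lets a later ',', '>', '>>' or '>>>' decide whether it really opened a template argument list. A sketch of the two readings being disambiguated (names invented):

    int a = 1, b = 2, c = 3;
    int cmp = a < b > (c);            // comparison chain: (a < b) > c
    template <typename T> T pass(T v) { return v; }
    int val = pass<int>(7);           // genuine template-id: '<' is a bracket
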
@@ -279,7 +263,7 @@ bool Parser::isFoldOperator(tok::TokenKind Kind) const {
return isFoldOperator(getBinOpPrecedence(Kind, GreaterThanIsOperator, true));
}
-/// \brief Parse a binary expression that starts with \p LHS and has a
+/// Parse a binary expression that starts with \p LHS and has a
/// precedence of at least \p MinPrec.
ExprResult
Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
@@ -302,6 +286,14 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
if (OpToken.is(tok::caretcaret)) {
return ExprError(Diag(Tok, diag::err_opencl_logical_exclusive_or));
}
+
+ // If we're potentially in a template-id, we may now be able to determine
+ // whether we're actually in one or not.
+ if (OpToken.isOneOf(tok::comma, tok::greater, tok::greatergreater,
+ tok::greatergreatergreater) &&
+ checkPotentialAngleBracketDelimiter(OpToken))
+ return ExprError();
+
// Bail out when encountering a comma followed by a token which can't
// possibly be the start of an expression. For instance:
// int f() { return 1, }
@@ -313,16 +305,6 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
return LHS;
}
- // If a '<' token is followed by a type that can be a template argument and
- // cannot be an expression, then this is ill-formed, but might be intended
- // to be a template-id.
- if (OpToken.is(tok::less) && Actions.mightBeIntendedToBeTemplateName(LHS) &&
- (isKnownToBeDeclarationSpecifier() ||
- Tok.isOneOf(tok::greater, tok::greatergreater,
- tok::greatergreatergreater)) &&
- diagnoseUnknownTemplateId(LHS, OpToken.getLocation()))
- return ExprError();
-
// If the next token is an ellipsis, then this is a fold-expression. Leave
// it alone so we can handle it in the paren expression.
if (isFoldOperator(NextTokPrec) && Tok.is(tok::ellipsis)) {
@@ -336,7 +318,17 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// Special case handling for the ternary operator.
ExprResult TernaryMiddle(true);
if (NextTokPrec == prec::Conditional) {
- if (Tok.isNot(tok::colon)) {
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ // Parse a braced-init-list here for error recovery purposes.
+ SourceLocation BraceLoc = Tok.getLocation();
+ TernaryMiddle = ParseBraceInitializer();
+ if (!TernaryMiddle.isInvalid()) {
+ Diag(BraceLoc, diag::err_init_list_bin_op)
+ << /*RHS*/ 1 << PP.getSpelling(OpToken)
+ << Actions.getExprRange(TernaryMiddle.get());
+ TernaryMiddle = ExprError();
+ }
+ } else if (Tok.isNot(tok::colon)) {
// Don't parse FOO:BAR as if it were a typo for FOO::BAR.
ColonProtectionRAIIObject X(*this);
@@ -345,11 +337,6 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// In particular, the RHS of the '?' is 'expression', not
// 'logical-OR-expression' as we might expect.
TernaryMiddle = ParseExpression();
- if (TernaryMiddle.isInvalid()) {
- Actions.CorrectDelayedTyposInExpr(LHS);
- LHS = ExprError();
- TernaryMiddle = nullptr;
- }
} else {
// Special case handling of "X ? Y : Z" where Y is empty:
// logical-OR-expression '?' ':' conditional-expression [GNU]
@@ -357,6 +344,12 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
Diag(Tok, diag::ext_gnu_conditional_expr);
}
+ if (TernaryMiddle.isInvalid()) {
+ Actions.CorrectDelayedTyposInExpr(LHS);
+ LHS = ExprError();
+ TernaryMiddle = nullptr;
+ }
+
if (!TryConsumeToken(tok::colon, ColonLoc)) {
// Otherwise, we're missing a ':'. Assume that this was a typo that
// the user forgot. If we're not in a macro expansion, we can suggest
@@ -469,6 +462,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
if (ThisPrec == prec::Assignment) {
Diag(OpToken, diag::warn_cxx98_compat_generalized_initializer_lists)
<< Actions.getExprRange(RHS.get());
+ } else if (ColonLoc.isValid()) {
+ Diag(ColonLoc, diag::err_init_list_bin_op)
+ << /*RHS*/1 << ":"
+ << Actions.getExprRange(RHS.get());
+ LHS = ExprError();
} else {
Diag(OpToken, diag::err_init_list_bin_op)
<< /*RHS*/1 << PP.getSpelling(OpToken)
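
Taken together, these two hunks parse a braced-init-list as an operand purely for error recovery and then reject it with err_init_list_bin_op; a sketch of the cases involved (invented names):

    struct W { W(int) {} };
    void g(bool cond) {
      W w = cond ? W{1} : W{2};   // OK: the braces belong to functional casts
      // int k = cond ? {1} : 2;  // error: initializer list on the RHS of '?'
      // int m = cond ? 1 : {2};  // error: initializer list on the RHS of ':'
    }
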
@@ -513,7 +511,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
}
}
-/// \brief Parse a cast-expression, or, if \p isUnaryExpression is true,
+/// Parse a cast-expression, or, if \p isUnaryExpression is true,
/// parse a unary-expression.
///
/// \p isAddressOfOperand exists because an id-expression that is the
@@ -570,7 +568,7 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
};
}
-/// \brief Parse a cast-expression, or, if \pisUnaryExpression is true, parse
+/// Parse a cast-expression, or, if \p isUnaryExpression is true, parse
/// a unary-expression.
///
/// \p isAddressOfOperand exists because an id-expression that is the operand
@@ -619,6 +617,8 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback {
/// [GNU] '__FUNCTION__'
/// [MS] '__FUNCDNAME__'
/// [MS] 'L__FUNCTION__'
+/// [MS] '__FUNCSIG__'
+/// [MS] 'L__FUNCSIG__'
/// [GNU] '__PRETTY_FUNCTION__'
/// [GNU] '(' compound-statement ')'
/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
@@ -820,6 +820,8 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
assert(Res.get() == nullptr && "Stray primary-expression annotation?");
Res = getExprAnnotation(Tok);
ConsumeAnnotationToken();
+ if (!Res.isInvalid() && Tok.is(tok::less))
+ checkPotentialAngleBracket(Res);
break;
case tok::kw___super:
@@ -1000,7 +1002,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
DeclaratorInfo);
if (Ty.isInvalid())
@@ -1039,11 +1041,13 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
isAddressOfOperand, std::move(Validator),
/*IsInlineAsmIdentifier=*/false,
Tok.is(tok::r_paren) ? nullptr : &Replacement);
- if (!Res.isInvalid() && !Res.get()) {
+ if (!Res.isInvalid() && Res.isUnset()) {
UnconsumeToken(Replacement);
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
NotCastExpr, isTypeCast);
}
+ if (!Res.isInvalid() && Tok.is(tok::less))
+ checkPotentialAngleBracket(Res);
break;
}
case tok::char_constant: // constant: character-constant
@@ -1059,6 +1063,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___FUNCDNAME__: // primary-expression: __FUNCDNAME__ [MS]
case tok::kw___FUNCSIG__: // primary-expression: __FUNCSIG__ [MS]
case tok::kw_L__FUNCTION__: // primary-expression: L__FUNCTION__ [MS]
+ case tok::kw_L__FUNCSIG__: // primary-expression: L__FUNCSIG__ [MS]
case tok::kw___PRETTY_FUNCTION__: // primary-expression: __P..Y_F..N__ [GNU]
Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
ConsumeToken();
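
With this change the wide MS spelling gets its own primary-expression token alongside the narrow one. An illustrative use (requires -fms-extensions; the printed signature text is an assumption, it varies by target):

    #include <cstdio>
    void demo() {
      std::printf("%s\n", __FUNCSIG__);     // e.g. "void __cdecl demo(void)"
      std::printf("%ls\n", L__FUNCSIG__);   // wide variant, now a keyword too
    }
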
@@ -1209,7 +1214,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Ty.isInvalid())
break;
@@ -1224,6 +1229,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::annot_decltype:
case tok::kw_char:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_bool:
@@ -1449,7 +1455,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return Res;
}
-/// \brief Once the leading part of a postfix-expression is parsed, this
+/// Once the leading part of a postfix-expression is parsed, this
/// method parses any suffixes that apply.
///
/// \verbatim
@@ -1686,8 +1692,10 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
CXXScopeSpec SS;
ParsedType ObjectType;
bool MayBePseudoDestructor = false;
+ Expr* OrigLHS = !LHS.isInvalid() ? LHS.get() : nullptr;
+
if (getLangOpts().CPlusPlus && !LHS.isInvalid()) {
- Expr *Base = LHS.get();
+ Expr *Base = OrigLHS;
const Type* BaseType = Base->getType().getTypePtrOrNull();
if (BaseType && Tok.is(tok::l_paren) &&
(BaseType->isFunctionType() ||
@@ -1712,11 +1720,25 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
if (Tok.is(tok::code_completion)) {
+ tok::TokenKind CorrectedOpKind =
+ OpKind == tok::arrow ? tok::period : tok::arrow;
+ ExprResult CorrectedLHS(/*IsInvalid=*/true);
+ if (getLangOpts().CPlusPlus && OrigLHS) {
+ const bool DiagsAreSuppressed = Diags.getSuppressAllDiagnostics();
+ Diags.setSuppressAllDiagnostics(true);
+ CorrectedLHS = Actions.ActOnStartCXXMemberReference(
+ getCurScope(), OrigLHS, OpLoc, CorrectedOpKind, ObjectType,
+ MayBePseudoDestructor);
+ Diags.setSuppressAllDiagnostics(DiagsAreSuppressed);
+ }
+
+ Expr *Base = LHS.get();
+ Expr *CorrectedBase = CorrectedLHS.get();
+
// Code completion for a member access expression.
- if (Expr *Base = LHS.get())
- Actions.CodeCompleteMemberReferenceExpr(
- getCurScope(), Base, OpLoc, OpKind == tok::arrow,
- ExprStatementTokLoc == Base->getLocStart());
+ Actions.CodeCompleteMemberReferenceExpr(
+ getCurScope(), Base, CorrectedBase, OpLoc, OpKind == tok::arrow,
+ Base && ExprStatementTokLoc == Base->getLocStart());
cutOffParsing();
return ExprError();
@@ -1755,7 +1777,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/*AllowConstructorName=*/
getLangOpts().MicrosoftExt,
/*AllowDeductionGuide=*/false,
- ObjectType, TemplateKWLoc, Name)) {
+ ObjectType, &TemplateKWLoc, Name)) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
}
@@ -1765,6 +1787,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
OpKind, SS, TemplateKWLoc, Name,
CurParsedObjCImpl ? CurParsedObjCImpl->Dcl
: nullptr);
+ if (!LHS.isInvalid() && Tok.is(tok::less))
+ checkPotentialAngleBracket(LHS);
break;
}
case tok::plusplus: // postfix-expression: postfix-expression '++'
@@ -1824,7 +1848,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
if (isTypeIdUnambiguously()) {
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
ParseDeclarator(DeclaratorInfo);
SourceLocation LParenLoc = PP.getLocForEndOfToken(OpTok.getLocation());
@@ -1881,7 +1905,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
}
-/// \brief Parse a sizeof or alignof expression.
+/// Parse a sizeof or alignof expression.
///
/// \verbatim
/// unary-expression: [C99 6.5.3]
@@ -2381,7 +2405,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Parse the type declarator.
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
ParseDeclarator(DeclaratorInfo);
// If our type is followed by an identifier and either ':' or ']', then
@@ -2694,7 +2718,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
Types, Exprs);
}
-/// \brief Parse A C++1z fold-expression after the opening paren and optional
+/// Parse a C++1z fold-expression after the opening paren and optional
/// left-hand-side expression.
///
/// \verbatim
@@ -2802,7 +2826,10 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
if (Tok.isNot(tok::comma))
break;
// Move to the next argument, remember where the comma was.
+ Token Comma = Tok;
CommaLocs.push_back(ConsumeToken());
+
+ checkPotentialAngleBracketDelimiter(Comma);
}
if (SawError) {
// Ensure typos get diagnosed when errors were encountered while parsing the
@@ -2837,7 +2864,10 @@ Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
return false;
// Move to the next argument, remember where the comma was.
+ Token Comma = Tok;
CommaLocs.push_back(ConsumeToken());
+
+ checkPotentialAngleBracketDelimiter(Comma);
}
}
@@ -2858,7 +2888,7 @@ void Parser::ParseBlockId(SourceLocation CaretLoc) {
ParseSpecifierQualifierList(DS);
// Parse the block-declarator.
- Declarator DeclaratorInfo(DS, Declarator::BlockLiteralContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::BlockLiteralContext);
DeclaratorInfo.setFunctionDefinitionKind(FDK_Definition);
ParseDeclarator(DeclaratorInfo);
@@ -2897,7 +2927,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
// Parse the return type if present.
DeclSpec DS(AttrFactory);
- Declarator ParamInfo(DS, Declarator::BlockLiteralContext);
+ Declarator ParamInfo(DS, DeclaratorContext::BlockLiteralContext);
ParamInfo.setFunctionDefinitionKind(FDK_Definition);
// FIXME: Since the return type isn't actually parsed, it can't be used to
// fill ParamInfo with an initial valid range, so do it manually.
@@ -2929,33 +2959,31 @@ ExprResult Parser::ParseBlockLiteralExpression() {
ParseBlockId(CaretLoc);
} else {
// Otherwise, pretend we saw (void).
- ParsedAttributes attrs(AttrFactory);
SourceLocation NoLoc;
- ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/true,
- /*IsAmbiguous=*/false,
- /*RParenLoc=*/NoLoc,
- /*ArgInfo=*/nullptr,
- /*NumArgs=*/0,
- /*EllipsisLoc=*/NoLoc,
- /*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
- /*RefQualifierIsLvalueRef=*/true,
- /*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc,
- /*MutableLoc=*/NoLoc,
- EST_None,
- /*ESpecRange=*/SourceRange(),
- /*Exceptions=*/nullptr,
- /*ExceptionRanges=*/nullptr,
- /*NumExceptions=*/0,
- /*NoexceptExpr=*/nullptr,
- /*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None,
- CaretLoc, CaretLoc,
- ParamInfo),
- attrs, CaretLoc);
+ ParamInfo.AddTypeInfo(
+ DeclaratorChunk::getFunction(/*HasProto=*/true,
+ /*IsAmbiguous=*/false,
+ /*RParenLoc=*/NoLoc,
+ /*ArgInfo=*/nullptr,
+ /*NumArgs=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc,
+ /*MutableLoc=*/NoLoc, EST_None,
+ /*ESpecRange=*/SourceRange(),
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ /*DeclsInPrototype=*/None, CaretLoc,
+ CaretLoc, ParamInfo),
+ CaretLoc);
MaybeParseGNUAttributes(ParamInfo);
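
The rewritten getFunction call above is the "pretend we saw (void)" path for the Blocks extension (-fblocks): a caret literal with no parameter list is given an empty prototype. A minimal sketch:

    // Compile with -fblocks; a captureless literal like this is a global block.
    int (^getAnswer)(void) = ^{ return 42; };
    int use() { return getAnswer(); }
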
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 959cb7a61d3a..26e75999518a 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -100,7 +100,7 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
/*AtDigraph*/false);
}
-/// \brief Parse global scope or nested-name-specifier if present.
+/// Parse global scope or nested-name-specifier if present.
///
/// Parses a C++ global scope specifier ('::') or nested-name-specifier (which
/// may be preceded by '::'). Note that this routine will not parse ::new or
@@ -292,8 +292,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
break;
}
- if (TemplateName.getKind() != UnqualifiedId::IK_OperatorFunctionId &&
- TemplateName.getKind() != UnqualifiedId::IK_LiteralOperatorId) {
+ if (TemplateName.getKind() != UnqualifiedIdKind::IK_OperatorFunctionId &&
+ TemplateName.getKind() != UnqualifiedIdKind::IK_LiteralOperatorId) {
Diag(TemplateName.getSourceRange().getBegin(),
diag::err_id_after_template_in_nested_name_spec)
<< TemplateName.getSourceRange();
@@ -515,7 +515,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
<< FixItHint::CreateInsertion(Tok.getLocation(), "template ");
if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(
- getCurScope(), SS, SourceLocation(), TemplateName, ObjectType,
+ getCurScope(), SS, Tok.getLocation(), TemplateName, ObjectType,
EnteringContext, Template, /*AllowInjectedClassName*/ true)) {
// Consume the identifier.
ConsumeToken();
@@ -553,7 +553,7 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOpe
/*AllowDestructorName=*/false,
/*AllowConstructorName=*/false,
/*AllowDeductionGuide=*/false,
- /*ObjectType=*/nullptr, TemplateKWLoc, Name))
+ /*ObjectType=*/nullptr, &TemplateKWLoc, Name))
return ExprError();
// This is only the direct operand of an & operator if it is not
@@ -561,10 +561,13 @@ ExprResult Parser::tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOpe
if (isAddressOfOperand && isPostfixExpressionSuffixStart())
isAddressOfOperand = false;
- return Actions.ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Name,
- Tok.is(tok::l_paren), isAddressOfOperand,
- nullptr, /*IsInlineAsmIdentifier=*/false,
- &Replacement);
+ ExprResult E = Actions.ActOnIdExpression(
+ getCurScope(), SS, TemplateKWLoc, Name, Tok.is(tok::l_paren),
+ isAddressOfOperand, nullptr, /*IsInlineAsmIdentifier=*/false,
+ &Replacement);
+ if (!E.isInvalid() && !E.isUnset() && Tok.is(tok::less))
+ checkPotentialAngleBracket(E);
+ return E;
}
/// ParseCXXIdExpression - Handle id-expression.
@@ -730,7 +733,7 @@ ExprResult Parser::TryParseLambdaExpression() {
return ParseLambdaExpressionAfterIntroducer(Intro);
}
-/// \brief Parse a lambda introducer.
+/// Parse a lambda introducer.
/// \param Intro A LambdaIntroducer filled in with information about the
/// contents of the lambda-introducer.
/// \param SkippedInits If non-null, we are disambiguating between an Obj-C
@@ -805,6 +808,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
IdentifierInfo *Id = nullptr;
SourceLocation EllipsisLoc;
ExprResult Init;
+ SourceLocation LocStart = Tok.getLocation();
if (Tok.is(tok::star)) {
Loc = ConsumeToken();
@@ -978,8 +982,11 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
Loc, Kind == LCK_ByRef, Id, InitKind, InitExpr);
Init = InitExpr;
}
+
+ SourceLocation LocEnd = PrevTokLocation;
+
Intro.addCapture(Kind, Loc, Id, EllipsisLoc, InitKind, Init,
- InitCaptureType);
+ InitCaptureType, SourceRange(LocStart, LocEnd));
}
T.consumeClose();
@@ -1090,7 +1097,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// Parse lambda-declarator[opt].
DeclSpec DS(AttrFactory);
- Declarator D(DS, Declarator::LambdaExprContext);
+ Declarator D(DS, DeclaratorContext::LambdaExprContext);
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
Actions.PushLambdaScope();
@@ -1106,12 +1113,12 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// after '(...)'. nvcc doesn't accept this.
auto WarnIfHasCUDATargetAttr = [&] {
if (getLangOpts().CUDA)
- for (auto *A = Attr.getList(); A != nullptr; A = A->getNext())
- if (A->getKind() == AttributeList::AT_CUDADevice ||
- A->getKind() == AttributeList::AT_CUDAHost ||
- A->getKind() == AttributeList::AT_CUDAGlobal)
- Diag(A->getLoc(), diag::warn_cuda_attr_lambda_position)
- << A->getName()->getName();
+ for (const ParsedAttr &A : Attr)
+ if (A.getKind() == ParsedAttr::AT_CUDADevice ||
+ A.getKind() == ParsedAttr::AT_CUDAHost ||
+ A.getKind() == ParsedAttr::AT_CUDAGlobal)
+ Diag(A.getLoc(), diag::warn_cuda_attr_lambda_position)
+ << A.getName()->getName();
};
TypeResult TrailingReturnType;
@@ -1183,7 +1190,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
if (Tok.is(tok::arrow)) {
FunLocalRangeEnd = Tok.getLocation();
SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(Range);
+ TrailingReturnType =
+ ParseTrailingReturnType(Range, /*MayBeFollowedByDirectInit*/ false);
if (Range.getEnd().isValid())
DeclEndLoc = Range.getEnd();
}
@@ -1193,29 +1201,23 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
WarnIfHasCUDATargetAttr();
SourceLocation NoLoc;
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
- /*isAmbiguous=*/false,
- LParenLoc,
- ParamInfo.data(), ParamInfo.size(),
- EllipsisLoc, RParenLoc,
- DS.getTypeQualifiers(),
- /*RefQualifierIsLValueRef=*/true,
- /*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc,
- MutableLoc,
- ESpecType, ESpecRange,
- DynamicExceptions.data(),
- DynamicExceptionRanges.data(),
- DynamicExceptions.size(),
- NoexceptExpr.isUsable() ?
- NoexceptExpr.get() : nullptr,
- /*ExceptionSpecTokens*/nullptr,
- /*DeclsInPrototype=*/None,
- LParenLoc, FunLocalRangeEnd, D,
- TrailingReturnType),
- Attr, DeclEndLoc);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(
+ /*hasProto=*/true,
+ /*isAmbiguous=*/false, LParenLoc, ParamInfo.data(),
+ ParamInfo.size(), EllipsisLoc, RParenLoc,
+ DS.getTypeQualifiers(),
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc, MutableLoc, ESpecType,
+ ESpecRange, DynamicExceptions.data(),
+ DynamicExceptionRanges.data(), DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
+ /*ExceptionSpecTokens*/ nullptr,
+ /*DeclsInPrototype=*/None, LParenLoc, FunLocalRangeEnd, D,
+ TrailingReturnType),
+ std::move(Attr), DeclEndLoc);
} else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
tok::kw_constexpr) ||
(Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
@@ -1253,7 +1255,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// Parse the return type, if there is one.
if (Tok.is(tok::arrow)) {
SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(Range);
+ TrailingReturnType =
+ ParseTrailingReturnType(Range, /*MayBeFollowedByDirectInit*/ false);
if (Range.getEnd().isValid())
DeclEndLoc = Range.getEnd();
}
@@ -1261,31 +1264,29 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
WarnIfHasCUDATargetAttr();
SourceLocation NoLoc;
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
- /*isAmbiguous=*/false,
- /*LParenLoc=*/NoLoc,
- /*Params=*/nullptr,
- /*NumParams=*/0,
- /*EllipsisLoc=*/NoLoc,
- /*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
- /*RefQualifierIsLValueRef=*/true,
- /*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc,
- MutableLoc,
- EST_None,
- /*ESpecRange=*/SourceRange(),
- /*Exceptions=*/nullptr,
- /*ExceptionRanges=*/nullptr,
- /*NumExceptions=*/0,
- /*NoexceptExpr=*/nullptr,
- /*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None,
- DeclLoc, DeclEndLoc, D,
- TrailingReturnType),
- Attr, DeclEndLoc);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(
+ /*hasProto=*/true,
+ /*isAmbiguous=*/false,
+ /*LParenLoc=*/NoLoc,
+ /*Params=*/nullptr,
+ /*NumParams=*/0,
+ /*EllipsisLoc=*/NoLoc,
+ /*RParenLoc=*/NoLoc,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc,
+ /*ConstQualifierLoc=*/NoLoc,
+ /*VolatileQualifierLoc=*/NoLoc,
+ /*RestrictQualifierLoc=*/NoLoc, MutableLoc, EST_None,
+ /*ESpecRange=*/SourceRange(),
+ /*Exceptions=*/nullptr,
+ /*ExceptionRanges=*/nullptr,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/nullptr,
+ /*ExceptionSpecTokens=*/nullptr,
+ /*DeclsInPrototype=*/None, DeclLoc, DeclEndLoc, D,
+ TrailingReturnType),
+ std::move(Attr), DeclEndLoc);
}
// FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
@@ -1353,7 +1354,7 @@ ExprResult Parser::ParseCXXCasts() {
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
ParseDeclarator(DeclaratorInfo);
SourceLocation RAngleBracketLoc = Tok.getLocation();
@@ -1498,7 +1499,7 @@ ExprResult Parser::ParseCXXUuidof() {
return Result;
}
-/// \brief Parse a C++ pseudo-destructor expression after the base,
+/// Parse a C++ pseudo-destructor expression after the base,
/// . or -> operator, and nested-name-specifier have already been
/// parsed.
///
@@ -1619,7 +1620,7 @@ ExprResult Parser::ParseThrowExpression() {
}
}
-/// \brief Parse the C++ Coroutines co_yield expression.
+/// Parse the C++ Coroutines co_yield expression.
///
/// co_yield-expression:
/// 'co_yield' assignment-expression[opt]
@@ -1660,7 +1661,7 @@ ExprResult Parser::ParseCXXThis() {
/// In C++1z onwards, the type specifier can also be a template-name.
ExprResult
Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
- Declarator DeclaratorInfo(DS, Declarator::FunctionalCastContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::FunctionalCastContext);
ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
assert((Tok.is(tok::l_paren) ||
@@ -1672,9 +1673,9 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
if (Init.isInvalid())
return Init;
Expr *InitList = Init.get();
- return Actions.ActOnCXXTypeConstructExpr(TypeRep, SourceLocation(),
- MultiExprArg(&InitList, 1),
- SourceLocation());
+ return Actions.ActOnCXXTypeConstructExpr(
+ TypeRep, InitList->getLocStart(), MultiExprArg(&InitList, 1),
+ InitList->getLocEnd(), /*ListInitialization=*/true);
} else {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -1702,9 +1703,9 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size())&&
"Unexpected number of commas!");
- return Actions.ActOnCXXTypeConstructExpr(TypeRep, T.getOpenLocation(),
- Exprs,
- T.getCloseLocation());
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, T.getOpenLocation(),
+ Exprs, T.getCloseLocation(),
+ /*ListInitialization=*/false);
}
}
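
Both ActOnCXXTypeConstructExpr calls now pass real source locations and an explicit ListInitialization flag instead of empty locations; a sketch of the two forms being distinguished (invented type):

    struct Pair { int a, b; };
    Pair p = Pair{1, 2};   // braced form: locations span the init-list,
                           // ListInitialization = true
    int  n = int(3);       // parenthesized form: ListInitialization = false
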
@@ -1733,6 +1734,8 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
cutOffParsing();
@@ -1742,17 +1745,34 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
+ const auto WarnOnInit = [this, &CK] {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
+ ? diag::warn_cxx14_compat_init_statement
+ : diag::ext_init_statement)
+ << (CK == Sema::ConditionKind::Switch);
+ };
+
// Determine what kind of thing we have.
switch (isCXXConditionDeclarationOrInitStatement(InitStmt)) {
case ConditionOrInitStatement::Expression: {
ProhibitAttributes(attrs);
+ // We can have an empty expression here.
+ // if (; true);
+ if (InitStmt && Tok.is(tok::semi)) {
+ WarnOnInit();
+ SourceLocation SemiLoc = ConsumeToken();
+ *InitStmt = Actions.ActOnNullStmt(SemiLoc);
+ return ParseCXXCondition(nullptr, Loc, CK);
+ }
+
// Parse the expression.
ExprResult Expr = ParseExpression(); // expression
if (Expr.isInvalid())
return Sema::ConditionError();
if (InitStmt && Tok.is(tok::semi)) {
+ WarnOnInit();
*InitStmt = Actions.ActOnExprStmt(Expr.get());
ConsumeToken();
return ParseCXXCondition(nullptr, Loc, CK);
@@ -1762,13 +1782,11 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
}
case ConditionOrInitStatement::InitStmtDecl: {
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
- ? diag::warn_cxx14_compat_init_statement
- : diag::ext_init_statement)
- << (CK == Sema::ConditionKind::Switch);
+ WarnOnInit();
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(
- Declarator::InitStmtContext, DeclEnd, attrs, /*RequireSemi=*/true);
+ DeclGroupPtrTy DG =
+ ParseSimpleDeclaration(DeclaratorContext::InitStmtContext, DeclEnd,
+ attrs, /*RequireSemi=*/true);
*InitStmt = Actions.ActOnDeclStmt(DG, DeclStart, DeclEnd);
return ParseCXXCondition(nullptr, Loc, CK);
}
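
The shared WarnOnInit lambda fires for every init-statement form, including the newly tolerated empty one; an illustrative source sketch (invented names, C++17 for the warning-free case):

    int compute() { return 1; }
    void f() {
      if (int v = compute(); v > 0) { }              // init-statement in 'if'
      switch (int k = compute(); k) { default: break; } // ... and in 'switch'
      if (; true) { }   // leading ';' becomes a null init-statement
    }
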
@@ -1781,10 +1799,10 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
// type-specifier-seq
DeclSpec DS(AttrFactory);
DS.takeAttributesFrom(attrs);
- ParseSpecifierQualifierList(DS, AS_none, DSC_condition);
+ ParseSpecifierQualifierList(DS, AS_none, DeclSpecContext::DSC_condition);
// declarator
- Declarator DeclaratorInfo(DS, Declarator::ConditionContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::ConditionContext);
ParseDeclarator(DeclaratorInfo);
// simple-asm-expr[opt]
@@ -1945,6 +1963,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_wchar_t:
DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw_char8_t:
+ DS.SetTypeSpecType(DeclSpec::TST_char8, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw_char16_t:
DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec, DiagID, Policy);
break;
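
This makes char8_t usable as a simple-type-specifier; a minimal example (assumes C++2a mode or -fchar8_t):

    char8_t c = u8'x';
    const char8_t *greeting = u8"hi";
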
@@ -1982,12 +2003,12 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
/// type-specifier type-specifier-seq[opt]
///
bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
- ParseSpecifierQualifierList(DS, AS_none, DSC_type_specifier);
+ ParseSpecifierQualifierList(DS, AS_none, DeclSpecContext::DSC_type_specifier);
DS.Finish(Actions, Actions.getASTContext().getPrintingPolicy());
return false;
}
-/// \brief Finish parsing a C++ unqualified-id that is a template-id of
+/// Finish parsing a C++ unqualified-id that is a template-id of
/// some form.
///
/// This routine is invoked when a '<' is encountered after an identifier or
@@ -2027,15 +2048,14 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId) {
- assert((AssumeTemplateId || Tok.is(tok::less)) &&
- "Expected '<' to finish parsing a template-id");
-
+ assert(Tok.is(tok::less) && "Expected '<' to finish parsing a template-id");
+
TemplateTy Template;
TemplateNameKind TNK = TNK_Non_template;
switch (Id.getKind()) {
- case UnqualifiedId::IK_Identifier:
- case UnqualifiedId::IK_OperatorFunctionId:
- case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_Identifier:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
if (AssumeTemplateId) {
// We defer the injected-class-name checks until we've found whether
// this template-id is used to form a nested-name-specifier or not.
@@ -2058,11 +2078,11 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
// parse correctly as a template, so suggest the keyword 'template'
// before 'getAs' and treat this as a dependent template name.
std::string Name;
- if (Id.getKind() == UnqualifiedId::IK_Identifier)
+ if (Id.getKind() == UnqualifiedIdKind::IK_Identifier)
Name = Id.Identifier->getName();
else {
Name = "operator ";
- if (Id.getKind() == UnqualifiedId::IK_OperatorFunctionId)
+ if (Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId)
Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
else
Name += Id.Identifier->getName();
@@ -2079,7 +2099,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
}
break;
- case UnqualifiedId::IK_ConstructorName: {
+ case UnqualifiedIdKind::IK_ConstructorName: {
UnqualifiedId TemplateName;
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
@@ -2090,7 +2110,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
break;
}
- case UnqualifiedId::IK_DestructorName: {
+ case UnqualifiedIdKind::IK_DestructorName: {
UnqualifiedId TemplateName;
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
@@ -2125,22 +2145,24 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
- if (Tok.is(tok::less) && ParseTemplateIdAfterTemplateName(
- true, LAngleLoc, TemplateArgs, RAngleLoc))
+ if (ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs,
+ RAngleLoc))
return true;
-
- if (Id.getKind() == UnqualifiedId::IK_Identifier ||
- Id.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
- Id.getKind() == UnqualifiedId::IK_LiteralOperatorId) {
+
+ if (Id.getKind() == UnqualifiedIdKind::IK_Identifier ||
+ Id.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId ||
+ Id.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId) {
// Form a parsed representation of the template-id to be stored in the
// UnqualifiedId.
// FIXME: Store name for literal operator too.
IdentifierInfo *TemplateII =
- Id.getKind() == UnqualifiedId::IK_Identifier ? Id.Identifier : nullptr;
- OverloadedOperatorKind OpKind = Id.getKind() == UnqualifiedId::IK_Identifier
- ? OO_None
- : Id.OperatorFunctionId.Operator;
+ Id.getKind() == UnqualifiedIdKind::IK_Identifier ? Id.Identifier
+ : nullptr;
+ OverloadedOperatorKind OpKind =
+ Id.getKind() == UnqualifiedIdKind::IK_Identifier
+ ? OO_None
+ : Id.OperatorFunctionId.Operator;
TemplateIdAnnotation *TemplateId = TemplateIdAnnotation::Create(
SS, TemplateKWLoc, Id.StartLocation, TemplateII, OpKind, Template, TNK,
@@ -2162,7 +2184,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
if (Type.isInvalid())
return true;
- if (Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ if (Id.getKind() == UnqualifiedIdKind::IK_ConstructorName)
Id.setConstructorName(Type.get(), NameLoc, RAngleLoc);
else
Id.setDestructorName(Id.StartLocation, Type.get(), RAngleLoc);
@@ -2170,7 +2192,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
return false;
}
-/// \brief Parse an operator-function-id or conversion-function-id as part
+/// Parse an operator-function-id or conversion-function-id as part
/// of a C++ unqualified-id.
///
/// This routine is responsible only for parsing the operator-function-id or
@@ -2395,7 +2417,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Parse the conversion-declarator, which is merely a sequence of
// ptr-operators.
- Declarator D(DS, Declarator::ConversionIdContext);
+ Declarator D(DS, DeclaratorContext::ConversionIdContext);
ParseDeclaratorInternal(D, /*DirectDeclParser=*/nullptr);
// Finish up the type.
@@ -2409,7 +2431,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
return false;
}
-/// \brief Parse a C++ unqualified-id (or a C identifier), which describes the
+/// Parse a C++ unqualified-id (or a C identifier), which describes the
/// name of an entity.
///
/// \code
@@ -2446,16 +2468,23 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
- SourceLocation& TemplateKWLoc,
+ SourceLocation *TemplateKWLoc,
UnqualifiedId &Result) {
+ if (TemplateKWLoc)
+ *TemplateKWLoc = SourceLocation();
// Handle 'A::template B'. This is for template-ids which have not
// already been annotated by ParseOptionalCXXScopeSpecifier().
bool TemplateSpecified = false;
- if (getLangOpts().CPlusPlus && Tok.is(tok::kw_template) &&
- (ObjectType || SS.isSet())) {
- TemplateSpecified = true;
- TemplateKWLoc = ConsumeToken();
+ if (Tok.is(tok::kw_template)) {
+ if (TemplateKWLoc && (ObjectType || SS.isSet())) {
+ TemplateSpecified = true;
+ *TemplateKWLoc = ConsumeToken();
+ } else {
+ SourceLocation TemplateLoc = ConsumeToken();
+ Diag(TemplateLoc, diag::err_unexpected_template_in_unqualified_id)
+ << FixItHint::CreateRemoval(TemplateLoc);
+ }
}
// unqualified-id:
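
A stray 'template' keyword in an unqualified-id is only meaningful after '.', '->', or a nested-name-specifier; otherwise it is now diagnosed with a removal fix-it rather than silently recorded. A sketch (invented names):

    struct X { template <typename T> void f() {} };
    void call(X *x) {
      x->template f<int>();   // OK: object expression present
      // template f<int>();   // err_unexpected_template_in_unqualified_id
    }
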
@@ -2477,10 +2506,10 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (AllowConstructorName &&
Actions.isCurrentClassName(*Id, getCurScope(), &SS)) {
// We have parsed a constructor name.
- ParsedType Ty = Actions.getTypeName(*Id, IdLoc, getCurScope(), &SS, false,
- false, nullptr,
- /*IsCtorOrDtorName=*/true,
- /*NonTrivialTypeSourceInfo=*/true);
+ ParsedType Ty = Actions.getConstructorName(*Id, IdLoc, getCurScope(), SS,
+ EnteringContext);
+ if (!Ty)
+ return true;
Result.setConstructorName(Ty, IdLoc, IdLoc);
} else if (getLangOpts().CPlusPlus17 &&
AllowDeductionGuide && SS.isEmpty() &&
@@ -2494,11 +2523,18 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
}
// If the next token is a '<', we may have a template.
- if (TemplateSpecified || Tok.is(tok::less))
- return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc, Id, IdLoc,
- EnteringContext, ObjectType,
- Result, TemplateSpecified);
-
+ TemplateTy Template;
+ if (Tok.is(tok::less))
+ return ParseUnqualifiedIdTemplateId(
+ SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), Id, IdLoc,
+ EnteringContext, ObjectType, Result, TemplateSpecified);
+ else if (TemplateSpecified &&
+ Actions.ActOnDependentTemplateName(
+ getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
+ EnteringContext, Template,
+ /*AllowInjectedClassName*/ true) == TNK_Non_template)
+ return true;
+
return false;
}
@@ -2520,11 +2556,11 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
<< TemplateId->Name
<< FixItHint::CreateRemoval(
SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc));
- ParsedType Ty =
- Actions.getTypeName(*TemplateId->Name, TemplateId->TemplateNameLoc,
- getCurScope(), &SS, false, false, nullptr,
- /*IsCtorOrDtorName=*/true,
- /*NontrivialTypeSourceInfo=*/true);
+ ParsedType Ty = Actions.getConstructorName(
+ *TemplateId->Name, TemplateId->TemplateNameLoc, getCurScope(), SS,
+ EnteringContext);
+ if (!Ty)
+ return true;
Result.setConstructorName(Ty, TemplateId->TemplateNameLoc,
TemplateId->RAngleLoc);
ConsumeAnnotationToken();
@@ -2539,7 +2575,14 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// We have already parsed a template-id; consume the annotation token as
// our unqualified-id.
Result.setTemplateId(TemplateId);
- TemplateKWLoc = TemplateId->TemplateKWLoc;
+ SourceLocation TemplateLoc = TemplateId->TemplateKWLoc;
+ if (TemplateLoc.isValid()) {
+ if (TemplateKWLoc && (ObjectType || SS.isSet()))
+ *TemplateKWLoc = TemplateLoc;
+ else
+ Diag(TemplateLoc, diag::err_unexpected_template_in_unqualified_id)
+ << FixItHint::CreateRemoval(TemplateLoc);
+ }
ConsumeAnnotationToken();
return false;
}
@@ -2556,13 +2599,20 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
//
// template-id:
// operator-function-id < template-argument-list[opt] >
- if ((Result.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
- Result.getKind() == UnqualifiedId::IK_LiteralOperatorId) &&
- (TemplateSpecified || Tok.is(tok::less)))
- return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
- nullptr, SourceLocation(),
- EnteringContext, ObjectType,
- Result, TemplateSpecified);
+ TemplateTy Template;
+ if ((Result.getKind() == UnqualifiedIdKind::IK_OperatorFunctionId ||
+ Result.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId) &&
+ Tok.is(tok::less))
+ return ParseUnqualifiedIdTemplateId(
+ SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), nullptr,
+ SourceLocation(), EnteringContext, ObjectType, Result,
+ TemplateSpecified);
+ else if (TemplateSpecified &&
+ Actions.ActOnDependentTemplateName(
+ getCurScope(), SS, *TemplateKWLoc, Result, ObjectType,
+ EnteringContext, Template,
+ /*AllowInjectedClassName*/ true) == TNK_Non_template)
+ return true;
return false;
}
@@ -2630,12 +2680,11 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
IdentifierInfo *ClassName = Tok.getIdentifierInfo();
SourceLocation ClassNameLoc = ConsumeToken();
- if (TemplateSpecified || Tok.is(tok::less)) {
+ if (Tok.is(tok::less)) {
Result.setDestructorName(TildeLoc, nullptr, ClassNameLoc);
- return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
- ClassName, ClassNameLoc,
- EnteringContext, ObjectType,
- Result, TemplateSpecified);
+ return ParseUnqualifiedIdTemplateId(
+ SS, TemplateKWLoc ? *TemplateKWLoc : SourceLocation(), ClassName,
+ ClassNameLoc, EnteringContext, ObjectType, Result, TemplateSpecified);
}
// Note that this is a destructor name.
@@ -2696,7 +2745,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
SourceRange TypeIdParens;
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, Declarator::CXXNewContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::CXXNewContext);
if (Tok.is(tok::l_paren)) {
// If it turns out to be a placement, we change the type location.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -2836,10 +2885,9 @@ void Parser::ParseDirectNewDeclarator(Declarator &D) {
D.AddTypeInfo(DeclaratorChunk::getArray(0,
/*static=*/false, /*star=*/false,
- Size.get(),
- T.getOpenLocation(),
+ Size.get(), T.getOpenLocation(),
T.getCloseLocation()),
- Attrs, T.getCloseLocation());
+ std::move(Attrs), T.getCloseLocation());
if (T.getCloseLocation().isInvalid())
return;
@@ -2954,7 +3002,7 @@ static unsigned TypeTraitArity(tok::TokenKind kind) {
}
}
-/// \brief Parse the built-in type-trait pseudo-functions that allow
+/// Parse the built-in type-trait pseudo-functions that allow
/// implementation of the TR1/C++11 type traits templates.
///
/// primary-expression:
@@ -3172,7 +3220,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
if (ParseAs >= CompoundLiteral) {
// Parse the type declarator.
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
{
ColonProtectionRAIIObject InnerColonProtection(*this);
ParseSpecifierQualifierList(DS);
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index fb8624a324b9..5c5b3cdfcf33 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -13,11 +13,11 @@
#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -45,7 +45,8 @@ void Parser::MaybeSkipAttributes(tok::ObjCKeywordKind Kind) {
/// [OBJC] objc-protocol-definition
/// [OBJC] objc-method-definition
/// [OBJC] '@' 'end'
-Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
@@ -58,15 +59,11 @@ Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
switch (Tok.getObjCKeywordID()) {
case tok::objc_class:
return ParseObjCAtClassDeclaration(AtLoc);
- case tok::objc_interface: {
- ParsedAttributes attrs(AttrFactory);
- SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, attrs);
+ case tok::objc_interface:
+ SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, Attrs);
break;
- }
- case tok::objc_protocol: {
- ParsedAttributes attrs(AttrFactory);
- return ParseObjCAtProtocolDeclaration(AtLoc, attrs);
- }
+ case tok::objc_protocol:
+ return ParseObjCAtProtocolDeclaration(AtLoc, Attrs);
case tok::objc_implementation:
return ParseObjCAtImplementationDeclaration(AtLoc);
case tok::objc_end:
@@ -290,7 +287,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
Decl *CategoryType = Actions.ActOnStartCategoryInterface(
AtLoc, nameId, nameLoc, typeParameterList, categoryId, categoryLoc,
ProtocolRefs.data(), ProtocolRefs.size(), ProtocolLocs.data(),
- EndProtoLoc, attrs.getList());
+ EndProtoLoc, attrs);
if (Tok.is(tok::l_brace))
ParseObjCClassInstanceVariables(CategoryType, tok::objc_private, AtLoc);
@@ -356,17 +353,12 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
if (Tok.isNot(tok::less))
Actions.ActOnTypedefedProtocols(protocols, protocolLocs,
superClassId, superClassLoc);
-
- Decl *ClsType =
- Actions.ActOnStartClassInterface(getCurScope(), AtLoc, nameId, nameLoc,
- typeParameterList, superClassId,
- superClassLoc,
- typeArgs,
- SourceRange(typeArgsLAngleLoc,
- typeArgsRAngleLoc),
- protocols.data(), protocols.size(),
- protocolLocs.data(),
- EndProtoLoc, attrs.getList());
+
+ Decl *ClsType = Actions.ActOnStartClassInterface(
+ getCurScope(), AtLoc, nameId, nameLoc, typeParameterList, superClassId,
+ superClassLoc, typeArgs,
+ SourceRange(typeArgsLAngleLoc, typeArgsRAngleLoc), protocols.data(),
+ protocols.size(), protocolLocs.data(), EndProtoLoc, attrs);
if (Tok.is(tok::l_brace))
ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, AtLoc);
@@ -384,25 +376,21 @@ static void addContextSensitiveTypeNullability(Parser &P,
SourceLocation nullabilityLoc,
bool &addedToDeclSpec) {
// Create the attribute.
- auto getNullabilityAttr = [&]() -> AttributeList * {
- return D.getAttributePool().create(
- P.getNullabilityKeyword(nullability),
- SourceRange(nullabilityLoc),
- nullptr, SourceLocation(),
- nullptr, 0,
- AttributeList::AS_ContextSensitiveKeyword);
+ auto getNullabilityAttr = [&](AttributePool &Pool) -> ParsedAttr * {
+ return Pool.create(P.getNullabilityKeyword(nullability),
+ SourceRange(nullabilityLoc), nullptr, SourceLocation(),
+ nullptr, 0, ParsedAttr::AS_ContextSensitiveKeyword);
};
if (D.getNumTypeObjects() > 0) {
// Add the attribute to the declarator chunk nearest the declarator.
- auto nullabilityAttr = getNullabilityAttr();
- DeclaratorChunk &chunk = D.getTypeObject(0);
- nullabilityAttr->setNext(chunk.getAttrListRef());
- chunk.getAttrListRef() = nullabilityAttr;
+ D.getTypeObject(0).getAttrs().addAtStart(
+ getNullabilityAttr(D.getAttributePool()));
} else if (!addedToDeclSpec) {
// Otherwise, just put it on the declaration specifiers (if one
// isn't there already).
- D.getMutableDeclSpec().addAttributes(getNullabilityAttr());
+ D.getMutableDeclSpec().getAttributes().addAtStart(
+ getNullabilityAttr(D.getMutableDeclSpec().getAttributes().getPool()));
addedToDeclSpec = true;
}
}
@@ -1140,14 +1128,14 @@ bool Parser::isTokIdentifier_in() const {
/// 'null_unspecified'
///
void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
- Declarator::TheContext Context) {
- assert(Context == Declarator::ObjCParameterContext ||
- Context == Declarator::ObjCResultContext);
+ DeclaratorContext Context) {
+ assert(Context == DeclaratorContext::ObjCParameterContext ||
+ Context == DeclaratorContext::ObjCResultContext);
while (1) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCPassingType(getCurScope(), DS,
- Context == Declarator::ObjCParameterContext);
+ Context == DeclaratorContext::ObjCParameterContext);
return cutOffParsing();
}
@@ -1205,18 +1193,12 @@ void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
/// Take all the decl attributes out of the given list and add
/// them to the given attribute set.
-static void takeDeclAttributes(ParsedAttributes &attrs,
- AttributeList *list) {
- while (list) {
- AttributeList *cur = list;
- list = cur->getNext();
-
- if (!cur->isUsedAsTypeAttr()) {
- // Clear out the next pointer. We're really completely
- // destroying the internal invariants of the declarator here,
- // but it doesn't matter because we're done with it.
- cur->setNext(nullptr);
- attrs.add(cur);
+static void takeDeclAttributes(ParsedAttributesView &attrs,
+ ParsedAttributesView &from) {
+ for (auto &AL : llvm::reverse(from)) {
+ if (!AL.isUsedAsTypeAttr()) {
+ from.remove(&AL);
+ attrs.addAtStart(&AL);
}
}
}
@@ -1230,11 +1212,10 @@ static void takeDeclAttributes(ParsedAttributes &attrs,
attrs.getPool().takeAllFrom(D.getDeclSpec().getAttributePool());
// Now actually move the attributes over.
- takeDeclAttributes(attrs, D.getDeclSpec().getAttributes().getList());
+ takeDeclAttributes(attrs, D.getMutableDeclSpec().getAttributes());
takeDeclAttributes(attrs, D.getAttributes());
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
- takeDeclAttributes(attrs,
- const_cast<AttributeList*>(D.getTypeObject(i).getAttrs()));
+ takeDeclAttributes(attrs, D.getTypeObject(i).getAttrs());
}
/// objc-type-name:
@@ -1242,12 +1223,12 @@ static void takeDeclAttributes(ParsedAttributes &attrs,
/// '(' objc-type-qualifiers[opt] ')'
///
ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
- Declarator::TheContext context,
+ DeclaratorContext context,
ParsedAttributes *paramAttrs) {
- assert(context == Declarator::ObjCParameterContext ||
- context == Declarator::ObjCResultContext);
+ assert(context == DeclaratorContext::ObjCParameterContext ||
+ context == DeclaratorContext::ObjCResultContext);
assert((paramAttrs != nullptr) ==
- (context == Declarator::ObjCParameterContext));
+ (context == DeclaratorContext::ObjCParameterContext));
assert(Tok.is(tok::l_paren) && "expected (");
@@ -1265,9 +1246,9 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
// Parse an abstract declarator.
DeclSpec declSpec(AttrFactory);
declSpec.setObjCQualifiers(&DS);
- DeclSpecContext dsContext = DSC_normal;
- if (context == Declarator::ObjCResultContext)
- dsContext = DSC_objc_method_result;
+ DeclSpecContext dsContext = DeclSpecContext::DSC_normal;
+ if (context == DeclaratorContext::ObjCResultContext)
+ dsContext = DeclSpecContext::DSC_objc_method_result;
ParseSpecifierQualifierList(declSpec, AS_none, dsContext);
Declarator declarator(declSpec, context);
ParseDeclarator(declarator);
@@ -1288,7 +1269,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
// If we're parsing a parameter, steal all the decl attributes
// and add them to the decl spec.
- if (context == Declarator::ObjCParameterContext)
+ if (context == DeclaratorContext::ObjCParameterContext)
takeDeclAttributes(*paramAttrs, declarator);
}
}
@@ -1352,13 +1333,14 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ParsedType ReturnType;
ObjCDeclSpec DSRet;
if (Tok.is(tok::l_paren))
- ReturnType = ParseObjCTypeName(DSRet, Declarator::ObjCResultContext,
+ ReturnType = ParseObjCTypeName(DSRet, DeclaratorContext::ObjCResultContext,
nullptr);
// If attributes exist before the method, parse them.
ParsedAttributes methodAttrs(AttrFactory);
if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(methodAttrs);
+ MaybeParseCXX11Attributes(methodAttrs);
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
@@ -1385,15 +1367,13 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist after the method, parse them.
if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(methodAttrs);
+ MaybeParseCXX11Attributes(methodAttrs);
Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
- Decl *Result
- = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
- mType, DSRet, ReturnType,
- selLoc, Sel, nullptr,
- CParamInfo.data(), CParamInfo.size(),
- methodAttrs.getList(), MethodImplKind,
- false, MethodDefinition);
+ Decl *Result = Actions.ActOnMethodDeclaration(
+ getCurScope(), mLoc, Tok.getLocation(), mType, DSRet, ReturnType,
+ selLoc, Sel, nullptr, CParamInfo.data(), CParamInfo.size(), methodAttrs,
+ MethodImplKind, false, MethodDefinition);
PD.complete(Result);
return Result;
}
@@ -1416,16 +1396,15 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ArgInfo.Type = nullptr;
if (Tok.is(tok::l_paren)) // Parse the argument type if present.
ArgInfo.Type = ParseObjCTypeName(ArgInfo.DeclSpec,
- Declarator::ObjCParameterContext,
+ DeclaratorContext::ObjCParameterContext,
&paramAttrs);
// If attributes exist before the argument name, parse them.
// Regardless, collect all the attributes we've parsed so far.
- ArgInfo.ArgAttrs = nullptr;
- if (getLangOpts().ObjC2) {
+ if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(paramAttrs);
- ArgInfo.ArgAttrs = paramAttrs.getList();
- }
+ MaybeParseCXX11Attributes(paramAttrs);
+ ArgInfo.ArgAttrs = paramAttrs;
// Code completion for the next piece of the selector.
if (Tok.is(tok::code_completion)) {
@@ -1494,7 +1473,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
// Parse the declarator.
- Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ Declarator ParmDecl(DS, DeclaratorContext::PrototypeContext);
ParseDeclarator(ParmDecl);
IdentifierInfo *ParmII = ParmDecl.getIdentifier();
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
@@ -1508,20 +1487,18 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist after the method, parse them.
if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(methodAttrs);
-
+ MaybeParseCXX11Attributes(methodAttrs);
+
if (KeyIdents.size() == 0)
return nullptr;
Selector Sel = PP.getSelectorTable().getSelector(KeyIdents.size(),
&KeyIdents[0]);
- Decl *Result
- = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
- mType, DSRet, ReturnType,
- KeyLocs, Sel, &ArgInfos[0],
- CParamInfo.data(), CParamInfo.size(),
- methodAttrs.getList(),
- MethodImplKind, isVariadic, MethodDefinition);
-
+ Decl *Result = Actions.ActOnMethodDeclaration(
+ getCurScope(), mLoc, Tok.getLocation(), mType, DSRet, ReturnType, KeyLocs,
+ Sel, &ArgInfos[0], CParamInfo.data(), CParamInfo.size(), methodAttrs,
+ MethodImplKind, isVariadic, MethodDefinition);
+
PD.complete(Result);
return Result;
}
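With MaybeParseCXX11Attributes now called alongside MaybeParseGNUAttributes, both attribute spellings land in methodAttrs. A minimal Objective-C++ sketch of what this accepts (attribute names and placement are illustrative, assuming C++11 attributes are enabled):

    @interface Counter
    - (void)reset __attribute__((deprecated));  // GNU spelling, as before
    - (int)next [[clang::warn_unused_result]];  // C++11 spelling, now parsed too
    @end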
@@ -1703,7 +1680,7 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
typeArg, Actions.getASTContext().getPrintingPolicy());
// Form a declarator to turn this into a type.
- Declarator D(DS, Declarator::TypeNameContext);
+ Declarator D(DS, DeclaratorContext::TypeNameContext);
TypeResult fullTypeArg = Actions.ActOnTypeName(getCurScope(), D);
if (fullTypeArg.isUsable()) {
typeArgs.push_back(fullTypeArg.get());
@@ -1886,9 +1863,9 @@ void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocatio
Actions.ActOnObjCContainerFinishDefinition();
// Call ActOnFields() even if we don't have any decls. This is useful
// for code rewriting tools that need to be aware of the empty list.
- Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl,
- AllIvarDecls,
- T.getOpenLocation(), T.getCloseLocation(), nullptr);
+ Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl, AllIvarDecls,
+ T.getOpenLocation(), T.getCloseLocation(),
+ ParsedAttributesView());
}
/// objc-class-instance-variables:
@@ -2038,8 +2015,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (TryConsumeToken(tok::semi)) { // forward declaration of one protocol.
IdentifierLocPair ProtoInfo(protocolName, nameLoc);
- return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtoInfo,
- attrs.getList());
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtoInfo, attrs);
}
CheckNestedObjCContexts(AtLoc);
@@ -2066,8 +2042,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (ExpectAndConsume(tok::semi, diag::err_expected_after, "@protocol"))
return nullptr;
- return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtocolRefs,
- attrs.getList());
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, ProtocolRefs, attrs);
}
// Last, and definitely not least, parse a protocol declaration.
@@ -2081,12 +2056,9 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
/*consumeLastToken=*/true))
return nullptr;
- Decl *ProtoType =
- Actions.ActOnStartProtocolInterface(AtLoc, protocolName, nameLoc,
- ProtocolRefs.data(),
- ProtocolRefs.size(),
- ProtocolLocs.data(),
- EndProtoLoc, attrs.getList());
+ Decl *ProtoType = Actions.ActOnStartProtocolInterface(
+ AtLoc, protocolName, nameLoc, ProtocolRefs.data(), ProtocolRefs.size(),
+ ProtocolLocs.data(), EndProtoLoc, attrs);
ParseObjCInterfaceDeclList(tok::objc_protocol, ProtoType);
return Actions.ConvertDeclToDeclGroup(ProtoType);
@@ -2273,7 +2245,7 @@ void Parser::ObjCImplParsingDataRAII::finish(SourceRange AtEnd) {
P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i],
false/*c-functions*/);
- /// \brief Clear and free the cached objc methods.
+ /// Clear and free the cached objc methods.
for (LateParsedObjCMethodContainer::iterator
I = LateParsedObjCMethods.begin(),
E = LateParsedObjCMethods.end(); I != E; ++I)
@@ -2543,7 +2515,7 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
if (Tok.isNot(tok::ellipsis)) {
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
- Declarator ParmDecl(DS, Declarator::ObjCCatchContext);
+ Declarator ParmDecl(DS, DeclaratorContext::ObjCCatchContext);
ParseDeclarator(ParmDecl);
// Inform the actions module about the declarator, so it
@@ -2586,13 +2558,26 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
ParseScope FinallyScope(this,
Scope::DeclScope | Scope::CompoundStmtScope);
+ bool ShouldCapture =
+ getTargetInfo().getTriple().isWindowsMSVCEnvironment();
+ if (ShouldCapture)
+ Actions.ActOnCapturedRegionStart(Tok.getLocation(), getCurScope(),
+ CR_ObjCAtFinally, 1);
+
StmtResult FinallyBody(true);
if (Tok.is(tok::l_brace))
FinallyBody = ParseCompoundStatementBody();
else
Diag(Tok, diag::err_expected) << tok::l_brace;
- if (FinallyBody.isInvalid())
+
+ if (FinallyBody.isInvalid()) {
FinallyBody = Actions.ActOnNullStmt(Tok.getLocation());
+ if (ShouldCapture)
+ Actions.ActOnCapturedRegionError();
+ } else if (ShouldCapture) {
+ FinallyBody = Actions.ActOnCapturedRegionEnd(FinallyBody.get());
+ }
+
FinallyStmt = Actions.ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
FinallyBody.get());
catch_or_finally_seen = true;
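On *-windows-msvc triples the @finally body is now parsed inside a captured region (CR_ObjCAtFinally) so later phases can outline it; a parse failure unwinds the region via ActOnCapturedRegionError. A sketch of the construct affected:

    extern void work(void);
    extern void cleanup(void);
    void run(void) {
      @try {
        work();
      } @finally {
        cleanup();  // body becomes a captured region on MSVC environments
      }
    }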
@@ -2681,7 +2666,7 @@ void Parser::StashAwayMethodOrFunctionBodyTokens(Decl *MDecl) {
Decl *Parser::ParseObjCMethodDefinition() {
Decl *MDecl = ParseObjCMethodPrototype();
- PrettyDeclStackTraceEntry CrashInfo(Actions, MDecl, Tok.getLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Actions.Context, MDecl, Tok.getLocation(),
"parsing Objective-C method");
// parse optional ';'
@@ -2865,7 +2850,7 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
}
}
-/// \brief Parse the receiver of an Objective-C++ message send.
+/// Parse the receiver of an Objective-C++ message send.
///
/// This routine parses the receiver of a message send in
/// Objective-C++ either as a type or as an expression. Note that this
@@ -2945,7 +2930,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
// We have a class message. Turn the simple-type-specifier or
// typename-specifier we parsed into a type and parse the
// remainder of the class message.
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Type.isInvalid())
return true;
@@ -2955,7 +2940,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
return false;
}
-/// \brief Determine whether the parser is currently referring to a an
+/// Determine whether the parser is currently referring to an
/// Objective-C message send, using a simplified heuristic to avoid overhead.
///
/// This routine will only return true for a subset of valid message-send
@@ -3100,7 +3085,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
Res.get());
}
-/// \brief Parse the remainder of an Objective-C message following the
+/// Parse the remainder of an Objective-C message following the
/// '[' objc-receiver.
///
/// This routine handles sends to super, class messages (sent to a
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index a67a5bbe0dea..a413e96a91e7 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
/// \file
-/// \brief This file implements parsing of all OpenMP directives and clauses.
+/// This file implements parsing of all OpenMP directives and clauses.
///
//===----------------------------------------------------------------------===//
@@ -80,51 +80,56 @@ static unsigned getOpenMPDirectiveKindEx(StringRef S) {
.Default(OMPD_unknown);
}
-static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
+static OpenMPDirectiveKind parseOpenMPDirectiveKind(Parser &P) {
// Array of foldings: F[i][0] F[i][1] ===> F[i][2].
// E.g.: OMPD_for OMPD_simd ===> OMPD_for_simd
// TODO: add other combined directives in topological order.
static const unsigned F[][3] = {
- { OMPD_cancellation, OMPD_point, OMPD_cancellation_point },
- { OMPD_declare, OMPD_reduction, OMPD_declare_reduction },
- { OMPD_declare, OMPD_simd, OMPD_declare_simd },
- { OMPD_declare, OMPD_target, OMPD_declare_target },
- { OMPD_distribute, OMPD_parallel, OMPD_distribute_parallel },
- { OMPD_distribute_parallel, OMPD_for, OMPD_distribute_parallel_for },
- { OMPD_distribute_parallel_for, OMPD_simd,
- OMPD_distribute_parallel_for_simd },
- { OMPD_distribute, OMPD_simd, OMPD_distribute_simd },
- { OMPD_end, OMPD_declare, OMPD_end_declare },
- { OMPD_end_declare, OMPD_target, OMPD_end_declare_target },
- { OMPD_target, OMPD_data, OMPD_target_data },
- { OMPD_target, OMPD_enter, OMPD_target_enter },
- { OMPD_target, OMPD_exit, OMPD_target_exit },
- { OMPD_target, OMPD_update, OMPD_target_update },
- { OMPD_target_enter, OMPD_data, OMPD_target_enter_data },
- { OMPD_target_exit, OMPD_data, OMPD_target_exit_data },
- { OMPD_for, OMPD_simd, OMPD_for_simd },
- { OMPD_parallel, OMPD_for, OMPD_parallel_for },
- { OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd },
- { OMPD_parallel, OMPD_sections, OMPD_parallel_sections },
- { OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd },
- { OMPD_target, OMPD_parallel, OMPD_target_parallel },
- { OMPD_target, OMPD_simd, OMPD_target_simd },
- { OMPD_target_parallel, OMPD_for, OMPD_target_parallel_for },
- { OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd },
- { OMPD_teams, OMPD_distribute, OMPD_teams_distribute },
- { OMPD_teams_distribute, OMPD_simd, OMPD_teams_distribute_simd },
- { OMPD_teams_distribute, OMPD_parallel, OMPD_teams_distribute_parallel },
- { OMPD_teams_distribute_parallel, OMPD_for, OMPD_teams_distribute_parallel_for },
- { OMPD_teams_distribute_parallel_for, OMPD_simd, OMPD_teams_distribute_parallel_for_simd },
- { OMPD_target, OMPD_teams, OMPD_target_teams },
- { OMPD_target_teams, OMPD_distribute, OMPD_target_teams_distribute },
- { OMPD_target_teams_distribute, OMPD_parallel, OMPD_target_teams_distribute_parallel },
- { OMPD_target_teams_distribute, OMPD_simd, OMPD_target_teams_distribute_simd },
- { OMPD_target_teams_distribute_parallel, OMPD_for, OMPD_target_teams_distribute_parallel_for },
- { OMPD_target_teams_distribute_parallel_for, OMPD_simd, OMPD_target_teams_distribute_parallel_for_simd }
- };
+ {OMPD_cancellation, OMPD_point, OMPD_cancellation_point},
+ {OMPD_declare, OMPD_reduction, OMPD_declare_reduction},
+ {OMPD_declare, OMPD_simd, OMPD_declare_simd},
+ {OMPD_declare, OMPD_target, OMPD_declare_target},
+ {OMPD_distribute, OMPD_parallel, OMPD_distribute_parallel},
+ {OMPD_distribute_parallel, OMPD_for, OMPD_distribute_parallel_for},
+ {OMPD_distribute_parallel_for, OMPD_simd,
+ OMPD_distribute_parallel_for_simd},
+ {OMPD_distribute, OMPD_simd, OMPD_distribute_simd},
+ {OMPD_end, OMPD_declare, OMPD_end_declare},
+ {OMPD_end_declare, OMPD_target, OMPD_end_declare_target},
+ {OMPD_target, OMPD_data, OMPD_target_data},
+ {OMPD_target, OMPD_enter, OMPD_target_enter},
+ {OMPD_target, OMPD_exit, OMPD_target_exit},
+ {OMPD_target, OMPD_update, OMPD_target_update},
+ {OMPD_target_enter, OMPD_data, OMPD_target_enter_data},
+ {OMPD_target_exit, OMPD_data, OMPD_target_exit_data},
+ {OMPD_for, OMPD_simd, OMPD_for_simd},
+ {OMPD_parallel, OMPD_for, OMPD_parallel_for},
+ {OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
+ {OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
+ {OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd},
+ {OMPD_target, OMPD_parallel, OMPD_target_parallel},
+ {OMPD_target, OMPD_simd, OMPD_target_simd},
+ {OMPD_target_parallel, OMPD_for, OMPD_target_parallel_for},
+ {OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd},
+ {OMPD_teams, OMPD_distribute, OMPD_teams_distribute},
+ {OMPD_teams_distribute, OMPD_simd, OMPD_teams_distribute_simd},
+ {OMPD_teams_distribute, OMPD_parallel, OMPD_teams_distribute_parallel},
+ {OMPD_teams_distribute_parallel, OMPD_for,
+ OMPD_teams_distribute_parallel_for},
+ {OMPD_teams_distribute_parallel_for, OMPD_simd,
+ OMPD_teams_distribute_parallel_for_simd},
+ {OMPD_target, OMPD_teams, OMPD_target_teams},
+ {OMPD_target_teams, OMPD_distribute, OMPD_target_teams_distribute},
+ {OMPD_target_teams_distribute, OMPD_parallel,
+ OMPD_target_teams_distribute_parallel},
+ {OMPD_target_teams_distribute, OMPD_simd,
+ OMPD_target_teams_distribute_simd},
+ {OMPD_target_teams_distribute_parallel, OMPD_for,
+ OMPD_target_teams_distribute_parallel_for},
+ {OMPD_target_teams_distribute_parallel_for, OMPD_simd,
+ OMPD_target_teams_distribute_parallel_for_simd}};
enum { CancellationPoint = 0, DeclareReduction = 1, TargetData = 2 };
- auto Tok = P.getCurToken();
+ Token Tok = P.getCurToken();
unsigned DKind =
Tok.isAnnotation()
? static_cast<unsigned>(OMPD_unknown)
@@ -132,8 +137,8 @@ static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
if (DKind == OMPD_unknown)
return OMPD_unknown;
- for (unsigned i = 0; i < llvm::array_lengthof(F); ++i) {
- if (DKind != F[i][0])
+ for (unsigned I = 0; I < llvm::array_lengthof(F); ++I) {
+ if (DKind != F[I][0])
continue;
Tok = P.getPreprocessor().LookAhead(0);
@@ -144,9 +149,9 @@ static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
if (SDKind == OMPD_unknown)
continue;
- if (SDKind == F[i][1]) {
+ if (SDKind == F[I][1]) {
P.ConsumeToken();
- DKind = F[i][2];
+ DKind = F[I][2];
}
}
return DKind < OMPD_unknown ? static_cast<OpenMPDirectiveKind>(DKind)
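Folding is pairwise, left to right: each lookahead that matches F[I][1] consumes a token and widens DKind to F[I][2]. A worked sketch for the longest chain in the table:

    // target + teams                   -> target_teams
    // target_teams + distribute        -> target_teams_distribute
    // ... + parallel                   -> target_teams_distribute_parallel
    // ... + for                        -> ..._parallel_for
    // ... + simd                       -> ..._parallel_for_simd
    void axpy(int n, float *a, const float *b) {
    #pragma omp target teams distribute parallel for simd
      for (int i = 0; i < n; ++i)
        a[i] += b[i];
    }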
@@ -205,7 +210,7 @@ static DeclarationName parseOpenMPReductionId(Parser &P) {
: DeclNames.getCXXOperatorName(OOK);
}
-/// \brief Parse 'omp declare reduction' construct.
+/// Parse 'omp declare reduction' construct.
///
/// declare-reduction-directive:
/// annot_pragma_openmp 'declare' 'reduction'
@@ -250,9 +255,10 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
do {
ColonProtectionRAIIObject ColonRAII(*this);
SourceRange Range;
- TypeResult TR = ParseTypeName(&Range, Declarator::PrototypeContext, AS);
+ TypeResult TR =
+ ParseTypeName(&Range, DeclaratorContext::PrototypeContext, AS);
if (TR.isUsable()) {
- auto ReductionType =
+ QualType ReductionType =
Actions.ActOnOpenMPDeclareReductionType(Range.getBegin(), TR);
if (!ReductionType.isNull()) {
ReductionTypes.push_back(
@@ -299,7 +305,7 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
// Parse <combiner> expression and then parse initializer if any for each
// correct type.
unsigned I = 0, E = ReductionTypes.size();
- for (auto *D : DRD.get()) {
+ for (Decl *D : DRD.get()) {
TentativeParsingAction TPA(*this);
ParseScope OMPDRScope(this, Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope |
@@ -322,9 +328,9 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
// Parse <initializer> expression.
if (Tok.is(tok::identifier) &&
- Tok.getIdentifierInfo()->isStr("initializer"))
+ Tok.getIdentifierInfo()->isStr("initializer")) {
ConsumeToken();
- else {
+ } else {
Diag(Tok.getLocation(), diag::err_expected) << "'initializer'";
TPA.Commit();
IsCorrect = false;
@@ -418,13 +424,15 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
SkipUntil(tok::r_paren, tok::annot_pragma_openmp_end, StopBeforeMatch);
} else {
// Match the ')'.
- T.consumeClose();
+ SourceLocation RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
assert(!Exprs.empty() && Exprs.size() - 1 == CommaLocs.size() &&
"Unexpected number of commas!");
- ExprResult Initializer = Actions.ActOnParenListExpr(
- T.getOpenLocation(), T.getCloseLocation(), Exprs);
+ ExprResult Initializer =
+ Actions.ActOnParenListExpr(T.getOpenLocation(), RLoc, Exprs);
Actions.AddInitializerToDecl(OmpPrivParm, Initializer.get(),
/*DirectInit=*/true);
}
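For reference, a sketch of the construct this routine parses, in standard OpenMP 4.0 syntax (the reduction identifier 'merge' is illustrative):

    #include <vector>
    #pragma omp declare reduction(merge : std::vector<int> :           \
        omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))   \
        initializer(omp_priv = std::vector<int>())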
@@ -550,7 +558,7 @@ static bool parseDeclareSimdClauses(
if (CKind == OMPC_uniform || CKind == OMPC_aligned ||
CKind == OMPC_linear) {
Parser::OpenMPVarListDataTy Data;
- auto *Vars = &Uniforms;
+ SmallVectorImpl<Expr *> *Vars = &Uniforms;
if (CKind == OMPC_aligned)
Vars = &Aligneds;
else if (CKind == OMPC_linear)
@@ -560,9 +568,9 @@ static bool parseDeclareSimdClauses(
if (P.ParseOpenMPVarList(OMPD_declare_simd,
getOpenMPClauseKind(ClauseName), *Vars, Data))
IsError = true;
- if (CKind == OMPC_aligned)
+ if (CKind == OMPC_aligned) {
Alignments.append(Aligneds.size() - Alignments.size(), Data.TailExpr);
- else if (CKind == OMPC_linear) {
+ } else if (CKind == OMPC_linear) {
if (P.getActions().CheckOpenMPLinearModifier(Data.LinKind,
Data.DepLinMapLoc))
Data.LinKind = OMPC_LINEAR_val;
@@ -612,15 +620,14 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
}
// Skip the last annot_pragma_openmp_end.
SourceLocation EndLoc = ConsumeAnnotationToken();
- if (!IsError) {
- return Actions.ActOnOpenMPDeclareSimdDirective(
- Ptr, BS, Simdlen.get(), Uniforms, Aligneds, Alignments, Linears,
- LinModifiers, Steps, SourceRange(Loc, EndLoc));
- }
- return Ptr;
+ if (IsError)
+ return Ptr;
+ return Actions.ActOnOpenMPDeclareSimdDirective(
+ Ptr, BS, Simdlen.get(), Uniforms, Aligneds, Alignments, Linears,
+ LinModifiers, Steps, SourceRange(Loc, EndLoc));
}
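A sketch of the clauses this routine collects (OpenMP 4.5 spellings; the function and parameter names are illustrative):

    #pragma omp declare simd simdlen(8) uniform(scale) linear(i : 1)
    float scaled(const float *x, float scale, int i) { return scale * x[i]; }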
-/// \brief Parsing of declarative OpenMP directives.
+/// Parsing of declarative OpenMP directives.
///
/// threadprivate-directive:
/// annot_pragma_openmp 'threadprivate' simple-variable-list
@@ -642,7 +649,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ParenBraceBracketBalancer BalancerRAIIObj(*this);
SourceLocation Loc = ConsumeAnnotationToken();
- auto DKind = ParseOpenMPDirectiveKind(*this);
+ OpenMPDirectiveKind DKind = parseOpenMPDirectiveKind(*this);
switch (DKind) {
case OMPD_threadprivate: {
@@ -665,7 +672,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
case OMPD_declare_reduction:
ConsumeToken();
- if (auto Res = ParseOpenMPDeclareReductionDirective(AS)) {
+ if (DeclGroupPtrTy Res = ParseOpenMPDeclareReductionDirective(AS)) {
// The last seen token is annot_pragma_openmp_end - need to check for
// extra tokens.
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
@@ -694,9 +701,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeAnyToken();
DeclGroupPtrTy Ptr;
- if (Tok.is(tok::annot_pragma_openmp))
+ if (Tok.is(tok::annot_pragma_openmp)) {
Ptr = ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs, TagType, Tag);
- else if (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
+ } else if (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
// Here we expect to see some function declaration.
if (AS == AS_none) {
assert(TagType == DeclSpec::TST_unspecified);
@@ -718,7 +725,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
SourceLocation DTLoc = ConsumeAnyToken();
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
// OpenMP 4.5 syntax with list of entities.
- llvm::SmallSetVector<const NamedDecl*, 16> SameDirectiveDecls;
+ Sema::NamedDeclSetType SameDirectiveDecls;
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OMPDeclareTargetDeclAttr::MapTypeTy MT =
OMPDeclareTargetDeclAttr::MT_To;
@@ -734,12 +741,13 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
ConsumeToken();
}
- auto Callback = [this, MT, &SameDirectiveDecls](
- CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
+ auto &&Callback = [this, MT, &SameDirectiveDecls](
+ CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
Actions.ActOnOpenMPDeclareTargetName(getCurScope(), SS, NameInfo, MT,
SameDirectiveDecls);
};
- if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback, true))
+ if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
+ /*AllowScopeSpecifier=*/true))
break;
// Consume optional ','.
@@ -748,7 +756,11 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
ConsumeAnyToken();
- return DeclGroupPtrTy();
+ SmallVector<Decl *, 4> Decls(SameDirectiveDecls.begin(),
+ SameDirectiveDecls.end());
+ if (Decls.empty())
+ return DeclGroupPtrTy();
+ return Actions.BuildDeclaratorGroup(Decls);
}
// Skip the last annot_pragma_openmp_end.
@@ -757,7 +769,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
if (!Actions.ActOnStartOpenMPDeclareTargetDirective(DTLoc))
return DeclGroupPtrTy();
- DKind = ParseOpenMPDirectiveKind(*this);
+ llvm::SmallVector<Decl *, 4> Decls;
+ DKind = parseOpenMPDirectiveKind(*this);
while (DKind != OMPD_end_declare_target && DKind != OMPD_declare_target &&
Tok.isNot(tok::eof) && Tok.isNot(tok::r_brace)) {
DeclGroupPtrTy Ptr;
@@ -771,10 +784,14 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Ptr =
ParseCXXClassMemberDeclarationWithPragmas(AS, Attrs, TagType, Tag);
}
+ if (Ptr) {
+ DeclGroupRef Ref = Ptr.get();
+ Decls.append(Ref.begin(), Ref.end());
+ }
if (Tok.isAnnotation() && Tok.is(tok::annot_pragma_openmp)) {
TentativeParsingAction TPA(*this);
ConsumeAnnotationToken();
- DKind = ParseOpenMPDirectiveKind(*this);
+ DKind = parseOpenMPDirectiveKind(*this);
if (DKind != OMPD_end_declare_target)
TPA.Revert();
else
@@ -796,7 +813,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
}
Actions.ActOnFinishOpenMPDeclareTargetDirective();
- return DeclGroupPtrTy();
+ return Actions.BuildDeclaratorGroup(Decls);
}
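The directive now hands back the enclosed declarations as a real group instead of an empty DeclGroupPtrTy. Sketch of input whose decls get collected:

    #pragma omp declare target
    int device_counter;        // appended to Decls via DeclGroupRef
    void device_helper(void);  // likewise
    #pragma omp end declare target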
case OMPD_unknown:
Diag(Tok, diag::err_omp_unknown_directive);
@@ -850,7 +867,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_target_teams_distribute_simd:
Diag(Tok, diag::err_omp_unexpected_directive)
- << getOpenMPDirectiveName(DKind);
+ << 1 << getOpenMPDirectiveName(DKind);
break;
}
while (Tok.isNot(tok::annot_pragma_openmp_end))
@@ -859,7 +876,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
return nullptr;
}
-/// \brief Parsing of declarative or executable OpenMP directives.
+/// Parsing of declarative or executable OpenMP directives.
///
/// threadprivate-directive:
/// annot_pragma_openmp 'threadprivate' simple-variable-list
@@ -902,7 +919,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
SourceLocation Loc = ConsumeAnnotationToken(), EndLoc;
- auto DKind = ParseOpenMPDirectiveKind(*this);
+ OpenMPDirectiveKind DKind = parseOpenMPDirectiveKind(*this);
OpenMPDirectiveKind CancelRegion = OMPD_unknown;
// Name of critical directive.
DeclarationNameInfo DirName;
@@ -935,7 +952,8 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
}
case OMPD_declare_reduction:
ConsumeToken();
- if (auto Res = ParseOpenMPDeclareReductionDirective(/*AS=*/AS_none)) {
+ if (DeclGroupPtrTy Res =
+ ParseOpenMPDeclareReductionDirective(/*AS=*/AS_none)) {
// The last seen token is annot_pragma_openmp_end - need to check for
// extra tokens.
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
@@ -946,8 +964,9 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
}
ConsumeAnyToken();
Directive = Actions.ActOnDeclStmt(Res, Loc, Tok.getLocation());
- } else
+ } else {
SkipUntil(tok::annot_pragma_openmp_end);
+ }
break;
case OMPD_flush:
if (PP.LookAhead(0).is(tok::l_paren)) {
@@ -1026,7 +1045,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
T.consumeClose();
}
} else if (DKind == OMPD_cancellation_point || DKind == OMPD_cancel) {
- CancelRegion = ParseOpenMPDirectiveKind(*this);
+ CancelRegion = parseOpenMPDirectiveKind(*this);
if (Tok.isNot(tok::annot_pragma_openmp_end))
ConsumeToken();
}
@@ -1079,21 +1098,18 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
StmtResult AssociatedStmt;
if (HasAssociatedStatement) {
// The body is a block scope like in Lambdas and Blocks.
- Sema::CompoundScopeRAII CompoundScope(Actions);
Actions.ActOnOpenMPRegionStart(DKind, getCurScope());
- Actions.ActOnStartOfCompoundStmt();
- // Parse statement
- AssociatedStmt = ParseStatement();
- Actions.ActOnFinishOfCompoundStmt();
+ // FIXME: We create a bogus CompoundStmt scope to hold the contents of
+ // the captured region. Code elsewhere assumes that any FunctionScopeInfo
+ // should have at least one compound statement scope within it.
+ AssociatedStmt = (Sema::CompoundScopeRAII(Actions), ParseStatement());
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
} else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
DKind == OMPD_target_exit_data) {
- Sema::CompoundScopeRAII CompoundScope(Actions);
Actions.ActOnOpenMPRegionStart(DKind, getCurScope());
- Actions.ActOnStartOfCompoundStmt();
- AssociatedStmt =
- Actions.ActOnCompoundStmt(Loc, Loc, llvm::None, /*isStmtExpr=*/false);
- Actions.ActOnFinishOfCompoundStmt();
+ AssociatedStmt = (Sema::CompoundScopeRAII(Actions),
+ Actions.ActOnCompoundStmt(Loc, Loc, llvm::None,
+ /*isStmtExpr=*/false));
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
}
Directive = Actions.ActOnOpenMPExecutableDirective(
@@ -1109,7 +1125,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
case OMPD_declare_target:
case OMPD_end_declare_target:
Diag(Tok, diag::err_omp_unexpected_directive)
- << getOpenMPDirectiveName(DKind);
+ << 1 << getOpenMPDirectiveName(DKind);
SkipUntil(tok::annot_pragma_openmp_end);
break;
case OMPD_unknown:
@@ -1140,7 +1156,6 @@ bool Parser::ParseOpenMPSimpleVarList(
// Read tokens while ')' or annot_pragma_openmp_end is not found.
while (Tok.isNot(tok::r_paren) && Tok.isNot(tok::annot_pragma_openmp_end)) {
CXXScopeSpec SS;
- SourceLocation TemplateKWLoc;
UnqualifiedId Name;
// Read var name.
Token PrevTok = Tok;
@@ -1152,7 +1167,7 @@ bool Parser::ParseOpenMPSimpleVarList(
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
} else if (ParseUnqualifiedId(SS, false, false, false, false, nullptr,
- TemplateKWLoc, Name)) {
+ nullptr, Name)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -1184,7 +1199,7 @@ bool Parser::ParseOpenMPSimpleVarList(
return !IsCorrect;
}
-/// \brief Parsing of OpenMP clauses.
+/// Parsing of OpenMP clauses.
///
/// clause:
/// if-clause | final-clause | num_threads-clause | safelen-clause |
@@ -1204,11 +1219,13 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
OMPClause *Clause = nullptr;
bool ErrorFound = false;
+ bool WrongDirective = false;
// Check if clause is allowed for the given directive.
if (CKind != OMPC_unknown && !isAllowedClauseForDirective(DKind, CKind)) {
Diag(Tok, diag::err_omp_unexpected_clause) << getOpenMPClauseName(CKind)
<< getOpenMPDirectiveName(DKind);
ErrorFound = true;
+ WrongDirective = true;
}
switch (CKind) {
@@ -1252,9 +1269,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
}
if (CKind == OMPC_ordered && PP.LookAhead(/*N=*/0).isNot(tok::l_paren))
- Clause = ParseOpenMPClause(CKind);
+ Clause = ParseOpenMPClause(CKind, WrongDirective);
else
- Clause = ParseOpenMPSingleExprClause(CKind);
+ Clause = ParseOpenMPSingleExprClause(CKind, WrongDirective);
break;
case OMPC_default:
case OMPC_proc_bind:
@@ -1269,7 +1286,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
ErrorFound = true;
}
- Clause = ParseOpenMPSimpleClause(CKind);
+ Clause = ParseOpenMPSimpleClause(CKind, WrongDirective);
break;
case OMPC_schedule:
case OMPC_dist_schedule:
@@ -1286,7 +1303,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
LLVM_FALLTHROUGH;
case OMPC_if:
- Clause = ParseOpenMPSingleExprWithArgClause(CKind);
+ Clause = ParseOpenMPSingleExprWithArgClause(CKind, WrongDirective);
break;
case OMPC_nowait:
case OMPC_untied:
@@ -1309,7 +1326,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
ErrorFound = true;
}
- Clause = ParseOpenMPClause(CKind);
+ Clause = ParseOpenMPClause(CKind, WrongDirective);
break;
case OMPC_private:
case OMPC_firstprivate:
@@ -1329,7 +1346,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
- Clause = ParseOpenMPVarListClause(DKind, CKind);
+ Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
case OMPC_unknown:
Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
@@ -1338,8 +1355,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
break;
case OMPC_threadprivate:
case OMPC_uniform:
- Diag(Tok, diag::err_omp_unexpected_clause) << getOpenMPClauseName(CKind)
- << getOpenMPDirectiveName(DKind);
+ if (!WrongDirective)
+ Diag(Tok, diag::err_omp_unexpected_clause)
+ << getOpenMPClauseName(CKind) << getOpenMPDirectiveName(DKind);
SkipUntil(tok::comma, tok::annot_pragma_openmp_end, StopBeforeMatch);
break;
}
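Because WrongDirective is forwarded as ParseOnly, an ill-placed clause is consumed in full and then discarded, so the single err_omp_unexpected_clause diagnostic is not followed by token-stream confusion. Sketch:

    void f(int n) {
      // error: 'num_threads' is not allowed on 'omp for'; its argument is
      // still parsed and thrown away, keeping recovery clean.
    #pragma omp for num_threads(n)
      for (int i = 0; i < n; ++i)
        ;
    }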
@@ -1362,13 +1380,14 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
// Parse ')'.
- T.consumeClose();
+ RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
- RLoc = T.getCloseLocation();
return Val;
}
-/// \brief Parsing of OpenMP clauses with single expressions like 'final',
+/// Parsing of OpenMP clauses with single expressions like 'final',
/// 'collapse', 'safelen', 'num_threads', 'simdlen', 'num_teams',
/// 'thread_limit', 'simdlen', 'priority', 'grainsize', 'num_tasks' or 'hint'.
///
@@ -1399,7 +1418,8 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// hint-clause:
/// 'hint' '(' expression ')'
///
-OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
+OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
+ bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
SourceLocation LLoc = Tok.getLocation();
SourceLocation RLoc;
@@ -1409,10 +1429,12 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
if (Val.isInvalid())
return nullptr;
+ if (ParseOnly)
+ return nullptr;
return Actions.ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, LLoc, RLoc);
}
-/// \brief Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
+/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
/// 'default' '(' 'none' | 'shared' ')'
@@ -1420,7 +1442,8 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind) {
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')'
///
-OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind) {
+OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
+ bool ParseOnly) {
SourceLocation Loc = Tok.getLocation();
SourceLocation LOpen = ConsumeToken();
// Parse '('.
@@ -1437,13 +1460,16 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind) {
ConsumeAnyToken();
// Parse ')'.
- T.consumeClose();
+ SourceLocation RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
- return Actions.ActOnOpenMPSimpleClause(Kind, Type, TypeLoc, LOpen, Loc,
- Tok.getLocation());
+ if (ParseOnly)
+ return nullptr;
+ return Actions.ActOnOpenMPSimpleClause(Kind, Type, TypeLoc, LOpen, Loc, RLoc);
}
-/// \brief Parsing of OpenMP clauses like 'ordered'.
+/// Parsing of OpenMP clauses like 'ordered'.
///
/// ordered-clause:
/// 'ordered'
@@ -1469,15 +1495,17 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind) {
/// nogroup-clause:
/// 'nogroup'
///
-OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind) {
+OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly) {
SourceLocation Loc = Tok.getLocation();
ConsumeAnyToken();
+ if (ParseOnly)
+ return nullptr;
return Actions.ActOnOpenMPClause(Kind, Loc, Tok.getLocation());
}
-/// \brief Parsing of OpenMP clauses with single expressions and some additional
+/// Parsing of OpenMP clauses with single expressions and some additional
/// argument like 'schedule' or 'dist_schedule'.
///
/// schedule-clause:
@@ -1490,7 +1518,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPClauseKind Kind) {
/// defaultmap:
/// 'defaultmap' '(' modifier ':' kind ')'
///
-OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
+OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
+ bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
SourceLocation DelimLoc;
// Parse '('.
@@ -1509,7 +1538,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
Arg[Modifier1] = OMPC_SCHEDULE_MODIFIER_unknown;
Arg[Modifier2] = OMPC_SCHEDULE_MODIFIER_unknown;
Arg[ScheduleKind] = OMPC_SCHEDULE_unknown;
- auto KindModifier = getOpenMPSimpleClauseType(
+ unsigned KindModifier = getOpenMPSimpleClauseType(
Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok));
if (KindModifier > OMPC_SCHEDULE_unknown) {
// Parse 'modifier'
@@ -1582,7 +1611,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
assert(Kind == OMPC_if);
KLoc.push_back(Tok.getLocation());
TentativeParsingAction TPA(*this);
- Arg.push_back(ParseOpenMPDirectiveKind(*this));
+ Arg.push_back(parseOpenMPDirectiveKind(*this));
if (Arg.back() != OMPD_unknown) {
ConsumeToken();
if (Tok.is(tok::colon) && getLangOpts().OpenMP > 40) {
@@ -1592,8 +1621,9 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
TPA.Revert();
Arg.back() = OMPD_unknown;
}
- } else
+ } else {
TPA.Revert();
+ }
}
bool NeedAnExpression = (Kind == OMPC_schedule && DelimLoc.isValid()) ||
@@ -1607,19 +1637,21 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
}
// Parse ')'.
- T.consumeClose();
+ SourceLocation RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
if (NeedAnExpression && Val.isInvalid())
return nullptr;
+ if (ParseOnly)
+ return nullptr;
return Actions.ActOnOpenMPSingleExprWithArgClause(
- Kind, Arg, Val.get(), Loc, T.getOpenLocation(), KLoc, DelimLoc,
- T.getCloseLocation());
+ Kind, Arg, Val.get(), Loc, T.getOpenLocation(), KLoc, DelimLoc, RLoc);
}
static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
UnqualifiedId &ReductionId) {
- SourceLocation TemplateKWLoc;
if (ReductionIdScopeSpec.isEmpty()) {
auto OOK = OO_None;
switch (P.getCurToken().getKind()) {
@@ -1661,7 +1693,7 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
/*AllowDestructorName*/ false,
/*AllowConstructorName*/ false,
/*AllowDeductionGuide*/ false,
- nullptr, TemplateKWLoc, ReductionId);
+ nullptr, nullptr, ReductionId);
}
/// Parses clauses with list.
@@ -1723,9 +1755,9 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
return false;
}
}
- if (Tok.is(tok::colon))
+ if (Tok.is(tok::colon)) {
Data.ColonLoc = ConsumeToken();
- else {
+ } else {
Diag(Tok, DKind == OMPD_ordered ? diag::warn_pragma_expected_colon_r_paren
: diag::warn_pragma_expected_colon)
<< "dependency type";
@@ -1774,8 +1806,9 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Data.MapTypeModifier != OMPC_MAP_always) {
Diag(Tok, diag::err_omp_unknown_map_type_modifier);
Data.MapTypeModifier = OMPC_MAP_unknown;
- } else
+ } else {
MapTypeModifierSpecified = true;
+ }
ConsumeToken();
ConsumeToken();
@@ -1799,8 +1832,9 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (Data.MapTypeModifier != OMPC_MAP_always) {
Diag(Tok, diag::err_omp_unknown_map_type_modifier);
Data.MapTypeModifier = OMPC_MAP_unknown;
- } else
+ } else {
MapTypeModifierSpecified = true;
+ }
ConsumeToken();
@@ -1847,9 +1881,9 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Parse variable
ExprResult VarExpr =
Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- if (VarExpr.isUsable())
+ if (VarExpr.isUsable()) {
Vars.push_back(VarExpr.get());
- else {
+ } else {
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
}
@@ -1885,16 +1919,16 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
}
// Parse ')'.
- T.consumeClose();
- if ((Kind == OMPC_depend && Data.DepKind != OMPC_DEPEND_unknown &&
- Vars.empty()) ||
- (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
- (MustHaveTail && !Data.TailExpr) || InvalidReductionId)
- return true;
- return false;
+ Data.RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ Data.RLoc = T.getCloseLocation();
+ return (Kind == OMPC_depend && Data.DepKind != OMPC_DEPEND_unknown &&
+ Vars.empty()) ||
+ (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
+ (MustHaveTail && !Data.TailExpr) || InvalidReductionId;
}
-/// \brief Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
+/// Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
/// 'shared', 'copyin', 'copyprivate', 'flush', 'reduction', 'task_reduction' or
/// 'in_reduction'.
///
@@ -1939,7 +1973,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// modifier(list)
/// where modifier is 'val' (C) or 'ref', 'val' or 'uval'(C++).
OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
- OpenMPClauseKind Kind) {
+ OpenMPClauseKind Kind,
+ bool ParseOnly) {
SourceLocation Loc = Tok.getLocation();
SourceLocation LOpen = ConsumeToken();
SmallVector<Expr *, 4> Vars;
@@ -1948,8 +1983,10 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
if (ParseOpenMPVarList(DKind, Kind, Vars, Data))
return nullptr;
+ if (ParseOnly)
+ return nullptr;
return Actions.ActOnOpenMPVarListClause(
- Kind, Vars, Data.TailExpr, Loc, LOpen, Data.ColonLoc, Tok.getLocation(),
+ Kind, Vars, Data.TailExpr, Loc, LOpen, Data.ColonLoc, Data.RLoc,
Data.ReductionIdScopeSpec, Data.ReductionId, Data.DepKind, Data.LinKind,
Data.MapTypeModifier, Data.MapType, Data.IsMapTypeImplicit,
Data.DepLinMapLoc);
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 198d5c6e9cb0..9a25f9c25c03 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -95,6 +95,44 @@ struct PragmaFPContractHandler : public PragmaHandler {
Token &FirstToken) override;
};
+// Pragma STDC implementations.
+
+/// PragmaSTDC_FENV_ACCESSHandler - "\#pragma STDC FENV_ACCESS ...".
+struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
+ PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+ if (OOS == tok::OOS_ON)
+ PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ }
+};
+
+/// PragmaSTDC_CX_LIMITED_RANGEHandler - "\#pragma STDC CX_LIMITED_RANGE ...".
+struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
+ PragmaSTDC_CX_LIMITED_RANGEHandler() : PragmaHandler("CX_LIMITED_RANGE") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) override {
+ tok::OnOffSwitch OOS;
+ PP.LexOnOffSwitch(OOS);
+ }
+};
+
+/// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
+struct PragmaSTDC_UnknownHandler : public PragmaHandler {
+ PragmaSTDC_UnknownHandler() = default;
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &UnknownTok) override {
+ // C99 6.10.6p2, unknown forms are not allowed.
+ PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
+ }
+};
+
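Together with the existing FP_CONTRACT handler, these cover the three STDC pragmas from C99 6.10.6 plus the unknown-form case. Sketch of inputs:

    #pragma STDC FP_CONTRACT OFF       // existing FPContractHandler
    #pragma STDC FENV_ACCESS ON        // warn_stdc_fenv_access_not_supported
    #pragma STDC CX_LIMITED_RANGE OFF  // lexed, deliberately ignored
    #pragma STDC SOMETHING_ELSE ON     // ext_stdc_pragma_ignored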
struct PragmaFPHandler : public PragmaHandler {
PragmaFPHandler() : PragmaHandler("fp") {}
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
@@ -182,6 +220,12 @@ struct PragmaMSIntrinsicHandler : public PragmaHandler {
Token &FirstToken) override;
};
+struct PragmaMSOptimizeHandler : public PragmaHandler {
+ PragmaMSOptimizeHandler() : PragmaHandler("optimize") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
struct PragmaForceCUDAHostDeviceHandler : public PragmaHandler {
PragmaForceCUDAHostDeviceHandler(Sema &Actions)
: PragmaHandler("force_cuda_host_device"), Actions(Actions) {}
@@ -233,6 +277,15 @@ void Parser::initializePragmaHandlers() {
FPContractHandler.reset(new PragmaFPContractHandler());
PP.AddPragmaHandler("STDC", FPContractHandler.get());
+ STDCFENVHandler.reset(new PragmaSTDC_FENV_ACCESSHandler());
+ PP.AddPragmaHandler("STDC", STDCFENVHandler.get());
+
+ STDCCXLIMITHandler.reset(new PragmaSTDC_CX_LIMITED_RANGEHandler());
+ PP.AddPragmaHandler("STDC", STDCCXLIMITHandler.get());
+
+ STDCUnknownHandler.reset(new PragmaSTDC_UnknownHandler());
+ PP.AddPragmaHandler("STDC", STDCUnknownHandler.get());
+
PCSectionHandler.reset(new PragmaClangSectionHandler(Actions));
PP.AddPragmaHandler("clang", PCSectionHandler.get());
@@ -248,7 +301,8 @@ void Parser::initializePragmaHandlers() {
OpenMPHandler.reset(new PragmaNoOpenMPHandler());
PP.AddPragmaHandler(OpenMPHandler.get());
- if (getLangOpts().MicrosoftExt || getTargetInfo().getTriple().isPS4()) {
+ if (getLangOpts().MicrosoftExt ||
+ getTargetInfo().getTriple().isOSBinFormatELF()) {
MSCommentHandler.reset(new PragmaCommentHandler(Actions));
PP.AddPragmaHandler(MSCommentHandler.get());
}
@@ -276,6 +330,8 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSRuntimeChecks.get());
MSIntrinsic.reset(new PragmaMSIntrinsicHandler());
PP.AddPragmaHandler(MSIntrinsic.get());
+ MSOptimize.reset(new PragmaMSOptimizeHandler());
+ PP.AddPragmaHandler(MSOptimize.get());
}
if (getLangOpts().CUDA) {
@@ -330,7 +386,8 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler(OpenMPHandler.get());
OpenMPHandler.reset();
- if (getLangOpts().MicrosoftExt || getTargetInfo().getTriple().isPS4()) {
+ if (getLangOpts().MicrosoftExt ||
+ getTargetInfo().getTriple().isOSBinFormatELF()) {
PP.RemovePragmaHandler(MSCommentHandler.get());
MSCommentHandler.reset();
}
@@ -361,6 +418,8 @@ void Parser::resetPragmaHandlers() {
MSRuntimeChecks.reset();
PP.RemovePragmaHandler(MSIntrinsic.get());
MSIntrinsic.reset();
+ PP.RemovePragmaHandler(MSOptimize.get());
+ MSOptimize.reset();
}
if (getLangOpts().CUDA) {
@@ -371,6 +430,15 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("STDC", FPContractHandler.get());
FPContractHandler.reset();
+ PP.RemovePragmaHandler("STDC", STDCFENVHandler.get());
+ STDCFENVHandler.reset();
+
+ PP.RemovePragmaHandler("STDC", STDCCXLIMITHandler.get());
+ STDCCXLIMITHandler.reset();
+
+ PP.RemovePragmaHandler("STDC", STDCUnknownHandler.get());
+ STDCUnknownHandler.reset();
+
PP.RemovePragmaHandler("clang", OptimizeHandler.get());
OptimizeHandler.reset();
@@ -390,7 +458,7 @@ void Parser::resetPragmaHandlers() {
AttributePragmaHandler.reset();
}
-/// \brief Handle the annotation token produced for #pragma unused(...)
+/// Handle the annotation token produced for #pragma unused(...)
///
/// Each annot_pragma_unused is followed by the argument token so e.g.
/// "#pragma unused(x,y)" becomes:
@@ -451,8 +519,10 @@ void Parser::HandlePragmaAlign() {
Sema::PragmaOptionsAlignKind Kind =
static_cast<Sema::PragmaOptionsAlignKind>(
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
- SourceLocation PragmaLoc = ConsumeAnnotationToken();
- Actions.ActOnPragmaOptionsAlign(Kind, PragmaLoc);
+ Actions.ActOnPragmaOptionsAlign(Kind, Tok.getLocation());
+ // Consume the token after processing the pragma to enable pragma-specific
+ // #include warnings.
+ ConsumeAnnotationToken();
}
void Parser::HandlePragmaDump() {
@@ -1175,7 +1245,7 @@ bool Parser::ParsePragmaAttributeSubjectMatchRuleSet(
namespace {
-/// Describes the stage at which attribute subject rule parsing was interruped.
+/// Describes the stage at which attribute subject rule parsing was interrupted.
enum class MissingAttributeSubjectRulesRecoveryPoint {
Comma,
ApplyTo,
@@ -1201,7 +1271,7 @@ getAttributeSubjectRulesRecoveryPointForToken(const Token &Tok) {
/// suggests the possible attribute subject rules in a fix-it together with
/// any other missing tokens.
DiagnosticBuilder createExpectedAttributeSubjectRulesTokenDiagnostic(
- unsigned DiagID, AttributeList &Attribute,
+ unsigned DiagID, ParsedAttr &Attribute,
MissingAttributeSubjectRulesRecoveryPoint Point, Parser &PRef) {
SourceLocation Loc = PRef.getEndOfPreviousToken();
if (Loc.isInvalid())
@@ -1301,12 +1371,11 @@ void Parser::HandlePragmaAttribute() {
if (Tok.isNot(tok::l_paren))
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- AttributeList::AS_GNU);
+ ParsedAttr::AS_GNU);
else
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
/*ScopeName=*/nullptr,
- /*ScopeLoc=*/SourceLocation(),
- AttributeList::AS_GNU,
+ /*ScopeLoc=*/SourceLocation(), ParsedAttr::AS_GNU,
/*Declarator=*/nullptr);
if (ExpectAndConsume(tok::r_paren))
@@ -1320,9 +1389,9 @@ void Parser::HandlePragmaAttribute() {
if (Tok.getIdentifierInfo()) {
// If we suspect that this is an attribute suggest the use of
// '__attribute__'.
- if (AttributeList::getKind(Tok.getIdentifierInfo(), /*ScopeName=*/nullptr,
- AttributeList::AS_GNU) !=
- AttributeList::UnknownAttribute) {
+ if (ParsedAttr::getKind(Tok.getIdentifierInfo(), /*ScopeName=*/nullptr,
+ ParsedAttr::AS_GNU) !=
+ ParsedAttr::UnknownAttribute) {
SourceLocation InsertStartLoc = Tok.getLocation();
ConsumeToken();
if (Tok.is(tok::l_paren)) {
@@ -1340,26 +1409,26 @@ void Parser::HandlePragmaAttribute() {
return;
}
- if (!Attrs.getList() || Attrs.getList()->isInvalid()) {
+ if (Attrs.empty() || Attrs.begin()->isInvalid()) {
SkipToEnd();
return;
}
// Ensure that we don't have more than one attribute.
- if (Attrs.getList()->getNext()) {
- SourceLocation Loc = Attrs.getList()->getNext()->getLoc();
+ if (Attrs.size() > 1) {
+ SourceLocation Loc = Attrs[1].getLoc();
Diag(Loc, diag::err_pragma_attribute_multiple_attributes);
SkipToEnd();
return;
}
- if (!Attrs.getList()->isSupportedByPragmaAttribute()) {
+ ParsedAttr &Attribute = *Attrs.begin();
+ if (!Attribute.isSupportedByPragmaAttribute()) {
Diag(PragmaLoc, diag::err_pragma_attribute_unsupported_attribute)
- << Attrs.getList()->getName();
+ << Attribute.getName();
SkipToEnd();
return;
}
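For reference, the shape these checks accept: exactly one supported attribute per directive (standard '#pragma clang attribute' syntax; the annotate payload is illustrative):

    #pragma clang attribute push (__attribute__((annotate("grp"))), apply_to = function)
    void traced();   // receives annotate("grp")
    #pragma clang attribute pop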
- AttributeList &Attribute = *Attrs.getList();
// Parse the subject-list.
if (!TryConsumeToken(tok::comma)) {
@@ -2030,7 +2099,7 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
StateLoc, State);
}
-/// \brief Handle '#pragma omp ...' when OpenMP is disabled.
+/// Handle '#pragma omp ...' when OpenMP is disabled.
///
void
PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
@@ -2045,7 +2114,7 @@ PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
PP.DiscardUntilEndOfDirective();
}
-/// \brief Handle '#pragma omp ...' when OpenMP is enabled.
+/// Handle '#pragma omp ...' when OpenMP is enabled.
///
void
PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
@@ -2057,9 +2126,21 @@ PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
Tok.setKind(tok::annot_pragma_openmp);
Tok.setLocation(FirstTok.getLocation());
- while (Tok.isNot(tok::eod)) {
+ while (Tok.isNot(tok::eod) && Tok.isNot(tok::eof)) {
Pragma.push_back(Tok);
PP.Lex(Tok);
+ if (Tok.is(tok::annot_pragma_openmp)) {
+ PP.Diag(Tok, diag::err_omp_unexpected_directive) << 0;
+ unsigned InnerPragmaCnt = 1;
+ while (InnerPragmaCnt != 0) {
+ PP.Lex(Tok);
+ if (Tok.is(tok::annot_pragma_openmp))
+ ++InnerPragmaCnt;
+ else if (Tok.is(tok::annot_pragma_openmp_end))
+ --InnerPragmaCnt;
+ }
+ PP.Lex(Tok);
+ }
}
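The new loop copes with one OpenMP annotation turning up inside another's token run, which previously could scan past end-of-file; the inner directive is diagnosed and skipped through its matching end annotation. A hypothetical trigger, assuming macro expansion introduces the nested pragma:

    #define INNER _Pragma("omp parallel")
    #pragma omp parallel INNER   // hypothetical: the inner annotation is
    { }                          // diagnosed (err_omp_unexpected_directive)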
SourceLocation EodLoc = Tok.getLocation();
Tok.startToken();
@@ -2073,7 +2154,7 @@ PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
/*DisableMacroExpansion=*/false);
}
-/// \brief Handle '#pragma pointers_to_members'
+/// Handle '#pragma pointers_to_members'
// The grammar for this pragma is as follows:
//
// <inheritance model> ::= ('single' | 'multiple' | 'virtual') '_inheritance'
@@ -2171,7 +2252,7 @@ void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP,
PP.EnterToken(AnnotTok);
}
-/// \brief Handle '#pragma vtordisp'
+/// Handle '#pragma vtordisp'
// The grammar for this pragma is as follows:
//
// <vtordisp-mode> ::= ('off' | 'on' | '0' | '1' | '2' )
@@ -2264,7 +2345,7 @@ void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP,
PP.EnterToken(AnnotTok);
}
-/// \brief Handle all MS pragmas. Simply forwards the tokens after inserting
+/// Handle all MS pragmas. Simply forwards the tokens after inserting
/// an annotation token.
void PragmaMSPragma::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
@@ -2282,7 +2363,7 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
TokenVector.push_back(Tok);
AnnotTok.setAnnotationEndLoc(Tok.getLocation());
}
- // Add a sentinal EoF token to the end of the list.
+ // Add a sentinel EoF token to the end of the list.
TokenVector.push_back(EoF);
// We must allocate this array with new because EnterTokenStream is going to
// delete it later.
@@ -2295,7 +2376,7 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
PP.EnterToken(AnnotTok);
}
-/// \brief Handle the Microsoft \#pragma detect_mismatch extension.
+/// Handle the Microsoft \#pragma detect_mismatch extension.
///
/// The syntax is:
/// \code
@@ -2352,7 +2433,7 @@ void PragmaDetectMismatchHandler::HandlePragma(Preprocessor &PP,
Actions.ActOnPragmaDetectMismatch(DetectMismatchLoc, NameString, ValueString);
}
-/// \brief Handle the microsoft \#pragma comment extension.
+/// Handle the microsoft \#pragma comment extension.
///
/// The syntax is:
/// \code
@@ -2393,6 +2474,12 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
return;
}
+ if (PP.getTargetInfo().getTriple().isOSBinFormatELF() && Kind != PCK_Lib) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_comment_ignored)
+ << II->getName();
+ return;
+ }
+
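Sketch of the resulting ELF behavior (only the 'lib' kind is honored; diagnostic wording abbreviated):

    #pragma comment(lib, "m")          // OK: recorded as a dependent library
    #pragma comment(user, "anything")  // warning: ignored on ELF targets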
// On PS4, issue a warning about any pragma comments other than
// #pragma comment lib.
if (PP.getTargetInfo().getTriple().isPS4() && Kind != PCK_Lib) {
@@ -2595,7 +2682,7 @@ void Parser::HandlePragmaFP() {
ConsumeAnnotationToken();
}
-/// \brief Parses loop or unroll pragma hint value and fills in Info.
+/// Parses loop or unroll pragma hint value and fills in Info.
static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
Token Option, bool ValueInParens,
PragmaLoopHintInfo &Info) {
@@ -2637,7 +2724,7 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
return false;
}
-/// \brief Handle the \#pragma clang loop directive.
+/// Handle the \#pragma clang loop directive.
/// #pragma clang 'loop' loop-hints
///
/// loop-hints:
@@ -2752,7 +2839,7 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
/*DisableMacroExpansion=*/false);
}
-/// \brief Handle the loop unroll optimization pragmas.
+/// Handle the loop unroll optimization pragmas.
/// #pragma unroll
/// #pragma unroll unroll-hint-value
/// #pragma unroll '(' unroll-hint-value ')'
@@ -2822,7 +2909,7 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
/*DisableMacroExpansion=*/false);
}
-/// \brief Handle the Microsoft \#pragma intrinsic extension.
+/// Handle the Microsoft \#pragma intrinsic extension.
///
/// The syntax is:
/// \code
@@ -2871,6 +2958,61 @@ void PragmaMSIntrinsicHandler::HandlePragma(Preprocessor &PP,
PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
<< "intrinsic";
}
+
+// #pragma optimize("gsty", on|off)
+void PragmaMSOptimizeHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ SourceLocation StartLoc = Tok.getLocation();
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "optimize";
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::string_literal)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_string) << "optimize";
+ return;
+ }
+ // We could syntax check the string but it's probably not worth the effort.
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::comma)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_comma) << "optimize";
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::eod) || Tok.is(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_missing_argument)
+ << "optimize" << /*Expected=*/true << "'on' or 'off'";
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II || (!II->isStr("on") && !II->isStr("off"))) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << "optimize" << /*Expected=*/true
+ << "'on' or 'off'";
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "optimize";
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "optimize";
+ return;
+ }
+ PP.Diag(StartLoc, diag::warn_pragma_optimize);
+}
+
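Inputs the new handler accepts and rejects (accepted forms currently just emit warn_pragma_optimize, since the MSVC semantics are not implemented):

    #pragma optimize("gt", on)  // flag string: any of "g","s","t","y", or ""
    #pragma optimize("", off)   // common MSVC idiom: disable optimizations
    #pragma optimize("g")       // warning: expected ','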
void PragmaForceCUDAHostDeviceHandler::HandlePragma(
Preprocessor &PP, PragmaIntroducerKind Introducer, Token &Tok) {
Token FirstTok = Tok;
@@ -2895,7 +3037,7 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma(
diag::warn_pragma_force_cuda_host_device_bad_arg);
}
-/// \brief Handle the #pragma clang attribute directive.
+/// Handle the #pragma clang attribute directive.
///
/// The syntax is:
/// \code
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index 3f25610f4471..deb10af4b17b 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -12,13 +12,13 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
using namespace clang;
@@ -27,7 +27,7 @@ using namespace clang;
// C99 6.8: Statements and Blocks.
//===----------------------------------------------------------------------===//
-/// \brief Parse a standalone statement (for instance, as the body of an 'if',
+/// Parse a standalone statement (for instance, as the body of an 'if',
/// 'while', or 'for').
StmtResult Parser::ParseStatement(SourceLocation *TrailingElseLoc,
bool AllowOpenMPStandalone) {
@@ -116,7 +116,7 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
if (Attrs.empty() || Res.isInvalid())
return Res;
- return Actions.ProcessStmtAttributes(Res.get(), Attrs.getList(), Attrs.Range);
+ return Actions.ProcessStmtAttributes(Res.get(), Attrs, Attrs.Range);
}
namespace {
@@ -211,7 +211,7 @@ Retry:
Allowed == ACK_Any) &&
isDeclarationStatement()) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Decl = ParseDeclaration(Declarator::BlockContext,
+ DeclGroupPtrTy Decl = ParseDeclaration(DeclaratorContext::BlockContext,
DeclEnd, Attrs);
return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
}
@@ -402,7 +402,7 @@ Retry:
return Res;
}
-/// \brief Parse an expression statement.
+/// Parse an expression statement.
StmtResult Parser::ParseExprStatement() {
// If a case keyword is missing, this is where it should be inserted.
Token OldToken = Tok;
@@ -610,8 +610,8 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
Stmts, /*Allowed=*/ACK_StatementsOpenMPNonStandalone, nullptr,
TempAttrs);
if (!TempAttrs.empty() && !SubStmt.isInvalid())
- SubStmt = Actions.ProcessStmtAttributes(
- SubStmt.get(), TempAttrs.getList(), TempAttrs.Range);
+ SubStmt = Actions.ProcessStmtAttributes(SubStmt.get(), TempAttrs,
+ TempAttrs.Range);
} else {
Diag(Tok, diag::err_expected_after) << "__attribute__" << tok::semi;
}
@@ -627,10 +627,8 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs) {
LabelDecl *LD = Actions.LookupOrCreateLabel(IdentTok.getIdentifierInfo(),
IdentTok.getLocation());
- if (AttributeList *Attrs = attrs.getList()) {
- Actions.ProcessDeclAttributeList(Actions.CurScope, LD, Attrs);
- attrs.clear();
- }
+ Actions.ProcessDeclAttributeList(Actions.CurScope, LD, attrs);
+ attrs.clear();
return Actions.ActOnLabelStmt(IdentTok.getLocation(), LD, ColonLoc,
SubStmt.get());
@@ -687,20 +685,12 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
ExprResult LHS;
if (!MissingCase) {
- LHS = ParseConstantExpression();
- if (!getLangOpts().CPlusPlus11) {
- LHS = Actions.CorrectDelayedTyposInExpr(LHS, [this](class Expr *E) {
- return Actions.VerifyIntegerConstantExpression(E);
- });
- }
+ LHS = ParseCaseExpression(CaseLoc);
if (LHS.isInvalid()) {
// If constant-expression is parsed unsuccessfully, recover by skipping
// current case statement (moving to the colon that ends it).
- if (SkipUntil(tok::colon, tok::r_brace, StopAtSemi | StopBeforeMatch)) {
- TryConsumeToken(tok::colon, ColonLoc);
- continue;
- }
- return StmtError();
+ if (!SkipUntil(tok::colon, tok::r_brace, StopAtSemi | StopBeforeMatch))
+ return StmtError();
}
} else {
LHS = Expr;
@@ -712,13 +702,10 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
ExprResult RHS;
if (TryConsumeToken(tok::ellipsis, DotDotDotLoc)) {
Diag(DotDotDotLoc, diag::ext_gnu_case_range);
- RHS = ParseConstantExpression();
+ RHS = ParseCaseExpression(CaseLoc);
if (RHS.isInvalid()) {
- if (SkipUntil(tok::colon, tok::r_brace, StopAtSemi | StopBeforeMatch)) {
- TryConsumeToken(tok::colon, ColonLoc);
- continue;
- }
- return StmtError();
+ if (!SkipUntil(tok::colon, tok::r_brace, StopAtSemi | StopBeforeMatch))
+ return StmtError();
}
}
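
For reference, the GNU case-range form that both ParseCaseExpression calls above now handle (a sketch; 'n' is hypothetical):

    switch (n) {
    case 1 ... 4:      // GNU extension, diagnosed via ext_gnu_case_range
      return 0;
    default:
      return 1;
    }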
@@ -740,8 +727,7 @@ StmtResult Parser::ParseCaseStatement(bool MissingCase, ExprResult Expr) {
}
StmtResult Case =
- Actions.ActOnCaseStmt(CaseLoc, LHS.get(), DotDotDotLoc,
- RHS.get(), ColonLoc);
+ Actions.ActOnCaseStmt(CaseLoc, LHS, DotDotDotLoc, RHS, ColonLoc);
// If we had a sema error parsing this case, then just ignore it and
// continue parsing the sub-stmt.
@@ -954,7 +940,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
if (T.consumeOpen())
return StmtError();
- Sema::CompoundScopeRAII CompoundScope(Actions);
+ Sema::CompoundScopeRAII CompoundScope(Actions, isStmtExpr);
// Parse any pragmas at the beginning of the compound statement.
ParseCompoundStatementLeadingPragmas();
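
Passing isStmtExpr into CompoundScopeRAII tells Sema when the braces form a GNU statement expression rather than an ordinary block. A minimal input reaching this path:

    int g(int v) {
      // GNU statement expression: the last expression's value is the result.
      return ({ int t = v * 2; t + 1; });
    }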
@@ -1021,8 +1007,8 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ExtensionRAIIObject O(Diags);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Res = ParseDeclaration(Declarator::BlockContext, DeclEnd,
- attrs);
+ DeclGroupPtrTy Res =
+ ParseDeclaration(DeclaratorContext::BlockContext, DeclEnd, attrs);
R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
} else {
// Otherwise this was a unary __extension__ marker.
@@ -1196,7 +1182,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
{
EnterExpressionEvaluationContext PotentiallyDiscarded(
Actions, Sema::ExpressionEvaluationContext::DiscardedStatement, nullptr,
- false,
+ Sema::ExpressionEvaluationContextRecord::EK_Other,
/*ShouldEnter=*/ConstexprCondition && !*ConstexprCondition);
ThenStmt = ParseStatement(&InnerStatementTrailingElseLoc);
}
@@ -1230,7 +1216,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
EnterExpressionEvaluationContext PotentiallyDiscarded(
Actions, Sema::ExpressionEvaluationContext::DiscardedStatement, nullptr,
- false,
+ Sema::ExpressionEvaluationContextRecord::EK_Other,
/*ShouldEnter=*/ConstexprCondition && *ConstexprCondition);
ElseStmt = ParseStatement();
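
Both branches now enter the discarded-statement evaluation context tagged EK_Other; only the branch selected by the constexpr condition is fully analyzed. A sketch of the construct being parsed:

    template <typename T> int h(T t) {
      if constexpr (sizeof(T) == 4)
        return t + 1;   // discarded, not instantiated, when the condition is false
      else
        return 0;
    }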
@@ -1621,9 +1607,13 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
attrs, attrs.Range.getEnd());
ForRange = true;
} else if (isForInitDeclaration()) { // for (int X = 4;
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
// Parse declaration, which eats the ';'.
- if (!C99orCXXorObjC) // Use of C99-style for loops in C90 mode?
+ if (!C99orCXXorObjC) { // Use of C99-style for loops in C90 mode?
Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
+ Diag(Tok, diag::warn_gcc_variable_decl_in_for_loop);
+ }
// In C++0x, "for (T NS:a" might not be a typo for ::
bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
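
In C90 mode a declaration in the for-init now emits both the extension diagnostic and a GCC-compatibility warning. The triggering input looks like this (use() is hypothetical):

    /* compiled with -std=c90 */
    for (int i = 0; i < 10; ++i)   /* C99-style declaration in a C90 for loop */
      use(i);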
@@ -1631,7 +1621,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy DG = ParseSimpleDeclaration(
- Declarator::ForContext, DeclEnd, attrs, false,
+ DeclaratorContext::ForContext, DeclEnd, attrs, false,
MightBeForRangeStmt ? &ForRangeInit : nullptr);
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
if (ForRangeInit.ParsedForRangeDecl()) {
@@ -1940,7 +1930,7 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
ArgsUnion(Hint.ValueExpr)};
TempAttrs.addNew(Hint.PragmaNameLoc->Ident, Hint.Range, nullptr,
Hint.PragmaNameLoc->Loc, ArgHints, 4,
- AttributeList::AS_Pragma);
+ ParsedAttr::AS_Pragma);
}
// Get the next statement.
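
Each loop hint is now recorded as a ParsedAttr with the AS_Pragma syntax kind. A loop that produces such hints (array names illustrative):

    #pragma clang loop vectorize(enable) interleave_count(2)
    for (int i = 0; i < n; ++i)
      a[i] = b[i] + c[i];   // each hint becomes one pragma-syntax attribute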
@@ -1957,7 +1947,7 @@ Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
assert(Tok.is(tok::l_brace));
SourceLocation LBraceLoc = Tok.getLocation();
- PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, LBraceLoc,
+ PrettyDeclStackTraceEntry CrashInfo(Actions.Context, Decl, LBraceLoc,
"parsing function body");
// Save and reset current vtordisp stack if we have entered a C++ method body.
@@ -1990,7 +1980,7 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
assert(Tok.is(tok::kw_try) && "Expected 'try'");
SourceLocation TryLoc = ConsumeToken();
- PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, TryLoc,
+ PrettyDeclStackTraceEntry CrashInfo(Actions.Context, Decl, TryLoc,
"parsing function try block");
// Constructor initializer list?
@@ -2181,7 +2171,7 @@ StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
if (ParseCXXTypeSpecifierSeq(DS))
return StmtError();
- Declarator ExDecl(DS, Declarator::CXXCatchContext);
+ Declarator ExDecl(DS, DeclaratorContext::CXXCatchContext);
ParseDeclarator(ExDecl);
ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
} else
@@ -2265,7 +2255,7 @@ bool Parser::ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (Attrs.empty())
return true;
- if (Attrs.getList()->getKind() != AttributeList::AT_OpenCLUnrollHint)
+ if (Attrs.begin()->getKind() != ParsedAttr::AT_OpenCLUnrollHint)
return true;
if (!(Tok.is(tok::kw_for) || Tok.is(tok::kw_while) || Tok.is(tok::kw_do))) {
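
The check iterates ParsedAttributes directly, with Attrs.begin() replacing getList(). The OpenCL attribute being validated must immediately precede a loop, as in this sketch:

    __attribute__((opencl_unroll_hint(4)))
    for (int i = 0; i < 16; ++i)   // anything but for/while/do after the hint errors
      out[i] = in[i];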
diff --git a/lib/Parse/ParseStmtAsm.cpp b/lib/Parse/ParseStmtAsm.cpp
index d81029e27974..290723c29532 100644
--- a/lib/Parse/ParseStmtAsm.cpp
+++ b/lib/Parse/ParseStmtAsm.cpp
@@ -239,7 +239,7 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
/*AllowDestructorName=*/false,
/*AllowConstructorName=*/false,
/*AllowDeductionGuide=*/false,
- /*ObjectType=*/nullptr, TemplateKWLoc, Id);
+ /*ObjectType=*/nullptr, &TemplateKWLoc, Id);
// Perform the lookup.
Result = Actions.LookupInlineAsmIdentifier(SS, TemplateKWLoc, Id,
IsUnevaluatedContext);
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 56a16b9e0271..f7a69c482e17 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -21,27 +21,22 @@
#include "clang/Sema/Scope.h"
using namespace clang;
-/// \brief Parse a template declaration, explicit instantiation, or
+/// Parse a template declaration, explicit instantiation, or
/// explicit specialization.
-Decl *
-Parser::ParseDeclarationStartingWithTemplate(unsigned Context,
- SourceLocation &DeclEnd,
- AccessSpecifier AS,
- AttributeList *AccessAttrs) {
+Decl *Parser::ParseDeclarationStartingWithTemplate(
+ DeclaratorContext Context, SourceLocation &DeclEnd,
+ ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
ObjCDeclContextSwitch ObjCDC(*this);
if (Tok.is(tok::kw_template) && NextToken().isNot(tok::less)) {
- return ParseExplicitInstantiation(Context,
- SourceLocation(), ConsumeToken(),
- DeclEnd, AS);
+ return ParseExplicitInstantiation(Context, SourceLocation(), ConsumeToken(),
+ DeclEnd, AccessAttrs, AS);
}
- return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AS,
- AccessAttrs);
+ return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AccessAttrs,
+ AS);
}
-
-
-/// \brief Parse a template declaration or an explicit specialization.
+/// Parse a template declaration or an explicit specialization.
///
/// Template declarations include one or more template parameter lists
/// and either the function or class template declaration. Explicit
@@ -56,11 +51,9 @@ Parser::ParseDeclarationStartingWithTemplate(unsigned Context,
///
/// explicit-specialization: [ C++ temp.expl.spec]
/// 'template' '<' '>' declaration
-Decl *
-Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
- SourceLocation &DeclEnd,
- AccessSpecifier AS,
- AttributeList *AccessAttrs) {
+Decl *Parser::ParseTemplateDeclarationOrSpecialization(
+ DeclaratorContext Context, SourceLocation &DeclEnd,
+ ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
assert(Tok.isOneOf(tok::kw_export, tok::kw_template) &&
"Token does not start a template declaration.");
@@ -149,15 +142,13 @@ Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
ParseScopeFlags TemplateScopeFlags(this, NewFlags, isSpecialization);
// Parse the actual template declaration.
- return ParseSingleDeclarationAfterTemplate(Context,
- ParsedTemplateInfo(&ParamLists,
- isSpecialization,
- LastParamListWasEmpty),
- ParsingTemplateParams,
- DeclEnd, AS, AccessAttrs);
+ return ParseSingleDeclarationAfterTemplate(
+ Context,
+ ParsedTemplateInfo(&ParamLists, isSpecialization, LastParamListWasEmpty),
+ ParsingTemplateParams, DeclEnd, AccessAttrs, AS);
}
-/// \brief Parse a single declaration that declares a template,
+/// Parse a single declaration that declares a template,
/// template specialization, or explicit instantiation of a template.
///
/// \param DeclEnd will receive the source location of the last token
@@ -167,14 +158,10 @@ Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
/// declaration. Will be AS_none for namespace-scope declarations.
///
/// \returns the new declaration.
-Decl *
-Parser::ParseSingleDeclarationAfterTemplate(
- unsigned Context,
- const ParsedTemplateInfo &TemplateInfo,
- ParsingDeclRAIIObject &DiagsFromTParams,
- SourceLocation &DeclEnd,
- AccessSpecifier AS,
- AttributeList *AccessAttrs) {
+Decl *Parser::ParseSingleDeclarationAfterTemplate(
+ DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
+ ParsingDeclRAIIObject &DiagsFromTParams, SourceLocation &DeclEnd,
+ ParsedAttributes &AccessAttrs, AccessSpecifier AS) {
assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
"Template information required");
@@ -186,7 +173,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
return ParseStaticAssertDeclaration(DeclEnd);
}
- if (Context == Declarator::MemberContext) {
+ if (Context == DeclaratorContext::MemberContext) {
// We are parsing a member template.
ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
&DiagsFromTParams);
@@ -234,7 +221,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
DS.takeAttributesFrom(prefixAttrs);
// Parse the declarator.
- ParsingDeclarator DeclaratorInfo(*this, DS, (Declarator::TheContext)Context);
+ ParsingDeclarator DeclaratorInfo(*this, DS, (DeclaratorContext)Context);
ParseDeclarator(DeclaratorInfo);
// Error parsing the declarator?
if (!DeclaratorInfo.hasName()) {
@@ -255,7 +242,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
// Function definitions are only allowed at file scope and in C++ classes.
// The C++ inline method definition case is handled elsewhere, so we only
// need to handle the file scope definition case.
- if (Context != Declarator::FileContext) {
+ if (Context != DeclaratorContext::FileContext) {
Diag(Tok, diag::err_function_definition_not_allowed);
SkipMalformedDecl();
return nullptr;
@@ -271,7 +258,8 @@ Parser::ParseSingleDeclarationAfterTemplate(
}
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
- if (DeclaratorInfo.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ if (DeclaratorInfo.getName().getKind() !=
+ UnqualifiedIdKind::IK_TemplateId) {
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
Diag(Tok, diag::err_template_defn_explicit_instantiation) << 0;
@@ -369,7 +357,7 @@ bool Parser::ParseTemplateParameters(
/// template-parameter
/// template-parameter-list ',' template-parameter
bool
-Parser::ParseTemplateParameterList(unsigned Depth,
+Parser::ParseTemplateParameterList(const unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams) {
while (1) {
@@ -402,7 +390,7 @@ Parser::ParseTemplateParameterList(unsigned Depth,
return true;
}
-/// \brief Determine whether the parser is at the start of a template
+/// Determine whether the parser is at the start of a template
/// type parameter.
bool Parser::isStartOfTemplateTypeParameter() {
if (Tok.is(tok::kw_class)) {
@@ -487,6 +475,20 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
if (Tok.is(tok::kw_template))
return ParseTemplateTemplateParameter(Depth, Position);
+ // Is there just a typo in the input code? ('typedef' instead of 'typename')
+ if (Tok.is(tok::kw_typedef)) {
+ Diag(Tok.getLocation(), diag::err_expected_template_parameter);
+
+ Diag(Tok.getLocation(), diag::note_meant_to_use_typename)
+ << FixItHint::CreateReplacement(CharSourceRange::getCharRange(
+ Tok.getLocation(), Tok.getEndLoc()),
+ "typename");
+
+ Tok.setKind(tok::kw_typename);
+
+ return ParseTypeParameter(Depth, Position);
+ }
+
// If it's none of the above, then it must be a parameter declaration.
// NOTE: This will pick up errors in the closure of the template parameter
// list (e.g., template < ; Check here to implement >> style closures.
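
The new recovery treats 'typedef' at template-parameter position as a typo for 'typename': it diagnoses, attaches a fix-it, retags the token, and re-parses as a type parameter. The diagnosed input and its intended form:

    template <typedef T> struct S;    // error + note with fix-it: use 'typename'
    template <typename T> struct S;   // what the fix-it produces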
@@ -546,7 +548,7 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
ParsedType DefaultArg;
if (TryConsumeToken(tok::equal, EqualLoc))
DefaultArg = ParseTypeName(/*Range=*/nullptr,
- Declarator::TemplateTypeArgContext).get();
+ DeclaratorContext::TemplateTypeArgContext).get();
return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, EllipsisLoc,
KeyLoc, ParamName, NameLoc, Depth, Position,
@@ -676,10 +678,10 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// declarators (parts of declarators?) are accepted for parameters.
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none,
- DSC_template_param);
+ DeclSpecContext::DSC_template_param);
// Parse this as a typename.
- Declarator ParamDecl(DS, Declarator::TemplateParamContext);
+ Declarator ParamDecl(DS, DeclaratorContext::TemplateParamContext);
ParseDeclarator(ParamDecl);
if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
Diag(Tok.getLocation(), diag::err_expected_template_parameter);
@@ -739,7 +741,7 @@ void Parser::DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
AlreadyHasEllipsis, D.hasName());
}
-/// \brief Parses a '>' at the end of a template list.
+/// Parses a '>' at the end of a template list.
///
/// If this function encounters '>>', '>>>', '>=', or '>>=', it tries
/// to determine if these tokens were supposed to be a '>' followed by
@@ -760,6 +762,7 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
// What will be left once we've consumed the '>'.
tok::TokenKind RemainingToken;
const char *ReplacementStr = "> >";
+ bool MergeWithNextToken = false;
switch (Tok.getKind()) {
default:
@@ -785,6 +788,15 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
case tok::greaterequal:
RemainingToken = tok::equal;
ReplacementStr = "> =";
+
+ // Join two adjacent '=' tokens into one, for cases like:
+ // void (*p)() = f<int>;
+ // return f<int>==p;
+ if (NextToken().is(tok::equal) &&
+ areTokensAdjacent(Tok, NextToken())) {
+ RemainingToken = tok::equalequal;
+ MergeWithNextToken = true;
+ }
break;
case tok::greatergreaterequal:
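
The merge covers the lexer producing '>=' by maximal munch where the user wrote '>' followed by '=' or '=='. A sketch matching the comment's own examples:

    template <typename T> void f();
    void (*p)() = f<int>;    // '>=' splits: '>' closes the list, '=' initializes p
    bool b = f<int>==p;      // leftover '=' re-merges with the next '=' into '=='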
@@ -792,22 +804,35 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
break;
}
- // This template-id is terminated by a token which starts with a '>'. Outside
- // C++11, this is now error recovery, and in C++11, this is error recovery if
- // the token isn't '>>' or '>>>'.
- // '>>>' is for CUDA, where this sequence of characters is parsed into
- // tok::greatergreatergreater, rather than two separate tokens.
+ // This template-id is terminated by a token that starts with a '>'.
+ // Outside C++11 and Objective-C, this is now error recovery.
+ //
+ // C++11 allows this when the token is '>>', and in CUDA + C++11 mode, we
+ // extend that treatment to also apply to the '>>>' token.
//
- // We always allow this for Objective-C type parameter and type argument
- // lists.
- RAngleLoc = Tok.getLocation();
+ // Objective-C allows this in its type parameter / argument lists.
+
+ SourceLocation TokBeforeGreaterLoc = PrevTokLocation;
+ SourceLocation TokLoc = Tok.getLocation();
Token Next = NextToken();
+
+ // Whether splitting the current token after the '>' would undesirably result
+ // in the remaining token pasting with the token after it. This excludes the
+ // MergeWithNextToken cases, which we've already handled.
+ bool PreventMergeWithNextToken =
+ (RemainingToken == tok::greater ||
+ RemainingToken == tok::greatergreater) &&
+ (Next.isOneOf(tok::greater, tok::greatergreater,
+ tok::greatergreatergreater, tok::equal, tok::greaterequal,
+ tok::greatergreaterequal, tok::equalequal)) &&
+ areTokensAdjacent(Tok, Next);
+
+ // Diagnose this situation as appropriate.
if (!ObjCGenericList) {
- // The source range of the '>>' or '>=' at the start of the token.
- CharSourceRange ReplacementRange =
- CharSourceRange::getCharRange(RAngleLoc,
- Lexer::AdvanceToTokenCharacter(RAngleLoc, 2, PP.getSourceManager(),
- getLangOpts()));
+ // The source range of the replaced token(s).
+ CharSourceRange ReplacementRange = CharSourceRange::getCharRange(
+ TokLoc, Lexer::AdvanceToTokenCharacter(TokLoc, 2, PP.getSourceManager(),
+ getLangOpts()));
// A hint to put a space between the '>>'s. In order to make the hint as
// clear as possible, we include the characters either side of the space in
@@ -818,13 +843,7 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
// A hint to put another space after the token, if it would otherwise be
// lexed differently.
FixItHint Hint2;
- if ((RemainingToken == tok::greater ||
- RemainingToken == tok::greatergreater) &&
- (Next.isOneOf(tok::greater, tok::greatergreater,
- tok::greatergreatergreater, tok::equal,
- tok::greaterequal, tok::greatergreaterequal,
- tok::equalequal)) &&
- areTokensAdjacent(Tok, Next))
+ if (PreventMergeWithNextToken)
Hint2 = FixItHint::CreateInsertion(Next.getLocation(), " ");
unsigned DiagId = diag::err_two_right_angle_brackets_need_space;
@@ -833,55 +852,68 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
DiagId = diag::warn_cxx98_compat_two_right_angle_brackets;
else if (Tok.is(tok::greaterequal))
DiagId = diag::err_right_angle_bracket_equal_needs_space;
- Diag(Tok.getLocation(), DiagId) << Hint1 << Hint2;
+ Diag(TokLoc, DiagId) << Hint1 << Hint2;
}
+ // Find the "length" of the resulting '>' token. This is not always 1, as it
+ // can contain escaped newlines.
+ unsigned GreaterLength = Lexer::getTokenPrefixLength(
+ TokLoc, 1, PP.getSourceManager(), getLangOpts());
+
+ // Annotate the source buffer to indicate that we split the token after the
+ // '>'. This allows us to properly find the end of, and extract the spelling
+ // of, the '>' token later.
+ RAngleLoc = PP.SplitToken(TokLoc, GreaterLength);
+
// Strip the initial '>' from the token.
- Token PrevTok = Tok;
- if (RemainingToken == tok::equal && Next.is(tok::equal) &&
- areTokensAdjacent(Tok, Next)) {
- // Join two adjacent '=' tokens into one, for cases like:
- // void (*p)() = f<int>;
- // return f<int>==p;
+ bool CachingTokens = PP.IsPreviousCachedToken(Tok);
+
+ Token Greater = Tok;
+ Greater.setLocation(RAngleLoc);
+ Greater.setKind(tok::greater);
+ Greater.setLength(GreaterLength);
+
+ unsigned OldLength = Tok.getLength();
+ if (MergeWithNextToken) {
ConsumeToken();
- Tok.setKind(tok::equalequal);
- Tok.setLength(Tok.getLength() + 1);
- } else {
- Tok.setKind(RemainingToken);
- Tok.setLength(Tok.getLength() - 1);
+ OldLength += Tok.getLength();
}
- Tok.setLocation(Lexer::AdvanceToTokenCharacter(RAngleLoc, 1,
- PP.getSourceManager(),
- getLangOpts()));
-
- // The advance from '>>' to '>' in a ObjectiveC template argument list needs
- // to be properly reflected in the token cache to allow correct interaction
- // between annotation and backtracking.
- if (ObjCGenericList && PrevTok.getKind() == tok::greatergreater &&
- RemainingToken == tok::greater && PP.IsPreviousCachedToken(PrevTok)) {
- PrevTok.setKind(RemainingToken);
- PrevTok.setLength(1);
- // Break tok::greatergreater into two tok::greater but only add the second
- // one in case the client asks to consume the last token.
+
+ Tok.setKind(RemainingToken);
+ Tok.setLength(OldLength - GreaterLength);
+
+ // Split the second token if lexing it normally would lex a different token
+ // (eg, the fifth token in 'A<B>>>' should re-lex as '>', not '>>').
+ SourceLocation AfterGreaterLoc = TokLoc.getLocWithOffset(GreaterLength);
+ if (PreventMergeWithNextToken)
+ AfterGreaterLoc = PP.SplitToken(AfterGreaterLoc, Tok.getLength());
+ Tok.setLocation(AfterGreaterLoc);
+
+ // Update the token cache to match what we just did if necessary.
+ if (CachingTokens) {
+ // If the previous cached token is being merged, delete it.
+ if (MergeWithNextToken)
+ PP.ReplacePreviousCachedToken({});
+
if (ConsumeLastToken)
- PP.ReplacePreviousCachedToken({PrevTok, Tok});
+ PP.ReplacePreviousCachedToken({Greater, Tok});
else
- PP.ReplacePreviousCachedToken({PrevTok});
+ PP.ReplacePreviousCachedToken({Greater});
}
- if (!ConsumeLastToken) {
- // Since we're not supposed to consume the '>' token, we need to push
- // this token and revert the current token back to the '>'.
+ if (ConsumeLastToken) {
+ PrevTokLocation = RAngleLoc;
+ } else {
+ PrevTokLocation = TokBeforeGreaterLoc;
PP.EnterToken(Tok);
- Tok.setKind(tok::greater);
- Tok.setLength(1);
- Tok.setLocation(RAngleLoc);
+ Tok = Greater;
}
+
return false;
}
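
Net effect of the rewrite: rather than mutating tokens in place, the parser materializes a real '>' token via PP.SplitToken and keeps the token cache consistent. The classic inputs this machinery serves:

    #include <vector>
    std::vector<std::vector<int>> v;   // C++11: '>>' closes two template lists
    // In 'A<B>>>', after consuming one '>', the remainder must re-lex as '>' '>';
    // PreventMergeWithNextToken splits the second token to keep that true.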
-/// \brief Parses a template-id that after the template name has
+/// Parses a template-id that after the template name has
/// already been parsed.
///
/// This routine takes care of parsing the enclosed template argument
@@ -923,7 +955,7 @@ Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
/*ObjCGenericList=*/false);
}
-/// \brief Replace the tokens that form a simple-template-id with an
+/// Replace the tokens that form a simple-template-id with an
/// annotation token containing the complete template-id.
///
/// The first token in the stream must be the name of a template that
@@ -1015,12 +1047,12 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
Tok.setKind(tok::annot_template_id);
IdentifierInfo *TemplateII =
- TemplateName.getKind() == UnqualifiedId::IK_Identifier
+ TemplateName.getKind() == UnqualifiedIdKind::IK_Identifier
? TemplateName.Identifier
: nullptr;
OverloadedOperatorKind OpKind =
- TemplateName.getKind() == UnqualifiedId::IK_Identifier
+ TemplateName.getKind() == UnqualifiedIdKind::IK_Identifier
? OO_None
: TemplateName.OperatorFunctionId.Operator;
@@ -1044,7 +1076,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
return false;
}
-/// \brief Replaces a template-id annotation token with a type
+/// Replaces a template-id annotation token with a type
/// annotation token.
///
/// If there was a failure when forming the type from the template-id,
@@ -1089,12 +1121,12 @@ void Parser::AnnotateTemplateIdTokenAsType(bool IsClassName) {
PP.AnnotateCachedTokens(Tok);
}
-/// \brief Determine whether the given token can end a template argument.
+/// Determine whether the given token can end a template argument.
static bool isEndOfTemplateArgument(Token Tok) {
return Tok.isOneOf(tok::comma, tok::greater, tok::greatergreater);
}
-/// \brief Parse a C++ template template argument.
+/// Parse a C++ template template argument.
ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
if (!Tok.is(tok::identifier) && !Tok.is(tok::coloncolon) &&
!Tok.is(tok::annot_cxxscope))
@@ -1190,17 +1222,13 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
// argument before trying to disambiguate.
EnterExpressionEvaluationContext EnterConstantEvaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ /*LambdaContextDecl=*/nullptr,
+ /*ExprContext=*/Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
if (isCXXTypeId(TypeIdAsTemplateArgument)) {
- SourceLocation Loc = Tok.getLocation();
- TypeResult TypeArg = ParseTypeName(/*Range=*/nullptr,
- Declarator::TemplateTypeArgContext);
- if (TypeArg.isInvalid())
- return ParsedTemplateArgument();
-
- return ParsedTemplateArgument(ParsedTemplateArgument::Type,
- TypeArg.get().getAsOpaquePtr(),
- Loc);
+ TypeResult TypeArg = ParseTypeName(
+ /*Range=*/nullptr, DeclaratorContext::TemplateArgContext);
+ return Actions.ActOnTemplateTypeArgument(TypeArg);
}
// Try to parse a template template argument.
@@ -1228,7 +1256,7 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
ExprArg.get(), Loc);
}
-/// \brief Determine whether the current tokens can only be parsed as a
+/// Determine whether the current tokens can only be parsed as a
/// template argument list (starting with the '<') and never as a '<'
/// expression.
bool Parser::IsTemplateArgumentList(unsigned Skip) {
@@ -1290,27 +1318,26 @@ Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
return false;
}
-/// \brief Parse a C++ explicit template instantiation
+/// Parse a C++ explicit template instantiation
/// (C++ [temp.explicit]).
///
/// explicit-instantiation:
/// 'extern' [opt] 'template' declaration
///
/// Note that the 'extern' is a GNU extension and C++11 feature.
-Decl *Parser::ParseExplicitInstantiation(unsigned Context,
+Decl *Parser::ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
+ ParsedAttributes &AccessAttrs,
AccessSpecifier AS) {
// This isn't really required here.
ParsingDeclRAIIObject
ParsingTemplateParams(*this, ParsingDeclRAIIObject::NoParent);
- return ParseSingleDeclarationAfterTemplate(Context,
- ParsedTemplateInfo(ExternLoc,
- TemplateLoc),
- ParsingTemplateParams,
- DeclEnd, AS);
+ return ParseSingleDeclarationAfterTemplate(
+ Context, ParsedTemplateInfo(ExternLoc, TemplateLoc),
+ ParsingTemplateParams, DeclEnd, AccessAttrs, AS);
}
SourceRange Parser::ParsedTemplateInfo::getSourceRange() const {
@@ -1328,7 +1355,7 @@ void Parser::LateTemplateParserCallback(void *P, LateParsedTemplate &LPT) {
((Parser *)P)->ParseLateTemplatedFuncDef(LPT);
}
-/// \brief Late parse a C++ function template in Microsoft mode.
+/// Late parse a C++ function template in Microsoft mode.
void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
if (!LPT.D)
return;
@@ -1419,7 +1446,7 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
delete *I;
}
-/// \brief Lex a delayed template function for late parsing.
+/// Lex a delayed template function for late parsing.
void Parser::LexTemplateFunctionForLateParsing(CachedTokens &Toks) {
tok::TokenKind kind = Tok.getKind();
if (!ConsumeAndStoreFunctionPrologue(Toks)) {
@@ -1435,3 +1462,111 @@ void Parser::LexTemplateFunctionForLateParsing(CachedTokens &Toks) {
}
}
}
+
+/// We've parsed something that could plausibly be intended to be a template
+/// name (\p LHS) followed by a '<' token, and the following code can't possibly
+/// be an expression. Determine if this is likely to be a template-id and if so,
+/// diagnose it.
+bool Parser::diagnoseUnknownTemplateId(ExprResult LHS, SourceLocation Less) {
+ TentativeParsingAction TPA(*this);
+ // FIXME: We could look at the token sequence in a lot more detail here.
+ if (SkipUntil(tok::greater, tok::greatergreater, tok::greatergreatergreater,
+ StopAtSemi | StopBeforeMatch)) {
+ TPA.Commit();
+
+ SourceLocation Greater;
+ ParseGreaterThanInTemplateList(Greater, true, false);
+ Actions.diagnoseExprIntendedAsTemplateName(getCurScope(), LHS,
+ Less, Greater);
+ return true;
+ }
+
+ // There's no matching '>' token, this probably isn't supposed to be
+ // interpreted as a template-id. Parse it as an (ill-formed) comparison.
+ TPA.Revert();
+ return false;
+}
+
+void Parser::checkPotentialAngleBracket(ExprResult &PotentialTemplateName) {
+ assert(Tok.is(tok::less) && "not at a potential angle bracket");
+
+ bool DependentTemplateName = false;
+ if (!Actions.mightBeIntendedToBeTemplateName(PotentialTemplateName,
+ DependentTemplateName))
+ return;
+
+ // OK, this might be a name that the user intended to be parsed as a
+ // template-name, followed by a '<' token. Check for some easy cases.
+
+ // If we have potential_template<>, then it's supposed to be a template-name.
+ if (NextToken().is(tok::greater) ||
+ (getLangOpts().CPlusPlus11 &&
+ NextToken().isOneOf(tok::greatergreater, tok::greatergreatergreater))) {
+ SourceLocation Less = ConsumeToken();
+ SourceLocation Greater;
+ ParseGreaterThanInTemplateList(Greater, true, false);
+ Actions.diagnoseExprIntendedAsTemplateName(
+ getCurScope(), PotentialTemplateName, Less, Greater);
+ // FIXME: Perform error recovery.
+ PotentialTemplateName = ExprError();
+ return;
+ }
+
+ // If we have 'potential_template<type-id', assume it's supposed to be a
+ // template-name if there's a matching '>' later on.
+ {
+ // FIXME: Avoid the tentative parse when NextToken() can't begin a type.
+ TentativeParsingAction TPA(*this);
+ SourceLocation Less = ConsumeToken();
+ if (isTypeIdUnambiguously() &&
+ diagnoseUnknownTemplateId(PotentialTemplateName, Less)) {
+ TPA.Commit();
+ // FIXME: Perform error recovery.
+ PotentialTemplateName = ExprError();
+ return;
+ }
+ TPA.Revert();
+ }
+
+ // Otherwise, remember that we saw this in case we see a potentially-matching
+ // '>' token later on.
+ AngleBracketTracker::Priority Priority =
+ (DependentTemplateName ? AngleBracketTracker::DependentName
+ : AngleBracketTracker::PotentialTypo) |
+ (Tok.hasLeadingSpace() ? AngleBracketTracker::SpaceBeforeLess
+ : AngleBracketTracker::NoSpaceBeforeLess);
+ AngleBrackets.add(*this, PotentialTemplateName.get(), Tok.getLocation(),
+ Priority);
+}
+
+bool Parser::checkPotentialAngleBracketDelimiter(
+ const AngleBracketTracker::Loc &LAngle, const Token &OpToken) {
+ // If a comma in an expression context is followed by a type that can be a
+ // template argument and cannot be an expression, then this is ill-formed,
+ // but might be intended to be part of a template-id.
+ if (OpToken.is(tok::comma) && isTypeIdUnambiguously() &&
+ diagnoseUnknownTemplateId(LAngle.TemplateName, LAngle.LessLoc)) {
+ AngleBrackets.clear(*this);
+ return true;
+ }
+
+ // If a context that looks like a template-id is followed by '()', then
+ // this is ill-formed, but might be intended to be a template-id
+ // followed by '()'.
+ if (OpToken.is(tok::greater) && Tok.is(tok::l_paren) &&
+ NextToken().is(tok::r_paren)) {
+ Actions.diagnoseExprIntendedAsTemplateName(
+ getCurScope(), LAngle.TemplateName, LAngle.LessLoc,
+ OpToken.getLocation());
+ AngleBrackets.clear(*this);
+ return true;
+ }
+
+ // After a '>' (etc), we're no longer potentially in a construct that's
+ // intended to be treated as a template-id.
+ if (OpToken.is(tok::greater) ||
+ (getLangOpts().CPlusPlus11 &&
+ OpToken.isOneOf(tok::greatergreater, tok::greatergreatergreater)))
+ AngleBrackets.clear(*this);
+ return false;
+}
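
A sketch of the ill-formed code these heuristics target: a name used as if it were a template makes 'name<...>' parse as comparisons, and the tracker converts that into a targeted diagnostic (assuming 'fn' resolves to a non-template):

    int fn(int);
    void test() {
      fn<int>(0);   // '<' after a non-template name; with a matching '>' found,
    }               // diagnosed as an expression intended as a template-name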
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 5c206f4eab90..0603d8e75eea 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -401,7 +401,7 @@ struct Parser::ConditionDeclarationOrInitStatementState {
}
};
-/// \brief Disambiguates between a declaration in a condition, a
+/// Disambiguates between a declaration in a condition, a
/// simple-declaration in an init-statement, and an expression for
/// a condition of a if/switch statement.
///
@@ -472,7 +472,7 @@ Parser::isCXXConditionDeclarationOrInitStatement(bool CanBeInitStatement) {
return ConditionOrInitStatement::Expression;
}
- /// \brief Determine whether the next set of tokens contains a type-id.
+ /// Determine whether the next set of tokens contains a type-id.
///
/// The context parameter states what context we're parsing right
/// now, which affects how this routine copes with the token
@@ -553,7 +553,7 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
return TPR == TPResult::True;
}
-/// \brief Returns true if this is a C++11 attribute-specifier. Per
+/// Returns true if this is a C++11 attribute-specifier. Per
/// C++11 [dcl.attr.grammar]p6, two consecutive left square bracket tokens
/// always introduce an attribute. In Objective-C++11, this rule does not
/// apply if either '[' begins a message-send.
@@ -873,7 +873,8 @@ Parser::TPResult Parser::TryParseOperatorId() {
/// template-id [TODO]
///
Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
- bool mayHaveIdentifier) {
+ bool mayHaveIdentifier,
+ bool mayHaveDirectInit) {
// declarator:
// direct-declarator
// ptr-operator declarator
@@ -930,6 +931,9 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
return TPResult::False;
}
+ if (mayHaveDirectInit)
+ return TPResult::Ambiguous;
+
while (1) {
TPResult TPR(TPResult::Ambiguous);
@@ -1015,6 +1019,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw___FUNCDNAME__:
case tok::kw___FUNCSIG__:
case tok::kw_L__FUNCTION__:
+ case tok::kw_L__FUNCSIG__:
case tok::kw___PRETTY_FUNCTION__:
case tok::kw___uuidof:
#define TYPE_TRAIT(N,Spelling,K) \
@@ -1048,6 +1053,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw_class:
case tok::kw_typename:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw__Decimal32:
@@ -1242,6 +1248,17 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case ANK_TentativeDecl:
return TPResult::False;
case ANK_TemplateName:
+ // In C++17, this could be a type template for class template argument
+ // deduction. Try to form a type annotation for it. If we're in a
+ // template template argument, we'll undo this when checking the
+ // validity of the argument.
+ if (getLangOpts().CPlusPlus17) {
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ if (Tok.isNot(tok::identifier))
+ break;
+ }
+
// A bare type template-name which can't be a template template
// argument is an error, and was probably intended to be a type.
return GreaterThanIsOperator ? TPResult::True : TPResult::False;
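
In C++17 a bare template-name can begin a declaration through class template argument deduction, so the disambiguator now annotates it as a type. For example:

    #include <utility>
    std::pair p(1, 2.5);   // C++17 CTAD: 'pair' alone is a valid decl-specifier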
@@ -1341,6 +1358,11 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// cv-qualifier
case tok::kw_const:
case tok::kw_volatile:
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___generic:
// GNU
case tok::kw_restrict:
@@ -1420,8 +1442,6 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
*HasMissingTypename = true;
return TPResult::Ambiguous;
}
-
- // FIXME: Fails to either revert or commit the tentative parse!
} else {
// Try to resolve the name. If it doesn't exist, assume it was
// intended to name a type and keep disambiguating.
@@ -1431,19 +1451,33 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case ANK_TentativeDecl:
return TPResult::False;
case ANK_TemplateName:
+ // In C++17, this could be a type template for class template
+ // argument deduction.
+ if (getLangOpts().CPlusPlus17) {
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error;
+ if (Tok.isNot(tok::identifier))
+ break;
+ }
+
// A bare type template-name which can't be a template template
// argument is an error, and was probably intended to be a type.
- return GreaterThanIsOperator ? TPResult::True : TPResult::False;
+ // In C++17, this could be class template argument deduction.
+ return (getLangOpts().CPlusPlus17 || GreaterThanIsOperator)
+ ? TPResult::True
+ : TPResult::False;
case ANK_Unresolved:
return HasMissingTypename ? TPResult::Ambiguous
: TPResult::False;
case ANK_Success:
- // Annotated it, check again.
- assert(Tok.isNot(tok::annot_cxxscope) ||
- NextToken().isNot(tok::identifier));
- return isCXXDeclarationSpecifier(BracedCastResult,
- HasMissingTypename);
+ break;
}
+
+ // Annotated it, check again.
+ assert(Tok.isNot(tok::annot_cxxscope) ||
+ NextToken().isNot(tok::identifier));
+ return isCXXDeclarationSpecifier(BracedCastResult,
+ HasMissingTypename);
}
}
return TPResult::False;
@@ -1496,6 +1530,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_char:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_bool:
@@ -1587,6 +1622,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
// simple-type-specifier
case tok::kw_char:
case tok::kw_wchar_t:
+ case tok::kw_char8_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_bool:
@@ -1867,7 +1903,8 @@ Parser::TPResult Parser::TryParseFunctionDeclarator() {
return TPResult::Error;
// cv-qualifier-seq
- while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw_restrict))
+ while (Tok.isOneOf(tok::kw_const, tok::kw_volatile, tok::kw___unaligned,
+ tok::kw_restrict))
ConsumeToken();
// ref-qualifier[opt]
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 8aa50a2c7f2a..7a0b29cbae86 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -20,11 +20,12 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "llvm/Support/Path.h"
using namespace clang;
namespace {
-/// \brief A comment handler that passes comments found by the preprocessor
+/// A comment handler that passes comments found by the preprocessor
/// to the parser action.
class ActionCommentHandler : public CommentHandler {
Sema &S;
@@ -77,7 +78,7 @@ DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
return Diag(Tok.getLocation(), DiagID);
}
-/// \brief Emits a diagnostic suggesting parentheses surrounding a
+/// Emits a diagnostic suggesting parentheses surrounding a
/// given range.
///
/// \param Loc The location where we'll emit the diagnostic.
@@ -697,9 +698,8 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
return nullptr;
case tok::semi:
// Either a C++11 empty-declaration or attribute-declaration.
- SingleDecl = Actions.ActOnEmptyDeclaration(getCurScope(),
- attrs.getList(),
- Tok.getLocation());
+ SingleDecl =
+ Actions.ActOnEmptyDeclaration(getCurScope(), attrs, Tok.getLocation());
ConsumeExtraSemi(OutsideFunction);
break;
case tok::r_brace:
@@ -741,7 +741,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
break;
}
case tok::at:
- return ParseObjCAtDirectives();
+ return ParseObjCAtDirectives(attrs);
case tok::minus:
case tok::plus:
if (!getLangOpts().ObjC1) {
@@ -783,7 +783,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
- return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
}
case tok::kw_static:
@@ -793,7 +793,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
- return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
}
goto dont_know;
@@ -804,7 +804,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// Inline namespaces. Allowed as an extension even in C++03.
if (NextKind == tok::kw_namespace) {
SourceLocation DeclEnd;
- return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
}
// Parse (then ignore) 'inline' prior to a template instantiation. This is
@@ -813,7 +813,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 1;
SourceLocation DeclEnd;
- return ParseDeclaration(Declarator::FileContext, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::FileContext, DeclEnd, attrs);
}
}
goto dont_know;
@@ -828,8 +828,8 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
SourceLocation DeclEnd;
return Actions.ConvertDeclToDeclGroup(
- ParseExplicitInstantiation(Declarator::FileContext,
- ExternLoc, TemplateLoc, DeclEnd));
+ ParseExplicitInstantiation(DeclaratorContext::FileContext, ExternLoc,
+ TemplateLoc, DeclEnd, attrs));
}
goto dont_know;
@@ -858,7 +858,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
return Actions.ConvertDeclToDeclGroup(SingleDecl);
}
-/// \brief Determine whether the current token, if it occurs after a
+/// Determine whether the current token, if it occurs after a
/// declarator, continues a declaration or declaration list.
bool Parser::isDeclarationAfterDeclarator() {
// Check for '= delete' or '= default'
@@ -877,7 +877,7 @@ bool Parser::isDeclarationAfterDeclarator() {
Tok.is(tok::l_paren)); // int X(0) -> not a function def [C++]
}
-/// \brief Determine whether the current token, if it occurs after a
+/// Determine whether the current token, if it occurs after a
/// declarator, indicates the start of a function definition.
bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
assert(Declarator.isFunctionDeclarator() && "Isn't a function declarator");
@@ -919,12 +919,13 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
AccessSpecifier AS) {
MaybeParseMicrosoftAttributes(DS.getAttributes());
// Parse the common declaration-specifiers piece.
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC_top_level);
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS,
+ DeclSpecContext::DSC_top_level);
// If we had a free-standing type definition with a missing semicolon, we
// may get this far before the problem becomes obvious.
- if (DS.hasTagDefinition() &&
- DiagnoseMissingSemiAfterTagDefinition(DS, AS, DSC_top_level))
+ if (DS.hasTagDefinition() && DiagnoseMissingSemiAfterTagDefinition(
+ DS, AS, DeclSpecContext::DSC_top_level))
return nullptr;
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
@@ -1004,11 +1005,11 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
if (getLangOpts().CPlusPlus && isTokenStringLiteral() &&
DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
- Decl *TheDecl = ParseLinkage(DS, Declarator::FileContext);
+ Decl *TheDecl = ParseLinkage(DS, DeclaratorContext::FileContext);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- return ParseDeclGroup(DS, Declarator::FileContext);
+ return ParseDeclGroup(DS, DeclaratorContext::FileContext);
}
Parser::DeclGroupPtrTy
@@ -1088,15 +1089,10 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// Check to make sure that any normal attributes are allowed to be on
// a definition. Late parsed attributes are checked at the end.
if (Tok.isNot(tok::equal)) {
- AttributeList *DtorAttrs = D.getAttributes();
- while (DtorAttrs) {
- if (DtorAttrs->isKnownToGCC() &&
- !DtorAttrs->isCXX11Attribute()) {
- Diag(DtorAttrs->getLoc(), diag::warn_attribute_on_function_definition)
- << DtorAttrs->getName();
- }
- DtorAttrs = DtorAttrs->getNext();
- }
+ for (const ParsedAttr &AL : D.getAttributes())
+ if (AL.isKnownToGCC() && !AL.isCXX11Attribute())
+ Diag(AL.getLoc(), diag::warn_attribute_on_function_definition)
+ << AL.getName();
}
// In delayed template parsing mode, for function template we consume the
@@ -1313,7 +1309,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
}
// Parse the first declarator attached to this declspec.
- Declarator ParmDeclarator(DS, Declarator::KNRTypeListContext);
+ Declarator ParmDeclarator(DS, DeclaratorContext::KNRTypeListContext);
ParseDeclarator(ParmDeclarator);
// Handle the full declarator list.
@@ -1450,7 +1446,7 @@ ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
return Result;
}
-/// \brief Get the TemplateIdAnnotation from the token and put it in the
+/// Get the TemplateIdAnnotation from the token and put it in the
/// cleanup pool so that it gets destroyed when parsing the current top level
/// declaration is finished.
TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
@@ -1478,7 +1474,7 @@ void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
PP.AnnotateCachedTokens(Tok);
}
-/// \brief Attempt to classify the name at the current token position. This may
+/// Attempt to classify the name at the current token position. This may
/// form a type, scope or primary expression annotation, or replace the token
/// with a typo-corrected keyword. This is only appropriate when the current
/// name must refer to an entity which has already been declared.
@@ -1763,7 +1759,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
return TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation);
}
-/// \brief Try to annotate a type or scope token, having already parsed an
+/// Try to annotate a type or scope token, having already parsed an
/// optional scope specifier. \p IsNewScope should be \c true unless the scope
/// specifier was extracted from an existing tok::annot_cxxscope annotation.
bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
@@ -1774,8 +1770,8 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
*Tok.getIdentifierInfo(), Tok.getLocation(), getCurScope(), &SS,
false, NextToken().is(tok::period), nullptr,
/*IsCtorOrDtorName=*/false,
- /*NonTrivialTypeSourceInfo*/ true,
- /*IsClassTemplateDeductionContext*/GreaterThanIsOperator)) {
+ /*NonTrivialTypeSourceInfo*/true,
+ /*IsClassTemplateDeductionContext*/true)) {
SourceLocation BeginLoc = Tok.getLocation();
if (SS.isNotEmpty()) // it was a C++ qualified type name.
BeginLoc = SS.getBeginLoc();
@@ -2001,7 +1997,7 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
if (ParseUnqualifiedId(
Result.SS, /*EnteringContext*/false, /*AllowDestructorName*/true,
/*AllowConstructorName*/true, /*AllowDeductionGuide*/false, nullptr,
- TemplateKWLoc, Result.Name)) {
+ &TemplateKWLoc, Result.Name)) {
T.skipToEnd();
return true;
}
@@ -2122,6 +2118,7 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
assert((AtLoc.isInvalid() ? Tok.is(tok::kw_import)
: Tok.isObjCAtKeyword(tok::objc_import)) &&
"Improper start to module import");
+ bool IsObjCAtImport = Tok.isObjCAtKeyword(tok::objc_import);
SourceLocation ImportLoc = ConsumeToken();
SourceLocation StartLoc = AtLoc.isInvalid() ? ImportLoc : AtLoc;
@@ -2145,6 +2142,16 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
if (Import.isInvalid())
return nullptr;
+ // Using '@import' in framework headers requires modules to be enabled so that
+ // the header is parseable. Emit a warning to make the user aware.
+ if (IsObjCAtImport && AtLoc.isValid()) {
+ auto &SrcMgr = PP.getSourceManager();
+ auto *FE = SrcMgr.getFileEntryForID(SrcMgr.getFileID(AtLoc));
+ if (FE && llvm::sys::path::parent_path(FE->getDir()->getName())
+ .endswith(".framework"))
+ Diags.Report(AtLoc, diag::warn_atimport_in_framework_header);
+ }
+
return Import.get();
}
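
The check is string-based on the containing directory of the file holding the '@import'. A sketch of the Objective-C header layout that now warns (paths hypothetical):

    // MyKit.framework/Headers/MyKit.h
    @import Foundation;   // warn_atimport_in_framework_header: clients need
                          // modules enabled to parse this header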
@@ -2184,7 +2191,7 @@ bool Parser::ParseModuleName(
}
}
-/// \brief Try recover parser when module annotation appears where it must not
+/// Try to recover the parser when a module annotation appears where it must not
/// be found.
/// \returns false if the recovery was successful and parsing may be continued, or
/// true if the parser must bail out to the top level and handle the token there.
@@ -2249,7 +2256,7 @@ bool BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
return true;
}
- if (getDepth() < MaxDepth)
+ if (getDepth() < P.getLangOpts().BracketDepth)
return false;
return diagnoseOverflow();
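
The fixed MaxDepth constant gives way to the bracket depth carried in LangOptions, which the -fbracket-depth option already controls. Roughly:

    // clang -fbracket-depth=300 deep.c
    int x = ((((/* ...more than the configured depth... */0))));
    // error: bracket nesting level exceeds maximum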
diff --git a/lib/Rewrite/DeltaTree.cpp b/lib/Rewrite/DeltaTree.cpp
index 352fab077a2e..1dfc26cc918f 100644
--- a/lib/Rewrite/DeltaTree.cpp
+++ b/lib/Rewrite/DeltaTree.cpp
@@ -1,4 +1,4 @@
-//===--- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ----------------===//
+//===- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,8 +13,10 @@
#include "clang/Rewrite/Core/DeltaTree.h"
#include "clang/Basic/LLVM.h"
-#include <cstdio>
+#include "llvm/Support/Casting.h"
+#include <cassert>
#include <cstring>
+
using namespace clang;
/// The DeltaTree class is a multiway search tree (BTree) structure with some
@@ -33,6 +35,7 @@ using namespace clang;
/// full delta implied by a whole subtree in constant time.
namespace {
+
/// SourceDelta - As code in the original input buffer is added and deleted,
/// SourceDelta records are used to keep track of how the input SourceLocation
/// object is mapped into the output buffer.
@@ -67,12 +70,11 @@ namespace {
enum { WidthFactor = 8 };
/// Values - This tracks the SourceDelta's currently in this node.
- ///
SourceDelta Values[2*WidthFactor-1];
/// NumValuesUsed - This tracks the number of values this node currently
/// holds.
- unsigned char NumValuesUsed;
+ unsigned char NumValuesUsed = 0;
/// IsLeaf - This is true if this is a leaf of the btree. If false, this is
/// an interior node, and is actually an instance of DeltaTreeInteriorNode.
@@ -80,20 +82,22 @@ namespace {
/// FullDelta - This is the full delta of all the values in this node and
/// all children nodes.
- int FullDelta;
+ int FullDelta = 0;
+
public:
- DeltaTreeNode(bool isLeaf = true)
- : NumValuesUsed(0), IsLeaf(isLeaf), FullDelta(0) {}
+ DeltaTreeNode(bool isLeaf = true) : IsLeaf(isLeaf) {}
bool isLeaf() const { return IsLeaf; }
int getFullDelta() const { return FullDelta; }
bool isFull() const { return NumValuesUsed == 2*WidthFactor-1; }
unsigned getNumValuesUsed() const { return NumValuesUsed; }
+
const SourceDelta &getValue(unsigned i) const {
assert(i < NumValuesUsed && "Invalid value #");
return Values[i];
}
+
SourceDelta &getValue(unsigned i) {
assert(i < NumValuesUsed && "Invalid value #");
return Values[i];
@@ -114,23 +118,24 @@ namespace {
void Destroy();
};
-} // end anonymous namespace
-namespace {
/// DeltaTreeInteriorNode - When isLeaf = false, a node has child pointers.
/// This class tracks them.
class DeltaTreeInteriorNode : public DeltaTreeNode {
+ friend class DeltaTreeNode;
+
DeltaTreeNode *Children[2*WidthFactor];
+
~DeltaTreeInteriorNode() {
for (unsigned i = 0, e = NumValuesUsed+1; i != e; ++i)
Children[i]->Destroy();
}
- friend class DeltaTreeNode;
+
public:
DeltaTreeInteriorNode() : DeltaTreeNode(false /*nonleaf*/) {}
DeltaTreeInteriorNode(const InsertResult &IR)
- : DeltaTreeNode(false /*nonleaf*/) {
+ : DeltaTreeNode(false /*nonleaf*/) {
Children[0] = IR.LHS;
Children[1] = IR.RHS;
Values[0] = IR.Split;
@@ -142,15 +147,16 @@ namespace {
assert(i < getNumValuesUsed()+1 && "Invalid child");
return Children[i];
}
+
DeltaTreeNode *getChild(unsigned i) {
assert(i < getNumValuesUsed()+1 && "Invalid child");
return Children[i];
}
- static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
+ static bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
};
-}
+} // namespace
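
Dropping the redundant 'inline' keeps the LLVM RTTI hook idiomatic; classof is what lets dyn_cast/cast (from the newly included llvm/Support/Casting.h) operate on these node types. The idiom, in miniature:

    void visit(DeltaTreeNode *N) {
      if (auto *IN = dyn_cast<DeltaTreeInteriorNode>(N)) {
        // dyn_cast consults DeltaTreeInteriorNode::classof(N), i.e. !N->isLeaf()
        (void)IN->getChild(0);
      }
    }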
/// Destroy - A 'virtual' destructor.
void DeltaTreeNode::Destroy() {
@@ -166,7 +172,7 @@ void DeltaTreeNode::RecomputeFullDeltaLocally() {
int NewFullDelta = 0;
for (unsigned i = 0, e = getNumValuesUsed(); i != e; ++i)
NewFullDelta += Values[i].Delta;
- if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this))
+ if (auto *IN = dyn_cast<DeltaTreeInteriorNode>(this))
for (unsigned i = 0, e = getNumValuesUsed()+1; i != e; ++i)
NewFullDelta += IN->getChild(i)->getFullDelta();
FullDelta = NewFullDelta;
@@ -223,7 +229,7 @@ bool DeltaTreeNode::DoInsertion(unsigned FileIndex, int Delta,
}
// Otherwise, this is an interior node. Send the request down the tree.
- DeltaTreeInteriorNode *IN = cast<DeltaTreeInteriorNode>(this);
+ auto *IN = cast<DeltaTreeInteriorNode>(this);
if (!IN->Children[i]->DoInsertion(FileIndex, Delta, InsertRes))
return false; // If there was space in the child, just return.
@@ -300,7 +306,7 @@ void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
// Create the new child node.
DeltaTreeNode *NewNode;
- if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this)) {
+ if (auto *IN = dyn_cast<DeltaTreeInteriorNode>(this)) {
// If this is an interior node, also move over 'WidthFactor' children
// into the new node.
DeltaTreeInteriorNode *New = new DeltaTreeInteriorNode();
@@ -328,8 +334,6 @@ void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
InsertRes.Split = Values[WidthFactor-1];
}
-
-
//===----------------------------------------------------------------------===//
// DeltaTree Implementation
//===----------------------------------------------------------------------===//
@@ -340,7 +344,7 @@ void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
/// VerifyTree - Walk the btree performing assertions on various properties to
/// verify consistency. This is useful for debugging new changes to the tree.
static void VerifyTree(const DeltaTreeNode *N) {
- const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(N);
+ const auto *IN = dyn_cast<DeltaTreeInteriorNode>(N);
if (IN == 0) {
// Verify leaves, just ensure that FullDelta matches up and the elements
// are in proper order.
@@ -387,6 +391,7 @@ static DeltaTreeNode *getRoot(void *Root) {
DeltaTree::DeltaTree() {
Root = new DeltaTreeNode();
}
+
DeltaTree::DeltaTree(const DeltaTree &RHS) {
// Currently we only support copying when the RHS is empty.
assert(getRoot(RHS.Root)->getNumValuesUsed() == 0 &&
@@ -407,7 +412,7 @@ int DeltaTree::getDeltaAt(unsigned FileIndex) const {
int Result = 0;
// Walk down the tree.
- while (1) {
+ while (true) {
// For all nodes, include any local deltas before the specified file
// index by summing them up directly. Keep track of how many were
// included.
@@ -423,7 +428,7 @@ int DeltaTree::getDeltaAt(unsigned FileIndex) const {
// If we have an interior node, include information about children and
// recurse. Otherwise, if we have a leaf, we're done.
- const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(Node);
+ const auto *IN = dyn_cast<DeltaTreeInteriorNode>(Node);
if (!IN) return Result;
// Include any children to the left of the values we skipped, all of
@@ -461,4 +466,3 @@ void DeltaTree::AddDelta(unsigned FileIndex, int Delta) {
VerifyTree(MyRoot);
#endif
}
-
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index 618c0179f100..d93961f3582e 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -30,7 +30,8 @@ using namespace clang;
/// start/end tags are placed at the start/end of each line if the range is
/// multiline.
void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
- const char *StartTag, const char *EndTag) {
+ const char *StartTag, const char *EndTag,
+ bool IsTokenRange) {
SourceManager &SM = R.getSourceMgr();
B = SM.getExpansionLoc(B);
E = SM.getExpansionLoc(E);
@@ -41,7 +42,8 @@ void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
unsigned EOffset = SM.getFileOffset(E);
// Include the whole end token in the range.
- EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts());
+ if (IsTokenRange)
+ EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts());
bool Invalid = false;
const char *BufferStart = SM.getBufferData(FID, &Invalid).data();
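
Callers can now pass a character range, skipping the end-token measurement. A sketch of the two modes, with R, B, E as in the function and IsTokenRange presumably defaulted to true in the header:

    // Token range (default): the final token at E is included whole.
    html::HighlightRange(R, B, E, "<span class=\"msg\">", "</span>");
    // Character range: the highlight stops exactly at E's file offset.
    html::HighlightRange(R, B, E, "<span class=\"msg\">", "</span>",
                         /*IsTokenRange=*/false);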
@@ -210,9 +212,9 @@ static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
SmallString<256> Str;
llvm::raw_svector_ostream OS(Str);
- OS << "<tr><td class=\"num\" id=\"LN"
- << LineNo << "\">"
- << LineNo << "</td><td class=\"line\">";
+ OS << "<tr class=\"codeline\" data-linenumber=\"" << LineNo << "\">"
+ << "<td class=\"num\" id=\"LN" << LineNo << "\">" << LineNo
+ << "</td><td class=\"line\">";
if (B == E) { // Handle empty lines.
OS << " </td></tr>";
@@ -263,7 +265,10 @@ void html::AddLineNumbers(Rewriter& R, FileID FID) {
}
// Add one big table tag that surrounds all of the code.
- RB.InsertTextBefore(0, "<table class=\"code\">\n");
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "<table class=\"code\" data-fileid=\"" << FID.getHashValue() << "\">\n";
+ RB.InsertTextBefore(0, os.str());
RB.InsertTextAfter(FileEnd - FileBeg, "</table>");
}
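
With both hunks applied, the generated markup gains stable hooks for scripts; roughly (values illustrative):

    <table class="code" data-fileid="1">
      <tr class="codeline" data-linenumber="42">
        <td class="num" id="LN42">42</td><td class="line">...</td>
      </tr>
    </table>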
@@ -285,78 +290,128 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter &R, FileID FID,
if (!title.empty())
os << "<title>" << html::EscapeText(title) << "</title>\n";
- os << "<style type=\"text/css\">\n"
- " body { color:#000000; background-color:#ffffff }\n"
- " body { font-family:Helvetica, sans-serif; font-size:10pt }\n"
- " h1 { font-size:14pt }\n"
- " .FileName { margin-top: 5px; margin-bottom: 5px; display: inline; }\n"
- " .FileNav { margin-left: 5px; margin-right: 5px; display: inline; }\n"
- " .FileNav a { text-decoration:none; font-size: larger; }\n"
- " .divider { margin-top: 30px; margin-bottom: 30px; height: 15px; }\n"
- " .divider { background-color: gray; }\n"
- " .code { border-collapse:collapse; width:100%; }\n"
- " .code { font-family: \"Monospace\", monospace; font-size:10pt }\n"
- " .code { line-height: 1.2em }\n"
- " .comment { color: green; font-style: oblique }\n"
- " .keyword { color: blue }\n"
- " .string_literal { color: red }\n"
- " .directive { color: darkmagenta }\n"
- // Macro expansions.
- " .expansion { display: none; }\n"
- " .macro:hover .expansion { display: block; border: 2px solid #FF0000; "
- "padding: 2px; background-color:#FFF0F0; font-weight: normal; "
- " -webkit-border-radius:5px; -webkit-box-shadow:1px 1px 7px #000; "
- " border-radius:5px; box-shadow:1px 1px 7px #000; "
- "position: absolute; top: -1em; left:10em; z-index: 1 } \n"
- " .macro { color: darkmagenta; background-color:LemonChiffon;"
- // Macros are position: relative to provide base for expansions.
- " position: relative }\n"
- " .num { width:2.5em; padding-right:2ex; background-color:#eeeeee }\n"
- " .num { text-align:right; font-size:8pt }\n"
- " .num { color:#444444 }\n"
- " .line { padding-left: 1ex; border-left: 3px solid #ccc }\n"
- " .line { white-space: pre }\n"
- " .msg { -webkit-box-shadow:1px 1px 7px #000 }\n"
- " .msg { box-shadow:1px 1px 7px #000 }\n"
- " .msg { -webkit-border-radius:5px }\n"
- " .msg { border-radius:5px }\n"
- " .msg { font-family:Helvetica, sans-serif; font-size:8pt }\n"
- " .msg { float:left }\n"
- " .msg { padding:0.25em 1ex 0.25em 1ex }\n"
- " .msg { margin-top:10px; margin-bottom:10px }\n"
- " .msg { font-weight:bold }\n"
- " .msg { max-width:60em; word-wrap: break-word; white-space: pre-wrap }\n"
- " .msgT { padding:0x; spacing:0x }\n"
- " .msgEvent { background-color:#fff8b4; color:#000000 }\n"
- " .msgControl { background-color:#bbbbbb; color:#000000 }\n"
- " .msgNote { background-color:#ddeeff; color:#000000 }\n"
- " .mrange { background-color:#dfddf3 }\n"
- " .mrange { border-bottom:1px solid #6F9DBE }\n"
- " .PathIndex { font-weight: bold; padding:0px 5px; "
- "margin-right:5px; }\n"
- " .PathIndex { -webkit-border-radius:8px }\n"
- " .PathIndex { border-radius:8px }\n"
- " .PathIndexEvent { background-color:#bfba87 }\n"
- " .PathIndexControl { background-color:#8c8c8c }\n"
- " .PathNav a { text-decoration:none; font-size: larger }\n"
- " .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
- " .CodeRemovalHint { background-color:#de1010 }\n"
- " .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
- " .selected{ background-color:orange !important; }\n"
- " table.simpletable {\n"
- " padding: 5px;\n"
- " font-size:12pt;\n"
- " margin:20px;\n"
- " border-collapse: collapse; border-spacing: 0px;\n"
- " }\n"
- " td.rowname {\n"
- " text-align: right;\n"
- " vertical-align: top;\n"
- " font-weight: bold;\n"
- " color:#444444;\n"
- " padding-right:2ex;\n"
- " }\n"
- "</style>\n</head>\n<body>";
+ os << R"<<<(
+<style type="text/css">
+body { color:#000000; background-color:#ffffff }
+body { font-family:Helvetica, sans-serif; font-size:10pt }
+h1 { font-size:14pt }
+.FileName { margin-top: 5px; margin-bottom: 5px; display: inline; }
+.FileNav { margin-left: 5px; margin-right: 5px; display: inline; }
+.FileNav a { text-decoration:none; font-size: larger; }
+.divider { margin-top: 30px; margin-bottom: 30px; height: 15px; }
+.divider { background-color: gray; }
+.code { border-collapse:collapse; width:100%; }
+.code { font-family: "Monospace", monospace; font-size:10pt }
+.code { line-height: 1.2em }
+.comment { color: green; font-style: oblique }
+.keyword { color: blue }
+.string_literal { color: red }
+.directive { color: darkmagenta }
+/* Macro expansions. */
+.expansion { display: none; }
+.macro:hover .expansion {
+ display: block;
+ border: 2px solid #FF0000;
+ padding: 2px;
+ background-color:#FFF0F0;
+ font-weight: normal;
+ -webkit-border-radius:5px;
+ -webkit-box-shadow:1px 1px 7px #000;
+ border-radius:5px;
+ box-shadow:1px 1px 7px #000;
+ position: absolute;
+ top: -1em;
+ left:10em;
+ z-index: 1
+}
+
+#tooltiphint {
+ position: fixed;
+ width: 50em;
+ margin-left: -25em;
+ left: 50%;
+ padding: 10px;
+ border: 1px solid #b0b0b0;
+ border-radius: 2px;
+ box-shadow: 1px 1px 7px black;
+ background-color: #c0c0c0;
+ z-index: 2;
+}
+.macro {
+ color: darkmagenta;
+ background-color:LemonChiffon;
+ /* Macros are position: relative to provide base for expansions. */
+ position: relative;
+}
+
+.num { width:2.5em; padding-right:2ex; background-color:#eeeeee }
+.num { text-align:right; font-size:8pt }
+.num { color:#444444 }
+.line { padding-left: 1ex; border-left: 3px solid #ccc }
+.line { white-space: pre }
+.msg { -webkit-box-shadow:1px 1px 7px #000 }
+.msg { box-shadow:1px 1px 7px #000 }
+.msg { -webkit-border-radius:5px }
+.msg { border-radius:5px }
+.msg { font-family:Helvetica, sans-serif; font-size:8pt }
+.msg { float:left }
+.msg { padding:0.25em 1ex 0.25em 1ex }
+.msg { margin-top:10px; margin-bottom:10px }
+.msg { font-weight:bold }
+.msg { max-width:60em; word-wrap: break-word; white-space: pre-wrap }
+.msgT { padding:0px; spacing:0px }
+.msgEvent { background-color:#fff8b4; color:#000000 }
+.msgControl { background-color:#bbbbbb; color:#000000 }
+.msgNote { background-color:#ddeeff; color:#000000 }
+.mrange { background-color:#dfddf3 }
+.mrange { border-bottom:1px solid #6F9DBE }
+.PathIndex { font-weight: bold; padding:0px 5px; margin-right:5px; }
+.PathIndex { -webkit-border-radius:8px }
+.PathIndex { border-radius:8px }
+.PathIndexEvent { background-color:#bfba87 }
+.PathIndexControl { background-color:#8c8c8c }
+.PathNav a { text-decoration:none; font-size: larger }
+.CodeInsertionHint { font-weight: bold; background-color: #10dd10 }
+.CodeRemovalHint { background-color:#de1010 }
+.CodeRemovalHint { border-bottom:1px solid #6F9DBE }
+.selected{ background-color:orange !important; }
+
+table.simpletable {
+ padding: 5px;
+ font-size:12pt;
+ margin:20px;
+ border-collapse: collapse; border-spacing: 0px;
+}
+td.rowname {
+ text-align: right;
+ vertical-align: top;
+ font-weight: bold;
+ color:#444444;
+ padding-right:2ex;
+}
+
+/* Hidden text. */
+input.spoilerhider + label {
+ cursor: pointer;
+ text-decoration: underline;
+ display: block;
+}
+input.spoilerhider {
+ display: none;
+}
+input.spoilerhider ~ .spoiler {
+ overflow: hidden;
+ margin: 10px auto 0;
+ height: 0;
+ opacity: 0;
+}
+input.spoilerhider:checked + label + .spoiler{
+ height: auto;
+ opacity: 1;
+}
+</style>
+</head>
+<body>)<<<";
// Generate header
R.InsertTextBefore(StartLoc, os.str());
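The block above replaces dozens of concatenated, escaped string literals with one raw string literal whose custom `<<<` delimiter lets the CSS contain quotes and parentheses verbatim. The idiom in isolation:

    #include <iostream>

    int main() {
      // R"<<<( ... )<<<" uses "<<<" as the delimiter, so the body may
      // freely contain quotes and parentheses -- handy for inline CSS.
      const char *Css = R"<<<(
    .code { font-family: "Monospace", monospace; font-size:10pt }
    .macro:hover .expansion { display: block; }
    )<<<";
      std::cout << Css;
    }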
@@ -535,16 +590,15 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Okay, we have the first token of a macro expansion: highlight the
// expansion by inserting a start tag before the macro expansion and
// end tag after it.
- std::pair<SourceLocation, SourceLocation> LLoc =
- SM.getExpansionRange(Tok.getLocation());
+ CharSourceRange LLoc = SM.getExpansionRange(Tok.getLocation());
// Ignore tokens whose instantiation location was not the main file.
- if (SM.getFileID(LLoc.first) != FID) {
+ if (SM.getFileID(LLoc.getBegin()) != FID) {
TmpPP.Lex(Tok);
continue;
}
- assert(SM.getFileID(LLoc.second) == FID &&
+ assert(SM.getFileID(LLoc.getEnd()) == FID &&
"Start and end of expansion must be in the same ultimate file!");
std::string Expansion = EscapeText(TmpPP.getSpelling(Tok));
@@ -559,7 +613,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// instantiation. It would be really nice to pop up a window with all the
// spelling of the tokens or something.
while (!Tok.is(tok::eof) &&
- SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) {
+ SM.getExpansionLoc(Tok.getLocation()) == LLoc.getBegin()) {
// Insert a newline if the macro expansion is getting large.
if (LineLen > 60) {
Expansion += "<br>";
@@ -588,8 +642,8 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// highlighted.
Expansion = "<span class='expansion'>" + Expansion + "</span></span>";
- HighlightRange(R, LLoc.first, LLoc.second,
- "<span class='macro'>", Expansion.c_str());
+ HighlightRange(R, LLoc.getBegin(), LLoc.getEnd(), "<span class='macro'>",
+ Expansion.c_str(), LLoc.isTokenRange());
}
// Restore the preprocessor's old state.
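The HighlightMacros change works because getExpansionRange now returns a range object rather than a bare pair of locations, so the token-range flag travels with the endpoints and can be forwarded as LLoc.isTokenRange(). A sketch of that shape, with SourceLoc/Range as hypothetical stand-ins for the clang types:

    #include <cassert>

    using SourceLoc = unsigned;

    // Minimal analogue of CharSourceRange: endpoints plus the flag saying
    // whether the end is the start of a token.
    class Range {
      SourceLoc Begin, End;
      bool TokenRange;
    public:
      Range(SourceLoc B, SourceLoc E, bool IsTokenRange)
          : Begin(B), End(E), TokenRange(IsTokenRange) {}
      SourceLoc getBegin() const { return Begin; }
      SourceLoc getEnd() const { return End; }
      bool isTokenRange() const { return TokenRange; }
    };

    int main() {
      Range LLoc(10, 42, /*IsTokenRange=*/true);
      // Old pattern: LLoc.first / LLoc.second, with no token-range flag.
      // New pattern:
      assert(LLoc.getBegin() == 10 && LLoc.getEnd() == 42 &&
             LLoc.isTokenRange());
    }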
diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp
index 030ab7732fc3..5bc79f3eddc9 100644
--- a/lib/Rewrite/RewriteRope.cpp
+++ b/lib/Rewrite/RewriteRope.cpp
@@ -1,4 +1,4 @@
-//===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===//
+//===- RewriteRope.cpp - Rope specialized for rewriter --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,7 +13,11 @@
#include "clang/Rewrite/Core/RewriteRope.h"
#include "clang/Basic/LLVM.h"
+#include "llvm/Support/Casting.h"
#include <algorithm>
+#include <cassert>
+#include <cstring>
+
using namespace clang;
/// RewriteRope is a "strong" string class, designed to make insertions and
@@ -59,12 +63,12 @@ using namespace clang;
/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
/// up to '2*WidthFactor' other nodes in the tree.
+namespace {
//===----------------------------------------------------------------------===//
// RopePieceBTreeNode Class
//===----------------------------------------------------------------------===//
-namespace {
/// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and
/// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods
/// and a flag that determines which subclass the instance is. Also
@@ -82,13 +86,13 @@ namespace {
/// Size - This is the number of bytes of file this node (including any
/// potential children) covers.
- unsigned Size;
+ unsigned Size = 0;
/// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it
/// is an instance of RopePieceBTreeInterior.
bool IsLeaf;
- RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {}
+ RopePieceBTreeNode(bool isLeaf) : IsLeaf(isLeaf) {}
~RopePieceBTreeNode() = default;
public:
@@ -116,15 +120,12 @@ namespace {
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
-
};
-} // end anonymous namespace
//===----------------------------------------------------------------------===//
// RopePieceBTreeLeaf Class
//===----------------------------------------------------------------------===//
-namespace {
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
/// RopePieces concatenated. Since this is a B+Tree, all values (in this case
@@ -135,18 +136,19 @@ namespace {
class RopePieceBTreeLeaf : public RopePieceBTreeNode {
/// NumPieces - This holds the number of rope pieces currently active in the
/// Pieces array.
- unsigned char NumPieces;
+ unsigned char NumPieces = 0;
/// Pieces - This tracks the file chunks currently in this leaf.
- ///
RopePiece Pieces[2*WidthFactor];
/// NextLeaf - This is a pointer to the next leaf in the tree, allowing
/// efficient in-order forward iteration of the tree without traversal.
- RopePieceBTreeLeaf **PrevLeaf, *NextLeaf;
+ RopePieceBTreeLeaf **PrevLeaf = nullptr;
+ RopePieceBTreeLeaf *NextLeaf = nullptr;
+
public:
- RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0),
- PrevLeaf(nullptr), NextLeaf(nullptr) {}
+ RopePieceBTreeLeaf() : RopePieceBTreeNode(true) {}
+
~RopePieceBTreeLeaf() {
if (PrevLeaf || NextLeaf)
removeFromLeafInOrder();
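The leaf-node edits above move initial values from constructor init lists to default member initializers, shrinking the constructors to what actually varies. The pattern in miniature (illustrative names):

    // Initializers sit next to the members they cover; the constructor
    // has nothing left to do by hand.
    struct LeafSketch {
      unsigned char NumPieces = 0;    // was: NumPieces(0) in the init list
      LeafSketch *PrevLeaf = nullptr; // was: PrevLeaf(nullptr)
      LeafSketch *NextLeaf = nullptr; // was: NextLeaf(nullptr)
      LeafSketch() = default;
    };

    int main() {
      LeafSketch L;
      return L.NumPieces; // 0
    }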
@@ -170,6 +172,7 @@ namespace {
}
const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; }
+
void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) {
assert(!PrevLeaf && !NextLeaf && "Already in ordering");
@@ -214,16 +217,16 @@ namespace {
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
-
/// erase - Remove NumBytes from this node at the specified offset. We are
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- static inline bool classof(const RopePieceBTreeNode *N) {
+ static bool classof(const RopePieceBTreeNode *N) {
return N->isLeaf();
}
};
-} // end anonymous namespace
+
+} // namespace
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
@@ -266,7 +269,6 @@ RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) {
return insert(Offset, Tail);
}
-
/// insert - Insert the specified RopePiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the node.
///
@@ -388,18 +390,21 @@ void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) {
//===----------------------------------------------------------------------===//
namespace {
+
/// RopePieceBTreeInterior - This represents an interior node in the B+Tree,
/// which holds up to 2*WidthFactor pointers to child nodes.
class RopePieceBTreeInterior : public RopePieceBTreeNode {
/// NumChildren - This holds the number of children currently active in the
/// Children array.
- unsigned char NumChildren;
+ unsigned char NumChildren = 0;
+
RopePieceBTreeNode *Children[2*WidthFactor];
+
public:
- RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {}
+ RopePieceBTreeInterior() : RopePieceBTreeNode(false) {}
RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS)
- : RopePieceBTreeNode(false) {
+ : RopePieceBTreeNode(false) {
Children[0] = LHS;
Children[1] = RHS;
NumChildren = 2;
@@ -414,10 +419,12 @@ namespace {
bool isFull() const { return NumChildren == 2*WidthFactor; }
unsigned getNumChildren() const { return NumChildren; }
+
const RopePieceBTreeNode *getChild(unsigned i) const {
assert(i < NumChildren && "invalid child #");
return Children[i];
}
+
RopePieceBTreeNode *getChild(unsigned i) {
assert(i < NumChildren && "invalid child #");
return Children[i];
@@ -431,7 +438,6 @@ namespace {
Size += getChild(i)->size();
}
-
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
/// offset. The offset is relative, so "0" is the start of the node.
@@ -440,7 +446,6 @@ namespace {
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *split(unsigned Offset);
-
/// insert - Insert the specified ropepiece into this tree node at the
/// specified offset. The offset is relative, so "0" is the start of the
/// node.
@@ -457,11 +462,12 @@ namespace {
/// guaranteed that there is a split at Offset.
void erase(unsigned Offset, unsigned NumBytes);
- static inline bool classof(const RopePieceBTreeNode *N) {
+ static bool classof(const RopePieceBTreeNode *N) {
return !N->isLeaf();
}
};
-} // end anonymous namespace
+
+} // namespace
/// split - Split the range containing the specified offset so that we are
/// guaranteed that there is a place to do an insertion at the specified
@@ -613,7 +619,7 @@ void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) {
//===----------------------------------------------------------------------===//
void RopePieceBTreeNode::Destroy() {
- if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ if (auto *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
delete Leaf;
else
delete cast<RopePieceBTreeInterior>(this);
@@ -627,7 +633,7 @@ void RopePieceBTreeNode::Destroy() {
/// node is returned and must be inserted into a parent.
RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) {
assert(Offset <= size() && "Invalid offset to split!");
- if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ if (auto *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
return Leaf->split(Offset);
return cast<RopePieceBTreeInterior>(this)->split(Offset);
}
@@ -641,7 +647,7 @@ RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) {
RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset,
const RopePiece &R) {
assert(Offset <= size() && "Invalid offset to insert!");
- if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ if (auto *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
return Leaf->insert(Offset, R);
return cast<RopePieceBTreeInterior>(this)->insert(Offset, R);
}
@@ -650,12 +656,11 @@ RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset,
/// guaranteed that there is a split at Offset.
void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) {
assert(Offset+NumBytes <= size() && "Invalid offset to erase!");
- if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ if (auto *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
return Leaf->erase(Offset, NumBytes);
return cast<RopePieceBTreeInterior>(this)->erase(Offset, NumBytes);
}
-
//===----------------------------------------------------------------------===//
// RopePieceBTreeIterator Implementation
//===----------------------------------------------------------------------===//
@@ -666,10 +671,10 @@ static const RopePieceBTreeLeaf *getCN(const void *P) {
// begin iterator.
RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) {
- const RopePieceBTreeNode *N = static_cast<const RopePieceBTreeNode*>(n);
+ const auto *N = static_cast<const RopePieceBTreeNode *>(n);
// Walk down the left side of the tree until we get to a leaf.
- while (const RopePieceBTreeInterior *IN = dyn_cast<RopePieceBTreeInterior>(N))
+ while (const auto *IN = dyn_cast<RopePieceBTreeInterior>(N))
N = IN->getChild(0);
// We must have at least one leaf.
@@ -717,10 +722,12 @@ static RopePieceBTreeNode *getRoot(void *P) {
RopePieceBTree::RopePieceBTree() {
Root = new RopePieceBTreeLeaf();
}
+
RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) {
assert(RHS.empty() && "Can't copy non-empty tree yet");
Root = new RopePieceBTreeLeaf();
}
+
RopePieceBTree::~RopePieceBTree() {
getRoot(Root)->Destroy();
}
@@ -730,7 +737,7 @@ unsigned RopePieceBTree::size() const {
}
void RopePieceBTree::clear() {
- if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(getRoot(Root)))
+ if (auto *Leaf = dyn_cast<RopePieceBTreeLeaf>(getRoot(Root)))
Leaf->clear();
else {
getRoot(Root)->Destroy();
@@ -780,8 +787,7 @@ RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
// just allocate a new rope piece for it alone.
if (Len > AllocChunkSize) {
unsigned Size = End-Start+sizeof(RopeRefCountString)-1;
- RopeRefCountString *Res =
- reinterpret_cast<RopeRefCountString *>(new char[Size]);
+ auto *Res = reinterpret_cast<RopeRefCountString *>(new char[Size]);
Res->RefCount = 0;
memcpy(Res->Data, Start, End-Start);
return RopePiece(Res, 0, End-Start);
@@ -791,8 +797,7 @@ RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
// Make a new chunk and share it with later allocations.
unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize;
- RopeRefCountString *Res =
- reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
+ auto *Res = reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
Res->RefCount = 0;
memcpy(Res->Data, Start, Len);
AllocBuffer = Res;
@@ -800,5 +805,3 @@ RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
return RopePiece(AllocBuffer, 0, Len);
}
-
-
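MakeRopeString carves a header-plus-character-payload object out of one raw char allocation sized with offsetof. A self-contained sketch of that trailing-array pattern; RefString is a hypothetical stand-in for RopeRefCountString, and the reinterpret_cast mirrors the source's idiom rather than strictly conforming C++:

    #include <cstddef>
    #include <cstring>
    #include <iostream>

    struct RefString {
      unsigned RefCount;
      char Data[1]; // trailing payload; real size chosen at allocation time
    };

    static RefString *makeRefString(const char *Start, const char *End) {
      size_t Len = End - Start;
      // Size the header plus payload in one shot, as MakeRopeString does.
      size_t Size = offsetof(RefString, Data) + Len + 1;
      auto *Res = reinterpret_cast<RefString *>(new char[Size]);
      Res->RefCount = 0;
      memcpy(Res->Data, Start, Len);
      Res->Data[Len] = '\0';
      return Res;
    }

    int main() {
      const char *S = "rope piece";
      RefString *R = makeRefString(S, S + strlen(S));
      std::cout << R->Data << '\n';
      delete[] reinterpret_cast<char *>(R); // matches the char[] allocation
    }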
diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp
index ae41decc64a3..cb59a161fe68 100644
--- a/lib/Rewrite/Rewriter.cpp
+++ b/lib/Rewrite/Rewriter.cpp
@@ -1,4 +1,4 @@
-//===--- Rewriter.cpp - Code rewriting interface --------------------------===//
+//===- Rewriter.cpp - Code rewriting interface ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,11 +15,24 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/RewriteBuffer.h"
+#include "clang/Rewrite/Core/RewriteRope.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <system_error>
+#include <utility>
+
using namespace clang;
raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
@@ -31,7 +44,7 @@ raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
return os;
}
-/// \brief Return true if this character is non-new-line whitespace:
+/// Return true if this character is non-new-line whitespace:
/// ' ', '\\t', '\\f', '\\v', '\\r'.
static inline bool isWhitespaceExceptNL(unsigned char c) {
switch (c) {
@@ -91,7 +104,6 @@ void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size,
void RewriteBuffer::InsertText(unsigned OrigOffset, StringRef Str,
bool InsertAfter) {
-
// Nothing to insert, exit early.
if (Str.empty()) return;
@@ -114,7 +126,6 @@ void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
AddReplaceDelta(OrigOffset, NewStr.size() - OrigLength);
}
-
//===----------------------------------------------------------------------===//
// Rewriter class
//===----------------------------------------------------------------------===//
@@ -127,10 +138,8 @@ int Rewriter::getRangeSize(const CharSourceRange &Range,
!isRewritable(Range.getEnd())) return -1;
FileID StartFileID, EndFileID;
- unsigned StartOff, EndOff;
-
- StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
- EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+ unsigned StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ unsigned EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
if (StartFileID != EndFileID)
return -1;
@@ -145,7 +154,6 @@ int Rewriter::getRangeSize(const CharSourceRange &Range,
StartOff = RB.getMappedOffset(StartOff, !opts.IncludeInsertsAtBeginOfRange);
}
-
// Adjust the end offset to the end of the last token, instead of being the
// start of the last token if this is a token range.
if (Range.isTokenRange())
@@ -158,17 +166,15 @@ int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const {
return getRangeSize(CharSourceRange::getTokenRange(Range), opts);
}
-
/// getRewrittenText - Return the rewritten form of the text in the specified
/// range. If the start or end of the range was unrewritable or if they are
/// in different buffers, this returns an empty string.
///
/// Note that this method is not particularly efficient.
-///
std::string Rewriter::getRewrittenText(SourceRange Range) const {
if (!isRewritable(Range.getBegin()) ||
!isRewritable(Range.getEnd()))
- return "";
+ return {};
FileID StartFileID, EndFileID;
unsigned StartOff, EndOff;
@@ -176,7 +182,7 @@ std::string Rewriter::getRewrittenText(SourceRange Range) const {
EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
if (StartFileID != EndFileID)
- return ""; // Start and end in different buffers.
+ return {}; // Start and end in different buffers.
// If edits have been made to this buffer, the delta between the range may
// have changed.
@@ -212,14 +218,12 @@ std::string Rewriter::getRewrittenText(SourceRange Range) const {
unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc,
FileID &FID) const {
assert(Loc.isValid() && "Invalid location");
- std::pair<FileID,unsigned> V = SourceMgr->getDecomposedLoc(Loc);
+ std::pair<FileID, unsigned> V = SourceMgr->getDecomposedLoc(Loc);
FID = V.first;
return V.second;
}
-
/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID.
-///
RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
std::map<FileID, RewriteBuffer>::iterator I =
RewriteBuffers.lower_bound(FID);
@@ -393,6 +397,7 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
}
namespace {
+
// A wrapper for a file stream that atomically overwrites the target.
//
// Creates a file output stream for a temporary file in the constructor,
@@ -403,7 +408,7 @@ class AtomicallyMovedFile {
public:
AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename,
bool &AllWritten)
- : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
+ : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
TempFilename = Filename;
TempFilename += "-%%%%%%%%";
int FD;
@@ -441,7 +446,8 @@ private:
std::unique_ptr<llvm::raw_fd_ostream> FileStream;
bool &AllWritten;
};
-} // end anonymous namespace
+
+} // namespace
bool Rewriter::overwriteChangedFiles() {
bool AllWritten = true;
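AtomicallyMovedFile writes the new contents to a temporary sibling file and then moves it over the target, so readers never observe a half-written result. A trimmed sketch of the idea, assuming POSIX rename semantics and omitting the diagnostics plumbing:

    #include <cstdio>
    #include <string>

    static bool overwriteAtomically(const std::string &Target,
                                    const std::string &Contents) {
      std::string Tmp = Target + ".tmp"; // sibling temp file
      if (FILE *F = std::fopen(Tmp.c_str(), "wb")) {
        std::fwrite(Contents.data(), 1, Contents.size(), F);
        std::fclose(F);
        // The atomic step: rename replaces the target in one shot (POSIX).
        return std::rename(Tmp.c_str(), Target.c_str()) == 0;
      }
      return false;
    }

    int main() { return overwriteAtomically("out.txt", "rewritten\n") ? 0 : 1; }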
diff --git a/lib/Rewrite/TokenRewriter.cpp b/lib/Rewrite/TokenRewriter.cpp
index 494defdedaa9..1f5dec499c92 100644
--- a/lib/Rewrite/TokenRewriter.cpp
+++ b/lib/Rewrite/TokenRewriter.cpp
@@ -1,4 +1,4 @@
-//===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===//
+//===- TokenRewriter.cpp - Token-based code rewriting interface -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,6 +16,12 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Lex/Token.h"
+#include <cassert>
+#include <cstring>
+#include <map>
+#include <utility>
+
using namespace clang;
TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
@@ -46,9 +52,7 @@ TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
}
}
-TokenRewriter::~TokenRewriter() {
-}
-
+TokenRewriter::~TokenRewriter() = default;
/// RemapIterator - Convert from token_iterator (a const iterator) to
/// TokenRefTy (a non-const iterator).
@@ -63,7 +67,6 @@ TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) {
return MapIt->second;
}
-
/// AddToken - Add the specified token into the Rewriter before the other
/// position.
TokenRewriter::TokenRefTy
@@ -77,7 +80,6 @@ TokenRewriter::AddToken(const Token &T, TokenRefTy Where) {
return Where;
}
-
TokenRewriter::token_iterator
TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) {
unsigned Len = strlen(Val);
@@ -96,4 +98,3 @@ TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) {
return AddToken(Tok, RemapIterator(I));
}
-
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index 0033edf326ac..82d9df25d934 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -122,7 +122,7 @@ static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
}
namespace {
-/// \brief Warn on logical operator errors in CFGBuilder
+/// Warn on logical operator errors in CFGBuilder
class LogicalErrorHandler : public CFGCallback {
Sema &S;
@@ -200,60 +200,41 @@ static bool hasRecursiveCallInPath(const FunctionDecl *FD, CFGBlock &Block) {
return false;
}
-// All blocks are in one of three states. States are ordered so that blocks
-// can only move to higher states.
-enum RecursiveState {
- FoundNoPath,
- FoundPath,
- FoundPathWithNoRecursiveCall
-};
-
-// Returns true if there exists a path to the exit block and every path
-// to the exit block passes through a call to FD.
+// Returns true if every path from the entry block passes through a call to FD.
static bool checkForRecursiveFunctionCall(const FunctionDecl *FD, CFG *cfg) {
+ llvm::SmallPtrSet<CFGBlock *, 16> Visited;
+ llvm::SmallVector<CFGBlock *, 16> WorkList;
+ // Keep track of whether we found at least one recursive path.
+ bool foundRecursion = false;
const unsigned ExitID = cfg->getExit().getBlockID();
- // Mark all nodes as FoundNoPath, then set the status of the entry block.
- SmallVector<RecursiveState, 16> States(cfg->getNumBlockIDs(), FoundNoPath);
- States[cfg->getEntry().getBlockID()] = FoundPathWithNoRecursiveCall;
-
- // Make the processing stack and seed it with the entry block.
- SmallVector<CFGBlock *, 16> Stack;
- Stack.push_back(&cfg->getEntry());
-
- while (!Stack.empty()) {
- CFGBlock *CurBlock = Stack.back();
- Stack.pop_back();
+ // Seed the work list with the entry block.
+ WorkList.push_back(&cfg->getEntry());
- unsigned ID = CurBlock->getBlockID();
- RecursiveState CurState = States[ID];
+ while (!WorkList.empty()) {
+ CFGBlock *Block = WorkList.pop_back_val();
- if (CurState == FoundPathWithNoRecursiveCall) {
- // Found a path to the exit node without a recursive call.
- if (ExitID == ID)
- return false;
+ for (auto I = Block->succ_begin(), E = Block->succ_end(); I != E; ++I) {
+ if (CFGBlock *SuccBlock = *I) {
+ if (!Visited.insert(SuccBlock).second)
+ continue;
- // Only change state if the block has a recursive call.
- if (hasRecursiveCallInPath(FD, *CurBlock))
- CurState = FoundPath;
- }
+ // Found a path to the exit node without a recursive call.
+ if (ExitID == SuccBlock->getBlockID())
+ return false;
- // Loop over successor blocks and add them to the Stack if their state
- // changes.
- for (auto I = CurBlock->succ_begin(), E = CurBlock->succ_end(); I != E; ++I)
- if (*I) {
- unsigned next_ID = (*I)->getBlockID();
- if (States[next_ID] < CurState) {
- States[next_ID] = CurState;
- Stack.push_back(*I);
+ // If the successor block contains a recursive call, end analysis there.
+ if (hasRecursiveCallInPath(FD, *SuccBlock)) {
+ foundRecursion = true;
+ continue;
}
+
+ WorkList.push_back(SuccBlock);
}
+ }
}
-
- // Return true if the exit node is reachable, and only reachable through
- // a recursive call.
- return States[ExitID] == FoundPath;
+ return foundRecursion;
}
static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
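The rewritten recursion check is a plain worklist walk: visit each CFG successor once, fail fast if the exit is reachable before any recursive call, and otherwise report whether any path recursed. A stand-alone sketch with a hypothetical Block type in place of clang's CFGBlock:

    #include <unordered_set>
    #include <vector>

    struct Block {
      bool HasRecursiveCall = false;
      bool IsExit = false;
      std::vector<Block *> Succs;
    };

    static bool everyPathRecurses(Block *Entry) {
      std::unordered_set<Block *> Visited;
      std::vector<Block *> WorkList{Entry};
      bool FoundRecursion = false;
      while (!WorkList.empty()) {
        Block *B = WorkList.back();
        WorkList.pop_back();
        for (Block *Succ : B->Succs) {
          if (!Visited.insert(Succ).second)
            continue;
          if (Succ->IsExit)   // a path reached the exit with no recursion
            return false;
          if (Succ->HasRecursiveCall) { // end this path at the call
            FoundRecursion = true;
            continue;
          }
          WorkList.push_back(Succ);
        }
      }
      return FoundRecursion;
    }

    int main() {
      Block Exit{false, true, {}};
      Block Rec{true, false, {&Exit}};
      Block Entry{false, false, {&Rec}};
      return everyPathRecurses(&Entry) ? 0 : 1; // 0: every path recurses
    }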
@@ -269,10 +250,6 @@ static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
CFG *cfg = AC.getCFG();
if (!cfg) return;
- // If the exit block is unreachable, skip processing the function.
- if (cfg->getExit().pred_empty())
- return;
-
// Emit diagnostic if a recursive function call is detected for all paths.
if (checkForRecursiveFunctionCall(FD, cfg))
S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function);
@@ -281,114 +258,62 @@ static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
//===----------------------------------------------------------------------===//
// Check for throw in a non-throwing function.
//===----------------------------------------------------------------------===//
-enum ThrowState {
- FoundNoPathForThrow,
- FoundPathForThrow,
- FoundPathWithNoThrowOutFunction,
-};
-static bool isThrowCaught(const CXXThrowExpr *Throw,
- const CXXCatchStmt *Catch) {
- const Type *CaughtType = Catch->getCaughtType().getTypePtrOrNull();
- if (!CaughtType)
- return true;
- const Type *ThrowType = nullptr;
- if (Throw->getSubExpr())
- ThrowType = Throw->getSubExpr()->getType().getTypePtrOrNull();
- if (!ThrowType)
- return false;
- if (ThrowType->isReferenceType())
- ThrowType = ThrowType->castAs<ReferenceType>()
- ->getPointeeType()
- ->getUnqualifiedDesugaredType();
- if (CaughtType->isReferenceType())
- CaughtType = CaughtType->castAs<ReferenceType>()
- ->getPointeeType()
- ->getUnqualifiedDesugaredType();
- if (ThrowType->isPointerType() && CaughtType->isPointerType()) {
- ThrowType = ThrowType->getPointeeType()->getUnqualifiedDesugaredType();
- CaughtType = CaughtType->getPointeeType()->getUnqualifiedDesugaredType();
- }
- if (CaughtType == ThrowType)
- return true;
- const CXXRecordDecl *CaughtAsRecordType =
- CaughtType->getAsCXXRecordDecl();
- const CXXRecordDecl *ThrowTypeAsRecordType = ThrowType->getAsCXXRecordDecl();
- if (CaughtAsRecordType && ThrowTypeAsRecordType)
- return ThrowTypeAsRecordType->isDerivedFrom(CaughtAsRecordType);
- return false;
-}
+/// Determine whether an exception thrown by E, unwinding from ThrowBlock,
+/// can reach ExitBlock.
+static bool throwEscapes(Sema &S, const CXXThrowExpr *E, CFGBlock &ThrowBlock,
+ CFG *Body) {
+ SmallVector<CFGBlock *, 16> Stack;
+ llvm::BitVector Queued(Body->getNumBlockIDs());
-static bool isThrowCaughtByHandlers(const CXXThrowExpr *CE,
- const CXXTryStmt *TryStmt) {
- for (unsigned H = 0, E = TryStmt->getNumHandlers(); H < E; ++H) {
- if (isThrowCaught(CE, TryStmt->getHandler(H)))
- return true;
- }
- return false;
-}
+ Stack.push_back(&ThrowBlock);
+ Queued[ThrowBlock.getBlockID()] = true;
-static bool doesThrowEscapePath(CFGBlock Block, SourceLocation &OpLoc) {
- for (const auto &B : Block) {
- if (B.getKind() != CFGElement::Statement)
- continue;
- const auto *CE = dyn_cast<CXXThrowExpr>(B.getAs<CFGStmt>()->getStmt());
- if (!CE)
- continue;
+ while (!Stack.empty()) {
+ CFGBlock &UnwindBlock = *Stack.back();
+ Stack.pop_back();
- OpLoc = CE->getThrowLoc();
- for (const auto &I : Block.succs()) {
- if (!I.isReachable())
+ for (auto &Succ : UnwindBlock.succs()) {
+ if (!Succ.isReachable() || Queued[Succ->getBlockID()])
continue;
- if (const auto *Terminator =
- dyn_cast_or_null<CXXTryStmt>(I->getTerminator()))
- if (isThrowCaughtByHandlers(CE, Terminator))
- return false;
+
+ if (Succ->getBlockID() == Body->getExit().getBlockID())
+ return true;
+
+ if (auto *Catch =
+ dyn_cast_or_null<CXXCatchStmt>(Succ->getLabel())) {
+ QualType Caught = Catch->getCaughtType();
+ if (Caught.isNull() || // catch (...) catches everything
+ !E->getSubExpr() || // throw; is considered caught by any handler
+ S.handlerCanCatch(Caught, E->getSubExpr()->getType()))
+ // Exception doesn't escape via this path.
+ break;
+ } else {
+ Stack.push_back(Succ);
+ Queued[Succ->getBlockID()] = true;
+ }
}
- return true;
}
+
return false;
}
-static bool hasThrowOutNonThrowingFunc(SourceLocation &OpLoc, CFG *BodyCFG) {
-
- unsigned ExitID = BodyCFG->getExit().getBlockID();
-
- SmallVector<ThrowState, 16> States(BodyCFG->getNumBlockIDs(),
- FoundNoPathForThrow);
- States[BodyCFG->getEntry().getBlockID()] = FoundPathWithNoThrowOutFunction;
-
- SmallVector<CFGBlock *, 16> Stack;
- Stack.push_back(&BodyCFG->getEntry());
- while (!Stack.empty()) {
- CFGBlock *CurBlock = Stack.pop_back_val();
-
- unsigned ID = CurBlock->getBlockID();
- ThrowState CurState = States[ID];
- if (CurState == FoundPathWithNoThrowOutFunction) {
- if (ExitID == ID)
+static void visitReachableThrows(
+ CFG *BodyCFG,
+ llvm::function_ref<void(const CXXThrowExpr *, CFGBlock &)> Visit) {
+ llvm::BitVector Reachable(BodyCFG->getNumBlockIDs());
+ clang::reachable_code::ScanReachableFromBlock(&BodyCFG->getEntry(), Reachable);
+ for (CFGBlock *B : *BodyCFG) {
+ if (!Reachable[B->getBlockID()])
+ continue;
+ for (CFGElement &E : *B) {
+ Optional<CFGStmt> S = E.getAs<CFGStmt>();
+ if (!S)
continue;
-
- if (doesThrowEscapePath(*CurBlock, OpLoc))
- CurState = FoundPathForThrow;
+ if (auto *Throw = dyn_cast<CXXThrowExpr>(S->getStmt()))
+ Visit(Throw, *B);
}
-
- // Loop over successor blocks and add them to the Stack if their state
- // changes.
- for (const auto &I : CurBlock->succs())
- if (I.isReachable()) {
- unsigned NextID = I->getBlockID();
- if (NextID == ExitID && CurState == FoundPathForThrow) {
- States[NextID] = CurState;
- } else if (States[NextID] < CurState) {
- States[NextID] = CurState;
- Stack.push_back(I);
- }
- }
}
- // Return true if the exit node is reachable, and only reachable through
- // a throw expression.
- return States[ExitID] == FoundPathForThrow;
}
static void EmitDiagForCXXThrowInNonThrowingFunc(Sema &S, SourceLocation OpLoc,
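visitReachableThrows separates the traversal (reachable blocks, throw statements) from the policy applied to each throw, which the caller supplies as a callback. A sketch of that shape using std::function in place of llvm::function_ref, with illustrative ThrowExpr/BlockT types:

    #include <functional>
    #include <iostream>
    #include <vector>

    struct ThrowExpr { int Loc; };
    struct BlockT { bool Reachable; std::vector<ThrowExpr> Throws; };

    // Walk only reachable blocks and hand each throw plus its block to the
    // callback; the diagnostics policy stays out of the traversal.
    static void visitReachableThrows(
        const std::vector<BlockT> &CFG,
        const std::function<void(const ThrowExpr &, const BlockT &)> &Visit) {
      for (const BlockT &B : CFG) {
        if (!B.Reachable)
          continue;
        for (const ThrowExpr &T : B.Throws)
          Visit(T, B);
      }
    }

    int main() {
      std::vector<BlockT> CFG{{true, {{7}}}, {false, {{9}}}};
      visitReachableThrows(CFG, [](const ThrowExpr &T, const BlockT &) {
        std::cout << "throw at " << T.Loc << '\n'; // only "throw at 7"
      });
    }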
@@ -418,14 +343,15 @@ static void checkThrowInNonThrowingFunc(Sema &S, const FunctionDecl *FD,
return;
if (BodyCFG->getExit().pred_empty())
return;
- SourceLocation OpLoc;
- if (hasThrowOutNonThrowingFunc(OpLoc, BodyCFG))
- EmitDiagForCXXThrowInNonThrowingFunc(S, OpLoc, FD);
+ visitReachableThrows(BodyCFG, [&](const CXXThrowExpr *Throw, CFGBlock &Block) {
+ if (throwEscapes(S, Throw, Block, BodyCFG))
+ EmitDiagForCXXThrowInNonThrowingFunc(S, Throw->getThrowLoc(), FD);
+ });
}
static bool isNoexcept(const FunctionDecl *FD) {
const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
- if (FPT->isNothrow(FD->getASTContext()) || FD->hasAttr<NoThrowAttr>())
+ if (FPT->isNothrow() || FD->hasAttr<NoThrowAttr>())
return true;
return false;
}
@@ -491,9 +417,10 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
CFGBlock::FilterOptions FO;
FO.IgnoreDefaultsWithCoveredEnums = 1;
- for (CFGBlock::filtered_pred_iterator
- I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
- const CFGBlock& B = **I;
+ for (CFGBlock::filtered_pred_iterator I =
+ cfg->getExit().filtered_pred_start_end(FO);
+ I.hasMore(); ++I) {
+ const CFGBlock &B = **I;
if (!live[B.getBlockID()])
continue;
@@ -683,18 +610,19 @@ struct CheckFallThroughDiagnostics {
} // anonymous namespace
-/// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a
+/// CheckFallThroughForBody - Check that we don't fall off the end of a
/// function that should return a value. Check that we don't fall off the end
/// of a noreturn function. We assume that functions and blocks not marked
/// noreturn will return.
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
const BlockExpr *blkExpr,
- const CheckFallThroughDiagnostics& CD,
- AnalysisDeclContext &AC) {
+ const CheckFallThroughDiagnostics &CD,
+ AnalysisDeclContext &AC,
+ sema::FunctionScopeInfo *FSI) {
bool ReturnsVoid = false;
bool HasNoReturn = false;
- bool IsCoroutine = S.getCurFunction() && S.getCurFunction()->isCoroutine();
+ bool IsCoroutine = FSI->isCoroutine();
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (const auto *CBody = dyn_cast<CoroutineBodyStmt>(Body))
@@ -726,10 +654,15 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
SourceLocation LBrace = Body->getLocStart(), RBrace = Body->getLocEnd();
auto EmitDiag = [&](SourceLocation Loc, unsigned DiagID) {
if (IsCoroutine)
- S.Diag(Loc, DiagID) << S.getCurFunction()->CoroutinePromise->getType();
+ S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType();
else
S.Diag(Loc, DiagID);
};
+
+ // cpu_dispatch functions permit empty function bodies for ICC compatibility.
+ if (D->getAsFunction() && D->getAsFunction()->isCPUDispatchMultiVersion())
+ return;
+
// Either in a function body compound statement, or a function-try-block.
switch (CheckFallThrough(AC)) {
case UnknownFallThrough:
@@ -1461,8 +1394,8 @@ static void diagnoseRepeatedUseOfWeak(Sema &S,
// Sort by first use so that we emit the warnings in a deterministic order.
SourceManager &SM = S.getSourceManager();
- std::sort(UsesByStmt.begin(), UsesByStmt.end(),
- [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
+ llvm::sort(UsesByStmt.begin(), UsesByStmt.end(),
+ [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
RHS.first->getLocStart());
});
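std::sort gives way to llvm::sort here; as I understand it, llvm::sort is a drop-in wrapper that, in expensive-checks builds, shuffles the range first, flushing out comparators that order ties nondeterministically. A sketch of that failure class, with std::reverse standing in for the shuffle:

    #include <algorithm>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<int, int>> Uses{{1, 0}, {1, 1}, {2, 2}};
      std::reverse(Uses.begin(), Uses.end()); // stand-in for debug shuffle
      // Comparing only by .first leaves ties in unspecified order -- the
      // bug class this guards against for deterministic diagnostics.
      std::sort(Uses.begin(), Uses.end(),
                [](const auto &L, const auto &R) { return L.first < R.first; });
      for (const auto &U : Uses)
        std::cout << U.first << ':' << U.second << ' ';
      std::cout << '\n';
    }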
@@ -1600,8 +1533,8 @@ public:
// Sort the uses by their SourceLocations. While not strictly
// guaranteed to produce them in line/column order, this will provide
// a stable ordering.
- std::sort(vec->begin(), vec->end(),
- [](const UninitUse &a, const UninitUse &b) {
+ llvm::sort(vec->begin(), vec->end(),
+ [](const UninitUse &a, const UninitUse &b) {
// Prefer a more confident report over a less confident one.
if (a.getKind() != b.getKind())
return a.getKind() > b.getKind();
@@ -1674,7 +1607,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
if (Verbose && CurrentFunction) {
PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
S.PDiag(diag::note_thread_warning_in_fun)
- << CurrentFunction->getNameAsString());
+ << CurrentFunction);
return OptionalNotes(1, FNote);
}
return OptionalNotes();
@@ -1685,7 +1618,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
if (Verbose && CurrentFunction) {
PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
S.PDiag(diag::note_thread_warning_in_fun)
- << CurrentFunction->getNameAsString());
+ << CurrentFunction);
ONS.push_back(std::move(FNote));
}
return ONS;
@@ -1699,7 +1632,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
if (Verbose && CurrentFunction) {
PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
S.PDiag(diag::note_thread_warning_in_fun)
- << CurrentFunction->getNameAsString());
+ << CurrentFunction);
ONS.push_back(std::move(FNote));
}
return ONS;
@@ -1723,7 +1656,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
void setVerbose(bool b) { Verbose = b; }
- /// \brief Emit all buffered diagnostics in order of sourcelocation.
+ /// Emit all buffered diagnostics in order of source location.
/// We need to output diagnostics produced while iterating through
/// the lockset in deterministic order, so this function orders diagnostics
/// and outputs them.
@@ -1815,7 +1748,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
diag::warn_variable_requires_any_lock:
diag::warn_var_deref_requires_any_lock;
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
- << D->getNameAsString() << getLockKindFromAccessKind(AK));
+ << D << getLockKindFromAccessKind(AK));
Warnings.emplace_back(std::move(Warning), getNotes());
}
@@ -1843,7 +1776,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
break;
}
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
- << D->getNameAsString()
+ << D
<< LockName << LK);
PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
<< *PossibleMatch);
@@ -1873,12 +1806,11 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
break;
}
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
- << D->getNameAsString()
+ << D
<< LockName << LK);
if (Verbose && POK == POK_VarAccess) {
PartialDiagnosticAt Note(D->getLocation(),
- S.PDiag(diag::note_guarded_by_declared_here)
- << D->getNameAsString());
+ S.PDiag(diag::note_guarded_by_declared_here));
Warnings.emplace_back(std::move(Warning), getNotes(Note));
} else
Warnings.emplace_back(std::move(Warning), getNotes());
@@ -2194,7 +2126,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
: (fscope->isCoroutine()
? CheckFallThroughDiagnostics::MakeForCoroutine(D)
: CheckFallThroughDiagnostics::MakeForFunction(D)));
- CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
+ CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC, fscope);
}
// Warning: check for unreachable code
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 7d9ae621c93d..bad42a2ce252 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -9,13 +9,13 @@ endif()
add_clang_library(clangSema
AnalysisBasedWarnings.cpp
- AttributeList.cpp
CodeCompleteConsumer.cpp
DeclSpec.cpp
DelayedDiagnostic.cpp
IdentifierResolver.cpp
JumpDiagnostics.cpp
MultiplexExternalSemaSource.cpp
+ ParsedAttr.cpp
Scope.cpp
ScopeInfo.cpp
Sema.cpp
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 3431ddcf70a2..9c4d315a692f 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -1,4 +1,4 @@
-//===--- CodeCompleteConsumer.cpp - Code Completion Interface ---*- C++ -*-===//
+//===- CodeCompleteConsumer.cpp - Code Completion Interface ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,21 +10,30 @@
// This file implements the CodeCompleteConsumer class.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang-c/Index.h"
-#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
-#include "clang/Sema/Scope.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Sema/Sema.h"
#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <cstring>
-#include <functional>
+#include <cassert>
+#include <cstdint>
+#include <string>
using namespace clang;
@@ -33,7 +42,7 @@ using namespace clang;
//===----------------------------------------------------------------------===//
bool CodeCompletionContext::wantConstructorResults() const {
- switch (Kind) {
+ switch (CCKind) {
case CCC_Recovery:
case CCC_Statement:
case CCC_Expression:
@@ -76,12 +85,87 @@ bool CodeCompletionContext::wantConstructorResults() const {
llvm_unreachable("Invalid CodeCompletionContext::Kind!");
}
+StringRef clang::getCompletionKindString(CodeCompletionContext::Kind Kind) {
+ using CCKind = CodeCompletionContext::Kind;
+ switch (Kind) {
+ case CCKind::CCC_Other:
+ return "Other";
+ case CCKind::CCC_OtherWithMacros:
+ return "OtherWithMacros";
+ case CCKind::CCC_TopLevel:
+ return "TopLevel";
+ case CCKind::CCC_ObjCInterface:
+ return "ObjCInterface";
+ case CCKind::CCC_ObjCImplementation:
+ return "ObjCImplementation";
+ case CCKind::CCC_ObjCIvarList:
+ return "ObjCIvarList";
+ case CCKind::CCC_ClassStructUnion:
+ return "ClassStructUnion";
+ case CCKind::CCC_Statement:
+ return "Statement";
+ case CCKind::CCC_Expression:
+ return "Expression";
+ case CCKind::CCC_ObjCMessageReceiver:
+ return "ObjCMessageReceiver";
+ case CCKind::CCC_DotMemberAccess:
+ return "DotMemberAccess";
+ case CCKind::CCC_ArrowMemberAccess:
+ return "ArrowMemberAccess";
+ case CCKind::CCC_ObjCPropertyAccess:
+ return "ObjCPropertyAccess";
+ case CCKind::CCC_EnumTag:
+ return "EnumTag";
+ case CCKind::CCC_UnionTag:
+ return "UnionTag";
+ case CCKind::CCC_ClassOrStructTag:
+ return "ClassOrStructTag";
+ case CCKind::CCC_ObjCProtocolName:
+ return "ObjCProtocolName";
+ case CCKind::CCC_Namespace:
+ return "Namespace";
+ case CCKind::CCC_Type:
+ return "Type";
+ case CCKind::CCC_Name:
+ return "Name";
+ case CCKind::CCC_PotentiallyQualifiedName:
+ return "PotentiallyQualifiedName";
+ case CCKind::CCC_MacroName:
+ return "MacroName";
+ case CCKind::CCC_MacroNameUse:
+ return "MacroNameUse";
+ case CCKind::CCC_PreprocessorExpression:
+ return "PreprocessorExpression";
+ case CCKind::CCC_PreprocessorDirective:
+ return "PreprocessorDirective";
+ case CCKind::CCC_NaturalLanguage:
+ return "NaturalLanguage";
+ case CCKind::CCC_SelectorName:
+ return "SelectorName";
+ case CCKind::CCC_TypeQualifiers:
+ return "TypeQualifiers";
+ case CCKind::CCC_ParenthesizedExpression:
+ return "ParenthesizedExpression";
+ case CCKind::CCC_ObjCInstanceMessage:
+ return "ObjCInstanceMessage";
+ case CCKind::CCC_ObjCClassMessage:
+ return "ObjCClassMessage";
+ case CCKind::CCC_ObjCInterfaceName:
+ return "ObjCInterfaceName";
+ case CCKind::CCC_ObjCCategoryName:
+ return "ObjCCategoryName";
+ case CCKind::CCC_Recovery:
+ return "Recovery";
+ }
+ llvm_unreachable("Invalid CodeCompletionContext::Kind!");
+}
+
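getCompletionKindString follows the exhaustive-switch naming pattern: no default case, so -Wswitch warns when a new enumerator is added, and the unreachable marker sits after the switch. In miniature:

    #include <cstdlib>
    #include <iostream>

    enum class Kind { Other, Statement, Expression };

    static const char *getKindString(Kind K) {
      switch (K) { // no default: the compiler flags missing enumerators
      case Kind::Other:      return "Other";
      case Kind::Statement:  return "Statement";
      case Kind::Expression: return "Expression";
      }
      std::abort(); // stand-in for llvm_unreachable("Invalid Kind!")
    }

    int main() { std::cout << getKindString(Kind::Statement) << '\n'; }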
//===----------------------------------------------------------------------===//
// Code completion string implementation
//===----------------------------------------------------------------------===//
+
CodeCompletionString::Chunk::Chunk(ChunkKind Kind, const char *Text)
- : Kind(Kind), Text("")
-{
+ : Kind(Kind), Text("") {
switch (Kind) {
case CK_TypedText:
case CK_Text:
@@ -195,10 +279,9 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
unsigned NumAnnotations,
StringRef ParentName,
const char *BriefComment)
- : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
- Priority(Priority), Availability(Availability),
- ParentName(ParentName), BriefComment(BriefComment)
-{
+ : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
+ Priority(Priority), Availability(Availability),
+ ParentName(ParentName), BriefComment(BriefComment) {
assert(NumChunks <= 0xffff);
assert(NumAnnotations <= 0xffff);
@@ -222,7 +305,6 @@ const char *CodeCompletionString::getAnnotation(unsigned AnnotationNr) const {
return nullptr;
}
-
std::string CodeCompletionString::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
@@ -267,7 +349,7 @@ const char *CodeCompletionAllocator::CopyString(const Twine &String) {
StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
const NamedDecl *ND = dyn_cast<NamedDecl>(DC);
if (!ND)
- return StringRef();
+ return {};
// Check whether we've already cached the parent name.
StringRef &CachedParentName = ParentNames[DC];
@@ -277,7 +359,7 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
// If we already processed this DeclContext and assigned empty to it, the
// data pointer will be non-null.
if (CachedParentName.data() != nullptr)
- return StringRef();
+ return {};
// Find the interesting names.
SmallVector<const DeclContext *, 2> Contexts;
@@ -311,7 +393,7 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
// Assign an empty StringRef but with non-null data to distinguish
// between empty because we didn't process the DeclContext yet.
CachedParentName = StringRef((const char *)(uintptr_t)~0U, 0);
- return StringRef();
+ return {};
}
OS << Interface->getName() << '(' << Cat->getName() << ')';
@@ -375,9 +457,8 @@ void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK,
}
void CodeCompletionBuilder::addParentContext(const DeclContext *DC) {
- if (DC->isTranslationUnit()) {
+ if (DC->isTranslationUnit())
return;
- }
if (DC->isFunctionOrMethod())
return;
@@ -427,25 +508,21 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
// Code completion consumer implementation
//===----------------------------------------------------------------------===//
-CodeCompleteConsumer::~CodeCompleteConsumer() { }
+CodeCompleteConsumer::~CodeCompleteConsumer() = default;
bool PrintingCodeCompleteConsumer::isResultFilteredOut(StringRef Filter,
CodeCompletionResult Result) {
switch (Result.Kind) {
- case CodeCompletionResult::RK_Declaration: {
+ case CodeCompletionResult::RK_Declaration:
return !(Result.Declaration->getIdentifier() &&
Result.Declaration->getIdentifier()->getName().startswith(Filter));
- }
- case CodeCompletionResult::RK_Keyword: {
+ case CodeCompletionResult::RK_Keyword:
return !StringRef(Result.Keyword).startswith(Filter);
- }
- case CodeCompletionResult::RK_Macro: {
+ case CodeCompletionResult::RK_Macro:
return !Result.Macro->getName().startswith(Filter);
- }
- case CodeCompletionResult::RK_Pattern: {
+ case CodeCompletionResult::RK_Pattern:
return !StringRef(Result.Pattern->getAsString()).startswith(Filter);
}
- }
llvm_unreachable("Unknown code completion result Kind.");
}
@@ -477,7 +554,24 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
if (const char *BriefComment = CCS->getBriefComment())
OS << " : " << BriefComment;
}
-
+ for (const FixItHint &FixIt : Results[I].FixIts) {
+ const SourceLocation BLoc = FixIt.RemoveRange.getBegin();
+ const SourceLocation ELoc = FixIt.RemoveRange.getEnd();
+
+ SourceManager &SM = SemaRef.SourceMgr;
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
+ // Adjust for token ranges.
+ if (FixIt.RemoveRange.isTokenRange())
+ EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, SemaRef.LangOpts);
+
+ OS << " (requires fix-it:"
+ << " {" << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << SM.getColumnNumber(EInfo.first, EInfo.second) << "}"
+ << " to \"" << FixIt.CodeToInsert << "\")";
+ }
OS << '\n';
break;
@@ -485,7 +579,7 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
OS << Results[I].Keyword << '\n';
break;
- case CodeCompletionResult::RK_Macro: {
+ case CodeCompletionResult::RK_Macro:
OS << Results[I].Macro->getName();
if (CodeCompletionString *CCS
= Results[I].CreateCodeCompletionString(SemaRef, Context,
@@ -496,14 +590,12 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
}
OS << '\n';
break;
- }
- case CodeCompletionResult::RK_Pattern: {
+ case CodeCompletionResult::RK_Pattern:
OS << "Pattern : "
<< Results[I].Pattern->getAsString() << '\n';
break;
}
- }
}
}
@@ -547,7 +639,7 @@ PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
}
}
-/// \brief Retrieve the effective availability of the given declaration.
+/// Retrieve the effective availability of the given declaration.
static AvailabilityResult getDeclAvailability(const Decl *D) {
AvailabilityResult AR = D->getAvailability();
if (isa<EnumConstantDecl>(D))
@@ -609,7 +701,7 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
Availability = CXAvailability_NotAccessible;
}
-/// \brief Retrieve the name that should be used to order a result.
+/// Retrieve the name that should be used to order a result.
///
/// If the name needs to be constructed as a string, that string will be
/// saved into Saved and the returned StringRef will refer to it.
diff --git a/lib/Sema/CoroutineStmtBuilder.h b/lib/Sema/CoroutineStmtBuilder.h
index 33a368d92ff4..d15cf0b756e7 100644
--- a/lib/Sema/CoroutineStmtBuilder.h
+++ b/lib/Sema/CoroutineStmtBuilder.h
@@ -33,16 +33,16 @@ class CoroutineStmtBuilder : public CoroutineBodyStmt::CtorArgs {
CXXRecordDecl *PromiseRecordDecl = nullptr;
public:
- /// \brief Construct a CoroutineStmtBuilder and initialize the promise
+ /// Construct a CoroutineStmtBuilder and initialize the promise
/// statement and initial/final suspends from the FunctionScopeInfo.
CoroutineStmtBuilder(Sema &S, FunctionDecl &FD, sema::FunctionScopeInfo &Fn,
Stmt *Body);
- /// \brief Build the coroutine body statements, including the
+ /// Build the coroutine body statements, including the
/// "promise dependent" statements when the promise type is not dependent.
bool buildStatements();
- /// \brief Build the coroutine body statements that require a non-dependent
+ /// Build the coroutine body statements that require a non-dependent
/// promise type in order to construct.
///
/// For example different new/delete overloads are selected depending on
@@ -51,9 +51,6 @@ public:
/// name lookup.
bool buildDependentStatements();
- /// \brief Build just parameter moves. To use for rebuilding in TreeTransform.
- bool buildParameterMoves();
-
bool isInvalid() const { return !this->IsValid; }
private:
@@ -65,7 +62,6 @@ private:
bool makeReturnObject();
bool makeGroDeclAndReturnStmt();
bool makeReturnOnAllocFailure();
- bool makeParamMoves();
};
} // end namespace clang
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index 6fe2dcc9895f..ccca5d37ea39 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -30,7 +30,7 @@ using namespace clang;
void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
- Kind = IK_TemplateId;
+ Kind = UnqualifiedIdKind::IK_TemplateId;
this->TemplateId = TemplateId;
StartLocation = TemplateId->TemplateNameLoc;
EndLocation = TemplateId->RAngleLoc;
@@ -38,7 +38,7 @@ void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
assert(TemplateId && "NULL template-id annotation?");
- Kind = IK_ConstructorTemplateId;
+ Kind = UnqualifiedIdKind::IK_ConstructorTemplateId;
this->TemplateId = TemplateId;
StartLocation = TemplateId->TemplateNameLoc;
EndLocation = TemplateId->RAngleLoc;
@@ -186,7 +186,6 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Kind = Function;
I.Loc = LocalRangeBegin;
I.EndLoc = LocalRangeEnd;
- I.Fun.AttrList = nullptr;
I.Fun.hasPrototype = hasProto;
I.Fun.isVariadic = EllipsisLoc.isValid();
I.Fun.isAmbiguous = isAmbiguous;
@@ -251,7 +250,9 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
}
break;
- case EST_ComputedNoexcept:
+ case EST_DependentNoexcept:
+ case EST_NoexceptFalse:
+ case EST_NoexceptTrue:
I.Fun.NoexceptExpr = NoexceptExpr;
break;
@@ -329,6 +330,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_auto_type:
case TST_bool:
case TST_char:
+ case TST_char8:
case TST_char16:
case TST_char32:
case TST_class:
@@ -336,6 +338,8 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_decimal32:
case TST_decimal64:
case TST_double:
+ case TST_Accum:
+ case TST_Fract:
case TST_Float16:
case TST_float128:
case TST_enum:
@@ -387,16 +391,16 @@ bool Declarator::isDeclarationOfFunction() const {
}
bool Declarator::isStaticMember() {
- assert(getContext() == MemberContext);
+ assert(getContext() == DeclaratorContext::MemberContext);
return getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
- (getName().Kind == UnqualifiedId::IK_OperatorFunctionId &&
+ (getName().Kind == UnqualifiedIdKind::IK_OperatorFunctionId &&
CXXMethodDecl::isStaticOverloadedOperator(
getName().OperatorFunctionId.Operator));
}
bool Declarator::isCtorOrDtor() {
- return (getName().getKind() == UnqualifiedId::IK_ConstructorName) ||
- (getName().getKind() == UnqualifiedId::IK_DestructorName);
+ return (getName().getKind() == UnqualifiedIdKind::IK_ConstructorName) ||
+ (getName().getKind() == UnqualifiedIdKind::IK_DestructorName);
}
bool DeclSpec::hasTagDefinition() const {
@@ -499,6 +503,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_void: return "void";
case DeclSpec::TST_char: return "char";
case DeclSpec::TST_wchar: return Policy.MSWChar ? "__wchar_t" : "wchar_t";
+ case DeclSpec::TST_char8: return "char8_t";
case DeclSpec::TST_char16: return "char16_t";
case DeclSpec::TST_char32: return "char32_t";
case DeclSpec::TST_int: return "int";
@@ -506,6 +511,8 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_half: return "half";
case DeclSpec::TST_float: return "float";
case DeclSpec::TST_double: return "double";
+ case DeclSpec::TST_accum: return "_Accum";
+ case DeclSpec::TST_fract: return "_Fract";
case DeclSpec::TST_float16: return "_Float16";
case DeclSpec::TST_float128: return "__float128";
case DeclSpec::TST_bool: return Policy.Bool ? "bool" : "_Bool";
@@ -761,6 +768,19 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
return false;
}
+bool DeclSpec::SetTypeSpecSat(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // _Sat cannot be specified twice.
+ if (TypeSpecSat) {
+ DiagID = diag::warn_duplicate_declspec;
+ PrevSpec = "_Sat";
+ return true;
+ }
+ TypeSpecSat = true;
+ TSSatLoc = Loc;
+ return false;
+}
+
bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
@@ -974,15 +994,7 @@ void DeclSpec::SaveWrittenBuiltinSpecs() {
writtenBS.Width = getTypeSpecWidth();
writtenBS.Type = getTypeSpecType();
// Search the list of attributes for the presence of a mode attribute.
- writtenBS.ModeAttr = false;
- AttributeList* attrs = getAttributes().getList();
- while (attrs) {
- if (attrs->getKind() == AttributeList::AT_Mode) {
- writtenBS.ModeAttr = true;
- break;
- }
- attrs = attrs->getNext();
- }
+ writtenBS.ModeAttr = getAttributes().hasAttribute(ParsedAttr::AT_Mode);
}
/// Finish - This does final analysis of the declspec, rejecting things like
@@ -1096,12 +1108,16 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
}
}
- // signed/unsigned are only valid with int/char/wchar_t.
+ bool IsFixedPointType =
+ TypeSpecType == TST_accum || TypeSpecType == TST_fract;
+
+ // signed/unsigned are only valid with int/char/wchar_t/_Accum.
if (TypeSpecSign != TSS_unspecified) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
- else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
- TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
+ TypeSpecType != TST_char && TypeSpecType != TST_wchar &&
+ !IsFixedPointType) {
S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
@@ -1116,20 +1132,24 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
case TSW_longlong: // long long int
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // short -> short int, long long -> long long int.
- else if (TypeSpecType != TST_int) {
+ else if (!(TypeSpecType == TST_int ||
+ (IsFixedPointType && TypeSpecWidth != TSW_longlong))) {
S.Diag(TSWRange.getBegin(), diag::err_invalid_width_spec)
<< (int)TypeSpecWidth << getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
+ TypeSpecSat = false;
TypeSpecOwned = false;
}
break;
case TSW_long: // long double, long int
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // long -> long int.
- else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_double &&
+ !IsFixedPointType) {
S.Diag(TSWRange.getBegin(), diag::err_invalid_width_spec)
<< (int)TypeSpecWidth << getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
+ TypeSpecSat = false;
TypeSpecOwned = false;
}
break;
@@ -1202,7 +1222,9 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
StorageClassSpec == SCS_auto)
S.Diag(StorageClassSpecLoc, diag::warn_auto_storage_class)
<< FixItHint::CreateRemoval(StorageClassSpecLoc);
- if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
+ if (TypeSpecType == TST_char8)
+ S.Diag(TSTLoc, diag::warn_cxx17_compat_unicode_type);
+ else if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
S.Diag(TSTLoc, diag::warn_cxx98_compat_unicode_type)
<< (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
if (Constexpr_specified)
@@ -1281,7 +1303,7 @@ bool DeclSpec::isMissingDeclaratorOk() {
void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
OverloadedOperatorKind Op,
SourceLocation SymbolLocations[3]) {
- Kind = IK_OperatorFunctionId;
+ Kind = UnqualifiedIdKind::IK_OperatorFunctionId;
StartLocation = OperatorLoc;
EndLocation = OperatorLoc;
OperatorFunctionId.Operator = Op;
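
The fixed-point hunks above extend DeclSpec::Finish so that sign and width specifiers combine with _Accum/_Fract much as they already do with int: short and long are accepted, long long is not, and an invalid combination also resets the new TypeSpecSat bit. A minimal standalone sketch of that width rule follows; the enums are hypothetical stand-ins for Clang's TST/TSW values, not its real API.

    // Sketch of the width/type combinations permitted above; the enum
    // values are illustrative stand-ins, not Clang's actual constants.
    #include <cassert>

    enum TypeSpec { TS_unspecified, TS_int, TS_double, TS_accum, TS_fract };
    enum WidthSpec { WS_none, WS_short, WS_long, WS_longlong };

    bool isFixedPoint(TypeSpec T) { return T == TS_accum || T == TS_fract; }

    // Mirrors the TSW_short/TSW_long/TSW_longlong logic in DeclSpec::Finish.
    bool widthIsValid(WidthSpec W, TypeSpec T) {
      switch (W) {
      case WS_none:     return true;
      case WS_short:    return T == TS_int || isFixedPoint(T); // short _Accum
      case WS_long:     return T == TS_int || T == TS_double || isFixedPoint(T);
      case WS_longlong: return T == TS_int; // no long long _Accum
      }
      return false;
    }

    int main() {
      assert(widthIsValid(WS_short, TS_accum));     // short _Accum: OK
      assert(widthIsValid(WS_long, TS_fract));      // long _Fract: OK
      assert(!widthIsValid(WS_longlong, TS_accum)); // rejected, as above
    }
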
diff --git a/lib/Sema/DelayedDiagnostic.cpp b/lib/Sema/DelayedDiagnostic.cpp
index 3d321d561e60..122b477d5522 100644
--- a/lib/Sema/DelayedDiagnostic.cpp
+++ b/lib/Sema/DelayedDiagnostic.cpp
@@ -1,4 +1,4 @@
-//===--- DelayedDiagnostic.cpp - Delayed declarator diagnostics -*- C++ -*-===//
+//===- DelayedDiagnostic.cpp - Delayed declarator diagnostics -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,36 +14,44 @@
// This file also defines AccessedEntity.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Sema/DelayedDiagnostic.h"
-#include <string.h>
+#include <cstring>
+
using namespace clang;
using namespace sema;
DelayedDiagnostic
DelayedDiagnostic::makeAvailability(AvailabilityResult AR,
- SourceLocation Loc,
+ ArrayRef<SourceLocation> Locs,
const NamedDecl *ReferringDecl,
const NamedDecl *OffendingDecl,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
StringRef Msg,
bool ObjCPropertyAccess) {
+ assert(!Locs.empty());
DelayedDiagnostic DD;
DD.Kind = Availability;
DD.Triggered = false;
- DD.Loc = Loc;
+ DD.Loc = Locs.front();
DD.AvailabilityData.ReferringDecl = ReferringDecl;
DD.AvailabilityData.OffendingDecl = OffendingDecl;
DD.AvailabilityData.UnknownObjCClass = UnknownObjCClass;
DD.AvailabilityData.ObjCProperty = ObjCProperty;
char *MessageData = nullptr;
- if (Msg.size()) {
+ if (!Msg.empty()) {
MessageData = new char [Msg.size()];
memcpy(MessageData, Msg.data(), Msg.size());
}
-
DD.AvailabilityData.Message = MessageData;
DD.AvailabilityData.MessageLen = Msg.size();
+
+ DD.AvailabilityData.SelectorLocs = new SourceLocation[Locs.size()];
+ memcpy(DD.AvailabilityData.SelectorLocs, Locs.data(),
+ sizeof(SourceLocation) * Locs.size());
+ DD.AvailabilityData.NumSelectorLocs = Locs.size();
+
DD.AvailabilityData.AR = AR;
DD.AvailabilityData.ObjCPropertyAccess = ObjCPropertyAccess;
return DD;
@@ -57,6 +65,7 @@ void DelayedDiagnostic::Destroy() {
case Availability:
delete[] AvailabilityData.Message;
+ delete[] AvailabilityData.SelectorLocs;
break;
case ForbiddenType:
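
The change above makes a DelayedDiagnostic carry every selector location rather than a single one. Because the struct stays POD-like, the locations are deep-copied into a new[]-allocated array and released with a matching delete[] in Destroy(). A small sketch of that own-a-copy pattern, with hypothetical names rather than Clang's:

    // Own-a-copy sketch: copy a caller's span into heap storage that the
    // record frees explicitly. Types are illustrative, not Clang's.
    #include <cstddef>
    #include <cstring>

    struct Loc { unsigned Raw; };

    struct Delayed {
      Loc *SelectorLocs = nullptr;
      size_t NumSelectorLocs = 0;

      static Delayed make(const Loc *Locs, size_t N) {
        Delayed D;
        D.SelectorLocs = new Loc[N];
        // Loc is trivially copyable, so memcpy is safe here.
        std::memcpy(D.SelectorLocs, Locs, sizeof(Loc) * N);
        D.NumSelectorLocs = N;
        return D;
      }

      // Counterpart of DelayedDiagnostic::Destroy(): delete[] matches new[].
      void destroy() {
        delete[] SelectorLocs;
        SelectorLocs = nullptr;
      }
    };

    int main() {
      Loc Ls[] = {{1}, {2}, {3}};
      Delayed D = Delayed::make(Ls, 3);
      D.destroy();
    }
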
diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp
index 0bdb19490bc5..dbd52dee1eea 100644
--- a/lib/Sema/IdentifierResolver.cpp
+++ b/lib/Sema/IdentifierResolver.cpp
@@ -1,4 +1,4 @@
-//===- IdentifierResolver.cpp - Lexical Scope Name lookup -------*- C++ -*-===//
+//===- IdentifierResolver.cpp - Lexical Scope Name lookup -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,10 +14,16 @@
#include "clang/Sema/IdentifierResolver.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Scope.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
using namespace clang;
@@ -35,17 +41,17 @@ class IdentifierResolver::IdDeclInfoMap {
/// impossible to add something to a pre-C++0x STL container without
/// a completely unnecessary copy.
struct IdDeclInfoPool {
- IdDeclInfoPool(IdDeclInfoPool *Next) : Next(Next) {}
-
IdDeclInfoPool *Next;
IdDeclInfo Pool[POOL_SIZE];
+
+ IdDeclInfoPool(IdDeclInfoPool *Next) : Next(Next) {}
};
- IdDeclInfoPool *CurPool;
- unsigned int CurIndex;
+ IdDeclInfoPool *CurPool = nullptr;
+ unsigned int CurIndex = POOL_SIZE;
public:
- IdDeclInfoMap() : CurPool(nullptr), CurIndex(POOL_SIZE) {}
+ IdDeclInfoMap() = default;
~IdDeclInfoMap() {
IdDeclInfoPool *Cur = CurPool;
@@ -60,7 +66,6 @@ public:
IdDeclInfo &operator[](DeclarationName Name);
};
-
//===----------------------------------------------------------------------===//
// IdDeclInfo Implementation
//===----------------------------------------------------------------------===//
@@ -83,9 +88,7 @@ void IdentifierResolver::IdDeclInfo::RemoveDecl(NamedDecl *D) {
//===----------------------------------------------------------------------===//
IdentifierResolver::IdentifierResolver(Preprocessor &PP)
- : LangOpt(PP.getLangOpts()), PP(PP),
- IdDeclInfos(new IdDeclInfoMap) {
-}
+ : LangOpt(PP.getLangOpts()), PP(PP), IdDeclInfos(new IdDeclInfoMap) {}
IdentifierResolver::~IdentifierResolver() {
delete IdDeclInfos;
@@ -245,14 +248,16 @@ IdentifierResolver::begin(DeclarationName Name) {
}
namespace {
- enum DeclMatchKind {
- DMK_Different,
- DMK_Replace,
- DMK_Ignore
- };
-}
-/// \brief Compare two declarations to see whether they are different or,
+enum DeclMatchKind {
+ DMK_Different,
+ DMK_Replace,
+ DMK_Ignore
+};
+
+} // namespace
+
+/// Compare two declarations to see whether they are different or,
/// if they are the same, whether the new declaration should replace the
/// existing declaration.
static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) {
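
Much of the IdentifierResolver churn above is mechanical modernization: in-class member initializers replace a hand-written constructor body, which then collapses to = default. A before/after sketch with an illustrative class, not the real IdDeclInfoMap:

    struct PoolBefore {
      void *CurPool;
      unsigned CurIndex;
      PoolBefore() : CurPool(nullptr), CurIndex(16) {}
    };

    struct PoolAfter {
      void *CurPool = nullptr; // defaults now sit next to the members
      unsigned CurIndex = 16;
      PoolAfter() = default;   // compiler-written ctor picks them up
    };

    int main() {
      PoolAfter P;
      return P.CurIndex == 16 ? 0 : 1;
    }
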
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index 64fa2c34b238..58a7862370cc 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -154,6 +154,10 @@ static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
return ScopePair(diag::note_protected_by_objc_weak_init,
diag::note_exits_objc_weak);
+ case QualType::DK_nontrivial_c_struct:
+ return ScopePair(diag::note_protected_by_non_trivial_c_struct_init,
+ diag::note_exits_dtor);
+
case QualType::DK_cxx_destructor:
OutDiag = diag::note_exits_dtor;
break;
@@ -212,7 +216,7 @@ static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
return ScopePair(0U, 0U);
}
-/// \brief Build scope information for a declaration that is part of a DeclStmt.
+/// Build scope information for a declaration that is part of a DeclStmt.
void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
// If this decl causes a new scope, push and switch to it.
std::pair<unsigned,unsigned> Diags = GetDiagForGotoScopeDecl(S, D);
@@ -229,7 +233,7 @@ void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
BuildScopeInformation(Init, ParentScope);
}
-/// \brief Build scope information for a captured block literal variables.
+/// Build scope information for captured block literal variables.
void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
const BlockDecl *BDecl,
unsigned &ParentScope) {
@@ -254,6 +258,10 @@ void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
Diags = ScopePair(diag::note_enters_block_captures_weak,
diag::note_exits_block_captures_weak);
break;
+ case QualType::DK_nontrivial_c_struct:
+ Diags = ScopePair(diag::note_enters_block_captures_non_trivial_c_struct,
+ diag::note_exits_block_captures_non_trivial_c_struct);
+ break;
case QualType::DK_none:
llvm_unreachable("non-lifetime captured variable");
}
diff --git a/lib/Sema/MultiplexExternalSemaSource.cpp b/lib/Sema/MultiplexExternalSemaSource.cpp
index 77ace0cfa579..7e61ccbb1068 100644
--- a/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -16,7 +16,7 @@
using namespace clang;
-///\brief Constructs a new multiplexing external sema source and appends the
+/// Constructs a new multiplexing external sema source and appends the
/// given element to it.
///
MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1,
@@ -28,7 +28,7 @@ MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1,
// pin the vtable here.
MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {}
-///\brief Appends new source to the source list.
+/// Appends a new source to the source list.
///
///\param[in] source - An ExternalSemaSource.
///
@@ -164,6 +164,20 @@ void MultiplexExternalSemaSource::PrintStats() {
Sources[i]->PrintStats();
}
+Module *MultiplexExternalSemaSource::getModule(unsigned ID) {
+ for (size_t i = 0; i < Sources.size(); ++i)
+ if (auto M = Sources[i]->getModule(ID))
+ return M;
+ return nullptr;
+}
+
+bool MultiplexExternalSemaSource::DeclIsFromPCHWithObjectFile(const Decl *D) {
+ for (auto *S : Sources)
+ if (S->DeclIsFromPCHWithObjectFile(D))
+ return true;
+ return false;
+}
+
bool MultiplexExternalSemaSource::layoutRecordType(const RecordDecl *Record,
uint64_t &Size,
uint64_t &Alignment,
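
The two new MultiplexExternalSemaSource overrides above follow the file's standard reduction styles: first non-null answer wins for getModule, and a logical-or across sources for DeclIsFromPCHWithObjectFile. A self-contained sketch of the pattern, with stand-in types rather than the real ExternalSemaSource interface:

    #include <vector>

    struct Source {
      virtual void *getModule(unsigned ID) { return nullptr; }
      virtual bool declHasObjectFile(int D) { return false; }
      virtual ~Source() = default;
    };

    struct Multiplexer : Source {
      std::vector<Source *> Sources;

      void *getModule(unsigned ID) override {
        for (Source *S : Sources)      // first source that knows the ID wins
          if (void *M = S->getModule(ID))
            return M;
        return nullptr;
      }

      bool declHasObjectFile(int D) override {
        for (Source *S : Sources)      // true if any source says true
          if (S->declHasObjectFile(D))
            return true;
        return false;
      }
    };

    int main() {
      Multiplexer M;
      return M.getModule(0) == nullptr ? 0 : 1;
    }
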
diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/ParsedAttr.cpp
index 14d334746f1f..6509df9985ef 100644
--- a/lib/Sema/AttributeList.cpp
+++ b/lib/Sema/ParsedAttr.cpp
@@ -1,4 +1,4 @@
-//===--- AttributeList.cpp --------------------------------------*- C++ -*-===//
+//===- ParsedAttr.cpp ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,20 +7,23 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the AttributeList class implementation
+// This file defines the ParsedAttr class implementation
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/AttributeList.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclTemplate.h"
-#include "clang/AST/Expr.h"
#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <cstddef>
+#include <utility>
+
using namespace clang;
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
@@ -31,91 +34,79 @@ IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
return Result;
}
-size_t AttributeList::allocated_size() const {
+size_t ParsedAttr::allocated_size() const {
if (IsAvailability) return AttributeFactory::AvailabilityAllocSize;
else if (IsTypeTagForDatatype)
return AttributeFactory::TypeTagForDatatypeAllocSize;
else if (IsProperty)
return AttributeFactory::PropertyAllocSize;
- return (sizeof(AttributeList) + NumArgs * sizeof(ArgsUnion));
+ else if (HasParsedType)
+ return sizeof(ParsedAttr) + sizeof(void *);
+ return (sizeof(ParsedAttr) + NumArgs * sizeof(ArgsUnion));
}
AttributeFactory::AttributeFactory() {
// Go ahead and configure all the inline capacity. This is just a memset.
FreeLists.resize(InlineFreeListsCapacity);
}
-AttributeFactory::~AttributeFactory() {}
+AttributeFactory::~AttributeFactory() = default;
static size_t getFreeListIndexForSize(size_t size) {
- assert(size >= sizeof(AttributeList));
+ assert(size >= sizeof(ParsedAttr));
assert((size % sizeof(void*)) == 0);
- return ((size - sizeof(AttributeList)) / sizeof(void*));
+ return ((size - sizeof(ParsedAttr)) / sizeof(void *));
}
void *AttributeFactory::allocate(size_t size) {
// Check for a previously reclaimed attribute.
size_t index = getFreeListIndexForSize(size);
- if (index < FreeLists.size()) {
- if (AttributeList *attr = FreeLists[index]) {
- FreeLists[index] = attr->NextInPool;
- return attr;
- }
+ if (index < FreeLists.size() && !FreeLists[index].empty()) {
+ ParsedAttr *attr = FreeLists[index].back();
+ FreeLists[index].pop_back();
+ return attr;
}
// Otherwise, allocate something new.
return Alloc.Allocate(size, alignof(AttributeFactory));
}
-void AttributeFactory::reclaimPool(AttributeList *cur) {
- assert(cur && "reclaiming empty pool!");
- do {
- // Read this here, because we're going to overwrite NextInPool
- // when we toss 'cur' into the appropriate queue.
- AttributeList *next = cur->NextInPool;
+void AttributeFactory::deallocate(ParsedAttr *Attr) {
+ size_t size = Attr->allocated_size();
+ size_t freeListIndex = getFreeListIndexForSize(size);
- size_t size = cur->allocated_size();
- size_t freeListIndex = getFreeListIndexForSize(size);
+ // Expand FreeLists to the appropriate size, if required.
+ if (freeListIndex >= FreeLists.size())
+ FreeLists.resize(freeListIndex + 1);
- // Expand FreeLists to the appropriate size, if required.
- if (freeListIndex >= FreeLists.size())
- FreeLists.resize(freeListIndex+1);
+#ifndef NDEBUG
+ // In debug mode, zero out the attribute to help find memory overwriting.
+ memset(Attr, 0, size);
+#endif
- // Add 'cur' to the appropriate free-list.
- cur->NextInPool = FreeLists[freeListIndex];
- FreeLists[freeListIndex] = cur;
-
- cur = next;
- } while (cur);
+ // Add 'Attr' to the appropriate free-list.
+ FreeLists[freeListIndex].push_back(Attr);
}
-void AttributePool::takePool(AttributeList *pool) {
- assert(pool);
-
- // Fast path: this pool is empty.
- if (!Head) {
- Head = pool;
- return;
- }
+void AttributeFactory::reclaimPool(AttributePool &cur) {
+ for (ParsedAttr *AL : cur.Attrs)
+ deallocate(AL);
+}
- // Reverse the pool onto the current head. This optimizes for the
- // pattern of pulling a lot of pools into a single pool.
- do {
- AttributeList *next = pool->NextInPool;
- pool->NextInPool = Head;
- Head = pool;
- pool = next;
- } while (pool);
+void AttributePool::takePool(AttributePool &pool) {
+ Attrs.insert(Attrs.end(), pool.Attrs.begin(), pool.Attrs.end());
+ pool.Attrs.clear();
}
#include "clang/Sema/AttrParsedAttrKinds.inc"
static StringRef normalizeAttrName(StringRef AttrName, StringRef ScopeName,
- AttributeList::Syntax SyntaxUsed) {
+ ParsedAttr::Syntax SyntaxUsed) {
// Normalize the attribute name, __foo__ becomes foo. This is only allowable
// for GNU attributes.
- bool IsGNU = SyntaxUsed == AttributeList::AS_GNU ||
- ((SyntaxUsed == AttributeList::AS_CXX11 ||
- SyntaxUsed == AttributeList::AS_C2x) && ScopeName == "gnu");
+ bool IsGNU = SyntaxUsed == ParsedAttr::AS_GNU ||
+ ((SyntaxUsed == ParsedAttr::AS_CXX11 ||
+ SyntaxUsed == ParsedAttr::AS_C2x) &&
+ ScopeName == "gnu");
if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
AttrName.endswith("__"))
AttrName = AttrName.slice(2, AttrName.size() - 2);
@@ -123,9 +114,9 @@ static StringRef normalizeAttrName(StringRef AttrName, StringRef ScopeName,
return AttrName;
}
-AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
- const IdentifierInfo *ScopeName,
- Syntax SyntaxUsed) {
+ParsedAttr::Kind ParsedAttr::getKind(const IdentifierInfo *Name,
+ const IdentifierInfo *ScopeName,
+ Syntax SyntaxUsed) {
StringRef AttrName = Name->getName();
SmallString<64> FullName;
@@ -143,12 +134,12 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
return ::getAttrKind(FullName, SyntaxUsed);
}
-unsigned AttributeList::getAttributeSpellingListIndex() const {
+unsigned ParsedAttr::getAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
StringRef Scope = ScopeName ? ScopeName->getName() : "";
StringRef Name = normalizeAttrName(AttrName->getName(), Scope,
- (AttributeList::Syntax)SyntaxUsed);
+ (ParsedAttr::Syntax)SyntaxUsed);
#include "clang/Sema/AttrSpellingListIndex.inc"
@@ -164,85 +155,78 @@ struct ParsedAttrInfo {
unsigned IsKnownToGCC : 1;
unsigned IsSupportedByPragmaAttribute : 1;
- bool (*DiagAppertainsToDecl)(Sema &S, const AttributeList &Attr,
- const Decl *);
- bool (*DiagLangOpts)(Sema &S, const AttributeList &Attr);
+ bool (*DiagAppertainsToDecl)(Sema &S, const ParsedAttr &Attr, const Decl *);
+ bool (*DiagLangOpts)(Sema &S, const ParsedAttr &Attr);
bool (*ExistsInTarget)(const TargetInfo &Target);
- unsigned (*SpellingIndexToSemanticSpelling)(const AttributeList &Attr);
+ unsigned (*SpellingIndexToSemanticSpelling)(const ParsedAttr &Attr);
void (*GetPragmaAttributeMatchRules)(
llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
const LangOptions &LangOpts);
};
namespace {
- #include "clang/Sema/AttrParsedAttrImpl.inc"
-}
-static const ParsedAttrInfo &getInfo(const AttributeList &A) {
+#include "clang/Sema/AttrParsedAttrImpl.inc"
+
+} // namespace
+
+static const ParsedAttrInfo &getInfo(const ParsedAttr &A) {
return AttrInfoMap[A.getKind()];
}
-unsigned AttributeList::getMinArgs() const {
- return getInfo(*this).NumArgs;
-}
+unsigned ParsedAttr::getMinArgs() const { return getInfo(*this).NumArgs; }
-unsigned AttributeList::getMaxArgs() const {
+unsigned ParsedAttr::getMaxArgs() const {
return getMinArgs() + getInfo(*this).OptArgs;
}
-bool AttributeList::hasCustomParsing() const {
+bool ParsedAttr::hasCustomParsing() const {
return getInfo(*this).HasCustomParsing;
}
-bool AttributeList::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
+bool ParsedAttr::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
}
-bool AttributeList::appliesToDecl(const Decl *D,
- attr::SubjectMatchRule MatchRule) const {
+bool ParsedAttr::appliesToDecl(const Decl *D,
+ attr::SubjectMatchRule MatchRule) const {
return checkAttributeMatchRuleAppliesTo(D, MatchRule);
}
-void AttributeList::getMatchRules(
+void ParsedAttr::getMatchRules(
const LangOptions &LangOpts,
SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &MatchRules)
const {
return getInfo(*this).GetPragmaAttributeMatchRules(MatchRules, LangOpts);
}
-bool AttributeList::diagnoseLangOpts(Sema &S) const {
+bool ParsedAttr::diagnoseLangOpts(Sema &S) const {
return getInfo(*this).DiagLangOpts(S, *this);
}
-bool AttributeList::isTargetSpecificAttr() const {
+bool ParsedAttr::isTargetSpecificAttr() const {
return getInfo(*this).IsTargetSpecific;
}
-bool AttributeList::isTypeAttr() const {
- return getInfo(*this).IsType;
-}
+bool ParsedAttr::isTypeAttr() const { return getInfo(*this).IsType; }
-bool AttributeList::isStmtAttr() const {
- return getInfo(*this).IsStmt;
-}
+bool ParsedAttr::isStmtAttr() const { return getInfo(*this).IsStmt; }
-bool AttributeList::existsInTarget(const TargetInfo &Target) const {
+bool ParsedAttr::existsInTarget(const TargetInfo &Target) const {
return getInfo(*this).ExistsInTarget(Target);
}
-bool AttributeList::isKnownToGCC() const {
- return getInfo(*this).IsKnownToGCC;
-}
+bool ParsedAttr::isKnownToGCC() const { return getInfo(*this).IsKnownToGCC; }
-bool AttributeList::isSupportedByPragmaAttribute() const {
+bool ParsedAttr::isSupportedByPragmaAttribute() const {
return getInfo(*this).IsSupportedByPragmaAttribute;
}
-unsigned AttributeList::getSemanticSpelling() const {
+unsigned ParsedAttr::getSemanticSpelling() const {
return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
}
-bool AttributeList::hasVariadicArg() const {
+bool ParsedAttr::hasVariadicArg() const {
// If the attribute has the maximum number of optional arguments, we will
// claim that as being variadic. If we someday get an attribute that
// legitimately bumps up against that maximum, we can use another bit to track
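
The allocator rework above drops the intrusive NextInPool chain: reclaimed attributes now land in a vector per size bucket, so deallocate is a push_back and allocate a pop_back. A simplified sketch of such a size-bucketed recycler, using malloc in place of Clang's arena and ignoring alignment:

    #include <cstdlib>
    #include <vector>

    class Recycler {
      std::vector<std::vector<void *>> FreeLists; // index = size bucket

      static size_t bucketFor(size_t Size) { return Size / sizeof(void *); }

    public:
      void *allocate(size_t Size) {
        size_t B = bucketFor(Size);
        if (B < FreeLists.size() && !FreeLists[B].empty()) {
          void *P = FreeLists[B].back(); // reuse a reclaimed block
          FreeLists[B].pop_back();
          return P;
        }
        return std::malloc(Size);
      }

      void deallocate(void *P, size_t Size) {
        size_t B = bucketFor(Size);
        if (B >= FreeLists.size())
          FreeLists.resize(B + 1);
        FreeLists[B].push_back(P); // keep it for the next allocate
      }

      ~Recycler() {
        for (auto &List : FreeLists)
          for (void *P : List)
            std::free(P);
      }
    };

    int main() {
      Recycler R;
      void *A = R.allocate(64);
      R.deallocate(A, 64);
      void *B = R.allocate(64); // likely the same block, recycled
      R.deallocate(B, 64);
    }
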
diff --git a/lib/Sema/Scope.cpp b/lib/Sema/Scope.cpp
index ae5b181c6728..eae5a328bfa2 100644
--- a/lib/Sema/Scope.cpp
+++ b/lib/Sema/Scope.cpp
@@ -143,72 +143,43 @@ void Scope::dumpImpl(raw_ostream &OS) const {
if (HasFlags)
OS << "Flags: ";
- while (Flags) {
- if (Flags & FnScope) {
- OS << "FnScope";
- Flags &= ~FnScope;
- } else if (Flags & BreakScope) {
- OS << "BreakScope";
- Flags &= ~BreakScope;
- } else if (Flags & ContinueScope) {
- OS << "ContinueScope";
- Flags &= ~ContinueScope;
- } else if (Flags & DeclScope) {
- OS << "DeclScope";
- Flags &= ~DeclScope;
- } else if (Flags & ControlScope) {
- OS << "ControlScope";
- Flags &= ~ControlScope;
- } else if (Flags & ClassScope) {
- OS << "ClassScope";
- Flags &= ~ClassScope;
- } else if (Flags & BlockScope) {
- OS << "BlockScope";
- Flags &= ~BlockScope;
- } else if (Flags & TemplateParamScope) {
- OS << "TemplateParamScope";
- Flags &= ~TemplateParamScope;
- } else if (Flags & FunctionPrototypeScope) {
- OS << "FunctionPrototypeScope";
- Flags &= ~FunctionPrototypeScope;
- } else if (Flags & FunctionDeclarationScope) {
- OS << "FunctionDeclarationScope";
- Flags &= ~FunctionDeclarationScope;
- } else if (Flags & AtCatchScope) {
- OS << "AtCatchScope";
- Flags &= ~AtCatchScope;
- } else if (Flags & ObjCMethodScope) {
- OS << "ObjCMethodScope";
- Flags &= ~ObjCMethodScope;
- } else if (Flags & SwitchScope) {
- OS << "SwitchScope";
- Flags &= ~SwitchScope;
- } else if (Flags & TryScope) {
- OS << "TryScope";
- Flags &= ~TryScope;
- } else if (Flags & FnTryCatchScope) {
- OS << "FnTryCatchScope";
- Flags &= ~FnTryCatchScope;
- } else if (Flags & SEHTryScope) {
- OS << "SEHTryScope";
- Flags &= ~SEHTryScope;
- } else if (Flags & SEHExceptScope) {
- OS << "SEHExceptScope";
- Flags &= ~SEHExceptScope;
- } else if (Flags & OpenMPDirectiveScope) {
- OS << "OpenMPDirectiveScope";
- Flags &= ~OpenMPDirectiveScope;
- } else if (Flags & OpenMPLoopDirectiveScope) {
- OS << "OpenMPLoopDirectiveScope";
- Flags &= ~OpenMPLoopDirectiveScope;
- } else if (Flags & OpenMPSimdDirectiveScope) {
- OS << "OpenMPSimdDirectiveScope";
- Flags &= ~OpenMPSimdDirectiveScope;
+ std::pair<unsigned, const char *> FlagInfo[] = {
+ {FnScope, "FnScope"},
+ {BreakScope, "BreakScope"},
+ {ContinueScope, "ContinueScope"},
+ {DeclScope, "DeclScope"},
+ {ControlScope, "ControlScope"},
+ {ClassScope, "ClassScope"},
+ {BlockScope, "BlockScope"},
+ {TemplateParamScope, "TemplateParamScope"},
+ {FunctionPrototypeScope, "FunctionPrototypeScope"},
+ {FunctionDeclarationScope, "FunctionDeclarationScope"},
+ {AtCatchScope, "AtCatchScope"},
+ {ObjCMethodScope, "ObjCMethodScope"},
+ {SwitchScope, "SwitchScope"},
+ {TryScope, "TryScope"},
+ {FnTryCatchScope, "FnTryCatchScope"},
+ {OpenMPDirectiveScope, "OpenMPDirectiveScope"},
+ {OpenMPLoopDirectiveScope, "OpenMPLoopDirectiveScope"},
+ {OpenMPSimdDirectiveScope, "OpenMPSimdDirectiveScope"},
+ {EnumScope, "EnumScope"},
+ {SEHTryScope, "SEHTryScope"},
+ {SEHExceptScope, "SEHExceptScope"},
+ {SEHFilterScope, "SEHFilterScope"},
+ {CompoundStmtScope, "CompoundStmtScope"},
+ {ClassInheritanceScope, "ClassInheritanceScope"}};
+
+ for (auto Info : FlagInfo) {
+ if (Flags & Info.first) {
+ OS << Info.second;
+ Flags &= ~Info.first;
+ if (Flags)
+ OS << " | ";
}
-
- if (Flags)
- OS << " | ";
}
+
+ assert(Flags == 0 && "Unknown scope flags");
+
if (HasFlags)
OS << '\n';
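
The Scope::dumpImpl rewrite above replaces a twenty-branch if/else chain with a flag/name table, and the trailing assert now catches any scope flag missing from the table, where the old chain could spin forever on an unrecognized bit. The same table-driven bitmask dump in miniature, with a made-up flag set:

    #include <cassert>
    #include <cstdio>
    #include <utility>

    enum : unsigned { FnScope = 1, BreakScope = 2, ContinueScope = 4 };

    void dumpFlags(unsigned Flags) {
      const std::pair<unsigned, const char *> FlagInfo[] = {
          {FnScope, "FnScope"},
          {BreakScope, "BreakScope"},
          {ContinueScope, "ContinueScope"}};
      for (auto Info : FlagInfo) {
        if (Flags & Info.first) {
          std::printf("%s", Info.second);
          Flags &= ~Info.first; // clear so the trailing check can fire
          if (Flags)
            std::printf(" | ");
        }
      }
      assert(Flags == 0 && "Unknown scope flags"); // any leftover bit is a bug
      std::printf("\n");
    }

    int main() { dumpFlags(FnScope | BreakScope); }
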
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index b309a36a30a3..62a83ccb70aa 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -43,6 +43,7 @@ void FunctionScopeInfo::Clear() {
// Coroutine state
FirstCoroutineStmtLoc = SourceLocation();
CoroutinePromise = nullptr;
+ CoroutineParameterMoves.clear();
NeedsCoroutineSuspends = true;
CoroutineSuspends.first = nullptr;
CoroutineSuspends.second = nullptr;
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 4e57e5ef81c6..d57473c5616f 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
@@ -31,12 +32,12 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
+#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
using namespace clang;
@@ -51,8 +52,8 @@ ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
const Preprocessor &PP) {
PrintingPolicy Policy = Context.getPrintingPolicy();
- // Our printing policy is copied over the ASTContext printing policy whenever
- // a diagnostic is emitted, so recompute it.
+ // In diagnostics, we print _Bool as bool if the latter is defined as the
+ // former.
Policy.Bool = Context.getLangOpts().Bool;
if (!Policy.Bool) {
if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
@@ -130,16 +131,19 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
- CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr), NSNumberDecl(nullptr),
- NSValueDecl(nullptr), NSStringDecl(nullptr),
- StringWithUTF8StringMethod(nullptr),
+ StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
+ MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr),
+ NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
- TUKind(TUKind), NumSFINAEErrors(0), AccessCheckingSFINAE(false),
- InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
- ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
- DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
+ TUKind(TUKind), NumSFINAEErrors(0),
+ FullyCheckedComparisonCategories(
+ static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
+ AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
+ NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
+ CurrentInstantiationScope(nullptr), DisableTypoCorrection(false),
+ TyposCorrected(0), AnalysisWarnings(*this),
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
TUScope = nullptr;
@@ -159,9 +163,9 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ExprEvalContexts.emplace_back(
ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
- nullptr, false);
+ nullptr, ExpressionEvaluationContextRecord::EK_Other);
- FunctionScopes.push_back(new FunctionScopeInfo(Diags));
+ PreallocatedFunctionScope.reset(new FunctionScopeInfo(Diags));
  // Initialization of data sharing attributes stack for OpenMP
InitDataSharingAttributesStack();
@@ -331,11 +335,11 @@ void Sema::Initialize() {
Sema::~Sema() {
if (VisContext) FreeVisContext();
+
// Kill all the active scopes.
- for (unsigned I = 1, E = FunctionScopes.size(); I != E; ++I)
- delete FunctionScopes[I];
- if (FunctionScopes.size() == 1)
- delete FunctionScopes[0];
+ for (sema::FunctionScopeInfo *FSI : FunctionScopes)
+ if (FSI != PreallocatedFunctionScope.get())
+ delete FSI;
// Tell the SemaConsumer to forget about us; we're going out of scope.
if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
@@ -391,7 +395,7 @@ ASTMutationListener *Sema::getASTMutationListener() const {
return getASTConsumer().GetASTMutationListener();
}
-///\brief Registers an external source. If an external source already exists,
+/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
@@ -412,7 +416,7 @@ void Sema::addExternalSource(ExternalSemaSource *E) {
}
}
-/// \brief Print out statistics about the semantic analysis.
+/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
llvm::errs() << "\n*** Semantic Analysis Stats:\n";
llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
@@ -477,6 +481,7 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_ToVoid:
+ case CK_NonAtomicToAtomic:
break;
}
}
@@ -532,7 +537,7 @@ CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
llvm_unreachable("unknown scalar type kind");
}
-/// \brief Used to prune the decls of Sema's UnusedFileScopedDecls vector.
+/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
if (D->getMostRecentDecl()->isUsed())
return true;
@@ -641,6 +646,8 @@ void Sema::getUndefinedButUsed(
!isExternalWithNoLinkageType(FD) &&
!FD->getMostRecentDecl()->isInlined())
continue;
+ if (FD->getBuiltinID())
+ continue;
} else {
auto *VD = cast<VarDecl>(ND);
if (VD->hasDefinition() != VarDecl::DeclarationOnly)
@@ -649,6 +656,11 @@ void Sema::getUndefinedButUsed(
!isExternalWithNoLinkageType(VD) &&
!VD->getMostRecentDecl()->isInline())
continue;
+
+ // Skip VarDecls that lack formal definitions but which we know are in
+ // fact defined somewhere.
+ if (VD->isKnownToBeDefined())
+ continue;
}
Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
@@ -720,7 +732,7 @@ void Sema::LoadExternalWeakUndeclaredIdentifiers() {
typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
-/// \brief Returns true, if all methods and nested classes of the given
+/// Returns true, if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
@@ -760,7 +772,7 @@ static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
return Complete;
}
-/// \brief Returns true, if the given CXXRecordDecl is fully defined in this
+/// Returns true, if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
@@ -849,6 +861,20 @@ void Sema::ActOnEndOfTranslationUnit() {
if (PP.isCodeCompletionEnabled())
return;
+ // Transfer late parsed template instantiations over to the pending template
+ // instantiation list. During normal compliation, the late template parser
+ // will be installed and instantiating these templates will succeed.
+ //
+ // If we are building a TU prefix for serialization, it is also safe to
+ // transfer these over, even though they are not parsed. The end of the TU
+ // should be outside of any eager template instantiation scope, so when this
+ // AST is deserialized, these templates will not be parsed until the end of
+ // the combined TU.
+ PendingInstantiations.insert(PendingInstantiations.end(),
+ LateParsedInstantiations.begin(),
+ LateParsedInstantiations.end());
+ LateParsedInstantiations.clear();
+
// Complete translation units and modules define vtables and perform implicit
// instantiations. PCH files do not.
if (TUKind != TU_Prefix) {
@@ -878,8 +904,13 @@ void Sema::ActOnEndOfTranslationUnit() {
PendingInstantiations.insert(PendingInstantiations.begin(),
Pending.begin(), Pending.end());
}
+
PerformPendingInstantiations();
+ assert(LateParsedInstantiations.empty() &&
+ "end of TU template instantiation should not create more "
+ "late-parsed templates");
+
if (LateTemplateParserCleanup)
LateTemplateParserCleanup(OpaqueParser);
@@ -980,11 +1011,6 @@ void Sema::ActOnEndOfTranslationUnit() {
// Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
// modules when they are built, not every time they are used.
emitAndClearUnusedLocalTypedefWarnings();
-
- // Modules don't need any of the checking below.
- if (!PP.isIncrementalProcessingEnabled())
- TUScope = nullptr;
- return;
}
// C99 6.9.2p2:
@@ -1002,8 +1028,7 @@ void Sema::ActOnEndOfTranslationUnit() {
for (TentativeDefinitionsType::iterator
T = TentativeDefinitions.begin(ExternalSource),
TEnd = TentativeDefinitions.end();
- T != TEnd; ++T)
- {
+ T != TEnd; ++T) {
VarDecl *VD = (*T)->getActingDefinition();
// If the tentative definition was completed, getActingDefinition() returns
@@ -1030,12 +1055,13 @@ void Sema::ActOnEndOfTranslationUnit() {
// Notify the consumer that we've completed a tentative definition.
if (!VD->isInvalidDecl())
Consumer.CompleteTentativeDefinition(VD);
-
}
// If there were errors, disable 'unused' warnings since they will mostly be
- // noise.
- if (!Diags.hasErrorOccurred()) {
+ // noise. Don't warn for a use from a module: either we should warn on all
+ // file-scope declarations in modules or not at all, but whether the
+ // declaration is used is immaterial.
+ if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
// Output warning for unused file scoped decls.
for (UnusedFileScopedDeclsType::iterator
I = UnusedFileScopedDecls.begin(ExternalSource),
@@ -1103,6 +1129,8 @@ void Sema::ActOnEndOfTranslationUnit() {
}
if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
+ // FIXME: Load additional unused private field candidates from the external
+ // source.
RecordCompleteMap RecordsComplete;
RecordCompleteMap MNCComplete;
for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
@@ -1264,7 +1292,8 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
}
}
- // Set up the context's printing policy based on our current state.
+ // Copy the diagnostic printing policy over the ASTContext printing policy.
+ // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
Context.setPrintingPolicy(getPrintingPolicy());
// Emit the diagnostic.
@@ -1287,7 +1316,7 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
return Builder;
}
-/// \brief Looks through the macro-expansion chain for the given
+/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
/// expansion loc.
@@ -1308,7 +1337,7 @@ bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
return false;
}
-/// \brief Determines the active Scope associated with the given declaration
+/// Determines the active Scope associated with the given declaration
/// context.
///
/// This routine maps a declaration context to the active Scope object that
@@ -1337,19 +1366,15 @@ Scope *Sema::getScopeForContext(DeclContext *Ctx) {
return nullptr;
}
-/// \brief Enter a new function scope
+/// Enter a new function scope
void Sema::PushFunctionScope() {
- if (FunctionScopes.size() == 1) {
- // Use the "top" function scope rather than having to allocate
- // memory for a new scope.
- FunctionScopes.back()->Clear();
- FunctionScopes.push_back(FunctionScopes.back());
- if (LangOpts.OpenMP)
- pushOpenMPFunctionRegion();
- return;
+ if (FunctionScopes.empty()) {
+ // Use PreallocatedFunctionScope to avoid allocating memory when possible.
+ PreallocatedFunctionScope->Clear();
+ FunctionScopes.push_back(PreallocatedFunctionScope.get());
+ } else {
+ FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
}
-
- FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
if (LangOpts.OpenMP)
pushOpenMPFunctionRegion();
}
@@ -1369,15 +1394,15 @@ void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
if (LambdaScopeInfo *const LSI = getCurLambda()) {
LSI->AutoTemplateParameterDepth = Depth;
return;
- }
- llvm_unreachable(
+ }
+ llvm_unreachable(
"Remove assertion if intentionally called in a non-lambda context.");
}
void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
const Decl *D, const BlockExpr *blkExpr) {
- FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
assert(!FunctionScopes.empty() && "mismatched push/pop!");
+ FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
if (LangOpts.OpenMP)
popOpenMPFunctionRegion(Scope);
@@ -1389,12 +1414,13 @@ void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
for (const auto &PUD : Scope->PossiblyUnreachableDiags)
Diag(PUD.Loc, PUD.PD);
- if (FunctionScopes.back() != Scope)
+ // Delete the scope unless it's our preallocated scope.
+ if (Scope != PreallocatedFunctionScope.get())
delete Scope;
}
-void Sema::PushCompoundScope() {
- getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo());
+void Sema::PushCompoundScope(bool IsStmtExpr) {
+ getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr));
}
void Sema::PopCompoundScope() {
@@ -1404,12 +1430,27 @@ void Sema::PopCompoundScope() {
CurFunction->CompoundScopes.pop_back();
}
-/// \brief Determine whether any errors occurred within this function/method/
+/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred();
}
+void Sema::setFunctionHasBranchIntoScope() {
+ if (!FunctionScopes.empty())
+ FunctionScopes.back()->setHasBranchIntoScope();
+}
+
+void Sema::setFunctionHasBranchProtectedScope() {
+ if (!FunctionScopes.empty())
+ FunctionScopes.back()->setHasBranchProtectedScope();
+}
+
+void Sema::setFunctionHasIndirectGoto() {
+ if (!FunctionScopes.empty())
+ FunctionScopes.back()->setHasIndirectGoto();
+}
+
BlockScopeInfo *Sema::getCurBlock() {
if (FunctionScopes.empty())
return nullptr;
@@ -1425,6 +1466,18 @@ BlockScopeInfo *Sema::getCurBlock() {
return CurBSI;
}
+FunctionScopeInfo *Sema::getEnclosingFunction() const {
+ if (FunctionScopes.empty())
+ return nullptr;
+
+ for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
+ if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
+ continue;
+ return FunctionScopes[e];
+ }
+ return nullptr;
+}
+
LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
if (FunctionScopes.empty())
return nullptr;
@@ -1462,8 +1515,7 @@ void Sema::ActOnComment(SourceRange Comment) {
if (!LangOpts.RetainCommentsFromSystemHeaders &&
SourceMgr.isInSystemHeader(Comment.getBegin()))
return;
- RawComment RC(SourceMgr, Comment, false,
- LangOpts.CommentOpts.ParseAllComments);
+ RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
if (RC.isAlmostTrailingComment()) {
SourceRange MagicMarkerRange(Comment.getBegin(),
Comment.getBegin().getLocWithOffset(3));
@@ -1501,25 +1553,7 @@ void ExternalSemaSource::ReadUndefinedButUsed(
void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
-void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const {
- SourceLocation Loc = this->Loc;
- if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation();
- if (Loc.isValid()) {
- Loc.print(OS, S.getSourceManager());
- OS << ": ";
- }
- OS << Message;
-
- if (auto *ND = dyn_cast_or_null<NamedDecl>(TheDecl)) {
- OS << " '";
- ND->getNameForDiagnostic(OS, ND->getASTContext().getPrintingPolicy(), true);
- OS << "'";
- }
-
- OS << '\n';
-}
-
-/// \brief Figure out if an expression could be turned into a call.
+/// Figure out if an expression could be turned into a call.
///
/// Use this when trying to recover from an error where the programmer may have
/// written just the name of a function instead of actually calling it.
@@ -1551,6 +1585,7 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
}
bool Ambiguous = false;
+ bool IsMV = false;
if (Overloads) {
for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
@@ -1564,11 +1599,16 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
if (const FunctionDecl *OverloadDecl
= dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
if (OverloadDecl->getMinRequiredArguments() == 0) {
- if (!ZeroArgCallReturnTy.isNull() && !Ambiguous) {
+ if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
+ (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
+ OverloadDecl->isCPUSpecificMultiVersion()))) {
ZeroArgCallReturnTy = QualType();
Ambiguous = true;
- } else
+ } else {
ZeroArgCallReturnTy = OverloadDecl->getReturnType();
+ IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
+ OverloadDecl->isCPUSpecificMultiVersion();
+ }
}
}
}
@@ -1621,7 +1661,7 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
return false;
}
-/// \brief Give notes for a set of overloads.
+/// Give notes for a set of overloads.
///
/// A companion to tryExprAsCall. In cases when the name that the programmer
/// wrote was an overloaded function, we may be able to make some guesses about
@@ -1647,6 +1687,12 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
}
NamedDecl *Fn = (*It)->getUnderlyingDecl();
+ // Don't print overloads for non-default multiversioned functions.
+ if (const auto *FD = Fn->getAsFunction()) {
+ if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
+ !FD->getAttr<TargetAttr>()->isDefaultVersion())
+ continue;
+ }
S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
++ShownOverloads;
}
@@ -1685,6 +1731,21 @@ static bool IsCallableWithAppend(Expr *E) {
!isa<CXXOperatorCallExpr>(E));
}
+static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
+ if (const auto *UO = dyn_cast<UnaryOperator>(E))
+ E = UO->getSubExpr();
+
+ if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ if (ULE->getNumDecls() == 0)
+ return false;
+
+ const NamedDecl *ND = *ULE->decls_begin();
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
+ return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
+ }
+ return false;
+}
+
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain,
bool (*IsPlausibleResult)(QualType)) {
@@ -1701,12 +1762,13 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
// so we can emit a fixit and carry on pretending that E was
// actually a CallExpr.
SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
- Diag(Loc, PD)
- << /*zero-arg*/ 1 << Range
- << (IsCallableWithAppend(E.get())
- ? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
- : FixItHint());
- notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+ bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
+ Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
+ << (IsCallableWithAppend(E.get())
+ ? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
+ : FixItHint());
+ if (!IsMV)
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
// FIXME: Try this before emitting the fixit, and suppress diagnostics
// while doing so.
@@ -1717,8 +1779,10 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
if (!ForceComplain) return false;
- Diag(Loc, PD) << /*not zero-arg*/ 0 << Range;
- notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+ bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
+ Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
+ if (!IsMV)
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
E = ExprError();
return true;
}
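
PushFunctionScope and PopFunctionScopeInfo above switch from a sentinel kept at FunctionScopes[0] to an explicit PreallocatedFunctionScope owned by Sema: the first push recycles it without allocating, deeper pushes heap-allocate, and pop (like ~Sema) deletes only scopes that are not the preallocated one. A reduced sketch of that pattern:

    #include <memory>
    #include <vector>

    struct ScopeInfo {
      void clear() {}
    };

    class Stack {
      std::unique_ptr<ScopeInfo> Preallocated = std::make_unique<ScopeInfo>();
      std::vector<ScopeInfo *> Scopes;

    public:
      void push() {
        if (Scopes.empty()) {
          Preallocated->clear();           // recycle, no allocation
          Scopes.push_back(Preallocated.get());
        } else {
          Scopes.push_back(new ScopeInfo); // nested scopes still allocate
        }
      }

      void pop() {
        ScopeInfo *S = Scopes.back();
        Scopes.pop_back();
        if (S != Preallocated.get())       // never delete the preallocated one
          delete S;
      }
    };

    int main() {
      Stack S;
      S.push();
      S.push();
      S.pop();
      S.pop();
    }
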
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index 98a918bd7d63..9fbae2ca297f 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/Specifiers.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
@@ -1712,7 +1713,7 @@ Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
return CheckAccess(*this, OpLoc, Entity);
}
-/// \brief Checks access to a member.
+/// Checks access to a member.
Sema::AccessResult Sema::CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found) {
@@ -1856,29 +1857,31 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
}
}
-/// Checks access to Decl from the given class. The check will take access
+/// Checks access to Target from the given class. The check will take access
/// specifiers into account, but no member access expressions and such.
///
-/// \param Decl the declaration to check if it can be accessed
+/// \param Target the declaration to check if it can be accessed
/// \param Ctx the class/context from which to start the search
-/// \return true if the Decl is accessible from the Class, false otherwise.
-bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) {
+/// \return true if the Target is accessible from the Class, false otherwise.
+bool Sema::IsSimplyAccessible(NamedDecl *Target, DeclContext *Ctx) {
if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
- if (!Decl->isCXXClassMember())
+ if (!Target->isCXXClassMember())
return true;
+ if (Target->getAccess() == AS_public)
+ return true;
QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
+ // The unprivileged access is AS_none as we don't know how the member was
+ // accessed, which is described by the access in DeclAccessPair.
+ // `IsAccessible` will examine the actual access of Target (i.e.
+ // Decl->getAccess()) when calculating the access.
AccessTarget Entity(Context, AccessedEntity::Member, Class,
- DeclAccessPair::make(Decl, Decl->getAccess()),
- qType);
- if (Entity.getAccess() == AS_public)
- return true;
-
+ DeclAccessPair::make(Target, AS_none), qType);
EffectiveContext EC(CurContext);
return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
}
-
- if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(Decl)) {
+
+ if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(Target)) {
// @public and @package ivars are always accessible.
if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Public ||
Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Package)
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index 4ba2a317e1f9..4f3cf4633c80 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -205,7 +205,7 @@ void Sema::ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
// "#pragma pack(pop, identifier, n) is undefined"
if (Action & Sema::PSK_Pop) {
if (Alignment && !SlotLabel.empty())
- Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifer_and_alignment);
+ Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifier_and_alignment);
if (PackStack.Stack.empty())
Diag(PragmaLoc, diag::warn_pragma_pop_failed) << "pack" << "stack empty";
}
@@ -330,7 +330,7 @@ void Sema::PragmaStack<ValueType>::Act(SourceLocation PragmaLocation,
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
- // We don't have a label, just pop the last entry.
+ // We do not have a label; just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
@@ -389,7 +389,7 @@ bool Sema::UnifySection(StringRef SectionName,
return false;
}
-/// \brief Called on well formed \#pragma bss_seg().
+/// Called on well formed \#pragma bss_seg().
void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
@@ -410,7 +410,7 @@ void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName);
}
-/// \brief Called on well formed \#pragma bss_seg().
+/// Called on well formed \#pragma bss_seg().
void Sema::ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName) {
UnifySection(SegmentName->getString(), SectionFlags, PragmaLocation);
@@ -520,7 +520,7 @@ attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
} // end anonymous namespace
-void Sema::ActOnPragmaAttributePush(AttributeList &Attribute,
+void Sema::ActOnPragmaAttributePush(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules) {
SmallVector<attr::SubjectMatchRule, 4> SubjectMatchRules;
@@ -645,7 +645,7 @@ void Sema::AddPragmaAttributes(Scope *S, Decl *D) {
if (PragmaAttributeStack.empty())
return;
for (auto &Entry : PragmaAttributeStack) {
- const AttributeList *Attribute = Entry.Attribute;
+ ParsedAttr *Attribute = Entry.Attribute;
assert(Attribute && "Expected an attribute");
// Ensure that the attribute can be applied to the given declaration.
@@ -659,9 +659,10 @@ void Sema::AddPragmaAttributes(Scope *S, Decl *D) {
if (!Applies)
continue;
Entry.IsUsed = true;
- assert(!Attribute->getNext() && "Expected just one attribute");
PragmaAttributeCurrentTargetDecl = D;
- ProcessDeclAttributeList(S, D, Attribute);
+ ParsedAttributesView Attrs;
+ Attrs.addAtStart(Attribute);
+ ProcessDeclAttributeList(S, D, Attrs);
PragmaAttributeCurrentTargetDecl = nullptr;
}
}
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index cac5f682275e..13dd8d936fd2 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
/// \file
-/// \brief This file implements semantic analysis for CUDA constructs.
+/// This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//
@@ -42,8 +42,9 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
SourceLocation GGGLoc) {
FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
if (!ConfigDecl)
- return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
- << "cudaConfigureCall");
+ return ExprError(
+ Diag(LLLLoc, diag::err_undeclared_var_use)
+ << (getLangOpts().HIP ? "hipConfigureCall" : "cudaConfigureCall"));
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (Context)
@@ -54,30 +55,31 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
/*IsExecConfig=*/true);
}
-Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const AttributeList *Attr) {
+Sema::CUDAFunctionTarget
+Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
bool HasHostAttr = false;
bool HasDeviceAttr = false;
bool HasGlobalAttr = false;
bool HasInvalidTargetAttr = false;
- while (Attr) {
- switch(Attr->getKind()){
- case AttributeList::AT_CUDAGlobal:
+ for (const ParsedAttr &AL : Attrs) {
+ switch (AL.getKind()) {
+ case ParsedAttr::AT_CUDAGlobal:
HasGlobalAttr = true;
break;
- case AttributeList::AT_CUDAHost:
+ case ParsedAttr::AT_CUDAHost:
HasHostAttr = true;
break;
- case AttributeList::AT_CUDADevice:
+ case ParsedAttr::AT_CUDADevice:
HasDeviceAttr = true;
break;
- case AttributeList::AT_CUDAInvalidTarget:
+ case ParsedAttr::AT_CUDAInvalidTarget:
HasInvalidTargetAttr = true;
break;
default:
break;
}
- Attr = Attr->getNext();
}
+
if (HasInvalidTargetAttr)
return CFT_InvalidTarget;
@@ -471,6 +473,59 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
return true;
}
+void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
+ if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage())
+ return;
+ const Expr *Init = VD->getInit();
+ if (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>() ||
+ VD->hasAttr<CUDASharedAttr>()) {
+ assert(!VD->isStaticLocal() || VD->hasAttr<CUDASharedAttr>());
+ bool AllowedInit = false;
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
+ AllowedInit =
+ isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
+ // We'll allow constant initializers even if it's a non-empty
+ // constructor according to CUDA rules. This deviates from NVCC,
+ // but allows us to handle things like constexpr constructors.
+ if (!AllowedInit &&
+ (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
+ AllowedInit = VD->getInit()->isConstantInitializer(
+ Context, VD->getType()->isReferenceType());
+
+ // Also make sure that destructor, if there is one, is empty.
+ if (AllowedInit)
+ if (CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl())
+ AllowedInit =
+ isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
+
+ if (!AllowedInit) {
+ Diag(VD->getLocation(), VD->hasAttr<CUDASharedAttr>()
+ ? diag::err_shared_var_init
+ : diag::err_dynamic_var_init)
+ << Init->getSourceRange();
+ VD->setInvalidDecl();
+ }
+ } else {
+ // This is a host-side global variable. Check that the initializer is
+ // callable from the host side.
+ const FunctionDecl *InitFn = nullptr;
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
+ InitFn = CE->getConstructor();
+ } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
+ InitFn = CE->getDirectCallee();
+ }
+ if (InitFn) {
+ CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
+ if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
+ Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
+ << InitFnTarget << InitFn;
+ Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
+ VD->setInvalidDecl();
+ }
+ }
+ }
+}
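
A hedged sketch of what checkAllowedCUDAInitializer accepts and rejects (all names hypothetical):

    __device__ int DevOk = 42;           // constant initializer: allowed
    struct S { S() { n = 1; } int n; };
    __device__ S DevBad;                 // dynamic init: err_dynamic_var_init
    __device__ int devFn();
    // int HostBad = devFn();            // host-side global initialized by a
    //                                   // __device__ function:
    //                                   // err_ref_bad_target_global_initializer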
+
// With -fcuda-host-device-constexpr, an unattributed constexpr function is
// treated as implicitly __host__ __device__, unless:
// * it is a variadic function (device-side variadic functions are not
@@ -521,7 +576,7 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
if (!getSourceManager().isInSystemHeader(Match->getLocation())) {
Diag(NewD->getLocation(),
diag::err_cuda_unattributed_constexpr_cannot_overload_device)
- << NewD->getName();
+ << NewD;
Diag(Match->getLocation(),
diag::note_cuda_conflicting_device_function_declared_here);
}
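
Sketch of the conflict diagnosed here (hypothetical declarations, assuming -fcuda-host-device-constexpr): the unattributed constexpr function would become implicitly __host__ __device__ and collide with the existing __device__ overload; the change above prints the declaration itself rather than just its name.

    __device__ int pick();
    constexpr int pick() { return 0; }  // err_cuda_unattributed_constexpr_cannot_overload_device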
@@ -790,9 +845,12 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// If the caller is known-emitted, mark the callee as known-emitted.
// Otherwise, mark the call in our call graph so we can traverse it later.
bool CallerKnownEmitted = IsKnownEmitted(*this, Caller);
- if (CallerKnownEmitted)
- MarkKnownEmitted(*this, Caller, Callee, Loc);
- else {
+ if (CallerKnownEmitted) {
+ // Host-side references to a __global__ function refer to the stub, so the
+ // function itself is never emitted and therefore should not be marked.
+ if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
+ MarkKnownEmitted(*this, Caller, Callee, Loc);
+ } else {
// If we have
// host fn calls kernel fn calls host+device,
// the HD function does not get instantiated on the host. We model this by
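
Roughly the situation the new guard covers (a sketch, hypothetical names): on the host side, references to a __global__ function bind to its launch stub, so the kernel itself is never emitted for the host and must not be marked emitted.

    __global__ void kern() {}

    void host_fn() {
      kern<<<1, 1>>>();  // launch goes through the stub
      auto *fp = kern;   // likewise: address of the stub, not of device code
    }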
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 6da4d2a26191..f2fad825c3e7 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -24,7 +24,7 @@
#include "llvm/ADT/STLExtras.h"
using namespace clang;
-/// \brief Find the current instantiation that associated with the given type.
+/// Find the current instantiation that is associated with the given type.
static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
DeclContext *CurContext) {
if (T.isNull())
@@ -44,7 +44,7 @@ static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
return nullptr;
}
-/// \brief Compute the DeclContext that is associated with the given type.
+/// Compute the DeclContext that is associated with the given type.
///
/// \param T the type for which we are attempting to find a DeclContext.
///
@@ -59,7 +59,7 @@ DeclContext *Sema::computeDeclContext(QualType T) {
return ::getCurrentInstantiationOf(T, CurContext);
}
-/// \brief Compute the DeclContext that is associated with the given
+/// Compute the DeclContext that is associated with the given
/// scope specifier.
///
/// \param SS the C++ scope specifier as it appears in the source
@@ -172,7 +172,7 @@ bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
return SS.getScopeRep()->isDependent();
}
-/// \brief If the given nested name specifier refers to the current
+/// If the given nested name specifier refers to the current
/// instantiation, return the declaration that corresponds to that
/// current instantiation (C++0x [temp.dep.type]p1).
///
@@ -188,7 +188,7 @@ CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
return ::getCurrentInstantiationOf(T, CurContext);
}
-/// \brief Require that the context specified by SS be complete.
+/// Require that the context specified by SS be complete.
///
/// If SS refers to a type, this routine checks whether the type is
/// complete enough (or can be made complete enough) for name lookup
@@ -305,7 +305,7 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
return false;
}
-/// \brief Determines whether the given declaration is an valid acceptable
+/// Determines whether the given declaration is a valid acceptable
/// result for name lookup of a nested-name-specifier.
/// \param SD Declaration checked for nested-name-specifier.
/// \param IsExtension If not null and the declaration is accepted as an
@@ -350,7 +350,7 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD,
return false;
}
-/// \brief If the given nested-name-specifier begins with a bare identifier
+/// If the given nested-name-specifier begins with a bare identifier
/// (e.g., Base::), perform name lookup for that identifier as a
/// nested-name-specifier within the given scope, and return the result of that
/// name lookup.
@@ -443,7 +443,7 @@ class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
}
-/// \brief Build a new nested-name-specifier for "identifier::", as described
+/// Build a new nested-name-specifier for "identifier::", as described
/// by ActOnCXXNestedNameSpecifier.
///
/// \param S Scope in which the nested-name-specifier occurs.
@@ -846,6 +846,9 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ if (T.isNull())
+ return true;
+
if (!T->isDependentType() && !T->getAs<TagType>()) {
Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class_or_namespace)
<< T << getLangOpts().CPlusPlus;
@@ -967,7 +970,7 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
}
namespace {
- /// \brief A structure that stores a nested-name-specifier annotation,
+ /// A structure that stores a nested-name-specifier annotation,
/// including both the nested-name-specifier
struct NestedNameSpecifierAnnotation {
NestedNameSpecifier *NNS;
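
The null-type early return added in this file guards against BuildDecltypeType failing; for reference, the construct being annotated looks like this (sketch):

    struct S { static int value; };
    S s;
    int n = decltype(s)::value;  // 'decltype(s)::' is the nested-name-specifier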
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index ad6348685b64..b7f4629fbab7 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -33,10 +33,16 @@ using namespace clang;
enum TryCastResult {
TC_NotApplicable, ///< The cast method is not applicable.
TC_Success, ///< The cast method is appropriate and successful.
+ TC_Extension, ///< The cast method is appropriate and accepted as a
+ ///< language extension.
TC_Failed ///< The cast method is appropriate, but failed. A
///< diagnostic has been emitted.
};
+static bool isValidCast(TryCastResult TCR) {
+ return TCR == TC_Success || TCR == TC_Extension;
+}
+
enum CastType {
CT_Const, ///< const_cast
CT_Static, ///< static_cast
@@ -83,6 +89,14 @@ namespace {
void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
void CheckCStyleCast();
+ void updatePartOfExplicitCastFlags(CastExpr *CE) {
+ // Walk down from the CE to the OrigSrcExpr, and mark all immediate
+ // ImplicitCastExpr's as being part of ExplicitCastExpr. The original CE
+ // (which is a ExplicitCastExpr), and the OrigSrcExpr are not touched.
+ for (; auto *ICE = dyn_cast<ImplicitCastExpr>(CE->getSubExpr()); CE = ICE)
+ ICE->setIsPartOfExplicitCast(true);
+ }
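
As an illustration (not part of the patch): in the snippet below, Sema inserts an lvalue-to-rvalue ImplicitCastExpr beneath the CStyleCastExpr, and the walk above marks that intermediate node with setIsPartOfExplicitCast(true).

    int i = 0;
    double d = (double)i;  // the ImplicitCastExpr under the explicit cast is flagged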
+
/// Complete an apparently-successful cast operation that yields
/// the given expression.
ExprResult complete(CastExpr *castExpr) {
@@ -94,6 +108,7 @@ namespace {
CK_Dependent, castExpr, nullptr,
castExpr->getValueKind());
}
+ updatePartOfExplicitCastFlags(castExpr);
return castExpr;
}
@@ -267,6 +282,12 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
AngleBrackets));
case tok::kw_dynamic_cast: {
+ // OpenCL C++ 1.0 s2.9: dynamic_cast is not supported.
+ if (getLangOpts().OpenCLCPlusPlus) {
+ return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
+ << "dynamic_cast");
+ }
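
A sketch of what this rejects under OpenCL C++ (types hypothetical):

    class Base { public: virtual ~Base(); };
    class Derived : public Base {};
    Base *b = nullptr;
    // Derived *d = dynamic_cast<Derived *>(b);  // err_openclcxx_not_supported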
+
if (!TypeDependent) {
Op.CheckDynamicCast();
if (Op.SrcExpr.isInvalid())
@@ -425,95 +446,114 @@ static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
}
}
-/// UnwrapDissimilarPointerTypes - Like Sema::UnwrapSimilarPointerTypes,
-/// this removes one level of indirection from both types, provided that they're
-/// the same kind of pointer (plain or to-member). Unlike the Sema function,
-/// this one doesn't care if the two pointers-to-member don't point into the
-/// same class. This is because CastsAwayConstness doesn't care.
-/// And additionally, it handles C++ references. If both the types are
-/// references, then their pointee types are returned,
-/// else if only one of them is reference, it's pointee type is returned,
-/// and the other type is returned as-is.
-static bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
- const PointerType *T1PtrType = T1->getAs<PointerType>(),
- *T2PtrType = T2->getAs<PointerType>();
- if (T1PtrType && T2PtrType) {
- T1 = T1PtrType->getPointeeType();
- T2 = T2PtrType->getPointeeType();
- return true;
- }
- const ObjCObjectPointerType *T1ObjCPtrType =
- T1->getAs<ObjCObjectPointerType>(),
- *T2ObjCPtrType =
- T2->getAs<ObjCObjectPointerType>();
- if (T1ObjCPtrType) {
- if (T2ObjCPtrType) {
- T1 = T1ObjCPtrType->getPointeeType();
- T2 = T2ObjCPtrType->getPointeeType();
- return true;
- }
- else if (T2PtrType) {
- T1 = T1ObjCPtrType->getPointeeType();
- T2 = T2PtrType->getPointeeType();
- return true;
- }
- }
- else if (T2ObjCPtrType) {
- if (T1PtrType) {
- T2 = T2ObjCPtrType->getPointeeType();
- T1 = T1PtrType->getPointeeType();
- return true;
- }
- }
-
- const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
- *T2MPType = T2->getAs<MemberPointerType>();
- if (T1MPType && T2MPType) {
- T1 = T1MPType->getPointeeType();
- T2 = T2MPType->getPointeeType();
- return true;
- }
-
- const BlockPointerType *T1BPType = T1->getAs<BlockPointerType>(),
- *T2BPType = T2->getAs<BlockPointerType>();
- if (T1BPType && T2BPType) {
- T1 = T1BPType->getPointeeType();
- T2 = T2BPType->getPointeeType();
- return true;
- }
-
- const LValueReferenceType *T1RefType = T1->getAs<LValueReferenceType>(),
- *T2RefType = T2->getAs<LValueReferenceType>();
- if (T1RefType && T2RefType) {
- T1 = T1RefType->getPointeeType();
- T2 = T2RefType->getPointeeType();
- return true;
- }
+namespace {
+/// The kind of unwrapping we did when determining whether a conversion casts
+/// away constness.
+enum CastAwayConstnessKind {
+ /// The conversion does not cast away constness.
+ CACK_None = 0,
+ /// We unwrapped similar types.
+ CACK_Similar = 1,
+ /// We unwrapped dissimilar types with similar representations (e.g., a pointer
+ /// versus an Objective-C object pointer).
+ CACK_SimilarKind = 2,
+ /// We unwrapped representationally-unrelated types, such as a pointer versus
+ /// a pointer-to-member.
+ CACK_Incoherent = 3,
+};
+}
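
Hedged examples of the non-None kinds, written as source/destination pairs:

    // const int **      -> int **  : two pointer levels, CACK_Similar
    // const id *        -> int **  : ObjC object pointer vs plain pointer,
    //                                CACK_SimilarKind
    // const int (*)[4]  -> int **  : array level vs pointer level,
    //                                CACK_Incoherent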
- if (T1RefType) {
- T1 = T1RefType->getPointeeType();
- // T2 = T2;
- return true;
+/// Unwrap one level of types for CastsAwayConstness.
+///
+/// Like Sema::UnwrapSimilarTypes, this removes one level of indirection from
+/// both types, provided that they're both pointer-like or array-like. Unlike
+/// the Sema function, this one doesn't care if the unwrapped pieces are related.
+///
+/// This function may remove additional levels as necessary for correctness:
+/// the resulting T1 is unwrapped sufficiently that it is never an array type,
+/// so that its qualifiers can be directly compared to those of T2 (which will
+/// have the combined set of qualifiers from all intermediate levels of T2),
+/// as (effectively) required by [expr.const.cast]p7 replacing T1's qualifiers
+/// with those from T2.
+static CastAwayConstnessKind
+unwrapCastAwayConstnessLevel(ASTContext &Context, QualType &T1, QualType &T2) {
+ enum { None, Ptr, MemPtr, BlockPtr, Array };
+ auto Classify = [](QualType T) {
+ if (T->isAnyPointerType()) return Ptr;
+ if (T->isMemberPointerType()) return MemPtr;
+ if (T->isBlockPointerType()) return BlockPtr;
+ // We somewhat-arbitrarily don't look through VLA types here. This is at
+ // least consistent with the behavior of UnwrapSimilarTypes.
+ if (T->isConstantArrayType() || T->isIncompleteArrayType()) return Array;
+ return None;
+ };
+
+ auto Unwrap = [&](QualType T) {
+ if (auto *AT = Context.getAsArrayType(T))
+ return AT->getElementType();
+ return T->getPointeeType();
+ };
+
+ CastAwayConstnessKind Kind;
+
+ if (T2->isReferenceType()) {
+ // Special case: if the destination type is a reference type, unwrap it as
+ // the first level. (The source will have been an lvalue expression in this
+ // case, so there is no corresponding "reference to" in T1 to remove.) This
+ // simulates removing a "pointer to" from both sides.
+ T2 = T2->getPointeeType();
+ Kind = CastAwayConstnessKind::CACK_Similar;
+ } else if (Context.UnwrapSimilarTypes(T1, T2)) {
+ Kind = CastAwayConstnessKind::CACK_Similar;
+ } else {
+ // Try unwrapping mismatching levels.
+ int T1Class = Classify(T1);
+ if (T1Class == None)
+ return CastAwayConstnessKind::CACK_None;
+
+ int T2Class = Classify(T2);
+ if (T2Class == None)
+ return CastAwayConstnessKind::CACK_None;
+
+ T1 = Unwrap(T1);
+ T2 = Unwrap(T2);
+ Kind = T1Class == T2Class ? CastAwayConstnessKind::CACK_SimilarKind
+ : CastAwayConstnessKind::CACK_Incoherent;
}
- if (T2RefType) {
- // T1 = T1;
- T2 = T2RefType->getPointeeType();
- return true;
+ // We've unwrapped at least one level. If the resulting T1 is a (possibly
+ // multidimensional) array type, any qualifier on any matching layer of
+ // T2 is considered to correspond to T1. Decompose down to the element
+ // type of T1 so that we can compare properly.
+ while (true) {
+ Context.UnwrapSimilarArrayTypes(T1, T2);
+
+ if (Classify(T1) != Array)
+ break;
+
+ auto T2Class = Classify(T2);
+ if (T2Class == None)
+ break;
+
+ if (T2Class != Array)
+ Kind = CastAwayConstnessKind::CACK_Incoherent;
+ else if (Kind != CastAwayConstnessKind::CACK_Incoherent)
+ Kind = CastAwayConstnessKind::CACK_SimilarKind;
+
+ T1 = Unwrap(T1);
+ T2 = Unwrap(T2).withCVRQualifiers(T2.getCVRQualifiers());
}
- return false;
+ return Kind;
}
-/// CastsAwayConstness - Check if the pointer conversion from SrcType to
-/// DestType casts away constness as defined in C++ 5.2.11p8ff. This is used by
-/// the cast checkers. Both arguments must denote pointer (possibly to member)
-/// types.
+/// Check if the pointer conversion from SrcType to DestType casts away
+/// constness as defined in C++ [expr.const.cast]. This is used by the cast
+/// checkers. Both arguments must denote pointer (possibly to member) types.
///
/// \param CheckCVR Whether to check for const/volatile/restrict qualifiers.
-///
/// \param CheckObjCLifetime Whether to check Objective-C lifetime qualifiers.
-static bool
+static CastAwayConstnessKind
CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
bool CheckCVR, bool CheckObjCLifetime,
QualType *TheOffendingSrcType = nullptr,
@@ -521,33 +561,35 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
Qualifiers *CastAwayQualifiers = nullptr) {
// If the only checking we care about is for Objective-C lifetime qualifiers,
// and we're not in ObjC mode, there's nothing to check.
- if (!CheckCVR && CheckObjCLifetime &&
- !Self.Context.getLangOpts().ObjC1)
- return false;
-
- // Casting away constness is defined in C++ 5.2.11p8 with reference to
- // C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
- // the rules are non-trivial. So first we construct Tcv *...cv* as described
- // in C++ 5.2.11p8.
- assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() ||
- SrcType->isBlockPointerType() ||
- DestType->isLValueReferenceType()) &&
- "Source type is not pointer or pointer to member.");
- assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() ||
- DestType->isBlockPointerType() ||
- DestType->isLValueReferenceType()) &&
- "Destination type is not pointer or pointer to member, or reference.");
+ if (!CheckCVR && CheckObjCLifetime && !Self.Context.getLangOpts().ObjC1)
+ return CastAwayConstnessKind::CACK_None;
+
+ if (!DestType->isReferenceType()) {
+ assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) &&
+ "Source type is not pointer or pointer to member.");
+ assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() ||
+ DestType->isBlockPointerType()) &&
+ "Destination type is not pointer or pointer to member.");
+ }
QualType UnwrappedSrcType = Self.Context.getCanonicalType(SrcType),
UnwrappedDestType = Self.Context.getCanonicalType(DestType);
- SmallVector<Qualifiers, 8> cv1, cv2;
// Find the qualifiers. We only care about cvr-qualifiers for the
// purpose of this check, because other qualifiers (address spaces,
// Objective-C GC, etc.) are part of the type's identity.
QualType PrevUnwrappedSrcType = UnwrappedSrcType;
QualType PrevUnwrappedDestType = UnwrappedDestType;
- while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
+ auto WorstKind = CastAwayConstnessKind::CACK_Similar;
+ bool AllConstSoFar = true;
+ while (auto Kind = unwrapCastAwayConstnessLevel(
+ Self.Context, UnwrappedSrcType, UnwrappedDestType)) {
+ // Track the worst kind of unwrap we needed to do before we found a
+ // problem.
+ if (Kind > WorstKind)
+ WorstKind = Kind;
+
// Determine the relevant qualifiers at this level.
Qualifiers SrcQuals, DestQuals;
Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals);
@@ -560,51 +602,71 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
UnwrappedDestType->isObjCObjectType())
SrcQuals.removeConst();
- Qualifiers RetainedSrcQuals, RetainedDestQuals;
if (CheckCVR) {
- RetainedSrcQuals.setCVRQualifiers(SrcQuals.getCVRQualifiers());
- RetainedDestQuals.setCVRQualifiers(DestQuals.getCVRQualifiers());
+ Qualifiers SrcCvrQuals =
+ Qualifiers::fromCVRMask(SrcQuals.getCVRQualifiers());
+ Qualifiers DestCvrQuals =
+ Qualifiers::fromCVRMask(DestQuals.getCVRQualifiers());
+
+ if (SrcCvrQuals != DestCvrQuals) {
+ if (CastAwayQualifiers)
+ *CastAwayQualifiers = SrcCvrQuals - DestCvrQuals;
+
+ // If we removed a cvr-qualifier, this is casting away 'constness'.
+ if (!DestCvrQuals.compatiblyIncludes(SrcCvrQuals)) {
+ if (TheOffendingSrcType)
+ *TheOffendingSrcType = PrevUnwrappedSrcType;
+ if (TheOffendingDestType)
+ *TheOffendingDestType = PrevUnwrappedDestType;
+ return WorstKind;
+ }
- if (RetainedSrcQuals != RetainedDestQuals && TheOffendingSrcType &&
- TheOffendingDestType && CastAwayQualifiers) {
- *TheOffendingSrcType = PrevUnwrappedSrcType;
- *TheOffendingDestType = PrevUnwrappedDestType;
- *CastAwayQualifiers = RetainedSrcQuals - RetainedDestQuals;
+ // If any prior level was not 'const', this is also casting away
+ // 'constness'. We noted the outermost type missing a 'const' already.
+ if (!AllConstSoFar)
+ return WorstKind;
}
}
-
+
if (CheckObjCLifetime &&
!DestQuals.compatiblyIncludesObjCLifetime(SrcQuals))
- return true;
-
- cv1.push_back(RetainedSrcQuals);
- cv2.push_back(RetainedDestQuals);
+ return WorstKind;
+
+ // If we found our first non-const-qualified type, this may be the place
+ // where things start to go wrong.
+ if (AllConstSoFar && !DestQuals.hasConst()) {
+ AllConstSoFar = false;
+ if (TheOffendingSrcType)
+ *TheOffendingSrcType = PrevUnwrappedSrcType;
+ if (TheOffendingDestType)
+ *TheOffendingDestType = PrevUnwrappedDestType;
+ }
PrevUnwrappedSrcType = UnwrappedSrcType;
PrevUnwrappedDestType = UnwrappedDestType;
}
- if (cv1.empty())
- return false;
- // Construct void pointers with those qualifiers (in reverse order of
- // unwrapping, of course).
- QualType SrcConstruct = Self.Context.VoidTy;
- QualType DestConstruct = Self.Context.VoidTy;
- ASTContext &Context = Self.Context;
- for (SmallVectorImpl<Qualifiers>::reverse_iterator i1 = cv1.rbegin(),
- i2 = cv2.rbegin();
- i1 != cv1.rend(); ++i1, ++i2) {
- SrcConstruct
- = Context.getPointerType(Context.getQualifiedType(SrcConstruct, *i1));
- DestConstruct
- = Context.getPointerType(Context.getQualifiedType(DestConstruct, *i2));
- }
-
- // Test if they're compatible.
- bool ObjCLifetimeConversion;
- return SrcConstruct != DestConstruct &&
- !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false,
- ObjCLifetimeConversion);
+ return CastAwayConstnessKind::CACK_None;
+}
+
+static TryCastResult getCastAwayConstnessCastKind(CastAwayConstnessKind CACK,
+ unsigned &DiagID) {
+ switch (CACK) {
+ case CastAwayConstnessKind::CACK_None:
+ llvm_unreachable("did not cast away constness");
+
+ case CastAwayConstnessKind::CACK_Similar:
+ // FIXME: Accept these as an extension too?
+ case CastAwayConstnessKind::CACK_SimilarKind:
+ DiagID = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+
+ case CastAwayConstnessKind::CACK_Incoherent:
+ DiagID = diag::ext_bad_cxx_cast_qualifiers_away_incoherent;
+ return TC_Extension;
+ }
+
+ llvm_unreachable("unexpected cast away constness kind");
}
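
Concretely (a sketch; the second case is what the new TC_Extension path accepts):

    const int *p = nullptr;
    // int *q1 = reinterpret_cast<int *>(p);   // CACK_Similar: still a hard error
    const int (*pa)[4] = nullptr;
    int **q2 = reinterpret_cast<int **>(pa);   // CACK_Incoherent: accepted with
                                               // the new extension warning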
/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
@@ -772,12 +834,13 @@ void CastOperation::CheckConstCast() {
return;
unsigned msg = diag::err_bad_cxx_cast_generic;
- if (TryConstCast(Self, SrcExpr, DestType, /*CStyle*/false, msg) != TC_Success
- && msg != 0) {
+ auto TCR = TryConstCast(Self, SrcExpr, DestType, /*CStyle*/ false, msg);
+ if (TCR != TC_Success && msg != 0) {
Self.Diag(OpRange.getBegin(), msg) << CT_Const
<< SrcExpr.get()->getType() << DestType << OpRange;
- SrcExpr = ExprError();
}
+ if (!isValidCast(TCR))
+ SrcExpr = ExprError();
}
/// Check that a reinterpret_cast\<DestType\>(SrcExpr) is not used as upcast
@@ -890,8 +953,7 @@ void CastOperation::CheckReinterpretCast() {
TryCastResult tcr =
TryReinterpretCast(Self, SrcExpr, DestType,
/*CStyle*/false, OpRange, msg, Kind);
- if (tcr != TC_Success && msg != 0)
- {
+ if (tcr != TC_Success && msg != 0) {
if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
return;
if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
@@ -905,11 +967,14 @@ void CastOperation::CheckReinterpretCast() {
diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(),
DestType, /*listInitialization=*/false);
}
- SrcExpr = ExprError();
- } else if (tcr == TC_Success) {
+ }
+
+ if (isValidCast(tcr)) {
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
checkObjCConversion(Sema::CCK_OtherCast);
DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange);
+ } else {
+ SrcExpr = ExprError();
}
}
@@ -967,14 +1032,15 @@ void CastOperation::CheckStaticCast() {
diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType,
/*listInitialization=*/false);
}
- SrcExpr = ExprError();
- } else if (tcr == TC_Success) {
+ }
+
+ if (isValidCast(tcr)) {
if (Kind == CK_BitCast)
checkCastAlign();
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
checkObjCConversion(Sema::CCK_OtherCast);
- } else if (Kind == CK_BitCast) {
- checkCastAlign();
+ } else {
+ SrcExpr = ExprError();
}
}
@@ -1145,7 +1211,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
}
}
}
- // Allow arbitray objective-c pointer conversion with static casts.
+ // Allow arbitrary objective-c pointer conversion with static casts.
if (SrcType->isObjCObjectPointerType() &&
DestType->isObjCObjectPointerType()) {
Kind = CK_BitCast;
@@ -1663,29 +1729,14 @@ static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
msg = diag::err_bad_const_cast_dest;
return TC_NotApplicable;
}
- SrcType = Self.Context.getCanonicalType(SrcType);
-
- // Unwrap the pointers. Ignore qualifiers. Terminate early if the types are
- // completely equal.
- // C++ 5.2.11p3 describes the core semantics of const_cast. All cv specifiers
- // in multi-level pointers may change, but the level count must be the same,
- // as must be the final pointee type.
- while (SrcType != DestType &&
- Self.Context.UnwrapSimilarPointerTypes(SrcType, DestType)) {
- Qualifiers SrcQuals, DestQuals;
- SrcType = Self.Context.getUnqualifiedArrayType(SrcType, SrcQuals);
- DestType = Self.Context.getUnqualifiedArrayType(DestType, DestQuals);
-
- // const_cast is permitted to strip cvr-qualifiers, only. Make sure that
- // the other qualifiers (e.g., address spaces) are identical.
- SrcQuals.removeCVRQualifiers();
- DestQuals.removeCVRQualifiers();
- if (SrcQuals != DestQuals)
- return TC_NotApplicable;
- }
- // Since we're dealing in canonical types, the remainder must be the same.
- if (SrcType != DestType)
+ // C++ [expr.const.cast]p3:
+ // "For two similar types T1 and T2, [...]"
+ //
+ // We only allow a const_cast to change cvr-qualifiers, not other kinds of
+ // type qualifiers. (Likewise, we ignore other changes when determining
+ // whether a cast casts away constness.)
+ if (!Self.Context.hasCvrSimilarType(SrcType, DestType))
return TC_NotApplicable;
if (NeedToMaterializeTemporary)
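
A sketch of the cvr-similarity requirement cited above:

    const volatile int *src = nullptr;
    int *ok = const_cast<int *>(src);  // only cvr-qualifiers change: applicable
    // A change to any non-cvr qualifier (e.g. an address space) at any level
    // makes the types not cvr-similar, so TryConstCast returns TC_NotApplicable.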
@@ -1913,6 +1964,12 @@ static bool fixOverloadedReinterpretCastExpr(Sema &Self, QualType DestType,
return Result.isUsable();
}
+static bool IsAddressSpaceConversion(QualType SrcType, QualType DestType) {
+ return SrcType->isPointerType() && DestType->isPointerType() &&
+ SrcType->getAs<PointerType>()->getPointeeType().getAddressSpace() !=
+ DestType->getAs<PointerType>()->getPointeeType().getAddressSpace();
+}
+
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
SourceRange OpRange,
@@ -1994,16 +2051,6 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
SrcMemPtr->isMemberFunctionPointer())
return TC_NotApplicable;
- // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
- // constness.
- // A reinterpret_cast followed by a const_cast can, though, so in C-style,
- // we accept it.
- if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
- /*CheckObjCLifetime=*/CStyle)) {
- msg = diag::err_bad_cxx_cast_qualifiers_away;
- return TC_Failed;
- }
-
if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// We need to determine the inheritance model that the class will use if
// haven't yet.
@@ -2018,6 +2065,15 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Failed;
}
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
+ // constness.
+ // A reinterpret_cast followed by a const_cast can, though, so in C-style,
+ // we accept it.
+ if (auto CACK =
+ CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle))
+ return getCastAwayConstnessCastKind(CACK, msg);
+
// A valid member pointer cast.
assert(!IsLValueCast);
Kind = CK_ReinterpretMemberPointer;
@@ -2134,19 +2190,19 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
}
- // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
- // The C-style cast operator can.
- if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
- /*CheckObjCLifetime=*/CStyle)) {
- msg = diag::err_bad_cxx_cast_qualifiers_away;
- return TC_Failed;
- }
-
// Cannot convert between block pointers and Objective-C object pointers.
if ((SrcType->isBlockPointerType() && DestType->isObjCObjectPointerType()) ||
(DestType->isBlockPointerType() && SrcType->isObjCObjectPointerType()))
return TC_NotApplicable;
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
+ // The C-style cast operator can.
+ TryCastResult SuccessResult = TC_Success;
+ if (auto CACK =
+ CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle))
+ SuccessResult = getCastAwayConstnessCastKind(CACK, msg);
+
if (IsLValueCast) {
Kind = CK_LValueBitCast;
} else if (DestType->isObjCObjectPointerType()) {
@@ -2157,6 +2213,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
} else {
Kind = CK_BitCast;
}
+ } else if (IsAddressSpaceConversion(SrcType, DestType)) {
+ Kind = CK_AddressSpaceConversion;
} else {
Kind = CK_BitCast;
}
@@ -2164,7 +2222,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// Any pointer can be cast to an Objective-C pointer type with a C-style
// cast.
if (CStyle && DestType->isObjCObjectPointerType()) {
- return TC_Success;
+ return SuccessResult;
}
if (CStyle)
DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
@@ -2178,7 +2236,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
if (DestType->isFunctionPointerType()) {
// C++ 5.2.10p6: A pointer to a function can be explicitly converted to
// a pointer to a function of a different type.
- return TC_Success;
+ return SuccessResult;
}
// C++0x 5.2.10p8: Converting a pointer to a function into a pointer to
@@ -2191,7 +2249,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
Self.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
<< OpRange;
- return TC_Success;
+ return SuccessResult;
}
if (DestType->isFunctionPointerType()) {
@@ -2200,7 +2258,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
Self.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
<< OpRange;
- return TC_Success;
+ return SuccessResult;
}
// C++ 5.2.10p7: A pointer to an object can be explicitly converted to
@@ -2208,8 +2266,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// Void pointers are not specified, but supported by every compiler out there.
// So we finish by allowing everything that remains - it's got to be two
// object pointers.
- return TC_Success;
-}
+ return SuccessResult;
+}
void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
bool ListInitialization) {
@@ -2289,7 +2347,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
/*CStyle*/true, msg);
if (SrcExpr.isInvalid())
return;
- if (tcr == TC_Success)
+ if (isValidCast(tcr))
Kind = CK_NoOp;
Sema::CheckedConversionKind CCK
@@ -2312,7 +2370,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
}
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
- tcr == TC_Success)
+ isValidCast(tcr))
checkObjCConversion(CCK);
if (tcr != TC_Success && msg != 0) {
@@ -2336,13 +2394,14 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
diagnoseBadCast(Self, msg, (FunctionalStyle ? CT_Functional : CT_CStyle),
OpRange, SrcExpr.get(), DestType, ListInitialization);
}
- } else if (Kind == CK_BitCast) {
- checkCastAlign();
}
- // Clear out SrcExpr if there was a fatal error.
- if (tcr != TC_Success)
+ if (isValidCast(tcr)) {
+ if (Kind == CK_BitCast)
+ checkCastAlign();
+ } else {
SrcExpr = ExprError();
+ }
}
/// DiagnoseBadFunctionCast - Warn whenever a function call is cast to a
@@ -2627,11 +2686,13 @@ static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
QualType TheOffendingSrcType, TheOffendingDestType;
Qualifiers CastAwayQualifiers;
- if (!CastsAwayConstness(Self, SrcType, DestType, true, false,
- &TheOffendingSrcType, &TheOffendingDestType,
- &CastAwayQualifiers))
+ if (CastsAwayConstness(Self, SrcType, DestType, true, false,
+ &TheOffendingSrcType, &TheOffendingDestType,
+ &CastAwayQualifiers) !=
+ CastAwayConstnessKind::CACK_Similar)
return;
+ // FIXME: 'restrict' is not properly handled here.
int qualifiers = -1;
if (CastAwayQualifiers.hasConst() && CastAwayQualifiers.hasVolatile()) {
qualifiers = 0;
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 803f87b3c568..8d953b6ef341 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -28,6 +28,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/NSAPI.h"
+#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
@@ -196,35 +197,47 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
// First two arguments should be integers.
for (unsigned I = 0; I < 2; ++I) {
- Expr *Arg = TheCall->getArg(I);
- QualType Ty = Arg->getType();
+ ExprResult Arg = TheCall->getArg(I);
+ QualType Ty = Arg.get()->getType();
if (!Ty->isIntegerType()) {
- S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int)
- << Ty << Arg->getSourceRange();
+ S.Diag(Arg.get()->getLocStart(), diag::err_overflow_builtin_must_be_int)
+ << Ty << Arg.get()->getSourceRange();
return true;
}
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ S.getASTContext(), Ty, /*consume*/ false);
+ Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+ TheCall->setArg(I, Arg.get());
}
// Third argument should be a pointer to a non-const integer.
// IRGen correctly handles volatile, restrict, and address spaces, and
// the other qualifiers aren't possible.
{
- Expr *Arg = TheCall->getArg(2);
- QualType Ty = Arg->getType();
+ ExprResult Arg = TheCall->getArg(2);
+ QualType Ty = Arg.get()->getType();
const auto *PtrTy = Ty->getAs<PointerType>();
if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
!PtrTy->getPointeeType().isConstQualified())) {
- S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int)
- << Ty << Arg->getSourceRange();
+ S.Diag(Arg.get()->getLocStart(),
+ diag::err_overflow_builtin_must_be_ptr_int)
+ << Ty << Arg.get()->getSourceRange();
return true;
}
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ S.getASTContext(), Ty, /*consume*/ false);
+ Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+ TheCall->setArg(2, Arg.get());
}
-
return false;
}
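
Usage sketch for the builtin being checked; with the added PerformCopyInitialization, the two integer operands are now converted the way ordinary function arguments are:

    #include <climits>

    bool saturating_inc(long *out) {
      int a = INT_MAX;
      // args 1-2 must be integers; arg 3 a pointer to a non-const integer
      return __builtin_add_overflow(a, 1, out);
    }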
static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
- CallExpr *TheCall, unsigned SizeIdx,
+ CallExpr *TheCall, unsigned SizeIdx,
unsigned DstSizeIdx) {
if (TheCall->getNumArgs() <= SizeIdx ||
TheCall->getNumArgs() <= DstSizeIdx)
@@ -683,7 +696,7 @@ static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
return false;
}
-// \brief Performs semantic analysis for the read/write_pipe call.
+// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
@@ -737,7 +750,7 @@ static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
return false;
}
-// \brief Performs a semantic analysis on the {work_group_/sub_group_
+// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
@@ -766,7 +779,7 @@ static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
return false;
}
-// \brief Performs a semantic analysis on {work_group_/sub_group_
+// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
@@ -789,7 +802,7 @@ static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
return false;
}
-// \brief Performs a semantic analysis on the call to built-in Pipe
+// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
@@ -807,8 +820,8 @@ static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
return false;
}
-// \brief OpenCL v2.0 s6.13.9 - Address space qualifier functions.
-// \brief Performs semantic analysis for the to_global/local/private call.
+// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
+// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
@@ -850,6 +863,20 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
return false;
}
+// Emit an error and return true if the current architecture is not in the list
+// of supported architectures.
+static bool
+CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
+ ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
+ llvm::Triple::ArchType CurArch =
+ S.getASTContext().getTargetInfo().getTriple().getArch();
+ if (llvm::is_contained(SupportedArchs, CurArch))
+ return false;
+ S.Diag(TheCall->getLocStart(), diag::err_builtin_target_unsupported)
+ << TheCall->getSourceRange();
+ return true;
+}
+
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CallExpr *TheCall) {
@@ -900,6 +927,33 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
break;
}
+
+ // The acquire, release, and no fence variants are ARM and AArch64 only.
+ case Builtin::BI_interlockedbittestandset_acq:
+ case Builtin::BI_interlockedbittestandset_rel:
+ case Builtin::BI_interlockedbittestandset_nf:
+ case Builtin::BI_interlockedbittestandreset_acq:
+ case Builtin::BI_interlockedbittestandreset_rel:
+ case Builtin::BI_interlockedbittestandreset_nf:
+ if (CheckBuiltinTargetSupport(
+ *this, BuiltinID, TheCall,
+ {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
+ return ExprError();
+ break;
+
+ // The 64-bit bittest variants are x64, ARM, and AArch64 only.
+ case Builtin::BI_bittest64:
+ case Builtin::BI_bittestandcomplement64:
+ case Builtin::BI_bittestandreset64:
+ case Builtin::BI_bittestandset64:
+ case Builtin::BI_interlockedbittestandreset64:
+ case Builtin::BI_interlockedbittestandset64:
+ if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
+ {llvm::Triple::x86_64, llvm::Triple::arm,
+ llvm::Triple::thumb, llvm::Triple::aarch64}))
+ return ExprError();
+ break;
+
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
@@ -918,6 +972,9 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_isinf_sign:
case Builtin::BI__builtin_isnan:
case Builtin::BI__builtin_isnormal:
+ case Builtin::BI__builtin_signbit:
+ case Builtin::BI__builtin_signbitf:
+ case Builtin::BI__builtin_signbitl:
if (SemaBuiltinFPClassification(TheCall, 1))
return ExprError();
break;
@@ -1097,19 +1154,70 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_operator_new:
- case Builtin::BI__builtin_operator_delete:
- if (!getLangOpts().CPlusPlus) {
- Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
- << (BuiltinID == Builtin::BI__builtin_operator_new
- ? "__builtin_operator_new"
- : "__builtin_operator_delete")
- << "C++";
+ case Builtin::BI__builtin_operator_delete: {
+ bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
+ ExprResult Res =
+ SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
+ if (Res.isInvalid())
+ CorrectDelayedTyposInExpr(TheCallResult.get());
+ return Res;
+ }
+ case Builtin::BI__builtin_dump_struct: {
+ // We first want to ensure we are called with 2 arguments
+ if (checkArgCount(*this, TheCall, 2))
+ return ExprError();
+ // Ensure that the first argument is of type 'struct XX *'
+ const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
+ const QualType PtrArgType = PtrArg->getType();
+ if (!PtrArgType->isPointerType() ||
+ !PtrArgType->getPointeeType()->isRecordType()) {
+ Diag(PtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
+ << "structure pointer";
+ return ExprError();
+ }
+
+ // Ensure that the second argument is of type 'FunctionType'
+ const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
+ const QualType FnPtrArgType = FnPtrArg->getType();
+ if (!FnPtrArgType->isPointerType()) {
+ Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
+ << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
- // CodeGen assumes it can find the global new and delete to call,
- // so ensure that they are declared.
- DeclareGlobalNewDelete();
+
+ const auto *FuncType =
+ FnPtrArgType->getPointeeType()->getAs<FunctionType>();
+
+ if (!FuncType) {
+ Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
+ << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
+ return ExprError();
+ }
+
+ if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
+ if (!FT->getNumParams()) {
+ Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
+ << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
+ return ExprError();
+ }
+ QualType PT = FT->getParamType(0);
+ if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
+ !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
+ !PT->getPointeeType().isConstQualified()) {
+ Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
+ << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
+ return ExprError();
+ }
+ }
+
+ TheCall->setType(Context.IntTy);
break;
+ }
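
A usage sketch matching the checks above: a record pointer plus a variadic 'int (const char *, ...)' printer:

    #include <cstdio>

    struct Point { int x, y; };

    void dump(Point *p) {
      __builtin_dump_struct(p, &printf);  // the call itself now has type 'int'
    }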
// check secure string manipulation functions where overflows
// are detectable at compile time
@@ -1215,7 +1323,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
return ExprError();
break;
- break;
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
case Builtin::BIget_kernel_sub_group_count_for_ndrange:
if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
@@ -1244,6 +1351,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
break;
+ case llvm::Triple::hexagon:
+ if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -1353,6 +1464,7 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
+#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
}
@@ -1402,9 +1514,10 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
default:
return false;
-#define GET_NEON_IMMEDIATE_CHECK
-#include "clang/Basic/arm_neon.inc"
-#undef GET_NEON_IMMEDIATE_CHECK
+ #define GET_NEON_IMMEDIATE_CHECK
+ #include "clang/Basic/arm_neon.inc"
+ #include "clang/Basic/arm_fp16.inc"
+ #undef GET_NEON_IMMEDIATE_CHECK
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
@@ -1618,6 +1731,1015 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
+bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
+ static const std::map<unsigned, std::vector<StringRef>> ValidCPU = {
+ { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, {"v62", "v65"} },
+ };
+
+ static const std::map<unsigned, std::vector<StringRef>> ValidHVX = {
+ { Hexagon::BI__builtin_HEXAGON_V6_extractw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_hi, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lo, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_not, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vand, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassign, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassignp, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgb, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguw, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcombine, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vd0, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdd0, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdelta, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlut4, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminb, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmux, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnot, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackob, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, {"v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vror, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsathub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, {"v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vswap, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vxor, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzb, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzh, {"v60", "v62", "v65"} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, {"v60", "v62", "v65"} },
+ };
+
+ const TargetInfo &TI = Context.getTargetInfo();
+
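+ // Target CPU names arrive as "hexagonv60", "hexagonv62", and so on; strip
+ // the "hexagon" prefix and match the remaining version suffix ("v60")
+ // against the builtin's list of valid CPUs.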
+ auto FC = ValidCPU.find(BuiltinID);
+ if (FC != ValidCPU.end()) {
+ const TargetOptions &Opts = TI.getTargetOpts();
+ StringRef CPU = Opts.CPU;
+ if (!CPU.empty()) {
+ assert(CPU.startswith("hexagon") && "Unexpected CPU name");
+ CPU.consume_front("hexagon");
+ if (llvm::none_of(FC->second, [CPU](StringRef S) { return S == CPU; }))
+ return Diag(TheCall->getLocStart(),
+ diag::err_hexagon_builtin_unsupported_cpu);
+ }
+ }
+
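+ // HVX builtins need the base "hvx" feature plus a version feature formed
+ // as "hvx" + <version> (e.g. "hvxv62") matching one of the listed versions.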
+ auto FH = ValidHVX.find(BuiltinID);
+ if (FH != ValidHVX.end()) {
+ if (!TI.hasFeature("hvx"))
+ return Diag(TheCall->getLocStart(),
+ diag::err_hexagon_builtin_requires_hvx);
+
+ bool IsValid = llvm::any_of(FH->second,
+ [&TI] (StringRef V) {
+ std::string F = "hvx" + V.str();
+ return TI.hasFeature(F);
+ });
+ if (!IsValid)
+ return Diag(TheCall->getLocStart(),
+ diag::err_hexagon_builtin_unsupported_hvx);
+ }
+
+ return false;
+}
+
+bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
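+ // Each ArgInfo describes one immediate operand: OpNum is the argument
+ // index, IsSigned and BitWidth give the encodable range, and a nonzero
+ // Align marks a scaled immediate that must be a multiple of 2^Align.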
+ struct ArgInfo {
+ ArgInfo(unsigned O, bool S, unsigned W, unsigned A)
+ : OpNum(O), IsSigned(S), BitWidth(W), Align(A) {}
+ unsigned OpNum = 0;
+ bool IsSigned = false;
+ unsigned BitWidth = 0;
+ unsigned Align = 0;
+ };
+
+ static const std::map<unsigned, std::vector<ArgInfo>> Infos = {
+ { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
+ { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
+ { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} },
+ { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} },
+ { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} },
+ { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} },
+ { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
+ {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
+ {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 },
+ { 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 },
+ { 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 },
+ { 3, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
+ {{ 2, false, 4, 0 },
+ { 3, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 },
+ { 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 },
+ { 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
+ {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
+ {{ 1, false, 4, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
+ {{ 3, false, 1, 0 }} },
+ };
+
+ auto F = Infos.find(BuiltinID);
+ if (F == Infos.end())
+ return false;
+
+ bool Error = false;
+
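+ // Validate each immediate against its signed or unsigned BitWidth range;
+ // for scaled immediates, widen the range by 2^Align and also require the
+ // value to be a multiple of 2^Align.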
+ for (const ArgInfo &A : F->second) {
+ int32_t Min = A.IsSigned ? -(1 << (A.BitWidth-1)) : 0;
+ int32_t Max = (1 << (A.IsSigned ? A.BitWidth-1 : A.BitWidth)) - 1;
+ if (!A.Align) {
+ Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
+ } else {
+ unsigned M = 1 << A.Align;
+ Min *= M;
+ Max *= M;
+ Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
+ SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
+ }
+ }
+ return Error;
+}
+
+bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ return CheckHexagonBuiltinCpu(BuiltinID, TheCall) ||
+ CheckHexagonBuiltinArgument(BuiltinID, TheCall);
+}
+
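+// A worked example from the tables above: __builtin_HEXAGON_S2_asl_i_r takes
+// an unsigned 5-bit immediate in operand 1, so a constant shift amount of 32
+// (outside [0, 31]) is rejected by SemaBuiltinConstantArgRange.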
+
// CheckMipsBuiltinFunctionCall - Checks that the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
@@ -1666,7 +2788,7 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
case Mips::BI__builtin_msa_binsli_h:
case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
- // These intrinsics take an unsigned 5 bit immedate.
+ // These intrinsics take an unsigned 5 bit immediate.
// The first block of intrinsics actually has an unsigned 5 bit field,
// not a df/n field.
case Mips::BI__builtin_msa_clei_u_b:
@@ -1966,6 +3088,12 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vcvttss2usi64:
ArgNum = 1;
break;
+ case X86::BI__builtin_ia32_maxpd512:
+ case X86::BI__builtin_ia32_maxps512:
+ case X86::BI__builtin_ia32_minpd512:
+ case X86::BI__builtin_ia32_minps512:
+ ArgNum = 2;
+ break;
case X86::BI__builtin_ia32_cvtps2pd512_mask:
case X86::BI__builtin_ia32_cvttpd2dq512_mask:
case X86::BI__builtin_ia32_cvttpd2qq512_mask:
@@ -1995,12 +3123,8 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtss2sd_round_mask:
case X86::BI__builtin_ia32_getexpsd128_round_mask:
case X86::BI__builtin_ia32_getexpss128_round_mask:
- case X86::BI__builtin_ia32_maxpd512_mask:
- case X86::BI__builtin_ia32_maxps512_mask:
case X86::BI__builtin_ia32_maxsd_round_mask:
case X86::BI__builtin_ia32_maxss_round_mask:
- case X86::BI__builtin_ia32_minpd512_mask:
- case X86::BI__builtin_ia32_minps512_mask:
case X86::BI__builtin_ia32_minsd_round_mask:
case X86::BI__builtin_ia32_minss_round_mask:
case X86::BI__builtin_ia32_rcp28sd_round_mask:
@@ -2039,9 +3163,19 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vcvtss2si64:
case X86::BI__builtin_ia32_vcvtss2usi32:
case X86::BI__builtin_ia32_vcvtss2usi64:
+ case X86::BI__builtin_ia32_sqrtpd512:
+ case X86::BI__builtin_ia32_sqrtps512:
ArgNum = 1;
HasRC = true;
break;
+ case X86::BI__builtin_ia32_addpd512:
+ case X86::BI__builtin_ia32_addps512:
+ case X86::BI__builtin_ia32_divpd512:
+ case X86::BI__builtin_ia32_divps512:
+ case X86::BI__builtin_ia32_mulpd512:
+ case X86::BI__builtin_ia32_mulps512:
+ case X86::BI__builtin_ia32_subpd512:
+ case X86::BI__builtin_ia32_subps512:
case X86::BI__builtin_ia32_cvtsi2sd64:
case X86::BI__builtin_ia32_cvtsi2ss32:
case X86::BI__builtin_ia32_cvtsi2ss64:
@@ -2062,19 +3196,9 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtqq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
- case X86::BI__builtin_ia32_sqrtpd512_mask:
- case X86::BI__builtin_ia32_sqrtps512_mask:
ArgNum = 3;
HasRC = true;
break;
- case X86::BI__builtin_ia32_addpd512_mask:
- case X86::BI__builtin_ia32_addps512_mask:
- case X86::BI__builtin_ia32_divpd512_mask:
- case X86::BI__builtin_ia32_divps512_mask:
- case X86::BI__builtin_ia32_mulpd512_mask:
- case X86::BI__builtin_ia32_mulps512_mask:
- case X86::BI__builtin_ia32_subpd512_mask:
- case X86::BI__builtin_ia32_subps512_mask:
case X86::BI__builtin_ia32_addss_round_mask:
case X86::BI__builtin_ia32_addsd_round_mask:
case X86::BI__builtin_ia32_divss_round_mask:
@@ -2092,34 +3216,28 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
case X86::BI__builtin_ia32_sqrtsd_round_mask:
case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
case X86::BI__builtin_ia32_vfmaddpd512_mask:
- case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
case X86::BI__builtin_ia32_vfmaddps512_mask:
- case X86::BI__builtin_ia32_vfmaddps512_mask3:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
- case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
- case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
- case X86::BI__builtin_ia32_vfmsubpd512_mask3:
- case X86::BI__builtin_ia32_vfmsubps512_mask3:
- case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
- case X86::BI__builtin_ia32_vfnmaddpd512_mask:
- case X86::BI__builtin_ia32_vfnmaddps512_mask:
- case X86::BI__builtin_ia32_vfnmsubpd512_mask:
- case X86::BI__builtin_ia32_vfnmsubpd512_mask3:
- case X86::BI__builtin_ia32_vfnmsubps512_mask:
- case X86::BI__builtin_ia32_vfnmsubps512_mask3:
- case X86::BI__builtin_ia32_vfmaddsd3_mask:
- case X86::BI__builtin_ia32_vfmaddsd3_maskz:
- case X86::BI__builtin_ia32_vfmaddsd3_mask3:
- case X86::BI__builtin_ia32_vfmaddss3_mask:
- case X86::BI__builtin_ia32_vfmaddss3_maskz:
- case X86::BI__builtin_ia32_vfmaddss3_mask3:
ArgNum = 4;
HasRC = true;
break;
@@ -2256,6 +3374,17 @@ bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
<< Arg->getSourceRange();
}
+static bool isX86_32Builtin(unsigned BuiltinID) {
+ // These builtins only work on x86-32 targets.
+ switch (BuiltinID) {
+ case X86::BI__builtin_ia32_readeflags_u32:
+ case X86::BI__builtin_ia32_writeeflags_u32:
+ return true;
+ }
+
+ return false;
+}
+
bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == X86::BI__builtin_cpu_supports)
return SemaBuiltinCpuSupports(*this, TheCall);
@@ -2263,6 +3392,12 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == X86::BI__builtin_cpu_is)
return SemaBuiltinCpuIs(*this, TheCall);
+ // Check for 32-bit only builtins on a 64-bit target.
+ const llvm::Triple &TT = Context.getTargetInfo().getTriple();
+ if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
+ return Diag(TheCall->getCallee()->getLocStart(),
+ diag::err_32_bit_builtin_64_bit_tgt);
+
// If the intrinsic has rounding or SAE make sure its valid.
if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
return true;
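For illustration, a hedged sketch of the new x86-32-only check (the EFLAGS builtin names come from the switch above; the exact diagnostic wording is an assumption):

  /* compiled for an x86-64 target */
  unsigned read_flags(void) {
    return __builtin_ia32_readeflags_u32(); /* now an error: this builtin
                                               only exists on x86-32 */
  }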
@@ -2277,14 +3412,67 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
switch (BuiltinID) {
default:
return false;
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ i = 1; l = 0; u = 1;
+ break;
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ case X86::BI__builtin_ia32_vinsertf128_pd256:
+ case X86::BI__builtin_ia32_vinsertf128_ps256:
+ case X86::BI__builtin_ia32_vinsertf128_si256:
+ case X86::BI__builtin_ia32_insert128i256:
+ case X86::BI__builtin_ia32_insertf32x8:
+ case X86::BI__builtin_ia32_inserti32x8:
+ case X86::BI__builtin_ia32_insertf64x4:
+ case X86::BI__builtin_ia32_inserti64x4:
+ case X86::BI__builtin_ia32_insertf64x2_256:
+ case X86::BI__builtin_ia32_inserti64x2_256:
+ case X86::BI__builtin_ia32_insertf32x4_256:
+ case X86::BI__builtin_ia32_inserti32x4_256:
+ i = 2; l = 0; u = 1;
+ break;
+ case X86::BI__builtin_ia32_vpermilpd:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v4di:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ i = 1; l = 0; u = 3;
+ break;
case X86::BI_mm_prefetch:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v8si:
i = 1; l = 0; u = 7;
break;
case X86::BI__builtin_ia32_sha1rnds4:
- case X86::BI__builtin_ia32_shuf_f32x4_256_mask:
- case X86::BI__builtin_ia32_shuf_f64x2_256_mask:
- case X86::BI__builtin_ia32_shuf_i32x4_256_mask:
- case X86::BI__builtin_ia32_shuf_i64x2_256_mask:
+ case X86::BI__builtin_ia32_blendpd:
+ case X86::BI__builtin_ia32_shufpd:
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_insertf64x2_512:
+ case X86::BI__builtin_ia32_inserti64x2_512:
+ case X86::BI__builtin_ia32_insertf32x4:
+ case X86::BI__builtin_ia32_inserti32x4:
i = 2; l = 0; u = 3;
break;
case X86::BI__builtin_ia32_vpermil2pd:
@@ -2325,14 +3513,29 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vpcomw:
case X86::BI__builtin_ia32_vpcomd:
case X86::BI__builtin_ia32_vpcomq:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ case X86::BI__builtin_ia32_vec_set_v8si:
i = 2; l = 0; u = 7;
break;
+ case X86::BI__builtin_ia32_vpermilpd256:
case X86::BI__builtin_ia32_roundps:
case X86::BI__builtin_ia32_roundpd:
case X86::BI__builtin_ia32_roundps256:
case X86::BI__builtin_ia32_roundpd256:
+ case X86::BI__builtin_ia32_getmantpd128_mask:
+ case X86::BI__builtin_ia32_getmantpd256_mask:
+ case X86::BI__builtin_ia32_getmantps128_mask:
+ case X86::BI__builtin_ia32_getmantps256_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_vec_ext_v16qi:
+ case X86::BI__builtin_ia32_vec_ext_v16hi:
i = 1; l = 0; u = 15;
break;
+ case X86::BI__builtin_ia32_pblendd128:
+ case X86::BI__builtin_ia32_blendps:
+ case X86::BI__builtin_ia32_blendpd256:
+ case X86::BI__builtin_ia32_shufpd256:
case X86::BI__builtin_ia32_roundss:
case X86::BI__builtin_ia32_roundsd:
case X86::BI__builtin_ia32_rangepd128_mask:
@@ -2343,8 +3546,13 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_rangeps512_mask:
case X86::BI__builtin_ia32_getmantsd_round_mask:
case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_vec_set_v16qi:
+ case X86::BI__builtin_ia32_vec_set_v16hi:
i = 2; l = 0; u = 15;
break;
+ case X86::BI__builtin_ia32_vec_ext_v32qi:
+ i = 1; l = 0; u = 31;
+ break;
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpss:
case X86::BI__builtin_ia32_cmppd:
@@ -2359,15 +3567,26 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cmppd512_mask:
case X86::BI__builtin_ia32_cmpsd_mask:
case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_vec_set_v32qi:
i = 2; l = 0; u = 31;
break;
- case X86::BI__builtin_ia32_xabort:
- i = 0; l = -128; u = 255;
- break;
- case X86::BI__builtin_ia32_pshufw:
- case X86::BI__builtin_ia32_aeskeygenassist128:
- i = 1; l = -128; u = 255;
- break;
+ case X86::BI__builtin_ia32_permdf256:
+ case X86::BI__builtin_ia32_permdi256:
+ case X86::BI__builtin_ia32_permdf512:
+ case X86::BI__builtin_ia32_permdi512:
+ case X86::BI__builtin_ia32_vpermilps:
+ case X86::BI__builtin_ia32_vpermilps256:
+ case X86::BI__builtin_ia32_vpermilpd512:
+ case X86::BI__builtin_ia32_vpermilps512:
+ case X86::BI__builtin_ia32_pshufd:
+ case X86::BI__builtin_ia32_pshufd256:
+ case X86::BI__builtin_ia32_pshufd512:
+ case X86::BI__builtin_ia32_pshufhw:
+ case X86::BI__builtin_ia32_pshufhw256:
+ case X86::BI__builtin_ia32_pshufhw512:
+ case X86::BI__builtin_ia32_pshuflw:
+ case X86::BI__builtin_ia32_pshuflw256:
+ case X86::BI__builtin_ia32_pshuflw512:
case X86::BI__builtin_ia32_vcvtps2ph:
case X86::BI__builtin_ia32_vcvtps2ph_mask:
case X86::BI__builtin_ia32_vcvtps2ph256:
@@ -2385,16 +3604,18 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_reduceps128_mask:
case X86::BI__builtin_ia32_reduceps256_mask:
case X86::BI__builtin_ia32_reduceps512_mask:
- case X86::BI__builtin_ia32_prold512_mask:
- case X86::BI__builtin_ia32_prolq512_mask:
- case X86::BI__builtin_ia32_prold128_mask:
- case X86::BI__builtin_ia32_prold256_mask:
- case X86::BI__builtin_ia32_prolq128_mask:
- case X86::BI__builtin_ia32_prolq256_mask:
- case X86::BI__builtin_ia32_prord128_mask:
- case X86::BI__builtin_ia32_prord256_mask:
- case X86::BI__builtin_ia32_prorq128_mask:
- case X86::BI__builtin_ia32_prorq256_mask:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclassps128_mask:
@@ -2403,41 +3624,62 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_fpclasspd512_mask:
case X86::BI__builtin_ia32_fpclasssd_mask:
case X86::BI__builtin_ia32_fpclassss_mask:
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ case X86::BI__builtin_ia32_pslldqi512_byteshift:
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ case X86::BI__builtin_ia32_psrldqi512_byteshift:
i = 1; l = 0; u = 255;
break;
- case X86::BI__builtin_ia32_palignr:
- case X86::BI__builtin_ia32_insertps128:
- case X86::BI__builtin_ia32_dpps:
- case X86::BI__builtin_ia32_dppd:
- case X86::BI__builtin_ia32_dpps256:
- case X86::BI__builtin_ia32_mpsadbw128:
- case X86::BI__builtin_ia32_mpsadbw256:
- case X86::BI__builtin_ia32_pcmpistrm128:
- case X86::BI__builtin_ia32_pcmpistri128:
- case X86::BI__builtin_ia32_pcmpistria128:
- case X86::BI__builtin_ia32_pcmpistric128:
- case X86::BI__builtin_ia32_pcmpistrio128:
- case X86::BI__builtin_ia32_pcmpistris128:
- case X86::BI__builtin_ia32_pcmpistriz128:
- case X86::BI__builtin_ia32_pclmulqdq128:
case X86::BI__builtin_ia32_vperm2f128_pd256:
case X86::BI__builtin_ia32_vperm2f128_ps256:
case X86::BI__builtin_ia32_vperm2f128_si256:
case X86::BI__builtin_ia32_permti256:
- i = 2; l = -128; u = 255;
- break;
+ case X86::BI__builtin_ia32_pblendw128:
+ case X86::BI__builtin_ia32_pblendw256:
+ case X86::BI__builtin_ia32_blendps256:
+ case X86::BI__builtin_ia32_pblendd256:
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr512_mask:
+ case X86::BI__builtin_ia32_palignr512:
+ case X86::BI__builtin_ia32_alignq512:
+ case X86::BI__builtin_ia32_alignd512:
+ case X86::BI__builtin_ia32_alignd128:
+ case X86::BI__builtin_ia32_alignd256:
+ case X86::BI__builtin_ia32_alignq128:
+ case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_vcomisd:
case X86::BI__builtin_ia32_vcomiss:
- case X86::BI__builtin_ia32_shuf_f32x4_mask:
- case X86::BI__builtin_ia32_shuf_f64x2_mask:
- case X86::BI__builtin_ia32_shuf_i32x4_mask:
- case X86::BI__builtin_ia32_shuf_i64x2_mask:
- case X86::BI__builtin_ia32_dbpsadbw128_mask:
- case X86::BI__builtin_ia32_dbpsadbw256_mask:
- case X86::BI__builtin_ia32_dbpsadbw512_mask:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
+ case X86::BI__builtin_ia32_shufpd512:
+ case X86::BI__builtin_ia32_shufps:
+ case X86::BI__builtin_ia32_shufps256:
+ case X86::BI__builtin_ia32_shufps512:
+ case X86::BI__builtin_ia32_dbpsadbw128:
+ case X86::BI__builtin_ia32_dbpsadbw256:
+ case X86::BI__builtin_ia32_dbpsadbw512:
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
i = 2; l = 0; u = 255;
break;
case X86::BI__builtin_ia32_fixupimmpd512_mask:
@@ -2480,21 +3722,17 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_scatterpfqps:
i = 4; l = 2; u = 3;
break;
- case X86::BI__builtin_ia32_pcmpestrm128:
- case X86::BI__builtin_ia32_pcmpestri128:
- case X86::BI__builtin_ia32_pcmpestria128:
- case X86::BI__builtin_ia32_pcmpestric128:
- case X86::BI__builtin_ia32_pcmpestrio128:
- case X86::BI__builtin_ia32_pcmpestris128:
- case X86::BI__builtin_ia32_pcmpestriz128:
- i = 4; l = -128; u = 255;
- break;
case X86::BI__builtin_ia32_rndscalesd_round_mask:
case X86::BI__builtin_ia32_rndscaless_round_mask:
i = 4; l = 0; u = 255;
break;
}
- return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+
+ // Note that we don't force a hard error on the range check here, allowing
+ // template-generated or macro-generated dead code to potentially have out-of-
+ // range values. These still need to code-generate, but don't necessarily
+ // need to make any sense. We use a warning that defaults to an error.
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}
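A hedged user-level sketch of the relaxed range check (the SSE4.1 intrinsic and the warning-flag behavior are assumptions, not spelled out in this hunk):

  #include <immintrin.h>
  __m128d sel(__m128d a, __m128d b) {
    /* 4 is outside [0, 3]; this is now a warning that defaults to an
       error, and it is deferred so code that is never emitted (e.g.
       macro- or template-generated dead code) can escape it */
    return _mm_blend_pd(a, b, 4);
  }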
/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
@@ -2522,7 +3760,7 @@ bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
/// Checks if the given expression evaluates to null.
///
-/// \brief Returns true if the value evaluates to null.
+/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
// If the expression has non-null type, it doesn't evaluate to null.
if (auto nullability
@@ -2566,7 +3804,7 @@ bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
return false;
}
-/// \brief Diagnose use of %s directive in an NSString which is being passed
+/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
@@ -2636,12 +3874,13 @@ static void CheckNonNullArguments(Sema &S,
return;
}
- for (unsigned Val : NonNull->args()) {
- if (Val >= Args.size())
+ for (const ParamIdx &Idx : NonNull->args()) {
+ unsigned IdxAST = Idx.getASTIndex();
+ if (IdxAST >= Args.size())
continue;
if (NonNullArgs.empty())
NonNullArgs.resize(Args.size());
- NonNullArgs.set(Val);
+ NonNullArgs.set(IdxAST);
}
}
}
@@ -2985,6 +4224,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
Op == AtomicExpr::AO__atomic_exchange_n ||
Op == AtomicExpr::AO__atomic_compare_exchange_n;
bool IsAddSub = false;
+ bool IsMinMax = false;
switch (Op) {
case AtomicExpr::AO__c11_atomic_init:
@@ -3038,6 +4278,12 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
Form = Arithmetic;
break;
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_max:
+ IsMinMax = true;
+ Form = Arithmetic;
+ break;
+
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
@@ -3120,12 +4366,21 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// For an arithmetic operation, the implied arithmetic must be well-formed.
if (Form == Arithmetic) {
// gcc does not enforce these rules for GNU atomics, but we do so for sanity.
- if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ if (IsAddSub && !ValType->isIntegerType() &&
+ !ValType->isPointerType()) {
Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- if (!IsAddSub && !ValType->isIntegerType()) {
+ if (IsMinMax) {
+ const BuiltinType *BT = ValType->getAs<BuiltinType>();
+ if (!BT || (BT->getKind() != BuiltinType::Int &&
+ BT->getKind() != BuiltinType::UInt)) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_int32_or_ptr);
+ return ExprError();
+ }
+ }
+ if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
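A minimal sketch of the min/max restriction above (assuming the GNU __atomic_fetch_min/__atomic_fetch_max builtins shown earlier in this patch):

  int i; long l;
  __atomic_fetch_min(&i, 7, __ATOMIC_SEQ_CST); /* OK: 'int' operand */
  __atomic_fetch_min(&l, 7, __ATOMIC_SEQ_CST); /* error: operand must be a
                                                  32-bit 'int'/'unsigned' */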
@@ -3168,9 +4423,10 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
return ExprError();
}
- // atomic_fetch_or takes a pointer to a volatile 'A'. We shouldn't let the
- // volatile-ness of the pointee-type inject itself into the result or the
- // other operands. Similarly atomic_load can take a pointer to a const 'A'.
+ // All atomic operations have an overload which takes a pointer to a volatile
+ // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
+ // into the result or the other operands. Similarly atomic_load takes a
+ // pointer to a const 'A'.
ValType.removeLocalVolatile();
ValType.removeLocalConst();
QualType ResultType = ValType;
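A hedged C11 sketch of the qualifier stripping described in the comment above:

  _Atomic(int) volatile av;
  /* the loaded value has plain type 'int'; the pointee's volatile (or
     const, for loads) does not leak into the result type */
  int x = __c11_atomic_load(&av, __ATOMIC_SEQ_CST);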
@@ -3183,16 +4439,27 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// The type of a parameter passed 'by value'. In the GNU atomics, such
// arguments are actually passed as pointers.
QualType ByValType = ValType; // 'CP'
- if (!IsC11 && !IsN)
+ bool IsPassedByAddress = false;
+ if (!IsC11 && !IsN) {
ByValType = Ptr->getType();
+ IsPassedByAddress = true;
+ }
- // The first argument --- the pointer --- has a fixed type; we
- // deduce the types of the rest of the arguments accordingly. Walk
- // the remaining arguments, converting them to the deduced value type.
- for (unsigned i = 1; i != TheCall->getNumArgs(); ++i) {
+ // The first argument's non-CV pointer type is used to deduce the type of
+ // subsequent arguments, except for:
+ // - weak flag (always converted to bool)
+ // - memory order (always converted to int)
+ // - scope (always converted to int)
+ for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
QualType Ty;
if (i < NumVals[Form] + 1) {
switch (i) {
+ case 0:
+ // The first argument is always a pointer. It has a fixed type.
+ // It is always dereferenced; passing a null pointer is undefined behavior.
+ CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
+ // Nothing else to do: we already know all we want about this pointer.
+ continue;
case 1:
// The second argument is the non-atomic operand. For arithmetic, this
// is always passed by value, and for a compare_exchange it is always
@@ -3201,14 +4468,16 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
assert(Form != Load);
if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
Ty = ValType;
- else if (Form == Copy || Form == Xchg)
+ else if (Form == Copy || Form == Xchg) {
+ if (IsPassedByAddress)
+ // The value pointer is always dereferenced; a null pointer is undefined behavior.
+ CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
Ty = ByValType;
- else if (Form == Arithmetic)
+ } else if (Form == Arithmetic)
Ty = Context.getPointerDiffType();
else {
Expr *ValArg = TheCall->getArg(i);
- // Treat this argument as _Nonnull as we want to show a warning if
- // NULL is passed into it.
+ // The value pointer is always dereferenced; a null pointer is undefined behavior.
CheckNonNullArgument(*this, ValArg, DRE->getLocStart());
LangAS AS = LangAS::Default;
// Keep address space of non-atomic pointer type.
@@ -3221,8 +4490,10 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
}
break;
case 2:
- // The third argument to compare_exchange / GNU exchange is a
- // (pointer to a) desired value.
+ // The third argument to compare_exchange / GNU exchange is the desired
+ // value, passed by value (for the C11 and *_n variants) or by pointer.
+ if (IsPassedByAddress)
+ CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
Ty = ByValType;
break;
case 3:
@@ -3393,6 +4664,12 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
return ExprError();
}
+ if (ValType.isConstQualified()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_cannot_be_const)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
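A hedged sketch of the new const check (diagnostic wording assumed):

  const int c = 0;
  /* now rejected: the __sync builtins write through their first argument */
  int old = __sync_fetch_and_add(&c, 1);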
switch (ValType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -3598,7 +4875,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
ResultType = Context.BoolTy;
break;
- case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
@@ -4115,15 +5392,19 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
diag::err_typecheck_call_invalid_unary_fp)
<< OrigArg->getType() << OrigArg->getSourceRange();
- // If this is an implicit conversion from float -> float or double, remove it.
+ // If this is an implicit conversion from float -> float, double, or
+ // long double, remove it.
if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
// Only remove standard FloatCasts, leaving other casts inplace
if (Cast->getCastKind() == CK_FloatingCast) {
Expr *CastArg = Cast->getSubExpr();
if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
- assert((Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
- Cast->getType()->isSpecificBuiltinType(BuiltinType::Float)) &&
- "promotion from float to either float or double is the only expected cast here");
+ assert(
+ (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
+ Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
+ Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
+ "promotion from float to either float, double, or long double is "
+ "the only expected cast here");
Cast->setSubExpr(nullptr);
TheCall->setArg(NumArgs-1, CastArg);
}
@@ -4519,7 +5800,7 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
- int Low, int High) {
+ int Low, int High, bool RangeIsError) {
llvm::APSInt Result;
// We can't check the value of a dependent argument.
@@ -4531,9 +5812,18 @@ bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
return true;
- if (Result.getSExtValue() < Low || Result.getSExtValue() > High)
- return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
- << Low << High << Arg->getSourceRange();
+ if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
+ if (RangeIsError)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << Result.toString(10) << Low << High << Arg->getSourceRange();
+ else
+ // Defer the warning until we know if the code will be emitted so that
+ // dead code can ignore this.
+ DiagRuntimeBehavior(TheCall->getLocStart(), TheCall,
+ PDiag(diag::warn_argument_invalid_range)
+ << Result.toString(10) << Low << High
+ << Arg->getSourceRange());
+ }
return false;
}
@@ -5018,18 +6308,22 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
case Stmt::CXXMemberCallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
- if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) {
- unsigned ArgIndex = FA->getFormatIdx();
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
- if (MD->isInstance())
- --ArgIndex;
- const Expr *Arg = CE->getArg(ArgIndex - 1);
-
- return checkFormatStringExpr(S, Arg, Args,
- HasVAListArg, format_idx, firstDataArg,
- Type, CallType, InFunctionCall,
- CheckedVarArgs, UncoveredArg, Offset);
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ bool IsFirst = true;
+ StringLiteralCheckType CommonResult;
+ for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
+ const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
+ StringLiteralCheckType Result = checkFormatStringExpr(
+ S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
+ CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
+ if (IsFirst) {
+ CommonResult = Result;
+ IsFirst = false;
+ }
+ }
+ if (!IsFirst)
+ return CommonResult;
+
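A hedged sketch of why this loop matters, modeled on ngettext-style helpers that carry more than one format_arg attribute (the declaration is made up):

  #include <stdio.h>
  extern const char *pick(const char *s, const char *p, unsigned long n)
      __attribute__((format_arg(1))) __attribute__((format_arg(2)));
  void report(unsigned long n) {
    /* both candidate strings are now format-checked, not just the first */
    printf(pick("%lu file\n", "%lu files\n", n), n);
  }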
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
unsigned BuiltinID = FD->getBuiltinID();
if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
@@ -5049,8 +6343,7 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
const auto *ME = cast<ObjCMessageExpr>(E);
if (const auto *ND = ME->getMethodDecl()) {
if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
- unsigned ArgIndex = FA->getFormatIdx();
- const Expr *Arg = ME->getArg(ArgIndex - 1);
+ const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
return checkFormatStringExpr(
S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
@@ -5654,7 +6947,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
Loc, IsStringLocation, StringRange, FixIt);
}
-/// \brief If the format string is not within the funcion call, emit a note
+/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
@@ -6311,11 +7604,11 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
ExprTy = TET->getUnderlyingExpr()->getType();
}
- analyze_printf::ArgType::MatchKind match = AT.matchesType(S.Context, ExprTy);
-
- if (match == analyze_printf::ArgType::Match) {
+ const analyze_printf::ArgType::MatchKind Match =
+ AT.matchesType(S.Context, ExprTy);
+ bool Pedantic = Match == analyze_printf::ArgType::NoMatchPedantic;
+ if (Match == analyze_printf::ArgType::Match)
return true;
- }
// Look through argument promotions for our error message's reported type.
// This includes the integral and floating promotions, but excludes array
@@ -6391,6 +7684,12 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
QualType CastTy;
std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
if (!CastTy.isNull()) {
+ // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
+ // (long in ASTContext). Only complain to pedants.
+ if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
+ (AT.isSizeT() || AT.isPtrdiffT()) &&
+ AT.matchesType(S.Context, CastTy))
+ Pedantic = true;
IntendedTy = CastTy;
ShouldNotPrintDirectly = true;
}
@@ -6398,10 +7697,10 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// We may be able to offer a FixItHint if it is a supported type.
PrintfSpecifier fixedFS = FS;
- bool success =
+ bool Success =
fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
- if (success) {
+ if (Success) {
// Get the fix string from the fixed format specifier
SmallString<16> buf;
llvm::raw_svector_ostream os(buf);
@@ -6410,13 +7709,13 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
- unsigned diag = diag::warn_format_conversion_argument_type_mismatch;
- if (match == analyze_format_string::ArgType::NoMatchPedantic) {
- diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
- }
+ unsigned Diag =
+ Pedantic
+ ? diag::warn_format_conversion_argument_type_mismatch_pedantic
+ : diag::warn_format_conversion_argument_type_mismatch;
// In this case, the specifier is wrong and should be changed to match
// the argument.
- EmitFormatDiagnostic(S.PDiag(diag)
+ EmitFormatDiagnostic(S.PDiag(Diag)
<< AT.getRepresentativeTypeName(S.Context)
<< IntendedTy << IsEnum << E->getSourceRange(),
E->getLocStart(),
@@ -6469,9 +7768,11 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
Name = TypedefTy->getDecl()->getName();
else
Name = CastTyName;
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_argument_needs_cast)
- << Name << IntendedTy << IsEnum
- << E->getSourceRange(),
+ unsigned Diag = Pedantic
+ ? diag::warn_format_argument_needs_cast_pedantic
+ : diag::warn_format_argument_needs_cast;
+ EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
+ << E->getSourceRange(),
E->getLocStart(), /*IsStringLocation=*/false,
SpecRange, Hints);
} else {
@@ -6495,13 +7796,13 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
switch (S.isValidVarArgType(ExprTy)) {
case Sema::VAK_Valid:
case Sema::VAK_ValidInCXX11: {
- unsigned diag = diag::warn_format_conversion_argument_type_mismatch;
- if (match == analyze_printf::ArgType::NoMatchPedantic) {
- diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
- }
+ unsigned Diag =
+ Pedantic
+ ? diag::warn_format_conversion_argument_type_mismatch_pedantic
+ : diag::warn_format_conversion_argument_type_mismatch;
EmitFormatDiagnostic(
- S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
+ S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
<< IsEnum << CSR << E->getSourceRange(),
E->getLocStart(), /*IsStringLocation*/ false, CSR);
break;
@@ -6684,29 +7985,28 @@ bool CheckScanfHandler::HandleScanfSpecifier(
return true;
}
- analyze_format_string::ArgType::MatchKind match =
+ analyze_format_string::ArgType::MatchKind Match =
AT.matchesType(S.Context, Ex->getType());
- if (match == analyze_format_string::ArgType::Match) {
+ bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
+ if (Match == analyze_format_string::ArgType::Match)
return true;
- }
ScanfSpecifier fixedFS = FS;
- bool success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
+ bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
S.getLangOpts(), S.Context);
- unsigned diag = diag::warn_format_conversion_argument_type_mismatch;
- if (match == analyze_format_string::ArgType::NoMatchPedantic) {
- diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
- }
+ unsigned Diag =
+ Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
+ : diag::warn_format_conversion_argument_type_mismatch;
- if (success) {
+ if (Success) {
// Get the fix string from the fixed format specifier.
SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
fixedFS.toString(os);
EmitFormatDiagnostic(
- S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context)
+ S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
<< Ex->getType() << false << Ex->getSourceRange(),
Ex->getLocStart(),
/*IsStringLocation*/ false,
@@ -6714,7 +8014,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
FixItHint::CreateReplacement(
getSpecifierRange(startSpecifier, specifierLen), os.str()));
} else {
- EmitFormatDiagnostic(S.PDiag(diag)
+ EmitFormatDiagnostic(S.PDiag(Diag)
<< AT.getRepresentativeTypeName(S.Context)
<< Ex->getType() << false << Ex->getSourceRange(),
Ex->getLocStart(),
@@ -7267,7 +8567,7 @@ void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
//===--- CHECK: Standard memory functions ---------------------------------===//
-/// \brief Takes the expression passed to the size_t parameter of functions
+/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
@@ -7298,7 +8598,7 @@ static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
return true;
}
-/// \brief Determine whether the given type is or contains a dynamic class type
+/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
bool &IsContained) {
@@ -7329,28 +8629,205 @@ static const CXXRecordDecl *getContainedDynamicClass(QualType T,
return nullptr;
}
-/// \brief If E is a sizeof expression, returns its argument expression,
+static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
+ if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (Unary->getKind() == UETT_SizeOf)
+ return Unary;
+ return nullptr;
+}
+
+/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL.
static const Expr *getSizeOfExprArg(const Expr *E) {
- if (const UnaryExprOrTypeTraitExpr *SizeOf =
- dyn_cast<UnaryExprOrTypeTraitExpr>(E))
- if (SizeOf->getKind() == UETT_SizeOf && !SizeOf->isArgumentType())
+ if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
+ if (!SizeOf->isArgumentType())
return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
-
return nullptr;
}
-/// \brief If E is a sizeof expression, returns its argument type.
+/// If E is a sizeof expression, returns its argument type.
static QualType getSizeOfArgType(const Expr *E) {
- if (const UnaryExprOrTypeTraitExpr *SizeOf =
- dyn_cast<UnaryExprOrTypeTraitExpr>(E))
- if (SizeOf->getKind() == UETT_SizeOf)
- return SizeOf->getTypeOfArgument();
-
+ if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
+ return SizeOf->getTypeOfArgument();
return QualType();
}
-/// \brief Check for dangerous or invalid arguments to memset().
+namespace {
+
+struct SearchNonTrivialToInitializeField
+ : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
+ using Super =
+ DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;
+
+ SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}
+
+ void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
+ SourceLocation SL) {
+ if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
+ asDerived().visitArray(PDIK, AT, SL);
+ return;
+ }
+
+ Super::visitWithKind(PDIK, FT, SL);
+ }
+
+ void visitARCStrong(QualType FT, SourceLocation SL) {
+ S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
+ }
+ void visitARCWeak(QualType FT, SourceLocation SL) {
+ S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
+ }
+ void visitStruct(QualType FT, SourceLocation SL) {
+ for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
+ visit(FD->getType(), FD->getLocation());
+ }
+ void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
+ const ArrayType *AT, SourceLocation SL) {
+ visit(getContext().getBaseElementType(AT), SL);
+ }
+ void visitTrivial(QualType FT, SourceLocation SL) {}
+
+ static void diag(QualType RT, const Expr *E, Sema &S) {
+ SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
+ }
+
+ ASTContext &getContext() { return S.getASTContext(); }
+
+ const Expr *E;
+ Sema &S;
+};
+
+struct SearchNonTrivialToCopyField
+ : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
+ using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;
+
+ SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}
+
+ void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
+ SourceLocation SL) {
+ if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
+ asDerived().visitArray(PCK, AT, SL);
+ return;
+ }
+
+ Super::visitWithKind(PCK, FT, SL);
+ }
+
+ void visitARCStrong(QualType FT, SourceLocation SL) {
+ S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
+ }
+ void visitARCWeak(QualType FT, SourceLocation SL) {
+ S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
+ }
+ void visitStruct(QualType FT, SourceLocation SL) {
+ for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
+ visit(FD->getType(), FD->getLocation());
+ }
+ void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
+ SourceLocation SL) {
+ visit(getContext().getBaseElementType(AT), SL);
+ }
+ void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
+ SourceLocation SL) {}
+ void visitTrivial(QualType FT, SourceLocation SL) {}
+ void visitVolatileTrivial(QualType FT, SourceLocation SL) {}
+
+ static void diag(QualType RT, const Expr *E, Sema &S) {
+ SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
+ }
+
+ ASTContext &getContext() { return S.getASTContext(); }
+
+ const Expr *E;
+ Sema &S;
+};
+
+}
+
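A hedged illustration of what these visitors report, compiled as Objective-C with ARC (names are made up):

  #include <string.h>
  struct Wrapper { __strong id obj; int n; };
  void reset(struct Wrapper *w) {
    /* warns that the struct is non-trivial to zero-initialize; a note
       produced by the visitor points at the __strong field 'obj' */
    memset(w, 0, sizeof(*w));
  }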
+/// Detect if \c SizeofExpr is likely to compute the size of an object.
+static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
+ SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
+
+ if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
+ if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
+ return false;
+
+ return doesExprLikelyComputeSize(BO->getLHS()) ||
+ doesExprLikelyComputeSize(BO->getRHS());
+ }
+
+ return getAsSizeOfExpr(SizeofExpr) != nullptr;
+}
+
+/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
+///
+/// \code
+/// #define MACRO 0
+/// foo(MACRO);
+/// foo(0);
+/// \endcode
+///
+/// This should return true for the first call to foo, but not for the second
+/// (regardless of whether foo is a macro or function).
+static bool isArgumentExpandedFromMacro(SourceManager &SM,
+ SourceLocation CallLoc,
+ SourceLocation ArgLoc) {
+ if (!CallLoc.isMacroID())
+ return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);
+
+ return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
+ SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
+}
+
+/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
+/// last two arguments transposed.
+static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
+ if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
+ return;
+
+ const Expr *SizeArg =
+ Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
+
+ auto isLiteralZero = [](const Expr *E) {
+ return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
+ };
+
+ // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
+ SourceLocation CallLoc = Call->getRParenLoc();
+ SourceManager &SM = S.getSourceManager();
+ if (isLiteralZero(SizeArg) &&
+ !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {
+
+ SourceLocation DiagLoc = SizeArg->getExprLoc();
+
+ // Some platforms #define bzero to __builtin_memset. See if this is the
+ // case, and if so, emit a better diagnostic.
+ if (BId == Builtin::BIbzero ||
+ (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
+ CallLoc, SM, S.getLangOpts()) == "bzero")) {
+ S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
+ S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
+ } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
+ S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
+ S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
+ }
+ return;
+ }
+
+ // If the second argument to a memset is a sizeof expression and the third
+ // isn't, this is also likely an error. This should catch
+ // 'memset(buf, sizeof(buf), 0xff)'.
+ if (BId == Builtin::BImemset &&
+ doesExprLikelyComputeSize(Call->getArg(1)) &&
+ !doesExprLikelyComputeSize(Call->getArg(2))) {
+ SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
+ S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
+ S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
+ return;
+ }
+}
+
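A short sketch of the patterns this catches, following the doc comments above (assuming <string.h> and <strings.h> declarations):

  char buf[64];
  memset(buf, sizeof(buf), 0);    /* size is 0: arguments transposed? */
  memset(buf, sizeof(buf), 0xff); /* 2nd arg computes a size, 3rd doesn't */
  bzero(buf, 0);                  /* suspicious bzero of 0 bytes */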
+/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
@@ -7379,6 +8856,9 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
Call->getLocStart(), Call->getRParenLoc()))
return;
+ // Catch cases like 'memset(buf, sizeof(buf), 0)'.
+ CheckMemaccessSize(*this, BId, Call);
+
// We have special checking when the length is a sizeof expression.
QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
@@ -7515,7 +8995,23 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
PDiag(diag::warn_arc_object_memaccess)
<< ArgIdx << FnName << PointeeTy
<< Call->getCallee()->getSourceRange());
- else
+ else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
+ if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
+ RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
+ DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_cstruct_memaccess)
+ << ArgIdx << FnName << PointeeTy << 0);
+ SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
+ } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
+ RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
+ DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_cstruct_memaccess)
+ << ArgIdx << FnName << PointeeTy << 1);
+ SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
+ } else {
+ continue;
+ }
+ } else
continue;
DiagRuntimeBehavior(
@@ -7736,421 +9232,12 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
<< FixItHint::CreateReplacement(SR, OS.str());
}
-//===--- CHECK: Return Address of Stack Variable --------------------------===//
-
-static const Expr *EvalVal(const Expr *E,
- SmallVectorImpl<const DeclRefExpr *> &refVars,
- const Decl *ParentDecl);
-static const Expr *EvalAddr(const Expr *E,
- SmallVectorImpl<const DeclRefExpr *> &refVars,
- const Decl *ParentDecl);
-
-/// CheckReturnStackAddr - Check if a return statement returns the address
-/// of a stack variable.
-static void
-CheckReturnStackAddr(Sema &S, Expr *RetValExp, QualType lhsType,
- SourceLocation ReturnLoc) {
- const Expr *stackE = nullptr;
- SmallVector<const DeclRefExpr *, 8> refVars;
-
- // Perform checking for returned stack addresses, local blocks,
- // label addresses or references to temporaries.
- if (lhsType->isPointerType() ||
- (!S.getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
- stackE = EvalAddr(RetValExp, refVars, /*ParentDecl=*/nullptr);
- } else if (lhsType->isReferenceType()) {
- stackE = EvalVal(RetValExp, refVars, /*ParentDecl=*/nullptr);
- }
-
- if (!stackE)
- return; // Nothing suspicious was found.
-
- // Parameters are initialized in the calling scope, so taking the address
- // of a parameter reference doesn't need a warning.
- for (auto *DRE : refVars)
- if (isa<ParmVarDecl>(DRE->getDecl()))
- return;
-
- SourceLocation diagLoc;
- SourceRange diagRange;
- if (refVars.empty()) {
- diagLoc = stackE->getLocStart();
- diagRange = stackE->getSourceRange();
- } else {
- // We followed through a reference variable. 'stackE' contains the
- // problematic expression but we will warn at the return statement pointing
- // at the reference variable. We will later display the "trail" of
- // reference variables using notes.
- diagLoc = refVars[0]->getLocStart();
- diagRange = refVars[0]->getSourceRange();
- }
-
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) {
- // address of local var
- S.Diag(diagLoc, diag::warn_ret_stack_addr_ref) << lhsType->isReferenceType()
- << DR->getDecl()->getDeclName() << diagRange;
- } else if (isa<BlockExpr>(stackE)) { // local block.
- S.Diag(diagLoc, diag::err_ret_local_block) << diagRange;
- } else if (isa<AddrLabelExpr>(stackE)) { // address of label.
- S.Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
- } else { // local temporary.
- // If there is an LValue->RValue conversion, then the value of the
- // reference type is used, not the reference.
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(RetValExp)) {
- if (ICE->getCastKind() == CK_LValueToRValue) {
- return;
- }
- }
- S.Diag(diagLoc, diag::warn_ret_local_temp_addr_ref)
- << lhsType->isReferenceType() << diagRange;
- }
-
- // Display the "trail" of reference variables that we followed until we
- // found the problematic expression using notes.
- for (unsigned i = 0, e = refVars.size(); i != e; ++i) {
- const VarDecl *VD = cast<VarDecl>(refVars[i]->getDecl());
- // If this var binds to another reference var, show the range of the next
- // var, otherwise the var binds to the problematic expression, in which case
- // show the range of the expression.
- SourceRange range = (i < e - 1) ? refVars[i + 1]->getSourceRange()
- : stackE->getSourceRange();
- S.Diag(VD->getLocation(), diag::note_ref_var_local_bind)
- << VD->getDeclName() << range;
- }
-}
-
-/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
-/// check if the expression in a return statement evaluates to an address
-/// to a location on the stack, a local block, an address of a label, or a
-/// reference to local temporary. The recursion is used to traverse the
-/// AST of the return expression, with recursion backtracking when we
-/// encounter a subexpression that (1) clearly does not lead to one of the
-/// above problematic expressions (2) is something we cannot determine leads to
-/// a problematic expression based on such local checking.
-///
-/// Both EvalAddr and EvalVal follow through reference variables to evaluate
-/// the expression that they point to. Such variables are added to the
-/// 'refVars' vector so that we know what the reference variable "trail" was.
-///
-/// EvalAddr processes expressions that are pointers that are used as
-/// references (and not L-values). EvalVal handles all other values.
-/// At the base case of the recursion is a check for the above problematic
-/// expressions.
-///
-/// This implementation handles:
-///
-/// * pointer-to-pointer casts
-/// * implicit conversions from array references to pointers
-/// * taking the address of fields
-/// * arbitrary interplay between "&" and "*" operators
-/// * pointer arithmetic from an address of a stack variable
-/// * taking the address of an array element where the array is on the stack
-static const Expr *EvalAddr(const Expr *E,
- SmallVectorImpl<const DeclRefExpr *> &refVars,
- const Decl *ParentDecl) {
- if (E->isTypeDependent())
- return nullptr;
-
- // We should only be called for evaluating pointer expressions.
- assert((E->getType()->isAnyPointerType() ||
- E->getType()->isBlockPointerType() ||
- E->getType()->isObjCQualifiedIdType()) &&
- "EvalAddr only works on pointers");
-
- E = E->IgnoreParens();
-
- // Our "symbolic interpreter" is just a dispatch off the currently
- // viewed AST node. We then recursively traverse the AST by calling
- // EvalAddr and EvalVal appropriately.
- switch (E->getStmtClass()) {
- case Stmt::DeclRefExprClass: {
- const DeclRefExpr *DR = cast<DeclRefExpr>(E);
-
- // If we leave the immediate function, the lifetime isn't about to end.
- if (DR->refersToEnclosingVariableOrCapture())
- return nullptr;
-
- if (const VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
- // If this is a reference variable, follow through to the expression that
- // it points to.
- if (V->hasLocalStorage() &&
- V->getType()->isReferenceType() && V->hasInit()) {
- // Add the reference variable to the "trail".
- refVars.push_back(DR);
- return EvalAddr(V->getInit(), refVars, ParentDecl);
- }
-
- return nullptr;
- }
-
- case Stmt::UnaryOperatorClass: {
- // The only unary operator that make sense to handle here
- // is AddrOf. All others don't make sense as pointers.
- const UnaryOperator *U = cast<UnaryOperator>(E);
-
- if (U->getOpcode() == UO_AddrOf)
- return EvalVal(U->getSubExpr(), refVars, ParentDecl);
- return nullptr;
- }
-
- case Stmt::BinaryOperatorClass: {
- // Handle pointer arithmetic. All other binary operators are not valid
- // in this context.
- const BinaryOperator *B = cast<BinaryOperator>(E);
- BinaryOperatorKind op = B->getOpcode();
-
- if (op != BO_Add && op != BO_Sub)
- return nullptr;
-
- const Expr *Base = B->getLHS();
-
- // Determine which argument is the real pointer base. It could be
- // the RHS argument instead of the LHS.
- if (!Base->getType()->isPointerType())
- Base = B->getRHS();
-
- assert(Base->getType()->isPointerType());
- return EvalAddr(Base, refVars, ParentDecl);
- }
-
- // For conditional operators we need to see if either the LHS or RHS are
- // valid DeclRefExpr*s. If one of them is valid, we return it.
- case Stmt::ConditionalOperatorClass: {
- const ConditionalOperator *C = cast<ConditionalOperator>(E);
-
- // Handle the GNU extension for missing LHS.
- // FIXME: That isn't a ConditionalOperator, so doesn't get here.
- if (const Expr *LHSExpr = C->getLHS()) {
- // In C++, we can have a throw-expression, which has 'void' type.
- if (!LHSExpr->getType()->isVoidType())
- if (const Expr *LHS = EvalAddr(LHSExpr, refVars, ParentDecl))
- return LHS;
- }
-
- // In C++, we can have a throw-expression, which has 'void' type.
- if (C->getRHS()->getType()->isVoidType())
- return nullptr;
-
- return EvalAddr(C->getRHS(), refVars, ParentDecl);
- }
-
- case Stmt::BlockExprClass:
- if (cast<BlockExpr>(E)->getBlockDecl()->hasCaptures())
- return E; // local block.
- return nullptr;
-
- case Stmt::AddrLabelExprClass:
- return E; // address of label.
-
- case Stmt::ExprWithCleanupsClass:
- return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
- ParentDecl);
-
- // For casts, we need to handle conversions from arrays to
- // pointer values, and pointer-to-pointer conversions.
- case Stmt::ImplicitCastExprClass:
- case Stmt::CStyleCastExprClass:
- case Stmt::CXXFunctionalCastExprClass:
- case Stmt::ObjCBridgedCastExprClass:
- case Stmt::CXXStaticCastExprClass:
- case Stmt::CXXDynamicCastExprClass:
- case Stmt::CXXConstCastExprClass:
- case Stmt::CXXReinterpretCastExprClass: {
- const Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
- switch (cast<CastExpr>(E)->getCastKind()) {
- case CK_LValueToRValue:
- case CK_NoOp:
- case CK_BaseToDerived:
- case CK_DerivedToBase:
- case CK_UncheckedDerivedToBase:
- case CK_Dynamic:
- case CK_CPointerToObjCPointerCast:
- case CK_BlockPointerToObjCPointerCast:
- case CK_AnyPointerToBlockPointerCast:
- return EvalAddr(SubExpr, refVars, ParentDecl);
-
- case CK_ArrayToPointerDecay:
- return EvalVal(SubExpr, refVars, ParentDecl);
-
- case CK_BitCast:
- if (SubExpr->getType()->isAnyPointerType() ||
- SubExpr->getType()->isBlockPointerType() ||
- SubExpr->getType()->isObjCQualifiedIdType())
- return EvalAddr(SubExpr, refVars, ParentDecl);
- else
- return nullptr;
-
- default:
- return nullptr;
- }
- }
-
- case Stmt::MaterializeTemporaryExprClass:
- if (const Expr *Result =
- EvalAddr(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
- refVars, ParentDecl))
- return Result;
- return E;
-
- // Everything else: we simply don't reason about them.
- default:
- return nullptr;
- }
-}
-
-/// EvalVal - This function is complements EvalAddr in the mutual recursion.
-/// See the comments for EvalAddr for more details.
-static const Expr *EvalVal(const Expr *E,
- SmallVectorImpl<const DeclRefExpr *> &refVars,
- const Decl *ParentDecl) {
- do {
- // We should only be called for evaluating non-pointer expressions, or
- // expressions with a pointer type that are not used as references but
- // instead
- // are l-values (e.g., DeclRefExpr with a pointer type).
-
- // Our "symbolic interpreter" is just a dispatch off the currently
- // viewed AST node. We then recursively traverse the AST by calling
- // EvalAddr and EvalVal appropriately.
-
- E = E->IgnoreParens();
- switch (E->getStmtClass()) {
- case Stmt::ImplicitCastExprClass: {
- const ImplicitCastExpr *IE = cast<ImplicitCastExpr>(E);
- if (IE->getValueKind() == VK_LValue) {
- E = IE->getSubExpr();
- continue;
- }
- return nullptr;
- }
-
- case Stmt::ExprWithCleanupsClass:
- return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
- ParentDecl);
-
- case Stmt::DeclRefExprClass: {
- // When we hit a DeclRefExpr we are looking at code that refers to a
- // variable's name. If it's not a reference variable we check if it has
- // local storage within the function, and if so, return the expression.
- const DeclRefExpr *DR = cast<DeclRefExpr>(E);
-
- // If we leave the immediate function, the lifetime isn't about to end.
- if (DR->refersToEnclosingVariableOrCapture())
- return nullptr;
-
- if (const VarDecl *V = dyn_cast<VarDecl>(DR->getDecl())) {
- // Check if it refers to itself, e.g. "int& i = i;".
- if (V == ParentDecl)
- return DR;
-
- if (V->hasLocalStorage()) {
- if (!V->getType()->isReferenceType())
- return DR;
-
- // Reference variable, follow through to the expression that
- // it points to.
- if (V->hasInit()) {
- // Add the reference variable to the "trail".
- refVars.push_back(DR);
- return EvalVal(V->getInit(), refVars, V);
- }
- }
- }
-
- return nullptr;
- }
-
- case Stmt::UnaryOperatorClass: {
- // The only unary operator that make sense to handle here
- // is Deref. All others don't resolve to a "name." This includes
- // handling all sorts of rvalues passed to a unary operator.
- const UnaryOperator *U = cast<UnaryOperator>(E);
-
- if (U->getOpcode() == UO_Deref)
- return EvalAddr(U->getSubExpr(), refVars, ParentDecl);
-
- return nullptr;
- }
-
- case Stmt::ArraySubscriptExprClass: {
- // Array subscripts are potential references to data on the stack. We
- // retrieve the DeclRefExpr* for the array variable if it indeed
- // has local storage.
- const auto *ASE = cast<ArraySubscriptExpr>(E);
- if (ASE->isTypeDependent())
- return nullptr;
- return EvalAddr(ASE->getBase(), refVars, ParentDecl);
- }
-
- case Stmt::OMPArraySectionExprClass: {
- return EvalAddr(cast<OMPArraySectionExpr>(E)->getBase(), refVars,
- ParentDecl);
- }
-
- case Stmt::ConditionalOperatorClass: {
- // For conditional operators we need to see if either the LHS or RHS are
- // non-NULL Expr's. If one is non-NULL, we return it.
- const ConditionalOperator *C = cast<ConditionalOperator>(E);
-
- // Handle the GNU extension for missing LHS.
- if (const Expr *LHSExpr = C->getLHS()) {
- // In C++, we can have a throw-expression, which has 'void' type.
- if (!LHSExpr->getType()->isVoidType())
- if (const Expr *LHS = EvalVal(LHSExpr, refVars, ParentDecl))
- return LHS;
- }
-
- // In C++, we can have a throw-expression, which has 'void' type.
- if (C->getRHS()->getType()->isVoidType())
- return nullptr;
-
- return EvalVal(C->getRHS(), refVars, ParentDecl);
- }
-
- // Accesses to members are potential references to data on the stack.
- case Stmt::MemberExprClass: {
- const MemberExpr *M = cast<MemberExpr>(E);
-
- // Check for indirect access. We only want direct field accesses.
- if (M->isArrow())
- return nullptr;
-
- // Check whether the member type is itself a reference, in which case
- // we're not going to refer to the member, but to what the member refers
- // to.
- if (M->getMemberDecl()->getType()->isReferenceType())
- return nullptr;
-
- return EvalVal(M->getBase(), refVars, ParentDecl);
- }
-
- case Stmt::MaterializeTemporaryExprClass:
- if (const Expr *Result =
- EvalVal(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
- refVars, ParentDecl))
- return Result;
- return E;
-
- default:
- // Check that we don't return or take the address of a reference to a
- // temporary. This is only useful in C++.
- if (!E->isTypeDependent() && E->isRValue())
- return E;
-
- // Everything else: we simply don't reason about them.
- return nullptr;
- }
- } while (true);
-}
-
void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod,
const AttrVec *Attrs,
const FunctionDecl *FD) {
- CheckReturnStackAddr(*this, RetValExp, lhsType, ReturnLoc);
-
// Check if the return value is null but should not be.
if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
(!isObjCMethod && isNonNullType(Context, lhsType))) &&
@@ -8168,7 +9255,7 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
if (Op == OO_New || Op == OO_Array_New) {
const FunctionProtoType *Proto
= FD->getType()->castAs<FunctionProtoType>();
- if (!Proto->isNothrow(Context, /*ResultIfDependent*/true) &&
+ if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
CheckNonNullExpr(*this, RetValExp))
Diag(ReturnLoc, diag::warn_operator_new_returns_null)
<< FD << getLangOpts().CPlusPlus11;
@@ -8917,7 +10004,7 @@ static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}
-/// \brief Implements -Wsign-compare.
+/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
@@ -8972,6 +10059,16 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
LHS = LHS->IgnoreParenImpCasts();
RHS = RHS->IgnoreParenImpCasts();
+ if (!S.getLangOpts().CPlusPlus) {
+ // Avoid warning about comparison of integers with different signs when
+ // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
+ // the type of `E`.
+ if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
+ LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
+ if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
+ RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
+ }
+
// Check to see if one of the (unmodified) operands is of different
// signedness.
Expr *signedOperand, *unsignedOperand;
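The new C-only branch unwraps typeof sugar before the signedness heuristics below run, so the analysis inspects the expression written inside typeof rather than the operand's sugared type. A plausible (hypothetical) C scenario; typeof is a GNU extension, and the branch is skipped in C++:

    #define ONE 1
    typeof(ONE) one;        /* 'int', spelled through typeof */
    int cmp(unsigned u) {
      return u > one;       /* the analysis substitutes the literal '1',
                               which is provably nonnegative, so no spurious
                               -Wsign-compare for the typeof-typed operand */
    }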
@@ -9195,6 +10292,32 @@ static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}
+/// Analyze the given compound assignment for the possible losing of
+/// floating-point precision.
+static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
+ assert(isa<CompoundAssignOperator>(E) &&
+ "Must be compound assignment operation");
+ // Recurse on the LHS and RHS here.
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+
+ // Now check the outermost expression
+ const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
+ const auto *RBT = cast<CompoundAssignOperator>(E)
+ ->getComputationResultType()
+ ->getAs<BuiltinType>();
+
+ // If both source and target are floating-point types.
+ if (ResultBT && ResultBT->isFloatingPoint() && RBT && RBT->isFloatingPoint())
+ // Builtin FP kinds are ordered by increasing FP rank.
+ if (ResultBT->getKind() < RBT->getKind())
+ // We don't want to warn for system macros.
+ if (!S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
+ // Warn about dropping FP rank.
+ DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(),
+ E->getOperatorLoc(),
+ diag::warn_impcast_float_result_precision);
+}
/// Diagnose an implicit cast from a floating point value to an integer value.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
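A minimal illustration of what AnalyzeCompoundAssignment flags; hypothetical user code, not a test case from this patch:

    double d = 1.0000000001;
    float f = 0.0f;
    f += d;   // the computation result type is 'double', a higher FP rank
              // than the stored-to 'float', so the RHS is diagnosed with
              // warn_impcast_float_result_precision unless the operator
              // location sits inside a system macro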
@@ -9223,14 +10346,24 @@ static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
T->hasUnsignedIntegerRepresentation());
- if (Value.convertToInteger(IntegerValue, llvm::APFloat::rmTowardZero,
- &isExact) == llvm::APFloat::opOK &&
- isExact) {
+ llvm::APFloat::opStatus Result = Value.convertToInteger(
+ IntegerValue, llvm::APFloat::rmTowardZero, &isExact);
+
+ if (Result == llvm::APFloat::opOK && isExact) {
if (IsLiteral) return;
return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
PruneWarnings);
}
+ // Conversion of a floating-point value to a non-bool integer where the
+ // integral part cannot be represented by the integer type is undefined.
+ if (!IsBool && Result == llvm::APFloat::opInvalidOp)
+ return DiagnoseImpCast(
+ S, E, T, CContext,
+ IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
+ : diag::warn_impcast_float_to_integer_out_of_range,
+ PruneWarnings);
+
unsigned DiagID = 0;
if (IsLiteral) {
// Warn on floating point literal to integer.
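A sketch of conversions that reach the new opInvalidOp path (illustrative only):

    int  i = 1.0e20;        // literal whose integral part cannot fit in 'int':
                            // warn_impcast_literal_float_to_integer_out_of_range
    int  j = 1.0e20 + 0.0;  // same value through a folded, non-literal
                            // expression:
                            // warn_impcast_float_to_integer_out_of_range
    bool b = 1.0e20;        // excluded by !IsBool above; float-to-bool is
                            // well defined (any nonzero value yields true)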
@@ -9364,18 +10497,15 @@ static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
// Venture through the macro stacks to get to the source of macro arguments.
// The new location is a better location than the complete location that was
// passed in.
- while (S.SourceMgr.isMacroArgExpansion(Loc))
- Loc = S.SourceMgr.getImmediateMacroCallerLoc(Loc);
-
- while (S.SourceMgr.isMacroArgExpansion(CC))
- CC = S.SourceMgr.getImmediateMacroCallerLoc(CC);
+ Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
+ CC = S.SourceMgr.getTopMacroCallerLoc(CC);
// __null is usually wrapped in a macro. Go up a macro if that is the case.
if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
Loc, S.SourceMgr, S.getLangOpts());
if (MacroName == "NULL")
- Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
+ Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
}
// Only warn if the null and context location are in the same macro expansion.
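The two hand-written loops collapse into SourceManager::getTopMacroCallerLoc; judging from the code it replaces, the helper is equivalent to:

    SourceLocation SourceManager::getTopMacroCallerLoc(SourceLocation Loc) const {
      // Walk out of nested macro-argument expansions until Loc refers to the
      // place where the outermost macro argument was spelled.
      while (isMacroArgExpansion(Loc))
        Loc = getImmediateMacroCallerLoc(Loc);
      return Loc;
    }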
@@ -9565,7 +10695,7 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
return;
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
}
-
+
// If the vector cast is cast between two vectors of the same size, it is
// a bitcast, not a conversion.
if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
@@ -9842,7 +10972,7 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
if (E->isTypeDependent() || E->isValueDependent())
return;
-
+
// For conditional operators, we analyze the arguments as if they
// were being fed directly into the output.
if (isa<ConditionalOperator>(E)) {
@@ -9886,6 +11016,9 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
// And with simple assignments.
if (BO->getOpcode() == BO_Assign)
return AnalyzeAssignment(S, BO);
+ // And with compound assignments.
+ if (BO->isAssignmentOp())
+ return AnalyzeCompoundAssignment(S, BO);
}
// These break the otherwise-useful invariant below. Fortunately,
@@ -9929,7 +11062,7 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
}
-/// Diagnose integer type and any valid implicit convertion to it.
+/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
// Taking into account implicit conversions,
// allow any integer.
@@ -9992,7 +11125,7 @@ static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
return false;
}
-/// \brief Diagnose pointers that are always non-null.
+/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
@@ -10096,8 +11229,8 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
return;
}
- for (unsigned ArgNo : NonNull->args()) {
- if (ArgNo == ParamNo) {
+ for (const ParamIdx &ArgNo : NonNull->args()) {
+ if (ArgNo.getASTIndex() == ParamNo) {
ComplainAboutNonnullParamOrCall(NonNull);
return;
}
@@ -10218,29 +11351,33 @@ void Sema::CheckForIntOverflow (Expr *E) {
SmallVector<Expr *, 2> Exprs(1, E);
do {
- Expr *E = Exprs.pop_back_val();
+ Expr *OriginalE = Exprs.pop_back_val();
+ Expr *E = OriginalE->IgnoreParenCasts();
- if (isa<BinaryOperator>(E->IgnoreParenCasts())) {
- E->IgnoreParenCasts()->EvaluateForOverflow(Context);
+ if (isa<BinaryOperator>(E)) {
+ E->EvaluateForOverflow(Context);
continue;
}
- if (auto InitList = dyn_cast<InitListExpr>(E))
+ if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
Exprs.append(InitList->inits().begin(), InitList->inits().end());
-
- if (isa<ObjCBoxedExpr>(E))
- E->IgnoreParenCasts()->EvaluateForOverflow(Context);
+ else if (isa<ObjCBoxedExpr>(OriginalE))
+ E->EvaluateForOverflow(Context);
+ else if (auto Call = dyn_cast<CallExpr>(E))
+ Exprs.append(Call->arg_begin(), Call->arg_end());
+ else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
+ Exprs.append(Message->arg_begin(), Message->arg_end());
} while (!Exprs.empty());
}
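With call and Objective-C message arguments now appended to the worklist, overflow buried in an argument list is evaluated too. A hypothetical example:

    void sink(int);
    void test() {
      sink(2147483647 + 1);   // the CallExpr's arguments are pushed onto
                              // Exprs, so this constant overflow is now
                              // diagnosed (-Winteger-overflow); previously
                              // only top-level expressions were checked
    }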
namespace {
-/// \brief Visitor for expressions which looks for unsequenced operations on the
+/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
using Base = EvaluatedExprVisitor<SequenceChecker>;
- /// \brief A tree of sequenced regions within an expression. Two regions are
+ /// A tree of sequenced regions within an expression. Two regions are
 /// unsequenced if one is an ancestor or a descendant of the other. When we
/// finish processing an expression with sequencing, such as a comma
/// expression, we fold its tree nodes into its parent, since they are
@@ -10254,7 +11391,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
SmallVector<Value, 8> Values;
public:
- /// \brief A region within an expression which may be sequenced with respect
+ /// A region within an expression which may be sequenced with respect
/// to some other region.
class Seq {
friend class SequenceTree;
@@ -10270,7 +11407,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
SequenceTree() { Values.push_back(Value(0)); }
Seq root() const { return Seq(0); }
- /// \brief Create a new sequence of operations, which is an unsequenced
+ /// Create a new sequence of operations, which is an unsequenced
/// subset of \p Parent. This sequence of operations is sequenced with
/// respect to other children of \p Parent.
Seq allocate(Seq Parent) {
@@ -10278,12 +11415,12 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
return Seq(Values.size() - 1);
}
- /// \brief Merge a sequence of operations into its parent.
+ /// Merge a sequence of operations into its parent.
void merge(Seq S) {
Values[S.Index].Merged = true;
}
- /// \brief Determine whether two operations are unsequenced. This operation
+ /// Determine whether two operations are unsequenced. This operation
/// is asymmetric: \p Cur should be the more recent sequence, and \p Old
/// should have been merged into its parent as appropriate.
bool isUnsequenced(Seq Cur, Seq Old) {
@@ -10298,7 +11435,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
}
private:
- /// \brief Pick a representative for a sequence.
+ /// Pick a representative for a sequence.
unsigned representative(unsigned K) {
if (Values[K].Merged)
// Perform path compression as we go.
@@ -10419,7 +11556,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
bool EvalOK = true;
} *EvalTracker = nullptr;
- /// \brief Find the object which is produced by the specified expression,
+ /// Find the object which is produced by the specified expression,
/// if any.
Object getObject(Expr *E, bool Mod) const {
E = E->IgnoreParenCasts();
@@ -10441,7 +11578,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
return nullptr;
}
- /// \brief Note that an object was modified or used by an expression.
+ /// Note that an object was modified or used by an expression.
void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) {
Usage &U = UI.Uses[UK];
if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) {
@@ -10452,7 +11589,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
}
}
- /// \brief Check whether a modification or use conflicts with a prior usage.
+ /// Check whether a modification or use conflicts with a prior usage.
void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind,
bool IsModMod) {
if (UI.Diagnosed)
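The diff only brushes past SequenceChecker here, but the machinery's net effect is the familiar -Wunsequenced diagnostic, e.g. (illustrative, before C++17 sequencing rules):

    int i = 0;
    i = i++ + 1;   // the write through the assignment and the side effect of
                   // 'i++' land in regions neither of which is sequenced
                   // after the other, so checkUsage() reports an unsequenced
                   // modification (-Wunsequenced)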
@@ -10831,23 +11968,18 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
// information is added for it.
diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
- // MSVC destroys objects passed by value in the callee. Therefore a
- // function definition which takes such a parameter must be able to call the
- // object's destructor. However, we don't perform any direct access check
- // on the dtor.
- if (getLangOpts().CPlusPlus && Context.getTargetInfo()
- .getCXXABI()
- .areArgsDestroyedLeftToRightInCallee()) {
- if (!Param->isInvalidDecl()) {
- if (const RecordType *RT = Param->getType()->getAs<RecordType>()) {
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (!ClassDecl->isInvalidDecl() &&
- !ClassDecl->hasIrrelevantDestructor() &&
- !ClassDecl->isDependentContext()) {
- CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
- MarkFunctionReferenced(Param->getLocation(), Destructor);
- DiagnoseUseOfDecl(Destructor, Param->getLocation());
- }
+ // If the parameter is a C++ class type that must be destroyed in the
+ // callee function, declare the destructor so that it can be called by the
+ // callee. Do not perform any direct access check on the dtor here.
+ if (!Param->isInvalidDecl()) {
+ if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
+ if (!ClassDecl->isInvalidDecl() &&
+ !ClassDecl->hasIrrelevantDestructor() &&
+ !ClassDecl->isDependentContext() &&
+ ClassDecl->isParamDestroyedInCallee()) {
+ CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
+ MarkFunctionReferenced(Param->getLocation(), Destructor);
+ DiagnoseUseOfDecl(Destructor, Param->getLocation());
}
}
}
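The rewritten condition keys on the ABI-independent isParamDestroyedInCallee() bit instead of querying the Microsoft C++ ABI directly. The situation it handles, sketched as hypothetical user code:

    struct S { ~S(); };
    void f(S s) {}   // where the record is destroyed in the callee (e.g.
                     // under the Microsoft C++ ABI), defining 'f' must be
                     // able to reference S::~S() even though the body never
                     // names it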
@@ -10930,7 +12062,7 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
<< TRange << Op->getSourceRange();
}
-/// \brief Check whether this array fits the idiom of a size-one tail padded
+/// Check whether this array fits the idiom of a size-one tail padded
/// array member of a struct.
///
/// We avoid emitting out-of-bounds access warnings for such arrays as they are
@@ -11000,9 +12132,9 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const NamedDecl *ND = nullptr;
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
- ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ ND = DRE->getDecl();
if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
- ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+ ND = ME->getMemberDecl();
if (index.isUnsigned() || !index.isNegative()) {
llvm::APInt size = ArrayTy->getSize();
@@ -11085,9 +12217,9 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
dyn_cast<ArraySubscriptExpr>(BaseExpr))
BaseExpr = ASE->getBase()->IgnoreParenCasts();
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
- ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ ND = DRE->getDecl();
if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
- ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+ ND = ME->getMemberDecl();
}
if (ND)
@@ -11105,7 +12237,12 @@ void Sema::CheckArrayAccess(const Expr *expr) {
const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
AllowOnePastEnd > 0);
- return;
+ expr = ASE->getBase();
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ expr = cast<MemberExpr>(expr)->getBase();
+ break;
}
case Stmt::OMPArraySectionExprClass: {
const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
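Instead of returning after the first subscript, the loop now walks down through subscript and member bases, so enclosing accesses get checked as well. A hypothetical example:

    struct T { int arr[4]; };
    T t[3];
    void g() {
      int x = t[5].arr[0];   // previously only 'arr[0]' was checked; walking
                             // through the MemberExpr base now also flags the
                             // out-of-bounds 't[5]' (-Warray-bounds)
    }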
@@ -11515,7 +12652,7 @@ void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
if (ArgRE->isObjCSelfExpr()) {
Diag(Message->getSourceRange().getBegin(),
diag::warn_objc_circular_container)
- << ArgRE->getDecl()->getName() << StringRef("super");
+ << ArgRE->getDecl() << StringRef("'super'");
}
}
} else {
@@ -11531,11 +12668,11 @@ void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
ValueDecl *Decl = ReceiverRE->getDecl();
Diag(Message->getSourceRange().getBegin(),
diag::warn_objc_circular_container)
- << Decl->getName() << Decl->getName();
+ << Decl << Decl;
if (!ArgRE->isObjCSelfExpr()) {
Diag(Decl->getLocation(),
diag::note_objc_circular_container_declared_here)
- << Decl->getName();
+ << Decl;
}
}
}
@@ -11545,10 +12682,10 @@ void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
ObjCIvarDecl *Decl = IvarRE->getDecl();
Diag(Message->getSourceRange().getBegin(),
diag::warn_objc_circular_container)
- << Decl->getName() << Decl->getName();
+ << Decl << Decl;
Diag(Decl->getLocation(),
diag::note_objc_circular_container_declared_here)
- << Decl->getName();
+ << Decl;
}
}
}
@@ -11941,7 +13078,7 @@ void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
-/// \brief Check if two enumeration types are layout-compatible.
+/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
// C++11 [dcl.enum] p8:
// Two enumeration types are layout-compatible if they have the same
@@ -11950,7 +13087,7 @@ static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}
-/// \brief Check if two fields are layout-compatible.
+/// Check if two fields are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
FieldDecl *Field2) {
if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
@@ -11971,7 +13108,7 @@ static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
return true;
}
-/// \brief Check if two standard-layout structs are layout-compatible.
+/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
RecordDecl *RD2) {
@@ -12015,7 +13152,7 @@ static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
return true;
}
-/// \brief Check if two standard-layout unions are layout-compatible.
+/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
RecordDecl *RD2) {
@@ -12054,7 +13191,7 @@ static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
return isLayoutCompatibleStruct(C, RD1, RD2);
}
-/// \brief Check if two types are layout-compatible in C++11 sense.
+/// Check if two types are layout-compatible in C++11 sense.
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
if (T1.isNull() || T2.isNull())
return false;
@@ -12092,7 +13229,7 @@ static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
-/// \brief Given a type tag expression find the type tag itself.
+/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
@@ -12163,7 +13300,7 @@ static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
}
}
-/// \brief Retrieve the C type corresponding to type tag TypeExpr.
+/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
@@ -12257,13 +13394,13 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
bool IsPointerAttr = Attr->getIsPointer();
// Retrieve the argument representing the 'type_tag'.
- if (Attr->getTypeTagIdx() >= ExprArgs.size()) {
- // Add 1 to display the user's specified value.
+ unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
+ if (TypeTagIdxAST >= ExprArgs.size()) {
Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
- << 0 << Attr->getTypeTagIdx() + 1;
+ << 0 << Attr->getTypeTagIdx().getSourceIndex();
return;
}
- const Expr *TypeTagExpr = ExprArgs[Attr->getTypeTagIdx()];
+ const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
bool FoundWrongKind;
TypeTagData TypeInfo;
if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
@@ -12277,13 +13414,13 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
}
// Retrieve the argument representing the 'arg_idx'.
- if (Attr->getArgumentIdx() >= ExprArgs.size()) {
- // Add 1 to display the user's specified value.
+ unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
+ if (ArgumentIdxAST >= ExprArgs.size()) {
Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
- << 1 << Attr->getArgumentIdx() + 1;
+ << 1 << Attr->getArgumentIdx().getSourceIndex();
return;
}
- const Expr *ArgumentExpr = ExprArgs[Attr->getArgumentIdx()];
+ const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
if (IsPointerAttr) {
// Skip implicit cast of pointer to `void *' (as a function argument).
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
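For context, the indices converted in these hunks come from the argument_with_type_tag attribute, whose source-level indices are 1-based; ParamIdx::getASTIndex() yields the 0-based form used to index ExprArgs, and getSourceIndex() restores the user-visible number that the deleted "+ 1" used to compute. The attribute's typical usage, as documented:

    // 1-based in source: argument 3 carries the pointer, argument 2 the tag.
    int fcntl(int fd, int cmd, ...)
        __attribute__((argument_with_type_tag(fcntl, 3, 2)));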
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 9aed178763dc..4e571eba17e9 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -40,39 +40,39 @@ using namespace clang;
using namespace sema;
namespace {
- /// \brief A container of code-completion results.
+ /// A container of code-completion results.
class ResultBuilder {
public:
- /// \brief The type of a name-lookup filter, which can be provided to the
+ /// The type of a name-lookup filter, which can be provided to the
/// name-lookup routines to specify which declarations should be included in
/// the result set (when it returns true) and which declarations should be
/// filtered out (returns false).
typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
typedef CodeCompletionResult Result;
-
+
private:
- /// \brief The actual results we have found.
+ /// The actual results we have found.
std::vector<Result> Results;
-
- /// \brief A record of all of the declarations we have found and placed
+
+ /// A record of all of the declarations we have found and placed
/// into the result set, used to ensure that no declaration ever gets into
/// the result set twice.
llvm::SmallPtrSet<const Decl*, 16> AllDeclsFound;
-
+
typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
- /// \brief An entry in the shadow map, which is optimized to store
+ /// An entry in the shadow map, which is optimized to store
/// a single (declaration, index) mapping (the common case) but
/// can also store a list of (declaration, index) mappings.
class ShadowMapEntry {
typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
- /// \brief Contains either the solitary NamedDecl * or a vector
+ /// Contains either the solitary NamedDecl * or a vector
/// of (declaration, index) pairs.
llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector*> DeclOrVector;
- /// \brief When the entry contains a single declaration, this is
+ /// When the entry contains a single declaration, this is
/// the index associated with that entry.
unsigned SingleDeclIndex;
@@ -115,72 +115,72 @@ namespace {
iterator end() const;
};
- /// \brief A mapping from declaration names to the declarations that have
+ /// A mapping from declaration names to the declarations that have
/// this name within a particular scope and their index within the list of
/// results.
typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
-
- /// \brief The semantic analysis object for which results are being
+
+ /// The semantic analysis object for which results are being
/// produced.
Sema &SemaRef;
- /// \brief The allocator used to allocate new code-completion strings.
+ /// The allocator used to allocate new code-completion strings.
CodeCompletionAllocator &Allocator;
CodeCompletionTUInfo &CCTUInfo;
-
- /// \brief If non-NULL, a filter function used to remove any code-completion
+
+ /// If non-NULL, a filter function used to remove any code-completion
/// results that are not desirable.
LookupFilter Filter;
- /// \brief Whether we should allow declarations as
+ /// Whether we should allow declarations as
/// nested-name-specifiers that would otherwise be filtered out.
bool AllowNestedNameSpecifiers;
- /// \brief If set, the type that we would prefer our resulting value
+ /// If set, the type that we would prefer our resulting value
/// declarations to have.
///
- /// Closely matching the preferred type gives a boost to a result's
+ /// Closely matching the preferred type gives a boost to a result's
/// priority.
CanQualType PreferredType;
-
- /// \brief A list of shadow maps, which is used to model name hiding at
+
+ /// A list of shadow maps, which is used to model name hiding at
/// different levels of, e.g., the inheritance hierarchy.
std::list<ShadowMap> ShadowMaps;
-
- /// \brief If we're potentially referring to a C++ member function, the set
+
+ /// If we're potentially referring to a C++ member function, the set
/// of qualifiers applied to the object type.
Qualifiers ObjectTypeQualifiers;
-
- /// \brief Whether the \p ObjectTypeQualifiers field is active.
+
+ /// Whether the \p ObjectTypeQualifiers field is active.
bool HasObjectTypeQualifiers;
-
- /// \brief The selector that we prefer.
+
+ /// The selector that we prefer.
Selector PreferredSelector;
-
- /// \brief The completion context in which we are gathering results.
+
+ /// The completion context in which we are gathering results.
CodeCompletionContext CompletionContext;
-
- /// \brief If we are in an instance method definition, the \@implementation
+
+ /// If we are in an instance method definition, the \@implementation
/// object.
ObjCImplementationDecl *ObjCImplementation;
void AdjustResultPriorityForDecl(Result &R);
void MaybeAddConstructorResults(Result R);
-
+
public:
explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
const CodeCompletionContext &CompletionContext,
LookupFilter Filter = nullptr)
: SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
- Filter(Filter),
- AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
+ Filter(Filter),
+ AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
CompletionContext(CompletionContext),
ObjCImplementation(nullptr)
- {
- // If this is an Objective-C instance method definition, dig out the
+ {
+ // If this is an Objective-C instance method definition, dig out the
// corresponding implementation.
switch (CompletionContext.getKind()) {
case CodeCompletionContext::CCC_Expression:
@@ -193,23 +193,23 @@ namespace {
if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
ObjCImplementation = Interface->getImplementation();
break;
-
+
default:
break;
}
}
- /// \brief Determine the priority for a reference to the given declaration.
+ /// Determine the priority for a reference to the given declaration.
unsigned getBasePriority(const NamedDecl *D);
- /// \brief Whether we should include code patterns in the completion
+ /// Whether we should include code patterns in the completion
/// results.
bool includeCodePatterns() const {
- return SemaRef.CodeCompleter &&
+ return SemaRef.CodeCompleter &&
SemaRef.CodeCompleter->includeCodePatterns();
}
-
- /// \brief Set the filter used for code-completion results.
+
+ /// Set the filter used for code-completion results.
void setFilter(LookupFilter Filter) {
this->Filter = Filter;
}
@@ -217,25 +217,25 @@ namespace {
Result *data() { return Results.empty()? nullptr : &Results.front(); }
unsigned size() const { return Results.size(); }
bool empty() const { return Results.empty(); }
-
- /// \brief Specify the preferred type.
- void setPreferredType(QualType T) {
- PreferredType = SemaRef.Context.getCanonicalType(T);
+
+ /// Specify the preferred type.
+ void setPreferredType(QualType T) {
+ PreferredType = SemaRef.Context.getCanonicalType(T);
}
-
- /// \brief Set the cv-qualifiers on the object type, for us in filtering
+
+ /// Set the cv-qualifiers on the object type, for use in filtering
/// calls to member functions.
///
/// When there are qualifiers in this set, they will be used to filter
- /// out member functions that aren't available (because there will be a
+ /// out member functions that aren't available (because there will be a
/// cv-qualifier mismatch) or prefer functions with an exact qualifier
/// match.
void setObjectTypeQualifiers(Qualifiers Quals) {
ObjectTypeQualifiers = Quals;
HasObjectTypeQualifiers = true;
}
-
- /// \brief Set the preferred selector.
+
+ /// Set the preferred selector.
///
/// When an Objective-C method declaration result is added, and that
/// method's selector matches this preferred selector, we give that method
@@ -243,28 +243,28 @@ namespace {
void setPreferredSelector(Selector Sel) {
PreferredSelector = Sel;
}
-
- /// \brief Retrieve the code-completion context for which results are
+
+ /// Retrieve the code-completion context for which results are
/// being collected.
- const CodeCompletionContext &getCompletionContext() const {
- return CompletionContext;
+ const CodeCompletionContext &getCompletionContext() const {
+ return CompletionContext;
}
-
- /// \brief Specify whether nested-name-specifiers are allowed.
+
+ /// Specify whether nested-name-specifiers are allowed.
void allowNestedNameSpecifiers(bool Allow = true) {
AllowNestedNameSpecifiers = Allow;
}
- /// \brief Return the semantic analysis object for which we are collecting
+ /// Return the semantic analysis object for which we are collecting
/// code completion results.
Sema &getSema() const { return SemaRef; }
-
- /// \brief Retrieve the allocator used to allocate code completion strings.
+
+ /// Retrieve the allocator used to allocate code completion strings.
CodeCompletionAllocator &getAllocator() const { return Allocator; }
CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
-
- /// \brief Determine whether the given declaration is at all interesting
+
+ /// Determine whether the given declaration is at all interesting
/// as a code-completion result.
///
/// \param ND the declaration that we are inspecting.
@@ -273,8 +273,8 @@ namespace {
/// only interesting when it is a nested-name-specifier.
bool isInterestingDecl(const NamedDecl *ND,
bool &AsNestedNameSpecifier) const;
-
- /// \brief Check whether the result is hidden by the Hiding declaration.
+
+ /// Check whether the result is hidden by the Hiding declaration.
///
/// \returns true if the result is hidden and cannot be found, false if
/// the hidden result could still be found. When false, \p R may be
@@ -282,9 +282,9 @@ namespace {
/// qualification).
bool CheckHiddenResult(Result &R, DeclContext *CurContext,
const NamedDecl *Hiding);
-
- /// \brief Add a new result to this result set (if it isn't already in one
- /// of the shadow maps), or replace an existing result (for, e.g., a
+
+ /// Add a new result to this result set (if it isn't already in one
+ /// of the shadow maps), or replace an existing result (for, e.g., a
/// redeclaration).
///
/// \param R the result to add (if it is unique).
@@ -292,7 +292,7 @@ namespace {
/// \param CurContext the context in which this result will be named.
void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
- /// \brief Add a new result to this result set, where we already know
+ /// Add a new result to this result set, where we already know
/// the hiding declaration (if any).
///
/// \param R the result to add (if it is unique).
@@ -305,24 +305,29 @@ namespace {
/// class of the searched context.
void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
bool InBaseClass);
-
- /// \brief Add a new non-declaration result to this result set.
+
+ /// Add a new non-declaration result to this result set.
void AddResult(Result R);
- /// \brief Enter into a new scope.
+ /// Enter into a new scope.
void EnterNewScope();
-
- /// \brief Exit from the current scope.
+
+ /// Exit from the current scope.
void ExitScope();
-
- /// \brief Ignore this declaration, if it is seen again.
+
+ /// Ignore this declaration, if it is seen again.
void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
+ /// Add a visited context.
+ void addVisitedContext(DeclContext *Ctx) {
+ CompletionContext.addVisitedContext(Ctx);
+ }
+
/// \name Name lookup predicates
///
/// These predicates can be passed to the name lookup functions to filter the
 /// results of name lookup. All of the predicates have the same type, so that
 /// any of them can be used as a LookupFilter.
- ///
+ ///
//@{
bool IsOrdinaryName(const NamedDecl *ND) const;
bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
@@ -341,8 +346,8 @@ namespace {
bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
bool IsObjCCollection(const NamedDecl *ND) const;
bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
- //@}
- };
+ //@}
+ };
}
class ResultBuilder::ShadowMapEntry::iterator {
@@ -354,7 +359,7 @@ public:
typedef value_type reference;
typedef std::ptrdiff_t difference_type;
typedef std::input_iterator_tag iterator_category;
-
+
class pointer {
DeclIndexPair Value;
@@ -415,7 +420,7 @@ public:
}
};
-ResultBuilder::ShadowMapEntry::iterator
+ResultBuilder::ShadowMapEntry::iterator
ResultBuilder::ShadowMapEntry::begin() const {
if (DeclOrVector.isNull())
return iterator();
@@ -426,7 +431,7 @@ ResultBuilder::ShadowMapEntry::begin() const {
return iterator(DeclOrVector.get<DeclIndexPairVector *>()->begin());
}
-ResultBuilder::ShadowMapEntry::iterator
+ResultBuilder::ShadowMapEntry::iterator
ResultBuilder::ShadowMapEntry::end() const {
if (DeclOrVector.is<const NamedDecl *>() || DeclOrVector.isNull())
return iterator();
@@ -434,7 +439,7 @@ ResultBuilder::ShadowMapEntry::end() const {
return iterator(DeclOrVector.get<DeclIndexPairVector *>()->end());
}
-/// \brief Compute the qualification required to get from the current context
+/// Compute the qualification required to get from the current context
/// (\p CurContext) to the target context (\p TargetContext).
///
/// \param Context the AST context in which the qualification will be used.
@@ -442,7 +447,7 @@ ResultBuilder::ShadowMapEntry::end() const {
/// \param CurContext the context where an entity is being named, which is
/// typically based on the current scope.
///
-/// \param TargetContext the context in which the named entity actually
+/// \param TargetContext the context in which the named entity actually
/// resides.
///
/// \returns a nested name specifier that refers into the target context, or
@@ -452,14 +457,14 @@ getRequiredQualification(ASTContext &Context,
const DeclContext *CurContext,
const DeclContext *TargetContext) {
SmallVector<const DeclContext *, 4> TargetParents;
-
+
for (const DeclContext *CommonAncestor = TargetContext;
CommonAncestor && !CommonAncestor->Encloses(CurContext);
CommonAncestor = CommonAncestor->getLookupParent()) {
if (CommonAncestor->isTransparentContext() ||
CommonAncestor->isFunctionOrMethod())
continue;
-
+
TargetParents.push_back(CommonAncestor);
}
@@ -477,7 +482,7 @@ getRequiredQualification(ASTContext &Context,
Result = NestedNameSpecifier::Create(Context, Result,
false,
Context.getTypeDeclType(TD).getTypePtr());
- }
+ }
return Result;
}
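A concrete walk through getRequiredQualification, with invented names for illustration:

    namespace outer { namespace inner { struct widget {}; } }
    namespace outer {
      void f() {
        // Completing 'widget' here: TargetContext is outer::inner while
        // CurContext is inside outer, so TargetParents collects only
        // 'inner' and the function returns the specifier 'inner::'.
        inner::widget w;
      }
    }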
@@ -526,17 +531,17 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
// Skip unnamed entities.
if (!ND->getDeclName())
return false;
-
+
// Friend declarations and declarations introduced due to friends are never
// added as results.
if (ND->getFriendObjectKind() == Decl::FOK_Undeclared)
return false;
-
+
// Class template (partial) specializations are never added as results.
if (isa<ClassTemplateSpecializationDecl>(ND) ||
isa<ClassTemplatePartialSpecializationDecl>(ND))
return false;
-
+
// Using declarations themselves are never added as results.
if (isa<UsingDecl>(ND))
return false;
@@ -554,17 +559,17 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
// Filter out any unwanted results.
if (Filter && !(this->*Filter)(Named)) {
// Check whether it is interesting as a nested-name-specifier.
- if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
+ if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
IsNestedNameSpecifier(ND) &&
(Filter != &ResultBuilder::IsMember ||
- (isa<CXXRecordDecl>(ND) &&
+ (isa<CXXRecordDecl>(ND) &&
cast<CXXRecordDecl>(ND)->isInjectedClassName()))) {
AsNestedNameSpecifier = true;
return true;
}
return false;
- }
+ }
// ... then it must be interesting!
return true;
}
@@ -576,29 +581,29 @@ bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
// name if we introduce the tag type.
if (!SemaRef.getLangOpts().CPlusPlus)
return true;
-
+
const DeclContext *HiddenCtx =
R.Declaration->getDeclContext()->getRedeclContext();
-
+
// There is no way to qualify a name declared in a function or method.
if (HiddenCtx->isFunctionOrMethod())
return true;
-
+
if (HiddenCtx == Hiding->getDeclContext()->getRedeclContext())
return true;
-
+
// We can refer to the result with the appropriate qualification. Do it.
R.Hidden = true;
R.QualifierIsInformative = false;
-
+
if (!R.Qualifier)
- R.Qualifier = getRequiredQualification(SemaRef.Context,
- CurContext,
+ R.Qualifier = getRequiredQualification(SemaRef.Context,
+ CurContext,
R.Declaration->getDeclContext());
return false;
}
-/// \brief A simplified classification of types used to determine whether two
+/// A simplified classification of types used to determine whether two
/// types are "similar enough" when adjusting priorities.
SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
switch (T->getTypeClass()) {
@@ -606,77 +611,77 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
switch (cast<BuiltinType>(T)->getKind()) {
case BuiltinType::Void:
return STC_Void;
-
+
case BuiltinType::NullPtr:
return STC_Pointer;
-
+
case BuiltinType::Overload:
case BuiltinType::Dependent:
return STC_Other;
-
+
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
return STC_ObjectiveC;
-
+
default:
return STC_Arithmetic;
}
case Type::Complex:
return STC_Arithmetic;
-
+
case Type::Pointer:
return STC_Pointer;
-
+
case Type::BlockPointer:
return STC_Block;
-
+
case Type::LValueReference:
case Type::RValueReference:
return getSimplifiedTypeClass(T->getAs<ReferenceType>()->getPointeeType());
-
+
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
case Type::DependentSizedArray:
return STC_Array;
-
+
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
return STC_Arithmetic;
-
+
case Type::FunctionProto:
case Type::FunctionNoProto:
return STC_Function;
-
+
case Type::Record:
return STC_Record;
-
+
case Type::Enum:
return STC_Arithmetic;
-
+
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
return STC_ObjectiveC;
-
+
default:
return STC_Other;
}
}
-/// \brief Get the type that a given expression will have if this declaration
+/// Get the type that a given expression will have if this declaration
/// is used as an expression in its "typical" code-completion form.
QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
- ND = cast<NamedDecl>(ND->getUnderlyingDecl());
-
+ ND = ND->getUnderlyingDecl();
+
if (const TypeDecl *Type = dyn_cast<TypeDecl>(ND))
return C.getTypeDeclType(Type);
if (const ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
return C.getObjCInterfaceType(Iface);
-
+
QualType T;
if (const FunctionDecl *Function = ND->getAsFunction())
T = Function->getCallResultType();
@@ -721,7 +726,7 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
break;
} while (true);
-
+
return T;
}
@@ -781,7 +786,7 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
if (PreferredSelector == Method->getSelector())
R.Priority += CCD_SelectorMatch;
-
+
// If we have a preferred type, adjust the priority for results with exactly-
// matching or nearly-matching types.
if (!PreferredType.isNull()) {
@@ -795,16 +800,16 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
else if ((getSimplifiedTypeClass(PreferredType)
== getSimplifiedTypeClass(TC)) &&
!(PreferredType->isEnumeralType() && TC->isEnumeralType()))
- R.Priority /= CCF_SimilarTypeMatch;
+ R.Priority /= CCF_SimilarTypeMatch;
}
- }
+ }
}
void ResultBuilder::MaybeAddConstructorResults(Result R) {
if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
!CompletionContext.wantConstructorResults())
return;
-
+
ASTContext &Context = SemaRef.Context;
const NamedDecl *D = R.Declaration;
const CXXRecordDecl *Record = nullptr;
@@ -818,12 +823,12 @@ void ResultBuilder::MaybeAddConstructorResults(Result R) {
// There are no constructors here.
return;
}
-
+
Record = Record->getDefinition();
if (!Record)
return;
-
+
QualType RecordTy = Context.getTypeDeclType(Record);
DeclarationName ConstructorName
= Context.DeclarationNames.getCXXConstructorName(
@@ -838,9 +843,15 @@ void ResultBuilder::MaybeAddConstructorResults(Result R) {
}
}
+static bool isConstructor(const Decl *ND) {
+ if (const auto *Tmpl = dyn_cast<FunctionTemplateDecl>(ND))
+ ND = Tmpl->getTemplatedDecl();
+ return isa<CXXConstructorDecl>(ND);
+}
+
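The look-through matters because a constructor template is modeled as a FunctionTemplateDecl whose templated declaration is the CXXConstructorDecl, a shape the bare isa<CXXConstructorDecl> checks below used to miss:

    struct X {
      template <typename T> X(T);   // surfaces in lookup as a
                                    // FunctionTemplateDecl; isConstructor()
                                    // unwraps it so such constructors are
                                    // filtered from completion results too
    };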
void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
assert(!ShadowMaps.empty() && "Must enter into a results scope");
-
+
if (R.Kind != Result::RK_Declaration) {
// For non-declaration results, just add the result.
Results.push_back(R);
@@ -848,24 +859,24 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
}
// Look through using declarations.
- if (const UsingShadowDecl *Using =
- dyn_cast<UsingShadowDecl>(R.Declaration)) {
- MaybeAddResult(Result(Using->getTargetDecl(),
- getBasePriority(Using->getTargetDecl()),
- R.Qualifier),
- CurContext);
+ if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ CodeCompletionResult Result(Using->getTargetDecl(),
+ getBasePriority(Using->getTargetDecl()),
+ R.Qualifier);
+ Result.ShadowDecl = Using;
+ MaybeAddResult(Result, CurContext);
return;
}
-
+
const Decl *CanonDecl = R.Declaration->getCanonicalDecl();
unsigned IDNS = CanonDecl->getIdentifierNamespace();
bool AsNestedNameSpecifier = false;
if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
return;
-
+
// C++ constructors are never found by name lookup.
- if (isa<CXXConstructorDecl>(R.Declaration))
+ if (isConstructor(R.Declaration))
return;
ShadowMap &SMap = ShadowMaps.back();
@@ -882,12 +893,12 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (ND->getCanonicalDecl() == CanonDecl) {
// This is a redeclaration. Always pick the newer declaration.
Results[Index].Declaration = R.Declaration;
-
+
// We're done.
return;
}
}
-
+
// This is a new declaration in this scope. However, check whether this
// declaration name is hidden by a similarly-named declaration in an outer
// scope.
@@ -906,21 +917,21 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
(IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
Decl::IDNS_LocalExtern | Decl::IDNS_ObjCProtocol)))
continue;
-
+
// Protocols are in distinct namespaces from everything else.
if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
|| (IDNS & Decl::IDNS_ObjCProtocol)) &&
I->first->getIdentifierNamespace() != IDNS)
continue;
-
+
// The newly-added result is hidden by an entry in the shadow map.
if (CheckHiddenResult(R, CurContext, I->first))
return;
-
+
break;
}
}
-
+
// Make sure that any given declaration only shows up in the result set once.
if (!AllDeclsFound.insert(CanonDecl).second)
return;
@@ -930,9 +941,9 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (AsNestedNameSpecifier) {
R.StartsNestedNameSpecifier = true;
R.Priority = CCP_NestedNameSpecifier;
- } else
+ } else
AdjustResultPriorityForDecl(R);
-
+
// If this result is supposed to have an informative qualifier, add one.
if (R.QualifierIsInformative && !R.Qualifier &&
!R.StartsNestedNameSpecifier) {
@@ -946,17 +957,17 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
else
R.QualifierIsInformative = false;
}
-
+
// Insert this result into the set of results and into the current shadow
// map.
SMap[R.Declaration->getDeclName()].Add(R.Declaration, Results.size());
Results.push_back(R);
-
+
if (!AsNestedNameSpecifier)
MaybeAddConstructorResults(R);
}
-void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
+void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
NamedDecl *Hiding, bool InBaseClass = false) {
if (R.Kind != Result::RK_Declaration) {
// For non-declaration results, just add the result.
@@ -966,19 +977,20 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
// Look through using declarations.
if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
- AddResult(Result(Using->getTargetDecl(),
- getBasePriority(Using->getTargetDecl()),
- R.Qualifier),
- CurContext, Hiding);
+ CodeCompletionResult Result(Using->getTargetDecl(),
+ getBasePriority(Using->getTargetDecl()),
+ R.Qualifier);
+ Result.ShadowDecl = Using;
+ AddResult(Result, CurContext, Hiding);
return;
}
-
+
bool AsNestedNameSpecifier = false;
if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
return;
-
+
// C++ constructors are never found by name lookup.
- if (isa<CXXConstructorDecl>(R.Declaration))
+ if (isConstructor(R.Declaration))
return;
if (Hiding && CheckHiddenResult(R, CurContext, Hiding))
@@ -987,16 +999,16 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
// Make sure that any given declaration only shows up in the result set once.
if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()).second)
return;
-
+
// If the filter is for nested-name-specifiers, then this result starts a
// nested-name-specifier.
if (AsNestedNameSpecifier) {
R.StartsNestedNameSpecifier = true;
R.Priority = CCP_NestedNameSpecifier;
- }
- else if (Filter == &ResultBuilder::IsMember && !R.Qualifier && InBaseClass &&
- isa<CXXRecordDecl>(R.Declaration->getDeclContext()
- ->getRedeclContext()))
+ } else if (Filter == &ResultBuilder::IsMember && !R.Qualifier &&
+ InBaseClass &&
+ isa<CXXRecordDecl>(
+ R.Declaration->getDeclContext()->getRedeclContext()))
R.QualifierIsInformative = true;
// If this result is supposed to have an informative qualifier, add one.
@@ -1012,13 +1024,13 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
else
R.QualifierIsInformative = false;
}
-
+
// Adjust the priority if this result comes from a base class.
if (InBaseClass)
R.Priority += CCD_InBaseClass;
-
+
AdjustResultPriorityForDecl(R);
-
+
if (HasObjectTypeQualifiers)
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
if (Method->isInstance()) {
@@ -1027,43 +1039,43 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (ObjectTypeQualifiers == MethodQuals)
R.Priority += CCD_ObjectQualifierMatch;
else if (ObjectTypeQualifiers - MethodQuals) {
- // The method cannot be invoked, because doing so would drop
+ // The method cannot be invoked, because doing so would drop
// qualifiers.
return;
}
}
-
+
// Insert this result into the set of results.
Results.push_back(R);
-
+
if (!AsNestedNameSpecifier)
MaybeAddConstructorResults(R);
}
void ResultBuilder::AddResult(Result R) {
- assert(R.Kind != Result::RK_Declaration &&
+ assert(R.Kind != Result::RK_Declaration &&
"Declaration results need more context");
Results.push_back(R);
}
-/// \brief Enter into a new scope.
+/// Enter into a new scope.
void ResultBuilder::EnterNewScope() { ShadowMaps.emplace_back(); }
-/// \brief Exit from the current scope.
+/// Exit from the current scope.
void ResultBuilder::ExitScope() {
for (ShadowMap::iterator E = ShadowMaps.back().begin(),
EEnd = ShadowMaps.back().end();
E != EEnd;
++E)
E->second.Destroy();
-
+
ShadowMaps.pop_back();
}
-/// \brief Determines whether this given declaration will be found by
+/// Determines whether the given declaration will be found by
/// ordinary name lookup.
bool ResultBuilder::IsOrdinaryName(const NamedDecl *ND) const {
- ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+ ND = ND->getUnderlyingDecl();
// If name lookup finds a local extern declaration, then we are in a
// context where it behaves like an ordinary name.
@@ -1074,14 +1086,14 @@ bool ResultBuilder::IsOrdinaryName(const NamedDecl *ND) const {
if (isa<ObjCIvarDecl>(ND))
return true;
}
-
+
return ND->getIdentifierNamespace() & IDNS;
}
-/// \brief Determines whether this given declaration will be found by
+/// Determines whether the given declaration will be found by
/// ordinary name lookup but is not a type name.
bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
- ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+ ND = ND->getUnderlyingDecl();
if (isa<TypeDecl>(ND))
return false;
// Objective-C interfaces names are not filtered by this method because they
@@ -1099,51 +1111,51 @@ bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
if (isa<ObjCIvarDecl>(ND))
return true;
}
-
+
return ND->getIdentifierNamespace() & IDNS;
}
bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
if (!IsOrdinaryNonTypeName(ND))
 return false;
-
+
if (const ValueDecl *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
if (VD->getType()->isIntegralOrEnumerationType())
return true;
-
+
return false;
}
-/// \brief Determines whether this given declaration will be found by
+/// Determines whether the given declaration will be found by
 /// ordinary name lookup but is not a value declaration.
bool ResultBuilder::IsOrdinaryNonValueName(const NamedDecl *ND) const {
- ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+ ND = ND->getUnderlyingDecl();
unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
-
- return (ND->getIdentifierNamespace() & IDNS) &&
- !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
+
+ return (ND->getIdentifierNamespace() & IDNS) &&
+ !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
!isa<ObjCPropertyDecl>(ND);
}
-/// \brief Determines whether the given declaration is suitable as the
+/// Determines whether the given declaration is suitable as the
/// start of a C++ nested-name-specifier, e.g., a class or namespace.
bool ResultBuilder::IsNestedNameSpecifier(const NamedDecl *ND) const {
// Allow us to find class templates, too.
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
-
+
return SemaRef.isAcceptableNestedNameSpecifier(ND);
}
-/// \brief Determines whether the given declaration is an enumeration.
+/// Determines whether the given declaration is an enumeration.
bool ResultBuilder::IsEnum(const NamedDecl *ND) const {
return isa<EnumDecl>(ND);
}
-/// \brief Determines whether the given declaration is a class or struct.
+/// Determines whether the given declaration is a class or struct.
bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
// Allow us to find class templates, too.
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
@@ -1154,40 +1166,40 @@ bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
return RD->getTagKind() == TTK_Class ||
RD->getTagKind() == TTK_Struct ||
RD->getTagKind() == TTK_Interface;
-
+
return false;
}
-/// \brief Determines whether the given declaration is a union.
+/// Determines whether the given declaration is a union.
bool ResultBuilder::IsUnion(const NamedDecl *ND) const {
// Allow us to find class templates, too.
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
-
+
if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
return RD->getTagKind() == TTK_Union;
-
+
return false;
}
-/// \brief Determines whether the given declaration is a namespace.
+/// Determines whether the given declaration is a namespace.
bool ResultBuilder::IsNamespace(const NamedDecl *ND) const {
return isa<NamespaceDecl>(ND);
}
-/// \brief Determines whether the given declaration is a namespace or
+/// Determines whether the given declaration is a namespace or
/// namespace alias.
bool ResultBuilder::IsNamespaceOrAlias(const NamedDecl *ND) const {
return isa<NamespaceDecl>(ND->getUnderlyingDecl());
}
-/// \brief Determines whether the given declaration is a type.
+/// Determines whether the given declaration is a type.
bool ResultBuilder::IsType(const NamedDecl *ND) const {
ND = ND->getUnderlyingDecl();
return isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
}
-/// \brief Determines which members of a class should be visible via
+/// Determines which members of a class should be visible via
/// "." or "->". Only value declarations, nested name specifiers, and
/// using declarations thereof should show up.
bool ResultBuilder::IsMember(const NamedDecl *ND) const {
@@ -1199,31 +1211,31 @@ bool ResultBuilder::IsMember(const NamedDecl *ND) const {
static bool isObjCReceiverType(ASTContext &C, QualType T) {
T = C.getCanonicalType(T);
switch (T->getTypeClass()) {
- case Type::ObjCObject:
+ case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
return true;
-
+
case Type::Builtin:
switch (cast<BuiltinType>(T)->getKind()) {
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
return true;
-
+
default:
break;
}
return false;
-
+
default:
break;
}
-
+
if (!C.getLangOpts().CPlusPlus)
return false;
- // FIXME: We could perform more analysis here to determine whether a
+ // FIXME: We could perform more analysis here to determine whether a
// particular class type has any conversions to Objective-C types. For now,
// just accept all class types.
return T->isDependentType() || T->isRecordType();
@@ -1233,7 +1245,7 @@ bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
QualType T = getDeclUsageType(SemaRef.Context, ND);
if (T.isNull())
return false;
-
+
T = SemaRef.Context.getBaseElementType(T);
return isObjCReceiverType(SemaRef.Context, T);
}
@@ -1241,11 +1253,11 @@ bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const {
if (IsObjCMessageReceiver(ND))
return true;
-
+
const VarDecl *Var = dyn_cast<VarDecl>(ND);
if (!Var)
return false;
-
+
return Var->hasLocalStorage() && !Var->hasAttr<BlocksAttr>();
}
@@ -1253,14 +1265,14 @@ bool ResultBuilder::IsObjCCollection(const NamedDecl *ND) const {
if ((SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryName(ND)) ||
(!SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
return false;
-
+
QualType T = getDeclUsageType(SemaRef.Context, ND);
if (T.isNull())
return false;
-
+
T = SemaRef.Context.getBaseElementType(T);
return T->isObjCObjectType() || T->isObjCObjectPointerType() ||
- T->isObjCIdType() ||
+ T->isObjCIdType() ||
(SemaRef.getLangOpts().CPlusPlus && T->isRecordType());
}
@@ -1268,37 +1280,69 @@ bool ResultBuilder::IsImpossibleToSatisfy(const NamedDecl *ND) const {
return false;
}
-/// \brief Determines whether the given declaration is an Objective-C
+/// Determines whether the given declaration is an Objective-C
/// instance variable.
bool ResultBuilder::IsObjCIvar(const NamedDecl *ND) const {
return isa<ObjCIvarDecl>(ND);
}
namespace {
- /// \brief Visible declaration consumer that adds a code-completion result
+ /// Visible declaration consumer that adds a code-completion result
/// for each visible declaration.
class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
ResultBuilder &Results;
DeclContext *CurContext;
-
+ std::vector<FixItHint> FixIts;
+
public:
- CodeCompletionDeclConsumer(ResultBuilder &Results, DeclContext *CurContext)
- : Results(Results), CurContext(CurContext) { }
+ CodeCompletionDeclConsumer(
+ ResultBuilder &Results, DeclContext *CurContext,
+ std::vector<FixItHint> FixIts = std::vector<FixItHint>())
+ : Results(Results), CurContext(CurContext), FixIts(std::move(FixIts)) {}
void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
bool InBaseClass) override {
bool Accessible = true;
- if (Ctx)
- Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
+ if (Ctx) {
+ DeclContext *AccessingCtx = Ctx;
+ // If ND comes from a base class, set the naming class back to the
+ // derived class if the search starts from the derived class (i.e.
+ // InBaseClass is true).
+ //
+ // Example:
+ // class B { protected: int X; }
+ // class D : public B { void f(); }
+ // void D::f() { this->^; }
+ // The completion after "this->" will have `InBaseClass` set to true and
+ // `Ctx` set to "B", when looking up in `B`. We need to set the actual
+ // accessing context (i.e. naming class) to "D" so that access can be
+ // calculated correctly.
+ if (InBaseClass && isa<CXXRecordDecl>(Ctx)) {
+ CXXRecordDecl *RC = nullptr;
+ // Get the enclosing record.
+ for (DeclContext *DC = CurContext; !DC->isFileContext();
+ DC = DC->getParent()) {
+ if ((RC = dyn_cast<CXXRecordDecl>(DC)))
+ break;
+ }
+ if (RC)
+ AccessingCtx = RC;
+ }
+ Accessible = Results.getSema().IsSimplyAccessible(ND, AccessingCtx);
+ }
ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
- false, Accessible);
+ false, Accessible, FixIts);
Results.AddResult(Result, CurContext, Hiding, InBaseClass);
}
+
+ void EnteredContext(DeclContext* Ctx) override {
+ Results.addVisitedContext(Ctx);
+ }
};
}
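// To make the access rule above concrete, a minimal standalone sketch using
// the names from the comment's own example: B::X is protected, yet the member
// access is well-formed inside D::f because the naming class of "this->X" is
// D, not B. Checking access against B (the context the lookup walked into)
// would wrongly report the result as inaccessible.
class B { protected: int X; };
class D : public B {
  void f() { this->X = 1; } // OK: access is checked with naming class D.
};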
-/// \brief Add type specifiers for the current language as keyword results.
+/// Add type specifiers for the current language as keyword results.
static void AddTypeSpecifierResults(const LangOptions &LangOpts,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
@@ -1324,16 +1368,16 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("_Bool", CCP_Type));
Results.AddResult(Result("restrict", CCP_Type));
}
-
+
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
if (LangOpts.CPlusPlus) {
// C++-specific
- Results.AddResult(Result("bool", CCP_Type +
+ Results.AddResult(Result("bool", CCP_Type +
(LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
-
+
// typename qualified-id
Builder.AddTypedTextChunk("typename");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -1341,12 +1385,12 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
-
+
if (LangOpts.CPlusPlus11) {
Results.AddResult(Result("auto", CCP_Type));
Results.AddResult(Result("char16_t", CCP_Type));
Results.AddResult(Result("char32_t", CCP_Type));
-
+
Builder.AddTypedTextChunk("decltype");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
@@ -1356,13 +1400,13 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
} else
Results.AddResult(Result("__auto_type", CCP_Type));
- // GNU extensions
- if (LangOpts.GNUMode) {
+ // GNU keywords
+ if (LangOpts.GNUKeywords) {
// FIXME: Enable when we actually support decimal floating point.
// Results.AddResult(Result("_Decimal32"));
// Results.AddResult(Result("_Decimal64"));
// Results.AddResult(Result("_Decimal128"));
-
+
Builder.AddTypedTextChunk("typeof");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
@@ -1382,7 +1426,7 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
}
static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
- const LangOptions &LangOpts,
+ const LangOptions &LangOpts,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
// Note: we don't suggest either "auto" or "register", because both
@@ -1408,7 +1452,7 @@ static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
}
static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
- const LangOptions &LangOpts,
+ const LangOptions &LangOpts,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
switch (CCC) {
@@ -1419,7 +1463,7 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
Results.AddResult(Result("friend"));
Results.AddResult(Result("mutable"));
Results.AddResult(Result("virtual"));
- }
+ }
LLVM_FALLTHROUGH;
case Sema::PCC_ObjCInterface:
@@ -1447,7 +1491,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
ResultBuilder &Results,
- bool NeedAt);
+ bool NeedAt);
static void AddObjCImplementationResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt);
@@ -1464,7 +1508,7 @@ static void AddTypedefResult(ResultBuilder &Results) {
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
- Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
@@ -1481,15 +1525,15 @@ static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
case Sema::PCC_ParenthesizedExpression:
case Sema::PCC_LocalDeclarationSpecifiers:
return true;
-
+
case Sema::PCC_Expression:
case Sema::PCC_Condition:
return LangOpts.CPlusPlus;
-
+
case Sema::PCC_ObjCInterface:
case Sema::PCC_ObjCImplementation:
return false;
-
+
case Sema::PCC_ForInit:
return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
}
@@ -1507,12 +1551,12 @@ static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
return Policy;
}
-/// \brief Retrieve a printing policy suitable for code completion.
+/// Retrieve a printing policy suitable for code completion.
static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
return getCompletionPrintingPolicy(S.Context, S.PP);
}
-/// \brief Retrieve the string representation of the given type as a string
+/// Retrieve the string representation of the given type as a string
/// that has the appropriate lifetime for code completion.
///
/// This routine provides a fast path where we provide constant strings for
@@ -1525,7 +1569,7 @@ static const char *GetCompletionTypeString(QualType T,
// Built-in type names are constant strings.
if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
return BT->getNameAsCString(Policy);
-
+
// Anonymous tag types are constant strings.
if (const TagType *TagT = dyn_cast<TagType>(T))
if (TagDecl *Tag = TagT->getDecl())
@@ -1539,24 +1583,24 @@ static const char *GetCompletionTypeString(QualType T,
}
}
}
-
+
// Slow path: format the type as a string.
std::string Result;
T.getAsStringInternal(Result, Policy);
return Allocator.CopyString(Result);
}
-/// \brief Add a completion for "this", if we're in a member function.
+/// Add a completion for "this", if we're in a member function.
static void addThisCompletion(Sema &S, ResultBuilder &Results) {
QualType ThisTy = S.getCurrentThisType();
if (ThisTy.isNull())
return;
-
+
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
- Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
- S.Context,
+ Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
+ S.Context,
Policy,
Allocator));
Builder.AddTypedTextChunk("this");
@@ -1578,14 +1622,14 @@ static void AddStaticAssertResult(CodeCompletionBuilder &Builder,
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
-/// \brief Add language constructs that show up for "ordinary" names.
+/// Add language constructs that show up for "ordinary" names.
static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Scope *S,
Sema &SemaRef,
ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
-
+
typedef CodeCompletionResult Result;
switch (CCC) {
case Sema::PCC_Namespace:
@@ -1601,7 +1645,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
-
+
// namespace identifier = identifier ;
Builder.AddTypedTextChunk("namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -1618,7 +1662,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("identifier");
Results.AddResult(Result(Builder.TakeString()));
- // asm(string-literal)
+ // asm(string-literal)
Builder.AddTypedTextChunk("asm");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("string-literal");
@@ -1633,10 +1677,10 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result(Builder.TakeString()));
}
}
-
+
if (SemaRef.getLangOpts().ObjC1)
AddObjCTopLevelResults(Results, true);
-
+
AddTypedefResult(Results);
LLVM_FALLTHROUGH;
@@ -1649,7 +1693,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
-
+
// using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
Builder.AddTypedTextChunk("using");
@@ -1710,17 +1754,17 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
-
+
case Sema::PCC_ObjCImplementation:
AddObjCImplementationResults(SemaRef.getLangOpts(), Results, true);
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
-
+
case Sema::PCC_ObjCInstanceVariableList:
AddObjCVisibilityResults(SemaRef.getLangOpts(), Results, true);
break;
-
+
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Statement: {
AddTypedefResult(Results);
@@ -1744,7 +1788,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
if (SemaRef.getLangOpts().ObjC1)
AddObjCStatementResults(Results, true);
-
+
if (Results.includeCodePatterns()) {
// if (condition) { statements }
Builder.AddTypedTextChunk("if");
@@ -1773,7 +1817,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
-
+
// Switch-specific statements.
if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
// case expression:
@@ -1835,7 +1879,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
-
+
if (S->getContinueParent()) {
// continue ;
Builder.AddTypedTextChunk("continue");
@@ -1856,7 +1900,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
else if (ObjCMethodDecl *Method
= dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
isVoid = Method->getReturnType()->isVoidType();
- else if (SemaRef.getCurBlock() &&
+ else if (SemaRef.getCurBlock() &&
!SemaRef.getCurBlock()->ReturnType.isNull())
isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
Builder.AddTypedTextChunk("return");
@@ -1870,7 +1914,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddTypedTextChunk("goto");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("label");
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// Using directives
Builder.AddTypedTextChunk("using");
@@ -1900,7 +1944,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("expression");
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// (__bridge_transfer <Objective-C type>)<expression>
Builder.AddTypedTextChunk("__bridge_transfer");
@@ -1908,7 +1952,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("Objective-C type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("expression");
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// (__bridge_retained <CF type>)<expression>
Builder.AddTypedTextChunk("__bridge_retained");
@@ -1916,7 +1960,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("CF type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("expression");
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
}
// Fall through
LLVM_FALLTHROUGH;
@@ -1925,12 +1969,12 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
if (SemaRef.getLangOpts().CPlusPlus) {
// 'this', if we're in a non-static member function.
addThisCompletion(SemaRef, Results);
-
+
// true
Builder.AddResultTypeChunk("bool");
Builder.AddTypedTextChunk("true");
Results.AddResult(Result(Builder.TakeString()));
-
+
// false
Builder.AddResultTypeChunk("bool");
Builder.AddTypedTextChunk("false");
@@ -1945,9 +1989,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
}
-
+
// static_cast < type-id > ( expression )
Builder.AddTypedTextChunk("static_cast");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
@@ -1956,7 +2000,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// reinterpret_cast < type-id > ( expression )
Builder.AddTypedTextChunk("reinterpret_cast");
@@ -1966,7 +2010,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// const_cast < type-id > ( expression )
Builder.AddTypedTextChunk("const_cast");
@@ -1976,7 +2020,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
if (SemaRef.getLangOpts().RTTI) {
// typeid ( expression-or-type )
@@ -1985,9 +2029,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression-or-type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
}
-
+
// new T ( ... )
Builder.AddTypedTextChunk("new");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -1995,7 +2039,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expressions");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// new T [ ] ( ... )
Builder.AddTypedTextChunk("new");
@@ -2007,14 +2051,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expressions");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// delete expression
Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("delete");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
- Results.AddResult(Result(Builder.TakeString()));
+ Results.AddResult(Result(Builder.TakeString()));
// delete [] expression
Builder.AddResultTypeChunk("void");
@@ -2034,7 +2078,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
}
-
+
// FIXME: Rethrow?
if (SemaRef.getLangOpts().CPlusPlus11) {
@@ -2079,7 +2123,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
SuperType = ID->getSuperClass()->getNameAsString();
if (Method->isInstanceMethod())
SuperType += " *";
-
+
Builder.AddResultTypeChunk(Allocator.CopyString(SuperType));
Builder.AddTypedTextChunk("super");
Results.AddResult(Result(Builder.TakeString()));
@@ -2111,7 +2155,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result(Builder.TakeString()));
break;
}
-
+
case Sema::PCC_Type:
case Sema::PCC_LocalDeclarationSpecifiers:
break;
@@ -2124,7 +2168,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result("operator"));
}
-/// \brief If the given declaration has an associated type, add it as a result
+/// If the given declaration has an associated type, add it as a result
/// type chunk.
static void AddResultTypeChunk(ASTContext &Context,
const PrintingPolicy &Policy,
@@ -2136,7 +2180,7 @@ static void AddResultTypeChunk(ASTContext &Context,
// Skip constructors and conversion functions, which have their return types
// built into their names.
- if (isa<CXXConstructorDecl>(ND) || isa<CXXConversionDecl>(ND))
+ if (isConstructor(ND) || isa<CXXConversionDecl>(ND))
return;
// Determine the type of the declaration (if it has a type).
@@ -2166,10 +2210,10 @@ static void AddResultTypeChunk(ASTContext &Context,
else
T = Property->getType();
}
-
+
if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
return;
-
+
Result.AddResultTypeChunk(GetCompletionTypeString(T, Context, Policy,
Result.getAllocator()));
}
@@ -2188,7 +2232,7 @@ static void MaybeAddSentinel(Preprocessor &PP,
}
}
-static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
+static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
QualType &Type) {
std::string Result;
if (ObjCQuals & Decl::OBJC_TQ_In)
@@ -2223,7 +2267,7 @@ static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
return Result;
}
-/// \brief Tries to find the most appropriate type location for an Objective-C
+/// Tries to find the most appropriate type location for an Objective-C
/// block placeholder.
///
/// This function ignores things like typedefs and qualifiers in order to
@@ -2285,13 +2329,13 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
- // The argument for a dependent or non-block parameter is a placeholder
+ // The argument for a dependent or non-block parameter is a placeholder
// containing that parameter's type.
std::string Result;
-
+
if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
Result = Param->getIdentifier()->getName();
-
+
QualType Type = Param->getType();
if (ObjCSubsts)
Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
@@ -2332,7 +2376,7 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
Result = Param->getIdentifier()->getName();
QualType Type = Param->getType().getUnqualifiedType();
-
+
if (ObjCMethodParam) {
Result = Type.getAsString(Policy);
std::string Quals =
@@ -2346,7 +2390,7 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
} else {
Type.getAsStringInternal(Result, Policy);
}
-
+
return Result;
}
@@ -2357,14 +2401,14 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
ObjCSubsts);
}
-/// \brief Returns a placeholder string that corresponds to an Objective-C block
+/// Returns a placeholder string that corresponds to an Objective-C block
/// declaration.
///
/// \param BlockDecl A declaration with an Objective-C block type.
///
/// \param Block The most relevant type location for that block type.
///
-/// \param SuppressBlockName Determines wether or not the name of the block
+/// \param SuppressBlockName Determines whether or not the name of the block
/// declaration is included in the resulting string.
static std::string
formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
@@ -2449,7 +2493,7 @@ static std::string GetDefaultValueString(const ParmVarDecl *Param,
return " " + DefValue;
}
-/// \brief Add function parameter chunks to the given code completion string.
+/// Add function parameter chunks to the given code completion string.
static void AddFunctionParameterChunks(Preprocessor &PP,
const PrintingPolicy &Policy,
const FunctionDecl *Function,
@@ -2457,10 +2501,10 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
unsigned Start = 0,
bool InOptional = false) {
bool FirstParameter = true;
-
+
for (unsigned P = Start, N = Function->getNumParams(); P != N; ++P) {
const ParmVarDecl *Param = Function->getParamDecl(P);
-
+
if (Param->hasDefaultArg() && !InOptional) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
@@ -2472,14 +2516,14 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
Result.AddOptionalChunk(Opt.TakeString());
break;
}
-
+
if (FirstParameter)
FirstParameter = false;
else
Result.AddChunk(CodeCompletionString::CK_Comma);
-
+
InOptional = false;
-
+
// Format the placeholder string.
std::string PlaceholderStr = FormatFunctionParameter(Policy, Param);
if (Param->hasDefaultArg())
@@ -2492,8 +2536,8 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
Result.AddPlaceholderChunk(
Result.getAllocator().CopyString(PlaceholderStr));
}
-
- if (const FunctionProtoType *Proto
+
+ if (const FunctionProtoType *Proto
= Function->getType()->getAs<FunctionProtoType>())
if (Proto->isVariadic()) {
if (Proto->getNumParams() == 0)
@@ -2503,7 +2547,7 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
}
}
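// A minimal sketch of the nesting performed above, with plain strings standing
// in for completion chunks (illustrative types and names; the real builder
// emits structured CK_* chunks). Each defaulted parameter folds itself and the
// rest of the parameter list into an optional group, and the separating comma
// travels inside the group so a client can drop the whole tail, comma included:
#include <string>
#include <vector>

struct Param { std::string Placeholder; bool HasDefault; };

static std::string formatParams(const std::vector<Param> &Ps,
                                unsigned Start = 0, bool InOptional = false) {
  std::string Out;
  bool First = true;
  for (unsigned I = Start, N = Ps.size(); I != N; ++I) {
    if (Ps[I].HasDefault && !InOptional) {
      // Recurse over the remaining parameters as one optional group.
      std::string Opt = First ? "" : ", ";
      Opt += formatParams(Ps, I, /*InOptional=*/true);
      Out += "{" + Opt + "}"; // braces mark an optional chunk here
      break;
    }
    if (First) First = false; else Out += ", ";
    InOptional = false;
    Out += Ps[I].Placeholder;
  }
  return Out;
}
// For f(int a, int b = 1, int c = 2) this yields
// "int a{, int b = 1{, int c = 2}}".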
-/// \brief Add template parameter chunks to the given code completion string.
+/// Add template parameter chunks to the given code completion string.
static void AddTemplateParameterChunks(ASTContext &Context,
const PrintingPolicy &Policy,
const TemplateDecl *Template,
@@ -2521,7 +2565,7 @@ static void AddTemplateParameterChunks(ASTContext &Context,
TemplateParameterList::iterator PEnd = Params->end();
if (MaxParameters)
PEnd = Params->begin() + MaxParameters;
- for (TemplateParameterList::iterator P = Params->begin() + Start;
+ for (TemplateParameterList::iterator P = Params->begin() + Start;
P != PEnd; ++P) {
bool HasDefaultArg = false;
std::string PlaceholderStr;
@@ -2530,14 +2574,14 @@ static void AddTemplateParameterChunks(ASTContext &Context,
PlaceholderStr = "typename";
else
PlaceholderStr = "class";
-
+
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
PlaceholderStr += TTP->getIdentifier()->getName();
}
-
+
HasDefaultArg = TTP->hasDefaultArgument();
- } else if (NonTypeTemplateParmDecl *NTTP
+ } else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
PlaceholderStr = NTTP->getIdentifier()->getName();
@@ -2546,7 +2590,7 @@ static void AddTemplateParameterChunks(ASTContext &Context,
} else {
assert(isa<TemplateTemplateParmDecl>(*P));
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
-
+
// Since putting the template argument list into the placeholder would
// be very, very long, we just use an abbreviation.
PlaceholderStr = "template<...> class";
@@ -2554,10 +2598,10 @@ static void AddTemplateParameterChunks(ASTContext &Context,
PlaceholderStr += ' ';
PlaceholderStr += TTP->getIdentifier()->getName();
}
-
+
HasDefaultArg = TTP->hasDefaultArgument();
}
-
+
if (HasDefaultArg && !InDefaultArg) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
@@ -2570,31 +2614,31 @@ static void AddTemplateParameterChunks(ASTContext &Context,
Result.AddOptionalChunk(Opt.TakeString());
break;
}
-
+
InDefaultArg = false;
-
+
if (FirstParameter)
FirstParameter = false;
else
Result.AddChunk(CodeCompletionString::CK_Comma);
-
+
// Add the placeholder string.
Result.AddPlaceholderChunk(
Result.getAllocator().CopyString(PlaceholderStr));
- }
+ }
}
-/// \brief Add a qualifier to the given code-completion string, if the
+/// Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
-static void
-AddQualifierToCompletionString(CodeCompletionBuilder &Result,
- NestedNameSpecifier *Qualifier,
+static void
+AddQualifierToCompletionString(CodeCompletionBuilder &Result,
+ NestedNameSpecifier *Qualifier,
bool QualifierIsInformative,
ASTContext &Context,
const PrintingPolicy &Policy) {
if (!Qualifier)
return;
-
+
std::string PrintedNNS;
{
llvm::raw_string_ostream OS(PrintedNNS);
@@ -2606,7 +2650,7 @@ AddQualifierToCompletionString(CodeCompletionBuilder &Result,
Result.AddTextChunk(Result.getAllocator().CopyString(PrintedNNS));
}
-static void
+static void
AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
const FunctionDecl *Function) {
const FunctionProtoType *Proto
@@ -2615,7 +2659,7 @@ AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
return;
// FIXME: Add ref-qualifier!
-
+
// Handle single qualifiers without copying
if (Proto->getTypeQuals() == Qualifiers::Const) {
Result.AddInformativeChunk(" const");
@@ -2643,29 +2687,29 @@ AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
}
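// Shape of the qualifier suffix built above, as a tiny self-contained sketch
// (simplified; the real code keeps the single-qualifier cases allocation-free
// by using constant strings):
#include <string>

static std::string functionQualsSuffix(bool Const, bool Volatile,
                                       bool Restrict) {
  std::string Quals;
  if (Const)    Quals += " const";
  if (Volatile) Quals += " volatile";
  if (Restrict) Quals += " restrict";
  return Quals; // e.g. " const volatile" for `void f() const volatile;`
}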
-/// \brief Add the name of the given declaration
+/// Add the name of the given declaration
static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
const NamedDecl *ND,
CodeCompletionBuilder &Result) {
DeclarationName Name = ND->getDeclName();
if (!Name)
return;
-
+
switch (Name.getNameKind()) {
case DeclarationName::CXXOperatorName: {
const char *OperatorName = nullptr;
switch (Name.getCXXOverloadedOperator()) {
- case OO_None:
+ case OO_None:
case OO_Conditional:
case NUM_OVERLOADED_OPERATORS:
- OperatorName = "operator";
+ OperatorName = "operator";
break;
-
+
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
case OO_##Name: OperatorName = "operator" Spelling; break;
#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
#include "clang/Basic/OperatorKinds.def"
-
+
case OO_New: OperatorName = "operator new"; break;
case OO_Delete: OperatorName = "operator delete"; break;
case OO_Array_New: OperatorName = "operator new[]"; break;
@@ -2676,7 +2720,7 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
Result.AddTypedTextChunk(OperatorName);
break;
}
-
+
case DeclarationName::Identifier:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXDestructorName:
@@ -2684,14 +2728,14 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
break;
-
+
case DeclarationName::CXXDeductionGuideName:
case DeclarationName::CXXUsingDirective:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
break;
-
+
case DeclarationName::CXXConstructorName: {
CXXRecordDecl *Record = nullptr;
QualType Ty = Name.getCXXNameType();
@@ -2705,7 +2749,7 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
Result.getAllocator().CopyString(ND->getNameAsString()));
break;
}
-
+
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Record->getNameAsString()));
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
@@ -2727,7 +2771,53 @@ CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
CCTUInfo, IncludeBriefComments);
}
-/// \brief If possible, create a new code completion string for the given
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionStringForMacro(
+ Preprocessor &PP, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) {
+ assert(Kind == RK_Macro);
+ CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
+ const MacroInfo *MI = PP.getMacroInfo(Macro);
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(Macro->getName()));
+
+ if (!MI || !MI->isFunctionLike())
+ return Result.TakeString();
+
+ // Format a function-like macro with placeholders for the arguments.
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ MacroInfo::param_iterator A = MI->param_begin(), AEnd = MI->param_end();
+
+ // C99 variadic macros add __VA_ARGS__ at the end. Skip it.
+ if (MI->isC99Varargs()) {
+ --AEnd;
+
+ if (A == AEnd) {
+ Result.AddPlaceholderChunk("...");
+ }
+ }
+
+ for (MacroInfo::param_iterator A = MI->param_begin(); A != AEnd; ++A) {
+ if (A != MI->param_begin())
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ if (MI->isVariadic() && (A + 1) == AEnd) {
+ SmallString<32> Arg = (*A)->getName();
+ if (MI->isC99Varargs())
+ Arg += ", ...";
+ else
+ Arg += "...";
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
+ break;
+ }
+
+ // Non-variadic macros are simple.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString((*A)->getName()));
+ }
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ return Result.TakeString();
+}
+
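// The same layout, restated as a self-contained sketch over plain strings
// (an illustrative helper, not Clang API; real results use placeholder chunks
// rather than raw text):
#include <string>
#include <vector>

static std::string formatMacro(const std::string &Name,
                               std::vector<std::string> Params,
                               bool C99Varargs, bool GNUVarargs) {
  std::string Out = Name + "(";
  // C99 variadic macros carry a trailing __VA_ARGS__ parameter; drop it and
  // represent variadicness on the last named parameter (or on its own).
  if (C99Varargs && !Params.empty())
    Params.pop_back();
  if (Params.empty()) {
    if (C99Varargs)
      Out += "...";
  } else {
    for (unsigned I = 0, N = Params.size(); I != N; ++I) {
      if (I) Out += ", ";
      Out += Params[I];
      if (I + 1 == N) {
        if (C99Varargs) Out += ", ...";    // #define LOG(Fmt, ...)
        else if (GNUVarargs) Out += "..."; // #define LOG(Args...)
      }
    }
  }
  return Out + ")";
}
// formatMacro("MIN", {"a", "b"}, false, false)            -> "MIN(a, b)"
// formatMacro("LOG", {"Fmt", "__VA_ARGS__"}, true, false) -> "LOG(Fmt, ...)"
// formatMacro("V",   {"__VA_ARGS__"}, true, false)        -> "V(...)"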
+/// If possible, create a new code completion string for the given
/// result.
///
/// \returns Either a new, heap-allocated code completion string describing
@@ -2740,104 +2830,42 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
bool IncludeBriefComments) {
+ if (Kind == RK_Macro)
+ return CreateCodeCompletionStringForMacro(PP, Allocator, CCTUInfo);
+
CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
-
+
PrintingPolicy Policy = getCompletionPrintingPolicy(Ctx, PP);
if (Kind == RK_Pattern) {
Pattern->Priority = Priority;
Pattern->Availability = Availability;
-
+
if (Declaration) {
Result.addParentContext(Declaration->getDeclContext());
Pattern->ParentName = Result.getParentName();
- // Provide code completion comment for self.GetterName where
- // GetterName is the getter method for a property with name
- // different from the property name (declared via a property
- // getter attribute.
- const NamedDecl *ND = Declaration;
- if (const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(ND))
- if (M->isPropertyAccessor())
- if (const ObjCPropertyDecl *PDecl = M->findPropertyDecl())
- if (PDecl->getGetterName() == M->getSelector() &&
- PDecl->getIdentifier() != M->getIdentifier()) {
- if (const RawComment *RC =
- Ctx.getRawCommentForAnyRedecl(M)) {
- Result.addBriefComment(RC->getBriefText(Ctx));
- Pattern->BriefComment = Result.getBriefComment();
- }
- else if (const RawComment *RC =
- Ctx.getRawCommentForAnyRedecl(PDecl)) {
- Result.addBriefComment(RC->getBriefText(Ctx));
- Pattern->BriefComment = Result.getBriefComment();
- }
- }
- }
-
- return Pattern;
- }
-
- if (Kind == RK_Keyword) {
- Result.AddTypedTextChunk(Keyword);
- return Result.TakeString();
- }
-
- if (Kind == RK_Macro) {
- const MacroInfo *MI = PP.getMacroInfo(Macro);
- Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(Macro->getName()));
-
- if (!MI || !MI->isFunctionLike())
- return Result.TakeString();
-
- // Format a function-like macro with placeholders for the arguments.
- Result.AddChunk(CodeCompletionString::CK_LeftParen);
- MacroInfo::param_iterator A = MI->param_begin(), AEnd = MI->param_end();
-
- // C99 variadic macros add __VA_ARGS__ at the end. Skip it.
- if (MI->isC99Varargs()) {
- --AEnd;
-
- if (A == AEnd) {
- Result.AddPlaceholderChunk("...");
+ if (const RawComment *RC =
+ getPatternCompletionComment(Ctx, Declaration)) {
+ Result.addBriefComment(RC->getBriefText(Ctx));
+ Pattern->BriefComment = Result.getBriefComment();
}
}
-
- for (MacroInfo::param_iterator A = MI->param_begin(); A != AEnd; ++A) {
- if (A != MI->param_begin())
- Result.AddChunk(CodeCompletionString::CK_Comma);
- if (MI->isVariadic() && (A+1) == AEnd) {
- SmallString<32> Arg = (*A)->getName();
- if (MI->isC99Varargs())
- Arg += ", ...";
- else
- Arg += "...";
- Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
- break;
- }
+ return Pattern;
+ }
- // Non-variadic macros are simple.
- Result.AddPlaceholderChunk(
- Result.getAllocator().CopyString((*A)->getName()));
- }
- Result.AddChunk(CodeCompletionString::CK_RightParen);
+ if (Kind == RK_Keyword) {
+ Result.AddTypedTextChunk(Keyword);
return Result.TakeString();
}
-
assert(Kind == RK_Declaration && "Missed a result kind?");
const NamedDecl *ND = Declaration;
Result.addParentContext(ND->getDeclContext());
if (IncludeBriefComments) {
// Add documentation comment, if it exists.
- if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(ND)) {
+ if (const RawComment *RC = getCompletionComment(Ctx, Declaration)) {
Result.addBriefComment(RC->getBriefText(Ctx));
- }
- else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(ND))
- if (OMD->isPropertyAccessor())
- if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
- if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(PDecl))
- Result.addBriefComment(RC->getBriefText(Ctx));
+ }
}
if (StartsNestedNameSpecifier) {
@@ -2851,9 +2879,9 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Result.AddAnnotation(Result.getAllocator().CopyString(I->getAnnotation()));
AddResultTypeChunk(Ctx, Policy, ND, CCContext.getBaseType(), Result);
-
+
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
- AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
AddTypedNameChunk(Ctx, Policy, ND, Result);
Result.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -2862,9 +2890,9 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
AddFunctionTypeQualsToCompletionString(Result, Function);
return Result.TakeString();
}
-
+
if (const FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
- AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
AddTypedNameChunk(Ctx, Policy, Function, Result);
@@ -2885,30 +2913,30 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
LastDeducibleArgument - 1);
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
HasDefaultArg = TTP->hasDefaultArgument();
- else if (NonTypeTemplateParmDecl *NTTP
+ else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(Param))
HasDefaultArg = NTTP->hasDefaultArgument();
else {
assert(isa<TemplateTemplateParmDecl>(Param));
- HasDefaultArg
+ HasDefaultArg
= cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
}
-
+
if (!HasDefaultArg)
break;
}
}
-
+
if (LastDeducibleArgument) {
// Some of the function template arguments cannot be deduced from a
// function call, so we introduce an explicit template argument list
// containing all of the arguments up to the first deducible argument.
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
- AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
+ AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
LastDeducibleArgument);
Result.AddChunk(CodeCompletionString::CK_RightAngle);
}
-
+
// Add the function parameters
Result.AddChunk(CodeCompletionString::CK_LeftParen);
AddFunctionParameterChunks(PP, Policy, Function, Result);
@@ -2916,9 +2944,9 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
AddFunctionTypeQualsToCompletionString(Result, Function);
return Result.TakeString();
}
-
+
if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
- AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Template->getNameAsString()));
@@ -2927,7 +2955,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Result.AddChunk(CodeCompletionString::CK_RightAngle);
return Result.TakeString();
}
-
+
if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
Selector Sel = Method->getSelector();
if (Sel.isUnarySelector()) {
@@ -2942,7 +2970,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Result.AddTypedTextChunk(Result.getAllocator().CopyString(SelName));
else {
Result.AddInformativeChunk(Result.getAllocator().CopyString(SelName));
-
+
// If there is only one parameter, and we're past it, add an empty
// typed-text chunk since there is nothing to type.
if (Method->param_size() == 1)
@@ -2961,10 +2989,10 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Keyword += ":";
if (Idx < StartParameter || AllParametersAreInformative)
Result.AddInformativeChunk(Result.getAllocator().CopyString(Keyword));
- else
+ else
Result.AddTypedTextChunk(Result.getAllocator().CopyString(Keyword));
}
-
+
// If we're before the starting parameter, skip the placeholder.
if (Idx < StartParameter)
continue;
@@ -2990,10 +3018,10 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
if (DeclaringEntity || AllParametersAreInformative)
Arg += II->getName();
}
-
+
if (Method->isVariadic() && (P + 1) == PEnd)
Arg += ", ...";
-
+
if (DeclaringEntity)
Result.AddTextChunk(Result.getAllocator().CopyString(Arg));
else if (AllParametersAreInformative)
@@ -3011,15 +3039,15 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
else
Result.AddPlaceholderChunk(", ...");
}
-
+
MaybeAddSentinel(PP, Method, Result);
}
-
+
return Result.TakeString();
}
if (Qualifier)
- AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
Result.AddTypedTextChunk(
@@ -3027,7 +3055,60 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
-/// \brief Add function overload parameter chunks to the given code completion
+const RawComment *clang::getCompletionComment(const ASTContext &Ctx,
+ const NamedDecl *ND) {
+ if (!ND)
+ return nullptr;
+ if (auto *RC = Ctx.getRawCommentForAnyRedecl(ND))
+ return RC;
+
+  // Try to find a comment from the corresponding property for ObjC methods.
+ const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(ND);
+ if (!M)
+ return nullptr;
+ const ObjCPropertyDecl *PDecl = M->findPropertyDecl();
+ if (!PDecl)
+ return nullptr;
+
+ return Ctx.getRawCommentForAnyRedecl(PDecl);
+}
+
+const RawComment *clang::getPatternCompletionComment(const ASTContext &Ctx,
+ const NamedDecl *ND) {
+ const ObjCMethodDecl *M = dyn_cast_or_null<ObjCMethodDecl>(ND);
+ if (!M || !M->isPropertyAccessor())
+ return nullptr;
+
+ // Provide code completion comment for self.GetterName where
+ // GetterName is the getter method for a property with name
+ // different from the property name (declared via a property
+  // getter attribute).
+ const ObjCPropertyDecl *PDecl = M->findPropertyDecl();
+ if (!PDecl)
+ return nullptr;
+ if (PDecl->getGetterName() == M->getSelector() &&
+ PDecl->getIdentifier() != M->getIdentifier()) {
+ if (auto *RC = Ctx.getRawCommentForAnyRedecl(M))
+ return RC;
+ if (auto *RC = Ctx.getRawCommentForAnyRedecl(PDecl))
+ return RC;
+ }
+ return nullptr;
+}
+
+const RawComment *clang::getParameterComment(
+ const ASTContext &Ctx,
+ const CodeCompleteConsumer::OverloadCandidate &Result,
+ unsigned ArgIndex) {
+ auto FDecl = Result.getFunction();
+ if (!FDecl)
+ return nullptr;
+ if (ArgIndex < FDecl->getNumParams())
+ return Ctx.getRawCommentForAnyRedecl(FDecl->getParamDecl(ArgIndex));
+ return nullptr;
+}
+
+/// Add function overload parameter chunks to the given code completion
/// string.
static void AddOverloadParameterChunks(ASTContext &Context,
const PrintingPolicy &Policy,
@@ -3110,7 +3191,7 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(getFunctionType());
if (!FDecl && !Proto) {
- // Function without a prototype. Just give the return type and a
+ // Function without a prototype. Just give the return type and a
// highlighted ellipsis.
const FunctionType *FT = getFunctionType();
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
@@ -3122,10 +3203,10 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
}
if (FDecl) {
- if (IncludeBriefComments && CurrentArg < FDecl->getNumParams())
- if (auto RC = S.getASTContext().getRawCommentForAnyRedecl(
- FDecl->getParamDecl(CurrentArg)))
+ if (IncludeBriefComments) {
+ if (auto RC = getParameterComment(S.getASTContext(), *this, CurrentArg))
Result.addBriefComment(RC->getBriefText(S.getASTContext()));
+ }
AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
Result.AddTextChunk(
Result.getAllocator().CopyString(FDecl->getNameAsString()));
@@ -3143,18 +3224,18 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
return Result.TakeString();
}
-unsigned clang::getMacroUsagePriority(StringRef MacroName,
+unsigned clang::getMacroUsagePriority(StringRef MacroName,
const LangOptions &LangOpts,
bool PreferredTypeIsPointer) {
unsigned Priority = CCP_Macro;
-
+
// Treat the "nil", "Nil" and "NULL" macros as null pointer constants.
- if (MacroName.equals("nil") || MacroName.equals("NULL") ||
+ if (MacroName.equals("nil") || MacroName.equals("NULL") ||
MacroName.equals("Nil")) {
Priority = CCP_Constant;
if (PreferredTypeIsPointer)
Priority = Priority / CCF_SimilarTypeMatch;
- }
+ }
// Treat "YES", "NO", "true", and "false" as constants.
else if (MacroName.equals("YES") || MacroName.equals("NO") ||
MacroName.equals("true") || MacroName.equals("false"))
@@ -3162,27 +3243,27 @@ unsigned clang::getMacroUsagePriority(StringRef MacroName,
// Treat "bool" as a type.
else if (MacroName.equals("bool"))
Priority = CCP_Type + (LangOpts.ObjC1? CCD_bool_in_ObjC : 0);
-
-
+
+
return Priority;
}
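// Restated as a standalone sketch (the numeric values below are stand-ins for
// the real CCP_*/CCF_* constants in CodeCompleteConsumer.h; lower numbers rank
// a result better):
#include <string>

static unsigned macroUsagePriority(const std::string &Name, bool ObjC,
                                   bool PreferredTypeIsPointer) {
  const unsigned Macro = 70, Constant = 65, Type = 50; // assumed values
  const unsigned SimilarTypeMatch = 2;                 // assumed factor
  unsigned Priority = Macro;
  if (Name == "nil" || Name == "NULL" || Name == "Nil") {
    Priority = Constant;            // null-pointer constants rank higher,
    if (PreferredTypeIsPointer)     // and higher still when a pointer is
      Priority /= SimilarTypeMatch; // expected at this position.
  } else if (Name == "YES" || Name == "NO" || Name == "true" ||
             Name == "false") {
    Priority = Constant;
  } else if (Name == "bool") {
    Priority = Type + (ObjC ? 1 : 0); // CCD_bool_in_ObjC penalty
  }
  return Priority;
}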
CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
if (!D)
return CXCursor_UnexposedDecl;
-
+
switch (D->getKind()) {
case Decl::Enum: return CXCursor_EnumDecl;
case Decl::EnumConstant: return CXCursor_EnumConstantDecl;
case Decl::Field: return CXCursor_FieldDecl;
- case Decl::Function:
+ case Decl::Function:
return CXCursor_FunctionDecl;
case Decl::ObjCCategory: return CXCursor_ObjCCategoryDecl;
case Decl::ObjCCategoryImpl: return CXCursor_ObjCCategoryImplDecl;
case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
case Decl::ObjCInterface: return CXCursor_ObjCInterfaceDecl;
- case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
+ case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
case Decl::ObjCMethod:
return cast<ObjCMethodDecl>(D)->isInstanceMethod()
? CXCursor_ObjCInstanceMethodDecl : CXCursor_ObjCClassMethodDecl;
@@ -3211,17 +3292,17 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
case Decl::StaticAssert: return CXCursor_StaticAssert;
case Decl::Friend: return CXCursor_FriendDecl;
case Decl::TranslationUnit: return CXCursor_TranslationUnit;
-
+
case Decl::Using:
case Decl::UnresolvedUsingValue:
- case Decl::UnresolvedUsingTypename:
+ case Decl::UnresolvedUsingTypename:
return CXCursor_UsingDeclaration;
-
+
case Decl::ObjCPropertyImpl:
switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
case ObjCPropertyImplDecl::Dynamic:
return CXCursor_ObjCDynamicDecl;
-
+
case ObjCPropertyImplDecl::Synthesize:
return CXCursor_ObjCSynthesizeDecl;
}
@@ -3242,7 +3323,7 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
}
}
}
-
+
return CXCursor_UnexposedDecl;
}
@@ -3250,10 +3331,10 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
bool IncludeUndefined,
bool TargetTypeIsPointer = false) {
typedef CodeCompletionResult Result;
-
+
Results.EnterNewScope();
-
- for (Preprocessor::macro_iterator M = PP.macro_begin(),
+
+ for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
M != MEnd; ++M) {
auto MD = PP.getMacroDefinition(M->first);
@@ -3268,17 +3349,17 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
TargetTypeIsPointer)));
}
}
-
+
Results.ExitScope();
-
+
}
-static void AddPrettyFunctionResults(const LangOptions &LangOpts,
+static void AddPrettyFunctionResults(const LangOptions &LangOpts,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
-
+
Results.EnterNewScope();
-
+
Results.AddResult(Result("__PRETTY_FUNCTION__", CCP_Constant));
Results.AddResult(Result("__FUNCTION__", CCP_Constant));
if (LangOpts.C99 || LangOpts.CPlusPlus11)
@@ -3295,24 +3376,24 @@ static void HandleCodeCompleteResults(Sema *S,
CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
}
-static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
+static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
Sema::ParserCompletionContext PCC) {
switch (PCC) {
case Sema::PCC_Namespace:
return CodeCompletionContext::CCC_TopLevel;
-
+
case Sema::PCC_Class:
return CodeCompletionContext::CCC_ClassStructUnion;
case Sema::PCC_ObjCInterface:
return CodeCompletionContext::CCC_ObjCInterface;
-
+
case Sema::PCC_ObjCImplementation:
return CodeCompletionContext::CCC_ObjCImplementation;
case Sema::PCC_ObjCInstanceVariableList:
return CodeCompletionContext::CCC_ObjCIvarList;
-
+
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
if (S.CurContext->isFileContext())
@@ -3320,7 +3401,7 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
if (S.CurContext->isRecord())
return CodeCompletionContext::CCC_ClassStructUnion;
return CodeCompletionContext::CCC_Other;
-
+
case Sema::PCC_RecoveryInFunction:
return CodeCompletionContext::CCC_Recovery;
@@ -3334,7 +3415,7 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_Expression:
case Sema::PCC_Condition:
return CodeCompletionContext::CCC_Expression;
-
+
case Sema::PCC_Statement:
return CodeCompletionContext::CCC_Statement;
@@ -3343,7 +3424,7 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_ParenthesizedExpression:
return CodeCompletionContext::CCC_ParenthesizedExpression;
-
+
case Sema::PCC_LocalDeclarationSpecifiers:
return CodeCompletionContext::CCC_Type;
}
@@ -3351,27 +3432,27 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
llvm_unreachable("Invalid ParserCompletionContext!");
}
-/// \brief If we're in a C++ virtual member function, add completion results
-/// that invoke the functions we override, since it's common to invoke the
+/// If we're in a C++ virtual member function, add completion results
+/// that invoke the functions we override, since it's common to invoke the
/// overridden function as well as adding new functionality.
///
/// \param S The semantic analysis object for which we are generating results.
///
/// \param InContext The context in which the nested-name-specifier preceding
-/// the code-completion point
+/// the code-completion point appears.
static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
ResultBuilder &Results) {
// Look through blocks.
DeclContext *CurContext = S.CurContext;
while (isa<BlockDecl>(CurContext))
CurContext = CurContext->getParent();
-
-
+
+
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(CurContext);
if (!Method || !Method->isVirtual())
return;
-
- // We need to have names for all of the parameters, if we're going to
+
+ // We need to have names for all of the parameters, if we're going to
// generate a forwarding call.
for (auto P : Method->parameters())
if (!P->getDeclName())
@@ -3383,7 +3464,7 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
Results.getCodeCompletionTUInfo());
if (Overridden->getCanonicalDecl() == Method->getCanonicalDecl())
continue;
-
+
// If we need a nested-name-specifier, add one now.
if (!InContext) {
NestedNameSpecifier *NNS
@@ -3397,8 +3478,8 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
}
} else if (!InContext->Equals(Overridden->getDeclContext()))
continue;
-
- Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
+
+ Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
Overridden->getNameAsString()));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
bool FirstParam = true;
@@ -3421,14 +3502,14 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
}
}
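// Concretely, the completion produced here is a ready-made forwarding call to
// the overridden method (only offered when every parameter has a name, as
// checked above). Illustrative names:
struct Base {
  virtual void update(int Value) {}
};
struct Derived : Base {
  void update(int Value) override {
    Base::update(Value); // the forwarding call offered at this point
  }
};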
-void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
+void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
ModuleIdPath Path) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
-
+
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
typedef CodeCompletionResult Result;
@@ -3440,7 +3521,7 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
Builder.AddTypedTextChunk(
Builder.getAllocator().CopyString(Modules[I]->Name));
Results.AddResult(Result(Builder.TakeString(),
- CCP_Declaration,
+ CCP_Declaration,
CXCursor_ModuleImportDecl,
Modules[I]->isAvailable()
? CXAvailability_Available
@@ -3453,14 +3534,14 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
/*IsInclusionDirective=*/false);
// Enumerate submodules.
if (Mod) {
- for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
SubEnd = Mod->submodule_end();
Sub != SubEnd; ++Sub) {
-
+
Builder.AddTypedTextChunk(
Builder.getAllocator().CopyString((*Sub)->Name));
Results.AddResult(Result(Builder.TakeString(),
- CCP_Declaration,
+ CCP_Declaration,
CXCursor_ModuleImportDecl,
(*Sub)->isAvailable()
? CXAvailability_Available
@@ -3468,18 +3549,18 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
}
}
}
- Results.ExitScope();
+ Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
}
-void Sema::CodeCompleteOrdinaryName(Scope *S,
+void Sema::CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, CompletionContext));
Results.EnterNewScope();
-
+
// Determine how to filter results, e.g., so that the names of
// values (functions, enumerators, function templates, etc.) are
// only allowed where we can have an expression.
@@ -3505,11 +3586,11 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
Results.setFilter(&ResultBuilder::IsOrdinaryName);
else
Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
-
+
if (getLangOpts().CPlusPlus)
MaybeAddOverrideCalls(*this, /*InContext=*/nullptr, Results);
break;
-
+
case PCC_RecoveryInFunction:
// Unfiltered
break;
@@ -3521,10 +3602,11 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
if (CurMethod->isInstance())
Results.setObjectTypeQualifiers(
Qualifiers::fromCVRMask(CurMethod->getTypeQualifiers()));
-
+
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
AddOrdinaryNameResults(CompletionContext, S, *this, Results);
Results.ExitScope();
@@ -3537,7 +3619,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
if (S->getFnParent())
AddPrettyFunctionResults(getLangOpts(), Results);
break;
-
+
case PCC_Namespace:
case PCC_Class:
case PCC_ObjCInterface:
@@ -3551,15 +3633,15 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_LocalDeclarationSpecifiers:
break;
}
-
+
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false);
-
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
}
-static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
@@ -3576,7 +3658,7 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
? CodeCompletionContext::CCC_PotentiallyQualifiedName
: CodeCompletionContext::CCC_Name);
Results.EnterNewScope();
-
+
// Type qualifiers can come after names.
Results.AddResult(Result("const"));
Results.AddResult(Result("volatile"));
@@ -3590,16 +3672,17 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
Results.AddResult("final");
if (AllowNonIdentifiers) {
- Results.AddResult(Result("operator"));
+ Results.AddResult(Result("operator"));
}
-
+
// Add nested-name-specifiers.
if (AllowNestedNameSpecifiers) {
Results.allowNestedNameSpecifiers();
Results.setFilter(&ResultBuilder::IsImpossibleToSatisfy);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
- CodeCompleter->includeGlobals());
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.setFilter(nullptr);
}
}
@@ -3618,7 +3701,7 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
S &&
(S->getFlags() & Scope::DeclScope) != 0 &&
(S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
- Scope::FunctionPrototypeScope |
+ Scope::FunctionPrototypeScope |
Scope::AtCatchScope)) == 0) {
ParsedType T = DS.getRepAsType();
if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
@@ -3628,29 +3711,31 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
// Note that we intentionally suppress macro results here, since we do not
// encourage using macros to produce the names of entities.
- HandleCodeCompleteResults(this, CodeCompleter,
+ HandleCodeCompleteResults(this, CodeCompleter,
Results.getCompletionContext(),
Results.data(), Results.size());
}
struct Sema::CodeCompleteExpressionData {
- CodeCompleteExpressionData(QualType PreferredType = QualType())
+ CodeCompleteExpressionData(QualType PreferredType = QualType())
: PreferredType(PreferredType), IntegralConstantExpression(false),
ObjCCollection(false) { }
-
+
QualType PreferredType;
bool IntegralConstantExpression;
bool ObjCCollection;
SmallVector<Decl *, 4> IgnoreDecls;
};
-/// \brief Perform code-completion in an expression context when we know what
+/// Perform code-completion in an expression context when we know what
/// type we're looking for.
-void Sema::CodeCompleteExpression(Scope *S,
+void Sema::CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data) {
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_Expression);
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_Expression,
+ Data.PreferredType));
if (Data.ObjCCollection)
Results.setFilter(&ResultBuilder::IsObjCCollection);
else if (Data.IntegralConstantExpression)
@@ -3662,36 +3747,35 @@ void Sema::CodeCompleteExpression(Scope *S,
if (!Data.PreferredType.isNull())
Results.setPreferredType(Data.PreferredType.getNonReferenceType());
-
+
// Ignore any declarations that we were told that we don't care about.
for (unsigned I = 0, N = Data.IgnoreDecls.size(); I != N; ++I)
Results.Ignore(Data.IgnoreDecls[I]);
-
+
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
-
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+
Results.EnterNewScope();
AddOrdinaryNameResults(PCC_Expression, S, *this, Results);
Results.ExitScope();
-
+
bool PreferredTypeIsPointer = false;
if (!Data.PreferredType.isNull())
PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType()
- || Data.PreferredType->isMemberPointerType()
+ || Data.PreferredType->isMemberPointerType()
|| Data.PreferredType->isBlockPointerType();
-
- if (S->getFnParent() &&
- !Data.ObjCCollection &&
+
+ if (S->getFnParent() &&
+ !Data.ObjCCollection &&
!Data.IntegralConstantExpression)
AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false, PreferredTypeIsPointer);
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext(CodeCompletionContext::CCC_Expression,
- Data.PreferredType),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
@@ -3701,29 +3785,29 @@ void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
}
-/// \brief The set of properties that have already been added, referenced by
+/// The set of properties that have already been added, referenced by
/// property name.
typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
-/// \brief Retrieve the container definition, if any?
+/// Retrieve the container definition, if any.
static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
if (ObjCInterfaceDecl *Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
if (Interface->hasDefinition())
return Interface->getDefinition();
-
+
return Interface;
}
-
+
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
if (Protocol->hasDefinition())
return Protocol->getDefinition();
-
+
return Protocol;
}
return Container;
}
-/// \brief Adds a block invocation code completion result for the given block
+/// Adds a block invocation code completion result for the given block
/// declaration \p BD.
static void AddObjCBlockCall(ASTContext &Context, const PrintingPolicy &Policy,
CodeCompletionBuilder &Builder,
@@ -3770,7 +3854,7 @@ static void AddObjCProperties(
// Retrieve the definition.
Container = getContainerDef(Container);
-
+
// Add properties in this container.
const auto AddProperty = [&](const ObjCPropertyDecl *P) {
if (!AddedProperties.insert(P->getIdentifier()).second)
@@ -3889,7 +3973,7 @@ static void AddObjCProperties(
}
}
}
-
+
// Add properties in referenced protocols.
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
for (auto *P : Protocol->protocols())
@@ -3929,17 +4013,22 @@ static void AddObjCProperties(
static void AddRecordMembersCompletionResults(Sema &SemaRef,
ResultBuilder &Results, Scope *S,
QualType BaseType,
- RecordDecl *RD) {
+ RecordDecl *RD,
+ Optional<FixItHint> AccessOpFixIt) {
// Indicate that we are performing a member access, and the cv-qualifiers
// for the base object type.
Results.setObjectTypeQualifiers(BaseType.getQualifiers());
// Access to a C/C++ class, struct, or union.
Results.allowNestedNameSpecifiers();
- CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext);
+ std::vector<FixItHint> FixIts;
+ if (AccessOpFixIt)
+ FixIts.emplace_back(AccessOpFixIt.getValue());
+ CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext, std::move(FixIts));
SemaRef.LookupVisibleDecls(RD, Sema::LookupMemberName, Consumer,
SemaRef.CodeCompleter->includeGlobals(),
- /*IncludeDependentBases=*/true);
+ /*IncludeDependentBases=*/true,
+ SemaRef.CodeCompleter->loadExternal());
if (SemaRef.getLangOpts().CPlusPlus) {
if (!Results.empty()) {
@@ -3962,106 +4051,138 @@ static void AddRecordMembersCompletionResults(Sema &SemaRef,
}
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
+ Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement) {
if (!Base || !CodeCompleter)
return;
-
+
ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
if (ConvertedBase.isInvalid())
return;
- Base = ConvertedBase.get();
-
- QualType BaseType = Base->getType();
+ QualType ConvertedBaseType = ConvertedBase.get()->getType();
+
+ enum CodeCompletionContext::Kind contextKind;
if (IsArrow) {
- if (const PointerType *Ptr = BaseType->getAs<PointerType>())
- BaseType = Ptr->getPointeeType();
- else if (BaseType->isObjCObjectPointerType())
- /*Do nothing*/ ;
- else
- return;
+ if (const PointerType *Ptr = ConvertedBaseType->getAs<PointerType>())
+ ConvertedBaseType = Ptr->getPointeeType();
}
-
- enum CodeCompletionContext::Kind contextKind;
-
+
if (IsArrow) {
contextKind = CodeCompletionContext::CCC_ArrowMemberAccess;
- }
- else {
- if (BaseType->isObjCObjectPointerType() ||
- BaseType->isObjCObjectOrInterfaceType()) {
+ } else {
+ if (ConvertedBaseType->isObjCObjectPointerType() ||
+ ConvertedBaseType->isObjCObjectOrInterfaceType()) {
contextKind = CodeCompletionContext::CCC_ObjCPropertyAccess;
- }
- else {
+ } else {
contextKind = CodeCompletionContext::CCC_DotMemberAccess;
}
}
- CodeCompletionContext CCContext(contextKind, BaseType);
+ CodeCompletionContext CCContext(contextKind, ConvertedBaseType);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CCContext,
+ CodeCompleter->getCodeCompletionTUInfo(), CCContext,
&ResultBuilder::IsMember);
- Results.EnterNewScope();
- if (const RecordType *Record = BaseType->getAs<RecordType>()) {
- AddRecordMembersCompletionResults(*this, Results, S, BaseType,
- Record->getDecl());
- } else if (const auto *TST = BaseType->getAs<TemplateSpecializationType>()) {
- TemplateName TN = TST->getTemplateName();
- if (const auto *TD =
- dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl())) {
- CXXRecordDecl *RD = TD->getTemplatedDecl();
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, RD);
- }
- } else if (const auto *ICNT = BaseType->getAs<InjectedClassNameType>()) {
- if (auto *RD = ICNT->getDecl())
- AddRecordMembersCompletionResults(*this, Results, S, BaseType, RD);
- } else if (!IsArrow && BaseType->isObjCObjectPointerType()) {
- // Objective-C property reference.
- AddedPropertiesSet AddedProperties;
-
- if (const ObjCObjectPointerType *ObjCPtr =
- BaseType->getAsObjCInterfacePointerType()) {
- // Add property results based on our interface.
- assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
- AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
- /*AllowNullaryMethods=*/true, CurContext,
- AddedProperties, Results, IsBaseExprStatement);
- }
-
- // Add properties from the protocols in a qualified interface.
- for (auto *I : BaseType->getAs<ObjCObjectPointerType>()->quals())
- AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
- CurContext, AddedProperties, Results,
- IsBaseExprStatement);
- } else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
- (!IsArrow && BaseType->isObjCObjectType())) {
- // Objective-C instance variable access.
- ObjCInterfaceDecl *Class = nullptr;
- if (const ObjCObjectPointerType *ObjCPtr
- = BaseType->getAs<ObjCObjectPointerType>())
- Class = ObjCPtr->getInterfaceDecl();
- else
- Class = BaseType->getAs<ObjCObjectType>()->getInterface();
-
- // Add all ivars from this class and its superclasses.
- if (Class) {
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
- Results.setFilter(&ResultBuilder::IsObjCIvar);
- LookupVisibleDecls(Class, LookupMemberName, Consumer,
- CodeCompleter->includeGlobals());
+
+ auto DoCompletion = [&](Expr *Base, bool IsArrow, Optional<FixItHint> AccessOpFixIt) -> bool {
+ if (!Base)
+ return false;
+
+ ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
+ if (ConvertedBase.isInvalid())
+ return false;
+ Base = ConvertedBase.get();
+
+ QualType BaseType = Base->getType();
+
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isObjCObjectPointerType())
+ /*Do nothing*/;
+ else
+ return false;
+ }
+
+ if (const RecordType *Record = BaseType->getAs<RecordType>()) {
+ AddRecordMembersCompletionResults(*this, Results, S, BaseType,
+ Record->getDecl(),
+ std::move(AccessOpFixIt));
+ } else if (const auto *TST =
+ BaseType->getAs<TemplateSpecializationType>()) {
+ TemplateName TN = TST->getTemplateName();
+ if (const auto *TD =
+ dyn_cast_or_null<ClassTemplateDecl>(TN.getAsTemplateDecl())) {
+ CXXRecordDecl *RD = TD->getTemplatedDecl();
+ AddRecordMembersCompletionResults(*this, Results, S, BaseType, RD,
+ std::move(AccessOpFixIt));
+ }
+ } else if (const auto *ICNT = BaseType->getAs<InjectedClassNameType>()) {
+ if (auto *RD = ICNT->getDecl())
+ AddRecordMembersCompletionResults(*this, Results, S, BaseType, RD,
+ std::move(AccessOpFixIt));
+ } else if (!IsArrow && BaseType->isObjCObjectPointerType()) {
+ // Objective-C property reference.
+ AddedPropertiesSet AddedProperties;
+
+ if (const ObjCObjectPointerType *ObjCPtr =
+ BaseType->getAsObjCInterfacePointerType()) {
+ // Add property results based on our interface.
+ assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
+ AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
+ /*AllowNullaryMethods=*/true, CurContext,
+ AddedProperties, Results, IsBaseExprStatement);
+ }
+
+ // Add properties from the protocols in a qualified interface.
+ for (auto *I : BaseType->getAs<ObjCObjectPointerType>()->quals())
+ AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
+ CurContext, AddedProperties, Results,
+ IsBaseExprStatement);
+ } else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
+ (!IsArrow && BaseType->isObjCObjectType())) {
+ // Objective-C instance variable access.
+ ObjCInterfaceDecl *Class = nullptr;
+ if (const ObjCObjectPointerType *ObjCPtr =
+ BaseType->getAs<ObjCObjectPointerType>())
+ Class = ObjCPtr->getInterfaceDecl();
+ else
+ Class = BaseType->getAs<ObjCObjectType>()->getInterface();
+
+ // Add all ivars from this class and its superclasses.
+ if (Class) {
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ Results.setFilter(&ResultBuilder::IsObjCIvar);
+ LookupVisibleDecls(
+ Class, LookupMemberName, Consumer, CodeCompleter->includeGlobals(),
+ /*IncludeDependentBases=*/false, CodeCompleter->loadExternal());
+ }
}
+
+ // FIXME: How do we cope with isa?
+ return true;
+ };
+
+ Results.EnterNewScope();
+
+ bool CompletionSucceeded = DoCompletion(Base, IsArrow, None);
+ if (CodeCompleter->includeFixIts()) {
+ const CharSourceRange OpRange =
+ CharSourceRange::getTokenRange(OpLoc, OpLoc);
+ CompletionSucceeded |= DoCompletion(
+ OtherOpBase, !IsArrow,
+ FixItHint::CreateReplacement(OpRange, IsArrow ? "." : "->"));
}
-
- // FIXME: How do we cope with isa?
-
+
Results.ExitScope();
+ if (!CompletionSucceeded)
+ return;
+
// Hand off the results found for code completion.
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
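
A rough sketch of what the new OtherOpBase/fix-it path buys (assuming the client enables CodeCompleter->includeFixIts(), and with illustrative names): when the written operator does not match the base type, completion is retried as if the other operator had been used, and each result carries a FixItHint that rewrites the operator token.

    // 'v' is a pointer, so 'v.' uses the wrong operator; with fix-its
    // enabled, members of Vec are still offered, each with a FixIt
    // replacing '.' by '->'. Accepting one produces:
    struct Vec { int size; };
    int use(Vec *v) {
      return v->size; // the code after applying the completion's FixIt
    }
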
void Sema::CodeCompleteObjCClassPropertyRefExpr(Scope *S,
@@ -4100,23 +4221,23 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
Filter = &ResultBuilder::IsEnum;
ContextKind = CodeCompletionContext::CCC_EnumTag;
break;
-
+
case DeclSpec::TST_union:
Filter = &ResultBuilder::IsUnion;
ContextKind = CodeCompletionContext::CCC_UnionTag;
break;
-
+
case DeclSpec::TST_struct:
case DeclSpec::TST_class:
case DeclSpec::TST_interface:
Filter = &ResultBuilder::IsClassOrStruct;
ContextKind = CodeCompletionContext::CCC_ClassOrStructTag;
break;
-
+
default:
llvm_unreachable("Unknown type specifier kind in CodeCompleteTag");
}
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(), ContextKind);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
@@ -4124,14 +4245,17 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
// First pass: look for tags.
Results.setFilter(Filter);
LookupVisibleDecls(S, LookupTagName, Consumer,
- CodeCompleter->includeGlobals());
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
if (CodeCompleter->includeGlobals()) {
// Second pass: look for nested name specifiers.
Results.setFilter(&ResultBuilder::IsNestedNameSpecifier);
- LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer);
+ LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
}
-
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
}
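
The two passes above (tag names first, then nested-name-specifiers when globals are included) correspond to suggestions like the following; the names are illustrative only:

    enum Fruit { Apple, Pear };
    namespace ns { enum Veg { Pea }; }
    // After 'enum ', the first pass offers Fruit, and the second pass
    // offers 'ns::' as a specifier that can lead to ns::Veg.
    enum Fruit Snack = Apple;
    enum ns::Veg Side = ns::Pea;
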
@@ -4157,7 +4281,7 @@ void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
Results.EnterNewScope();
AddTypeQualifierResults(DS, Results, LangOpts);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
+ HandleCodeCompleteResults(this, CodeCompleter,
Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -4171,8 +4295,8 @@ void Sema::CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
AddTypeQualifierResults(DS, Results, LangOpts);
if (LangOpts.CPlusPlus11) {
Results.AddResult("noexcept");
- if (D.getContext() == Declarator::MemberContext && !D.isCtorOrDtor() &&
- !D.isStaticMember()) {
+ if (D.getContext() == DeclaratorContext::MemberContext &&
+ !D.isCtorOrDtor() && !D.isStaticMember()) {
if (!VS || !VS->isFinalSpecified())
Results.AddResult("final");
if (!VS || !VS->isOverrideSpecified())
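
The DeclaratorContext check above gates the C++11 keywords so that 'final' and 'override' are only offered where they could actually be written: on a non-static member function that is neither a constructor nor a destructor. A minimal sketch:

    struct Base { virtual void run(); };
    struct Derived : Base {
      void run() override;  // 'override' and 'final' are offered here
      static void helper(); // static member: the keywords are not offered
    };
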
@@ -4192,7 +4316,7 @@ void Sema::CodeCompleteCase(Scope *S) {
if (getCurFunction()->SwitchStack.empty() || !CodeCompleter)
return;
- SwitchStmt *Switch = getCurFunction()->SwitchStack.back();
+ SwitchStmt *Switch = getCurFunction()->SwitchStack.back().getPointer();
QualType type = Switch->getCond()->IgnoreImplicit()->getType();
if (!type->isEnumeralType()) {
CodeCompleteExpressionData Data(type);
@@ -4200,20 +4324,20 @@ void Sema::CodeCompleteCase(Scope *S) {
CodeCompleteExpression(S, Data);
return;
}
-
+
// Code-complete the cases of a switch statement over an enumeration type
- // by providing the list of
+ // by providing the list of enumerators.
EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
if (EnumDecl *Def = Enum->getDefinition())
Enum = Def;
-
+
// Determine which enumerators we have already seen in the switch statement.
// FIXME: Ideally, we would also be able to look *past* the code-completion
// token, in case we are code-completing in the middle of the switch and not
// at the end. However, we aren't able to do so at the moment.
llvm::SmallPtrSet<EnumConstantDecl *, 8> EnumeratorsSeen;
NestedNameSpecifier *Qualifier = nullptr;
- for (SwitchCase *SC = Switch->getSwitchCaseList(); SC;
+ for (SwitchCase *SC = Switch->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
CaseStmt *Case = dyn_cast<CaseStmt>(SC);
if (!Case)
@@ -4221,16 +4345,16 @@ void Sema::CodeCompleteCase(Scope *S) {
Expr *CaseVal = Case->getLHS()->IgnoreParenCasts();
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseVal))
- if (EnumConstantDecl *Enumerator
+ if (EnumConstantDecl *Enumerator
= dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
- // We look into the AST of the case statement to determine which
- // enumerator was named. Alternatively, we could compute the value of
+ // We look into the AST of the case statement to determine which
+ // enumerator was named. Alternatively, we could compute the value of
// the integral constant expression, then compare it against the
- // values of each enumerator. However, value-based approach would not
- // work as well with C++ templates where enumerators declared within a
+ // values of each enumerator. However, value-based approach would not
+ // work as well with C++ templates where enumerators declared within a
// template are type- and value-dependent.
EnumeratorsSeen.insert(Enumerator);
-
+
// If this is a qualified-id, keep track of the nested-name-specifier
// so that we can reproduce it as part of code completion, e.g.,
//
@@ -4245,14 +4369,14 @@ void Sema::CodeCompleteCase(Scope *S) {
Qualifier = DRE->getQualifier();
}
}
-
+
if (getLangOpts().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
- // If there are no prior enumerators in C++, check whether we have to
+ // If there are no prior enumerators in C++, check whether we have to
// qualify the names of the enumerators that we suggest, because they
// may not be visible in this scope.
Qualifier = getRequiredQualification(Context, CurContext, Enum);
}
-
+
// Add any enumerators that have not yet been mentioned.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
@@ -4261,23 +4385,17 @@ void Sema::CodeCompleteCase(Scope *S) {
for (auto *E : Enum->enumerators()) {
if (EnumeratorsSeen.count(E))
continue;
-
+
CodeCompletionResult R(E, CCP_EnumInCase, Qualifier);
Results.AddResult(R, CurContext, nullptr, false);
}
Results.ExitScope();
- //We need to make sure we're setting the right context,
- //so only say we include macros if the code completer says we do
- enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other;
if (CodeCompleter->includeMacros()) {
AddMacroResults(PP, Results, false);
- kind = CodeCompletionContext::CCC_OtherWithMacros;
}
-
- HandleCodeCompleteResults(this, CodeCompleter,
- kind,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
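
Concretely, with an illustrative scoped enum: once one enumerator has been handled, completion at the next 'case' offers only the remaining ones, reproducing the qualifier seen in earlier cases.

    enum class Color { Red, Green, Blue };
    int score(Color C) {
      switch (C) {
      case Color::Red:
        return 1;
      // Completing 'case ' here suggests Color::Green and Color::Blue,
      // qualified the same way as the Color::Red case above.
      case Color::Green:
        return 2;
      }
      return 0;
    }
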
static bool anyNullArguments(ArrayRef<Expr *> Args) {
@@ -4316,7 +4434,7 @@ static void mergeCandidatesWithResults(Sema &SemaRef,
}
}
-/// \brief Get the type of the Nth parameter from a given set of overload
+/// Get the type of the Nth parameter from a given set of overload
/// candidates.
static QualType getParamType(Sema &SemaRef,
ArrayRef<ResultCandidate> Candidates,
@@ -4470,10 +4588,8 @@ void Sema::CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
return;
// A complete type is needed to lookup for constructors.
- if (!isCompleteType(Loc, Type))
- return;
-
- CXXRecordDecl *RD = Type->getAsCXXRecordDecl();
+ CXXRecordDecl *RD =
+ isCompleteType(Loc, Type) ? Type->getAsCXXRecordDecl() : nullptr;
if (!RD) {
CodeCompleteExpression(S, Type);
return;
@@ -4511,7 +4627,7 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
CodeCompleteOrdinaryName(S, PCC_Expression);
return;
}
-
+
CodeCompleteExpression(S, VD->getType());
}
@@ -4537,13 +4653,14 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
mapCodeCompletionContext(*this, PCC_Statement));
Results.setFilter(&ResultBuilder::IsOrdinaryName);
Results.EnterNewScope();
-
+
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
-
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+
AddOrdinaryNameResults(PCC_Statement, S, *this, Results);
-
+
// "else" block
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -4580,13 +4697,13 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Results.AddResult(Builder.TakeString());
Results.ExitScope();
-
+
if (S->getFnParent())
AddPrettyFunctionResults(getLangOpts(), Results);
-
+
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false);
-
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(),Results.size());
}
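
When accepted, the pattern built above expands to a braced else (and, with code patterns enabled, an else-if variant); schematically:

    void step(bool ready, bool retry) {
      if (ready) {
        /*statements*/;
      } else if (retry) { // the "else if" pattern result
        /*statements*/;
      } else {            // the plain "else" pattern result
        /*statements*/;
      }
    }
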
@@ -4649,7 +4766,8 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer,
/*IncludeGlobalScope=*/true,
- /*IncludeDependentBases=*/true);
+ /*IncludeDependentBases=*/true,
+ CodeCompleter->loadExternal());
}
auto CC = Results.getCompletionContext();
@@ -4662,33 +4780,33 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
void Sema::CodeCompleteUsing(Scope *S) {
if (!CodeCompleter)
return;
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PotentiallyQualifiedName,
&ResultBuilder::IsNestedNameSpecifier);
Results.EnterNewScope();
-
+
// If we aren't in class scope, we could see the "namespace" keyword.
if (!S->isClassScope())
Results.AddResult(CodeCompletionResult("namespace"));
-
- // After "using", we can see anything that would start a
+
+ // After "using", we can see anything that would start a
// nested-name-specifier.
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_PotentiallyQualifiedName,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteUsingDirective(Scope *S) {
if (!CodeCompleter)
return;
-
+
// After "using namespace", we expect to see a namespace name or namespace
// alias.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -4698,46 +4816,46 @@ void Sema::CodeCompleteUsingDirective(Scope *S) {
Results.EnterNewScope();
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Namespace,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteNamespaceDecl(Scope *S) {
if (!CodeCompleter)
return;
-
+
DeclContext *Ctx = S->getEntity();
if (!S->getParent())
Ctx = Context.getTranslationUnitDecl();
-
+
bool SuppressedGlobalResults
= Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
SuppressedGlobalResults
? CodeCompletionContext::CCC_Namespace
: CodeCompletionContext::CCC_Other,
&ResultBuilder::IsNamespace);
-
+
if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
// We only want to see those namespaces that have already been defined
// within this scope, because it's likely that the user is creating an
- // extended namespace declaration. Keep track of the most recent
+ // extended namespace declaration. Keep track of the most recent
// definition of each namespace.
std::map<NamespaceDecl *, NamespaceDecl *> OrigToLatest;
- for (DeclContext::specific_decl_iterator<NamespaceDecl>
+ for (DeclContext::specific_decl_iterator<NamespaceDecl>
NS(Ctx->decls_begin()), NSEnd(Ctx->decls_end());
NS != NSEnd; ++NS)
OrigToLatest[NS->getOriginalNamespace()] = *NS;
-
- // Add the most recent definition (or extended definition) of each
+
+ // Add the most recent definition (or extended definition) of each
// namespace to the list of results.
Results.EnterNewScope();
- for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
+ for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
NS = OrigToLatest.begin(),
NSEnd = OrigToLatest.end();
NS != NSEnd; ++NS)
@@ -4747,8 +4865,8 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
CurContext, nullptr, false);
Results.ExitScope();
}
-
- HandleCodeCompleteResults(this, CodeCompleter,
+
+ HandleCodeCompleteResults(this, CodeCompleter,
Results.getCompletionContext(),
Results.data(),Results.size());
}
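
The OrigToLatest map implements "suggest the most recent definition of each namespace"; in miniature, with illustrative names:

    namespace util { void a(); } // original definition
    namespace util { void b(); } // extension: the declaration suggested
    namespace other {}
    // Completing after 'namespace ' at file scope offers util and other,
    // each referring to its latest (re)definition.
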
@@ -4756,7 +4874,7 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
if (!CodeCompleter)
return;
-
+
// After "namespace", we expect to see a namespace or alias.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
@@ -4764,8 +4882,9 @@ void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
&ResultBuilder::IsNamespaceOrAlias);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
- HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+ HandleCodeCompleteResults(this, CodeCompleter,
Results.getCompletionContext(),
Results.data(),Results.size());
}
@@ -4780,26 +4899,26 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
CodeCompletionContext::CCC_Type,
&ResultBuilder::IsType);
Results.EnterNewScope();
-
+
// Add the names of overloadable operators.
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
if (std::strcmp(Spelling, "?")) \
Results.AddResult(Result(Spelling));
#include "clang/Basic/OperatorKinds.def"
-
+
// Add any type names visible from the current scope
Results.allowNestedNameSpecifiers();
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
-
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+
// Add any type specifiers
AddTypeSpecifierResults(getLangOpts(), Results);
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Type,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteConstructorInitializer(
@@ -4813,12 +4932,12 @@ void Sema::CodeCompleteConstructorInitializer(
CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
if (!Constructor)
return;
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PotentiallyQualifiedName);
Results.EnterNewScope();
-
+
// Fill in any already-initialized fields or base classes.
llvm::SmallPtrSet<FieldDecl *, 4> InitializedFields;
llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
@@ -4830,7 +4949,7 @@ void Sema::CodeCompleteConstructorInitializer(
InitializedFields.insert(cast<FieldDecl>(
Initializers[I]->getAnyMember()));
}
-
+
// Add completions for base classes.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -4841,69 +4960,69 @@ void Sema::CodeCompleteConstructorInitializer(
if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
.second) {
SawLastInitializer
- = !Initializers.empty() &&
+ = !Initializers.empty() &&
Initializers.back()->isBaseInitializer() &&
Context.hasSameUnqualifiedType(Base.getType(),
QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
-
+
Builder.AddTypedTextChunk(
Results.getAllocator().CopyString(
Base.getType().getAsString(Policy)));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
: CCP_MemberDeclaration));
SawLastInitializer = false;
}
-
+
// Add completions for virtual base classes.
for (const auto &Base : ClassDecl->vbases()) {
if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
.second) {
SawLastInitializer
- = !Initializers.empty() &&
+ = !Initializers.empty() &&
Initializers.back()->isBaseInitializer() &&
Context.hasSameUnqualifiedType(Base.getType(),
QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
-
+
Builder.AddTypedTextChunk(
Builder.getAllocator().CopyString(
Base.getType().getAsString(Policy)));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
: CCP_MemberDeclaration));
SawLastInitializer = false;
}
-
+
// Add completions for members.
for (auto *Field : ClassDecl->fields()) {
if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))
.second) {
SawLastInitializer
- = !Initializers.empty() &&
+ = !Initializers.empty() &&
Initializers.back()->isAnyMemberInitializer() &&
Initializers.back()->getAnyMember() == Field;
continue;
}
-
+
if (!Field->getDeclName())
continue;
-
+
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
Field->getIdentifier()->getName()));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
: CCP_MemberDeclaration,
CXCursor_MemberRef,
@@ -4912,12 +5031,12 @@ void Sema::CodeCompleteConstructorInitializer(
SawLastInitializer = false;
}
Results.ExitScope();
-
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
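
The SawLastInitializer bookkeeping boosts whichever base or member naturally comes next in declaration order; a small compilable illustration:

    struct A { int x; A() : x(0) {} };
    struct B : A {
      int y, z;
      // Completing another initializer after 'y(0)' ranks 'z(...)' at
      // CCP_NextInitializer, since z directly follows the last one written.
      B() : A(), y(0), z(0) {}
    };
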
-/// \brief Determine whether this scope denotes a namespace.
+/// Determine whether this scope denotes a namespace.
static bool isNamespaceScope(Scope *S) {
DeclContext *DC = S->getEntity();
if (!DC)
@@ -4941,10 +5060,10 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
IncludedThis = true;
continue;
}
-
+
Known.insert(C.Id);
}
-
+
// Look for other capturable variables.
for (; S && !isNamespaceScope(S); S = S->getParent()) {
for (const auto *D : S->decls()) {
@@ -4953,7 +5072,7 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
!Var->hasLocalStorage() ||
Var->hasAttr<BlocksAttr>())
continue;
-
+
if (Known.insert(Var->getIdentifier()).second)
Results.AddResult(CodeCompletionResult(Var, CCP_LocalDeclaration),
CurContext, nullptr, false);
@@ -4963,9 +5082,9 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
// Add 'this', if it would be valid.
if (!IncludedThis && !AfterAmpersand && Intro.Default != LCD_ByCopy)
addThisCompletion(*this, Results);
-
+
Results.ExitScope();
-
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -4980,7 +5099,7 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
typedef CodeCompletionResult Result;
// Since we have an implementation, we can end it.
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
-
+
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
if (LangOpts.ObjC2) {
@@ -4989,30 +5108,30 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
-
+
// @synthesize
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synthesize"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
- }
+ }
}
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
typedef CodeCompletionResult Result;
-
+
// Since we have an interface or protocol, we can end it.
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
-
+
if (LangOpts.ObjC2) {
// @property
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"property")));
-
+
// @required
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"required")));
-
+
// @optional
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"optional")));
}
@@ -5022,35 +5141,35 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
-
+
// @class name ;
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"class"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
-
+
if (Results.includeCodePatterns()) {
- // @interface name
- // FIXME: Could introduce the whole pattern, including superclasses and
+ // @interface name
+ // FIXME: Could introduce the whole pattern, including superclasses and
// such.
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"interface"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
-
+
// @protocol name
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("protocol");
Results.AddResult(Result(Builder.TakeString()));
-
+
// @implementation name
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"implementation"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
}
-
+
// @compatibility_alias name
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"compatibility_alias"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -5080,9 +5199,8 @@ void Sema::CodeCompleteObjCAtDirective(Scope *S) {
else
AddObjCTopLevelResults(Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
@@ -5101,7 +5219,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
Builder.AddPlaceholderChunk("type-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
-
+
// @protocol ( protocol-name )
Builder.AddResultTypeChunk("Protocol *");
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
@@ -5154,7 +5272,7 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
-
+
if (Results.includeCodePatterns()) {
// @try { statements } @catch ( declaration ) { statements } @finally
// { statements }
@@ -5175,13 +5293,13 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
-
+
// @throw
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"throw"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
-
+
if (Results.includeCodePatterns()) {
// @synchronized ( expression ) { statements }
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synchronized"));
@@ -5214,9 +5332,8 @@ void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
Results.EnterNewScope();
AddObjCVisibilityResults(getLangOpts(), Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCAtStatement(Scope *S) {
@@ -5227,9 +5344,8 @@ void Sema::CodeCompleteObjCAtStatement(Scope *S) {
AddObjCStatementResults(Results, false);
AddObjCExpressionResults(Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCAtExpression(Scope *S) {
@@ -5239,25 +5355,24 @@ void Sema::CodeCompleteObjCAtExpression(Scope *S) {
Results.EnterNewScope();
AddObjCExpressionResults(Results, false);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-/// \brief Determine whether the addition of the given flag to an Objective-C
+/// Determine whether the addition of the given flag to an Objective-C
/// property's attributes will cause a conflict.
static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
// Check if we've already added this flag.
if (Attributes & NewFlag)
return true;
-
+
Attributes |= NewFlag;
-
+
// Check for collisions with "readonly".
if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
(Attributes & ObjCDeclSpec::DQ_PR_readwrite))
return true;
-
+
// Check for more than one of { assign, copy, retain, strong, weak }.
unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
ObjCDeclSpec::DQ_PR_unsafe_unretained |
@@ -5273,16 +5388,16 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_weak)
return true;
-
+
return false;
}
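
The chain of != comparisons over AssignCopyRetMask amounts to an "at most one ownership flag set" test; a standalone restatement of that bit trick (the mask values here are illustrative, not the real ObjCDeclSpec::DQ_PR_* constants):

    // m has at most one bit set iff clearing its lowest set bit leaves
    // zero -- the same condition the comparisons above encode.
    constexpr bool atMostOneBit(unsigned m) { return (m & (m - 1)) == 0; }
    static_assert(atMostOneBit(0x04), "one ownership attribute is fine");
    static_assert(!atMostOneBit(0x04 | 0x10), "two attributes conflict");
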
-void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
+void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
if (!CodeCompleter)
return;
-
+
unsigned Attributes = ODS.getPropertyAttributes();
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
@@ -5335,12 +5450,11 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Results.AddResult(CodeCompletionResult("null_resettable"));
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-/// \brief Describes the kind of Objective-C method that we want to find
+/// Describes the kind of Objective-C method that we want to find
/// via code completion.
enum ObjCMethodKind {
 MK_Any, ///< Any kind of method, provided it meets other specified criteria.
@@ -5355,20 +5469,20 @@ static bool isAcceptableObjCSelector(Selector Sel,
unsigned NumSelIdents = SelIdents.size();
if (NumSelIdents > Sel.getNumArgs())
return false;
-
+
switch (WantKind) {
case MK_Any: break;
case MK_ZeroArgSelector: return Sel.isUnarySelector();
case MK_OneArgSelector: return Sel.getNumArgs() == 1;
}
-
+
if (!AllowSameLength && NumSelIdents && NumSelIdents == Sel.getNumArgs())
return false;
-
+
for (unsigned I = 0; I != NumSelIdents; ++I)
if (SelIdents[I] != Sel.getIdentifierInfoForSlot(I))
return false;
-
+
return true;
}
@@ -5381,16 +5495,16 @@ static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
}
namespace {
- /// \brief A set of selectors, which is used to avoid introducing multiple
+ /// A set of selectors, which is used to avoid introducing multiple
/// completions with the same selector into the result set.
typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
}
-/// \brief Add all of the Objective-C methods in the given Objective-C
+/// Add all of the Objective-C methods in the given Objective-C
/// container to the set of results.
///
-/// The container will be a class, protocol, category, or implementation of
-/// any of the above. This mether will recurse to include methods from
+/// The container will be a class, protocol, category, or implementation of
+/// any of the above. This method will recurse to include methods from
/// the superclasses of classes along with their categories, protocols, and
/// implementations.
///
@@ -5422,7 +5536,7 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
// metaclass.
if (M->isInstanceMethod() == WantInstanceMethods ||
(IsRootClass && !WantInstanceMethods)) {
- // Check whether the selector identifiers we've been given are a
+ // Check whether the selector identifiers we've been given are a
// subset of the identifiers for this particular method.
if (!isAcceptableObjCMethod(M, WantKind, SelIdents, AllowSameLength))
continue;
@@ -5438,23 +5552,23 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
Results.MaybeAddResult(R, CurContext);
}
}
-
+
// Visit the protocols of protocols.
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
if (Protocol->hasDefinition()) {
const ObjCList<ObjCProtocolDecl> &Protocols
= Protocol->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents, CurContext,
Selectors, AllowSameLength, Results, false, IsRootClass);
}
}
-
+
if (!IFace || !IFace->hasDefinition())
return;
-
+
// Add methods in protocols.
for (auto *I : IFace->protocols())
AddObjCMethods(I, WantInstanceMethods, WantKind, SelIdents, CurContext,
@@ -5467,7 +5581,7 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
InOriginalClass, IsRootClass);
 // Add a category's protocol methods.
- const ObjCList<ObjCProtocolDecl> &Protocols
+ const ObjCList<ObjCProtocolDecl> &Protocols
= CatDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
@@ -5481,7 +5595,7 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
Selectors, AllowSameLength, Results, InOriginalClass,
IsRootClass);
}
-
+
// Add methods in superclass.
// Avoid passing in IsRootClass since root classes won't have super classes.
if (IFace->getSuperClass())
@@ -5519,9 +5633,8 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
AddObjCMethods(Class, true, MK_ZeroArgSelector, None, CurContext, Selectors,
/*AllowSameLength=*/true, Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
@@ -5548,9 +5661,8 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
Selectors, /*AllowSameLength=*/true, Results);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
@@ -5559,22 +5671,22 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Type);
Results.EnterNewScope();
-
+
// Add context-sensitive, Objective-C parameter-passing keywords.
bool AddedInOut = false;
- if ((DS.getObjCDeclQualifier() &
+ if ((DS.getObjCDeclQualifier() &
(ObjCDeclSpec::DQ_In | ObjCDeclSpec::DQ_Inout)) == 0) {
Results.AddResult("in");
Results.AddResult("inout");
AddedInOut = true;
}
- if ((DS.getObjCDeclQualifier() &
+ if ((DS.getObjCDeclQualifier() &
(ObjCDeclSpec::DQ_Out | ObjCDeclSpec::DQ_Inout)) == 0) {
Results.AddResult("out");
if (!AddedInOut)
Results.AddResult("inout");
}
- if ((DS.getObjCDeclQualifier() &
+ if ((DS.getObjCDeclQualifier() &
(ObjCDeclSpec::DQ_Bycopy | ObjCDeclSpec::DQ_Byref |
ObjCDeclSpec::DQ_Oneway)) == 0) {
Results.AddResult("bycopy");
@@ -5586,8 +5698,8 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
Results.AddResult("nullable");
Results.AddResult("null_unspecified");
}
-
- // If we're completing the return type of an Objective-C method and the
+
+ // If we're completing the return type of an Objective-C method and the
// identifier IBAction refers to a macro, provide a completion item for
// an action, e.g.,
// IBAction)<#selector#>:(id)sender
@@ -5611,26 +5723,26 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
if (!IsParameter) {
Results.AddResult(CodeCompletionResult("instancetype"));
}
-
+
// Add various builtin type names and specifiers.
AddOrdinaryNameResults(PCC_Type, S, *this, Results);
Results.ExitScope();
-
+
// Add the various type names
Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
-
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false);
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Type,
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
-/// \brief When we have an expression with type "id", we may assume
+/// When we have an expression with type "id", we may assume
/// that it has some more-specific class type based on knowledge of
/// common uses of Objective-C. This routine returns that class type,
/// or NULL if no better result could be determined.
@@ -5702,7 +5814,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
}
// Add a special completion for a message send to "super", which fills in the
-// most likely case of forwarding all of our arguments to the superclass
+// most likely case of forwarding all of our arguments to the superclass
// function.
///
/// \param S The semantic analysis object.
@@ -5715,7 +5827,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
///
/// \param Results The set of results to augment.
///
-/// \returns the Objective-C method declaration that would be invoked by
+/// \returns the Objective-C method declaration that would be invoked by
/// this "super" completion. If NULL, no completion was added.
static ObjCMethodDecl *AddSuperSendCompletion(
Sema &S, bool NeedSuperKeyword,
@@ -5733,7 +5845,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
ObjCMethodDecl *SuperMethod = nullptr;
while ((Class = Class->getSuperClass()) && !SuperMethod) {
// Check in the class
- SuperMethod = Class->getMethod(CurMethod->getSelector(),
+ SuperMethod = Class->getMethod(CurMethod->getSelector(),
CurMethod->isInstanceMethod());
// Check in categories or class extensions.
@@ -5759,7 +5871,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
SuperP = SuperMethod->param_begin();
CurP != CurPEnd; ++CurP, ++SuperP) {
// Make sure the parameter types are compatible.
- if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
+ if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
(*SuperP)->getType()))
return nullptr;
@@ -5767,11 +5879,11 @@ static ObjCMethodDecl *AddSuperSendCompletion(
if (!(*CurP)->getIdentifier())
return nullptr;
}
-
+
// We have a superclass method. Now, form the send-to-super completion.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
-
+
// Give this completion a return type.
AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
Results.getCompletionContext().getBaseType(),
@@ -5782,7 +5894,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
Builder.AddTypedTextChunk("super");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
}
-
+
Selector Sel = CurMethod->getSelector();
if (Sel.isUnarySelector()) {
if (NeedSuperKeyword)
@@ -5796,7 +5908,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
if (I > SelIdents.size())
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
-
+
if (I < SelIdents.size())
Builder.AddInformativeChunk(
Builder.getAllocator().CopyString(
@@ -5812,16 +5924,16 @@ static ObjCMethodDecl *AddSuperSendCompletion(
Builder.getAllocator().CopyString(
Sel.getNameForSlot(I) + ":"));
Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
- (*CurP)->getIdentifier()->getName()));
+ (*CurP)->getIdentifier()->getName()));
}
}
}
-
+
Results.AddResult(CodeCompletionResult(Builder.TakeString(), SuperMethod,
CCP_SuperCompletion));
return SuperMethod;
}
-
+
void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -5830,32 +5942,33 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
getLangOpts().CPlusPlus11
? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
: &ResultBuilder::IsObjCMessageReceiver);
-
+
CodeCompletionDeclConsumer Consumer(Results, CurContext);
Results.EnterNewScope();
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
- CodeCompleter->includeGlobals());
-
+ CodeCompleter->includeGlobals(),
+ CodeCompleter->loadExternal());
+
// If we are in an Objective-C method inside a class that has a superclass,
// add "super" as an option.
if (ObjCMethodDecl *Method = getCurMethodDecl())
if (ObjCInterfaceDecl *Iface = Method->getClassInterface())
if (Iface->getSuperClass()) {
Results.AddResult(Result("super"));
-
+
AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, None, Results);
}
-
+
if (getLangOpts().CPlusPlus11)
addThisCompletion(*this, Results);
-
+
Results.ExitScope();
-
+
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
-
+
}
void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
@@ -5867,7 +5980,7 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
CDecl = CurMethod->getClassInterface();
if (!CDecl)
return;
-
+
// Find the superclass of this class.
CDecl = CDecl->getSuperClass();
if (!CDecl)
@@ -5887,7 +6000,7 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
// "super" may be the name of a type or variable. Figure out which
// it is.
IdentifierInfo *Super = getSuperIdentifier();
- NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
+ NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
LookupOrdinaryName);
if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
// "super" names an interface. Use it.
@@ -5916,24 +6029,24 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ParsedType Receiver;
if (CDecl)
Receiver = ParsedType::make(Context.getObjCInterfaceType(CDecl));
- return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
+ return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
AtArgumentExpression,
/*IsSuper=*/true);
}
-/// \brief Given a set of code-completion results for the argument of a message
+/// Given a set of code-completion results for the argument of a message
/// send, determine the preferred type (if any) for that argument expression.
static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
unsigned NumSelIdents) {
- typedef CodeCompletionResult Result;
+ typedef CodeCompletionResult Result;
ASTContext &Context = Results.getSema().Context;
-
+
QualType PreferredType;
unsigned BestPriority = CCP_Unlikely * 2;
Result *ResultsData = Results.data();
for (unsigned I = 0, N = Results.size(); I != N; ++I) {
Result &R = ResultsData[I];
- if (R.Kind == Result::RK_Declaration &&
+ if (R.Kind == Result::RK_Declaration &&
isa<ObjCMethodDecl>(R.Declaration)) {
if (R.Priority <= BestPriority) {
const ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
@@ -5955,7 +6068,7 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
return PreferredType;
}
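
A distilled restatement of the tie-breaking rule above, with plain strings standing in for QualType and hasSameUnqualifiedType: keep the parameter type of the best-priority candidate, and drop the preference when an equally ranked candidate disagrees.

    #include <string>
    #include <utility>
    #include <vector>

    using Cand = std::pair<unsigned, std::string>; // (priority, type name)

    std::string preferredType(const std::vector<Cand> &Cs) {
      unsigned Best = ~0u; // lower priority values are better
      std::string T;
      for (const Cand &C : Cs) {
        if (C.first > Best)
          continue;                  // strictly worse: ignore
        if (C.first < Best || T.empty()) {
          Best = C.first;            // better rank, or no preference yet
          T = C.second;
        } else if (T != C.second) {
          T.clear();                 // equal rank, different type: drop
        }
      }
      return T;
    }
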
-static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
@@ -5968,55 +6081,55 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
// corresponding declaration.
if (Receiver) {
QualType T = SemaRef.GetTypeFromParser(Receiver, nullptr);
- if (!T.isNull())
+ if (!T.isNull())
if (const ObjCObjectType *Interface = T->getAs<ObjCObjectType>())
CDecl = Interface->getInterface();
}
-
+
// Add all of the factory methods in this Objective-C class, its protocols,
// superclasses, categories, implementation, etc.
Results.EnterNewScope();
-
- // If this is a send-to-super, try to add the special "super" send
+
+ // If this is a send-to-super, try to add the special "super" send
// completion.
if (IsSuper) {
if (ObjCMethodDecl *SuperMethod
= AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
-
+
// If we're inside an Objective-C method definition, prefer its selector to
// others.
if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl())
Results.setPreferredSelector(CurMethod->getSelector());
-
+
VisitedSelectorSet Selectors;
- if (CDecl)
+ if (CDecl)
AddObjCMethods(CDecl, false, MK_Any, SelIdents,
SemaRef.CurContext, Selectors, AtArgumentExpression,
- Results);
+ Results);
else {
// We're messaging "id" as a type; provide all class/factory methods.
-
+
// If we have an external source, load the entire class method
// pool from the AST file.
if (SemaRef.getExternalSource()) {
- for (uint32_t I = 0,
+ for (uint32_t I = 0,
N = SemaRef.getExternalSource()->GetNumExternalSelectors();
I != N; ++I) {
Selector Sel = SemaRef.getExternalSource()->GetExternalSelector(I);
if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
continue;
-
+
SemaRef.ReadMethodPool(Sel);
}
}
-
+
for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
MEnd = SemaRef.MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.second;
- MethList && MethList->getMethod();
+ MethList && MethList->getMethod();
MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -6029,32 +6142,32 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
}
}
}
-
- Results.ExitScope();
+
+ Results.ExitScope();
}
void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper) {
-
+
QualType T = this->GetTypeFromParser(Receiver);
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage,
T, SelIdents));
-
+
AddClassMessageCompletions(*this, S, Receiver, SelIdents,
AtArgumentExpression, IsSuper, Results);
-
- // If we're actually at the argument expression (rather than prior to the
+
+ // If we're actually at the argument expression (rather than prior to the
// selector), we're actually performing code completion for an expression.
- // Determine whether we have a single, best method. If so, we can
+ // Determine whether we have a single, best method. If so, we can
// code-complete the expression using the corresponding parameter type as
// our preferred type, improving completion results.
if (AtArgumentExpression) {
- QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
SelIdents.size());
if (PreferredType.isNull())
CodeCompleteOrdinaryName(S, PCC_Expression);
@@ -6063,7 +6176,7 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter,
+ HandleCodeCompleteResults(this, CodeCompleter,
Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -6073,9 +6186,9 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super) {
typedef CodeCompletionResult Result;
-
+
Expr *RecExpr = static_cast<Expr *>(Receiver);
-
+
// If necessary, apply function/array conversion to the receiver.
// C99 6.7.5.3p[7,8].
if (RecExpr) {
@@ -6084,18 +6197,18 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
return;
RecExpr = Conv.get();
}
- QualType ReceiverType = RecExpr? RecExpr->getType()
+ QualType ReceiverType = RecExpr? RecExpr->getType()
: Super? Context.getObjCObjectPointerType(
Context.getObjCInterfaceType(Super))
: Context.getObjCIdType();
-
+
// If we're messaging an expression with type "id" or "Class", check
// whether we know something special about the receiver that allows
// us to assume a more-specific receiver type.
if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType()) {
if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
if (ReceiverType->isObjCClassType())
- return CodeCompleteObjCClassMessage(S,
+ return CodeCompleteObjCClassMessage(S,
ParsedType::make(Context.getObjCInterfaceType(IFace)),
SelIdents,
AtArgumentExpression, Super);
@@ -6116,36 +6229,36 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
ReceiverType, SelIdents));
-
+
Results.EnterNewScope();
- // If this is a send-to-super, try to add the special "super" send
+ // If this is a send-to-super, try to add the special "super" send
// completion.
if (Super) {
if (ObjCMethodDecl *SuperMethod
= AddSuperSendCompletion(*this, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
-
+
// If we're inside an Objective-C method definition, prefer its selector to
// others.
if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
Results.setPreferredSelector(CurMethod->getSelector());
-
+
// Keep track of the selectors we've already added.
VisitedSelectorSet Selectors;
-
+
// Handle messages to Class. This really isn't a message to an instance
// method, so we treat it the same way we would treat a message send to a
// class method.
- if (ReceiverType->isObjCClassType() ||
+ if (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType()) {
if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
AddObjCMethods(ClassDecl, false, MK_Any, SelIdents,
CurContext, Selectors, AtArgumentExpression, Results);
}
- }
+ }
// Handle messages to a qualified ID ("id<foo>").
else if (const ObjCObjectPointerType *QualID
= ReceiverType->getAsObjCQualifiedIdType()) {
@@ -6161,7 +6274,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
CurContext, Selectors, AtArgumentExpression,
Results);
-
+
// Search protocols for instance methods.
for (auto *I : IFacePtr->quals())
AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
@@ -6189,11 +6302,11 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
MEnd = MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.first;
- MethList && MethList->getMethod();
+ MethList && MethList->getMethod();
MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
-
+
if (!Selectors.insert(MethList->getMethod()->getSelector()).second)
continue;
@@ -6206,15 +6319,15 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
}
}
Results.ExitScope();
-
-
- // If we're actually at the argument expression (rather than prior to the
+
+
+ // If we're actually at the argument expression (rather than prior to the
// selector), we're actually performing code completion for an expression.
- // Determine whether we have a single, best method. If so, we can
+ // Determine whether we have a single, best method. If so, we can
// code-complete the expression using the corresponding parameter type as
// our preferred type, improving completion results.
if (AtArgumentExpression) {
- QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
SelIdents.size());
if (PreferredType.isNull())
CodeCompleteOrdinaryName(S, PCC_Expression);
@@ -6222,17 +6335,17 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
CodeCompleteExpression(S, PreferredType);
return;
}
-
- HandleCodeCompleteResults(this, CodeCompleter,
+
+ HandleCodeCompleteResults(this, CodeCompleter,
Results.getCompletionContext(),
Results.data(),Results.size());
}
-void Sema::CodeCompleteObjCForCollection(Scope *S,
+void Sema::CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar) {
CodeCompleteExpressionData Data;
Data.ObjCCollection = true;
-
+
if (IterationVar.getAsOpaquePtr()) {
DeclGroupRef DG = IterationVar.get();
for (DeclGroupRef::iterator I = DG.begin(), End = DG.end(); I != End; ++I) {
@@ -6240,7 +6353,7 @@ void Sema::CodeCompleteObjCForCollection(Scope *S,
Data.IgnoreDecls.push_back(*I);
}
}
-
+
CodeCompleteExpression(S, Data);
}
@@ -6254,11 +6367,11 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || MethodPool.count(Sel))
continue;
-
+
ReadMethodPool(Sel);
}
}
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_SelectorName);
@@ -6266,7 +6379,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
for (GlobalMethodPool::iterator M = MethodPool.begin(),
MEnd = MethodPool.end();
M != MEnd; ++M) {
-
+
Selector Sel = M->first;
if (!isAcceptableObjCSelector(Sel, MK_Any, SelIdents))
continue;
@@ -6279,7 +6392,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
Results.AddResult(Builder.TakeString());
continue;
}
-
+
std::string Accumulator;
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
if (I == SelIdents.size()) {
@@ -6289,7 +6402,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
Accumulator.clear();
}
}
-
+
Accumulator += Sel.getNameForSlot(I);
Accumulator += ':';
}
@@ -6297,19 +6410,18 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
Results.AddResult(Builder.TakeString());
}
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_SelectorName,
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
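
[editor's note] A worked example of the Accumulator loop above, re-created standalone (the helper below is hypothetical and assumes the selector has already been split into its name slots):

#include <string>
#include <utility>
#include <vector>

// Mirrors the Accumulator logic: pieces the user has already typed are
// shown as a single informative chunk, the rest as the typed-text chunk.
static std::pair<std::string, std::string>
splitSelector(const std::vector<std::string> &Slots, unsigned NumTyped) {
  std::string Typed, Rest;
  for (unsigned I = 0, N = Slots.size(); I != N; ++I)
    (I < NumTyped ? Typed : Rest) += Slots[I] + ":";
  return {Typed, Rest};
}

// splitSelector({"insertObject", "atIndex"}, /*NumTyped=*/1)
//   -> {"insertObject:", "atIndex:"}
// i.e. completing @selector(insertObject:at|) offers "atIndex:" as the
// text still to be typed, prefixed by the informative "insertObject:".
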
-/// \brief Add all of the protocol declarations that we find in the given
+/// Add all of the protocol declarations that we find in the given
/// (translation unit) context.
static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
bool OnlyForwardDeclarations,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
-
+
for (const auto *D : Ctx->decls()) {
// Record any protocols we find.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
@@ -6324,10 +6436,10 @@ void Sema::CodeCompleteObjCProtocolReferences(
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
-
+
if (CodeCompleter->includeGlobals()) {
Results.EnterNewScope();
-
+
// Tell the result set to ignore all of the protocols we have
// already seen.
// FIXME: This doesn't work when caching code-completion results.
@@ -6342,40 +6454,38 @@ void Sema::CodeCompleteObjCProtocolReferences(
Results.ExitScope();
}
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCProtocolName,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
-
+
if (CodeCompleter->includeGlobals()) {
Results.EnterNewScope();
-
+
// Add all protocols.
AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, true,
Results);
Results.ExitScope();
}
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCProtocolName,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-/// \brief Add all of the Objective-C interface declarations that we find in
+/// Add all of the Objective-C interface declarations that we find in
/// the given (translation unit) context.
static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
bool OnlyForwardDeclarations,
bool OnlyUnimplemented,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
-
+
for (const auto *D : Ctx->decls()) {
// Record any interfaces we find.
if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
@@ -6386,32 +6496,31 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
}
}
-void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
+void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCInterfaceName);
Results.EnterNewScope();
-
+
if (CodeCompleter->includeGlobals()) {
// Add all classes.
AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
false, Results);
}
-
+
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCInterfaceName,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
- SourceLocation ClassNameLoc) {
+ SourceLocation ClassNameLoc) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCInterfaceName);
Results.EnterNewScope();
-
+
// Make sure that we ignore the class we're currently defining.
NamedDecl *CurClass
= LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
@@ -6423,18 +6532,17 @@ void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
false, Results);
}
-
+
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCInterfaceName,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
+void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCImplementation);
Results.EnterNewScope();
if (CodeCompleter->includeGlobals()) {
@@ -6442,23 +6550,22 @@ void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
true, Results);
}
-
+
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCInterfaceName,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
+void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
typedef CodeCompletionResult Result;
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
-
+
// Ignore any categories we find that have already been implemented by this
// interface.
llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
@@ -6472,24 +6579,23 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
// Add all of the categories we know about.
Results.EnterNewScope();
TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
- for (const auto *D : TU->decls())
+ for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
if (CategoryNames.insert(Category->getIdentifier()).second)
Results.AddResult(Result(Category, Results.getBasePriority(Category),
nullptr),
CurContext, nullptr, false);
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCCategoryName,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
+void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
typedef CodeCompletionResult Result;
-
+
// Find the corresponding interface. If we couldn't find the interface, the
// program itself is ill-formed. However, we'll try to be helpful still by
// providing the list of all of the categories we know about.
@@ -6498,12 +6604,12 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
if (!Class)
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
-
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
-
- // Add all of the categories that have have corresponding interface
+
+  // Add all of the categories that have corresponding interface
// declarations in this class and any of its superclasses, except for
// already-implemented categories in the class itself.
llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
@@ -6516,15 +6622,14 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
CurContext, nullptr, false);
}
-
+
Class = Class->getSuperClass();
IgnoreImplemented = false;
}
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_ObjCCategoryName,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}

void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
@@ -6536,38 +6641,37 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
// Figure out where this @synthesize lives.
ObjCContainerDecl *Container
= dyn_cast_or_null<ObjCContainerDecl>(CurContext);
- if (!Container ||
- (!isa<ObjCImplementationDecl>(Container) &&
+ if (!Container ||
+ (!isa<ObjCImplementationDecl>(Container) &&
!isa<ObjCCategoryImplDecl>(Container)))
- return;
+ return;
// Ignore any properties that have already been implemented.
Container = getContainerDef(Container);
for (const auto *D : Container->decls())
if (const auto *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(D))
Results.Ignore(PropertyImpl->getPropertyDecl());
-
+
// Add any properties that we find.
AddedPropertiesSet AddedProperties;
Results.EnterNewScope();
if (ObjCImplementationDecl *ClassImpl
= dyn_cast<ObjCImplementationDecl>(Container))
AddObjCProperties(CCContext, ClassImpl->getClassInterface(), false,
- /*AllowNullaryMethods=*/false, CurContext,
+ /*AllowNullaryMethods=*/false, CurContext,
AddedProperties, Results);
else
AddObjCProperties(CCContext,
cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl(),
- false, /*AllowNullaryMethods=*/false, CurContext,
+ false, /*AllowNullaryMethods=*/false, CurContext,
AddedProperties, Results);
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
+void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -6577,11 +6681,11 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
// Figure out where this @synthesize lives.
ObjCContainerDecl *Container
= dyn_cast_or_null<ObjCContainerDecl>(CurContext);
- if (!Container ||
- (!isa<ObjCImplementationDecl>(Container) &&
+ if (!Container ||
+ (!isa<ObjCImplementationDecl>(Container) &&
!isa<ObjCCategoryImplDecl>(Container)))
- return;
-
+ return;
+
// Figure out which interface we're looking into.
ObjCInterfaceDecl *Class = nullptr;
if (ObjCImplementationDecl *ClassImpl
@@ -6596,10 +6700,10 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
if (Class) {
if (ObjCPropertyDecl *Property = Class->FindPropertyDeclaration(
PropertyName, ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
- PropertyType
+ PropertyType
= Property->getType().getNonReferenceType().getUnqualifiedType();
-
- // Give preference to ivars
+
+ // Give preference to ivars
Results.setPreferredType(PropertyType);
}
}
@@ -6613,32 +6717,32 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
std::string NameWithSuffix = PropertyName->getName().str();
NameWithSuffix += '_';
for(; Class; Class = Class->getSuperClass()) {
- for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
+ for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
Ivar = Ivar->getNextIvar()) {
Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
CurContext, nullptr, false);
- // Determine whether we've seen an ivar with a name similar to the
+ // Determine whether we've seen an ivar with a name similar to the
// property.
if ((PropertyName == Ivar->getIdentifier() ||
NameWithPrefix == Ivar->getName() ||
NameWithSuffix == Ivar->getName())) {
SawSimilarlyNamedIvar = true;
-
+
// Reduce the priority of this result by one, to give it a slight
// advantage over other results whose names don't match so closely.
- if (Results.size() &&
- Results.data()[Results.size() - 1].Kind
+ if (Results.size() &&
+ Results.data()[Results.size() - 1].Kind
== CodeCompletionResult::RK_Declaration &&
Results.data()[Results.size() - 1].Declaration == Ivar)
Results.data()[Results.size() - 1].Priority--;
}
}
}
-
+
if (!SawSimilarlyNamedIvar) {
// Create ivar result _propName, that the user can use to synthesize
- // an ivar of the appropriate type.
+ // an ivar of the appropriate type.
unsigned Priority = CCP_MemberDeclaration + 1;
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
@@ -6649,15 +6753,14 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
Policy, Allocator));
Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
- Results.AddResult(Result(Builder.TakeString(), Priority,
+ Results.AddResult(Result(Builder.TakeString(), Priority,
CXCursor_ObjCIvarDecl));
}
-
+
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
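
[editor's note] To make the name matching above concrete, a short sketch, assuming a property named propName (the variable values follow the construction earlier in this function):

std::string PropName = "propName";            // hypothetical property name
std::string NameWithPrefix = "_" + PropName;  // "_propName" -- also the
                                              // synthesized fallback above
std::string NameWithSuffix = PropName + "_";  // "propName_"
// An existing ivar spelled propName, _propName, or propName_ sets
// SawSimilarlyNamedIvar, and its just-added result has its Priority
// decremented -- lower priority values rank higher, so the close match
// sorts above unrelated ivars.
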
// Mapping from selectors to the methods that implement that selector, along
@@ -6665,7 +6768,7 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
typedef llvm::DenseMap<
Selector, llvm::PointerIntPair<ObjCMethodDecl *, 1, bool> > KnownMethodsMap;
-/// \brief Find all of the methods that reside in the given container
+/// Find all of the methods that reside in the given container
/// (and its superclasses, protocols, etc.) that meet the given
/// criteria. Insert those methods into the map of known methods,
/// indexed by selector so they can be easily found.
@@ -6682,11 +6785,11 @@ static void FindImplementableMethods(ASTContext &Context,
IFace = IFace->getDefinition();
Container = IFace;
-
+
const ObjCList<ObjCProtocolDecl> &Protocols
= IFace->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
KnownMethods, InOriginalClass);
@@ -6694,12 +6797,12 @@ static void FindImplementableMethods(ASTContext &Context,
// Add methods from any class extensions and categories.
for (auto *Cat : IFace->visible_categories()) {
FindImplementableMethods(Context, Cat, WantInstanceMethods, ReturnType,
- KnownMethods, false);
+ KnownMethods, false);
}
// Visit the superclass.
if (IFace->getSuperClass())
- FindImplementableMethods(Context, IFace->getSuperClass(),
+ FindImplementableMethods(Context, IFace->getSuperClass(),
WantInstanceMethods, ReturnType,
KnownMethods, false);
}
@@ -6709,14 +6812,14 @@ static void FindImplementableMethods(ASTContext &Context,
const ObjCList<ObjCProtocolDecl> &Protocols
= Category->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
KnownMethods, InOriginalClass);
-
+
// If this category is the original class, jump to the interface.
if (InOriginalClass && Category->getClassInterface())
- FindImplementableMethods(Context, Category->getClassInterface(),
+ FindImplementableMethods(Context, Category->getClassInterface(),
WantInstanceMethods, ReturnType, KnownMethods,
false);
}
@@ -6727,12 +6830,12 @@ static void FindImplementableMethods(ASTContext &Context,
return;
Protocol = Protocol->getDefinition();
Container = Protocol;
-
+
// Recurse into protocols.
const ObjCList<ObjCProtocolDecl> &Protocols
= Protocol->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
KnownMethods, false);
@@ -6753,7 +6856,7 @@ static void FindImplementableMethods(ASTContext &Context,
}
}
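
[editor's note] For orientation, this condensed excerpt shows how the map is consumed by CodeCompleteObjCMethodDecl further down: the PointerIntPair's bool records whether the method was found in the class being completed, and inherited methods are ranked lower.

for (KnownMethodsMap::iterator M = KnownMethods.begin(),
                               MEnd = KnownMethods.end();
     M != MEnd; ++M) {
  ObjCMethodDecl *Method = M->second.getPointer();
  unsigned Priority = CCP_CodePattern;
  if (!M->second.getInt())       // found in a superclass/protocol/category
    Priority += CCD_InBaseClass; // rank below the class's own methods
  // ... build a declaration/definition pattern for Method at Priority ...
}
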
-/// \brief Add the parenthesized return or parameter type chunk to a code
+/// Add the parenthesized return or parameter type chunk to a code
/// completion string.
static void AddObjCPassingTypeChunk(QualType Type,
unsigned ObjCDeclQuals,
@@ -6769,20 +6872,20 @@ static void AddObjCPassingTypeChunk(QualType Type,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-/// \brief Determine whether the given class is or inherits from a class by
+/// Determine whether the given class is or inherits from a class by
/// the given name.
-static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
+static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
StringRef Name) {
if (!Class)
return false;
-
+
if (Class->getIdentifier() && Class->getIdentifier()->getName() == Name)
return true;
-
+
return InheritsFromClassNamed(Class->getSuperClass(), Name);
}
-
-/// \brief Add code completions for Objective-C Key-Value Coding (KVC) and
+
+/// Add code completions for Objective-C Key-Value Coding (KVC) and
/// Key-Value Observing (KVO).
static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
bool IsInstanceMethod,
@@ -6793,17 +6896,17 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
IdentifierInfo *PropName = Property->getIdentifier();
if (!PropName || PropName->getLength() == 0)
return;
-
+
PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
// Builder that will create each code completion.
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
-
+
// The selector table.
SelectorTable &Selectors = Context.Selectors;
-
+
// The property name, copied into the code completion allocation region
// on demand.
struct KeyHolder {
@@ -6817,22 +6920,22 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
operator const char *() {
if (CopiedKey)
return CopiedKey;
-
+
return CopiedKey = Allocator.CopyString(Key);
}
} Key(Allocator, PropName->getName());
-
+
// The uppercased name of the property name.
std::string UpperKey = PropName->getName();
if (!UpperKey.empty())
UpperKey[0] = toUppercase(UpperKey[0]);
-
+
bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
- Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
+ Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
Property->getType());
- bool ReturnTypeMatchesVoid
+ bool ReturnTypeMatchesVoid
= ReturnType.isNull() || ReturnType->isVoidType();
-
+
// Add the normal accessor -(type)key.
if (IsInstanceMethod &&
KnownSelectors.insert(Selectors.getNullarySelector(PropName)).second &&
@@ -6840,19 +6943,19 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (ReturnType.isNull())
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
Context, Policy, Builder);
-
+
Builder.AddTypedTextChunk(Key);
- Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
-
+
// If we have an integral or boolean property (or the user has provided
// an integral or boolean return type), add the accessor -(type)isKey.
if (IsInstanceMethod &&
- ((!ReturnType.isNull() &&
+ ((!ReturnType.isNull() &&
(ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
- (ReturnType.isNull() &&
- (Property->getType()->isIntegerType() ||
+ (ReturnType.isNull() &&
+ (Property->getType()->isIntegerType() ||
Property->getType()->isBooleanType())))) {
std::string SelectorName = (Twine("is") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
@@ -6863,16 +6966,16 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("BOOL");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
- Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Add the normal mutator.
- if (IsInstanceMethod && ReturnTypeMatchesVoid &&
+ if (IsInstanceMethod && ReturnTypeMatchesVoid &&
!Property->getSetterMethodDecl()) {
std::string SelectorName = (Twine("set") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
@@ -6882,36 +6985,36 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
Builder.AddTypedTextChunk(":");
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
Context, Policy, Builder);
Builder.AddTextChunk(Key);
- Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Indexed and unordered accessors
unsigned IndexedGetterPriority = CCP_CodePattern;
unsigned IndexedSetterPriority = CCP_CodePattern;
unsigned UnorderedGetterPriority = CCP_CodePattern;
unsigned UnorderedSetterPriority = CCP_CodePattern;
- if (const ObjCObjectPointerType *ObjCPointer
+ if (const ObjCObjectPointerType *ObjCPointer
= Property->getType()->getAs<ObjCObjectPointerType>()) {
if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
// If this interface type is not provably derived from a known
// collection, penalize the corresponding completions.
if (!InheritsFromClassNamed(IFace, "NSMutableArray")) {
- IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
if (!InheritsFromClassNamed(IFace, "NSArray"))
IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
}
if (!InheritsFromClassNamed(IFace, "NSMutableSet")) {
- UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
if (!InheritsFromClassNamed(IFace, "NSSet"))
UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
}
@@ -6922,9 +7025,9 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
}
-
+
// Add -(NSUInteger)countOf<key>
- if (IsInstanceMethod &&
+ if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isIntegerType())) {
std::string SelectorName = (Twine("countOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
@@ -6935,16 +7038,16 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
- Results.AddResult(Result(Builder.TakeString(),
- std::min(IndexedGetterPriority,
+ Results.AddResult(Result(Builder.TakeString(),
+ std::min(IndexedGetterPriority,
UnorderedGetterPriority),
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Indexed getters
// Add -(id)objectInKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod &&
@@ -6958,20 +7061,20 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("id");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
- Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Add -(NSArray *)keyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod &&
- (ReturnType.isNull() ||
+ (ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
@@ -6985,17 +7088,17 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("NSArray *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
- Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Add -(void)getKey:(type **)buffer range:(NSRange)inRange
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("get") + UpperKey).str();
@@ -7003,14 +7106,14 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get(SelectorName),
&Context.Idents.get("range")
};
-
+
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
@@ -7023,13 +7126,13 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("NSRange");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("inRange");
- Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Mutable indexed accessors
-
+
// - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
@@ -7037,14 +7140,14 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get("insertObject"),
&Context.Idents.get(SelectorName)
};
-
+
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk("insertObject:");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
@@ -7057,11 +7160,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddPlaceholderChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
- Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("insert") + UpperKey).str();
@@ -7069,14 +7172,14 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get(SelectorName),
&Context.Idents.get("atIndexes")
};
-
+
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSArray *");
@@ -7088,55 +7191,55 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddPlaceholderChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
- Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
- Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("remove") + UpperKey + "AtIndexes").str();
- IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
- Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
@@ -7145,14 +7248,14 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
&Context.Idents.get(SelectorName),
&Context.Idents.get("withObject")
};
-
+
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("NSUInteger");
@@ -7164,28 +7267,28 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("id");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
- Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName1
+ std::string SelectorName1
= (Twine("replace") + UpperKey + "AtIndexes").str();
std::string SelectorName2 = (Twine("with") + UpperKey).str();
IdentifierInfo *SelectorIds[2] = {
&Context.Idents.get(SelectorName1),
&Context.Idents.get(SelectorName2)
};
-
+
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName1 + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("NSIndexSet *");
@@ -7197,15 +7300,15 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("NSArray *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("array");
- Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
- }
-
+ }
+
// Unordered getters
// - (NSEnumerator *)enumeratorOfKey
- if (IsInstanceMethod &&
- (ReturnType.isNull() ||
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
@@ -7219,15 +7322,15 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("NSEnumerator *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
- Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (type *)memberOfKey:(type *)object
- if (IsInstanceMethod &&
+ if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
std::string SelectorName = (Twine("memberOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
@@ -7238,24 +7341,24 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (ReturnType.isNull()) {
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
} else {
- Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
+ Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
Policy,
Builder.getAllocator()));
}
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
- Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
-
+
// Mutable unordered accessors
// - (void)addKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
@@ -7268,17 +7371,17 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
- Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
- }
+ }
// - (void)addKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
@@ -7290,17 +7393,17 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("objects");
- Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
- }
-
+ }
+
// - (void)removeKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
@@ -7312,18 +7415,18 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
- Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
- }
-
+ }
+
// - (void)removeKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("remove") + UpperKey).str();
@@ -7334,16 +7437,16 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("objects");
- Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
- }
+ }
// - (void)intersectKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
@@ -7355,26 +7458,26 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("objects");
- Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
- }
-
+ }
+
// Key-Value Observing
// + (NSSet *)keyPathsForValuesAffectingKey
- if (!IsInstanceMethod &&
- (ReturnType.isNull() ||
+ if (!IsInstanceMethod &&
+ (ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
->getName() == "NSSet"))) {
- std::string SelectorName
+ std::string SelectorName
= (Twine("keyPathsForValuesAffecting") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
@@ -7384,9 +7487,9 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("NSSet<NSString *> *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
- Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCClassMethodDecl));
}
}
@@ -7394,9 +7497,9 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// + (BOOL)automaticallyNotifiesObserversForKey
if (!IsInstanceMethod &&
(ReturnType.isNull() ||
- ReturnType->isIntegerType() ||
+ ReturnType->isIntegerType() ||
ReturnType->isBooleanType())) {
- std::string SelectorName
+ std::string SelectorName
= (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
@@ -7406,9 +7509,9 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTextChunk("BOOL");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-
+
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
- Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCClassMethodDecl));
}
}
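
[editor's note] A sketch of how the selector spellings in this block are derived, for a hypothetical property named employees (UpperKey is the property name with its first character uppercased, as computed at the top of the function):

std::string Key = "employees";
std::string UpperKey = Key;
UpperKey[0] = toUppercase(UpperKey[0]);  // "Employees"

std::string CountSel = (Twine("countOf") + UpperKey).str();
// "countOfEmployees"                    -- indexed/unordered getter
std::string RemoveSel =
    (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
// "removeObjectFromEmployeesAtIndex"    -- mutable indexed accessor
std::string KVOSel =
    (Twine("keyPathsForValuesAffecting") + UpperKey).str();
// "keyPathsForValuesAffectingEmployees" -- KVO class method
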
@@ -7422,7 +7525,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Decl *IDecl = nullptr;
if (CurContext->isObjCContainer()) {
ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
- IDecl = cast<Decl>(OCD);
+ IDecl = OCD;
}
// Determine where we should start searching for methods.
ObjCContainerDecl *SearchDecl = nullptr;
@@ -7431,7 +7534,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
SearchDecl = Impl->getClassInterface();
IsInImplementation = true;
- } else if (ObjCCategoryImplDecl *CatImpl
+ } else if (ObjCCategoryImplDecl *CatImpl
= dyn_cast<ObjCCategoryImplDecl>(D)) {
SearchDecl = CatImpl->getCategoryDecl();
IsInImplementation = true;
@@ -7445,17 +7548,17 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
}
if (!SearchDecl) {
- HandleCodeCompleteResults(this, CodeCompleter,
+ HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
nullptr, 0);
return;
}
-
+
// Find all of the methods that we could declare/implement here.
KnownMethodsMap KnownMethods;
- FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
+ FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
ReturnType, KnownMethods);
-
+
// Add declarations or definitions for each of the known methods.
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -7463,7 +7566,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
- for (KnownMethodsMap::iterator M = KnownMethods.begin(),
+ for (KnownMethodsMap::iterator M = KnownMethods.begin(),
MEnd = KnownMethods.end();
M != MEnd; ++M) {
ObjCMethodDecl *Method = M->second.getPointer();
@@ -7494,7 +7597,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
// Add parameters to the pattern.
unsigned I = 0;
- for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
+ for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
PEnd = Method->param_end();
P != PEnd; (void)++P, ++I) {
// Add the part of the selector name.
@@ -7520,16 +7623,16 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
(*P)->getObjCDeclQualifier(),
Context, Policy,
Builder);
-
+
if (IdentifierInfo *Id = (*P)->getIdentifier())
- Builder.AddTextChunk(Builder.getAllocator().CopyString( Id->getName()));
+        Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
}
if (Method->isVariadic()) {
if (Method->param_size() > 0)
Builder.AddChunk(CodeCompletionString::CK_Comma);
Builder.AddTextChunk("...");
- }
+ }
if (IsInImplementation && Results.includeCodePatterns()) {
// We will be defining the method here, so add a compound statement.
@@ -7544,7 +7647,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
} else
Builder.AddPlaceholderChunk("statements");
-
+
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
}
@@ -7552,28 +7655,28 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
unsigned Priority = CCP_CodePattern;
if (!M->second.getInt())
Priority += CCD_InBaseClass;
-
+
Results.AddResult(Result(Builder.TakeString(), Method, Priority));
}
- // Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
+ // Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
// the properties in this class and its categories.
if (Context.getLangOpts().ObjC2) {
SmallVector<ObjCContainerDecl *, 4> Containers;
Containers.push_back(SearchDecl);
-
+
VisitedSelectorSet KnownSelectors;
- for (KnownMethodsMap::iterator M = KnownMethods.begin(),
+ for (KnownMethodsMap::iterator M = KnownMethods.begin(),
MEnd = KnownMethods.end();
M != MEnd; ++M)
KnownSelectors.insert(M->first);
-
+
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
if (!IFace)
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
IFace = Category->getClassInterface();
-
+
if (IFace)
for (auto *Cat : IFace->visible_categories())
Containers.push_back(Cat);
@@ -7585,15 +7688,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
KnownSelectors, Results);
}
}
-
+
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
+void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnTy,
@@ -7616,21 +7718,21 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
-
+
if (ReturnTy)
Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
- Results.EnterNewScope();
+ Results.EnterNewScope();
for (GlobalMethodPool::iterator M = MethodPool.begin(),
MEnd = MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
&M->second.second;
- MethList && MethList->getMethod();
+ MethList && MethList->getMethod();
MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
-
+
if (AtParameterName) {
// Suggest parameter names we've seen before.
unsigned NumSelIdents = SelIdents.size();
@@ -7646,7 +7748,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
Results.AddResult(Builder.TakeString());
}
}
-
+
continue;
}
@@ -7658,7 +7760,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
Results.MaybeAddResult(R, CurContext);
}
}
-
+
Results.ExitScope();
if (!AtParameterName && !SelIdents.empty() &&
@@ -7677,9 +7779,8 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
}
}
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
@@ -7687,7 +7788,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorDirective);
Results.EnterNewScope();
-
+
// #if <condition>
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -7695,13 +7796,13 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("condition");
Results.AddResult(Builder.TakeString());
-
+
// #ifdef <macro>
Builder.AddTypedTextChunk("ifdef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());
-
+
// #ifndef <macro>
Builder.AddTypedTextChunk("ifndef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7723,7 +7824,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddTypedTextChunk("endif");
Results.AddResult(Builder.TakeString());
}
-
+
// #include "header"
Builder.AddTypedTextChunk("include");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7739,13 +7840,13 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk(">");
Results.AddResult(Builder.TakeString());
-
+
// #define <macro>
Builder.AddTypedTextChunk("define");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());
-
+
// #define <macro>(<args>)
Builder.AddTypedTextChunk("define");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7754,7 +7855,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Builder.TakeString());
-
+
// #undef <macro>
Builder.AddTypedTextChunk("undef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7766,7 +7867,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("number");
Results.AddResult(Builder.TakeString());
-
+
// #line <number> "filename"
Builder.AddTypedTextChunk("line");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7776,7 +7877,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("filename");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
-
+
// #error <message>
Builder.AddTypedTextChunk("error");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7797,7 +7898,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
-
+
// #import <header>
Builder.AddTypedTextChunk("import");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7806,7 +7907,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddTextChunk(">");
Results.AddResult(Builder.TakeString());
}
-
+
// #include_next "header"
Builder.AddTypedTextChunk("include_next");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7814,7 +7915,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
-
+
// #include_next <header>
Builder.AddTypedTextChunk("include_next");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7835,15 +7936,14 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
// FIXME: we don't support #assert or #unassert, so don't suggest them.
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_PreprocessorDirective,
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
CodeCompleteOrdinaryName(S,
- S->getFnParent()? Sema::PCC_RecoveryInFunction
+ S->getFnParent()? Sema::PCC_RecoveryInFunction
: Sema::PCC_Namespace);
}
@@ -7853,11 +7953,11 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
IsDefinition? CodeCompletionContext::CCC_MacroName
: CodeCompletionContext::CCC_MacroNameUse);
if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
- // Add just the names of macros, not their arguments.
+ // Add just the names of macros, not their arguments.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Results.EnterNewScope();
- for (Preprocessor::macro_iterator M = PP.macro_begin(),
+ for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
M != MEnd; ++M) {
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
@@ -7870,19 +7970,19 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
} else if (IsDefinition) {
// FIXME: Can we detect when the user just wrote an include guard above?
}
-
+
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(), Results.size());
+ Results.data(), Results.size());
}
void Sema::CodeCompletePreprocessorExpression() {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorExpression);
-
+
if (!CodeCompleter || CodeCompleter->includeMacros())
AddMacroResults(PP, Results, true);
-
+
// defined (<macro>)
Results.EnterNewScope();
CodeCompletionBuilder Builder(Results.getAllocator(),
@@ -7894,10 +7994,9 @@ void Sema::CodeCompletePreprocessorExpression() {
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Builder.TakeString());
Results.ExitScope();
-
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_PreprocessorExpression,
- Results.data(), Results.size());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
@@ -7906,7 +8005,7 @@ void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
unsigned Argument) {
// FIXME: In the future, we could provide "overload" results, much like we
// do for function calls.
-
+
// Now just ignore this. There will be another code-completion callback
// for the expanded tokens.
}
@@ -7929,9 +8028,8 @@ void Sema::CodeCompleteAvailabilityPlatformName() {
Twine(Platform) + "ApplicationExtension")));
}
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other, Results.data(),
- Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
@@ -7940,16 +8038,17 @@ void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
ResultBuilder Builder(*this, Allocator, CCTUInfo,
CodeCompletionContext::CCC_Recovery);
if (!CodeCompleter || CodeCompleter->includeGlobals()) {
- CodeCompletionDeclConsumer Consumer(Builder,
+ CodeCompletionDeclConsumer Consumer(Builder,
Context.getTranslationUnitDecl());
- LookupVisibleDecls(Context.getTranslationUnitDecl(), LookupAnyName,
- Consumer);
+ LookupVisibleDecls(Context.getTranslationUnitDecl(), LookupAnyName,
+ Consumer,
+ !CodeCompleter || CodeCompleter->loadExternal());
}
-
+
if (!CodeCompleter || CodeCompleter->includeMacros())
AddMacroResults(PP, Builder, true);
-
+
Results.clear();
- Results.insert(Results.end(),
+ Results.insert(Results.end(),
Builder.data(), Builder.data() + Builder.size());
}
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index e6b640f878c2..1d5454ca778b 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -9,15 +9,20 @@
//
// This file implements semantic analysis for C++ Coroutines.
//
+// This file contains references to sections of the Coroutines TS, which
+// can be found at http://wg21.link/coroutines.
+//
//===----------------------------------------------------------------------===//
#include "CoroutineStmtBuilder.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Overload.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
using namespace clang;
@@ -55,20 +60,8 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
return QualType();
}
- LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_traits"),
- FuncLoc, Sema::LookupOrdinaryName);
- if (!S.LookupQualifiedName(Result, StdExp)) {
- S.Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
- << "std::experimental::coroutine_traits";
- return QualType();
- }
-
- ClassTemplateDecl *CoroTraits = Result.getAsSingle<ClassTemplateDecl>();
+ ClassTemplateDecl *CoroTraits = S.lookupCoroutineTraits(KwLoc, FuncLoc);
if (!CoroTraits) {
- Result.suppressDiagnostics();
- // We found something weird. Complain about the first thing we found.
- NamedDecl *Found = *Result.begin();
- S.Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
return QualType();
}
@@ -194,13 +187,25 @@ static QualType lookupCoroutineHandleType(Sema &S, QualType PromiseType,
static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
StringRef Keyword) {
- // 'co_await' and 'co_yield' are not permitted in unevaluated operands.
+ // 'co_await' and 'co_yield' are not permitted in unevaluated operands,
+ // such as subexpressions of \c sizeof.
+ //
+ // [expr.await]p2, emphasis added: "An await-expression shall appear only in
+ // a *potentially evaluated* expression within the compound-statement of a
+ // function-body outside of a handler [...] A context within a function where
+ // an await-expression can appear is called a suspension context of the
+ // function." And per [expr.yield]p1: "A yield-expression shall appear only
+ // within a suspension context of a function."
if (S.isUnevaluatedContext()) {
S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
return false;
}
- // Any other usage must be within a function.
+ // Per [expr.await]p2, any other usage must be within a function.
+ // FIXME: This also covers [expr.await]p2: "An await-expression shall not
+ // appear in a default argument." But the diagnostic QoI here could be
+ // improved to inform the user that default arguments specifically are not
+ // allowed.
auto *FD = dyn_cast<FunctionDecl>(S.CurContext);
if (!FD) {
S.Diag(Loc, isa<ObjCMethodDecl>(S.CurContext)
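
For illustration, a minimal sketch of the unevaluated-operand rule cited in the comment above; `task` and `g` are hypothetical names, not part of this patch:

    task f() {
      auto n = sizeof(co_await g()); // error: 'co_await' cannot be used in
                                     // an unevaluated context
      co_return;
    }
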
@@ -231,22 +236,37 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
// Diagnose when a constructor, destructor, copy/move assignment operator,
// or the function 'main' are declared as a coroutine.
auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ // [class.ctor]p6: "A constructor shall not be a coroutine."
if (MD && isa<CXXConstructorDecl>(MD))
return DiagInvalid(DiagCtor);
+ // [class.dtor]p17: "A destructor shall not be a coroutine."
else if (MD && isa<CXXDestructorDecl>(MD))
return DiagInvalid(DiagDtor);
+ // N4499 [special]p6: "A special member function shall not be a coroutine."
+ // Per C++ [special]p1, special member functions are the "default constructor,
+ // copy constructor and copy assignment operator, move constructor and move
+ // assignment operator, and destructor."
else if (MD && MD->isCopyAssignmentOperator())
return DiagInvalid(DiagCopyAssign);
else if (MD && MD->isMoveAssignmentOperator())
return DiagInvalid(DiagMoveAssign);
+ // [basic.start.main]p3: "The function main shall not be a coroutine."
else if (FD->isMain())
return DiagInvalid(DiagMain);
// Emit a diagnostic for each of the following conditions that is not met.
+ // [expr.const]p2: "An expression e is a core constant expression unless the
+ // evaluation of e [...] would evaluate one of the following expressions:
+ // [...] an await-expression [...] a yield-expression."
if (FD->isConstexpr())
DiagInvalid(DiagConstexpr);
+ // [dcl.spec.auto]p15: "A function declared with a return type that uses a
+ // placeholder type shall not be a coroutine."
if (FD->getReturnType()->isUndeducedType())
DiagInvalid(DiagAutoRet);
+ // [dcl.fct.def.coroutine]p1: "The parameter-declaration-clause of the
+ // coroutine shall not terminate with an ellipsis that is not part of a
+ // parameter-declaration."
if (FD->isVariadic())
DiagInvalid(DiagVarargs);
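
A short sketch of declarations the checks above reject, assuming a hypothetical coroutine return type `task`:

    int main() { co_return 0; }        // error: 'main' cannot be a coroutine
    constexpr task c() { co_return; }  // error: coroutines cannot be constexpr
    auto d() { co_return; }            // error: return type uses a placeholder
    task v(int n, ...) { co_return; }  // error: coroutines cannot use C-style
                                       // varargs
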
@@ -360,12 +380,21 @@ static ExprResult buildMemberCall(Sema &S, Expr *Base, SourceLocation Loc,
if (Result.isInvalid())
return ExprError();
+ // We meant exactly what we asked for. No need for typo correction.
+ if (auto *TE = dyn_cast<TypoExpr>(Result.get())) {
+ S.clearDelayedTypo(TE);
+ S.Diag(Loc, diag::err_no_member)
+ << NameInfo.getName() << Base->getType()->getAsCXXRecordDecl()
+ << Base->getSourceRange();
+ return ExprError();
+ }
+
return S.ActOnCallExpr(nullptr, Result.get(), Loc, Args, Loc, nullptr);
}
// See if return type is coroutine-handle and if so, invoke builtin coro-resume
// on its address. This is to enable experimental support for coroutine-handle
-// returning await_suspend that results in a guranteed tail call to the target
+// returning await_suspend that results in a guaranteed tail call to the target
// coroutine.
static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
SourceLocation Loc) {
@@ -494,9 +523,72 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
CheckVariableDeclarationType(VD);
if (VD->isInvalidDecl())
return nullptr;
- ActOnUninitializedDecl(VD);
+
+ auto *ScopeInfo = getCurFunction();
+ // Build a list of arguments, based on the coroutine functions arguments,
+ // that will be passed to the promise type's constructor.
+ llvm::SmallVector<Expr *, 4> CtorArgExprs;
+
+ // Add implicit object parameter.
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isInstance() && !isLambdaCallOperator(MD)) {
+ ExprResult ThisExpr = ActOnCXXThis(Loc);
+ if (ThisExpr.isInvalid())
+ return nullptr;
+ ThisExpr = CreateBuiltinUnaryOp(Loc, UO_Deref, ThisExpr.get());
+ if (ThisExpr.isInvalid())
+ return nullptr;
+ CtorArgExprs.push_back(ThisExpr.get());
+ }
+ }
+
+ auto &Moves = ScopeInfo->CoroutineParameterMoves;
+ for (auto *PD : FD->parameters()) {
+ if (PD->getType()->isDependentType())
+ continue;
+
+ auto RefExpr = ExprEmpty();
+ auto Move = Moves.find(PD);
+ assert(Move != Moves.end() &&
+ "Coroutine function parameter not inserted into move map");
+ // If a reference to the function parameter exists in the coroutine
+ // frame, use that reference.
+ auto *MoveDecl =
+ cast<VarDecl>(cast<DeclStmt>(Move->second)->getSingleDecl());
+ RefExpr =
+ BuildDeclRefExpr(MoveDecl, MoveDecl->getType().getNonReferenceType(),
+ ExprValueKind::VK_LValue, FD->getLocation());
+ if (RefExpr.isInvalid())
+ return nullptr;
+ CtorArgExprs.push_back(RefExpr.get());
+ }
+
+ // Create an initialization sequence for the promise type using the
+ // constructor arguments, wrapped in a parenthesized list expression.
+ Expr *PLE = new (Context) ParenListExpr(Context, FD->getLocation(),
+ CtorArgExprs, FD->getLocation());
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ VD->getLocation(), /*DirectInit=*/true, PLE);
+ InitializationSequence InitSeq(*this, Entity, Kind, CtorArgExprs,
+ /*TopLevelOfInitList=*/false,
+ /*TreatUnavailableAsInvalid=*/false);
+
+ // Attempt to initialize the promise type with the arguments.
+ // If that fails, fall back to the promise type's default constructor.
+ if (InitSeq) {
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
+ if (Result.isInvalid()) {
+ VD->setInvalidDecl();
+ } else if (Result.get()) {
+ VD->setInit(MaybeCreateExprWithCleanups(Result.get()));
+ VD->setInitStyle(VarDecl::CallInit);
+ CheckCompleteVariableDeclaration(VD);
+ }
+ } else
+ ActOnUninitializedDecl(VD);
+
FD->addDecl(VD);
- assert(!VD->isInvalidDecl());
return VD;
}
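
Sketched effect of the new initialization, using a hypothetical promise type: when the promise has a constructor accepting the coroutine's arguments (preceded by the object parameter for non-static, non-lambda member functions), it is selected; otherwise the promise is default-constructed as before.

    struct task {
      struct promise_type {
        promise_type(int a, float b); // selected for task f(int, float)
        promise_type();               // fallback when no argument match exists
        // get_return_object / initial_suspend / final_suspend /
        // return_void / unhandled_exception omitted for brevity
      };
    };
    task f(int a, float b) { co_return; }
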
@@ -518,6 +610,9 @@ static FunctionScopeInfo *checkCoroutineContext(Sema &S, SourceLocation Loc,
if (ScopeInfo->CoroutinePromise)
return ScopeInfo;
+ if (!S.buildCoroutineParameterMoves(Loc))
+ return nullptr;
+
ScopeInfo->CoroutinePromise = S.buildCoroutinePromise(Loc);
if (!ScopeInfo->CoroutinePromise)
return nullptr;
@@ -654,9 +749,14 @@ ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *E,
if (E->getValueKind() == VK_RValue)
E = CreateMaterializeTemporaryExpr(E->getType(), E, true);
+ // The location of the `co_await` token cannot be used when constructing
+ // the member call expressions since it's before the location of `Expr`, which
+ // is used as the start of the member call expression.
+ SourceLocation CallLoc = E->getExprLoc();
+
// Build the await_ready, await_suspend, await_resume calls.
ReadySuspendResumeResult RSS =
- buildCoawaitCalls(*this, Coroutine->CoroutinePromise, Loc, E);
+ buildCoawaitCalls(*this, Coroutine->CoroutinePromise, CallLoc, E);
if (RSS.IsInvalid)
return ExprError();
@@ -861,6 +961,11 @@ CoroutineStmtBuilder::CoroutineStmtBuilder(Sema &S, FunctionDecl &FD,
!Fn.CoroutinePromise ||
Fn.CoroutinePromise->getType()->isDependentType()) {
this->Body = Body;
+
+ for (auto KV : Fn.CoroutineParameterMoves)
+ this->ParamMovesVector.push_back(KV.second);
+ this->ParamMoves = this->ParamMovesVector;
+
if (!IsPromiseDependentType) {
PromiseRecordDecl = Fn.CoroutinePromise->getType()->getAsCXXRecordDecl();
assert(PromiseRecordDecl && "Type should have already been checked");
@@ -870,7 +975,7 @@ CoroutineStmtBuilder::CoroutineStmtBuilder(Sema &S, FunctionDecl &FD,
bool CoroutineStmtBuilder::buildStatements() {
assert(this->IsValid && "coroutine already invalid");
- this->IsValid = makeReturnObject() && makeParamMoves();
+ this->IsValid = makeReturnObject();
if (this->IsValid && !IsPromiseDependentType)
buildDependentStatements();
return this->IsValid;
@@ -886,12 +991,6 @@ bool CoroutineStmtBuilder::buildDependentStatements() {
return this->IsValid;
}
-bool CoroutineStmtBuilder::buildParameterMoves() {
- assert(this->IsValid && "coroutine already invalid");
- assert(this->ParamMoves.empty() && "param moves already built");
- return this->IsValid = makeParamMoves();
-}
-
bool CoroutineStmtBuilder::makePromiseStmt() {
// Form a declaration statement for the promise declaration, so that AST
// visitors can more easily find it.
@@ -990,7 +1089,12 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
const bool RequiresNoThrowAlloc = ReturnStmtOnAllocFailure != nullptr;
- // FIXME: Add support for stateful allocators.
+ // [dcl.fct.def.coroutine]/7
+ // Look up allocation functions using a parameter list composed of the
+ // requested size of the coroutine state being allocated, followed by
+ // the coroutine function's arguments. If a matching allocation function
+ // exists, use it. Otherwise, use an allocation function that just takes
+ // the requested size.
FunctionDecl *OperatorNew = nullptr;
FunctionDecl *OperatorDelete = nullptr;
@@ -998,10 +1102,73 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
bool PassAlignment = false;
SmallVector<Expr *, 1> PlacementArgs;
- S.FindAllocationFunctions(Loc, SourceRange(),
- /*UseGlobal*/ false, PromiseType,
+ // [dcl.fct.def.coroutine]/7
+ // "The allocation function’s name is looked up in the scope of P.
+ // [...] If the lookup finds an allocation function in the scope of P,
+ // overload resolution is performed on a function call created by assembling
+ // an argument list. The first argument is the amount of space requested,
+ // and has type std::size_t. The lvalues p1 ... pn are the succeeding
+ // arguments."
+ //
+ // ...where "p1 ... pn" are defined earlier as:
+ //
+ // [dcl.fct.def.coroutine]/3
+ // "For a coroutine f that is a non-static member function, let P1 denote the
+ // type of the implicit object parameter (13.3.1) and P2 ... Pn be the types
+ // of the function parameters; otherwise let P1 ... Pn be the types of the
+ // function parameters. Let p1 ... pn be lvalues denoting those objects."
+ if (auto *MD = dyn_cast<CXXMethodDecl>(&FD)) {
+ if (MD->isInstance() && !isLambdaCallOperator(MD)) {
+ ExprResult ThisExpr = S.ActOnCXXThis(Loc);
+ if (ThisExpr.isInvalid())
+ return false;
+ ThisExpr = S.CreateBuiltinUnaryOp(Loc, UO_Deref, ThisExpr.get());
+ if (ThisExpr.isInvalid())
+ return false;
+ PlacementArgs.push_back(ThisExpr.get());
+ }
+ }
+ for (auto *PD : FD.parameters()) {
+ if (PD->getType()->isDependentType())
+ continue;
+
+ // Build a reference to the parameter.
+ auto PDLoc = PD->getLocation();
+ ExprResult PDRefExpr =
+ S.BuildDeclRefExpr(PD, PD->getOriginalType().getNonReferenceType(),
+ ExprValueKind::VK_LValue, PDLoc);
+ if (PDRefExpr.isInvalid())
+ return false;
+
+ PlacementArgs.push_back(PDRefExpr.get());
+ }
+ S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
+ /*DeleteScope*/ Sema::AFS_Both, PromiseType,
/*isArray*/ false, PassAlignment, PlacementArgs,
- OperatorNew, UnusedResult);
+ OperatorNew, UnusedResult, /*Diagnose*/ false);
+
+ // [dcl.fct.def.coroutine]/7
+ // "If no matching function is found, overload resolution is performed again
+ // on a function call created by passing just the amount of space required as
+ // an argument of type std::size_t."
+ if (!OperatorNew && !PlacementArgs.empty()) {
+ PlacementArgs.clear();
+ S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
+ /*DeleteScope*/ Sema::AFS_Both, PromiseType,
+ /*isArray*/ false, PassAlignment, PlacementArgs,
+ OperatorNew, UnusedResult, /*Diagnose*/ false);
+ }
+
+ // [dcl.fct.def.coroutine]/7
+ // "The allocation function’s name is looked up in the scope of P. If this
+ // lookup fails, the allocation function’s name is looked up in the global
+ // scope."
+ if (!OperatorNew) {
+ S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Global,
+ /*DeleteScope*/ Sema::AFS_Both, PromiseType,
+ /*isArray*/ false, PassAlignment, PlacementArgs,
+ OperatorNew, UnusedResult);
+ }
bool IsGlobalOverload =
OperatorNew && !isa<CXXRecordDecl>(OperatorNew->getDeclContext());
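
In outline, the lookup order implemented above, sketched against a hypothetical promise type for a coroutine `task f(int, float)`:

    struct promise_type {
      // 1) Class scope: the requested size followed by lvalues denoting the
      //    coroutine's arguments.
      void *operator new(std::size_t sz, int a, float b);
      // 2) Class scope: just the requested size.
      void *operator new(std::size_t sz);
      // 3) Otherwise the name is looked up in the global scope.
    };
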
@@ -1014,17 +1181,18 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
return false;
PlacementArgs = {StdNoThrow};
OperatorNew = nullptr;
- S.FindAllocationFunctions(Loc, SourceRange(),
- /*UseGlobal*/ true, PromiseType,
+ S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Both,
+ /*DeleteScope*/ Sema::AFS_Both, PromiseType,
/*isArray*/ false, PassAlignment, PlacementArgs,
OperatorNew, UnusedResult);
}
- assert(OperatorNew && "expected definition of operator new to be found");
+ if (!OperatorNew)
+ return false;
if (RequiresNoThrowAlloc) {
const auto *FT = OperatorNew->getType()->getAs<FunctionProtoType>();
- if (!FT->isNothrow(S.Context, /*ResultIfDependent*/ false)) {
+ if (!FT->isNothrow(/*ResultIfDependent*/ false)) {
S.Diag(OperatorNew->getLocation(),
diag::err_coroutine_promise_new_requires_nothrow)
<< OperatorNew;
@@ -1256,10 +1424,6 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
if (Res.isInvalid())
return false;
- if (GroType == FnRetType) {
- GroDecl->setNRVOVariable(true);
- }
-
S.AddInitializerToDecl(GroDecl, Res.get(),
/*DirectInit=*/false);
@@ -1283,6 +1447,8 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
noteMemberDeclaredHere(S, ReturnValue, Fn);
return false;
}
+ if (cast<clang::ReturnStmt>(ReturnStmt.get())->getNRVOCandidate() == GroDecl)
+ GroDecl->setNRVOVariable(true);
this->ReturnStmt = ReturnStmt.get();
return true;
@@ -1304,47 +1470,53 @@ static Expr *castForMoving(Sema &S, Expr *E, QualType T = QualType()) {
.get();
}
-
-/// \brief Build a variable declaration for move parameter.
+/// Build a variable declaration for a moved parameter.
static VarDecl *buildVarDecl(Sema &S, SourceLocation Loc, QualType Type,
IdentifierInfo *II) {
TypeSourceInfo *TInfo = S.Context.getTrivialTypeSourceInfo(Type, Loc);
- VarDecl *Decl =
- VarDecl::Create(S.Context, S.CurContext, Loc, Loc, II, Type, TInfo, SC_None);
+ VarDecl *Decl = VarDecl::Create(S.Context, S.CurContext, Loc, Loc, II, Type,
+ TInfo, SC_None);
Decl->setImplicit();
return Decl;
}
-bool CoroutineStmtBuilder::makeParamMoves() {
- for (auto *paramDecl : FD.parameters()) {
- auto Ty = paramDecl->getType();
- if (Ty->isDependentType())
+// Build statements that move coroutine function parameters to the coroutine
+// frame, and store them on the function scope info.
+bool Sema::buildCoroutineParameterMoves(SourceLocation Loc) {
+ assert(isa<FunctionDecl>(CurContext) && "not in a function scope");
+ auto *FD = cast<FunctionDecl>(CurContext);
+
+ auto *ScopeInfo = getCurFunction();
+ assert(ScopeInfo->CoroutineParameterMoves.empty() &&
+ "Should not build parameter moves twice");
+
+ for (auto *PD : FD->parameters()) {
+ if (PD->getType()->isDependentType())
continue;
- // No need to copy scalars, llvm will take care of them.
- if (Ty->getAsCXXRecordDecl()) {
- ExprResult ParamRef =
- S.BuildDeclRefExpr(paramDecl, paramDecl->getType(),
- ExprValueKind::VK_LValue, Loc); // FIXME: scope?
- if (ParamRef.isInvalid())
- return false;
+ ExprResult PDRefExpr =
+ BuildDeclRefExpr(PD, PD->getType().getNonReferenceType(),
+ ExprValueKind::VK_LValue, Loc); // FIXME: scope?
+ if (PDRefExpr.isInvalid())
+ return false;
- Expr *RCast = castForMoving(S, ParamRef.get());
+ Expr *CExpr = nullptr;
+ if (PD->getType()->getAsCXXRecordDecl() ||
+ PD->getType()->isRValueReferenceType())
+ CExpr = castForMoving(*this, PDRefExpr.get());
+ else
+ CExpr = PDRefExpr.get();
- auto D = buildVarDecl(S, Loc, Ty, paramDecl->getIdentifier());
- S.AddInitializerToDecl(D, RCast, /*DirectInit=*/true);
+ auto D = buildVarDecl(*this, Loc, PD->getType(), PD->getIdentifier());
+ AddInitializerToDecl(D, CExpr, /*DirectInit=*/true);
- // Convert decl to a statement.
- StmtResult Stmt = S.ActOnDeclStmt(S.ConvertDeclToDeclGroup(D), Loc, Loc);
- if (Stmt.isInvalid())
- return false;
+ // Convert decl to a statement.
+ StmtResult Stmt = ActOnDeclStmt(ConvertDeclToDeclGroup(D), Loc, Loc);
+ if (Stmt.isInvalid())
+ return false;
- ParamMovesVector.push_back(Stmt.get());
- }
+ ScopeInfo->CoroutineParameterMoves.insert(std::make_pair(PD, Stmt.get()));
}
-
- // Convert to ArrayRef in CtorArgs structure that builder inherits from.
- ParamMoves = ParamMovesVector;
return true;
}
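
Roughly, for `task f(std::string s, int n)` the statements recorded in CoroutineParameterMoves correspond to (variable names illustrative):

    std::string s_frame = static_cast<std::string &&>(s); // class type: moved
    int n_frame = n;                                       // scalar: copied
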
@@ -1354,3 +1526,27 @@ StmtResult Sema::BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs Args) {
return StmtError();
return Res;
}
+
+ClassTemplateDecl *Sema::lookupCoroutineTraits(SourceLocation KwLoc,
+ SourceLocation FuncLoc) {
+ if (!StdCoroutineTraitsCache) {
+ if (auto StdExp = lookupStdExperimentalNamespace()) {
+ LookupResult Result(*this,
+ &PP.getIdentifierTable().get("coroutine_traits"),
+ FuncLoc, LookupOrdinaryName);
+ if (!LookupQualifiedName(Result, StdExp)) {
+ Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
+ << "std::experimental::coroutine_traits";
+ return nullptr;
+ }
+ if (!(StdCoroutineTraitsCache =
+ Result.getAsSingle<ClassTemplateDecl>())) {
+ Result.suppressDiagnostics();
+ NamedDecl *Found = *Result.begin();
+ Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
+ return nullptr;
+ }
+ }
+ }
+ return StdCoroutineTraitsCache;
+}
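
For reference, the template this lookup caches is the Coroutines TS trait; a user-visible specialization has this shape (sketch, `task` hypothetical):

    namespace std { namespace experimental {
      template <class... Args>
      struct coroutine_traits<task, Args...> {
        using promise_type = task::promise_type;
      };
    }} // namespace std::experimental
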
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index a1fc725f8df4..55542828f783 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -115,7 +115,7 @@ class TypeNameValidatorCCC : public CorrectionCandidateCallback {
} // end anonymous namespace
-/// \brief Determine whether the token kind starts a simple-type-specifier.
+/// Determine whether the token kind starts a simple-type-specifier.
bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
switch (Kind) {
// FIXME: Take into account the current language when deciding whether a
@@ -148,6 +148,9 @@ bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
case tok::kw_decltype:
return getLangOpts().CPlusPlus;
+ case tok::kw_char8_t:
+ return getLangOpts().Char8;
+
default:
break;
}
@@ -163,7 +166,7 @@ enum class UnqualifiedTypeNameLookupResult {
};
} // end anonymous namespace
-/// \brief Tries to perform unqualified lookup of the type decls in bases for
+/// Tries to perform unqualified lookup of the type decls in bases for
/// dependent class.
/// \return \a NotFound if no decls are found, \a FoundNotType if a decl other
/// than a type decl is found, \a FoundType if only type decls are found.
@@ -263,7 +266,7 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
-/// \brief If the identifier refers to a type name within this scope,
+/// If the identifier refers to a type name within this scope,
/// return the declaration of that type.
///
/// This routine performs ordinary name lookup of the identifier II
@@ -724,13 +727,7 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
if (isTemplateName(S, SS ? *SS : EmptySS, /*hasTemplateKeyword=*/false,
Name, nullptr, true, TemplateResult,
MemberOfUnknownSpecialization) == TNK_Type_template) {
- TemplateName TplName = TemplateResult.get();
- Diag(IILoc, diag::err_template_missing_args)
- << (int)getTemplateNameKindForDiagnostics(TplName) << TplName;
- if (TemplateDecl *TplDecl = TplName.getAsTemplateDecl()) {
- Diag(TplDecl->getLocation(), diag::note_template_decl_here)
- << TplDecl->getTemplateParameters()->getSourceRange();
- }
+ diagnoseMissingTemplateArguments(TemplateResult.get(), IILoc);
return;
}
}
@@ -763,7 +760,7 @@ void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
}
}
-/// \brief Determine whether the given result set contains either a type name
+/// Determine whether the given result set contains either a type name
/// or
static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus &&
@@ -1318,7 +1315,7 @@ void Sema::ActOnExitFunctionContext() {
assert(CurContext && "Popped translation unit!");
}
-/// \brief Determine whether we allow overloading of the function
+/// Determine whether we allow overloading of the function
/// PrevDecl with another declaration.
///
/// This routine determines whether overloading is possible, not
@@ -1504,7 +1501,7 @@ static void RemoveUsingDecls(LookupResult &R) {
F.done();
}
-/// \brief Check for this common pattern:
+/// Check for this common pattern:
/// @code
/// class S {
/// S(const S&); // DO NOT IMPLEMENT
@@ -1519,9 +1516,7 @@ static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
return CD->isCopyConstructor();
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- return Method->isCopyAssignmentOperator();
- return false;
+ return D->isCopyAssignmentOperator();
}
// We need this to handle
@@ -1843,7 +1838,7 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
}
}
-/// \brief Look for an Objective-C class in the translation unit.
+/// Look for an Objective-C class in the translation unit.
///
/// \param Id The name of the Objective-C class we're looking for. If
/// typo-correction fixes this name, the Id will be updated
@@ -1913,7 +1908,7 @@ Scope *Sema::getNonFieldDeclScope(Scope *S) {
return S;
}
-/// \brief Looks up the declaration of "struct objc_super" and
+/// Looks up the declaration of "struct objc_super" and
/// saves it for later use in building builtin declaration of
/// objc_msgSendSuper and objc_msgSendSuper_stret. If no such
/// pre-existing declaration exists no action takes place.
@@ -2457,6 +2452,9 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *SA = dyn_cast<SectionAttr>(Attr))
NewAttr = S.mergeSectionAttr(D, SA->getRange(), SA->getName(),
AttrSpellingListIndex);
+ else if (const auto *CSA = dyn_cast<CodeSegAttr>(Attr))
+ NewAttr = S.mergeCodeSegAttr(D, CSA->getRange(), CSA->getName(),
+ AttrSpellingListIndex);
else if (const auto *IA = dyn_cast<MSInheritanceAttr>(Attr))
NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(),
AttrSpellingListIndex,
@@ -2495,7 +2493,7 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, UA->getRange(), AttrSpellingListIndex,
UA->getGuid());
- else if (Attr->duplicatesAllowed() || !DeclHasAttr(D, Attr))
+ else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
if (NewAttr) {
@@ -2675,6 +2673,15 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
}
}
+ // Redeclaration adds code-seg attribute.
+ const auto *NewCSA = New->getAttr<CodeSegAttr>();
+ if (NewCSA && !Old->hasAttr<CodeSegAttr>() &&
+ !NewCSA->isImplicit() && isa<CXXMethodDecl>(New)) {
+ Diag(New->getLocation(), diag::warn_mismatched_section)
+ << 0 /*codeseg*/;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
+
if (!Old->hasAttrs())
return;
@@ -2875,7 +2882,7 @@ static bool haveIncompatibleLanguageLinkages(const T *Old, const T *New) {
template<typename T> static bool isExternC(T *D) { return D->isExternC(); }
static bool isExternC(VarTemplateDecl *) { return false; }
-/// \brief Check whether a redeclaration of an entity introduced by a
+/// Check whether a redeclaration of an entity introduced by a
/// using-declaration is valid, given that we know it's not an overload
/// (nor a hidden tag declaration).
template<typename ExpectedDecl>
@@ -2929,6 +2936,48 @@ static bool hasIdenticalPassObjectSizeAttrs(const FunctionDecl *A,
return std::equal(A->param_begin(), A->param_end(), B->param_begin(), AttrEq);
}
+/// If necessary, adjust the semantic declaration context for a qualified
+/// declaration to name the correct inline namespace within the qualifier.
+static void adjustDeclContextForDeclaratorDecl(DeclaratorDecl *NewD,
+ DeclaratorDecl *OldD) {
+ // The only case where we need to update the DeclContext is when
+ // redeclaration lookup for a qualified name finds a declaration
+ // in an inline namespace within the context named by the qualifier:
+ //
+ // inline namespace N { int f(); }
+ // int ::f(); // Sema DC needs adjusting from :: to N::.
+ //
+ // For unqualified declarations, the semantic context *can* change
+ // along the redeclaration chain (for local extern declarations,
+ // extern "C" declarations, and friend declarations in particular).
+ if (!NewD->getQualifier())
+ return;
+
+ // NewD is probably already in the right context.
+ auto *NamedDC = NewD->getDeclContext()->getRedeclContext();
+ auto *SemaDC = OldD->getDeclContext()->getRedeclContext();
+ if (NamedDC->Equals(SemaDC))
+ return;
+
+ assert((NamedDC->InEnclosingNamespaceSetOf(SemaDC) ||
+ NewD->isInvalidDecl() || OldD->isInvalidDecl()) &&
+ "unexpected context for redeclaration");
+
+ auto *LexDC = NewD->getLexicalDeclContext();
+ auto FixSemaDC = [=](NamedDecl *D) {
+ if (!D)
+ return;
+ D->setDeclContext(SemaDC);
+ D->setLexicalDeclContext(LexDC);
+ };
+
+ FixSemaDC(NewD);
+ if (auto *FD = dyn_cast<FunctionDecl>(NewD))
+ FixSemaDC(FD->getDescribedFunctionTemplate());
+ else if (auto *VD = dyn_cast<VarDecl>(NewD))
+ FixSemaDC(VD->getDescribedVarTemplate());
+}
+
/// MergeFunctionDecl - We just parsed a function 'New' from
/// declarator D which has the same name and scope as a previous
/// declaration 'Old'. Figure out how to resolve this situation,
@@ -2971,6 +3020,14 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
if (Old->isInvalidDecl())
return true;
+ // Disallow redeclaration of some builtins.
+ if (!getASTContext().canBuiltinBeRedeclared(Old)) {
+ Diag(New->getLocation(), diag::err_builtin_redeclare) << Old->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_builtin_declaration)
+ << Old << Old->getType();
+ return true;
+ }
+
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation) =
@@ -3518,7 +3575,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
return true;
}
-/// \brief Completes the merge of two function declarations that are
+/// Completes the merge of two function declarations that are
/// known to be compatible.
///
/// This routine handles the merging of attributes and other
@@ -3581,6 +3638,8 @@ void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
ni = newMethod->param_begin(), ne = newMethod->param_end();
ni != ne && oi != oe; ++ni, ++oi)
mergeParamDeclAttributes(*ni, *oi, *this);
+
+ CheckObjCMethodOverride(newMethod, oldMethod);
}
static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
@@ -3953,6 +4012,7 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
New->setPreviousDecl(Old);
if (NewTemplate)
NewTemplate->setPreviousDecl(OldTemplate);
+ adjustDeclContextForDeclaratorDecl(New, Old);
// Inherit access appropriately.
New->setAccess(Old->getAccess());
@@ -4014,7 +4074,8 @@ void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
}
// Redefinition coming from different files or couldn't do better above.
- Diag(Old->getLocation(), diag::note_previous_definition);
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
}
/// We've just determined that \p Old and \p New both appear to be definitions
@@ -4398,10 +4459,9 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
TypeSpecType == DeclSpec::TST_interface ||
TypeSpecType == DeclSpec::TST_union ||
TypeSpecType == DeclSpec::TST_enum) {
- for (AttributeList* attrs = DS.getAttributes().getList(); attrs;
- attrs = attrs->getNext())
- Diag(attrs->getLoc(), diag::warn_declspec_attribute_ignored)
- << attrs->getName() << GetDiagnosticTypeSpecifierID(TypeSpecType);
+ for (const ParsedAttr &AL : DS.getAttributes())
+ Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
+ << AL.getName() << GetDiagnosticTypeSpecifierID(TypeSpecType);
}
}
@@ -4593,12 +4653,14 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
unsigned DiagID;
if (Record->isUnion()) {
// C++ [class.union]p6:
+ // C++17 [class.union.anon]p2:
// Anonymous unions declared in a named namespace or in the
// global namespace shall be declared static.
+ DeclContext *OwnerScope = Owner->getRedeclContext();
if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
- (isa<TranslationUnitDecl>(Owner) ||
- (isa<NamespaceDecl>(Owner) &&
- cast<NamespaceDecl>(Owner)->getDeclName()))) {
+ (OwnerScope->isTranslationUnit() ||
+ (OwnerScope->isNamespace() &&
+ !cast<NamespaceDecl>(OwnerScope)->isAnonymousNamespace()))) {
Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
<< FixItHint::CreateInsertion(Record->getLocation(), "static ");
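
The rule in a nutshell (the FixIt inserts 'static'; the getRedeclContext change also exempts unnamed namespaces, whose members already have internal linkage):

    namespace N {
      union { int i; };        // error: anonymous union in a named namespace
                               // must be declared 'static'
      static union { int j; }; // OK
    }
    namespace {
      union { int k; };        // OK: enclosing namespace is unnamed
    }
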
@@ -4746,7 +4808,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
}
// Mock up a declarator.
- Declarator Dc(DS, Declarator::MemberContext);
+ Declarator Dc(DS, DeclaratorContext::MemberContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
@@ -4843,7 +4905,7 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
assert(Record && "expected a record!");
// Mock up a declarator.
- Declarator Dc(DS, Declarator::TypeNameContext);
+ Declarator Dc(DS, DeclaratorContext::TypeNameContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct");
@@ -4889,7 +4951,7 @@ DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
return GetNameFromUnqualifiedId(D.getName());
}
-/// \brief Retrieves the declaration name from a parsed unqualified-id.
+/// Retrieves the declaration name from a parsed unqualified-id.
DeclarationNameInfo
Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
DeclarationNameInfo NameInfo;
@@ -4897,13 +4959,13 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
switch (Name.getKind()) {
- case UnqualifiedId::IK_ImplicitSelfParam:
- case UnqualifiedId::IK_Identifier:
+ case UnqualifiedIdKind::IK_ImplicitSelfParam:
+ case UnqualifiedIdKind::IK_Identifier:
NameInfo.setName(Name.Identifier);
NameInfo.setLoc(Name.StartLocation);
return NameInfo;
- case UnqualifiedId::IK_DeductionGuideName: {
+ case UnqualifiedIdKind::IK_DeductionGuideName: {
// C++ [temp.deduct.guide]p3:
// The simple-template-id shall name a class template specialization.
// The template-name shall be the same identifier as the template-name
@@ -4931,7 +4993,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return NameInfo;
}
- case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator));
NameInfo.setLoc(Name.StartLocation);
@@ -4941,14 +5003,14 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
= Name.EndLocation.getRawEncoding();
return NameInfo;
- case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
Name.Identifier));
NameInfo.setLoc(Name.StartLocation);
NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
return NameInfo;
- case UnqualifiedId::IK_ConversionFunctionId: {
+ case UnqualifiedIdKind::IK_ConversionFunctionId: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.ConversionFunctionId, &TInfo);
if (Ty.isNull())
@@ -4960,7 +5022,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return NameInfo;
}
- case UnqualifiedId::IK_ConstructorName: {
+ case UnqualifiedIdKind::IK_ConstructorName: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.ConstructorName, &TInfo);
if (Ty.isNull())
@@ -4972,7 +5034,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return NameInfo;
}
- case UnqualifiedId::IK_ConstructorTemplateId: {
+ case UnqualifiedIdKind::IK_ConstructorTemplateId: {
// In well-formed code, we can only have a constructor
// template-id that refers to the current context, so go there
// to find the actual type being constructed.
@@ -4995,7 +5057,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return NameInfo;
}
- case UnqualifiedId::IK_DestructorName: {
+ case UnqualifiedIdKind::IK_DestructorName: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.DestructorName, &TInfo);
if (Ty.isNull())
@@ -5007,7 +5069,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return NameInfo;
}
- case UnqualifiedId::IK_TemplateId: {
+ case UnqualifiedIdKind::IK_TemplateId: {
TemplateName TName = Name.TemplateId->Template.get();
SourceLocation TNameLoc = Name.TemplateId->TemplateNameLoc;
return Context.getNameForTemplate(TName, TNameLoc);
@@ -5176,7 +5238,7 @@ bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
return false;
}
-/// \brief Diagnose a declaration whose declarator-id has the given
+/// Diagnose a declaration whose declarator-id has the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier of the declarator-id.
@@ -5188,10 +5250,13 @@ bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
///
/// \param Loc The location of the name of the entity being declared.
///
+/// \param IsTemplateId Whether the name is a (simple-)template-id, and thus
+/// we're declaring an explicit / partial specialization / instantiation.
+///
/// \returns true if we cannot safely recover from this error, false otherwise.
bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
- SourceLocation Loc) {
+ SourceLocation Loc, bool IsTemplateId) {
DeclContext *Cur = CurContext;
while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
Cur = Cur->getParent();
@@ -5218,8 +5283,9 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
}
// Check whether the qualifying scope encloses the scope of the original
- // declaration.
- if (!Cur->Encloses(DC)) {
+ // declaration. For a template-id, we perform the checks in
+ // CheckTemplateSpecializationScope.
+ if (!Cur->Encloses(DC) && !IsTemplateId) {
if (Cur->isRecord())
Diag(Loc, diag::err_member_qualification)
<< Name << SS.getRange();
@@ -5331,8 +5397,9 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
return nullptr;
}
if (!D.getDeclSpec().isFriendSpecified()) {
- if (diagnoseQualifiedDeclaration(D.getCXXScopeSpec(), DC,
- Name, D.getIdentifierLoc())) {
+ if (diagnoseQualifiedDeclaration(
+ D.getCXXScopeSpec(), DC, Name, D.getIdentifierLoc(),
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId)) {
if (DC->isRecord())
return nullptr;
@@ -5609,7 +5676,7 @@ TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
return FixedTInfo;
}
-/// \brief Register the given locally-scoped extern "C" declaration so
+/// Register the given locally-scoped extern "C" declaration so
/// that it can be found later for redeclarations. We include any extern "C"
/// declaration that is not visible in the translation unit here, not just
/// function-scope declarations.
@@ -5630,7 +5697,7 @@ NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) {
return Result.empty() ? nullptr : *Result.begin();
}
-/// \brief Diagnose function specifiers on a declaration of an identifier that
+/// Diagnose function specifiers on a declaration of an identifier that
/// does not identify a function.
void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) {
// FIXME: We should probably indicate the identifier in question to avoid
@@ -5670,8 +5737,8 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 1;
- if (D.getName().Kind != UnqualifiedId::IK_Identifier) {
- if (D.getName().Kind == UnqualifiedId::IK_DeductionGuideName)
+ if (D.getName().Kind != UnqualifiedIdKind::IK_Identifier) {
+ if (D.getName().Kind == UnqualifiedIdKind::IK_DeductionGuideName)
Diag(D.getName().StartLocation,
diag::err_deduction_guide_invalid_specifier)
<< "typedef";
@@ -5704,7 +5771,7 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo();
QualType T = TInfo->getType();
if (T->isVariablyModifiedType()) {
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
if (S->getFnParent() == nullptr) {
bool SizeIsNegative;
@@ -5772,7 +5839,7 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
return NewTD;
}
-/// \brief Determines whether the given declaration is an out-of-scope
+/// Determines whether the given declaration is an out-of-scope
/// previous declaration.
///
/// This routine should be invoked when name lookup has found a
@@ -6148,29 +6215,21 @@ static bool shouldConsiderLinkage(const FunctionDecl *FD) {
llvm_unreachable("Unexpected context");
}
-static bool hasParsedAttr(Scope *S, const AttributeList *AttrList,
- AttributeList::Kind Kind) {
- for (const AttributeList *L = AttrList; L; L = L->getNext())
- if (L->getKind() == Kind)
- return true;
- return false;
-}
-
static bool hasParsedAttr(Scope *S, const Declarator &PD,
- AttributeList::Kind Kind) {
+ ParsedAttr::Kind Kind) {
// Check decl attributes on the DeclSpec.
- if (hasParsedAttr(S, PD.getDeclSpec().getAttributes().getList(), Kind))
+ if (PD.getDeclSpec().getAttributes().hasAttribute(Kind))
return true;
// Walk the declarator structure, checking decl attributes that were in a type
// position to the decl itself.
for (unsigned I = 0, E = PD.getNumTypeObjects(); I != E; ++I) {
- if (hasParsedAttr(S, PD.getTypeObject(I).getAttrs(), Kind))
+ if (PD.getTypeObject(I).getAttrs().hasAttribute(Kind))
return true;
}
// Finally, check attributes on the decl itself.
- return hasParsedAttr(S, PD.getAttributes(), Kind);
+ return PD.getAttributes().hasAttribute(Kind);
}
/// Adjust the \c DeclContext for a function or variable that might be a
@@ -6197,7 +6256,7 @@ bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
return true;
}
-/// \brief Returns true if given declaration has external C language linkage.
+/// Returns true if given declaration has external C language linkage.
static bool isDeclExternC(const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->isExternC();
@@ -6303,6 +6362,20 @@ NamedDecl *Sema::ActOnVariableDeclarator(
D.setInvalidType();
}
}
+
+ // OpenCL C++ 1.0 s2.9: the thread_local storage qualifier is not
+ // supported. OpenCL C does not support thread_local either, so we
+ // also reject all other thread storage class specifiers.
+ DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
+ if (TSC != TSCS_unspecified) {
+ bool IsCXX = getLangOpts().OpenCLCPlusPlus;
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_opencl_unknown_type_specifier)
+ << IsCXX << getLangOpts().getOpenCLVersionTuple().getAsString()
+ << DeclSpec::getSpecifierName(TSC) << 1;
+ D.setInvalidType();
+ return nullptr;
+ }
}
DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
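
A minimal sketch of input the new check rejects when compiling OpenCL:

    thread_local int counter; // rejected: thread storage class specifiers
                              // are not supported in OpenCL
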
@@ -6311,8 +6384,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// dllimport globals without explicit storage class are treated as extern. We
// have to change the storage class this early to get the right DeclContext.
if (SC == SC_None && !DC->isRecord() &&
- hasParsedAttr(S, D, AttributeList::AT_DLLImport) &&
- !hasParsedAttr(S, D, AttributeList::AT_DLLExport))
+ hasParsedAttr(S, D, ParsedAttr::AT_DLLImport) &&
+ !hasParsedAttr(S, D, ParsedAttr::AT_DLLExport))
SC = SC_Extern;
DeclContext *OriginalDC = DC;
@@ -6425,7 +6498,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
TemplateParams = MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
- D.getName().getKind() == UnqualifiedId::IK_TemplateId
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
: nullptr,
TemplateParamLists,
@@ -6433,7 +6506,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (TemplateParams) {
if (!TemplateParams->size() &&
- D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
// There is an extraneous 'template<>' for this variable. Complain
// about it, but allow the declaration of the variable.
Diag(TemplateParams->getTemplateLoc(),
@@ -6443,7 +6516,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
TemplateParams->getRAngleLoc());
TemplateParams = nullptr;
} else {
- if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
// This is an explicit specialization or a partial specialization.
// FIXME: Check that we can declare a specialization here.
IsVariableTemplateSpecialization = true;
@@ -6464,9 +6537,9 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
} else {
- assert(
- (Invalid || D.getName().getKind() != UnqualifiedId::IK_TemplateId) &&
- "should have a 'template<>' for this decl");
+ assert((Invalid ||
+ D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) &&
+ "should have a 'template<>' for this decl");
}
if (IsVariableTemplateSpecialization) {
@@ -6842,9 +6915,9 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
if (D.isRedeclaration() && !Previous.empty()) {
- checkDLLAttributeRedeclaration(
- *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewVD,
- IsMemberSpecialization, D.isFunctionDefinition());
+ NamedDecl *Prev = Previous.getRepresentativeDecl();
+ checkDLLAttributeRedeclaration(*this, Prev, NewVD, IsMemberSpecialization,
+ D.isFunctionDefinition());
}
if (NewTemplate) {
@@ -6887,7 +6960,7 @@ static ShadowedDeclKind computeShadowedDeclKind(const NamedDecl *ShadowedDecl,
/// variable \p VD, or an invalid source location otherwise.
static SourceLocation getCaptureLocation(const LambdaScopeInfo *LSI,
const VarDecl *VD) {
- for (const LambdaScopeInfo::Capture &Capture : LSI->Captures) {
+ for (const Capture &Capture : LSI->Captures) {
if (Capture.isVariableCapture() && Capture.getVariable() == VD)
return Capture.getLocation();
}
@@ -6904,7 +6977,7 @@ static bool shouldWarnIfShadowedDecl(const DiagnosticsEngine &Diags,
return !Diags.isIgnored(diag::warn_decl_shadow, R.getNameLoc());
}
-/// \brief Return the declaration shadowed by the given variable \p D, or null
+/// Return the declaration shadowed by the given variable \p D, or null
/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
const LookupResult &R) {
@@ -6921,14 +6994,14 @@ NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
: nullptr;
}
-/// \brief Return the declaration shadowed by the given typedef \p D, or null
+/// Return the declaration shadowed by the given typedef \p D, or null
/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R) {
// Don't warn if typedef declaration is part of a class
if (D->getDeclContext()->isRecord())
return nullptr;
-
+
if (!shouldWarnIfShadowedDecl(Diags, R))
return nullptr;
@@ -6936,7 +7009,7 @@ NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
return isa<TypedefNameDecl>(ShadowedDecl) ? ShadowedDecl : nullptr;
}
-/// \brief Diagnose variable or built-in function shadowing. Implements
+/// Diagnose variable or built-in function shadowing. Implements
/// -Wshadow.
///
/// This method is called whenever a VarDecl is added to a "useful"
@@ -7067,7 +7140,7 @@ void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
}
}
-/// \brief Check -Wshadow without the advantage of a previous lookup.
+/// Check -Wshadow without the advantage of a previous lookup.
void Sema::CheckShadow(Scope *S, VarDecl *D) {
if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation()))
return;
@@ -7225,8 +7298,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
if (NewVD->isInvalidDecl())
return;
- TypeSourceInfo *TInfo = NewVD->getTypeSourceInfo();
- QualType T = TInfo->getType();
+ QualType T = NewVD->getType();
// Defer checking an 'auto' type until its initializer is attached.
if (T->isUndeducedType())
@@ -7364,16 +7436,24 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
NewVD->hasAttr<BlocksAttr>())
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
if ((isVM && NewVD->hasLinkage()) ||
(T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
bool SizeIsNegative;
llvm::APSInt Oversized;
- TypeSourceInfo *FixedTInfo =
- TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
- SizeIsNegative, Oversized);
- if (!FixedTInfo && T->isVariableArrayType()) {
+ TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(
+ NewVD->getTypeSourceInfo(), Context, SizeIsNegative, Oversized);
+ QualType FixedT;
+ if (FixedTInfo && T == NewVD->getTypeSourceInfo()->getType())
+ FixedT = FixedTInfo->getType();
+ else if (FixedTInfo) {
+ // Type and type-as-written are canonically different. We need to fix up
+ // both types separately.
+ FixedT = TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
+ Oversized);
+ }
+ if ((!FixedTInfo || FixedT.isNull()) && T->isVariableArrayType()) {
const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
// FIXME: This won't give the correct result for
// int a[10][n];
@@ -7402,7 +7482,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
}
Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
- NewVD->setType(FixedTInfo->getType());
+ NewVD->setType(FixedT);
NewVD->setTypeSourceInfo(FixedTInfo);
}
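
A simple case the fix-up handles, sketched in C: a variably modified type whose bound constant-folds is rewritten to a constant array type, accepted with a warning rather than a hard error:

    const int n = 10;
    static int a[n]; // variably modified, but the bound folds; accepted
                     // with a warning and 'a' is given type 'int [10]'
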
@@ -7437,7 +7517,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
}
}
-/// \brief Perform semantic checking on a newly-created variable
+/// Perform semantic checking on a newly-created variable
/// declaration.
///
/// This routine performs all of the type-checking required for a
@@ -7508,8 +7588,8 @@ struct FindOverriddenMethod {
enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
} // end anonymous namespace
-/// \brief Report an error regarding overriding, along with any relevant
-/// overriden methods.
+/// Report an error regarding overriding, along with any relevant
+/// overridden methods.
///
/// \param DiagID the primary error to report.
/// \param MD the overriding method.
@@ -7624,7 +7704,7 @@ void Sema::MarkTypoCorrectedFunctionDefinition(const NamedDecl *F) {
TypoCorrectedFunctionDefinitions.insert(F);
}
-/// \brief Generate diagnostics for an invalid function redeclaration.
+/// Generate diagnostics for an invalid function redeclaration.
///
/// This routine handles generating the diagnostic messages for an invalid
/// function redeclaration, including finding possible similar declarations
@@ -8176,7 +8256,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
bool &AddToScope) {
QualType R = TInfo->getType();
- assert(R.getTypePtr()->isFunctionType());
+ assert(R->isFunctionType());
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
@@ -8261,7 +8341,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
- D.getName().getKind() == UnqualifiedId::IK_TemplateId
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
: nullptr,
TemplateParamLists, isFriend, isMemberSpecialization,
@@ -8318,7 +8398,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// and clearly the user wants a template specialization. So
// we need to insert '<>' after the name.
SourceLocation InsertLoc;
- if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
InsertLoc = D.getName().getSourceRange().getEnd();
InsertLoc = getLocForEndOfToken(InsertLoc);
}
@@ -8655,6 +8735,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->dropAttr<SectionAttr>();
}
+ // Apply an implicit CodeSegAttr from the class declspec, or an implicit
+ // SectionAttr from #pragma code_seg if one is active.
+ if (!NewFD->hasAttr<CodeSegAttr>()) {
+ if (Attr *SAttr = getImplicitCodeSegOrSectionAttrForFunction(NewFD,
+ D.isFunctionDefinition())) {
+ NewFD->addAttr(SAttr);
+ }
+ }
+
// Handle attributes.
ProcessDeclAttributes(S, NewFD, D);
@@ -8719,7 +8808,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// If the declarator is a template-id, translate the parser's template
// argument list into our AST format.
- if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
@@ -8785,10 +8874,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (CurContext->isDependentContext() && CurContext->isRecord()
&& !isFriend) {
isDependentClassScopeExplicitSpecialization = true;
- Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
- diag::ext_function_specialization_in_class :
- diag::err_function_specialization_in_class)
- << NewFD->getDeclName();
} else if (!NewFD->isInvalidDecl() &&
CheckFunctionTemplateSpecialization(
NewFD, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr),
@@ -8994,19 +9079,22 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setRangeEnd(D.getSourceRange().getEnd());
if (D.isRedeclaration() && !Previous.empty()) {
- checkDLLAttributeRedeclaration(
- *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewFD,
- isMemberSpecialization || isFunctionTemplateSpecialization,
- D.isFunctionDefinition());
+ NamedDecl *Prev = Previous.getRepresentativeDecl();
+ checkDLLAttributeRedeclaration(*this, Prev, NewFD,
+ isMemberSpecialization ||
+ isFunctionTemplateSpecialization,
+ D.isFunctionDefinition());
}
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
- if (II && II->isStr("cudaConfigureCall") && !NewFD->isInvalidDecl() &&
+ if (II &&
+ II->isStr(getLangOpts().HIP ? "hipConfigureCall"
+ : "cudaConfigureCall") &&
+ !NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return);
-
Context.setcudaConfigureCallDecl(NewFD);
}
@@ -9073,22 +9161,95 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// Here we have an function template explicit specialization at class scope.
- // The actually specialization will be postponed to template instatiation
+ // The actual specialization will be postponed to template instantiation
// time via the ClassScopeFunctionSpecializationDecl node.
if (isDependentClassScopeExplicitSpecialization) {
ClassScopeFunctionSpecializationDecl *NewSpec =
ClassScopeFunctionSpecializationDecl::Create(
- Context, CurContext, SourceLocation(),
+ Context, CurContext, NewFD->getLocation(),
cast<CXXMethodDecl>(NewFD),
HasExplicitTemplateArgs, TemplateArgs);
CurContext->addDecl(NewSpec);
AddToScope = false;
}
+ // Diagnose availability attributes. Availability cannot be used on functions
+ // that are run during load/unload.
+ if (const auto *attr = NewFD->getAttr<AvailabilityAttr>()) {
+ if (NewFD->hasAttr<ConstructorAttr>()) {
+ Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
+ << 1;
+ NewFD->dropAttr<AvailabilityAttr>();
+ }
+ if (NewFD->hasAttr<DestructorAttr>()) {
+ Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
+ << 2;
+ NewFD->dropAttr<AvailabilityAttr>();
+ }
+ }
+
return NewFD;
}
-/// \brief Checks if the new declaration declared in dependent context must be
+/// Return a CodeSegAttr from a containing class. The Microsoft docs say
+/// when __declspec(code_seg) "is applied to a class, all member functions of
+/// the class and nested classes -- this includes compiler-generated special
+/// member functions -- are put in the specified segment."
+/// The actual behavior is a little more complicated. The Microsoft compiler
+/// won't check outer classes if there is an active value from #pragma code_seg.
+/// The CodeSeg is always applied from the direct parent but only from outer
+/// classes when the #pragma code_seg stack is empty. See:
+/// https://reviews.llvm.org/D22931; the Microsoft feedback page is no longer
+/// available, as MS has removed it.
+static Attr *getImplicitCodeSegAttrFromClass(Sema &S, const FunctionDecl *FD) {
+ const auto *Method = dyn_cast<CXXMethodDecl>(FD);
+ if (!Method)
+ return nullptr;
+ const CXXRecordDecl *Parent = Method->getParent();
+ if (const auto *SAttr = Parent->getAttr<CodeSegAttr>()) {
+ Attr *NewAttr = SAttr->clone(S.getASTContext());
+ NewAttr->setImplicit(true);
+ return NewAttr;
+ }
+
+ // The Microsoft compiler won't check outer classes for the CodeSeg
+ // when the #pragma code_seg stack is active.
+ if (S.CodeSegStack.CurrentValue)
+ return nullptr;
+
+ while ((Parent = dyn_cast<CXXRecordDecl>(Parent->getParent()))) {
+ if (const auto *SAttr = Parent->getAttr<CodeSegAttr>()) {
+ Attr *NewAttr = SAttr->clone(S.getASTContext());
+ NewAttr->setImplicit(true);
+ return NewAttr;
+ }
+ }
+ return nullptr;
+}
+
+/// Returns an implicit CodeSegAttr if a __declspec(code_seg) is found on a
+/// containing class. Otherwise it will return implicit SectionAttr if the
+/// function is a definition and there is an active value on CodeSegStack
+/// (from the current #pragma code-seg value).
+///
+/// \param FD Function being declared.
+/// \param IsDefinition Whether it is a definition or just a declaration.
+/// \returns A CodeSegAttr or SectionAttr to apply to the function or
+/// nullptr if no attribute should be added.
+Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
+ bool IsDefinition) {
+ if (Attr *A = getImplicitCodeSegAttrFromClass(*this, FD))
+ return A;
+ if (!FD->hasAttr<SectionAttr>() && IsDefinition &&
+ CodeSegStack.CurrentValue) {
+ return SectionAttr::CreateImplicit(getASTContext(),
+ SectionAttr::Declspec_allocate,
+ CodeSegStack.CurrentValue->getString(),
+ CodeSegStack.CurrentPragmaLocation);
+ }
+ return nullptr;
+}
/// Checks if the new declaration, declared in a dependent context, must be
/// put in the same redeclaration chain as the specified declaration.
///
/// \param D Declaration that is checked.
@@ -9114,7 +9275,524 @@ bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
D->getFriendObjectKind() != Decl::FOK_None);
}
-/// \brief Perform semantic checking of a new function declaration.
+namespace MultiVersioning {
+enum Type { None, Target, CPUSpecific, CPUDispatch };
+} // namespace MultiVersioning
+
+static MultiVersioning::Type
+getMultiVersionType(const FunctionDecl *FD) {
+ if (FD->hasAttr<TargetAttr>())
+ return MultiVersioning::Target;
+ if (FD->hasAttr<CPUDispatchAttr>())
+ return MultiVersioning::CPUDispatch;
+ if (FD->hasAttr<CPUSpecificAttr>())
+ return MultiVersioning::CPUSpecific;
+ return MultiVersioning::None;
+}
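+// For reference, the attribute spellings these map to (CPU and feature
+// names below are illustrative):
+//   __attribute__((target("avx2")))                   -> Target
+//   __attribute__((cpu_dispatch(ivybridge, generic))) -> CPUDispatch
+//   __attribute__((cpu_specific(ivybridge)))          -> CPUSpecific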
+/// Check the target attribute of the function for MultiVersion
+/// validity.
+///
+/// Returns true if there was an error, false otherwise.
+static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
+ const auto *TA = FD->getAttr<TargetAttr>();
+ assert(TA && "MultiVersion Candidate requires a target attribute");
+ TargetAttr::ParsedTargetAttr ParseInfo = TA->parse();
+ const TargetInfo &TargetInfo = S.Context.getTargetInfo();
+ enum ErrType { Feature = 0, Architecture = 1 };
+
+ if (!ParseInfo.Architecture.empty() &&
+ !TargetInfo.validateCpuIs(ParseInfo.Architecture)) {
+ S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
+ << Architecture << ParseInfo.Architecture;
+ return true;
+ }
+
+ for (const auto &Feat : ParseInfo.Features) {
+ auto BareFeat = StringRef{Feat}.substr(1);
+ if (Feat[0] == '-') {
+ S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
+ << Feature << ("no-" + BareFeat).str();
+ return true;
+ }
+
+ if (!TargetInfo.validateCpuSupports(BareFeat) ||
+ !TargetInfo.isValidFeatureName(BareFeat)) {
+ S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
+ << Feature << BareFeat;
+ return true;
+ }
+ }
+ return false;
+}
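+// A sketch of what CheckMultiVersionValue rejects on a typical x86 target
+// (feature and architecture names are illustrative):
+//   __attribute__((target("sse4.2")))      int f(); // OK: valid feature
+//   __attribute__((target("no-sse3")))     int f(); // error: negated feature
+//   __attribute__((target("arch=notacpu"))) int f(); // error: unknown arch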
+
+static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
+ const FunctionDecl *NewFD,
+ bool CausesMV,
+ MultiVersioning::Type MVType) {
+ enum DoesntSupport {
+ FuncTemplates = 0,
+ VirtFuncs = 1,
+ DeducedReturn = 2,
+ Constructors = 3,
+ Destructors = 4,
+ DeletedFuncs = 5,
+ DefaultedFuncs = 6,
+ ConstexprFuncs = 7,
+ };
+ enum Different {
+ CallingConv = 0,
+ ReturnType = 1,
+ ConstexprSpec = 2,
+ InlineSpec = 3,
+ StorageClass = 4,
+ Linkage = 5
+ };
+
+ bool IsCPUSpecificCPUDispatchMVType =
+ MVType == MultiVersioning::CPUDispatch ||
+ MVType == MultiVersioning::CPUSpecific;
+
+ if (OldFD && !OldFD->getType()->getAs<FunctionProtoType>()) {
+ S.Diag(OldFD->getLocation(), diag::err_multiversion_noproto);
+ S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
+ return true;
+ }
+
+ if (!NewFD->getType()->getAs<FunctionProtoType>())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_noproto);
+
+ if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
+ if (OldFD)
+ S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ // For now, disallow all other attributes. These should be opt-in, but
+ // an analysis of all of them is a future FIXME.
+ if (CausesMV && OldFD &&
+ std::distance(OldFD->attr_begin(), OldFD->attr_end()) != 1) {
+ S.Diag(OldFD->getLocation(), diag::err_multiversion_no_other_attrs)
+ << IsCPUSpecificCPUDispatchMVType;
+ S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
+ return true;
+ }
+
+ if (std::distance(NewFD->attr_begin(), NewFD->attr_end()) != 1)
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_no_other_attrs)
+ << IsCPUSpecificCPUDispatchMVType;
+
+ if (NewFD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << FuncTemplates;
+
+ if (const auto *NewCXXFD = dyn_cast<CXXMethodDecl>(NewFD)) {
+ if (NewCXXFD->isVirtual())
+ return S.Diag(NewCXXFD->getLocation(),
+ diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << VirtFuncs;
+
+ if (const auto *NewCXXCtor = dyn_cast<CXXConstructorDecl>(NewFD))
+ return S.Diag(NewCXXCtor->getLocation(),
+ diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << Constructors;
+
+ if (const auto *NewCXXDtor = dyn_cast<CXXDestructorDecl>(NewFD))
+ return S.Diag(NewCXXDtor->getLocation(),
+ diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << Destructors;
+ }
+
+ if (NewFD->isDeleted())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << DeletedFuncs;
+
+ if (NewFD->isDefaulted())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << DefaultedFuncs;
+
+ if (NewFD->isConstexpr() && (MVType == MultiVersioning::CPUDispatch ||
+ MVType == MultiVersioning::CPUSpecific))
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << ConstexprFuncs;
+
+ QualType NewQType = S.getASTContext().getCanonicalType(NewFD->getType());
+ const auto *NewType = cast<FunctionType>(NewQType);
+ QualType NewReturnType = NewType->getReturnType();
+
+ if (NewReturnType->isUndeducedType())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
+ << IsCPUSpecificCPUDispatchMVType << DeducedReturn;
+
+ // Only allow transition to MultiVersion if it hasn't been used.
+ if (OldFD && CausesMV && OldFD->isUsed(false))
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_after_used);
+
+ // Ensure the new declaration matches the old one in calling convention,
+ // return type, specifiers, linkage, and exception specification.
+ if (OldFD) {
+ QualType OldQType = S.getASTContext().getCanonicalType(OldFD->getType());
+ const auto *OldType = cast<FunctionType>(OldQType);
+ FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
+ FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
+
+ if (OldTypeInfo.getCC() != NewTypeInfo.getCC())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
+ << CallingConv;
+
+ QualType OldReturnType = OldType->getReturnType();
+
+ if (OldReturnType != NewReturnType)
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
+ << ReturnType;
+
+ if (OldFD->isConstexpr() != NewFD->isConstexpr())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
+ << ConstexprSpec;
+
+ if (OldFD->isInlineSpecified() != NewFD->isInlineSpecified())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
+ << InlineSpec;
+
+ if (OldFD->getStorageClass() != NewFD->getStorageClass())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
+ << StorageClass;
+
+ if (OldFD->isExternC() != NewFD->isExternC())
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
+ << Linkage;
+
+ if (S.CheckEquivalentExceptionSpec(
+ OldFD->getType()->getAs<FunctionProtoType>(), OldFD->getLocation(),
+ NewFD->getType()->getAs<FunctionProtoType>(), NewFD->getLocation()))
+ return true;
+ }
+ return false;
+}
+
+/// Check the validity of a multiversion function declaration that is the
+/// first of its kind. Also sets the multiversion state of the function itself.
+///
+/// This sets NewFD->isInvalidDecl() to true if there was an error.
+///
+/// Returns true if there was an error, false otherwise.
+static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
+ MultiVersioning::Type MVType,
+ const TargetAttr *TA,
+ const CPUDispatchAttr *CPUDisp,
+ const CPUSpecificAttr *CPUSpec) {
+ assert(MVType != MultiVersioning::None &&
+ "Function lacks multiversion attribute");
+
+ // Target only causes MV if it is default, otherwise this is a normal
+ // function.
+ if (MVType == MultiVersioning::Target && !TA->isDefaultVersion())
+ return false;
+
+ if (MVType == MultiVersioning::Target && CheckMultiVersionValue(S, FD)) {
+ FD->setInvalidDecl();
+ return true;
+ }
+
+ if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVType)) {
+ FD->setInvalidDecl();
+ return true;
+ }
+
+ FD->setIsMultiVersion();
+ return false;
+}
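+// Sketch: for a first declaration, only the "default" target begins a
+// multiversion set; any other target string is just an optimization hint.
+//   __attribute__((target("avx2")))    int f(); // plain hint, not MV (yet)
+//   __attribute__((target("default"))) int g(); // g becomes multiversioned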
+
+static bool CheckTargetCausesMultiVersioning(
+ Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
+ bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
+ LookupResult &Previous) {
+ const auto *OldTA = OldFD->getAttr<TargetAttr>();
+ TargetAttr::ParsedTargetAttr NewParsed = NewTA->parse();
+ // Sort order doesn't matter; it just needs to be consistent.
+ llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
+
+ // If the old decl is NOT MultiVersioned yet, and we don't cause that
+ // to change, this is a simple redeclaration.
+ if (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr())
+ return false;
+
+ // Otherwise, this decl causes MultiVersioning.
+ if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
+ S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
+ MultiVersioning::Target)) {
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ if (CheckMultiVersionValue(S, NewFD)) {
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ if (CheckMultiVersionValue(S, OldFD)) {
+ S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ TargetAttr::ParsedTargetAttr OldParsed =
+ OldTA->parse(std::less<std::string>());
+
+ if (OldParsed == NewParsed) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ for (const auto *FD : OldFD->redecls()) {
+ const auto *CurTA = FD->getAttr<TargetAttr>();
+ if (!CurTA || CurTA->isInherited()) {
+ S.Diag(FD->getLocation(), diag::err_multiversion_required_in_redecl)
+ << 0;
+ S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
+
+ OldFD->setIsMultiVersion();
+ NewFD->setIsMultiVersion();
+ Redeclaration = false;
+ MergeTypeWithPrevious = false;
+ OldDecl = nullptr;
+ Previous.clear();
+ return false;
+}
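+// Sketch of the transition handled above (feature names are illustrative):
+//   __attribute__((target("sse4.2"))) int f(); // a plain hint until...
+//   __attribute__((target("avx2")))   int f(); // ...this second variant
+//                                              // makes both multiversioned.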
+
+/// Check the validity of a new function declaration being added to an existing
+/// multiversioned declaration collection.
+static bool CheckMultiVersionAdditionalDecl(
+ Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
+ MultiVersioning::Type NewMVType, const TargetAttr *NewTA,
+ const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
+ bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
+ LookupResult &Previous) {
+
+ MultiVersioning::Type OldMVType = getMultiVersionType(OldFD);
+ // Disallow mixing of multiversioning types.
+ if ((OldMVType == MultiVersioning::Target &&
+ NewMVType != MultiVersioning::Target) ||
+ (NewMVType == MultiVersioning::Target &&
+ OldMVType != MultiVersioning::Target)) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
+ S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ TargetAttr::ParsedTargetAttr NewParsed;
+ if (NewTA) {
+ NewParsed = NewTA->parse();
+ llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
+ }
+
+ bool UseMemberUsingDeclRules =
+ S.CurContext->isRecord() && !NewFD->getFriendObjectKind();
+
+ // Next, check ALL non-overloads to see if this is a redeclaration of a
+ // previous member of the MultiVersion set.
+ for (NamedDecl *ND : Previous) {
+ FunctionDecl *CurFD = ND->getAsFunction();
+ if (!CurFD)
+ continue;
+ if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
+ continue;
+
+ if (NewMVType == MultiVersioning::Target) {
+ const auto *CurTA = CurFD->getAttr<TargetAttr>();
+ if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = ND;
+ return false;
+ }
+
+ TargetAttr::ParsedTargetAttr CurParsed =
+ CurTA->parse(std::less<std::string>());
+ if (CurParsed == NewParsed) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ } else {
+ const auto *CurCPUSpec = CurFD->getAttr<CPUSpecificAttr>();
+ const auto *CurCPUDisp = CurFD->getAttr<CPUDispatchAttr>();
+ // Handle CPUDispatch/CPUSpecific versions.
+ // Only one CPUDispatch function is allowed; this makes mismatches go
+ // through the redeclaration errors below.
+ if (NewMVType == MultiVersioning::CPUDispatch &&
+ CurFD->hasAttr<CPUDispatchAttr>()) {
+ if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
+ std::equal(
+ CurCPUDisp->cpus_begin(), CurCPUDisp->cpus_end(),
+ NewCPUDisp->cpus_begin(),
+ [](const IdentifierInfo *Cur, const IdentifierInfo *New) {
+ return Cur->getName() == New->getName();
+ })) {
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = ND;
+ return false;
+ }
+
+ // If the declarations don't match, this is an error condition.
+ S.Diag(NewFD->getLocation(), diag::err_cpu_dispatch_mismatch);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ if (NewMVType == MultiVersioning::CPUSpecific && CurCPUSpec) {
+
+ if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
+ std::equal(
+ CurCPUSpec->cpus_begin(), CurCPUSpec->cpus_end(),
+ NewCPUSpec->cpus_begin(),
+ [](const IdentifierInfo *Cur, const IdentifierInfo *New) {
+ return Cur->getName() == New->getName();
+ })) {
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = ND;
+ return false;
+ }
+
+ // Only one version of CPUSpecific is allowed for each CPU.
+ for (const IdentifierInfo *CurII : CurCPUSpec->cpus()) {
+ for (const IdentifierInfo *NewII : NewCPUSpec->cpus()) {
+ if (CurII == NewII) {
+ S.Diag(NewFD->getLocation(), diag::err_cpu_specific_multiple_defs)
+ << NewII;
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
+ }
+ }
+ // If the two decls aren't the same MVType, there is no possible error
+ // condition.
+ }
+ }
+
+ // Else, this is simply a non-redeclaration case. Checking the 'value' is
+ // only necessary in the Target case, since the CPUSpecific/Dispatch cases
+ // are handled in the attribute-adding step.
+ if (NewMVType == MultiVersioning::Target &&
+ CheckMultiVersionValue(S, NewFD)) {
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, false, NewMVType)) {
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ NewFD->setIsMultiVersion();
+ Redeclaration = false;
+ MergeTypeWithPrevious = false;
+ OldDecl = nullptr;
+ Previous.clear();
+ return false;
+}
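+// e.g. (illustrative): cpu_specific declarations whose CPU lists overlap
+// without matching exactly are rejected above:
+//   __attribute__((cpu_specific(ivybridge)))       void f(void);
+//   __attribute__((cpu_specific(ivybridge, atom))) void f(void);
+//   // error: multiple definitions for 'ivybridge'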
+
+/// Check the validity of a multiversion function declaration.
+/// Also sets the multiversion state of the function itself.
+///
+/// This sets NewFD->isInvalidDecl() to true if there was an error.
+///
+/// Returns true if there was an error, false otherwise.
+static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
+ bool &Redeclaration, NamedDecl *&OldDecl,
+ bool &MergeTypeWithPrevious,
+ LookupResult &Previous) {
+ const auto *NewTA = NewFD->getAttr<TargetAttr>();
+ const auto *NewCPUDisp = NewFD->getAttr<CPUDispatchAttr>();
+ const auto *NewCPUSpec = NewFD->getAttr<CPUSpecificAttr>();
+
+ // Mixing Multiversioning types is prohibited.
+ if ((NewTA && NewCPUDisp) || (NewTA && NewCPUSpec) ||
+ (NewCPUDisp && NewCPUSpec)) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ MultiVersioning::Type MVType = getMultiVersionType(NewFD);
+
+ // 'main' isn't allowed to become a multiversion function; however, it IS
+ // permitted to be marked with the 'target' optimization hint.
+ if (NewFD->isMain()) {
+ if ((MVType == MultiVersioning::Target && NewTA->isDefaultVersion()) ||
+ MVType == MultiVersioning::CPUDispatch ||
+ MVType == MultiVersioning::CPUSpecific) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ return false;
+ }
+
+ if (!OldDecl || !OldDecl->getAsFunction() ||
+ OldDecl->getDeclContext()->getRedeclContext() !=
+ NewFD->getDeclContext()->getRedeclContext()) {
+ // If there's no previous declaration, AND this isn't attempting to cause
+ // multiversioning, this isn't an error condition.
+ if (MVType == MultiVersioning::None)
+ return false;
+ return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA, NewCPUDisp,
+ NewCPUSpec);
+ }
+
+ FunctionDecl *OldFD = OldDecl->getAsFunction();
+
+ if (!OldFD->isMultiVersion() && MVType == MultiVersioning::None)
+ return false;
+
+ if (OldFD->isMultiVersion() && MVType == MultiVersioning::None) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
+ << (getMultiVersionType(OldFD) != MultiVersioning::Target);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ // Handle the case where the target attribute potentially causes
+ // multiversioning.
+ if (!OldFD->isMultiVersion() && MVType == MultiVersioning::Target)
+ return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
+ Redeclaration, OldDecl,
+ MergeTypeWithPrevious, Previous);
+ // Previous declarations lack CPUDispatch/CPUSpecific.
+ if (!OldFD->isMultiVersion()) {
+ S.Diag(OldFD->getLocation(), diag::err_multiversion_required_in_redecl)
+ << 1;
+ S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ // At this point, we have a multiversion function decl (in OldFD) AND an
+ // appropriate attribute in the current function decl. Verify that these
+ // are still compatible with previous declarations.
+ return CheckMultiVersionAdditionalDecl(
+ S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, Redeclaration,
+ OldDecl, MergeTypeWithPrevious, Previous);
+}
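+// Putting it together, a well-formed target multiversion set looks like
+// (illustrative):
+//   __attribute__((target("avx2")))    int f(void) { return 2; }
+//   __attribute__((target("default"))) int f(void) { return 0; }
+// while main() may carry target("avx2") as a hint but may never be
+// multiversioned.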
+
+/// Perform semantic checking of a new function declaration.
///
/// Performs semantic analysis of the new function declaration
/// NewFD. This routine performs all semantic checking that does not
@@ -9201,6 +9879,10 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
}
+ if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl,
+ MergeTypeWithPrevious, Previous))
+ return Redeclaration;
+
// C++11 [dcl.constexpr]p8:
// A constexpr specifier for a non-static member function that is not
// a constructor declares that member function to be const.
@@ -9250,15 +9932,16 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
Previous.clear();
Previous.addDecl(OldDecl);
- if (FunctionTemplateDecl *OldTemplateDecl
- = dyn_cast<FunctionTemplateDecl>(OldDecl)) {
- NewFD->setPreviousDeclaration(OldTemplateDecl->getTemplatedDecl());
+ if (FunctionTemplateDecl *OldTemplateDecl =
+ dyn_cast<FunctionTemplateDecl>(OldDecl)) {
+ auto *OldFD = OldTemplateDecl->getTemplatedDecl();
+ NewFD->setPreviousDeclaration(OldFD);
+ adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
FunctionTemplateDecl *NewTemplateDecl
= NewFD->getDescribedFunctionTemplate();
assert(NewTemplateDecl && "Template/non-template mismatch");
- if (CXXMethodDecl *Method
- = dyn_cast<CXXMethodDecl>(NewTemplateDecl->getTemplatedDecl())) {
- Method->setAccess(OldTemplateDecl->getAccess());
+ if (NewFD->isCXXClassMember()) {
+ NewFD->setAccess(OldTemplateDecl->getAccess());
NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
}
@@ -9270,22 +9953,22 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
assert(OldTemplateDecl->isMemberSpecialization());
// Explicit specializations of a member template do not inherit deleted
// status from the parent member template that they are specializing.
- if (OldTemplateDecl->getTemplatedDecl()->isDeleted()) {
- FunctionDecl *const OldTemplatedDecl =
- OldTemplateDecl->getTemplatedDecl();
+ if (OldFD->isDeleted()) {
// FIXME: This assert will not hold in the presence of modules.
- assert(OldTemplatedDecl->getCanonicalDecl() == OldTemplatedDecl);
+ assert(OldFD->getCanonicalDecl() == OldFD);
// FIXME: We need an update record for this AST mutation.
- OldTemplatedDecl->setDeletedAsWritten(false);
+ OldFD->setDeletedAsWritten(false);
}
}
} else {
if (shouldLinkDependentDeclWithPrevious(NewFD, OldDecl)) {
+ auto *OldFD = cast<FunctionDecl>(OldDecl);
// This needs to happen first so that 'inline' propagates.
- NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl));
- if (isa<CXXMethodDecl>(NewFD))
- NewFD->setAccess(OldDecl->getAccess());
+ NewFD->setPreviousDeclaration(OldFD);
+ adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
+ if (NewFD->isCXXClassMember())
+ NewFD->setAccess(OldFD->getAccess());
}
}
} else if (!getLangOpts().CPlusPlus && MayNeedOverloadableChecks &&
@@ -9440,7 +10123,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
else if (auto *MPT = T->getAs<MemberPointerType>())
T = MPT->getPointeeType();
if (auto *FPT = T->getAs<FunctionProtoType>())
- if (FPT->isNothrow(Context))
+ if (FPT->isNothrow())
return true;
return false;
};
@@ -9951,7 +10634,7 @@ namespace {
S.DiagRuntimeBehavior(DRE->getLocStart(), DRE,
S.PDiag(diag)
- << DRE->getNameInfo().getName()
+ << DRE->getDecl()
<< OrigDecl->getLocation()
<< DRE->getSourceRange());
}
@@ -10011,12 +10694,22 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
// C++11 [dcl.spec.auto]p3
if (!Init) {
assert(VDecl && "no init for init capture deduction?");
- Diag(VDecl->getLocation(), diag::err_auto_var_requires_init)
- << VDecl->getDeclName() << Type;
- return QualType();
+
+ // Except for class template argument deduction, and then only for an
+ // initializing declaration, i.e. not a static data member at class scope
+ // or an extern declaration.
+ if (!isa<DeducedTemplateSpecializationType>(Deduced) ||
+ VDecl->hasExternalStorage() ||
+ VDecl->isStaticDataMember()) {
+ Diag(VDecl->getLocation(), diag::err_auto_var_requires_init)
+ << VDecl->getDeclName() << Type;
+ return QualType();
+ }
}
- ArrayRef<Expr*> DeduceInits = Init;
+ ArrayRef<Expr*> DeduceInits;
+ if (Init)
+ DeduceInits = Init;
+
if (DirectInit) {
if (auto *PL = dyn_cast_or_null<ParenListExpr>(Init))
DeduceInits = PL->exprs();
@@ -10260,7 +10953,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
}
if (VDecl->hasLocalStorage())
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) {
VDecl->setInvalidDecl();
@@ -10360,11 +11053,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// we do not warn to warn spuriously when 'x' and 'y' are on separate
// paths through the function. This should be revisited if
// -Wrepeated-use-of-weak is made flow-sensitive.
- if ((VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
- VDecl->getType().isNonWeakInMRRWithObjCWeak(Context)) &&
- !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
- Init->getLocStart()))
- getCurFunction()->markSafeWeakUse(Init);
+ if (FunctionScopeInfo *FSI = getCurFunction())
+ if ((VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ VDecl->getType().isNonWeakInMRRWithObjCWeak(Context)) &&
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
+ Init->getLocStart()))
+ FSI->markSafeWeakUse(Init);
}
// The initialization is usually a full-expression.
@@ -10471,6 +11165,9 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
; // Nothing to check.
else if (Init->isIntegerConstantExpr(Context, &Loc))
; // Ok, it's an ICE!
+ else if (Init->getType()->isScopedEnumeralType() &&
+ Init->isCXX11ConstantExpr(Context))
+ ; // Ok, it is a scoped-enum constant expression.
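+ // e.g. (sketch): this path newly accepts
+ // enum class E { A };
+ // struct S { static const E Val = E::A; };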
else if (Init->isEvaluatable(Context)) {
// If we can constant fold the initializer through heroics, accept it,
// but report this as a use of an extension for -pedantic.
@@ -10521,7 +11218,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
} else if (VDecl->isFileVarDecl()) {
// In C, extern is typically used to avoid tentative definitions when
// declaring variables in headers, but adding an initializer makes it a
- // defintion. This is somewhat confusing, so GCC and Clang both warn on it.
+ // definition. This is somewhat confusing, so GCC and Clang both warn on it.
// In C++, extern is often used to give implicitly static const variables
// external linkage, so don't warn in that case. If selectany is present,
// this might be header code intended for C and C++ inclusion, so apply the
@@ -10794,11 +11491,11 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (const RecordType *Record
= Context.getBaseElementType(Type)->getAs<RecordType>()) {
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
- // Mark the function for further checking even if the looser rules of
- // C++11 do not require such checks, so that we can diagnose
- // incompatibilities with C++98.
+ // Mark the function (if we're in one) for further checking even if the
+ // looser rules of C++11 do not require such checks, so that we can
+ // diagnose incompatibilities with C++98.
if (!CXXRecord->isPOD())
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
}
}
@@ -10893,13 +11590,13 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
getPrintingPolicy());
- Declarator D(DS, Declarator::ForContext);
+ Declarator D(DS, DeclaratorContext::ForContext);
D.SetIdentifier(Ident, IdentLoc);
D.takeAttributes(Attrs, AttrEnd);
ParsedAttributes EmptyAttrs(Attrs.getPool().getFactory());
- D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/false),
- EmptyAttrs, IdentLoc);
+ D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false),
+ IdentLoc);
Decl *Var = ActOnDeclarator(S, D);
cast<VarDecl>(Var)->setCXXForRangeDecl(true);
FinalizeDeclaration(Var);
@@ -10934,11 +11631,15 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
break;
}
}
+ if (var->hasLocalStorage() &&
+ var->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ setFunctionHasBranchProtectedScope();
+
// Warn about externally-visible variables being defined without a
// prior declaration. We only want to do this for global
// declarations, but we also specifically need to avoid doing it for
@@ -10947,6 +11648,8 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isThisDeclarationADefinition() &&
var->getDeclContext()->getRedeclContext()->isFileContext() &&
var->isExternallyVisible() && var->hasLinkage() &&
+ !var->isInline() && !var->getDescribedVarTemplate() &&
+ !isTemplateInstantiation(var->getTemplateSpecializationKind()) &&
!getDiagnostics().isIgnored(diag::warn_missing_variable_declarations,
var->getLocation())) {
// Find a previous declaration that's not a definition.
@@ -11140,7 +11843,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
Context.addModuleInitializer(ModuleScopes.back().Module, var);
}
-/// \brief Determines if a variable's alignment is dependent.
+/// Determines if a variable's alignment is dependent.
static bool hasDependentAlignment(VarDecl *VD) {
if (VD->getType()->isDependentType())
return true;
@@ -11227,58 +11930,8 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
// 7.5). We must also apply the same checks to all __shared__
// variables whether they are local or not. CUDA also allows
// constant initializers for __constant__ and __device__ variables.
- if (getLangOpts().CUDA) {
- const Expr *Init = VD->getInit();
- if (Init && VD->hasGlobalStorage()) {
- if (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>() ||
- VD->hasAttr<CUDASharedAttr>()) {
- assert(!VD->isStaticLocal() || VD->hasAttr<CUDASharedAttr>());
- bool AllowedInit = false;
- if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
- AllowedInit =
- isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
- // We'll allow constant initializers even if it's a non-empty
- // constructor according to CUDA rules. This deviates from NVCC,
- // but allows us to handle things like constexpr constructors.
- if (!AllowedInit &&
- (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
- AllowedInit = VD->getInit()->isConstantInitializer(
- Context, VD->getType()->isReferenceType());
-
- // Also make sure that destructor, if there is one, is empty.
- if (AllowedInit)
- if (CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl())
- AllowedInit =
- isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
-
- if (!AllowedInit) {
- Diag(VD->getLocation(), VD->hasAttr<CUDASharedAttr>()
- ? diag::err_shared_var_init
- : diag::err_dynamic_var_init)
- << Init->getSourceRange();
- VD->setInvalidDecl();
- }
- } else {
- // This is a host-side global variable. Check that the initializer is
- // callable from the host side.
- const FunctionDecl *InitFn = nullptr;
- if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
- InitFn = CE->getConstructor();
- } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
- InitFn = CE->getDirectCallee();
- }
- if (InitFn) {
- CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
- if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
- Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
- << InitFnTarget << InitFn;
- Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
- VD->setInvalidDecl();
- }
- }
- }
- }
- }
+ if (getLangOpts().CUDA)
+ checkAllowedCUDAInitializer(VD);
// Grab the dllimport or dllexport attribute off of the VarDecl.
const InheritableAttr *DLLAttr = getDLLAttr(VD);
@@ -11657,7 +12310,7 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
return New;
}
-/// \brief Synthesizes a variable for a parameter arising from a
+/// Synthesizes a variable for a parameter arising from a
/// typedef.
ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
@@ -11809,7 +12462,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
// Use the identifier location for the type source range.
DS.SetRangeStart(FTI.Params[i].IdentLoc);
DS.SetRangeEnd(FTI.Params[i].IdentLoc);
- Declarator ParamD(DS, Declarator::KNRTypeListContext);
+ Declarator ParamD(DS, DeclaratorContext::KNRTypeListContext);
ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
}
@@ -11894,9 +12547,45 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition,
SkipBodyInfo *SkipBody) {
const FunctionDecl *Definition = EffectiveDefinition;
+ if (!Definition && !FD->isDefined(Definition) && !FD->isCXXClassMember()) {
+ // If this is a friend function defined in a class template, it does not
+ // have a body until it is used; nevertheless it is a definition. See
+ // [temp.inst]p2:
+ //
+ // ... for the purpose of determining whether an instantiated redeclaration
+ // is valid according to [basic.def.odr] and [class.mem], a declaration that
+ // corresponds to a definition in the template is considered to be a
+ // definition.
+ //
+ // The following code must produce a redefinition error:
+ //
+ // template<typename T> struct C20 { friend void func_20() {} };
+ // C20<int> c20i;
+ // void func_20() {}
+ //
+ for (auto I : FD->redecls()) {
+ if (I != FD && !I->isInvalidDecl() &&
+ I->getFriendObjectKind() != Decl::FOK_None) {
+ if (FunctionDecl *Original = I->getInstantiatedFromMemberFunction()) {
+ if (FunctionDecl *OrigFD = FD->getInstantiatedFromMemberFunction()) {
+ // A merged copy of the same function, instantiated as a member of
+ // the same class, is OK.
+ if (declaresSameEntity(OrigFD, Original) &&
+ declaresSameEntity(cast<Decl>(I->getLexicalDeclContext()),
+ cast<Decl>(FD->getLexicalDeclContext())))
+ continue;
+ }
+
+ if (Original->isThisDeclarationADefinition()) {
+ Definition = I;
+ break;
+ }
+ }
+ }
+ }
+ }
if (!Definition)
- if (!FD->isDefined(Definition))
- return;
+ return;
if (canRedefineFunction(Definition, getLangOpts()))
return;
@@ -11981,8 +12670,13 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
SkipBodyInfo *SkipBody) {
- if (!D)
+ if (!D) {
+ // Parsing the function declaration failed in some way. Push on a fake scope
+ // anyway so we can try to parse the function body.
+ PushFunctionScope();
return D;
+ }
+
FunctionDecl *FD = nullptr;
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
@@ -12119,7 +12813,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
return D;
}
-/// \brief Given the set of return statements within a function body,
+/// Given the set of return statements within a function body,
/// compute the variables that are subject to the named return value
/// optimization.
///
@@ -12172,9 +12866,15 @@ bool Sema::canSkipFunctionBody(Decl *D) {
// rest of the file.
// We cannot skip the body of a function with an undeduced return type,
// because any callers of that function need to know the type.
- if (const FunctionDecl *FD = D->getAsFunction())
- if (FD->isConstexpr() || FD->getReturnType()->isUndeducedType())
+ if (const FunctionDecl *FD = D->getAsFunction()) {
+ if (FD->isConstexpr())
+ return false;
+ // We can't simply call Type::isUndeducedType here, because inside template
+ // auto can be deduced to a dependent type, which is not considered
+ // "undeduced".
+ if (FD->getReturnType()->getContainedDeducedType())
return false;
+ }
return Consumer.shouldSkipFunctionBody(D);
}
@@ -12270,8 +12970,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Try to apply the named return value optimization. We have to check
// if we can do this here because lambdas keep return statements around
// to deduce an implicit return type.
- if (getLangOpts().CPlusPlus && FD->getReturnType()->isRecordType() &&
- !FD->isDependentContext())
+ if (FD->getReturnType()->isRecordType() &&
+ (!getLangOpts().CPlusPlus || !FD->isDependentContext()))
computeNRVO(Body, getCurFunction());
}
@@ -12314,6 +13014,13 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
}
}
+ // Warn on CPUDispatch with an actual body.
+ if (FD->isMultiVersion() && FD->hasAttr<CPUDispatchAttr>() && Body)
+ if (const auto *CmpndBody = dyn_cast<CompoundStmt>(Body))
+ if (!CmpndBody->body_empty())
+ Diag(CmpndBody->body_front()->getLocStart(),
+ diag::warn_dispatch_body_ignored);
+
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
const CXXMethodDecl *KeyFunction;
if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
@@ -12391,6 +13098,9 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
getCurFunction()->ObjCWarnForNoInitDelegation = false;
}
} else {
+ // Parsing the function declaration failed in some way. Pop the fake scope
+ // we pushed on.
+ PopFunctionScopeInfo(ActivePolicy, dcl);
return nullptr;
}
@@ -12496,7 +13206,7 @@ void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
// Always attach attributes to the underlying decl.
if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
D = TD->getTemplatedDecl();
- ProcessDeclAttributeList(S, D, Attrs.getList());
+ ProcessDeclAttributeList(S, D, Attrs);
if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(D))
if (Method->isStatic())
@@ -12507,10 +13217,20 @@ void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
IdentifierInfo &II, Scope *S) {
+ // Find the scope in which the identifier is injected and the corresponding
+ // DeclContext.
+ // FIXME: C89 does not say what happens if there is no enclosing block scope.
+ // In that case, we inject the declaration into the translation unit scope
+ // instead.
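+ // e.g. (C89 sketch): given 'void g(void) { foo(42); }' with no prior
+ // declaration of foo, this path implicitly declares 'int foo()' in the
+ // enclosing block scope.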
Scope *BlockScope = S;
while (!BlockScope->isCompoundStmtScope() && BlockScope->getParent())
BlockScope = BlockScope->getParent();
+ Scope *ContextScope = BlockScope;
+ while (!ContextScope->getEntity())
+ ContextScope = ContextScope->getParent();
+ ContextRAII SavedContext(*this, ContextScope->getEntity());
+
// Before we produce a declaration for an implicitly defined
// function, see whether there was a locally-scoped declaration of
// this name as a function or variable. If so, use that
@@ -12574,7 +13294,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
SourceLocation NoLoc;
- Declarator D(DS, Declarator::BlockContext);
+ Declarator D(DS, DeclaratorContext::BlockContext);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
@@ -12588,18 +13308,16 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*ConstQualifierLoc=*/NoLoc,
/*VolatileQualifierLoc=*/NoLoc,
/*RestrictQualifierLoc=*/NoLoc,
- /*MutableLoc=*/NoLoc,
- EST_None,
+ /*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None,
- Loc, Loc, D),
- DS.getAttributes(),
- SourceLocation());
+ /*DeclsInPrototype=*/None, Loc,
+ Loc, D),
+ std::move(DS.getAttributes()), SourceLocation());
D.SetIdentifier(&II, Loc);
// Insert this function into the enclosing block scope.
@@ -12611,7 +13329,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
return FD;
}
-/// \brief Adds any function attributes that we know a priori based on
+/// Adds any function attributes that we know a priori based on
/// the declaration of this function.
///
/// These attributes can apply both to implicitly-declared builtins
@@ -12661,11 +13379,11 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
Context.BuiltinInfo.isConstWithoutErrno(BuiltinID))
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
- // We make "fma" on GNU or Windows const because we know it does not set
+ // We make "fma" on some platforms const because we know it does not set
// errno in those environments even though it could set errno based on the
// C standard.
const llvm::Triple &Trip = Context.getTargetInfo().getTriple();
- if ((Trip.isGNUEnvironment() || Trip.isOSMSVCRT()) &&
+ if ((Trip.isGNUEnvironment() || Trip.isAndroid() || Trip.isOSMSVCRT()) &&
!FD->hasAttr<ConstAttr>()) {
switch (BuiltinID) {
case Builtin::BI__builtin_fma:
@@ -12741,7 +13459,7 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
// We already have a __builtin___CFStringMakeConstantString,
// but builds that use -fno-constant-cfstrings don't go through that.
if (!FD->hasAttr<FormatArgAttr>())
- FD->addAttr(FormatArgAttr::CreateImplicit(Context, 1,
+ FD->addAttr(FormatArgAttr::CreateImplicit(Context, ParamIdx(1, FD),
FD->getLocation()));
}
}
@@ -12803,7 +13521,7 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
return NewTD;
}
-/// \brief Check that this is a valid underlying type for an enum declaration.
+/// Check that this is a valid underlying type for an enum declaration.
bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
QualType T = TI->getType();
@@ -12821,11 +13539,9 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
/// Check whether this is a valid redeclaration of a previous enumeration.
/// \return true if the redeclaration was invalid.
-bool Sema::CheckEnumRedeclaration(
- SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy,
- bool EnumUnderlyingIsImplicit, const EnumDecl *Prev) {
- bool IsFixed = !EnumUnderlyingTy.isNull();
-
+bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
+ QualType EnumUnderlyingTy, bool IsFixed,
+ const EnumDecl *Prev) {
if (IsScoped != Prev->isScoped()) {
Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
<< Prev->isScoped();
@@ -12845,10 +13561,6 @@ bool Sema::CheckEnumRedeclaration(
<< Prev->getIntegerTypeRange();
return true;
}
- } else if (IsFixed && !Prev->isFixed() && EnumUnderlyingIsImplicit) {
- ;
- } else if (!IsFixed && Prev->isFixed() && !Prev->getIntegerTypeSourceInfo()) {
- ;
} else if (IsFixed != Prev->isFixed()) {
Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
<< Prev->isFixed();
@@ -12859,7 +13571,7 @@ bool Sema::CheckEnumRedeclaration(
return false;
}
-/// \brief Get diagnostic %select index for tag kind for
+/// Get diagnostic %select index for tag kind for
/// redeclaration diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
@@ -12873,7 +13585,7 @@ static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
}
}
-/// \brief Determine if tag kind is a class-key compatible with
+/// Determine if tag kind is a class-key compatible with
/// class for redeclaration (class, struct, or __interface).
///
/// \returns true iff the tag kind is compatible.
@@ -12907,7 +13619,7 @@ Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
llvm_unreachable("invalid TTK");
}
-/// \brief Determine whether a tag with a given kind is acceptable
+/// Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
/// \returns true if the new tag kind is acceptable, false otherwise.
@@ -13043,8 +13755,8 @@ static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND, Scope *S,
return FixItHint::CreateInsertion(NameLoc, Insertion);
}
-/// \brief Determine whether a tag originally declared in context \p OldDC can
-/// be redeclared with an unqualfied name in \p NewDC (assuming name lookup
+/// Determine whether a tag originally declared in context \p OldDC can
+/// be redeclared with an unqualified name in \p NewDC (assuming name lookup
/// found a declaration in \p OldDC as a previous decl, perhaps through a
/// using-declaration).
static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
@@ -13064,7 +13776,7 @@ static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
return false;
}
-/// \brief This is invoked when we see 'struct foo' or 'struct {'. In the
+/// This is invoked when we see 'struct foo' or 'struct {'. In the
/// former case, Name will be non-null. In the latter case, Name will be null.
/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
/// reference/declaration/definition of a tag.
@@ -13077,13 +13789,12 @@ static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
- AttributeList *Attr, AccessSpecifier AS,
+ const ParsedAttributesView &Attrs, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
- bool ScopedEnumUsesClassTag,
- TypeResult UnderlyingType,
+ bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
@@ -13122,14 +13833,11 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
return nullptr;
OwnedDecl = false;
- DeclResult Result = CheckClassTemplate(S, TagSpec, TUK, KWLoc,
- SS, Name, NameLoc, Attr,
- TemplateParams, AS,
- ModulePrivateLoc,
- /*FriendLoc*/SourceLocation(),
- TemplateParameterLists.size()-1,
- TemplateParameterLists.data(),
- SkipBody);
+ DeclResult Result = CheckClassTemplate(
+ S, TagSpec, TUK, KWLoc, SS, Name, NameLoc, Attrs, TemplateParams,
+ AS, ModulePrivateLoc,
+ /*FriendLoc*/ SourceLocation(), TemplateParameterLists.size() - 1,
+ TemplateParameterLists.data(), SkipBody);
return Result.get();
} else {
// The "template<>" header is extraneous.
@@ -13144,14 +13852,14 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// this early, because it's needed to detect if this is an incompatible
// redeclaration.
llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
- bool EnumUnderlyingIsImplicit = false;
+ bool IsFixed = !UnderlyingType.isUnset() || ScopedEnum;
if (Kind == TTK_Enum) {
- if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum))
+ if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum)) {
// No underlying type explicitly specified, or we failed to parse the
// type, default to int.
EnumUnderlying = Context.IntTy.getTypePtr();
- else if (UnderlyingType.get()) {
+ } else if (UnderlyingType.get()) {
// C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
// integral type; any cv-qualification is ignored.
TypeSourceInfo *TI = nullptr;
@@ -13167,11 +13875,12 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
EnumUnderlying = Context.IntTy.getTypePtr();
} else if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- if (getLangOpts().MSVCCompat || TUK == TUK_Definition) {
- // Microsoft enums are always of int type.
+ // For MSVC ABI compatibility, unfixed enums must use an underlying type
+ // of 'int'. However, if this is an unfixed forward declaration, don't set
+ // the underlying type unless the user enables -fms-compatibility. This
+ // makes unfixed forward declared enums incomplete and is more conforming.
+ if (TUK == TUK_Definition || getLangOpts().MSVCCompat)
EnumUnderlying = Context.IntTy.getTypePtr();
- EnumUnderlyingIsImplicit = true;
- }
}
}
@@ -13197,8 +13906,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Kind == TTK_Enum) {
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
- ScopedEnum, ScopedEnumUsesClassTag,
- !EnumUnderlying.isNull());
+ ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
if (TUK != TUK_Definition && !Invalid)
return nullptr;
@@ -13577,7 +14285,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// in which case we want the caller to bail out.
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
ScopedEnum, EnumUnderlyingTy,
- EnumUnderlyingIsImplicit, PrevEnum))
+ IsFixed, PrevEnum))
return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
}
@@ -13595,7 +14303,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// If this is a use, just return the declaration we found, unless
// we have attributes.
if (TUK == TUK_Reference || TUK == TUK_Friend) {
- if (Attr) {
+ if (!Attrs.empty()) {
// FIXME: Diagnose these attributes. For now, we create a new
// declaration to hold them.
} else if (TUK == TUK_Reference &&
@@ -13787,13 +14495,12 @@ CreateNewDecl:
// PrevDecl.
TagDecl *New;
- bool IsForwardReference = false;
if (Kind == TTK_Enum) {
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// enum X { A, B, C } D; D should chain to X.
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
- ScopedEnumUsesClassTag, !EnumUnderlying.isNull());
+ ScopedEnumUsesClassTag, IsFixed);
if (isStdAlignValT && (!StdAlignValT || getStdAlignValT()->isImplicit()))
StdAlignValT = cast<EnumDecl>(New);
@@ -13801,8 +14508,7 @@ CreateNewDecl:
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
- if (!EnumUnderlyingIsImplicit &&
- (getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
+ if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
@@ -13818,12 +14524,6 @@ CreateNewDecl:
else if (getLangOpts().CPlusPlus)
DiagID = diag::err_forward_ref_enum;
Diag(Loc, DiagID);
-
- // If this is a forward-declared reference to an enumeration, make a
- // note of it; we won't actually be introducing the declaration into
- // the declaration context.
- if (TUK == TUK_Reference)
- IsForwardReference = true;
}
}
@@ -13834,6 +14534,7 @@ CreateNewDecl:
else
ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
ED->setPromotionType(ED->getIntegerType());
+ assert(ED->isComplete() && "enum with type should be complete");
}
} else {
// struct/union/class
@@ -13872,13 +14573,10 @@ CreateNewDecl:
if (SS.isNotEmpty()) {
if (SS.isSet()) {
// If this is either a declaration or a definition, check the
- // nested-name-specifier against the current context. We don't do this
- // for explicit specializations, because they have similar checking
- // (with more specific diagnostics) in the call to
- // CheckMemberSpecialization, below.
- if (!isMemberSpecialization &&
- (TUK == TUK_Definition || TUK == TUK_Declaration) &&
- diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc))
+ // nested-name-specifier against the current context.
+ if ((TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc,
+ isMemberSpecialization))
Invalid = true;
New->setQualifierInfo(SS.getWithLocInContext(Context));
@@ -13965,8 +14663,7 @@ CreateNewDecl:
if (TUK == TUK_Definition)
New->startDefinition();
- if (Attr)
- ProcessDeclAttributeList(S, New, Attr);
+ ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
@@ -13983,9 +14680,7 @@ CreateNewDecl:
PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
} else if (Name) {
S = getNonFieldDeclScope(S);
- PushOnScopeChains(New, S, !IsForwardReference);
- if (IsForwardReference)
- SearchDC->makeDeclVisibleInContext(New);
+ PushOnScopeChains(New, S, true);
} else {
CurContext->addDecl(New);
}
@@ -14369,7 +15064,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
return NewFD;
}
-/// \brief Build a new FieldDecl and check its well-formedness.
+/// Build a new FieldDecl and check its well-formedness.
///
/// This routine builds a new FieldDecl given the fields name, type,
/// record, etc. \p PrevDecl should refer to any previous declaration
@@ -14420,6 +15115,13 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
InvalidDecl = true;
}
+ // Anonymous bit-fields cannot be cv-qualified (CWG 2229).
+ if (!InvalidDecl && getLangOpts().CPlusPlus && !II && BitWidth &&
+ T.hasQualifiers()) {
+ InvalidDecl = true;
+ Diag(Loc, diag::err_anon_bitfield_qualifiers);
+ }
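+ // A minimal example of what this rejects:
+ // struct S { const int : 4; }; // error: anonymous bit-field cv-qualified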
+
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
if (!InvalidDecl && T->isVariablyModifiedType()) {
@@ -14752,7 +15454,7 @@ void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1];
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(ivarDecl);
- if (!Ivar->isBitField() || Ivar->getBitWidthValue(Context) == 0)
+ if (!Ivar->isBitField() || Ivar->isZeroLengthBitField(Context))
return;
ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CurContext);
if (!ID) {
@@ -14780,7 +15482,8 @@ void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
- SourceLocation RBrac, AttributeList *Attr) {
+ SourceLocation RBrac,
+ const ParsedAttributesView &Attrs) {
assert(EnclosingDecl && "missing record or interface decl");
// If this is an Objective-C @implementation or category and we have
@@ -14963,8 +15666,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
QualType T = Context.getObjCObjectPointerType(FD->getType());
FD->setType(T);
} else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
- Record && !ObjCFieldLifetimeErrReported &&
- (!getLangOpts().CPlusPlus || Record->isUnion())) {
+ Record && !ObjCFieldLifetimeErrReported && Record->isUnion()) {
// It's an error in ARC or Weak if a field has lifetime.
// We don't want to report this in a system header, though,
// so we just make the field unavailable.
@@ -15000,6 +15702,27 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Record->setHasObjectMember(true);
}
}
+
+ if (Record && !getLangOpts().CPlusPlus && !FD->hasAttr<UnavailableAttr>()) {
+ QualType FT = FD->getType();
+ if (FT.isNonTrivialToPrimitiveDefaultInitialize())
+ Record->setNonTrivialToPrimitiveDefaultInitialize(true);
+ QualType::PrimitiveCopyKind PCK = FT.isNonTrivialToPrimitiveCopy();
+ if (PCK != QualType::PCK_Trivial && PCK != QualType::PCK_VolatileTrivial)
+ Record->setNonTrivialToPrimitiveCopy(true);
+ if (FT.isDestructedType()) {
+ Record->setNonTrivialToPrimitiveDestroy(true);
+ Record->setParamDestroyedInCallee(true);
+ }
+
+ if (const auto *RT = FT->getAs<RecordType>()) {
+ if (RT->getDecl()->getArgPassingRestrictions() ==
+ RecordDecl::APK_CanNeverPassInRegs)
+ Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ } else if (FT.getQualifiers().getObjCLifetime() == Qualifiers::OCL_Weak)
+ Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ }
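+ // e.g. (ObjC ARC sketch): struct S { __strong id Obj; }; marks S
+ // non-trivial to default-initialize, copy, and destroy (destroyed in the
+ // callee); a __weak id field would additionally bar passing S in registers.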
+
if (Record && FD->getType().isVolatileQualified())
Record->setHasVolatileMember(true);
// Keep track of the number of named members.
@@ -15027,10 +15750,10 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
CXXRecord->getDestructor());
}
- if (!CXXRecord->isInvalidDecl()) {
- // Add any implicitly-declared members to this class.
- AddImplicitlyDeclaredMembersToClass(CXXRecord);
+ // Add any implicitly-declared members to this class.
+ AddImplicitlyDeclaredMembersToClass(CXXRecord);
+ if (!CXXRecord->isInvalidDecl()) {
// If we have virtual base classes, we may end up finding multiple
// final overriders for a given virtual function. Check for this
// problem now.
@@ -15045,7 +15768,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
SOEnd = M->second.end();
SO != SOEnd; ++SO) {
assert(SO->second.size() > 0 &&
- "Virtual function without overridding functions?");
+ "Virtual function without overriding functions?");
if (SO->second.size() == 1)
continue;
@@ -15077,6 +15800,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
if (!Completed)
Record->completeDefinition();
+ // Handle attributes before checking the layout.
+ ProcessDeclAttributeList(S, Record, Attrs);
+
// We may have deferred checking for a deleted destructor. Check now.
if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
auto *Dtor = CXXRecord->getDestructor();
@@ -15119,7 +15845,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
(NonBitFields == 0 || ZeroSize) && I != E; ++I) {
IsEmpty = false;
if (I->isUnnamedBitfield()) {
- if (I->getBitWidthValue(Context) > 0)
+ if (!I->isZeroLengthBitField(Context))
ZeroSize = false;
} else {
++NonBitFields;
@@ -15207,12 +15933,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
CDecl->setIvarRBraceLoc(RBrac);
}
}
-
- if (Attr)
- ProcessDeclAttributeList(S, Record, Attr);
}
-/// \brief Determine whether the given integral value is representable within
+/// Determine whether the given integral value is representable within
/// the given type T.
static bool isRepresentableIntegerValue(ASTContext &Context,
llvm::APSInt &Value,
@@ -15229,7 +15952,7 @@ static bool isRepresentableIntegerValue(ASTContext &Context,
return Value.getMinSignedBits() <= BitWidth;
}
-// \brief Given an integral type, return the next larger integral type
+// Given an integral type, return the next larger integral type
// (or a NULL type if no such type exists).
static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
// FIXME: Int128/UInt128 support, which also needs to be introduced into
@@ -15292,7 +16015,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
&EnumVal).get())) {
// C99 6.7.2.2p2: Make sure we have an integer constant expression.
} else {
- if (Enum->isFixed()) {
+ if (Enum->isComplete()) {
EltTy = Enum->getIntegerType();
// In Obj-C and Microsoft mode, require the enumeration value to be
@@ -15456,7 +16179,7 @@ Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
SourceLocation IdLoc, IdentifierInfo *Id,
- AttributeList *Attr,
+ const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val) {
EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
EnumConstantDecl *LastEnumConst =
@@ -15507,7 +16230,7 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
}
// Process attributes.
- if (Attr) ProcessDeclAttributeList(S, New, Attr);
+ ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// Register this decl in the current scope stack.
@@ -15559,39 +16282,10 @@ static bool ValidDuplicateEnum(EnumConstantDecl *ECD, EnumDecl *Enum) {
return false;
}
-namespace {
-struct DupKey {
- int64_t val;
- bool isTombstoneOrEmptyKey;
- DupKey(int64_t val, bool isTombstoneOrEmptyKey)
- : val(val), isTombstoneOrEmptyKey(isTombstoneOrEmptyKey) {}
-};
-
-static DupKey GetDupKey(const llvm::APSInt& Val) {
- return DupKey(Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue(),
- false);
-}
-
-struct DenseMapInfoDupKey {
- static DupKey getEmptyKey() { return DupKey(0, true); }
- static DupKey getTombstoneKey() { return DupKey(1, true); }
- static unsigned getHashValue(const DupKey Key) {
- return (unsigned)(Key.val * 37);
- }
- static bool isEqual(const DupKey& LHS, const DupKey& RHS) {
- return LHS.isTombstoneOrEmptyKey == RHS.isTombstoneOrEmptyKey &&
- LHS.val == RHS.val;
- }
-};
-} // end anonymous namespace
-
// Emits a warning when an element is implicitly set to a value that
// a previous element has already been set to.
static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
- EnumDecl *Enum,
- QualType EnumType) {
- if (S.Diags.isIgnored(diag::warn_duplicate_enum_values, Enum->getLocation()))
- return;
+ EnumDecl *Enum, QualType EnumType) {
// Avoid anonymous enums
if (!Enum->getIdentifier())
return;
@@ -15600,20 +16294,28 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
if (Enum->getNumPositiveBits() > 63 || Enum->getNumNegativeBits() > 64)
return;
+ if (S.Diags.isIgnored(diag::warn_duplicate_enum_values, Enum->getLocation()))
+ return;
+
typedef SmallVector<EnumConstantDecl *, 3> ECDVector;
- typedef SmallVector<ECDVector *, 3> DuplicatesVector;
+ typedef SmallVector<std::unique_ptr<ECDVector>, 3> DuplicatesVector;
typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
- typedef llvm::DenseMap<DupKey, DeclOrVector, DenseMapInfoDupKey>
- ValueToVectorMap;
+ typedef llvm::DenseMap<int64_t, DeclOrVector> ValueToVectorMap;
+
+ // Use int64_t as a key to avoid needing special handling for DenseMap keys.
+ auto EnumConstantToKey = [](const EnumConstantDecl *D) {
+ llvm::APSInt Val = D->getInitVal();
+ return Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue();
+ };
DuplicatesVector DupVector;
ValueToVectorMap EnumMap;
// Populate the EnumMap with all values represented by enum constants without
- // an initialier.
- for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]);
+ // an initializer.
+ for (auto *Element : Elements) {
+ EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Element);
// Null EnumConstantDecl means a previous diagnostic has been emitted for
// this constant. Skip this enum since it may be ill-formed.
@@ -15621,45 +16323,45 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
return;
}
+ // Constants with initializers are handled in the next loop.
if (ECD->getInitExpr())
continue;
- DupKey Key = GetDupKey(ECD->getInitVal());
- DeclOrVector &Entry = EnumMap[Key];
-
- // First time encountering this value.
- if (Entry.isNull())
- Entry = ECD;
+ // Duplicate values are handled in the next loop.
+ EnumMap.insert({EnumConstantToKey(ECD), ECD});
}
+ if (EnumMap.size() == 0)
+ return;
+
// Create vectors for any values that have duplicates.
- for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- EnumConstantDecl *ECD = cast<EnumConstantDecl>(Elements[i]);
+ for (auto *Element : Elements) {
+ // The last loop returned if any constant was null.
+ EnumConstantDecl *ECD = cast<EnumConstantDecl>(Element);
if (!ValidDuplicateEnum(ECD, Enum))
continue;
- DupKey Key = GetDupKey(ECD->getInitVal());
-
- DeclOrVector& Entry = EnumMap[Key];
- if (Entry.isNull())
+ auto Iter = EnumMap.find(EnumConstantToKey(ECD));
+ if (Iter == EnumMap.end())
continue;
+ DeclOrVector& Entry = Iter->second;
if (EnumConstantDecl *D = Entry.dyn_cast<EnumConstantDecl*>()) {
// Ensure constants are different.
if (D == ECD)
continue;
// Create new vector and push values onto it.
- ECDVector *Vec = new ECDVector();
+ auto Vec = llvm::make_unique<ECDVector>();
Vec->push_back(D);
Vec->push_back(ECD);
// Update entry to point to the duplicates vector.
- Entry = Vec;
+ Entry = Vec.get();
// Store the vector somewhere we can consult later for quick emission of
// diagnostics.
- DupVector.push_back(Vec);
+ DupVector.emplace_back(std::move(Vec));
continue;
}
@@ -15672,26 +16374,21 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
}
// Emit diagnostics.
- for (DuplicatesVector::iterator DupVectorIter = DupVector.begin(),
- DupVectorEnd = DupVector.end();
- DupVectorIter != DupVectorEnd; ++DupVectorIter) {
- ECDVector *Vec = *DupVectorIter;
+ for (const auto &Vec : DupVector) {
assert(Vec->size() > 1 && "ECDVector should have at least 2 elements.");
// Emit warning for one enum constant.
- ECDVector::iterator I = Vec->begin();
- S.Diag((*I)->getLocation(), diag::warn_duplicate_enum_values)
- << (*I)->getName() << (*I)->getInitVal().toString(10)
- << (*I)->getSourceRange();
- ++I;
+ auto *FirstECD = Vec->front();
+ S.Diag(FirstECD->getLocation(), diag::warn_duplicate_enum_values)
+ << FirstECD << FirstECD->getInitVal().toString(10)
+ << FirstECD->getSourceRange();
// Emit one note for each of the remaining enum constants with
// the same value.
- for (ECDVector::iterator E = Vec->end(); I != E; ++I)
- S.Diag((*I)->getLocation(), diag::note_duplicate_element)
- << (*I)->getName() << (*I)->getInitVal().toString(10)
- << (*I)->getSourceRange();
- delete Vec;
+ for (auto *ECD : llvm::make_range(Vec->begin() + 1, Vec->end()))
+ S.Diag(ECD->getLocation(), diag::note_duplicate_element)
+ << ECD << ECD->getInitVal().toString(10)
+ << ECD->getSourceRange();
}
}
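
[Editorial sketch, not part of the patch: the kind of source this rewritten check diagnoses, with invented names. The first loop records Apple's implicit 0 in EnumMap; the second loop sees Pear's explicit 0 and builds an ECDVector; the final loop then emits the warning on one constant and a note on the other (grouped under -Wduplicate-enum).]

    enum Fruit {
      Apple,    // implicitly assigned 0; recorded by the first loop
      Pear = 0, // duplicates the implicit 0; triggers warn_duplicate_enum_values
    };
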
@@ -15725,14 +16422,12 @@ bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
}
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
- Decl *EnumDeclX,
- ArrayRef<Decl *> Elements,
- Scope *S, AttributeList *Attr) {
+ Decl *EnumDeclX, ArrayRef<Decl *> Elements, Scope *S,
+ const ParsedAttributesView &Attrs) {
EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
QualType EnumType = Context.getTypeDeclType(Enum);
- if (Attr)
- ProcessDeclAttributeList(S, Enum, Attr);
+ ProcessDeclAttributeList(S, Enum, Attrs);
if (Enum->isDependentType()) {
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
@@ -15803,7 +16498,9 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
if (LangOpts.ShortEnums)
Packed = true;
- if (Enum->isFixed()) {
+ // If the enum already has a type because it is fixed or dictated by the
+ // target, promote that type instead of analyzing the enumerators.
+ if (Enum->isComplete()) {
BestType = Enum->getIntegerType();
if (BestType->isPromotableIntegerType())
BestPromotionType = Context.getPromotedIntegerType(BestType);
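
[Editorial sketch, not part of the patch: the case the isComplete() branch covers. An enum whose integer type is already known, e.g. via an explicit enum-base, keeps that type as BestType instead of having one computed from the enumerators, and its promoted type is derived from it.]

    enum Small : unsigned char { Lo = 0, Hi = 255 };
    static_assert(sizeof(Small) == 1, "fixed underlying type is preserved");
    int P = Lo + 0; // enumerators still promote through 'int' (BestPromotionType)
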
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 21fe46ad9dd1..320eabd5ec2f 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -31,6 +31,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -58,7 +59,7 @@ static bool isFunctionOrMethod(const Decl *D) {
return (D->getFunctionType() != nullptr) || isa<ObjCMethodDecl>(D);
}
-/// \brief Return true if the given decl has function type (function or
+/// Return true if the given decl has function type (function or
/// function-typed variable) or an Objective-C method or a block.
static bool isFunctionOrMethodOrBlock(const Decl *D) {
return isFunctionOrMethod(D) || isa<BlockDecl>(D);
@@ -87,7 +88,7 @@ static bool hasFunctionProto(const Decl *D) {
static unsigned getFunctionOrMethodNumParams(const Decl *D) {
if (const FunctionType *FnTy = D->getFunctionType())
return cast<FunctionProtoType>(FnTy)->getNumParams();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
return BD->getNumParams();
return cast<ObjCMethodDecl>(D)->param_size();
}
@@ -95,7 +96,7 @@ static unsigned getFunctionOrMethodNumParams(const Decl *D) {
static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
if (const FunctionType *FnTy = D->getFunctionType())
return cast<FunctionProtoType>(FnTy)->getParamType(Idx);
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
return BD->getParamDecl(Idx)->getType();
return cast<ObjCMethodDecl>(D)->parameters()[Idx]->getType();
@@ -113,7 +114,7 @@ static SourceRange getFunctionOrMethodParamRange(const Decl *D, unsigned Idx) {
static QualType getFunctionOrMethodResultType(const Decl *D) {
if (const FunctionType *FnTy = D->getFunctionType())
- return cast<FunctionType>(FnTy)->getReturnType();
+ return FnTy->getReturnType();
return cast<ObjCMethodDecl>(D)->getReturnType();
}
@@ -126,24 +127,21 @@ static SourceRange getFunctionOrMethodResultSourceRange(const Decl *D) {
}
static bool isFunctionOrMethodVariadic(const Decl *D) {
- if (const FunctionType *FnTy = D->getFunctionType()) {
- const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
- return proto->isVariadic();
- }
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ if (const FunctionType *FnTy = D->getFunctionType())
+ return cast<FunctionProtoType>(FnTy)->isVariadic();
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
return BD->isVariadic();
-
return cast<ObjCMethodDecl>(D)->isVariadic();
}
static bool isInstanceMethod(const Decl *D) {
- if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D))
+ if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(D))
return MethodDecl->isInstance();
return false;
}
static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
- const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ const auto *PT = T->getAs<ObjCObjectPointerType>();
if (!PT)
return false;
@@ -159,11 +157,11 @@ static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
}
static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
- const PointerType *PT = T->getAs<PointerType>();
+ const auto *PT = T->getAs<PointerType>();
if (!PT)
return false;
- const RecordType *RT = PT->getPointeeType()->getAs<RecordType>();
+ const auto *RT = PT->getPointeeType()->getAs<RecordType>();
if (!RT)
return false;
@@ -174,89 +172,86 @@ static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
}
-static unsigned getNumAttributeArgs(const AttributeList &Attr) {
+static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
// FIXME: Include the type in the argument list.
- return Attr.getNumArgs() + Attr.hasParsedType();
+ return AL.getNumArgs() + AL.hasParsedType();
}
template <typename Compare>
-static bool checkAttributeNumArgsImpl(Sema &S, const AttributeList &Attr,
+static bool checkAttributeNumArgsImpl(Sema &S, const ParsedAttr &AL,
unsigned Num, unsigned Diag,
Compare Comp) {
- if (Comp(getNumAttributeArgs(Attr), Num)) {
- S.Diag(Attr.getLoc(), Diag) << Attr.getName() << Num;
+ if (Comp(getNumAttributeArgs(AL), Num)) {
+ S.Diag(AL.getLoc(), Diag) << AL.getName() << Num;
return false;
}
return true;
}
-/// \brief Check if the attribute has exactly as many args as Num. May
+/// Check if the attribute has exactly as many args as Num. May
/// output an error.
-static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr,
- unsigned Num) {
- return checkAttributeNumArgsImpl(S, Attr, Num,
+static bool checkAttributeNumArgs(Sema &S, const ParsedAttr &AL, unsigned Num) {
+ return checkAttributeNumArgsImpl(S, AL, Num,
diag::err_attribute_wrong_number_arguments,
std::not_equal_to<unsigned>());
}
-/// \brief Check if the attribute has at least as many args as Num. May
+/// Check if the attribute has at least as many args as Num. May
/// output an error.
-static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr,
+static bool checkAttributeAtLeastNumArgs(Sema &S, const ParsedAttr &AL,
unsigned Num) {
- return checkAttributeNumArgsImpl(S, Attr, Num,
+ return checkAttributeNumArgsImpl(S, AL, Num,
diag::err_attribute_too_few_arguments,
std::less<unsigned>());
}
-/// \brief Check if the attribute has at most as many args as Num. May
+/// Check if the attribute has at most as many args as Num. May
/// output an error.
-static bool checkAttributeAtMostNumArgs(Sema &S, const AttributeList &Attr,
- unsigned Num) {
- return checkAttributeNumArgsImpl(S, Attr, Num,
+static bool checkAttributeAtMostNumArgs(Sema &S, const ParsedAttr &AL,
+ unsigned Num) {
+ return checkAttributeNumArgsImpl(S, AL, Num,
diag::err_attribute_too_many_arguments,
std::greater<unsigned>());
}
-/// \brief A helper function to provide Attribute Location for the Attr types
-/// AND the AttributeList.
+/// A helper function to provide Attribute Location for the Attr types
+/// AND the ParsedAttr.
template <typename AttrInfo>
-static typename std::enable_if<std::is_base_of<clang::Attr, AttrInfo>::value,
+static typename std::enable_if<std::is_base_of<Attr, AttrInfo>::value,
SourceLocation>::type
-getAttrLoc(const AttrInfo &Attr) {
- return Attr.getLocation();
-}
-static SourceLocation getAttrLoc(const clang::AttributeList &Attr) {
- return Attr.getLoc();
+getAttrLoc(const AttrInfo &AL) {
+ return AL.getLocation();
}
+static SourceLocation getAttrLoc(const ParsedAttr &AL) { return AL.getLoc(); }
-/// \brief A helper function to provide Attribute Name for the Attr types
-/// AND the AttributeList.
+/// A helper function to provide Attribute Name for the Attr types
+/// AND the ParsedAttr.
template <typename AttrInfo>
-static typename std::enable_if<std::is_base_of<clang::Attr, AttrInfo>::value,
+static typename std::enable_if<std::is_base_of<Attr, AttrInfo>::value,
const AttrInfo *>::type
-getAttrName(const AttrInfo &Attr) {
- return &Attr;
+getAttrName(const AttrInfo &AL) {
+ return &AL;
}
-static const IdentifierInfo *getAttrName(const clang::AttributeList &Attr) {
- return Attr.getName();
+static const IdentifierInfo *getAttrName(const ParsedAttr &AL) {
+ return AL.getName();
}
-/// \brief If Expr is a valid integer constant, get the value of the integer
+/// If Expr is a valid integer constant, get the value of the integer
/// expression and return success or failure. May output an error.
-template<typename AttrInfo>
-static bool checkUInt32Argument(Sema &S, const AttrInfo& Attr, const Expr *Expr,
+template <typename AttrInfo>
+static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
uint32_t &Val, unsigned Idx = UINT_MAX) {
llvm::APSInt I(32);
if (Expr->isTypeDependent() || Expr->isValueDependent() ||
!Expr->isIntegerConstantExpr(I, S.Context)) {
if (Idx != UINT_MAX)
- S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_n_type)
- << getAttrName(Attr) << Idx << AANT_ArgumentIntegerConstant
+ S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
+ << getAttrName(AI) << Idx << AANT_ArgumentIntegerConstant
<< Expr->getSourceRange();
else
- S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_type)
- << getAttrName(Attr) << AANT_ArgumentIntegerConstant
+ S.Diag(getAttrLoc(AI), diag::err_attribute_argument_type)
+ << getAttrName(AI) << AANT_ArgumentIntegerConstant
<< Expr->getSourceRange();
return false;
}
@@ -271,14 +266,14 @@ static bool checkUInt32Argument(Sema &S, const AttrInfo& Attr, const Expr *Expr,
return true;
}
-/// \brief Wrapper around checkUInt32Argument, with an extra check to be sure
+/// Wrapper around checkUInt32Argument, with an extra check to be sure
/// that the result will fit into a regular (signed) int. All args have the same
/// purpose as they do in checkUInt32Argument.
-template<typename AttrInfo>
-static bool checkPositiveIntArgument(Sema &S, const AttrInfo& Attr, const Expr *Expr,
+template <typename AttrInfo>
+static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Expr,
int &Val, unsigned Idx = UINT_MAX) {
uint32_t UVal;
- if (!checkUInt32Argument(S, Attr, Expr, UVal, Idx))
+ if (!checkUInt32Argument(S, AI, Expr, UVal, Idx))
return false;
if (UVal > (uint32_t)std::numeric_limits<int>::max()) {
@@ -293,12 +288,12 @@ static bool checkPositiveIntArgument(Sema &S, const AttrInfo& Attr, const Expr *
return true;
}
-/// \brief Diagnose mutually exclusive attributes when present on a given
+/// Diagnose mutually exclusive attributes when present on a given
/// declaration. Returns true if diagnosed.
template <typename AttrTy>
static bool checkAttrMutualExclusion(Sema &S, Decl *D, SourceRange Range,
IdentifierInfo *Ident) {
- if (AttrTy *A = D->getAttr<AttrTy>()) {
+ if (const auto *A = D->getAttr<AttrTy>()) {
S.Diag(Range.getBegin(), diag::err_attributes_are_not_compatible) << Ident
<< A;
S.Diag(A->getLocation(), diag::note_conflicting_attribute);
@@ -307,14 +302,14 @@ static bool checkAttrMutualExclusion(Sema &S, Decl *D, SourceRange Range,
return false;
}
-/// \brief Check if IdxExpr is a valid parameter index for a function or
+/// Check if IdxExpr is a valid parameter index for a function or
/// instance method D. May output an error.
///
/// \returns true if IdxExpr is a valid index.
template <typename AttrInfo>
static bool checkFunctionOrMethodParameterIndex(
- Sema &S, const Decl *D, const AttrInfo &Attr, unsigned AttrArgNum,
- const Expr *IdxExpr, uint64_t &Idx, bool AllowImplicitThis = false) {
+ Sema &S, const Decl *D, const AttrInfo &AI, unsigned AttrArgNum,
+ const Expr *IdxExpr, ParamIdx &Idx, bool CanIndexImplicitThis = false) {
assert(isFunctionOrMethodOrBlock(D));
// In C++ the implicit 'this' function parameter also counts.
@@ -328,44 +323,43 @@ static bool checkFunctionOrMethodParameterIndex(
llvm::APSInt IdxInt;
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
!IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
- S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_n_type)
- << getAttrName(Attr) << AttrArgNum << AANT_ArgumentIntegerConstant
+ S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
+ << getAttrName(AI) << AttrArgNum << AANT_ArgumentIntegerConstant
<< IdxExpr->getSourceRange();
return false;
}
- Idx = IdxInt.getLimitedValue();
- if (Idx < 1 || (!IV && Idx > NumParams)) {
- S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_out_of_bounds)
- << getAttrName(Attr) << AttrArgNum << IdxExpr->getSourceRange();
+ unsigned IdxSource = IdxInt.getLimitedValue(UINT_MAX);
+ if (IdxSource < 1 || (!IV && IdxSource > NumParams)) {
+ S.Diag(getAttrLoc(AI), diag::err_attribute_argument_out_of_bounds)
+ << getAttrName(AI) << AttrArgNum << IdxExpr->getSourceRange();
return false;
}
- Idx--; // Convert to zero-based.
- if (HasImplicitThisParam && !AllowImplicitThis) {
- if (Idx == 0) {
- S.Diag(getAttrLoc(Attr),
+ if (HasImplicitThisParam && !CanIndexImplicitThis) {
+ if (IdxSource == 1) {
+ S.Diag(getAttrLoc(AI),
diag::err_attribute_invalid_implicit_this_argument)
- << getAttrName(Attr) << IdxExpr->getSourceRange();
+ << getAttrName(AI) << IdxExpr->getSourceRange();
return false;
}
- --Idx;
}
+ Idx = ParamIdx(IdxSource, D);
return true;
}
-/// \brief Check if the argument \p ArgNum of \p Attr is a ASCII string literal.
+/// Check if the argument \p ArgNum of \p Attr is an ASCII string literal.
/// If not, emit an error and return false. If the argument is an identifier it
/// will emit an error with a fixit hint and treat it as if it were a string
/// literal.
-bool Sema::checkStringLiteralArgumentAttr(const AttributeList &Attr,
- unsigned ArgNum, StringRef &Str,
+bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
+ StringRef &Str,
SourceLocation *ArgLocation) {
// Look for identifiers. If we have one emit a hint to fix it to a literal.
- if (Attr.isArgIdent(ArgNum)) {
- IdentifierLoc *Loc = Attr.getArgAsIdent(ArgNum);
+ if (AL.isArgIdent(ArgNum)) {
+ IdentifierLoc *Loc = AL.getArgAsIdent(ArgNum);
Diag(Loc->Loc, diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentString
+ << AL.getName() << AANT_ArgumentString
<< FixItHint::CreateInsertion(Loc->Loc, "\"")
<< FixItHint::CreateInsertion(getLocForEndOfToken(Loc->Loc), "\"");
Str = Loc->Ident->getName();
@@ -375,14 +369,14 @@ bool Sema::checkStringLiteralArgumentAttr(const AttributeList &Attr,
}
// Now check for an actual string literal.
- Expr *ArgExpr = Attr.getArgAsExpr(ArgNum);
- StringLiteral *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
+ Expr *ArgExpr = AL.getArgAsExpr(ArgNum);
+ const auto *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
if (ArgLocation)
*ArgLocation = ArgExpr->getLocStart();
if (!Literal || !Literal->isAscii()) {
Diag(ArgExpr->getLocStart(), diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentString;
+ << AL.getName() << AANT_ArgumentString;
return false;
}
@@ -390,35 +384,34 @@ bool Sema::checkStringLiteralArgumentAttr(const AttributeList &Attr,
return true;
}
-/// \brief Applies the given attribute to the Decl without performing any
+/// Applies the given attribute to the Decl without performing any
/// additional semantic checking.
template <typename AttrType>
-static void handleSimpleAttribute(Sema &S, Decl *D,
- const AttributeList &Attr) {
- D->addAttr(::new (S.Context) AttrType(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+static void handleSimpleAttribute(Sema &S, Decl *D, const ParsedAttr &AL) {
+ D->addAttr(::new (S.Context) AttrType(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
template <typename AttrType>
static void handleSimpleAttributeWithExclusions(Sema &S, Decl *D,
- const AttributeList &Attr) {
- handleSimpleAttribute<AttrType>(S, D, Attr);
+ const ParsedAttr &AL) {
+ handleSimpleAttribute<AttrType>(S, D, AL);
}
-/// \brief Applies the given attribute to the Decl so long as the Decl doesn't
+/// Applies the given attribute to the Decl so long as the Decl doesn't
/// already have one of the given incompatible attributes.
template <typename AttrType, typename IncompatibleAttrType,
typename... IncompatibleAttrTypes>
static void handleSimpleAttributeWithExclusions(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (checkAttrMutualExclusion<IncompatibleAttrType>(S, D, Attr.getRange(),
- Attr.getName()))
+ const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<IncompatibleAttrType>(S, D, AL.getRange(),
+ AL.getName()))
return;
handleSimpleAttributeWithExclusions<AttrType, IncompatibleAttrTypes...>(S, D,
- Attr);
+ AL);
}
-/// \brief Check if the passed-in expression is of type int or bool.
+/// Check if the passed-in expression is of type int or bool.
static bool isIntOrBool(Expr *Exp) {
QualType QT = Exp->getType();
return QT->isBooleanType() || QT->isIntegerType();
@@ -441,17 +434,17 @@ static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
return true;
}
-/// \brief Check if passed in Decl is a pointer type.
+/// Check if passed in Decl is a pointer type.
/// Note that this function may produce an error message.
/// \return true if the Decl is a pointer type; false otherwise
static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
- const AttributeList &Attr) {
- const ValueDecl *vd = cast<ValueDecl>(D);
- QualType QT = vd->getType();
+ const ParsedAttr &AL) {
+ const auto *VD = cast<ValueDecl>(D);
+ QualType QT = VD->getType();
if (QT->isAnyPointerType())
return true;
- if (const RecordType *RT = QT->getAs<RecordType>()) {
+ if (const auto *RT = QT->getAs<RecordType>()) {
// If it's an incomplete type, it could be a smart pointer; skip it.
// (We don't want to force template instantiation if we can avoid it,
// since that would alter the order in which templates are instantiated.)
@@ -462,19 +455,19 @@ static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
return true;
}
- S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
- << Attr.getName() << QT;
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
+ << AL.getName() << QT;
return false;
}
-/// \brief Checks that the passed in QualType either is of RecordType or points
+/// Checks that the passed in QualType either is of RecordType or points
/// to RecordType. Returns the relevant RecordType, null if it does not exist.
static const RecordType *getRecordType(QualType QT) {
- if (const RecordType *RT = QT->getAs<RecordType>())
+ if (const auto *RT = QT->getAs<RecordType>())
return RT;
// Now check if we point to a record type.
- if (const PointerType *PT = QT->getAs<PointerType>())
+ if (const auto *PT = QT->getAs<PointerType>())
return PT->getPointeeType()->getAs<RecordType>();
return nullptr;
@@ -501,7 +494,7 @@ static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
return true;
// Else check if any base classes have a capability.
- if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(RD)) {
CXXBasePaths BPaths(false, false);
if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &) {
const auto *Type = BS->getType()->getAs<RecordType>();
@@ -559,18 +552,18 @@ static bool isCapabilityExpr(Sema &S, const Expr *Ex) {
return typeHasCapability(S, Ex->getType());
}
-/// \brief Checks that all attribute arguments, starting from Sidx, resolve to
+/// Checks that all attribute arguments, starting from Sidx, resolve to
/// a capability object.
/// \param Sidx The attribute argument index to start checking with.
/// \param ParamIdxOk Whether an argument can be indexing into a function
/// parameter list.
static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
- const AttributeList &Attr,
+ const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args,
int Sidx = 0,
bool ParamIdxOk = false) {
- for (unsigned Idx = Sidx; Idx < Attr.getNumArgs(); ++Idx) {
- Expr *ArgExp = Attr.getArgAsExpr(Idx);
+ for (unsigned Idx = Sidx; Idx < AL.getNumArgs(); ++Idx) {
+ Expr *ArgExp = AL.getArgAsExpr(Idx);
if (ArgExp->isTypeDependent()) {
// FIXME -- need to check this again on template instantiation
@@ -578,7 +571,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
continue;
}
- if (StringLiteral *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
+ if (const auto *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
if (StrLit->getLength() == 0 ||
(StrLit->isAscii() && StrLit->getString() == StringRef("*"))) {
// Pass empty strings to the analyzer without warnings.
@@ -589,8 +582,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// We allow constant strings to be used as a placeholder for expressions
// that are not valid C++ syntax, but warn that they are ignored.
- S.Diag(Attr.getLoc(), diag::warn_thread_attribute_ignored) <<
- Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_ignored) << AL.getName();
Args.push_back(ArgExp);
continue;
}
@@ -599,9 +591,9 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// A pointer to member expression of the form &MyClass::mu is treated
// specially -- we need to look at the type of the member.
- if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(ArgExp))
+ if (const auto *UOp = dyn_cast<UnaryOperator>(ArgExp))
if (UOp->getOpcode() == UO_AddrOf)
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(UOp->getSubExpr()))
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(UOp->getSubExpr()))
if (DRE->getDecl()->isCXXInstanceMember())
ArgTy = DRE->getDecl()->getType();
@@ -610,16 +602,16 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// Now check if we index into a record type function param.
if(!RT && ParamIdxOk) {
- FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- IntegerLiteral *IL = dyn_cast<IntegerLiteral>(ArgExp);
+ const auto *FD = dyn_cast<FunctionDecl>(D);
+ const auto *IL = dyn_cast<IntegerLiteral>(ArgExp);
if(FD && IL) {
unsigned int NumParams = FD->getNumParams();
llvm::APInt ArgValue = IL->getValue();
uint64_t ParamIdxFromOne = ArgValue.getZExtValue();
uint64_t ParamIdxFromZero = ParamIdxFromOne - 1;
- if(!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_range)
- << Attr.getName() << Idx + 1 << NumParams;
+ if (!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_range)
+ << AL.getName() << Idx + 1 << NumParams;
continue;
}
ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType();
@@ -631,8 +623,8 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// capability may be on the type, and the expression is a capability
// boolean logic expression. E.g.: requires_capability(A || B && !C)
if (!typeHasCapability(S, ArgTy) && !isCapabilityExpr(S, ArgExp))
- S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
- << Attr.getName() << ArgTy;
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
+ << AL.getName() << ArgTy;
Args.push_back(ArgExp);
}
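
[Editorial sketch, not part of the patch: the thread-safety attribute arguments this helper vets, with invented names; compile with -Wthread-safety to run the analysis. Each argument must resolve to a capability object, or the code above emits warn_thread_attribute_argument_not_lockable.]

    struct __attribute__((capability("mutex"))) Mutex {
      void Lock() __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };
    Mutex Mu;
    int Data __attribute__((guarded_by(Mu))); // Mu resolves to a capability
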
@@ -642,22 +634,20 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// Attribute Implementations
//===----------------------------------------------------------------------===//
-static void handlePtGuardedVarAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!threadSafetyCheckIsPointer(S, D, Attr))
+static void handlePtGuardedVarAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!threadSafetyCheckIsPointer(S, D, AL))
return;
D->addAttr(::new (S.Context)
- PtGuardedVarAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ PtGuardedVarAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static bool checkGuardedByAttrCommon(Sema &S, Decl *D,
- const AttributeList &Attr,
- Expr* &Arg) {
- SmallVector<Expr*, 1> Args;
+static bool checkGuardedByAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
+ Expr *&Arg) {
+ SmallVector<Expr *, 1> Args;
// check that all arguments are lockable objects
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args);
unsigned Size = Args.size();
if (Size != 1)
return false;
@@ -667,273 +657,239 @@ static bool checkGuardedByAttrCommon(Sema &S, Decl *D,
return true;
}
-static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleGuardedByAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *Arg = nullptr;
- if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
+ if (!checkGuardedByAttrCommon(S, D, AL, Arg))
return;
- D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) GuardedByAttr(
+ AL.getRange(), S.Context, Arg, AL.getAttributeSpellingListIndex()));
}
-static void handlePtGuardedByAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handlePtGuardedByAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *Arg = nullptr;
- if (!checkGuardedByAttrCommon(S, D, Attr, Arg))
+ if (!checkGuardedByAttrCommon(S, D, AL, Arg))
return;
- if (!threadSafetyCheckIsPointer(S, D, Attr))
+ if (!threadSafetyCheckIsPointer(S, D, AL))
return;
- D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
- S.Context, Arg,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) PtGuardedByAttr(
+ AL.getRange(), S.Context, Arg, AL.getAttributeSpellingListIndex()));
}
-static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D,
- const AttributeList &Attr,
+static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return false;
// Check that this attribute only applies to lockable types.
QualType QT = cast<ValueDecl>(D)->getType();
if (!QT->isDependentType() && !typeHasCapability(S, QT)) {
- S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
- << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
+ << AL.getName();
return false;
}
// Check that all arguments are lockable objects.
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args);
if (Args.empty())
return false;
return true;
}
-static void handleAcquiredAfterAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- SmallVector<Expr*, 1> Args;
- if (!checkAcquireOrderAttrCommon(S, D, Attr, Args))
+static void handleAcquiredAfterAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ SmallVector<Expr *, 1> Args;
+ if (!checkAcquireOrderAttrCommon(S, D, AL, Args))
return;
Expr **StartArg = &Args[0];
- D->addAttr(::new (S.Context)
- AcquiredAfterAttr(Attr.getRange(), S.Context,
- StartArg, Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) AcquiredAfterAttr(
+ AL.getRange(), S.Context, StartArg, Args.size(),
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAcquiredBeforeAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- SmallVector<Expr*, 1> Args;
- if (!checkAcquireOrderAttrCommon(S, D, Attr, Args))
+static void handleAcquiredBeforeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ SmallVector<Expr *, 1> Args;
+ if (!checkAcquireOrderAttrCommon(S, D, AL, Args))
return;
Expr **StartArg = &Args[0];
- D->addAttr(::new (S.Context)
- AcquiredBeforeAttr(Attr.getRange(), S.Context,
- StartArg, Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) AcquiredBeforeAttr(
+ AL.getRange(), S.Context, StartArg, Args.size(),
+ AL.getAttributeSpellingListIndex()));
}
-static bool checkLockFunAttrCommon(Sema &S, Decl *D,
- const AttributeList &Attr,
+static bool checkLockFunAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args) {
// zero or more arguments ok
// check that all arguments are lockable objects
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args, 0, /*ParamIdxOk=*/true);
return true;
}
-static void handleAssertSharedLockAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- SmallVector<Expr*, 1> Args;
- if (!checkLockFunAttrCommon(S, D, Attr, Args))
+static void handleAssertSharedLockAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ SmallVector<Expr *, 1> Args;
+ if (!checkLockFunAttrCommon(S, D, AL, Args))
return;
unsigned Size = Args.size();
Expr **StartArg = Size == 0 ? nullptr : &Args[0];
D->addAttr(::new (S.Context)
- AssertSharedLockAttr(Attr.getRange(), S.Context, StartArg, Size,
- Attr.getAttributeSpellingListIndex()));
+ AssertSharedLockAttr(AL.getRange(), S.Context, StartArg, Size,
+ AL.getAttributeSpellingListIndex()));
}
static void handleAssertExclusiveLockAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- SmallVector<Expr*, 1> Args;
- if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ const ParsedAttr &AL) {
+ SmallVector<Expr *, 1> Args;
+ if (!checkLockFunAttrCommon(S, D, AL, Args))
return;
unsigned Size = Args.size();
Expr **StartArg = Size == 0 ? nullptr : &Args[0];
- D->addAttr(::new (S.Context)
- AssertExclusiveLockAttr(Attr.getRange(), S.Context,
- StartArg, Size,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) AssertExclusiveLockAttr(
+ AL.getRange(), S.Context, StartArg, Size,
+ AL.getAttributeSpellingListIndex()));
}
-/// \brief Checks to be sure that the given parameter number is in bounds, and is
-/// an integral type. Will emit appropriate diagnostics if this returns
+/// Checks to be sure that the given parameter number is in bounds, and
+/// is an integral type. Will emit appropriate diagnostics if this returns
/// false.
///
-/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
-/// to actually retrieve the argument, so it's base-0.
+/// AttrArgNo is used to actually retrieve the argument, so it's base-0.
template <typename AttrInfo>
static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
- const AttrInfo &Attr, Expr *AttrArg,
- unsigned FuncParamNo, unsigned AttrArgNo,
- bool AllowDependentType = false) {
- uint64_t Idx;
- if (!checkFunctionOrMethodParameterIndex(S, FD, Attr, FuncParamNo, AttrArg,
+ const AttrInfo &AI, unsigned AttrArgNo) {
+ assert(AI.isArgExpr(AttrArgNo) && "Expected expression argument");
+ Expr *AttrArg = AI.getArgAsExpr(AttrArgNo);
+ ParamIdx Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, FD, AI, AttrArgNo + 1, AttrArg,
Idx))
return false;
- const ParmVarDecl *Param = FD->getParamDecl(Idx);
- if (AllowDependentType && Param->getType()->isDependentType())
- return true;
+ const ParmVarDecl *Param = FD->getParamDecl(Idx.getASTIndex());
if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
SourceLocation SrcLoc = AttrArg->getLocStart();
S.Diag(SrcLoc, diag::err_attribute_integers_only)
- << getAttrName(Attr) << Param->getSourceRange();
+ << getAttrName(AI) << Param->getSourceRange();
return false;
}
return true;
}
-/// \brief Checks to be sure that the given parameter number is in bounds, and is
-/// an integral type. Will emit appropriate diagnostics if this returns false.
-///
-/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
-/// to actually retrieve the argument, so it's base-0.
-static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
- const AttributeList &Attr,
- unsigned FuncParamNo, unsigned AttrArgNo,
- bool AllowDependentType = false) {
- assert(Attr.isArgExpr(AttrArgNo) && "Expected expression argument");
- return checkParamIsIntegerType(S, FD, Attr, Attr.getArgAsExpr(AttrArgNo),
- FuncParamNo, AttrArgNo, AllowDependentType);
-}
-
-static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
- !checkAttributeAtMostNumArgs(S, Attr, 2))
+static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1) ||
+ !checkAttributeAtMostNumArgs(S, AL, 2))
return;
const auto *FD = cast<FunctionDecl>(D);
if (!FD->getReturnType()->isPointerType()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
- << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only)
+ << AL.getName();
return;
}
- const Expr *SizeExpr = Attr.getArgAsExpr(0);
- int SizeArgNo;
+ const Expr *SizeExpr = AL.getArgAsExpr(0);
+ int SizeArgNoVal;
// Parameter indices are 1-indexed, hence Index=1
- if (!checkPositiveIntArgument(S, Attr, SizeExpr, SizeArgNo, /*Index=*/1))
+ if (!checkPositiveIntArgument(S, AL, SizeExpr, SizeArgNoVal, /*Index=*/1))
return;
-
- if (!checkParamIsIntegerType(S, FD, Attr, SizeArgNo, /*AttrArgNo=*/0))
+ if (!checkParamIsIntegerType(S, FD, AL, /*AttrArgNo=*/0))
return;
+ ParamIdx SizeArgNo(SizeArgNoVal, D);
- // Args are 1-indexed, so 0 implies that the arg was not present
- int NumberArgNo = 0;
- if (Attr.getNumArgs() == 2) {
- const Expr *NumberExpr = Attr.getArgAsExpr(1);
+ ParamIdx NumberArgNo;
+ if (AL.getNumArgs() == 2) {
+ const Expr *NumberExpr = AL.getArgAsExpr(1);
+ int Val;
// Parameter indices are 1-based, hence Index=2
- if (!checkPositiveIntArgument(S, Attr, NumberExpr, NumberArgNo,
- /*Index=*/2))
+ if (!checkPositiveIntArgument(S, AL, NumberExpr, Val, /*Index=*/2))
return;
-
- if (!checkParamIsIntegerType(S, FD, Attr, NumberArgNo, /*AttrArgNo=*/1))
+ if (!checkParamIsIntegerType(S, FD, AL, /*AttrArgNo=*/1))
return;
+ NumberArgNo = ParamIdx(Val, D);
}
- D->addAttr(::new (S.Context) AllocSizeAttr(
- Attr.getRange(), S.Context, SizeArgNo, NumberArgNo,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context)
+ AllocSizeAttr(AL.getRange(), S.Context, SizeArgNo, NumberArgNo,
+ AL.getAttributeSpellingListIndex()));
}
-static bool checkTryLockFunAttrCommon(Sema &S, Decl *D,
- const AttributeList &Attr,
+static bool checkTryLockFunAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return false;
- if (!isIntOrBool(Attr.getArgAsExpr(0))) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 1 << AANT_ArgumentIntOrBool;
+ if (!isIntOrBool(AL.getArgAsExpr(0))) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIntOrBool;
return false;
}
// check that all arguments are lockable objects
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 1);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args, 1);
return true;
}
static void handleSharedTrylockFunctionAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
SmallVector<Expr*, 2> Args;
- if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ if (!checkTryLockFunAttrCommon(S, D, AL, Args))
return;
- D->addAttr(::new (S.Context)
- SharedTrylockFunctionAttr(Attr.getRange(), S.Context,
- Attr.getArgAsExpr(0),
- Args.data(), Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) SharedTrylockFunctionAttr(
+ AL.getRange(), S.Context, AL.getArgAsExpr(0), Args.data(), Args.size(),
+ AL.getAttributeSpellingListIndex()));
}
static void handleExclusiveTrylockFunctionAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
SmallVector<Expr*, 2> Args;
- if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ if (!checkTryLockFunAttrCommon(S, D, AL, Args))
return;
D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr(
- Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Args.data(),
- Args.size(), Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getArgAsExpr(0), Args.data(),
+ Args.size(), AL.getAttributeSpellingListIndex()));
}
-static void handleLockReturnedAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleLockReturnedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// check that the argument is lockable object
SmallVector<Expr*, 1> Args;
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args);
unsigned Size = Args.size();
if (Size == 0)
return;
D->addAttr(::new (S.Context)
- LockReturnedAttr(Attr.getRange(), S.Context, Args[0],
- Attr.getAttributeSpellingListIndex()));
+ LockReturnedAttr(AL.getRange(), S.Context, Args[0],
+ AL.getAttributeSpellingListIndex()));
}
-static void handleLocksExcludedAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+static void handleLocksExcludedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
// check that all arguments are lockable objects
SmallVector<Expr*, 1> Args;
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args);
unsigned Size = Args.size();
if (Size == 0)
return;
Expr **StartArg = &Args[0];
D->addAttr(::new (S.Context)
- LocksExcludedAttr(Attr.getRange(), S.Context, StartArg, Size,
- Attr.getAttributeSpellingListIndex()));
+ LocksExcludedAttr(AL.getRange(), S.Context, StartArg, Size,
+ AL.getAttributeSpellingListIndex()));
}
-static bool checkFunctionConditionAttr(Sema &S, Decl *D,
- const AttributeList &Attr,
+static bool checkFunctionConditionAttr(Sema &S, Decl *D, const ParsedAttr &AL,
Expr *&Cond, StringRef &Msg) {
- Cond = Attr.getArgAsExpr(0);
+ Cond = AL.getArgAsExpr(0);
if (!Cond->isTypeDependent()) {
ExprResult Converted = S.PerformContextuallyConvertToBool(Cond);
if (Converted.isInvalid())
@@ -941,7 +897,7 @@ static bool checkFunctionConditionAttr(Sema &S, Decl *D,
Cond = Converted.get();
}
- if (!S.checkStringLiteralArgumentAttr(Attr, 1, Msg))
+ if (!S.checkStringLiteralArgumentAttr(AL, 1, Msg))
return false;
if (Msg.empty())
@@ -951,8 +907,8 @@ static bool checkFunctionConditionAttr(Sema &S, Decl *D,
if (isa<FunctionDecl>(D) && !Cond->isValueDependent() &&
!Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D),
Diags)) {
- S.Diag(Attr.getLoc(), diag::err_attr_cond_never_constant_expr)
- << Attr.getName();
+ S.Diag(AL.getLoc(), diag::err_attr_cond_never_constant_expr)
+ << AL.getName();
for (const PartialDiagnosticAt &PDiag : Diags)
S.Diag(PDiag.first, PDiag.second);
return false;
@@ -960,15 +916,15 @@ static bool checkFunctionConditionAttr(Sema &S, Decl *D,
return true;
}
-static void handleEnableIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- S.Diag(Attr.getLoc(), diag::ext_clang_enable_if);
+static void handleEnableIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ S.Diag(AL.getLoc(), diag::ext_clang_enable_if);
Expr *Cond;
StringRef Msg;
- if (checkFunctionConditionAttr(S, D, Attr, Cond, Msg))
+ if (checkFunctionConditionAttr(S, D, AL, Cond, Msg))
D->addAttr(::new (S.Context)
- EnableIfAttr(Attr.getRange(), S.Context, Cond, Msg,
- Attr.getAttributeSpellingListIndex()));
+ EnableIfAttr(AL.getRange(), S.Context, Cond, Msg,
+ AL.getAttributeSpellingListIndex()));
}
namespace {
@@ -1017,21 +973,21 @@ public:
};
}
-static void handleDiagnoseIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- S.Diag(Attr.getLoc(), diag::ext_clang_diagnose_if);
+static void handleDiagnoseIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ S.Diag(AL.getLoc(), diag::ext_clang_diagnose_if);
Expr *Cond;
StringRef Msg;
- if (!checkFunctionConditionAttr(S, D, Attr, Cond, Msg))
+ if (!checkFunctionConditionAttr(S, D, AL, Cond, Msg))
return;
StringRef DiagTypeStr;
- if (!S.checkStringLiteralArgumentAttr(Attr, 2, DiagTypeStr))
+ if (!S.checkStringLiteralArgumentAttr(AL, 2, DiagTypeStr))
return;
DiagnoseIfAttr::DiagnosticType DiagType;
if (!DiagnoseIfAttr::ConvertStrToDiagnosticType(DiagTypeStr, DiagType)) {
- S.Diag(Attr.getArgAsExpr(2)->getLocStart(),
+ S.Diag(AL.getArgAsExpr(2)->getLocStart(),
diag::err_diagnose_if_invalid_diagnostic_type);
return;
}
@@ -1040,21 +996,20 @@ static void handleDiagnoseIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (const auto *FD = dyn_cast<FunctionDecl>(D))
ArgDependent = ArgumentDependenceChecker(FD).referencesArgs(Cond);
D->addAttr(::new (S.Context) DiagnoseIfAttr(
- Attr.getRange(), S.Context, Cond, Msg, DiagType, ArgDependent, cast<NamedDecl>(D),
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, Cond, Msg, DiagType, ArgDependent,
+ cast<NamedDecl>(D), AL.getAttributeSpellingListIndex()));
}
-static void handlePassObjectSizeAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->hasAttr<PassObjectSizeAttr>()) {
S.Diag(D->getLocStart(), diag::err_attribute_only_once_per_parameter)
- << Attr.getName();
+ << AL.getName();
return;
}
- Expr *E = Attr.getArgAsExpr(0);
+ Expr *E = AL.getArgAsExpr(0);
uint32_t Type;
- if (!checkUInt32Argument(S, Attr, E, Type, /*Idx=*/1))
+ if (!checkUInt32Argument(S, AL, E, Type, /*Idx=*/1))
return;
// pass_object_size's argument is passed in as the second argument of
@@ -1062,7 +1017,7 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D,
// argument; namely, it must be in the range [0, 3].
if (Type > 3) {
S.Diag(E->getLocStart(), diag::err_attribute_argument_outof_range)
- << Attr.getName() << 0 << 3 << E->getSourceRange();
+ << AL.getName() << 0 << 3 << E->getSourceRange();
return;
}
@@ -1072,45 +1027,44 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D,
// definition, so we defer the constness check until later.
if (!cast<ParmVarDecl>(D)->getType()->isPointerType()) {
S.Diag(D->getLocStart(), diag::err_attribute_pointers_only)
- << Attr.getName() << 1;
+ << AL.getName() << 1;
return;
}
- D->addAttr(::new (S.Context)
- PassObjectSizeAttr(Attr.getRange(), S.Context, (int)Type,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) PassObjectSizeAttr(
+ AL.getRange(), S.Context, (int)Type, AL.getAttributeSpellingListIndex()));
}
-static void handleConsumableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleConsumableAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ConsumableAttr::ConsumedState DefaultState;
- if (Attr.isArgIdent(0)) {
- IdentifierLoc *IL = Attr.getArgAsIdent(0);
+ if (AL.isArgIdent(0)) {
+ IdentifierLoc *IL = AL.getArgAsIdent(0);
if (!ConsumableAttr::ConvertStrToConsumedState(IL->Ident->getName(),
DefaultState)) {
S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << IL->Ident;
+ << AL.getName() << IL->Ident;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL.getName() << AANT_ArgumentIdentifier;
return;
}
D->addAttr(::new (S.Context)
- ConsumableAttr(Attr.getRange(), S.Context, DefaultState,
- Attr.getAttributeSpellingListIndex()));
+ ConsumableAttr(AL.getRange(), S.Context, DefaultState,
+ AL.getAttributeSpellingListIndex()));
}
static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
ASTContext &CurrContext = S.getASTContext();
QualType ThisType = MD->getThisType(CurrContext)->getPointeeType();
if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) {
if (!RD->hasAttr<ConsumableAttr>()) {
- S.Diag(Attr.getLoc(), diag::warn_attr_on_unconsumable_class) <<
+ S.Diag(AL.getLoc(), diag::warn_attr_on_unconsumable_class) <<
RD->getNameAsString();
return false;
@@ -1120,33 +1074,32 @@ static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
return true;
}
-static void handleCallableWhenAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+static void handleCallableWhenAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
- if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
+ if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), AL))
return;
SmallVector<CallableWhenAttr::ConsumedState, 3> States;
- for (unsigned ArgIndex = 0; ArgIndex < Attr.getNumArgs(); ++ArgIndex) {
+ for (unsigned ArgIndex = 0; ArgIndex < AL.getNumArgs(); ++ArgIndex) {
CallableWhenAttr::ConsumedState CallableState;
StringRef StateString;
SourceLocation Loc;
- if (Attr.isArgIdent(ArgIndex)) {
- IdentifierLoc *Ident = Attr.getArgAsIdent(ArgIndex);
+ if (AL.isArgIdent(ArgIndex)) {
+ IdentifierLoc *Ident = AL.getArgAsIdent(ArgIndex);
StateString = Ident->Ident->getName();
Loc = Ident->Loc;
} else {
- if (!S.checkStringLiteralArgumentAttr(Attr, ArgIndex, StateString, &Loc))
+ if (!S.checkStringLiteralArgumentAttr(AL, ArgIndex, StateString, &Loc))
return;
}
if (!CallableWhenAttr::ConvertStrToConsumedState(StateString,
CallableState)) {
S.Diag(Loc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << StateString;
+ << AL.getName() << StateString;
return;
}
@@ -1154,27 +1107,26 @@ static void handleCallableWhenAttr(Sema &S, Decl *D,
}
D->addAttr(::new (S.Context)
- CallableWhenAttr(Attr.getRange(), S.Context, States.data(),
- States.size(), Attr.getAttributeSpellingListIndex()));
+ CallableWhenAttr(AL.getRange(), S.Context, States.data(),
+ States.size(), AL.getAttributeSpellingListIndex()));
}
-static void handleParamTypestateAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleParamTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ParamTypestateAttr::ConsumedState ParamState;
- if (Attr.isArgIdent(0)) {
- IdentifierLoc *Ident = Attr.getArgAsIdent(0);
+ if (AL.isArgIdent(0)) {
+ IdentifierLoc *Ident = AL.getArgAsIdent(0);
StringRef StateString = Ident->Ident->getName();
if (!ParamTypestateAttr::ConvertStrToConsumedState(StateString,
ParamState)) {
S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << StateString;
+ << AL.getName() << StateString;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
- Attr.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
+ AL.getName() << AANT_ArgumentIdentifier;
return;
}
@@ -1185,31 +1137,30 @@ static void handleParamTypestateAttr(Sema &S, Decl *D,
//const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
//
//if (!RD || !RD->hasAttr<ConsumableAttr>()) {
- // S.Diag(Attr.getLoc(), diag::warn_return_state_for_unconsumable_type) <<
+ // S.Diag(AL.getLoc(), diag::warn_return_state_for_unconsumable_type) <<
// ReturnType.getAsString();
// return;
//}
D->addAttr(::new (S.Context)
- ParamTypestateAttr(Attr.getRange(), S.Context, ParamState,
- Attr.getAttributeSpellingListIndex()));
+ ParamTypestateAttr(AL.getRange(), S.Context, ParamState,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleReturnTypestateAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleReturnTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ReturnTypestateAttr::ConsumedState ReturnState;
- if (Attr.isArgIdent(0)) {
- IdentifierLoc *IL = Attr.getArgAsIdent(0);
+ if (AL.isArgIdent(0)) {
+ IdentifierLoc *IL = AL.getArgAsIdent(0);
if (!ReturnTypestateAttr::ConvertStrToConsumedState(IL->Ident->getName(),
ReturnState)) {
S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << IL->Ident;
+ << AL.getName() << IL->Ident;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
- Attr.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
+ AL.getName() << AANT_ArgumentIdentifier;
return;
}
@@ -1237,72 +1188,70 @@ static void handleReturnTypestateAttr(Sema &S, Decl *D,
// ReturnType.getAsString();
// return;
//}
-
+
D->addAttr(::new (S.Context)
- ReturnTypestateAttr(Attr.getRange(), S.Context, ReturnState,
- Attr.getAttributeSpellingListIndex()));
+ ReturnTypestateAttr(AL.getRange(), S.Context, ReturnState,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleSetTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
+static void handleSetTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), AL))
return;
SetTypestateAttr::ConsumedState NewState;
- if (Attr.isArgIdent(0)) {
- IdentifierLoc *Ident = Attr.getArgAsIdent(0);
+ if (AL.isArgIdent(0)) {
+ IdentifierLoc *Ident = AL.getArgAsIdent(0);
StringRef Param = Ident->Ident->getName();
if (!SetTypestateAttr::ConvertStrToConsumedState(Param, NewState)) {
S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << Param;
+ << AL.getName() << Param;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
- Attr.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
+ AL.getName() << AANT_ArgumentIdentifier;
return;
}
D->addAttr(::new (S.Context)
- SetTypestateAttr(Attr.getRange(), S.Context, NewState,
- Attr.getAttributeSpellingListIndex()));
+ SetTypestateAttr(AL.getRange(), S.Context, NewState,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleTestTypestateAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr))
+static void handleTestTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), AL))
return;
TestTypestateAttr::ConsumedState TestState;
- if (Attr.isArgIdent(0)) {
- IdentifierLoc *Ident = Attr.getArgAsIdent(0);
+ if (AL.isArgIdent(0)) {
+ IdentifierLoc *Ident = AL.getArgAsIdent(0);
StringRef Param = Ident->Ident->getName();
if (!TestTypestateAttr::ConvertStrToConsumedState(Param, TestState)) {
S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << Param;
+ << AL.getName() << Param;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) <<
- Attr.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
+ AL.getName() << AANT_ArgumentIdentifier;
return;
}
D->addAttr(::new (S.Context)
- TestTypestateAttr(Attr.getRange(), S.Context, TestState,
- Attr.getAttributeSpellingListIndex()));
+ TestTypestateAttr(AL.getRange(), S.Context, TestState,
+ AL.getAttributeSpellingListIndex()));
}
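// Illustrative usage, not part of this change: the four typestate handlers
// above back the consumed-analysis attributes, e.g.:
//   class __attribute__((consumable(unconsumed))) File {
//     void close() __attribute__((set_typestate(consumed)));
//     bool isOpen() const __attribute__((test_typestate(unconsumed)));
//   };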
-static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
- const AttributeList &Attr) {
+static void handleExtVectorTypeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
  // Remember this typedef decl; we will need it later for diagnostics.
S.ExtVectorDecls.push_back(cast<TypedefNameDecl>(D));
}
-static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (TagDecl *TD = dyn_cast<TagDecl>(D))
- TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
- else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+static void handlePackedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (auto *TD = dyn_cast<TagDecl>(D))
+ TD->addAttr(::new (S.Context) PackedAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
+ else if (auto *FD = dyn_cast<FieldDecl>(D)) {
bool BitfieldByteAligned = (!FD->getType()->isDependentType() &&
!FD->getType()->isIncompleteType() &&
FD->isBitField() &&
@@ -1311,81 +1260,80 @@ static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.getASTContext().getTargetInfo().getTriple().isPS4()) {
if (BitfieldByteAligned)
// The PS4 target needs to maintain ABI backwards compatibility.
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
- << Attr.getName() << FD->getType();
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
+ << AL.getName() << FD->getType();
else
FD->addAttr(::new (S.Context) PackedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
} else {
    // Warn about the changed offset in newer compiler versions.
if (BitfieldByteAligned)
- S.Diag(Attr.getLoc(), diag::warn_attribute_packed_for_bitfield);
+ S.Diag(AL.getLoc(), diag::warn_attribute_packed_for_bitfield);
FD->addAttr(::new (S.Context) PackedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
} else
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL.getName();
}
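// Illustrative usage, not part of this change: handlePackedAttr accepts both
// tag and field declarations, e.g.:
//   struct S { char c; int i __attribute__((packed)); };
// On an already byte-aligned bit-field, the PS4 path above keeps the old ABI
// and warns; other targets warn about the changed offset and apply it.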
-static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) {
+static bool checkIBOutletCommon(Sema &S, Decl *D, const ParsedAttr &AL) {
// The IBOutlet/IBOutletCollection attributes only apply to instance
// variables or properties of Objective-C classes. The outlet must also
// have an object reference type.
- if (const ObjCIvarDecl *VD = dyn_cast<ObjCIvarDecl>(D)) {
+ if (const auto *VD = dyn_cast<ObjCIvarDecl>(D)) {
if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
- S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
- << Attr.getName() << VD->getType() << 0;
+ S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
+ << AL.getName() << VD->getType() << 0;
return false;
}
}
- else if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
- S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
- << Attr.getName() << PD->getType() << 1;
+ S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
+ << AL.getName() << PD->getType() << 1;
return false;
}
}
else {
- S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_iboutlet) << AL.getName();
return false;
}
return true;
}
-static void handleIBOutlet(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!checkIBOutletCommon(S, D, Attr))
+static void handleIBOutlet(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkIBOutletCommon(S, D, AL))
return;
D->addAttr(::new (S.Context)
- IBOutletAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ IBOutletAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleIBOutletCollection(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleIBOutletCollection(Sema &S, Decl *D, const ParsedAttr &AL) {
// The iboutletcollection attribute can have zero or one arguments.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ if (AL.getNumArgs() > 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL.getName() << 1;
return;
}
- if (!checkIBOutletCommon(S, D, Attr))
+ if (!checkIBOutletCommon(S, D, AL))
return;
ParsedType PT;
- if (Attr.hasParsedType())
- PT = Attr.getTypeArg();
+ if (AL.hasParsedType())
+ PT = AL.getTypeArg();
else {
- PT = S.getTypeName(S.Context.Idents.get("NSObject"), Attr.getLoc(),
+ PT = S.getTypeName(S.Context.Idents.get("NSObject"), AL.getLoc(),
S.getScopeForContext(D->getDeclContext()->getParent()));
if (!PT) {
- S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << "NSObject";
+ S.Diag(AL.getLoc(), diag::err_iboutletcollection_type) << "NSObject";
return;
}
}
@@ -1393,22 +1341,22 @@ static void handleIBOutletCollection(Sema &S, Decl *D,
TypeSourceInfo *QTLoc = nullptr;
QualType QT = S.GetTypeFromParser(PT, &QTLoc);
if (!QTLoc)
- QTLoc = S.Context.getTrivialTypeSourceInfo(QT, Attr.getLoc());
+ QTLoc = S.Context.getTrivialTypeSourceInfo(QT, AL.getLoc());
// Diagnose use of non-object type in iboutletcollection attribute.
  // FIXME: GNU attribute extension ignores use of builtin types in
// attributes. So, __attribute__((iboutletcollection(char))) will be
// treated as __attribute__((iboutletcollection())).
if (!QT->isObjCIdType() && !QT->isObjCObjectType()) {
- S.Diag(Attr.getLoc(),
+ S.Diag(AL.getLoc(),
QT->isBuiltinType() ? diag::err_iboutletcollection_builtintype
: diag::err_iboutletcollection_type) << QT;
return;
}
D->addAttr(::new (S.Context)
- IBOutletCollectionAttr(Attr.getRange(), S.Context, QTLoc,
- Attr.getAttributeSpellingListIndex()));
+ IBOutletCollectionAttr(AL.getRange(), S.Context, QTLoc,
+ AL.getAttributeSpellingListIndex()));
}
bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
@@ -1435,35 +1383,36 @@ bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) {
return T->isAnyPointerType() || T->isBlockPointerType();
}
-static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr,
+static bool attrNonNullArgCheck(Sema &S, QualType T, const ParsedAttr &AL,
SourceRange AttrParmRange,
SourceRange TypeRange,
bool isReturnValue = false) {
if (!S.isValidPointerAttrType(T)) {
if (isReturnValue)
- S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
- << Attr.getName() << AttrParmRange << TypeRange;
+ S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only)
+ << AL.getName() << AttrParmRange << TypeRange;
else
- S.Diag(Attr.getLoc(), diag::warn_attribute_pointers_only)
- << Attr.getName() << AttrParmRange << TypeRange << 0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_pointers_only)
+ << AL.getName() << AttrParmRange << TypeRange << 0;
return false;
}
return true;
}
-static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- SmallVector<unsigned, 8> NonNullArgs;
- for (unsigned I = 0; I < Attr.getNumArgs(); ++I) {
- Expr *Ex = Attr.getArgAsExpr(I);
- uint64_t Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, Attr, I + 1, Ex, Idx))
+static void handleNonNullAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ SmallVector<ParamIdx, 8> NonNullArgs;
+ for (unsigned I = 0; I < AL.getNumArgs(); ++I) {
+ Expr *Ex = AL.getArgAsExpr(I);
+ ParamIdx Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, I + 1, Ex, Idx))
return;
// Is the function argument a pointer type?
- if (Idx < getFunctionOrMethodNumParams(D) &&
- !attrNonNullArgCheck(S, getFunctionOrMethodParamType(D, Idx), Attr,
- Ex->getSourceRange(),
- getFunctionOrMethodParamRange(D, Idx)))
+ if (Idx.getASTIndex() < getFunctionOrMethodNumParams(D) &&
+ !attrNonNullArgCheck(
+ S, getFunctionOrMethodParamType(D, Idx.getASTIndex()), AL,
+ Ex->getSourceRange(),
+ getFunctionOrMethodParamRange(D, Idx.getASTIndex())))
continue;
NonNullArgs.push_back(Idx);
@@ -1473,7 +1422,7 @@ static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// arguments have a nonnull attribute; warn if there aren't any. Skip this
// check if the attribute came from a macro expansion or a template
// instantiation.
- if (NonNullArgs.empty() && Attr.getLoc().isFileID() &&
+ if (NonNullArgs.empty() && AL.getLoc().isFileID() &&
!S.inTemplateInstantiation()) {
bool AnyPointers = isFunctionOrMethodVariadic(D);
for (unsigned I = 0, E = getFunctionOrMethodNumParams(D);
@@ -1484,80 +1433,77 @@ static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
if (!AnyPointers)
- S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
+ S.Diag(AL.getLoc(), diag::warn_attribute_nonnull_no_pointers);
}
- unsigned *Start = NonNullArgs.data();
+ ParamIdx *Start = NonNullArgs.data();
unsigned Size = NonNullArgs.size();
llvm::array_pod_sort(Start, Start + Size);
D->addAttr(::new (S.Context)
- NonNullAttr(Attr.getRange(), S.Context, Start, Size,
- Attr.getAttributeSpellingListIndex()));
+ NonNullAttr(AL.getRange(), S.Context, Start, Size,
+ AL.getAttributeSpellingListIndex()));
}
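// Illustrative usage, not part of this change: the indexes passed to nonnull
// are 1-based source positions and must name pointer parameters, e.g.
// (copy_buf is a hypothetical name):
//   void *copy_buf(void *dst, const void *src, unsigned n)
//       __attribute__((nonnull(1, 2)));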
static void handleNonNullAttrParameter(Sema &S, ParmVarDecl *D,
- const AttributeList &Attr) {
- if (Attr.getNumArgs() > 0) {
+ const ParsedAttr &AL) {
+ if (AL.getNumArgs() > 0) {
if (D->getFunctionType()) {
- handleNonNullAttr(S, D, Attr);
+ handleNonNullAttr(S, D, AL);
} else {
- S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_parm_no_args)
+ S.Diag(AL.getLoc(), diag::warn_attribute_nonnull_parm_no_args)
<< D->getSourceRange();
}
return;
}
// Is the argument a pointer type?
- if (!attrNonNullArgCheck(S, D->getType(), Attr, SourceRange(),
+ if (!attrNonNullArgCheck(S, D->getType(), AL, SourceRange(),
D->getSourceRange()))
return;
D->addAttr(::new (S.Context)
- NonNullAttr(Attr.getRange(), S.Context, nullptr, 0,
- Attr.getAttributeSpellingListIndex()));
+ NonNullAttr(AL.getRange(), S.Context, nullptr, 0,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleReturnsNonNullAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleReturnsNonNullAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
QualType ResultType = getFunctionOrMethodResultType(D);
SourceRange SR = getFunctionOrMethodResultSourceRange(D);
- if (!attrNonNullArgCheck(S, ResultType, Attr, SourceRange(), SR,
+ if (!attrNonNullArgCheck(S, ResultType, AL, SourceRange(), SR,
/* isReturnValue */ true))
return;
D->addAttr(::new (S.Context)
- ReturnsNonNullAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ReturnsNonNullAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleNoEscapeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleNoEscapeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->isInvalidDecl())
return;
// noescape only applies to pointer types.
QualType T = cast<ParmVarDecl>(D)->getType();
if (!S.isValidPointerAttrType(T, /* RefOkay */ true)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_pointers_only)
- << Attr.getName() << Attr.getRange() << 0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_pointers_only)
+ << AL.getName() << AL.getRange() << 0;
return;
}
D->addAttr(::new (S.Context) NoEscapeAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
-static void handleAssumeAlignedAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- Expr *E = Attr.getArgAsExpr(0),
- *OE = Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr;
- S.AddAssumeAlignedAttr(Attr.getRange(), D, E, OE,
- Attr.getAttributeSpellingListIndex());
+static void handleAssumeAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ Expr *E = AL.getArgAsExpr(0),
+ *OE = AL.getNumArgs() > 1 ? AL.getArgAsExpr(1) : nullptr;
+ S.AddAssumeAlignedAttr(AL.getRange(), D, E, OE,
+ AL.getAttributeSpellingListIndex());
}
-static void handleAllocAlignAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- S.AddAllocAlignAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
- Attr.getAttributeSpellingListIndex());
+static void handleAllocAlignAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ S.AddAllocAlignAttr(AL.getRange(), D, AL.getArgAsExpr(0),
+ AL.getAttributeSpellingListIndex());
}
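// Illustrative usage, not part of this change: both alignment attributes go
// on functions that return a pointer; alloc_align names the 1-based parameter
// carrying the alignment, e.g. (hypothetical function names):
//   void *my_alloc(unsigned n) __attribute__((assume_aligned(64)));
//   void *my_aligned_alloc(unsigned a, unsigned n)
//       __attribute__((alloc_align(1)));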
void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
@@ -1615,7 +1561,7 @@ void Sema::AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex) {
QualType ResultType = getFunctionOrMethodResultType(D);
- AllocAlignAttr TmpAttr(AttrRange, Context, 0, SpellingListIndex);
+ AllocAlignAttr TmpAttr(AttrRange, Context, ParamIdx(), SpellingListIndex);
SourceLocation AttrLoc = AttrRange.getBegin();
if (!ResultType->isDependentType() &&
@@ -1625,28 +1571,22 @@ void Sema::AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
return;
}
- uint64_t IndexVal;
+ ParamIdx Idx;
const auto *FuncDecl = cast<FunctionDecl>(D);
if (!checkFunctionOrMethodParameterIndex(*this, FuncDecl, TmpAttr,
- /*AttrArgNo=*/1, ParamExpr,
- IndexVal))
+ /*AttrArgNo=*/1, ParamExpr, Idx))
return;
- QualType Ty = getFunctionOrMethodParamType(D, IndexVal);
+ QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
if (!Ty->isDependentType() && !Ty->isIntegralType(Context)) {
Diag(ParamExpr->getLocStart(), diag::err_attribute_integers_only)
- << &TmpAttr << FuncDecl->getParamDecl(IndexVal)->getSourceRange();
+ << &TmpAttr
+ << FuncDecl->getParamDecl(Idx.getASTIndex())->getSourceRange();
return;
}
- // We cannot use the Idx returned from checkFunctionOrMethodParameterIndex
- // because that has corrected for the implicit this parameter, and is zero-
- // based. The attribute expects what the user wrote explicitly.
- llvm::APSInt Val;
- ParamExpr->EvaluateAsInt(Val, Context);
-
- D->addAttr(::new (Context) AllocAlignAttr(
- AttrRange, Context, Val.getZExtValue(), SpellingListIndex));
+ D->addAttr(::new (Context)
+ AllocAlignAttr(AttrRange, Context, Idx, SpellingListIndex));
}
/// Normalize the attribute, __foo__ becomes foo.
@@ -1660,7 +1600,7 @@ static bool normalizeName(StringRef &AttrName) {
return false;
}
-static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
+static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// This attribute must be applied to a function declaration. The first
// argument to the attribute must be an identifier, the name of the resource,
// for example: malloc. The following arguments must be argument indexes, the
@@ -1706,15 +1646,15 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
Module = &S.PP.getIdentifierTable().get(ModuleName);
}
- SmallVector<unsigned, 8> OwnershipArgs;
+ SmallVector<ParamIdx, 8> OwnershipArgs;
for (unsigned i = 1; i < AL.getNumArgs(); ++i) {
Expr *Ex = AL.getArgAsExpr(i);
- uint64_t Idx;
+ ParamIdx Idx;
if (!checkFunctionOrMethodParameterIndex(S, D, AL, i, Ex, Idx))
return;
// Is the function argument a pointer type?
- QualType T = getFunctionOrMethodParamType(D, Idx);
+ QualType T = getFunctionOrMethodParamType(D, Idx.getASTIndex());
int Err = -1; // No error
switch (K) {
case OwnershipAttr::Takes:
@@ -1745,14 +1685,13 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
} else if (K == OwnershipAttr::Returns &&
I->getOwnKind() == OwnershipAttr::Returns) {
// A returns attribute conflicts with any other returns attribute using
- // a different index. Note, diagnostic reporting is 1-based, but stored
- // argument indexes are 0-based.
+ // a different index.
if (std::find(I->args_begin(), I->args_end(), Idx) == I->args_end()) {
S.Diag(I->getLocation(), diag::err_ownership_returns_index_mismatch)
- << *(I->args_begin()) + 1;
+ << I->args_begin()->getSourceIndex();
if (I->args_size())
S.Diag(AL.getLoc(), diag::note_ownership_returns_index_mismatch)
- << (unsigned)Idx + 1 << Ex->getSourceRange();
+ << Idx.getSourceIndex() << Ex->getSourceRange();
return;
}
}
@@ -1760,25 +1699,22 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
OwnershipArgs.push_back(Idx);
}
- unsigned* start = OwnershipArgs.data();
- unsigned size = OwnershipArgs.size();
- llvm::array_pod_sort(start, start + size);
-
+ ParamIdx *Start = OwnershipArgs.data();
+ unsigned Size = OwnershipArgs.size();
+ llvm::array_pod_sort(Start, Start + Size);
D->addAttr(::new (S.Context)
- OwnershipAttr(AL.getLoc(), S.Context, Module, start, size,
- AL.getAttributeSpellingListIndex()));
+ OwnershipAttr(AL.getLoc(), S.Context, Module, Start, Size,
+ AL.getAttributeSpellingListIndex()));
}
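// Illustrative usage, not part of this change: the ownership attributes pair
// an allocator with its release function for the static analyzer, e.g.
// (my_malloc/my_free are hypothetical names):
//   void *my_malloc(unsigned n) __attribute__((ownership_returns(malloc)));
//   void my_free(void *p) __attribute__((ownership_takes(malloc, 1)));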
-static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleWeakRefAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check the attribute arguments.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ if (AL.getNumArgs() > 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL.getName() << 1;
return;
}
- NamedDecl *nd = cast<NamedDecl>(D);
-
// gcc rejects
// class c {
// static int a __attribute__((weakref ("v2")));
@@ -1791,8 +1727,8 @@ static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// we reject them
const DeclContext *Ctx = D->getDeclContext()->getRedeclContext();
if (!Ctx->isFileContext()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context)
- << nd;
+ S.Diag(AL.getLoc(), diag::err_attribute_weakref_not_global_context)
+ << cast<NamedDecl>(D);
return;
}
@@ -1822,88 +1758,71 @@ static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// of transforming it into an AliasAttr. The WeakRefAttr never uses the
// StringRef parameter it was given anyway.
StringRef Str;
- if (Attr.getNumArgs() && S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (AL.getNumArgs() && S.checkStringLiteralArgumentAttr(AL, 0, Str))
// GCC will accept anything as the argument of weakref. Should we
// check for an existing decl?
- D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) AliasAttr(AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
D->addAttr(::new (S.Context)
- WeakRefAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ WeakRefAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
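// Illustrative usage, not part of this change: weakref requires a static
// declaration at file scope; as noted above, the target symbol need not be
// declared, e.g.:
//   static void f(void) __attribute__((weakref("some_external_fn")));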
-static void handleIFuncAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleIFuncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
// Aliases should be on declarations, not definitions.
const auto *FD = cast<FunctionDecl>(D);
if (FD->isThisDeclarationADefinition()) {
- S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << FD << 1;
+ S.Diag(AL.getLoc(), diag::err_alias_is_definition) << FD << 1;
return;
}
- D->addAttr(::new (S.Context) IFuncAttr(Attr.getRange(), S.Context, Str,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) IFuncAttr(AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
- S.Diag(Attr.getLoc(), diag::err_alias_not_supported_on_darwin);
+ S.Diag(AL.getLoc(), diag::err_alias_not_supported_on_darwin);
return;
}
if (S.Context.getTargetInfo().getTriple().isNVPTX()) {
- S.Diag(Attr.getLoc(), diag::err_alias_not_supported_on_nvptx);
+ S.Diag(AL.getLoc(), diag::err_alias_not_supported_on_nvptx);
}
// Aliases should be on declarations, not definitions.
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isThisDeclarationADefinition()) {
- S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << FD << 0;
+ S.Diag(AL.getLoc(), diag::err_alias_is_definition) << FD << 0;
return;
}
} else {
const auto *VD = cast<VarDecl>(D);
if (VD->isThisDeclarationADefinition() && VD->isExternallyVisible()) {
- S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << VD << 0;
+ S.Diag(AL.getLoc(), diag::err_alias_is_definition) << VD << 0;
return;
}
}
// FIXME: check if target symbol exists in current file
- D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str,
- Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<HotAttr>(S, D, Attr.getRange(), Attr.getName()))
- return;
-
- D->addAttr(::new (S.Context) ColdAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleHotAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<ColdAttr>(S, D, Attr.getRange(), Attr.getName()))
- return;
-
- D->addAttr(::new (S.Context) HotAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) AliasAttr(AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
}
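// Illustrative usage, not part of this change: alias names an existing
// definition in the same translation unit and must sit on a declaration,
// e.g. (hypothetical names):
//   void impl(void) {}
//   void entry(void) __attribute__((alias("impl")));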
-static void handleTLSModelAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleTLSModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Model;
SourceLocation LiteralLoc;
// Check that it is a string.
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, Model, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Model, &LiteralLoc))
return;
  // Check that the value is one of the supported TLS models.
@@ -1914,60 +1833,101 @@ static void handleTLSModelAttr(Sema &S, Decl *D,
}
D->addAttr(::new (S.Context)
- TLSModelAttr(Attr.getRange(), S.Context, Model,
- Attr.getAttributeSpellingListIndex()));
+ TLSModelAttr(AL.getRange(), S.Context, Model,
+ AL.getAttributeSpellingListIndex()));
}
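// Illustrative usage, not part of this change: the argument is one of the
// TLS model strings, e.g.:
//   static __thread int counter __attribute__((tls_model("initial-exec")));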
-static void handleRestrictAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleRestrictAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
QualType ResultType = getFunctionOrMethodResultType(D);
if (ResultType->isAnyPointerType() || ResultType->isBlockPointerType()) {
D->addAttr(::new (S.Context) RestrictAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
}
- S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
- << Attr.getName() << getFunctionOrMethodResultSourceRange(D);
+ S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only)
+ << AL.getName() << getFunctionOrMethodResultSourceRange(D);
}
-static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ return;
+
+ SmallVector<IdentifierInfo *, 8> CPUs;
+ for (unsigned ArgNo = 0; ArgNo < getNumAttributeArgs(AL); ++ArgNo) {
+ if (!AL.isArgIdent(ArgNo)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL.getName() << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *CPUArg = AL.getArgAsIdent(ArgNo);
+ StringRef CPUName = CPUArg->Ident->getName().trim();
+
+ if (!S.Context.getTargetInfo().validateCPUSpecificCPUDispatch(CPUName)) {
+ S.Diag(CPUArg->Loc, diag::err_invalid_cpu_specific_dispatch_value)
+ << CPUName << (AL.getKind() == ParsedAttr::AT_CPUDispatch);
+ return;
+ }
+
+ const TargetInfo &Target = S.Context.getTargetInfo();
+ if (llvm::any_of(CPUs, [CPUName, &Target](const IdentifierInfo *Cur) {
+ return Target.CPUSpecificManglingCharacter(CPUName) ==
+ Target.CPUSpecificManglingCharacter(Cur->getName());
+ })) {
+ S.Diag(AL.getLoc(), diag::warn_multiversion_duplicate_entries);
+ return;
+ }
+ CPUs.push_back(CPUArg->Ident);
+ }
+
+ FD->setIsMultiVersion(true);
+ if (AL.getKind() == ParsedAttr::AT_CPUSpecific)
+ D->addAttr(::new (S.Context) CPUSpecificAttr(
+ AL.getRange(), S.Context, CPUs.data(), CPUs.size(),
+ AL.getAttributeSpellingListIndex()));
+ else
+ D->addAttr(::new (S.Context) CPUDispatchAttr(
+ AL.getRange(), S.Context, CPUs.data(), CPUs.size(),
+ AL.getAttributeSpellingListIndex()));
+}
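// Illustrative usage, not part of this change: cpu_specific marks each
// per-CPU variant and cpu_dispatch lists them on the resolver; the loop above
// rejects two CPU names that mangle identically. E.g.:
//   void work(void) __attribute__((cpu_specific(ivybridge)));
//   void work(void) __attribute__((cpu_specific(atom)));
//   void work(void) __attribute__((cpu_dispatch(ivybridge, atom)));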
+
+static void handleCommonAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (S.LangOpts.CPlusPlus) {
- S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
- << Attr.getName() << AttributeLangSupport::Cpp;
+ S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
+ << AL.getName() << AttributeLangSupport::Cpp;
return;
}
- if (CommonAttr *CA = S.mergeCommonAttr(D, Attr.getRange(), Attr.getName(),
- Attr.getAttributeSpellingListIndex()))
+ if (CommonAttr *CA = S.mergeCommonAttr(D, AL.getRange(), AL.getName(),
+ AL.getAttributeSpellingListIndex()))
D->addAttr(CA);
}
-static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, Attr.getRange(),
- Attr.getName()))
+static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, AL.getRange(),
+ AL.getName()))
return;
- if (Attr.isDeclspecAttribute()) {
+ if (AL.isDeclspecAttribute()) {
const auto &Triple = S.getASTContext().getTargetInfo().getTriple();
const auto &Arch = Triple.getArch();
if (Arch != llvm::Triple::x86 &&
(Arch != llvm::Triple::arm && Arch != llvm::Triple::thumb)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_on_arch)
- << Attr.getName() << Triple.getArchName();
+ S.Diag(AL.getLoc(), diag::err_attribute_not_supported_on_arch)
+ << AL.getName() << Triple.getArchName();
return;
}
}
- D->addAttr(::new (S.Context) NakedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) NakedAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &Attrs) {
+static void handleNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
if (hasDeclarator(D)) return;
- if (S.CheckNoReturnAttr(Attrs))
- return;
-
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(Attrs.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attrs.getName() << ExpectedFunctionOrMethod;
@@ -1978,16 +1938,14 @@ static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &Attrs) {
Attrs.getRange(), S.Context, Attrs.getAttributeSpellingListIndex()));
}
-static void handleNoCallerSavedRegsAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (S.CheckNoCallerSavedRegsAttr(Attr))
- return;
-
- D->addAttr(::new (S.Context) AnyX86NoCallerSavedRegistersAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+static void handleNoCfCheckAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
+ if (!S.getLangOpts().CFProtectionBranch)
+ S.Diag(Attrs.getLoc(), diag::warn_nocf_check_attribute_ignored);
+ else
+ handleSimpleAttribute<AnyX86NoCfCheckAttr>(S, D, Attrs);
}
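// Illustrative usage, not part of this change: nocf_check only takes effect
// when building with -fcf-protection=branch; otherwise the handler above
// warns and drops it, e.g.:
//   void jump_target(void) __attribute__((nocf_check));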
-bool Sema::CheckNoReturnAttr(const AttributeList &Attrs) {
+bool Sema::CheckAttrNoArgs(const ParsedAttr &Attrs) {
if (!checkAttributeNumArgs(*this, Attrs, 0)) {
Attrs.setInvalid();
return true;
@@ -1996,221 +1954,167 @@ bool Sema::CheckNoReturnAttr(const AttributeList &Attrs) {
return false;
}
-bool Sema::CheckNoCallerSavedRegsAttr(const AttributeList &Attr) {
+bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
- if (!Attr.existsInTarget(Context.getTargetInfo())) {
- Diag(Attr.getLoc(), diag::warn_unknown_attribute_ignored) << Attr.getName();
- Attr.setInvalid();
- return true;
- }
-
- if (!checkAttributeNumArgs(*this, Attr, 0)) {
- Attr.setInvalid();
+ if (!AL.existsInTarget(Context.getTargetInfo())) {
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored) << AL.getName();
+ AL.setInvalid();
return true;
}
return false;
}
-static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
-
+static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+
  // The checking paths for 'noreturn' and 'analyzer_noreturn' are different
// because 'analyzer_noreturn' does not impact the type.
if (!isFunctionOrMethodOrBlock(D)) {
ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (!VD || (!VD->getType()->isBlockPointerType() &&
!VD->getType()->isFunctionPointerType())) {
- S.Diag(Attr.getLoc(),
- Attr.isCXX11Attribute() ? diag::err_attribute_wrong_decl_type
+ S.Diag(AL.getLoc(),
+ AL.isCXX11Attribute() ? diag::err_attribute_wrong_decl_type
: diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ << AL.getName() << ExpectedFunctionMethodOrBlock;
return;
}
}
D->addAttr(::new (S.Context)
- AnalyzerNoReturnAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ AnalyzerNoReturnAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
// PS3 PPU-specific.
-static void handleVecReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) {
-/*
- Returning a Vector Class in Registers
-
- According to the PPU ABI specifications, a class with a single member of
- vector type is returned in memory when used as the return value of a function.
- This results in inefficient code when implementing vector classes. To return
- the value in a single vector register, add the vecreturn attribute to the
- class definition. This attribute is also applicable to struct types.
-
- Example:
-
- struct Vector
- {
- __vector float xyzw;
- } __attribute__((vecreturn));
-
- Vector Add(Vector lhs, Vector rhs)
- {
- Vector result;
- result.xyzw = vec_add(lhs.xyzw, rhs.xyzw);
- return result; // This will be returned in a register
- }
-*/
+static void handleVecReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ /*
+ Returning a Vector Class in Registers
+
+ According to the PPU ABI specifications, a class with a single member of
+ vector type is returned in memory when used as the return value of a
+ function.
+ This results in inefficient code when implementing vector classes. To return
+ the value in a single vector register, add the vecreturn attribute to the
+ class definition. This attribute is also applicable to struct types.
+
+ Example:
+
+ struct Vector
+ {
+ __vector float xyzw;
+ } __attribute__((vecreturn));
+
+ Vector Add(Vector lhs, Vector rhs)
+ {
+ Vector result;
+ result.xyzw = vec_add(lhs.xyzw, rhs.xyzw);
+ return result; // This will be returned in a register
+ }
+ */
if (VecReturnAttr *A = D->getAttr<VecReturnAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << A;
+ S.Diag(AL.getLoc(), diag::err_repeat_attribute) << A;
return;
}
- RecordDecl *record = cast<RecordDecl>(D);
+ const auto *R = cast<RecordDecl>(D);
int count = 0;
- if (!isa<CXXRecordDecl>(record)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ if (!isa<CXXRecordDecl>(R)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
return;
}
- if (!cast<CXXRecordDecl>(record)->isPOD()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_pod_record);
+ if (!cast<CXXRecordDecl>(R)->isPOD()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_vecreturn_only_pod_record);
return;
}
- for (const auto *I : record->fields()) {
+ for (const auto *I : R->fields()) {
if ((count == 1) || !I->getType()->isVectorType()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ S.Diag(AL.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
return;
}
count++;
}
- D->addAttr(::new (S.Context)
- VecReturnAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) VecReturnAttr(
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
static void handleDependencyAttr(Sema &S, Scope *Scope, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
if (isa<ParmVarDecl>(D)) {
// [[carries_dependency]] can only be applied to a parameter if it is a
// parameter of a function declaration or lambda.
if (!(Scope->getFlags() & clang::Scope::FunctionDeclarationScope)) {
- S.Diag(Attr.getLoc(),
+ S.Diag(AL.getLoc(),
diag::err_carries_dependency_param_not_function_decl);
return;
}
}
D->addAttr(::new (S.Context) CarriesDependencyAttr(
- Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleNotTailCalledAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (checkAttrMutualExclusion<AlwaysInlineAttr>(S, D, Attr.getRange(),
- Attr.getName()))
- return;
-
- D->addAttr(::new (S.Context) NotTailCalledAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleDisableTailCallsAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (checkAttrMutualExclusion<NakedAttr>(S, D, Attr.getRange(),
- Attr.getName()))
- return;
-
- D->addAttr(::new (S.Context) DisableTailCallsAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (VD->hasLocalStorage()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
- return;
- }
- } else if (!isFunctionOrMethod(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedVariableOrFunction;
- return;
- }
-
- D->addAttr(::new (S.Context)
- UsedAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleUnusedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- bool IsCXX17Attr = Attr.isCXX11Attribute() && !Attr.getScopeName();
-
- if (IsCXX17Attr && isa<VarDecl>(D)) {
- // The C++17 spelling of this attribute cannot be applied to a static data
- // member per [dcl.attr.unused]p2.
- if (cast<VarDecl>(D)->isStaticDataMember()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedForMaybeUnused;
- return;
- }
- }
+static void handleUnusedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ bool IsCXX17Attr = AL.isCXX11Attribute() && !AL.getScopeName();
// If this is spelled as the standard C++17 attribute, but not in C++17, warn
// about using it as an extension.
if (!S.getLangOpts().CPlusPlus17 && IsCXX17Attr)
- S.Diag(Attr.getLoc(), diag::ext_cxx17_attr) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL.getName();
D->addAttr(::new (S.Context) UnusedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
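// Illustrative usage, not part of this change: the C++17 spelling is also
// accepted pre-C++17 with an extension warning, e.g.:
//   void report(int level, [[maybe_unused]] int verbosity);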
-static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleConstructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t priority = ConstructorAttr::DefaultPriority;
- if (Attr.getNumArgs() &&
- !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
+ if (AL.getNumArgs() &&
+ !checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
return;
D->addAttr(::new (S.Context)
- ConstructorAttr(Attr.getRange(), S.Context, priority,
- Attr.getAttributeSpellingListIndex()));
+ ConstructorAttr(AL.getRange(), S.Context, priority,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleDestructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t priority = DestructorAttr::DefaultPriority;
- if (Attr.getNumArgs() &&
- !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority))
+ if (AL.getNumArgs() &&
+ !checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
return;
D->addAttr(::new (S.Context)
- DestructorAttr(Attr.getRange(), S.Context, priority,
- Attr.getAttributeSpellingListIndex()));
+ DestructorAttr(AL.getRange(), S.Context, priority,
+ AL.getAttributeSpellingListIndex()));
}
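// Illustrative usage, not part of this change: the optional argument sets the
// run priority (lower-numbered constructors run earlier), e.g.:
//   static void setup(void) __attribute__((constructor(200)));
//   static void teardown(void) __attribute__((destructor(200)));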
template <typename AttrTy>
-static void handleAttrWithMessage(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleAttrWithMessage(Sema &S, Decl *D, const ParsedAttr &AL) {
// Handle the case where the attribute has a text message.
StringRef Str;
- if (Attr.getNumArgs() == 1 && !S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (AL.getNumArgs() == 1 && !S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
- D->addAttr(::new (S.Context) AttrTy(Attr.getRange(), S.Context, Str,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) AttrTy(AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
}
static void handleObjCSuppresProtocolAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) {
- S.Diag(Attr.getLoc(), diag::err_objc_attr_protocol_requires_definition)
- << Attr.getName() << Attr.getRange();
+ S.Diag(AL.getLoc(), diag::err_objc_attr_protocol_requires_definition)
+ << AL.getName() << AL.getRange();
return;
}
D->addAttr(::new (S.Context)
- ObjCExplicitProtocolImplAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ObjCExplicitProtocolImplAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
static bool checkAvailabilityAttr(Sema &S, SourceRange Range,
@@ -2252,7 +2156,7 @@ static bool checkAvailabilityAttr(Sema &S, SourceRange Range,
return false;
}
-/// \brief Check whether the two versions match.
+/// Check whether the two versions match.
///
/// If either version tuple is empty, then they are assumed to match. If
/// \p BeforeIsOkay is true, then \p X can be less than or equal to \p Y.
@@ -2302,7 +2206,7 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
if (D->hasAttrs()) {
AttrVec &Attrs = D->getAttrs();
for (unsigned i = 0, e = Attrs.size(); i != e;) {
- const AvailabilityAttr *OldAA = dyn_cast<AvailabilityAttr>(Attrs[i]);
+ const auto *OldAA = dyn_cast<AvailabilityAttr>(Attrs[i]);
if (!OldAA) {
++i;
continue;
@@ -2434,37 +2338,34 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
return nullptr;
}
-static void handleAvailabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeNumArgs(S, Attr, 1))
+static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeNumArgs(S, AL, 1))
return;
- IdentifierLoc *Platform = Attr.getArgAsIdent(0);
- unsigned Index = Attr.getAttributeSpellingListIndex();
+ IdentifierLoc *Platform = AL.getArgAsIdent(0);
+ unsigned Index = AL.getAttributeSpellingListIndex();
IdentifierInfo *II = Platform->Ident;
if (AvailabilityAttr::getPrettyPlatformName(II->getName()).empty())
S.Diag(Platform->Loc, diag::warn_availability_unknown_platform)
<< Platform->Ident;
- NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ auto *ND = dyn_cast<NamedDecl>(D);
if (!ND) // We warned about this already, so just return.
return;
- AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
- AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
- AvailabilityChange Obsoleted = Attr.getAvailabilityObsoleted();
- bool IsUnavailable = Attr.getUnavailableLoc().isValid();
- bool IsStrict = Attr.getStrictLoc().isValid();
+ AvailabilityChange Introduced = AL.getAvailabilityIntroduced();
+ AvailabilityChange Deprecated = AL.getAvailabilityDeprecated();
+ AvailabilityChange Obsoleted = AL.getAvailabilityObsoleted();
+ bool IsUnavailable = AL.getUnavailableLoc().isValid();
+ bool IsStrict = AL.getStrictLoc().isValid();
StringRef Str;
- if (const StringLiteral *SE =
- dyn_cast_or_null<StringLiteral>(Attr.getMessageExpr()))
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getMessageExpr()))
Str = SE->getString();
StringRef Replacement;
- if (const StringLiteral *SE =
- dyn_cast_or_null<StringLiteral>(Attr.getReplacementExpr()))
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getReplacementExpr()))
Replacement = SE->getString();
- AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, Attr.getRange(), II,
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, AL.getRange(), II,
false/*Implicit*/,
Introduced.Version,
Deprecated.Version,
@@ -2509,7 +2410,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
- Attr.getRange(),
+ AL.getRange(),
NewII,
true/*Implicit*/,
NewIntroduced,
@@ -2534,7 +2435,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
if (NewII) {
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND,
- Attr.getRange(),
+ AL.getRange(),
NewII,
true/*Implicit*/,
Introduced.Version,
@@ -2552,23 +2453,23 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
}
static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
- assert(checkAttributeAtMostNumArgs(S, Attr, 3) &&
+ assert(checkAttributeAtMostNumArgs(S, AL, 3) &&
"Invalid number of arguments in an external_source_symbol attribute");
StringRef Language;
- if (const auto *SE = dyn_cast_or_null<StringLiteral>(Attr.getArgAsExpr(0)))
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(0)))
Language = SE->getString();
StringRef DefinedIn;
- if (const auto *SE = dyn_cast_or_null<StringLiteral>(Attr.getArgAsExpr(1)))
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(1)))
DefinedIn = SE->getString();
- bool IsGeneratedDeclaration = Attr.getArgAsIdent(2) != nullptr;
+ bool IsGeneratedDeclaration = AL.getArgAsIdent(2) != nullptr;
D->addAttr(::new (S.Context) ExternalSourceSymbolAttr(
- Attr.getRange(), S.Context, Language, DefinedIn, IsGeneratedDeclaration,
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, Language, DefinedIn, IsGeneratedDeclaration,
+ AL.getAttributeSpellingListIndex()));
}
template <class T>
@@ -2601,12 +2502,12 @@ TypeVisibilityAttr *Sema::mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
AttrSpellingListIndex);
}
-static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr,
+static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
bool isTypeVisibility) {
// Visibility attributes don't mean anything on a typedef.
if (isa<TypedefNameDecl>(D)) {
- S.Diag(Attr.getRange().getBegin(), diag::warn_attribute_ignored)
- << Attr.getName();
+ S.Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored)
+ << AL.getName();
return;
}
@@ -2615,21 +2516,21 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr,
!(isa<TagDecl>(D) ||
isa<ObjCInterfaceDecl>(D) ||
isa<NamespaceDecl>(D))) {
- S.Diag(Attr.getRange().getBegin(), diag::err_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedTypeOrNamespace;
+ S.Diag(AL.getRange().getBegin(), diag::err_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedTypeOrNamespace;
return;
}
// Check that the argument is a string literal.
StringRef TypeStr;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, TypeStr, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, TypeStr, &LiteralLoc))
return;
VisibilityAttr::VisibilityType type;
if (!VisibilityAttr::ConvertStrToVisibilityType(TypeStr, type)) {
S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported)
- << Attr.getName() << TypeStr;
+ << AL.getName() << TypeStr;
return;
}
@@ -2637,62 +2538,60 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr,
// (like Darwin) that don't support it.
if (type == VisibilityAttr::Protected &&
!S.Context.getTargetInfo().hasProtectedVisibility()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_protected_visibility);
+ S.Diag(AL.getLoc(), diag::warn_attribute_protected_visibility);
type = VisibilityAttr::Default;
}
- unsigned Index = Attr.getAttributeSpellingListIndex();
- clang::Attr *newAttr;
+ unsigned Index = AL.getAttributeSpellingListIndex();
+ Attr *newAttr;
if (isTypeVisibility) {
- newAttr = S.mergeTypeVisibilityAttr(D, Attr.getRange(),
+ newAttr = S.mergeTypeVisibilityAttr(D, AL.getRange(),
(TypeVisibilityAttr::VisibilityType) type,
Index);
} else {
- newAttr = S.mergeVisibilityAttr(D, Attr.getRange(), type, Index);
+ newAttr = S.mergeVisibilityAttr(D, AL.getRange(), type, Index);
}
if (newAttr)
D->addAttr(newAttr);
}
-static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl,
- const AttributeList &Attr) {
- ObjCMethodDecl *method = cast<ObjCMethodDecl>(decl);
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+static void handleObjCMethodFamilyAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ const auto *M = cast<ObjCMethodDecl>(D);
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIdentifier;
return;
}
- IdentifierLoc *IL = Attr.getArgAsIdent(0);
+ IdentifierLoc *IL = AL.getArgAsIdent(0);
ObjCMethodFamilyAttr::FamilyKind F;
if (!ObjCMethodFamilyAttr::ConvertStrToFamilyKind(IL->Ident->getName(), F)) {
- S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << Attr.getName()
- << IL->Ident;
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
+ << AL.getName() << IL->Ident;
return;
}
if (F == ObjCMethodFamilyAttr::OMF_init &&
- !method->getReturnType()->isObjCObjectPointerType()) {
- S.Diag(method->getLocation(), diag::err_init_method_bad_return_type)
- << method->getReturnType();
+ !M->getReturnType()->isObjCObjectPointerType()) {
+ S.Diag(M->getLocation(), diag::err_init_method_bad_return_type)
+ << M->getReturnType();
// Ignore the attribute.
return;
}
- method->addAttr(new (S.Context) ObjCMethodFamilyAttr(Attr.getRange(),
- S.Context, F,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(new (S.Context) ObjCMethodFamilyAttr(
+ AL.getRange(), S.Context, F, AL.getAttributeSpellingListIndex()));
}
-static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+static void handleObjCNSObject(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
QualType T = TD->getUnderlyingType();
if (!T->isCARCBridgableType()) {
S.Diag(TD->getLocation(), diag::err_nsobject_attribute);
return;
}
}
- else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
QualType T = PD->getType();
if (!T->isCARCBridgableType()) {
S.Diag(PD->getLocation(), diag::err_nsobject_attribute);
@@ -2709,12 +2608,12 @@ static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(D->getLocation(), diag::warn_nsobject_attribute);
}
D->addAttr(::new (S.Context)
- ObjCNSObjectAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ObjCNSObjectAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleObjCIndependentClass(Sema &S, Decl *D, const AttributeList &Attr) {
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+static void handleObjCIndependentClass(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
QualType T = TD->getUnderlyingType();
if (!T->isObjCObjectPointerType()) {
S.Diag(TD->getLocation(), diag::warn_ptr_independentclass_attribute);
@@ -2725,45 +2624,45 @@ static void handleObjCIndependentClass(Sema &S, Decl *D, const AttributeList &At
return;
}
D->addAttr(::new (S.Context)
- ObjCIndependentClassAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ObjCIndependentClassAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+static void handleBlocksAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIdentifier;
return;
}
- IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
BlocksAttr::BlockType type;
if (!BlocksAttr::ConvertStrToBlockType(II->getName(), type)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
- << Attr.getName() << II;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL.getName() << II;
return;
}
D->addAttr(::new (S.Context)
- BlocksAttr(Attr.getRange(), S.Context, type,
- Attr.getAttributeSpellingListIndex()));
+ BlocksAttr(AL.getRange(), S.Context, type,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
- if (Attr.getNumArgs() > 0) {
- Expr *E = Attr.getArgAsExpr(0);
+ if (AL.getNumArgs() > 0) {
+ Expr *E = AL.getArgAsExpr(0);
llvm::APSInt Idx(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 1 << AANT_ArgumentIntegerConstant
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIntegerConstant
<< E->getSourceRange();
return;
}
if (Idx.isSigned() && Idx.isNegative()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_less_than_zero)
+ S.Diag(AL.getLoc(), diag::err_attribute_sentinel_less_than_zero)
<< E->getSourceRange();
return;
}
@@ -2772,13 +2671,13 @@ static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
unsigned nullPos = (unsigned)SentinelAttr::DefaultNullPos;
- if (Attr.getNumArgs() > 1) {
- Expr *E = Attr.getArgAsExpr(1);
+ if (AL.getNumArgs() > 1) {
+ Expr *E = AL.getArgAsExpr(1);
llvm::APSInt Idx(32);
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 2 << AANT_ArgumentIntegerConstant
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 2 << AANT_ArgumentIntegerConstant
<< E->getSourceRange();
return;
}
@@ -2787,34 +2686,34 @@ static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if ((Idx.isSigned() && Idx.isNegative()) || nullPos > 1) {
      // FIXME: This error message could be improved; it would be nice
// to say what the bounds actually are.
- S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_not_zero_or_one)
+ S.Diag(AL.getLoc(), diag::err_attribute_sentinel_not_zero_or_one)
<< E->getSourceRange();
return;
}
}
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
const FunctionType *FT = FD->getType()->castAs<FunctionType>();
if (isa<FunctionNoProtoType>(FT)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_named_arguments);
+ S.Diag(AL.getLoc(), diag::warn_attribute_sentinel_named_arguments);
return;
}
if (!cast<FunctionProtoType>(FT)->isVariadic()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
return;
}
- } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ } else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
if (!MD->isVariadic()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
return;
}
- } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
if (!BD->isVariadic()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 1;
+ S.Diag(AL.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 1;
return;
}
- } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
+ } else if (const auto *V = dyn_cast<VarDecl>(D)) {
QualType Ty = V->getType();
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
const FunctionType *FT = Ty->isFunctionPointerType()
@@ -2822,84 +2721,83 @@ static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
: Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
if (!cast<FunctionProtoType>(FT)->isVariadic()) {
int m = Ty->isFunctionPointerType() ? 0 : 1;
- S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
+ S.Diag(AL.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedFunctionMethodOrBlock;
return;
}
} else {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedFunctionMethodOrBlock;
return;
}
D->addAttr(::new (S.Context)
- SentinelAttr(Attr.getRange(), S.Context, sentinel, nullPos,
- Attr.getAttributeSpellingListIndex()));
+ SentinelAttr(AL.getRange(), S.Context, sentinel, nullPos,
+ AL.getAttributeSpellingListIndex()));
}
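// Illustrative usage, not part of this change: sentinel requires a variadic
// function whose calls end in NULL; the optional first argument is the
// sentinel's offset from the end of the argument list, e.g.
// (exec_list is a hypothetical name):
//   void exec_list(const char *first, ...) __attribute__((sentinel(0)));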
-static void handleWarnUnusedResult(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->getFunctionType() &&
D->getFunctionType()->getReturnType()->isVoidType()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
- << Attr.getName() << 0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_void_function_method)
+ << AL.getName() << 0;
return;
}
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
if (MD->getReturnType()->isVoidType()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
- << Attr.getName() << 1;
+ S.Diag(AL.getLoc(), diag::warn_attribute_void_function_method)
+ << AL.getName() << 1;
return;
}
// If this is spelled as the standard C++17 attribute, but not in C++17, warn
// about using it as an extension.
- if (!S.getLangOpts().CPlusPlus17 && Attr.isCXX11Attribute() &&
- !Attr.getScopeName())
- S.Diag(Attr.getLoc(), diag::ext_cxx17_attr) << Attr.getName();
+ if (!S.getLangOpts().CPlusPlus17 && AL.isCXX11Attribute() &&
+ !AL.getScopeName())
+ S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL.getName();
D->addAttr(::new (S.Context)
- WarnUnusedResultAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ WarnUnusedResultAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleWeakImportAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// weak_import only applies to variable & function declarations.
bool isDef = false;
if (!D->canBeWeakImported(isDef)) {
if (isDef)
- S.Diag(Attr.getLoc(), diag::warn_attribute_invalid_on_definition)
+ S.Diag(AL.getLoc(), diag::warn_attribute_invalid_on_definition)
<< "weak_import";
else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D) ||
(S.Context.getTargetInfo().getTriple().isOSDarwin() &&
(isa<ObjCInterfaceDecl>(D) || isa<EnumDecl>(D)))) {
// Nothing to warn about here.
} else
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedVariableOrFunction;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedVariableOrFunction;
return;
}
D->addAttr(::new (S.Context)
- WeakImportAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ WeakImportAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
// Handles reqd_work_group_size and work_group_size_hint.
template <typename WorkGroupAttr>
-static void handleWorkGroupSize(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
- const Expr *E = Attr.getArgAsExpr(i);
- if (!checkUInt32Argument(S, Attr, E, WGSize[i], i))
+ const Expr *E = AL.getArgAsExpr(i);
+ if (!checkUInt32Argument(S, AL, E, WGSize[i], i))
return;
if (WGSize[i] == 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
- << Attr.getName() << E->getSourceRange();
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
+ << AL.getName() << E->getSourceRange();
return;
}
}
@@ -2908,73 +2806,81 @@ static void handleWorkGroupSize(Sema &S, Decl *D,
if (Existing && !(Existing->getXDim() == WGSize[0] &&
Existing->getYDim() == WGSize[1] &&
Existing->getZDim() == WGSize[2]))
- S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
- D->addAttr(::new (S.Context) WorkGroupAttr(Attr.getRange(), S.Context,
+ D->addAttr(::new (S.Context) WorkGroupAttr(AL.getRange(), S.Context,
WGSize[0], WGSize[1], WGSize[2],
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
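+// Illustrative OpenCL usage for the checks above (hypothetical kernel):
+//   __kernel __attribute__((reqd_work_group_size(64, 1, 1))) void k(void);
+// All three dimensions must be non-zero, and restating the attribute with
+// different dimensions only warns.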
// Handles intel_reqd_sub_group_size.
-static void handleSubGroupSize(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleSubGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t SGSize;
- const Expr *E = Attr.getArgAsExpr(0);
- if (!checkUInt32Argument(S, Attr, E, SGSize))
+ const Expr *E = AL.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, AL, E, SGSize))
return;
if (SGSize == 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
- << Attr.getName() << E->getSourceRange();
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
+ << AL.getName() << E->getSourceRange();
return;
}
OpenCLIntelReqdSubGroupSizeAttr *Existing =
D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>();
if (Existing && Existing->getSubGroupSize() != SGSize)
- S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
D->addAttr(::new (S.Context) OpenCLIntelReqdSubGroupSizeAttr(
- Attr.getRange(), S.Context, SGSize,
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, SGSize,
+ AL.getAttributeSpellingListIndex()));
}
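+// e.g. (illustrative) __attribute__((intel_reqd_sub_group_size(8))) on an
+// OpenCL kernel: a zero size is an error, a conflicting restatement warns.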
-static void handleVecTypeHint(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!Attr.hasParsedType()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+static void handleVecTypeHint(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.hasParsedType()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL.getName() << 1;
return;
}
TypeSourceInfo *ParmTSI = nullptr;
- QualType ParmType = S.GetTypeFromParser(Attr.getTypeArg(), &ParmTSI);
+ QualType ParmType = S.GetTypeFromParser(AL.getTypeArg(), &ParmTSI);
assert(ParmTSI && "no type source info for attribute argument");
if (!ParmType->isExtVectorType() && !ParmType->isFloatingType() &&
(ParmType->isBooleanType() ||
!ParmType->isIntegralType(S.getASTContext()))) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_vec_type_hint)
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_vec_type_hint)
<< ParmType;
return;
}
if (VecTypeHintAttr *A = D->getAttr<VecTypeHintAttr>()) {
if (!S.Context.hasSameType(A->getTypeHint(), ParmType)) {
- S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
return;
}
}
- D->addAttr(::new (S.Context) VecTypeHintAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) VecTypeHintAttr(AL.getLoc(), S.Context,
ParmTSI,
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
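+// Illustrative: __attribute__((vec_type_hint(float4))) on a kernel. Vector,
+// floating-point, and non-bool integral hint types pass the check above;
+// vec_type_hint(bool) does not.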
SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range,
StringRef Name,
unsigned AttrSpellingListIndex) {
+ // Explicit or partial specializations do not inherit
+ // the section attribute from the primary template.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (AttrSpellingListIndex == SectionAttr::Declspec_allocate &&
+ FD->isFunctionTemplateSpecialization())
+ return nullptr;
+ }
if (SectionAttr *ExistingAttr = D->getAttr<SectionAttr>()) {
if (ExistingAttr->getName() == Name)
return nullptr;
- Diag(ExistingAttr->getLocation(), diag::warn_mismatched_section);
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_section)
+ << 1 /*section*/;
Diag(Range.getBegin(), diag::note_previous_attribute);
return nullptr;
}
@@ -2985,18 +2891,19 @@ SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range,
bool Sema::checkSectionName(SourceLocation LiteralLoc, StringRef SecName) {
std::string Error = Context.getTargetInfo().isValidSectionSpecifier(SecName);
if (!Error.empty()) {
- Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error;
+ Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error
+ << 1 /*'section'*/;
return false;
}
return true;
}
-static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Make sure that there is a string literal as the section's single
// argument.
StringRef Str;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc))
return;
if (!S.checkSectionName(LiteralLoc, Str))
@@ -3010,12 +2917,65 @@ static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
- unsigned Index = Attr.getAttributeSpellingListIndex();
- SectionAttr *NewAttr = S.mergeSectionAttr(D, Attr.getRange(), Str, Index);
+ unsigned Index = AL.getAttributeSpellingListIndex();
+ SectionAttr *NewAttr = S.mergeSectionAttr(D, AL.getRange(), Str, Index);
if (NewAttr)
D->addAttr(NewAttr);
}
+static bool checkCodeSegName(Sema &S, SourceLocation LiteralLoc,
+                             StringRef CodeSegName) {
+  std::string Error =
+      S.Context.getTargetInfo().isValidSectionSpecifier(CodeSegName);
+ if (!Error.empty()) {
+ S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error
+ << 0 /*'code-seg'*/;
+ return false;
+ }
+ return true;
+}
+
+CodeSegAttr *Sema::mergeCodeSegAttr(Decl *D, SourceRange Range,
+ StringRef Name,
+ unsigned AttrSpellingListIndex) {
+ // Explicit or partial specializations do not inherit
+ // the code_seg attribute from the primary template.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isFunctionTemplateSpecialization())
+ return nullptr;
+ }
+ if (const auto *ExistingAttr = D->getAttr<CodeSegAttr>()) {
+ if (ExistingAttr->getName() == Name)
+ return nullptr;
+ Diag(ExistingAttr->getLocation(), diag::warn_mismatched_section)
+ << 0 /*codeseg*/;
+ Diag(Range.getBegin(), diag::note_previous_attribute);
+ return nullptr;
+ }
+ return ::new (Context) CodeSegAttr(Range, Context, Name,
+ AttrSpellingListIndex);
+}
+
+static void handleCodeSegAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc))
+ return;
+ if (!checkCodeSegName(S, LiteralLoc, Str))
+ return;
+ if (const auto *ExistingAttr = D->getAttr<CodeSegAttr>()) {
+ if (!ExistingAttr->isImplicit()) {
+ S.Diag(AL.getLoc(),
+ ExistingAttr->getName() == Str
+ ? diag::warn_duplicate_codeseg_attribute
+ : diag::err_conflicting_codeseg_attribute);
+ return;
+ }
+ D->dropAttr<CodeSegAttr>();
+ }
+ if (CodeSegAttr *CSA = S.mergeCodeSegAttr(D, AL.getRange(), Str,
+ AL.getAttributeSpellingListIndex()))
+ D->addAttr(CSA);
+}
+
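+// Illustrative MSVC-style usage for the code_seg handler above (hypothetical
+// declaration): __declspec(code_seg("pages")) void f();
+// Repeating the same segment name only warns; an explicit, conflicting name
+// on the same declaration is an error.
+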
// Check for things we'd like to warn about. Multiversioning issues are
// handled later in the process, once we know how many exist.
bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
@@ -3044,30 +3004,50 @@ bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
<< Unsupported << None << CurFeature;
}
- return true;
+ return false;
}
-static void handleTargetAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc) ||
- !S.checkTargetAttr(LiteralLoc, Str))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc) ||
+ S.checkTargetAttr(LiteralLoc, Str))
return;
- unsigned Index = Attr.getAttributeSpellingListIndex();
+
+ unsigned Index = AL.getAttributeSpellingListIndex();
TargetAttr *NewAttr =
- ::new (S.Context) TargetAttr(Attr.getRange(), S.Context, Str, Index);
+ ::new (S.Context) TargetAttr(AL.getRange(), S.Context, Str, Index);
D->addAttr(NewAttr);
}
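+// e.g. (illustrative) __attribute__((target("avx2"))) void f();
+// A true result from checkTargetAttr means an error was diagnosed, so the
+// handler above bails out without attaching the attribute.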
-static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- Expr *E = Attr.getArgAsExpr(0);
+static void handleMinVectorWidthAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ Expr *E = AL.getArgAsExpr(0);
+ uint32_t VecWidth;
+ if (!checkUInt32Argument(S, AL, E, VecWidth)) {
+ AL.setInvalid();
+ return;
+ }
+
+ MinVectorWidthAttr *Existing = D->getAttr<MinVectorWidthAttr>();
+ if (Existing && Existing->getVectorWidth() != VecWidth) {
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ MinVectorWidthAttr(AL.getRange(), S.Context, VecWidth,
+ AL.getAttributeSpellingListIndex()));
+}
+
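+// Illustrative: __attribute__((min_vector_width(128))) void f();
+// The width must be an unsigned 32-bit constant; a second, different width
+// only draws the duplicate-attribute warning.
+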
+static void handleCleanupAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ Expr *E = AL.getArgAsExpr(0);
SourceLocation Loc = E->getExprLoc();
FunctionDecl *FD = nullptr;
DeclarationNameInfo NI;
// gcc only allows for simple identifiers. Since we support more than gcc, we
// will warn the user.
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E)) {
if (DRE->hasQualifier())
S.Diag(Loc, diag::warn_cleanup_ext);
FD = dyn_cast<FunctionDecl>(DRE->getDecl());
@@ -3077,7 +3057,7 @@ static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
<< NI.getName();
return;
}
- } else if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ } else if (auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
if (ULE->hasExplicitTemplateArgs())
S.Diag(Loc, diag::warn_cleanup_ext);
FD = S.ResolveSingleFunctionTemplateSpecialization(ULE, true);
@@ -3112,49 +3092,49 @@ static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
D->addAttr(::new (S.Context)
- CleanupAttr(Attr.getRange(), S.Context, FD,
- Attr.getAttributeSpellingListIndex()));
+ CleanupAttr(AL.getRange(), S.Context, FD,
+ AL.getAttributeSpellingListIndex()));
}
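+// Classic usage accepted above (hypothetical helper):
+//   void close_int(int *p);
+//   int fd __attribute__((cleanup(close_int)));
+// Qualified names and explicit template arguments go beyond GCC and draw
+// warn_cleanup_ext.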
static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 0 << AANT_ArgumentIdentifier;
+ const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 0 << AANT_ArgumentIdentifier;
return;
}
EnumExtensibilityAttr::Kind ExtensibilityKind;
- IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
if (!EnumExtensibilityAttr::ConvertStrToKind(II->getName(),
ExtensibilityKind)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
- << Attr.getName() << II;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL.getName() << II;
return;
}
D->addAttr(::new (S.Context) EnumExtensibilityAttr(
- Attr.getRange(), S.Context, ExtensibilityKind,
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, ExtensibilityKind,
+ AL.getAttributeSpellingListIndex()));
}
/// Handle __attribute__((format_arg((idx)))) attribute based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
-static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- Expr *IdxExpr = Attr.getArgAsExpr(0);
- uint64_t Idx;
- if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, IdxExpr, Idx))
+static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ Expr *IdxExpr = AL.getArgAsExpr(0);
+ ParamIdx Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, 1, IdxExpr, Idx))
return;
// Make sure the format string is really a string.
- QualType Ty = getFunctionOrMethodParamType(D, Idx);
+ QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
bool NotNSStringTy = !isNSStringType(Ty, S.Context);
if (NotNSStringTy &&
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
- S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ S.Diag(AL.getLoc(), diag::err_format_attribute_not)
<< "a string type" << IdxExpr->getSourceRange()
<< getFunctionOrMethodParamRange(D, 0);
return;
@@ -3164,21 +3144,14 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
- S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not)
+ S.Diag(AL.getLoc(), diag::err_format_attribute_result_not)
<< (NotNSStringTy ? "string type" : "NSString")
<< IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
return;
}
- // We cannot use the Idx returned from checkFunctionOrMethodParameterIndex
- // because that has corrected for the implicit this parameter, and is zero-
- // based. The attribute expects what the user wrote explicitly.
- llvm::APSInt Val;
- IdxExpr->EvaluateAsInt(Val, S.Context);
-
- D->addAttr(::new (S.Context)
- FormatArgAttr(Attr.getRange(), S.Context, Val.getZExtValue(),
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) FormatArgAttr(
+ AL.getRange(), S.Context, Idx, AL.getAttributeSpellingListIndex()));
}
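+// Illustrative, mirroring the GCC documentation cited above:
+//   char *gettext(const char *fmt) __attribute__((format_arg(1)));
+// The ParamIdx keeps the user's one-based index for the attribute while
+// getASTIndex() is used for the parameter-type check.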
enum FormatAttrKind {
@@ -3213,43 +3186,42 @@ static FormatAttrKind getFormatAttrKind(StringRef Format) {
/// Handle __attribute__((init_priority(priority))) attributes based on
/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
-static void handleInitPriorityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.getLangOpts().CPlusPlus) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL.getName();
return;
}
if (S.getCurFunctionOrMethodDecl()) {
- S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
- Attr.setInvalid();
+ S.Diag(AL.getLoc(), diag::err_init_priority_object_attr);
+ AL.setInvalid();
return;
}
QualType T = cast<VarDecl>(D)->getType();
if (S.Context.getAsArrayType(T))
T = S.Context.getBaseElementType(T);
if (!T->getAs<RecordType>()) {
- S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
- Attr.setInvalid();
+ S.Diag(AL.getLoc(), diag::err_init_priority_object_attr);
+ AL.setInvalid();
return;
}
- Expr *E = Attr.getArgAsExpr(0);
+ Expr *E = AL.getArgAsExpr(0);
uint32_t prioritynum;
- if (!checkUInt32Argument(S, Attr, E, prioritynum)) {
- Attr.setInvalid();
+ if (!checkUInt32Argument(S, AL, E, prioritynum)) {
+ AL.setInvalid();
return;
}
if (prioritynum < 101 || prioritynum > 65535) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range)
- << E->getSourceRange() << Attr.getName() << 101 << 65535;
- Attr.setInvalid();
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_outof_range)
+ << E->getSourceRange() << AL.getName() << 101 << 65535;
+ AL.setInvalid();
return;
}
D->addAttr(::new (S.Context)
- InitPriorityAttr(Attr.getRange(), S.Context, prioritynum,
- Attr.getAttributeSpellingListIndex()));
+ InitPriorityAttr(AL.getRange(), S.Context, prioritynum,
+ AL.getAttributeSpellingListIndex()));
}
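+// Illustrative C++ usage for the checks above (hypothetical class Logger):
+//   static Logger TheLog __attribute__((init_priority(200)));
+// The variable must be of class type and the priority in [101, 65535].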
FormatAttr *Sema::mergeFormatAttr(Decl *D, SourceRange Range,
@@ -3275,10 +3247,10 @@ FormatAttr *Sema::mergeFormatAttr(Decl *D, SourceRange Range,
/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
-static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIdentifier;
return;
}
@@ -3287,7 +3259,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
bool HasImplicitThisParam = isInstanceMethod(D);
unsigned NumArgs = getFunctionOrMethodNumParams(D) + HasImplicitThisParam;
- IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
+ IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
StringRef Format = II->getName();
if (normalizeName(Format)) {
@@ -3302,20 +3274,20 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
if (Kind == InvalidFormat) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
- << Attr.getName() << II->getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL.getName() << II->getName();
return;
}
// checks for the 2nd argument
- Expr *IdxExpr = Attr.getArgAsExpr(1);
+ Expr *IdxExpr = AL.getArgAsExpr(1);
uint32_t Idx;
- if (!checkUInt32Argument(S, Attr, IdxExpr, Idx, 2))
+ if (!checkUInt32Argument(S, AL, IdxExpr, Idx, 2))
return;
if (Idx < 1 || Idx > NumArgs) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << Attr.getName() << 2 << IdxExpr->getSourceRange();
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL.getName() << 2 << IdxExpr->getSourceRange();
return;
}
@@ -3324,7 +3296,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (HasImplicitThisParam) {
if (ArgIdx == 0) {
- S.Diag(Attr.getLoc(),
+ S.Diag(AL.getLoc(),
diag::err_format_attribute_implicit_this_format_string)
<< IdxExpr->getSourceRange();
return;
@@ -3337,7 +3309,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (Kind == CFStringFormat) {
if (!isCFStringType(Ty, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ S.Diag(AL.getLoc(), diag::err_format_attribute_not)
<< "a CFString" << IdxExpr->getSourceRange()
<< getFunctionOrMethodParamRange(D, ArgIdx);
return;
@@ -3346,23 +3318,23 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// FIXME: do we need to check if the type is NSString*? What are the
// semantics?
if (!isNSStringType(Ty, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ S.Diag(AL.getLoc(), diag::err_format_attribute_not)
<< "an NSString" << IdxExpr->getSourceRange()
<< getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
} else if (!Ty->isPointerType() ||
!Ty->getAs<PointerType>()->getPointeeType()->isCharType()) {
- S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ S.Diag(AL.getLoc(), diag::err_format_attribute_not)
<< "a string type" << IdxExpr->getSourceRange()
<< getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
// check the 3rd argument
- Expr *FirstArgExpr = Attr.getArgAsExpr(2);
+ Expr *FirstArgExpr = AL.getArgAsExpr(2);
uint32_t FirstArg;
- if (!checkUInt32Argument(S, Attr, FirstArgExpr, FirstArg, 3))
+ if (!checkUInt32Argument(S, AL, FirstArgExpr, FirstArg, 3))
return;
// check if the function is variadic if the 3rd argument is non-zero
@@ -3379,43 +3351,42 @@ static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// variable the input is just the current time + the format string.
if (Kind == StrftimeFormat) {
if (FirstArg != 0) {
- S.Diag(Attr.getLoc(), diag::err_format_strftime_third_parameter)
+ S.Diag(AL.getLoc(), diag::err_format_strftime_third_parameter)
<< FirstArgExpr->getSourceRange();
return;
}
// if 0 it disables parameter checking (to use with e.g. va_list)
} else if (FirstArg != 0 && FirstArg != NumArgs) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << Attr.getName() << 3 << FirstArgExpr->getSourceRange();
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL.getName() << 3 << FirstArgExpr->getSourceRange();
return;
}
- FormatAttr *NewAttr = S.mergeFormatAttr(D, Attr.getRange(), II,
+ FormatAttr *NewAttr = S.mergeFormatAttr(D, AL.getRange(), II,
Idx, FirstArg,
- Attr.getAttributeSpellingListIndex());
+ AL.getAttributeSpellingListIndex());
if (NewAttr)
D->addAttr(NewAttr);
}
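+// The canonical subject of the checks above (hypothetical declaration):
+//   void log_msg(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+// Both indexes are one-based and count any implicit 'this' parameter.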
-static void handleTransparentUnionAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Try to find the underlying union declaration.
RecordDecl *RD = nullptr;
- TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+ const auto *TD = dyn_cast<TypedefNameDecl>(D);
if (TD && TD->getUnderlyingType()->isUnionType())
RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
else
RD = dyn_cast<RecordDecl>(D);
if (!RD || !RD->isUnion()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedUnion;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedUnion;
return;
}
if (!RD->isCompleteDefinition()) {
if (!RD->isBeingDefined())
- S.Diag(Attr.getLoc(),
+ S.Diag(AL.getLoc(),
diag::warn_transparent_union_attribute_not_definition);
return;
}
@@ -3423,7 +3394,7 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D,
RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end();
if (Field == FieldEnd) {
- S.Diag(Attr.getLoc(), diag::warn_transparent_union_attribute_zero_fields);
+ S.Diag(AL.getLoc(), diag::warn_transparent_union_attribute_zero_fields);
return;
}
@@ -3467,15 +3438,15 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D,
}
RD->addAttr(::new (S.Context)
- TransparentUnionAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ TransparentUnionAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
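+// Illustrative subject for the checks above:
+//   typedef union { int *ip; long *lp; } U __attribute__((transparent_union));
+// The union must be a complete definition with at least one field.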
-static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleAnnotateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Make sure that there is a string literal as the annotation's single
// argument.
StringRef Str;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
// Don't duplicate annotations that are already set.
@@ -3485,14 +3456,13 @@ static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
D->addAttr(::new (S.Context)
- AnnotateAttr(Attr.getRange(), S.Context, Str,
- Attr.getAttributeSpellingListIndex()));
+ AnnotateAttr(AL.getRange(), S.Context, Str,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAlignValueAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- S.AddAlignValueAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
- Attr.getAttributeSpellingListIndex());
+static void handleAlignValueAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ S.AddAlignValueAttr(AL.getRange(), D, AL.getArgAsExpr(0),
+ AL.getAttributeSpellingListIndex());
}
void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
@@ -3501,9 +3471,9 @@ void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
SourceLocation AttrLoc = AttrRange.getBegin();
QualType T;
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D))
T = TD->getUnderlyingType();
- else if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ else if (const auto *VD = dyn_cast<ValueDecl>(D))
T = VD->getType();
else
llvm_unreachable("Unknown decl type for align_value");
@@ -3540,42 +3510,32 @@ void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
D->addAttr(::new (Context) AlignValueAttr(TmpAttr));
}
-static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// check the attribute arguments.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ if (AL.getNumArgs() > 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL.getName() << 1;
return;
}
- if (Attr.getNumArgs() == 0) {
- D->addAttr(::new (S.Context) AlignedAttr(Attr.getRange(), S.Context,
- true, nullptr, Attr.getAttributeSpellingListIndex()));
+ if (AL.getNumArgs() == 0) {
+ D->addAttr(::new (S.Context) AlignedAttr(AL.getRange(), S.Context,
+ true, nullptr, AL.getAttributeSpellingListIndex()));
return;
}
- Expr *E = Attr.getArgAsExpr(0);
- if (Attr.isPackExpansion() && !E->containsUnexpandedParameterPack()) {
- S.Diag(Attr.getEllipsisLoc(),
+ Expr *E = AL.getArgAsExpr(0);
+ if (AL.isPackExpansion() && !E->containsUnexpandedParameterPack()) {
+ S.Diag(AL.getEllipsisLoc(),
diag::err_pack_expansion_without_parameter_packs);
return;
}
- if (!Attr.isPackExpansion() && S.DiagnoseUnexpandedParameterPack(E))
+ if (!AL.isPackExpansion() && S.DiagnoseUnexpandedParameterPack(E))
return;
- if (E->isValueDependent()) {
- if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) {
- if (!TND->getUnderlyingType()->isDependentType()) {
- S.Diag(Attr.getLoc(), diag::err_alignment_dependent_typedef_name)
- << E->getSourceRange();
- return;
- }
- }
- }
-
- S.AddAlignedAttr(Attr.getRange(), D, E, Attr.getAttributeSpellingListIndex(),
- Attr.isPackExpansion());
+ S.AddAlignedAttr(AL.getRange(), D, E, AL.getAttributeSpellingListIndex(),
+ AL.isPackExpansion());
}
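+// Per GCC semantics, the zero-argument form above, e.g.
+// `int x __attribute__((aligned));`, requests the maximum useful alignment
+// for the target, hence the AlignedAttr built with a null expression.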
void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
@@ -3599,12 +3559,12 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
int DiagKind = -1;
if (isa<ParmVarDecl>(D)) {
DiagKind = 0;
- } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (VD->getStorageClass() == SC_Register)
DiagKind = 1;
if (VD->isExceptionVariable())
DiagKind = 2;
- } else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ } else if (const auto *FD = dyn_cast<FieldDecl>(D)) {
if (FD->isBitField())
DiagKind = 3;
} else if (!isa<TagDecl>(D)) {
@@ -3620,7 +3580,18 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
}
}
- if (E->isTypeDependent() || E->isValueDependent()) {
+ if (E->isValueDependent()) {
+ // We can't support a dependent alignment on a non-dependent type,
+ // because we have no way to model that a type is "alignment-dependent"
+ // but not dependent in any other way.
+ if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) {
+ if (!TND->getUnderlyingType()->isDependentType()) {
+ Diag(AttrLoc, diag::err_alignment_dependent_typedef_name)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
// Save dependent expressions in the AST to be instantiated.
AlignedAttr *AA = ::new (Context) AlignedAttr(TmpAttr);
AA->setPackExpansion(IsPackExpansion);
@@ -3628,7 +3599,7 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
return;
}
- // FIXME: Cache the number on the Attr object?
+ // FIXME: Cache the number on the AL object?
llvm::APSInt Alignment;
ExprResult ICE
= VerifyIntegerConstantExpression(E, &Alignment,
@@ -3666,7 +3637,7 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned MaxTLSAlign =
Context.toCharUnitsFromBits(Context.getTargetInfo().getMaxTLSAlign())
.getQuantity();
- auto *VD = dyn_cast<VarDecl>(D);
+ const auto *VD = dyn_cast<VarDecl>(D);
if (MaxTLSAlign && AlignVal > MaxTLSAlign && VD &&
VD->getTLSKind() != VarDecl::TLS_None) {
Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
@@ -3683,7 +3654,7 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS,
unsigned SpellingListIndex, bool IsPackExpansion) {
- // FIXME: Cache the number on the Attr object if non-dependent?
+ // FIXME: Cache the number on the AL object if non-dependent?
// FIXME: Perform checking of type validity
AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, false, TS,
SpellingListIndex);
@@ -3695,11 +3666,11 @@ void Sema::CheckAlignasUnderalignment(Decl *D) {
assert(D->hasAttrs() && "no attributes on decl");
QualType UnderlyingTy, DiagTy;
- if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D)) {
UnderlyingTy = DiagTy = VD->getType();
} else {
UnderlyingTy = DiagTy = Context.getTagDeclType(cast<TagDecl>(D));
- if (EnumDecl *ED = dyn_cast<EnumDecl>(D))
+ if (const auto *ED = dyn_cast<EnumDecl>(D))
UnderlyingTy = ED->getIntegerType();
}
if (DiagTy->isDependentType() || DiagTy->isIncompleteType())
@@ -3819,18 +3790,18 @@ static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
/// Despite what would be logical, the mode attribute is a decl attribute, not a
/// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be
/// HImode, not an intermediate pointer.
-static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleModeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// This attribute isn't documented, but glibc uses it. It changes
// the width of an int or unsigned int to the specified size.
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type) << AL.getName()
<< AANT_ArgumentIdentifier;
return;
}
- IdentifierInfo *Name = Attr.getArgAsIdent(0)->Ident;
+ IdentifierInfo *Name = AL.getArgAsIdent(0)->Ident;
- S.AddModeAttr(Attr.getRange(), D, Name, Attr.getAttributeSpellingListIndex());
+ S.AddModeAttr(AL.getRange(), D, Name, AL.getAttributeSpellingListIndex());
}
void Sema::AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
@@ -3876,9 +3847,9 @@ void Sema::AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
}
QualType OldTy;
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D))
OldTy = TD->getUnderlyingType();
- else if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ else if (const auto *ED = dyn_cast<EnumDecl>(D)) {
// Something like 'typedef enum { X } __attribute__((mode(XX))) T;'.
// Try to get type from enum declaration, default to int.
OldTy = ED->getIntegerType();
@@ -3896,7 +3867,7 @@ void Sema::AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
// Base type can also be a vector type (see PR17453).
// Distinguish between base type and base element type.
QualType OldElemTy = OldTy;
- if (const VectorType *VT = OldTy->getAs<VectorType>())
+ if (const auto *VT = OldTy->getAs<VectorType>())
OldElemTy = VT->getElementType();
// GCC allows 'mode' attribute on enumeration types (even incomplete), except
@@ -3945,7 +3916,7 @@ void Sema::AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
if (VectorSize.getBoolValue()) {
NewTy = Context.getVectorType(NewTy, VectorSize.getZExtValue(),
VectorType::GenericVector);
- } else if (const VectorType *OldVT = OldTy->getAs<VectorType>()) {
+ } else if (const auto *OldVT = OldTy->getAs<VectorType>()) {
// Complex machine mode does not support base vector types.
if (ComplexMode) {
Diag(AttrLoc, diag::err_complex_mode_vector_type);
@@ -3964,9 +3935,9 @@ void Sema::AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
}
// Install the new type.
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D))
TD->setModedTypeSourceInfo(TD->getTypeSourceInfo(), NewTy);
- else if (EnumDecl *ED = dyn_cast<EnumDecl>(D))
+ else if (auto *ED = dyn_cast<EnumDecl>(D))
ED->setIntegerType(NewTy);
else
cast<ValueDecl>(D)->setType(NewTy);
@@ -3975,10 +3946,10 @@ void Sema::AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
ModeAttr(AttrRange, Context, Name, SpellingListIndex));
}
-static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleNoDebugAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context)
- NoDebugAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ NoDebugAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
@@ -4010,7 +3981,7 @@ InternalLinkageAttr *
Sema::mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex) {
- if (auto VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
// Attribute applies to Var but not any subclass of it (like ParmVar,
// ImplicitParm or VarTemplateSpecialization).
if (VD->getKind() != Decl::Var) {
@@ -4067,71 +4038,70 @@ OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
AttrSpellingListIndex);
}
-static void handleAlwaysInlineAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, Attr.getRange(),
- Attr.getName()))
+static void handleAlwaysInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, AL.getRange(),
+ AL.getName()))
return;
if (AlwaysInlineAttr *Inline = S.mergeAlwaysInlineAttr(
- D, Attr.getRange(), Attr.getName(),
- Attr.getAttributeSpellingListIndex()))
+ D, AL.getRange(), AL.getName(),
+ AL.getAttributeSpellingListIndex()))
D->addAttr(Inline);
}
-static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleMinSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (MinSizeAttr *MinSize = S.mergeMinSizeAttr(
- D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
+ D, AL.getRange(), AL.getAttributeSpellingListIndex()))
D->addAttr(MinSize);
}
-static void handleOptimizeNoneAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleOptimizeNoneAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (OptimizeNoneAttr *Optnone = S.mergeOptimizeNoneAttr(
- D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
+ D, AL.getRange(), AL.getAttributeSpellingListIndex()))
D->addAttr(Optnone);
}
-static void handleConstantAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, Attr.getRange(),
- Attr.getName()))
+static void handleConstantAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL.getRange(),
+ AL.getName()))
return;
- auto *VD = cast<VarDecl>(D);
+ const auto *VD = cast<VarDecl>(D);
if (!VD->hasGlobalStorage()) {
- S.Diag(Attr.getLoc(), diag::err_cuda_nonglobal_constant);
+ S.Diag(AL.getLoc(), diag::err_cuda_nonglobal_constant);
return;
}
D->addAttr(::new (S.Context) CUDAConstantAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
-static void handleSharedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, Attr.getRange(),
- Attr.getName()))
+static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL.getRange(),
+ AL.getName()))
return;
- auto *VD = cast<VarDecl>(D);
+ const auto *VD = cast<VarDecl>(D);
// extern __shared__ is only allowed on arrays with no length (e.g.
// "int x[]").
- if (VD->hasExternalStorage() && !isa<IncompleteArrayType>(VD->getType())) {
- S.Diag(Attr.getLoc(), diag::err_cuda_extern_shared) << VD;
+ if (!S.getLangOpts().CUDARelocatableDeviceCode && VD->hasExternalStorage() &&
+ !isa<IncompleteArrayType>(VD->getType())) {
+ S.Diag(AL.getLoc(), diag::err_cuda_extern_shared) << VD;
return;
}
if (S.getLangOpts().CUDA && VD->hasLocalStorage() &&
- S.CUDADiagIfHostCode(Attr.getLoc(), diag::err_cuda_host_shared)
+ S.CUDADiagIfHostCode(AL.getLoc(), diag::err_cuda_host_shared)
<< S.CurrentCUDATarget())
return;
D->addAttr(::new (S.Context) CUDASharedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
-static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CUDADeviceAttr>(S, D, Attr.getRange(),
- Attr.getName()) ||
- checkAttrMutualExclusion<CUDAHostAttr>(S, D, Attr.getRange(),
- Attr.getName())) {
+static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<CUDADeviceAttr>(S, D, AL.getRange(),
+ AL.getName()) ||
+ checkAttrMutualExclusion<CUDAHostAttr>(S, D, AL.getRange(),
+ AL.getName())) {
return;
}
- FunctionDecl *FD = cast<FunctionDecl>(D);
+ const auto *FD = cast<FunctionDecl>(D);
if (!FD->getReturnType()->isVoidType()) {
SourceRange RTRange = FD->getReturnTypeSourceRange();
S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
@@ -4153,88 +4123,88 @@ static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(FD->getLocStart(), diag::warn_kern_is_inline) << FD;
D->addAttr(::new (S.Context)
- CUDAGlobalAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ CUDAGlobalAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- FunctionDecl *Fn = cast<FunctionDecl>(D);
+static void handleGNUInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ const auto *Fn = cast<FunctionDecl>(D);
if (!Fn->isInlineSpecified()) {
- S.Diag(Attr.getLoc(), diag::warn_gnu_inline_attribute_requires_inline);
+ S.Diag(AL.getLoc(), diag::warn_gnu_inline_attribute_requires_inline);
return;
}
D->addAttr(::new (S.Context)
- GNUInlineAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ GNUInlineAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (hasDeclarator(D)) return;
- // Diagnostic is emitted elsewhere: here we store the (valid) Attr
+ // Diagnostic is emitted elsewhere: here we store the (valid) AL
// in the Decl node for syntactic reasoning, e.g., pretty-printing.
CallingConv CC;
- if (S.CheckCallingConvAttr(Attr, CC, /*FD*/nullptr))
+ if (S.CheckCallingConvAttr(AL, CC, /*FD*/nullptr))
return;
if (!isa<ObjCMethodDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedFunctionOrMethod;
return;
}
- switch (Attr.getKind()) {
- case AttributeList::AT_FastCall:
+ switch (AL.getKind()) {
+ case ParsedAttr::AT_FastCall:
D->addAttr(::new (S.Context)
- FastCallAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ FastCallAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_StdCall:
+ case ParsedAttr::AT_StdCall:
D->addAttr(::new (S.Context)
- StdCallAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ StdCallAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_ThisCall:
+ case ParsedAttr::AT_ThisCall:
D->addAttr(::new (S.Context)
- ThisCallAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ThisCallAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_CDecl:
+ case ParsedAttr::AT_CDecl:
D->addAttr(::new (S.Context)
- CDeclAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ CDeclAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_Pascal:
+ case ParsedAttr::AT_Pascal:
D->addAttr(::new (S.Context)
- PascalAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ PascalAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_SwiftCall:
+ case ParsedAttr::AT_SwiftCall:
D->addAttr(::new (S.Context)
- SwiftCallAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ SwiftCallAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_VectorCall:
+ case ParsedAttr::AT_VectorCall:
D->addAttr(::new (S.Context)
- VectorCallAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ VectorCallAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_MSABI:
+ case ParsedAttr::AT_MSABI:
D->addAttr(::new (S.Context)
- MSABIAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ MSABIAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_SysVABI:
+ case ParsedAttr::AT_SysVABI:
D->addAttr(::new (S.Context)
- SysVABIAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ SysVABIAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_RegCall:
+ case ParsedAttr::AT_RegCall:
D->addAttr(::new (S.Context) RegCallAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_Pcs: {
+ case ParsedAttr::AT_Pcs: {
PcsAttr::PCSType PCS;
switch (CC) {
case CC_AAPCS:
@@ -4248,37 +4218,37 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
D->addAttr(::new (S.Context)
- PcsAttr(Attr.getRange(), S.Context, PCS,
- Attr.getAttributeSpellingListIndex()));
+ PcsAttr(AL.getRange(), S.Context, PCS,
+ AL.getAttributeSpellingListIndex()));
return;
}
- case AttributeList::AT_IntelOclBicc:
+ case ParsedAttr::AT_IntelOclBicc:
D->addAttr(::new (S.Context)
- IntelOclBiccAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ IntelOclBiccAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_PreserveMost:
+ case ParsedAttr::AT_PreserveMost:
D->addAttr(::new (S.Context) PreserveMostAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_PreserveAll:
+ case ParsedAttr::AT_PreserveAll:
D->addAttr(::new (S.Context) PreserveAllAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
default:
llvm_unreachable("unexpected attribute kind");
}
}
-static void handleSuppressAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+static void handleSuppressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
std::vector<StringRef> DiagnosticIdentifiers;
- for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) {
+ for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
StringRef RuleName;
- if (!S.checkStringLiteralArgumentAttr(Attr, I, RuleName, nullptr))
+ if (!S.checkStringLiteralArgumentAttr(AL, I, RuleName, nullptr))
return;
// FIXME: Warn if the rule name is unknown. This is tricky because only
@@ -4286,11 +4256,11 @@ static void handleSuppressAttr(Sema &S, Decl *D, const AttributeList &Attr) {
DiagnosticIdentifiers.push_back(RuleName);
}
D->addAttr(::new (S.Context) SuppressAttr(
- Attr.getRange(), S.Context, DiagnosticIdentifiers.data(),
- DiagnosticIdentifiers.size(), Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, DiagnosticIdentifiers.data(),
+ DiagnosticIdentifiers.size(), AL.getAttributeSpellingListIndex()));
}
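+// e.g. (illustrative rule name) [[gsl::suppress("type.1")]]; every argument
+// must be a string literal, and unknown rule names are not yet diagnosed per
+// the FIXME above.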
-bool Sema::CheckCallingConvAttr(const AttributeList &Attrs, CallingConv &CC,
+bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
const FunctionDecl *FD) {
if (Attrs.isInvalid())
return true;
@@ -4300,7 +4270,7 @@ bool Sema::CheckCallingConvAttr(const AttributeList &Attrs, CallingConv &CC,
return false;
}
- unsigned ReqArgs = Attrs.getKind() == AttributeList::AT_Pcs ? 1 : 0;
+ unsigned ReqArgs = Attrs.getKind() == ParsedAttr::AT_Pcs ? 1 : 0;
if (!checkAttributeNumArgs(*this, Attrs, ReqArgs)) {
Attrs.setInvalid();
return true;
@@ -4308,23 +4278,39 @@ bool Sema::CheckCallingConvAttr(const AttributeList &Attrs, CallingConv &CC,
// TODO: diagnose uses of these conventions on the wrong target.
switch (Attrs.getKind()) {
- case AttributeList::AT_CDecl: CC = CC_C; break;
- case AttributeList::AT_FastCall: CC = CC_X86FastCall; break;
- case AttributeList::AT_StdCall: CC = CC_X86StdCall; break;
- case AttributeList::AT_ThisCall: CC = CC_X86ThisCall; break;
- case AttributeList::AT_Pascal: CC = CC_X86Pascal; break;
- case AttributeList::AT_SwiftCall: CC = CC_Swift; break;
- case AttributeList::AT_VectorCall: CC = CC_X86VectorCall; break;
- case AttributeList::AT_RegCall: CC = CC_X86RegCall; break;
- case AttributeList::AT_MSABI:
+ case ParsedAttr::AT_CDecl:
+ CC = CC_C;
+ break;
+ case ParsedAttr::AT_FastCall:
+ CC = CC_X86FastCall;
+ break;
+ case ParsedAttr::AT_StdCall:
+ CC = CC_X86StdCall;
+ break;
+ case ParsedAttr::AT_ThisCall:
+ CC = CC_X86ThisCall;
+ break;
+ case ParsedAttr::AT_Pascal:
+ CC = CC_X86Pascal;
+ break;
+ case ParsedAttr::AT_SwiftCall:
+ CC = CC_Swift;
+ break;
+ case ParsedAttr::AT_VectorCall:
+ CC = CC_X86VectorCall;
+ break;
+ case ParsedAttr::AT_RegCall:
+ CC = CC_X86RegCall;
+ break;
+ case ParsedAttr::AT_MSABI:
CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_C :
CC_Win64;
break;
- case AttributeList::AT_SysVABI:
+ case ParsedAttr::AT_SysVABI:
CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_X86_64SysV :
CC_C;
break;
- case AttributeList::AT_Pcs: {
+ case ParsedAttr::AT_Pcs: {
StringRef StrRef;
if (!checkStringLiteralArgumentAttr(Attrs, 0, StrRef)) {
Attrs.setInvalid();
@@ -4342,9 +4328,15 @@ bool Sema::CheckCallingConvAttr(const AttributeList &Attrs, CallingConv &CC,
Diag(Attrs.getLoc(), diag::err_invalid_pcs);
return true;
}
- case AttributeList::AT_IntelOclBicc: CC = CC_IntelOclBicc; break;
- case AttributeList::AT_PreserveMost: CC = CC_PreserveMost; break;
- case AttributeList::AT_PreserveAll: CC = CC_PreserveAll; break;
+ case ParsedAttr::AT_IntelOclBicc:
+ CC = CC_IntelOclBicc;
+ break;
+ case ParsedAttr::AT_PreserveMost:
+ CC = CC_PreserveMost;
+ break;
+ case ParsedAttr::AT_PreserveAll:
+ CC = CC_PreserveAll;
+ break;
default: llvm_unreachable("unexpected attribute kind");
}
@@ -4369,39 +4361,39 @@ bool Sema::CheckCallingConvAttr(const AttributeList &Attrs, CallingConv &CC,
}
/// Pointer-like types in the default address space.
-static bool isValidSwiftContextType(QualType type) {
- if (!type->hasPointerRepresentation())
- return type->isDependentType();
- return type->getPointeeType().getAddressSpace() == LangAS::Default;
+static bool isValidSwiftContextType(QualType Ty) {
+ if (!Ty->hasPointerRepresentation())
+ return Ty->isDependentType();
+ return Ty->getPointeeType().getAddressSpace() == LangAS::Default;
}
/// Pointers and references in the default address space.
-static bool isValidSwiftIndirectResultType(QualType type) {
- if (auto ptrType = type->getAs<PointerType>()) {
- type = ptrType->getPointeeType();
- } else if (auto refType = type->getAs<ReferenceType>()) {
- type = refType->getPointeeType();
+static bool isValidSwiftIndirectResultType(QualType Ty) {
+ if (const auto *PtrType = Ty->getAs<PointerType>()) {
+ Ty = PtrType->getPointeeType();
+ } else if (const auto *RefType = Ty->getAs<ReferenceType>()) {
+ Ty = RefType->getPointeeType();
} else {
- return type->isDependentType();
+ return Ty->isDependentType();
}
- return type.getAddressSpace() == LangAS::Default;
+ return Ty.getAddressSpace() == LangAS::Default;
}
/// Pointers and references to pointers in the default address space.
-static bool isValidSwiftErrorResultType(QualType type) {
- if (auto ptrType = type->getAs<PointerType>()) {
- type = ptrType->getPointeeType();
- } else if (auto refType = type->getAs<ReferenceType>()) {
- type = refType->getPointeeType();
+static bool isValidSwiftErrorResultType(QualType Ty) {
+ if (const auto *PtrType = Ty->getAs<PointerType>()) {
+ Ty = PtrType->getPointeeType();
+ } else if (const auto *RefType = Ty->getAs<ReferenceType>()) {
+ Ty = RefType->getPointeeType();
} else {
- return type->isDependentType();
+ return Ty->isDependentType();
}
- if (!type.getQualifiers().empty())
+ if (!Ty.getQualifiers().empty())
return false;
- return isValidSwiftContextType(type);
+ return isValidSwiftContextType(Ty);
}
-static void handleParameterABIAttr(Sema &S, Decl *D, const AttributeList &Attrs,
+static void handleParameterABIAttr(Sema &S, Decl *D, const ParsedAttr &Attrs,
ParameterABI Abi) {
S.AddParameterABIAttr(Attrs.getRange(), D, Abi,
Attrs.getAttributeSpellingListIndex());
@@ -4460,34 +4452,34 @@ void Sema::AddParameterABIAttr(SourceRange range, Decl *D, ParameterABI abi,
/// Checks a regparm attribute, returning true if it is ill-formed and
/// otherwise setting numParams to the appropriate value.
-bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
- if (Attr.isInvalid())
+bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
+ if (AL.isInvalid())
return true;
- if (!checkAttributeNumArgs(*this, Attr, 1)) {
- Attr.setInvalid();
+ if (!checkAttributeNumArgs(*this, AL, 1)) {
+ AL.setInvalid();
return true;
}
uint32_t NP;
- Expr *NumParamsExpr = Attr.getArgAsExpr(0);
- if (!checkUInt32Argument(*this, Attr, NumParamsExpr, NP)) {
- Attr.setInvalid();
+ Expr *NumParamsExpr = AL.getArgAsExpr(0);
+ if (!checkUInt32Argument(*this, AL, NumParamsExpr, NP)) {
+ AL.setInvalid();
return true;
}
if (Context.getTargetInfo().getRegParmMax() == 0) {
- Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
+ Diag(AL.getLoc(), diag::err_attribute_regparm_wrong_platform)
<< NumParamsExpr->getSourceRange();
- Attr.setInvalid();
+ AL.setInvalid();
return true;
}
numParams = NP;
if (numParams > Context.getTargetInfo().getRegParmMax()) {
- Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
+ Diag(AL.getLoc(), diag::err_attribute_regparm_invalid_number)
<< Context.getTargetInfo().getRegParmMax() << NumParamsExpr->getSourceRange();
- Attr.setInvalid();
+ AL.setInvalid();
return true;
}
@@ -4499,7 +4491,7 @@ bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
// non-nullptr Expr result on success. Otherwise, it returns nullptr
// and may output an error.
static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
- const CUDALaunchBoundsAttr &Attr,
+ const CUDALaunchBoundsAttr &AL,
const unsigned Idx) {
if (S.DiagnoseUnexpandedParameterPack(E))
return nullptr;
@@ -4512,7 +4504,7 @@ static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
llvm::APSInt I(64);
if (!E->isIntegerConstantExpr(I, S.Context)) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type)
- << &Attr << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange();
+ << &AL << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange();
return nullptr;
}
// Make sure we can fit it in 32 bits.
@@ -4523,7 +4515,7 @@ static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
}
if (I < 0)
S.Diag(E->getExprLoc(), diag::warn_attribute_argument_n_negative)
- << &Attr << Idx << E->getSourceRange();
+ << &AL << Idx << E->getSourceRange();
// We may need to perform implicit conversion of the argument.
InitializedEntity Entity = InitializedEntity::InitializeParameter(
@@ -4553,253 +4545,232 @@ void Sema::AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
AttrRange, Context, MaxThreads, MinBlocks, SpellingListIndex));
}
-static void handleLaunchBoundsAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
- !checkAttributeAtMostNumArgs(S, Attr, 2))
+static void handleLaunchBoundsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1) ||
+ !checkAttributeAtMostNumArgs(S, AL, 2))
return;
- S.AddLaunchBoundsAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
- Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr,
- Attr.getAttributeSpellingListIndex());
+ S.AddLaunchBoundsAttr(AL.getRange(), D, AL.getArgAsExpr(0),
+ AL.getNumArgs() > 1 ? AL.getArgAsExpr(1) : nullptr,
+ AL.getAttributeSpellingListIndex());
}
static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << /* arg num = */ 1 << AANT_ArgumentIdentifier;
- return;
- }
-
- if (!checkAttributeNumArgs(S, Attr, 3))
- return;
-
- IdentifierInfo *ArgumentKind = Attr.getArgAsIdent(0)->Ident;
-
- if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << /* arg num = */ 1 << AANT_ArgumentIdentifier;
return;
}
- uint64_t ArgumentIdx;
- if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 2, Attr.getArgAsExpr(1),
+ ParamIdx ArgumentIdx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, 2, AL.getArgAsExpr(1),
ArgumentIdx))
return;
- uint64_t TypeTagIdx;
- if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 3, Attr.getArgAsExpr(2),
+ ParamIdx TypeTagIdx;
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, 3, AL.getArgAsExpr(2),
TypeTagIdx))
return;
- bool IsPointer = (Attr.getName()->getName() == "pointer_with_type_tag");
+ bool IsPointer = AL.getName()->getName() == "pointer_with_type_tag";
if (IsPointer) {
// Ensure that buffer has a pointer type.
- QualType BufferTy = getFunctionOrMethodParamType(D, ArgumentIdx);
- if (!BufferTy->isPointerType()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
- << Attr.getName() << 0;
- }
+ unsigned ArgumentIdxAST = ArgumentIdx.getASTIndex();
+ if (ArgumentIdxAST >= getFunctionOrMethodNumParams(D) ||
+ !getFunctionOrMethodParamType(D, ArgumentIdxAST)->isPointerType())
+ S.Diag(AL.getLoc(), diag::err_attribute_pointers_only)
+ << AL.getName() << 0;
}
- D->addAttr(::new (S.Context)
- ArgumentWithTypeTagAttr(Attr.getRange(), S.Context, ArgumentKind,
- ArgumentIdx, TypeTagIdx, IsPointer,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) ArgumentWithTypeTagAttr(
+ AL.getRange(), S.Context, AL.getArgAsIdent(0)->Ident, ArgumentIdx,
+ TypeTagIdx, IsPointer, AL.getAttributeSpellingListIndex()));
}
static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!Attr.isArgIdent(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << 1 << AANT_ArgumentIdentifier;
+ const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL.getName() << 1 << AANT_ArgumentIdentifier;
return;
}
- if (!checkAttributeNumArgs(S, Attr, 1))
+ if (!checkAttributeNumArgs(S, AL, 1))
return;
if (!isa<VarDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedVariable;
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedVariable;
return;
}
- IdentifierInfo *PointerKind = Attr.getArgAsIdent(0)->Ident;
+ IdentifierInfo *PointerKind = AL.getArgAsIdent(0)->Ident;
TypeSourceInfo *MatchingCTypeLoc = nullptr;
- S.GetTypeFromParser(Attr.getMatchingCType(), &MatchingCTypeLoc);
+ S.GetTypeFromParser(AL.getMatchingCType(), &MatchingCTypeLoc);
assert(MatchingCTypeLoc && "no type source info for attribute argument");
D->addAttr(::new (S.Context)
- TypeTagForDatatypeAttr(Attr.getRange(), S.Context, PointerKind,
+ TypeTagForDatatypeAttr(AL.getRange(), S.Context, PointerKind,
MatchingCTypeLoc,
- Attr.getLayoutCompatible(),
- Attr.getMustBeNull(),
- Attr.getAttributeSpellingListIndex()));
+ AL.getLayoutCompatible(),
+ AL.getMustBeNull(),
+ AL.getAttributeSpellingListIndex()));
}
-static void handleXRayLogArgsAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- uint64_t ArgCount;
+static void handleXRayLogArgsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ ParamIdx ArgCount;
- if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, Attr.getArgAsExpr(0),
+ if (!checkFunctionOrMethodParameterIndex(S, D, AL, 1, AL.getArgAsExpr(0),
ArgCount,
- true /* AllowImplicitThis*/))
+ true /* CanIndexImplicitThis */))
return;
- // ArgCount isn't a parameter index [0;n), it's a count [1;n] - hence + 1.
- D->addAttr(::new (S.Context)
- XRayLogArgsAttr(Attr.getRange(), S.Context, ++ArgCount,
- Attr.getAttributeSpellingListIndex()));
+ // ArgCount isn't a parameter index [0;n), it's a count [1;n].
+ D->addAttr(::new (S.Context) XRayLogArgsAttr(
+ AL.getRange(), S.Context, ArgCount.getSourceIndex(),
+ AL.getAttributeSpellingListIndex()));
}
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
-static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType type) {
- return type->isDependentType() ||
- type->isObjCRetainableType();
+static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType QT) {
+ return QT->isDependentType() || QT->isObjCRetainableType();
}
-static bool isValidSubjectOfNSAttribute(Sema &S, QualType type) {
- return type->isDependentType() ||
- type->isObjCObjectPointerType() ||
- S.Context.isObjCNSObjectType(type);
+static bool isValidSubjectOfNSAttribute(Sema &S, QualType QT) {
+ return QT->isDependentType() || QT->isObjCObjectPointerType() ||
+ S.Context.isObjCNSObjectType(QT);
}
-static bool isValidSubjectOfCFAttribute(Sema &S, QualType type) {
- return type->isDependentType() ||
- type->isPointerType() ||
- isValidSubjectOfNSAttribute(S, type);
+static bool isValidSubjectOfCFAttribute(Sema &S, QualType QT) {
+ return QT->isDependentType() || QT->isPointerType() ||
+ isValidSubjectOfNSAttribute(S, QT);
}
-static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- S.AddNSConsumedAttr(Attr.getRange(), D, Attr.getAttributeSpellingListIndex(),
- Attr.getKind() == AttributeList::AT_NSConsumed,
+static void handleNSConsumedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ S.AddNSConsumedAttr(AL.getRange(), D, AL.getAttributeSpellingListIndex(),
+ AL.getKind() == ParsedAttr::AT_NSConsumed,
/*template instantiation*/ false);
}
-void Sema::AddNSConsumedAttr(SourceRange attrRange, Decl *D,
- unsigned spellingIndex, bool isNSConsumed,
- bool isTemplateInstantiation) {
- ParmVarDecl *param = cast<ParmVarDecl>(D);
- bool typeOK;
+void Sema::AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
+ unsigned SpellingIndex, bool IsNSConsumed,
+ bool IsTemplateInstantiation) {
+ const auto *Param = cast<ParmVarDecl>(D);
+ bool TypeOK;
- if (isNSConsumed) {
- typeOK = isValidSubjectOfNSAttribute(*this, param->getType());
- } else {
- typeOK = isValidSubjectOfCFAttribute(*this, param->getType());
- }
+ if (IsNSConsumed)
+ TypeOK = isValidSubjectOfNSAttribute(*this, Param->getType());
+ else
+ TypeOK = isValidSubjectOfCFAttribute(*this, Param->getType());
- if (!typeOK) {
+ if (!TypeOK) {
// These attributes are normally just advisory, but in ARC, ns_consumed
// is significant. Allow non-dependent code to contain inappropriate
// attributes even in ARC, but require template instantiations to be
// set up correctly.
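+    // For example (illustrative only), under ARC a dependent use such as
+    //   template <typename T> void take(__attribute__((ns_consumed)) T);
+    // is accepted as written but rejected once instantiated with a
+    // non-retainable type such as int.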
- Diag(D->getLocStart(),
- (isTemplateInstantiation && isNSConsumed &&
- getLangOpts().ObjCAutoRefCount
- ? diag::err_ns_attribute_wrong_parameter_type
- : diag::warn_ns_attribute_wrong_parameter_type))
- << attrRange
- << (isNSConsumed ? "ns_consumed" : "cf_consumed")
- << (isNSConsumed ? /*objc pointers*/ 0 : /*cf pointers*/ 1);
+ Diag(D->getLocStart(), (IsTemplateInstantiation && IsNSConsumed &&
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_ns_attribute_wrong_parameter_type
+ : diag::warn_ns_attribute_wrong_parameter_type))
+ << AttrRange << (IsNSConsumed ? "ns_consumed" : "cf_consumed")
+ << (IsNSConsumed ? /*objc pointers*/ 0 : /*cf pointers*/ 1);
return;
}
- if (isNSConsumed)
- param->addAttr(::new (Context)
- NSConsumedAttr(attrRange, Context, spellingIndex));
+ if (IsNSConsumed)
+ D->addAttr(::new (Context)
+ NSConsumedAttr(AttrRange, Context, SpellingIndex));
else
- param->addAttr(::new (Context)
- CFConsumedAttr(attrRange, Context, spellingIndex));
+ D->addAttr(::new (Context)
+ CFConsumedAttr(AttrRange, Context, SpellingIndex));
}
-bool Sema::checkNSReturnsRetainedReturnType(SourceLocation loc,
- QualType type) {
- if (isValidSubjectOfNSReturnsRetainedAttribute(type))
+bool Sema::checkNSReturnsRetainedReturnType(SourceLocation Loc, QualType QT) {
+ if (isValidSubjectOfNSReturnsRetainedAttribute(QT))
return false;
- Diag(loc, diag::warn_ns_attribute_wrong_return_type)
- << "'ns_returns_retained'" << 0 << 0;
+ Diag(Loc, diag::warn_ns_attribute_wrong_return_type)
+ << "'ns_returns_retained'" << 0 << 0;
return true;
}
static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- QualType returnType;
+ const ParsedAttr &AL) {
+ QualType ReturnType;
- if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
- returnType = MD->getReturnType();
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ ReturnType = MD->getReturnType();
else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
- (Attr.getKind() == AttributeList::AT_NSReturnsRetained))
+ (AL.getKind() == ParsedAttr::AT_NSReturnsRetained))
return; // ignore: was handled as a type attribute
- else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
- returnType = PD->getType();
- else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- returnType = FD->getReturnType();
- else if (auto *Param = dyn_cast<ParmVarDecl>(D)) {
- returnType = Param->getType()->getPointeeType();
- if (returnType.isNull()) {
+ else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D))
+ ReturnType = PD->getType();
+ else if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ ReturnType = FD->getReturnType();
+ else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ ReturnType = Param->getType()->getPointeeType();
+ if (ReturnType.isNull()) {
S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
- << Attr.getName() << /*pointer-to-CF*/2
- << Attr.getRange();
+ << AL.getName() << /*pointer-to-CF*/2
+ << AL.getRange();
return;
}
- } else if (Attr.isUsedAsTypeAttr()) {
+ } else if (AL.isUsedAsTypeAttr()) {
return;
} else {
AttributeDeclKind ExpectedDeclKind;
- switch (Attr.getKind()) {
+ switch (AL.getKind()) {
default: llvm_unreachable("invalid ownership attribute");
- case AttributeList::AT_NSReturnsRetained:
- case AttributeList::AT_NSReturnsAutoreleased:
- case AttributeList::AT_NSReturnsNotRetained:
+ case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ case ParsedAttr::AT_NSReturnsNotRetained:
ExpectedDeclKind = ExpectedFunctionOrMethod;
break;
- case AttributeList::AT_CFReturnsRetained:
- case AttributeList::AT_CFReturnsNotRetained:
+ case ParsedAttr::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
ExpectedDeclKind = ExpectedFunctionMethodOrParameter;
break;
}
S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
- << Attr.getRange() << Attr.getName() << ExpectedDeclKind;
+ << AL.getRange() << AL.getName() << ExpectedDeclKind;
return;
}
- bool typeOK;
- bool cf;
- switch (Attr.getKind()) {
+ bool TypeOK;
+ bool Cf;
+ switch (AL.getKind()) {
default: llvm_unreachable("invalid ownership attribute");
- case AttributeList::AT_NSReturnsRetained:
- typeOK = isValidSubjectOfNSReturnsRetainedAttribute(returnType);
- cf = false;
+ case ParsedAttr::AT_NSReturnsRetained:
+ TypeOK = isValidSubjectOfNSReturnsRetainedAttribute(ReturnType);
+ Cf = false;
break;
-
- case AttributeList::AT_NSReturnsAutoreleased:
- case AttributeList::AT_NSReturnsNotRetained:
- typeOK = isValidSubjectOfNSAttribute(S, returnType);
- cf = false;
+
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ TypeOK = isValidSubjectOfNSAttribute(S, ReturnType);
+ Cf = false;
break;
- case AttributeList::AT_CFReturnsRetained:
- case AttributeList::AT_CFReturnsNotRetained:
- typeOK = isValidSubjectOfCFAttribute(S, returnType);
- cf = true;
+ case ParsedAttr::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ TypeOK = isValidSubjectOfCFAttribute(S, ReturnType);
+ Cf = true;
break;
}
- if (!typeOK) {
- if (Attr.isUsedAsTypeAttr())
+ if (!TypeOK) {
+ if (AL.isUsedAsTypeAttr())
return;
if (isa<ParmVarDecl>(D)) {
S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
- << Attr.getName() << /*pointer-to-CF*/2
- << Attr.getRange();
+ << AL.getName() << /*pointer-to-CF*/2
+ << AL.getRange();
} else {
// Needs to be kept in sync with warn_ns_attribute_wrong_return_type.
enum : unsigned {
@@ -4812,40 +4783,40 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
else if (isa<ObjCPropertyDecl>(D))
SubjectKind = Property;
S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
- << Attr.getName() << SubjectKind << cf
- << Attr.getRange();
+ << AL.getName() << SubjectKind << Cf
+ << AL.getRange();
}
return;
}
- switch (Attr.getKind()) {
+ switch (AL.getKind()) {
default:
llvm_unreachable("invalid ownership attribute");
- case AttributeList::AT_NSReturnsAutoreleased:
+ case ParsedAttr::AT_NSReturnsAutoreleased:
D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_CFReturnsNotRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_NSReturnsNotRetained:
+ case ParsedAttr::AT_NSReturnsNotRetained:
D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsRetained:
D->addAttr(::new (S.Context) CFReturnsRetainedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
- case AttributeList::AT_NSReturnsRetained:
+ case ParsedAttr::AT_NSReturnsRetained:
D->addAttr(::new (S.Context) NSReturnsRetainedAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
return;
};
}
static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
- const AttributeList &Attrs) {
+ const ParsedAttr &Attrs) {
const int EP_ObjCMethod = 1;
const int EP_ObjCProperty = 2;
@@ -4873,116 +4844,91 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
}
static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
- const AttributeList &Attrs) {
- ObjCMethodDecl *method = cast<ObjCMethodDecl>(D);
-
- DeclContext *DC = method->getDeclContext();
- if (const ObjCProtocolDecl *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
+ const ParsedAttr &Attrs) {
+ const auto *Method = cast<ObjCMethodDecl>(D);
+
+ const DeclContext *DC = Method->getDeclContext();
+ if (const auto *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
- << Attrs.getName() << 0;
+ << Attrs.getName() << 0;
S.Diag(PDecl->getLocation(), diag::note_protocol_decl);
return;
}
- if (method->getMethodFamily() == OMF_dealloc) {
+ if (Method->getMethodFamily() == OMF_dealloc) {
S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
- << Attrs.getName() << 1;
+ << Attrs.getName() << 1;
return;
}
-
- method->addAttr(::new (S.Context)
- ObjCRequiresSuperAttr(Attrs.getRange(), S.Context,
- Attrs.getAttributeSpellingListIndex()));
-}
-
-static void handleCFAuditedTransferAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CFUnknownTransferAttr>(S, D, Attr.getRange(),
- Attr.getName()))
- return;
- D->addAttr(::new (S.Context)
- CFAuditedTransferAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
-}
-
-static void handleCFUnknownTransferAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (checkAttrMutualExclusion<CFAuditedTransferAttr>(S, D, Attr.getRange(),
- Attr.getName()))
- return;
-
- D->addAttr(::new (S.Context)
- CFUnknownTransferAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) ObjCRequiresSuperAttr(
+ Attrs.getRange(), S.Context, Attrs.getAttributeSpellingListIndex()));
}
-static void handleObjCBridgeAttr(Sema &S, Scope *Sc, Decl *D,
- const AttributeList &Attr) {
- IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
+static void handleObjCBridgeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
if (!Parm) {
- S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+ S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << AL.getName() << 0;
return;
}
// Typedefs only allow objc_bridge(id) and have some additional checking.
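+  // An illustrative (hypothetical) use:
+  //   typedef const void *CFTypeRef __attribute__((objc_bridge(id)));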
- if (auto TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
if (!Parm->Ident->isStr("id")) {
- S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_id)
- << Attr.getName();
+ S.Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_id)
+ << AL.getName();
return;
}
// Only allow 'cv void *'.
QualType T = TD->getUnderlyingType();
if (!T->isVoidPointerType()) {
- S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_void_pointer);
+ S.Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_void_pointer);
return;
}
}
D->addAttr(::new (S.Context)
- ObjCBridgeAttr(Attr.getRange(), S.Context, Parm->Ident,
- Attr.getAttributeSpellingListIndex()));
+ ObjCBridgeAttr(AL.getRange(), S.Context, Parm->Ident,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleObjCBridgeMutableAttr(Sema &S, Scope *Sc, Decl *D,
- const AttributeList &Attr) {
- IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
+static void handleObjCBridgeMutableAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
if (!Parm) {
- S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+ S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << AL.getName() << 0;
return;
}
D->addAttr(::new (S.Context)
- ObjCBridgeMutableAttr(Attr.getRange(), S.Context, Parm->Ident,
- Attr.getAttributeSpellingListIndex()));
+ ObjCBridgeMutableAttr(AL.getRange(), S.Context, Parm->Ident,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleObjCBridgeRelatedAttr(Sema &S, Scope *Sc, Decl *D,
- const AttributeList &Attr) {
+static void handleObjCBridgeRelatedAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
IdentifierInfo *RelatedClass =
- Attr.isArgIdent(0) ? Attr.getArgAsIdent(0)->Ident : nullptr;
+ AL.isArgIdent(0) ? AL.getArgAsIdent(0)->Ident : nullptr;
if (!RelatedClass) {
- S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0;
+ S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << AL.getName() << 0;
return;
}
IdentifierInfo *ClassMethod =
- Attr.getArgAsIdent(1) ? Attr.getArgAsIdent(1)->Ident : nullptr;
+ AL.getArgAsIdent(1) ? AL.getArgAsIdent(1)->Ident : nullptr;
IdentifierInfo *InstanceMethod =
- Attr.getArgAsIdent(2) ? Attr.getArgAsIdent(2)->Ident : nullptr;
+ AL.getArgAsIdent(2) ? AL.getArgAsIdent(2)->Ident : nullptr;
D->addAttr(::new (S.Context)
- ObjCBridgeRelatedAttr(Attr.getRange(), S.Context, RelatedClass,
+ ObjCBridgeRelatedAttr(AL.getRange(), S.Context, RelatedClass,
ClassMethod, InstanceMethod,
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
ObjCInterfaceDecl *IFace;
- if (ObjCCategoryDecl *CatDecl =
- dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
+ if (auto *CatDecl = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
IFace = CatDecl->getClassInterface();
else
IFace = cast<ObjCInterfaceDecl>(D->getDeclContext());
@@ -4992,29 +4938,28 @@ static void handleObjCDesignatedInitializer(Sema &S, Decl *D,
IFace->setHasDesignatedInitializers();
D->addAttr(::new (S.Context)
- ObjCDesignatedInitializerAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ObjCDesignatedInitializerAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleObjCRuntimeName(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleObjCRuntimeName(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef MetaDataName;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, MetaDataName))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, MetaDataName))
return;
D->addAttr(::new (S.Context)
- ObjCRuntimeNameAttr(Attr.getRange(), S.Context,
+ ObjCRuntimeNameAttr(AL.getRange(), S.Context,
MetaDataName,
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
// When a user wants to use objc_boxable with a union or struct
// but they don't have access to the declaration (legacy/third-party code)
// then they can 'enable' this feature with a typedef:
// typedef struct __attribute((objc_boxable)) legacy_struct legacy_struct;
-static void handleObjCBoxable(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleObjCBoxable(Sema &S, Decl *D, const ParsedAttr &AL) {
bool notify = false;
- RecordDecl *RD = dyn_cast<RecordDecl>(D);
+ auto *RD = dyn_cast<RecordDecl>(D);
if (RD && RD->getDefinition()) {
RD = RD->getDefinition();
notify = true;
@@ -5022,8 +4967,8 @@ static void handleObjCBoxable(Sema &S, Decl *D, const AttributeList &Attr) {
if (RD) {
ObjCBoxableAttr *BoxableAttr = ::new (S.Context)
- ObjCBoxableAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex());
+ ObjCBoxableAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex());
RD->addAttr(BoxableAttr);
if (notify) {
// we need to notify ASTReader/ASTWriter about
@@ -5034,36 +4979,35 @@ static void handleObjCBoxable(Sema &S, Decl *D, const AttributeList &Attr) {
}
}
-static void handleObjCOwnershipAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleObjCOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (hasDeclarator(D)) return;
S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
- << Attr.getRange() << Attr.getName() << ExpectedVariable;
+ << AL.getRange() << AL.getName() << ExpectedVariable;
}
static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- ValueDecl *vd = cast<ValueDecl>(D);
- QualType type = vd->getType();
+ const ParsedAttr &AL) {
+ const auto *VD = cast<ValueDecl>(D);
+ QualType QT = VD->getType();
- if (!type->isDependentType() &&
- !type->isObjCLifetimeType()) {
- S.Diag(Attr.getLoc(), diag::err_objc_precise_lifetime_bad_type)
- << type;
+ if (!QT->isDependentType() &&
+ !QT->isObjCLifetimeType()) {
+ S.Diag(AL.getLoc(), diag::err_objc_precise_lifetime_bad_type)
+ << QT;
return;
}
- Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+ Qualifiers::ObjCLifetime Lifetime = QT.getObjCLifetime();
// If we have no lifetime yet, check the lifetime we're presumably
// going to infer.
- if (lifetime == Qualifiers::OCL_None && !type->isDependentType())
- lifetime = type->getObjCARCImplicitLifetime();
+ if (Lifetime == Qualifiers::OCL_None && !QT->isDependentType())
+ Lifetime = QT->getObjCARCImplicitLifetime();
- switch (lifetime) {
+ switch (Lifetime) {
case Qualifiers::OCL_None:
- assert(type->isDependentType() &&
+ assert(QT->isDependentType() &&
"didn't infer lifetime for non-dependent type?");
break;
@@ -5073,14 +5017,14 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
- S.Diag(Attr.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
- << (lifetime == Qualifiers::OCL_Autoreleasing);
+ S.Diag(AL.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
+ << (Lifetime == Qualifiers::OCL_Autoreleasing);
break;
}
D->addAttr(::new (S.Context)
- ObjCPreciseLifetimeAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ ObjCPreciseLifetimeAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
//===----------------------------------------------------------------------===//
@@ -5100,16 +5044,16 @@ UuidAttr *Sema::mergeUuidAttr(Decl *D, SourceRange Range,
return ::new (Context) UuidAttr(Range, Context, Uuid, AttrSpellingListIndex);
}
-static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
- S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
- << Attr.getName() << AttributeLangSupport::C;
+ S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
+ << AL.getName() << AttributeLangSupport::C;
return;
}
StringRef StrRef;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(Attr, 0, StrRef, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, StrRef, &LiteralLoc))
return;
// GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
@@ -5141,162 +5085,158 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// separating attributes nor of the [ and the ] are in the AST.
// Cf "SourceLocations of attribute list delimiters - [[ ... , ... ]] etc"
// on cfe-dev.
- if (Attr.isMicrosoftAttribute()) // Check for [uuid(...)] spelling.
- S.Diag(Attr.getLoc(), diag::warn_atl_uuid_deprecated);
+ if (AL.isMicrosoftAttribute()) // Check for [uuid(...)] spelling.
+ S.Diag(AL.getLoc(), diag::warn_atl_uuid_deprecated);
- UuidAttr *UA = S.mergeUuidAttr(D, Attr.getRange(),
- Attr.getAttributeSpellingListIndex(), StrRef);
+ UuidAttr *UA = S.mergeUuidAttr(D, AL.getRange(),
+ AL.getAttributeSpellingListIndex(), StrRef);
if (UA)
D->addAttr(UA);
}
-static void handleMSInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleMSInheritanceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
- S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
- << Attr.getName() << AttributeLangSupport::C;
+ S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
+ << AL.getName() << AttributeLangSupport::C;
return;
}
MSInheritanceAttr *IA = S.mergeMSInheritanceAttr(
- D, Attr.getRange(), /*BestCase=*/true,
- Attr.getAttributeSpellingListIndex(),
- (MSInheritanceAttr::Spelling)Attr.getSemanticSpelling());
+ D, AL.getRange(), /*BestCase=*/true,
+ AL.getAttributeSpellingListIndex(),
+ (MSInheritanceAttr::Spelling)AL.getSemanticSpelling());
if (IA) {
D->addAttr(IA);
S.Consumer.AssignInheritanceModel(cast<CXXRecordDecl>(D));
}
}
-static void handleDeclspecThreadAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- VarDecl *VD = cast<VarDecl>(D);
+static void handleDeclspecThreadAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ const auto *VD = cast<VarDecl>(D);
if (!S.Context.getTargetInfo().isTLSSupported()) {
- S.Diag(Attr.getLoc(), diag::err_thread_unsupported);
+ S.Diag(AL.getLoc(), diag::err_thread_unsupported);
return;
}
if (VD->getTSCSpec() != TSCS_unspecified) {
- S.Diag(Attr.getLoc(), diag::err_declspec_thread_on_thread_variable);
+ S.Diag(AL.getLoc(), diag::err_declspec_thread_on_thread_variable);
return;
}
if (VD->hasLocalStorage()) {
- S.Diag(Attr.getLoc(), diag::err_thread_non_global) << "__declspec(thread)";
+ S.Diag(AL.getLoc(), diag::err_thread_non_global) << "__declspec(thread)";
return;
}
- VD->addAttr(::new (S.Context) ThreadAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) ThreadAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAbiTagAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleAbiTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
SmallVector<StringRef, 4> Tags;
- for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) {
+ for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
StringRef Tag;
- if (!S.checkStringLiteralArgumentAttr(Attr, I, Tag))
+ if (!S.checkStringLiteralArgumentAttr(AL, I, Tag))
return;
Tags.push_back(Tag);
}
if (const auto *NS = dyn_cast<NamespaceDecl>(D)) {
if (!NS->isInline()) {
- S.Diag(Attr.getLoc(), diag::warn_attr_abi_tag_namespace) << 0;
+ S.Diag(AL.getLoc(), diag::warn_attr_abi_tag_namespace) << 0;
return;
}
if (NS->isAnonymousNamespace()) {
- S.Diag(Attr.getLoc(), diag::warn_attr_abi_tag_namespace) << 1;
+ S.Diag(AL.getLoc(), diag::warn_attr_abi_tag_namespace) << 1;
return;
}
- if (Attr.getNumArgs() == 0)
+ if (AL.getNumArgs() == 0)
Tags.push_back(NS->getName());
- } else if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ } else if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
// Store tags sorted and without duplicates.
- std::sort(Tags.begin(), Tags.end());
+ llvm::sort(Tags.begin(), Tags.end());
Tags.erase(std::unique(Tags.begin(), Tags.end()), Tags.end());
D->addAttr(::new (S.Context)
- AbiTagAttr(Attr.getRange(), S.Context, Tags.data(), Tags.size(),
- Attr.getAttributeSpellingListIndex()));
+ AbiTagAttr(AL.getRange(), S.Context, Tags.data(), Tags.size(),
+ AL.getAttributeSpellingListIndex()));
}
-static void handleARMInterruptAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleARMInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check the attribute arguments.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 1;
+ if (AL.getNumArgs() > 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
+ << AL.getName() << 1;
return;
}
StringRef Str;
SourceLocation ArgLoc;
- if (Attr.getNumArgs() == 0)
+ if (AL.getNumArgs() == 0)
Str = "";
- else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
+ else if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
ARMInterruptAttr::InterruptType Kind;
if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
- << Attr.getName() << Str << ArgLoc;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL.getName() << Str << ArgLoc;
return;
}
- unsigned Index = Attr.getAttributeSpellingListIndex();
+ unsigned Index = AL.getAttributeSpellingListIndex();
D->addAttr(::new (S.Context)
- ARMInterruptAttr(Attr.getLoc(), S.Context, Kind, Index));
+ ARMInterruptAttr(AL.getLoc(), S.Context, Kind, Index));
}
-static void handleMSP430InterruptAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeNumArgs(S, Attr, 1))
+static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeNumArgs(S, AL, 1))
return;
- if (!Attr.isArgExpr(0)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName()
+ if (!AL.isArgExpr(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type) << AL.getName()
<< AANT_ArgumentIntegerConstant;
return;
}
// FIXME: Check for decl - it should be void ()(void).
- Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
+ Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
llvm::APSInt NumParams(32);
if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentIntegerConstant
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL.getName() << AANT_ArgumentIntegerConstant
<< NumParamsExpr->getSourceRange();
return;
}
unsigned Num = NumParams.getLimitedValue(255);
if ((Num & 1) || Num > 30) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << Attr.getName() << (int)NumParams.getSExtValue()
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL.getName() << (int)NumParams.getSExtValue()
<< NumParamsExpr->getSourceRange();
return;
}
D->addAttr(::new (S.Context)
- MSP430InterruptAttr(Attr.getLoc(), S.Context, Num,
- Attr.getAttributeSpellingListIndex()));
+ MSP430InterruptAttr(AL.getLoc(), S.Context, Num,
+ AL.getAttributeSpellingListIndex()));
D->addAttr(UsedAttr::CreateImplicit(S.Context));
}
-static void handleMipsInterruptAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Only one optional argument permitted.
- if (Attr.getNumArgs() > 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments)
- << Attr.getName() << 1;
+ if (AL.getNumArgs() > 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
+ << AL.getName() << 1;
return;
}
StringRef Str;
SourceLocation ArgLoc;
- if (Attr.getNumArgs() == 0)
+ if (AL.getNumArgs() == 0)
Str = "";
- else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc))
+ else if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
// Semantic checks for a function with the 'interrupt' attribute for MIPS:
@@ -5326,23 +5266,22 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D,
return;
}
- if (checkAttrMutualExclusion<Mips16Attr>(S, D, Attr.getRange(),
- Attr.getName()))
+ if (checkAttrMutualExclusion<Mips16Attr>(S, D, AL.getRange(),
+ AL.getName()))
return;
MipsInterruptAttr::InterruptType Kind;
if (!MipsInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
- << Attr.getName() << "'" + std::string(Str) + "'";
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL.getName() << "'" + std::string(Str) + "'";
return;
}
D->addAttr(::new (S.Context) MipsInterruptAttr(
- Attr.getLoc(), S.Context, Kind, Attr.getAttributeSpellingListIndex()));
+ AL.getLoc(), S.Context, Kind, AL.getAttributeSpellingListIndex()));
}
-static void handleAnyX86InterruptAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Semantic checks for a function with the 'interrupt' attribute.
// a) Must be a function.
// b) Must have the 'void' return type.
@@ -5352,8 +5291,8 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D,
if (!isFunctionOrMethod(D) || !hasFunctionProto(D) || isInstanceMethod(D) ||
CXXMethodDecl::isStaticOverloadedOperator(
cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionWithProtoType;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedFunctionWithProtoType;
return;
}
// Interrupt handler must have void return type.
@@ -5403,182 +5342,241 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D,
return;
}
D->addAttr(::new (S.Context) AnyX86InterruptAttr(
- Attr.getLoc(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getLoc(), S.Context, AL.getAttributeSpellingListIndex()));
D->addAttr(UsedAttr::CreateImplicit(S.Context));
}
-static void handleAVRInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
<< "'interrupt'" << ExpectedFunction;
return;
}
- if (!checkAttributeNumArgs(S, Attr, 0))
+ if (!checkAttributeNumArgs(S, AL, 0))
return;
- handleSimpleAttribute<AVRInterruptAttr>(S, D, Attr);
+ handleSimpleAttribute<AVRInterruptAttr>(S, D, AL);
}
-static void handleAVRSignalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
<< "'signal'" << ExpectedFunction;
return;
}
- if (!checkAttributeNumArgs(S, Attr, 0))
+ if (!checkAttributeNumArgs(S, AL, 0))
+ return;
+
+ handleSimpleAttribute<AVRSignalAttr>(S, D, AL);
+}
+
+static void handleRISCVInterruptAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ // Warn about repeated attributes.
+ if (const auto *A = D->getAttr<RISCVInterruptAttr>()) {
+ S.Diag(AL.getRange().getBegin(),
+ diag::warn_riscv_repeated_interrupt_attribute);
+ S.Diag(A->getLocation(), diag::note_riscv_repeated_interrupt_attribute);
+ return;
+ }
+
+ // Check the attribute argument. Argument is optional.
+ if (!checkAttributeAtMostNumArgs(S, AL, 1))
+ return;
+
+ StringRef Str;
+ SourceLocation ArgLoc;
+
+  // 'machine' is the default interrupt mode.
+ if (AL.getNumArgs() == 0)
+ Str = "machine";
+ else if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ // Semantic checks for a function with the 'interrupt' attribute:
+ // - Must be a function.
+ // - Must have no parameters.
+ // - Must have the 'void' return type.
+ // - The attribute itself must either have no argument or one of the
+ // valid interrupt types, see [RISCVInterruptDocs].
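+  //
+  // Illustrative usage (hypothetical, not part of this patch):
+  //   __attribute__((interrupt("machine"))) void handler(void);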
+
+ if (D->getFunctionType() == nullptr) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'interrupt'" << ExpectedFunction;
+ return;
+ }
+
+ if (hasFunctionProto(D) && getFunctionOrMethodNumParams(D) != 0) {
+ S.Diag(D->getLocation(), diag::warn_riscv_interrupt_attribute) << 0;
+ return;
+ }
+
+ if (!getFunctionOrMethodResultType(D)->isVoidType()) {
+ S.Diag(D->getLocation(), diag::warn_riscv_interrupt_attribute) << 1;
+ return;
+ }
+
+ RISCVInterruptAttr::InterruptType Kind;
+ if (!RISCVInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL.getName() << Str << ArgLoc;
return;
+ }
- handleSimpleAttribute<AVRSignalAttr>(S, D, Attr);
+ D->addAttr(::new (S.Context) RISCVInterruptAttr(
+ AL.getLoc(), S.Context, Kind, AL.getAttributeSpellingListIndex()));
}
-static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Dispatch the interrupt attribute based on the current target.
switch (S.Context.getTargetInfo().getTriple().getArch()) {
case llvm::Triple::msp430:
- handleMSP430InterruptAttr(S, D, Attr);
+ handleMSP430InterruptAttr(S, D, AL);
break;
case llvm::Triple::mipsel:
case llvm::Triple::mips:
- handleMipsInterruptAttr(S, D, Attr);
+ handleMipsInterruptAttr(S, D, AL);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- handleAnyX86InterruptAttr(S, D, Attr);
+ handleAnyX86InterruptAttr(S, D, AL);
break;
case llvm::Triple::avr:
- handleAVRInterruptAttr(S, D, Attr);
+ handleAVRInterruptAttr(S, D, AL);
+ break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ handleRISCVInterruptAttr(S, D, AL);
break;
default:
- handleARMInterruptAttr(S, D, Attr);
+ handleARMInterruptAttr(S, D, AL);
break;
}
}
static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
uint32_t Min = 0;
- Expr *MinExpr = Attr.getArgAsExpr(0);
- if (!checkUInt32Argument(S, Attr, MinExpr, Min))
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, AL, MinExpr, Min))
return;
uint32_t Max = 0;
- Expr *MaxExpr = Attr.getArgAsExpr(1);
- if (!checkUInt32Argument(S, Attr, MaxExpr, Max))
+ Expr *MaxExpr = AL.getArgAsExpr(1);
+ if (!checkUInt32Argument(S, AL, MaxExpr, Max))
return;
if (Min == 0 && Max != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
- << Attr.getName() << 0;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
+ << AL.getName() << 0;
return;
}
if (Min > Max) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
- << Attr.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
+ << AL.getName() << 1;
return;
}
D->addAttr(::new (S.Context)
- AMDGPUFlatWorkGroupSizeAttr(Attr.getLoc(), S.Context, Min, Max,
- Attr.getAttributeSpellingListIndex()));
+ AMDGPUFlatWorkGroupSizeAttr(AL.getLoc(), S.Context, Min, Max,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t Min = 0;
- Expr *MinExpr = Attr.getArgAsExpr(0);
- if (!checkUInt32Argument(S, Attr, MinExpr, Min))
+ Expr *MinExpr = AL.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, AL, MinExpr, Min))
return;
uint32_t Max = 0;
- if (Attr.getNumArgs() == 2) {
- Expr *MaxExpr = Attr.getArgAsExpr(1);
- if (!checkUInt32Argument(S, Attr, MaxExpr, Max))
+ if (AL.getNumArgs() == 2) {
+ Expr *MaxExpr = AL.getArgAsExpr(1);
+ if (!checkUInt32Argument(S, AL, MaxExpr, Max))
return;
}
if (Min == 0 && Max != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
- << Attr.getName() << 0;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
+ << AL.getName() << 0;
return;
}
if (Max != 0 && Min > Max) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
- << Attr.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
+ << AL.getName() << 1;
return;
}
D->addAttr(::new (S.Context)
- AMDGPUWavesPerEUAttr(Attr.getLoc(), S.Context, Min, Max,
- Attr.getAttributeSpellingListIndex()));
+ AMDGPUWavesPerEUAttr(AL.getLoc(), S.Context, Min, Max,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t NumSGPR = 0;
- Expr *NumSGPRExpr = Attr.getArgAsExpr(0);
- if (!checkUInt32Argument(S, Attr, NumSGPRExpr, NumSGPR))
+ Expr *NumSGPRExpr = AL.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, AL, NumSGPRExpr, NumSGPR))
return;
D->addAttr(::new (S.Context)
- AMDGPUNumSGPRAttr(Attr.getLoc(), S.Context, NumSGPR,
- Attr.getAttributeSpellingListIndex()));
+ AMDGPUNumSGPRAttr(AL.getLoc(), S.Context, NumSGPR,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t NumVGPR = 0;
- Expr *NumVGPRExpr = Attr.getArgAsExpr(0);
- if (!checkUInt32Argument(S, Attr, NumVGPRExpr, NumVGPR))
+ Expr *NumVGPRExpr = AL.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, AL, NumVGPRExpr, NumVGPR))
return;
D->addAttr(::new (S.Context)
- AMDGPUNumVGPRAttr(Attr.getLoc(), S.Context, NumVGPR,
- Attr.getAttributeSpellingListIndex()));
+ AMDGPUNumVGPRAttr(AL.getLoc(), S.Context, NumVGPR,
+ AL.getAttributeSpellingListIndex()));
}
static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
- const AttributeList& Attr) {
+ const ParsedAttr &AL) {
// If we try to apply it to a function pointer, don't warn, but don't
// do anything, either. It doesn't matter anyway, because there's nothing
// special about calling a force_align_arg_pointer function.
- ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ const auto *VD = dyn_cast<ValueDecl>(D);
if (VD && VD->getType()->isFunctionPointerType())
return;
// Also don't warn on function pointer typedefs.
- TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+ const auto *TD = dyn_cast<TypedefNameDecl>(D);
if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
TD->getUnderlyingType()->isFunctionType()))
return;
// Attribute can only be applied to function types.
if (!isa<FunctionDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << /* function */0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedFunction;
return;
}
D->addAttr(::new (S.Context)
- X86ForceAlignArgPointerAttr(Attr.getRange(), S.Context,
- Attr.getAttributeSpellingListIndex()));
+ X86ForceAlignArgPointerAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleLayoutVersion(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleLayoutVersion(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t Version;
- Expr *VersionExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
- if (!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), Version))
+ Expr *VersionExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), Version))
return;
// TODO: Investigate what happens with the next major version of MSVC.
if (Version != LangOptions::MSVC2015) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << Attr.getName() << Version << VersionExpr->getSourceRange();
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL.getName() << Version << VersionExpr->getSourceRange();
return;
}
D->addAttr(::new (S.Context)
- LayoutVersionAttr(Attr.getRange(), S.Context, Version,
- Attr.getAttributeSpellingListIndex()));
+ LayoutVersionAttr(AL.getRange(), S.Context, Version,
+ AL.getAttributeSpellingListIndex()));
}
DLLImportAttr *Sema::mergeDLLImportAttr(Decl *D, SourceRange Range,
@@ -5607,7 +5605,7 @@ DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range,
return ::new (Context) DLLExportAttr(Range, Context, AttrSpellingListIndex);
}
-static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
+static void handleDLLAttr(Sema &S, Decl *D, const ParsedAttr &A) {
if (isa<ClassTemplatePartialSpecializationDecl>(D) &&
S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored)
@@ -5615,8 +5613,8 @@ static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
return;
}
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->isInlined() && A.getKind() == AttributeList::AT_DLLImport &&
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isInlined() && A.getKind() == ParsedAttr::AT_DLLImport &&
!S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// MinGW doesn't allow dllimport on inline functions.
S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored_on_inline)
@@ -5625,7 +5623,7 @@ static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
}
}
- if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
MD->getParent()->isLambda()) {
S.Diag(A.getRange().getBegin(), diag::err_attribute_dll_lambda) << A.getName();
@@ -5634,7 +5632,7 @@ static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) {
}
unsigned Index = A.getAttributeSpellingListIndex();
- Attr *NewAttr = A.getKind() == AttributeList::AT_DLLExport
+ Attr *NewAttr = A.getKind() == ParsedAttr::AT_DLLExport
? (Attr *)S.mergeDLLExportAttr(D, A.getRange(), Index)
: (Attr *)S.mergeDLLImportAttr(D, A.getRange(), Index);
if (NewAttr)
@@ -5654,7 +5652,7 @@ Sema::mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
D->dropAttr<MSInheritanceAttr>();
}
- CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ auto *RD = cast<CXXRecordDecl>(D);
if (RD->hasDefinition()) {
if (checkMSInheritanceAttrOnDefinition(RD, Range, BestCase,
SemanticSpelling)) {
@@ -5677,7 +5675,7 @@ Sema::mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
MSInheritanceAttr(Range, Context, BestCase, AttrSpellingListIndex);
}
-static void handleCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleCapabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The capability attributes take a single string parameter for the name of
// the capability they represent. The lockable attribute does not take any
// parameters. However, semantically, both attributes represent the same
@@ -5688,8 +5686,8 @@ static void handleCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// literal will be considered a "mutex."
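+  // Illustrative usage (hypothetical, not from this patch):
+  //   struct __attribute__((capability("mutex"))) Mutex {};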
StringRef N("mutex");
SourceLocation LiteralLoc;
- if (Attr.getKind() == AttributeList::AT_Capability &&
- !S.checkStringLiteralArgumentAttr(Attr, 0, N, &LiteralLoc))
+ if (AL.getKind() == ParsedAttr::AT_Capability &&
+ !S.checkStringLiteralArgumentAttr(AL, 0, N, &LiteralLoc))
return;
// Currently, there are only two names allowed for a capability: role and
@@ -5697,80 +5695,79 @@ static void handleCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!N.equals_lower("mutex") && !N.equals_lower("role"))
S.Diag(LiteralLoc, diag::warn_invalid_capability_name) << N;
- D->addAttr(::new (S.Context) CapabilityAttr(Attr.getRange(), S.Context, N,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) CapabilityAttr(AL.getRange(), S.Context, N,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleAssertCapabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleAssertCapabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
SmallVector<Expr*, 1> Args;
- if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ if (!checkLockFunAttrCommon(S, D, AL, Args))
return;
- D->addAttr(::new (S.Context) AssertCapabilityAttr(Attr.getRange(), S.Context,
+ D->addAttr(::new (S.Context) AssertCapabilityAttr(AL.getRange(), S.Context,
Args.data(), Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
static void handleAcquireCapabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
SmallVector<Expr*, 1> Args;
- if (!checkLockFunAttrCommon(S, D, Attr, Args))
+ if (!checkLockFunAttrCommon(S, D, AL, Args))
return;
- D->addAttr(::new (S.Context) AcquireCapabilityAttr(Attr.getRange(),
+ D->addAttr(::new (S.Context) AcquireCapabilityAttr(AL.getRange(),
S.Context,
Args.data(), Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
static void handleTryAcquireCapabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
SmallVector<Expr*, 2> Args;
- if (!checkTryLockFunAttrCommon(S, D, Attr, Args))
+ if (!checkTryLockFunAttrCommon(S, D, AL, Args))
return;
- D->addAttr(::new (S.Context) TryAcquireCapabilityAttr(Attr.getRange(),
+ D->addAttr(::new (S.Context) TryAcquireCapabilityAttr(AL.getRange(),
S.Context,
- Attr.getArgAsExpr(0),
+ AL.getArgAsExpr(0),
Args.data(),
Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ AL.getAttributeSpellingListIndex()));
}
static void handleReleaseCapabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+ const ParsedAttr &AL) {
// Check that all arguments are lockable objects.
SmallVector<Expr *, 1> Args;
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, true);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args, 0, true);
D->addAttr(::new (S.Context) ReleaseCapabilityAttr(
- Attr.getRange(), S.Context, Args.data(), Args.size(),
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, Args.data(), Args.size(),
+ AL.getAttributeSpellingListIndex()));
}
static void handleRequiresCapabilityAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
// check that all arguments are lockable objects
SmallVector<Expr*, 1> Args;
- checkAttrArgsAreCapabilityObjs(S, D, Attr, Args);
+ checkAttrArgsAreCapabilityObjs(S, D, AL, Args);
if (Args.empty())
return;
RequiresCapabilityAttr *RCA = ::new (S.Context)
- RequiresCapabilityAttr(Attr.getRange(), S.Context, Args.data(),
- Args.size(), Attr.getAttributeSpellingListIndex());
+ RequiresCapabilityAttr(AL.getRange(), S.Context, Args.data(),
+ Args.size(), AL.getAttributeSpellingListIndex());
D->addAttr(RCA);
}
-static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (auto *NSD = dyn_cast<NamespaceDecl>(D)) {
+static void handleDeprecatedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (const auto *NSD = dyn_cast<NamespaceDecl>(D)) {
if (NSD->isAnonymousNamespace()) {
- S.Diag(Attr.getLoc(), diag::warn_deprecated_anonymous_namespace);
+ S.Diag(AL.getLoc(), diag::warn_deprecated_anonymous_namespace);
// Do not want to attach the attribute to the namespace because that will
// cause confusing diagnostic reports for uses of declarations within the
// namespace.
@@ -5780,25 +5777,25 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Handle the cases where the attribute has a text message.
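+  // Illustrative GNU-spelling use (hypothetical, not part of this patch):
+  //   __attribute__((deprecated("use g() instead", "g"))) void f();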
StringRef Str, Replacement;
- if (Attr.isArgExpr(0) && Attr.getArgAsExpr(0) &&
- !S.checkStringLiteralArgumentAttr(Attr, 0, Str))
+ if (AL.isArgExpr(0) && AL.getArgAsExpr(0) &&
+ !S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
// Only support a single optional message for Declspec and CXX11.
- if (Attr.isDeclspecAttribute() || Attr.isCXX11Attribute())
- checkAttributeAtMostNumArgs(S, Attr, 1);
- else if (Attr.isArgExpr(1) && Attr.getArgAsExpr(1) &&
- !S.checkStringLiteralArgumentAttr(Attr, 1, Replacement))
+ if (AL.isDeclspecAttribute() || AL.isCXX11Attribute())
+ checkAttributeAtMostNumArgs(S, AL, 1);
+ else if (AL.isArgExpr(1) && AL.getArgAsExpr(1) &&
+ !S.checkStringLiteralArgumentAttr(AL, 1, Replacement))
return;
if (!S.getLangOpts().CPlusPlus14)
- if (Attr.isCXX11Attribute() &&
- !(Attr.hasScope() && Attr.getScopeName()->isStr("gnu")))
- S.Diag(Attr.getLoc(), diag::ext_cxx14_attr) << Attr.getName();
+ if (AL.isCXX11Attribute() &&
+ !(AL.hasScope() && AL.getScopeName()->isStr("gnu")))
+ S.Diag(AL.getLoc(), diag::ext_cxx14_attr) << AL.getName();
D->addAttr(::new (S.Context)
- DeprecatedAttr(Attr.getRange(), S.Context, Str, Replacement,
- Attr.getAttributeSpellingListIndex()));
+ DeprecatedAttr(AL.getRange(), S.Context, Str, Replacement,
+ AL.getAttributeSpellingListIndex()));
}
static bool isGlobalVar(const Decl *D) {
@@ -5807,35 +5804,35 @@ static bool isGlobalVar(const Decl *D) {
return false;
}
-static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
- if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
std::vector<StringRef> Sanitizers;
- for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) {
+ for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
StringRef SanitizerName;
SourceLocation LiteralLoc;
- if (!S.checkStringLiteralArgumentAttr(Attr, I, SanitizerName, &LiteralLoc))
+ if (!S.checkStringLiteralArgumentAttr(AL, I, SanitizerName, &LiteralLoc))
return;
if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) == 0)
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
else if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunctionOrMethod;
+ << AL.getName() << ExpectedFunctionOrMethod;
Sanitizers.push_back(SanitizerName);
}
D->addAttr(::new (S.Context) NoSanitizeAttr(
- Attr.getRange(), S.Context, Sanitizers.data(), Sanitizers.size(),
- Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, Sanitizers.data(), Sanitizers.size(),
+ AL.getAttributeSpellingListIndex()));
}
static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
- StringRef AttrName = Attr.getName()->getName();
+ const ParsedAttr &AL) {
+ StringRef AttrName = AL.getName()->getName();
normalizeName(AttrName);
StringRef SanitizerName = llvm::StringSwitch<StringRef>(AttrName)
.Case("no_address_safety_analysis", "address")
@@ -5844,77 +5841,78 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
.Case("no_sanitize_memory", "memory");
if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedFunction;
+ << AL.getName() << ExpectedFunction;
D->addAttr(::new (S.Context)
- NoSanitizeAttr(Attr.getRange(), S.Context, &SanitizerName, 1,
- Attr.getAttributeSpellingListIndex()));
+ NoSanitizeAttr(AL.getRange(), S.Context, &SanitizerName, 1,
+ AL.getAttributeSpellingListIndex()));
}
-static void handleInternalLinkageAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleInternalLinkageAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (InternalLinkageAttr *Internal =
- S.mergeInternalLinkageAttr(D, Attr.getRange(), Attr.getName(),
- Attr.getAttributeSpellingListIndex()))
+ S.mergeInternalLinkageAttr(D, AL.getRange(), AL.getName(),
+ AL.getAttributeSpellingListIndex()))
D->addAttr(Internal);
}
-static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (S.LangOpts.OpenCLVersion != 200)
- S.Diag(Attr.getLoc(), diag::err_attribute_requires_opencl_version)
- << Attr.getName() << "2.0" << 0;
+ S.Diag(AL.getLoc(), diag::err_attribute_requires_opencl_version)
+ << AL.getName() << "2.0" << 0;
else
- S.Diag(Attr.getLoc(), diag::warn_opencl_attr_deprecated_ignored)
- << Attr.getName() << "2.0";
+ S.Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored)
+ << AL.getName() << "2.0";
}
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the correct
/// number of arguments were passed, etc.
-static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
- const AttributeList &Attr) {
+static bool handleCommonAttributeFeatures(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
// Several attributes carry different semantics than the parsing requires, so
// those are opted out of the common argument checks.
//
// We also bail on unknown and ignored attributes because those are handled
// as part of the target-specific handling logic.
- if (Attr.getKind() == AttributeList::UnknownAttribute)
+ if (AL.getKind() == ParsedAttr::UnknownAttribute)
return false;
// Check whether the attribute requires specific language extensions to be
// enabled.
- if (!Attr.diagnoseLangOpts(S))
+ if (!AL.diagnoseLangOpts(S))
return true;
// Check whether the attribute appertains to the given subject.
- if (!Attr.diagnoseAppertainsTo(S, D))
+ if (!AL.diagnoseAppertainsTo(S, D))
return true;
- if (Attr.hasCustomParsing())
+ if (AL.hasCustomParsing())
return false;
- if (Attr.getMinArgs() == Attr.getMaxArgs()) {
+ if (AL.getMinArgs() == AL.getMaxArgs()) {
// If there are no optional arguments, then checking for the argument count
// is trivial.
- if (!checkAttributeNumArgs(S, Attr, Attr.getMinArgs()))
+ if (!checkAttributeNumArgs(S, AL, AL.getMinArgs()))
return true;
} else {
// There are optional arguments, so checking is slightly more involved.
- if (Attr.getMinArgs() &&
- !checkAttributeAtLeastNumArgs(S, Attr, Attr.getMinArgs()))
+ if (AL.getMinArgs() &&
+ !checkAttributeAtLeastNumArgs(S, AL, AL.getMinArgs()))
return true;
- else if (!Attr.hasVariadicArg() && Attr.getMaxArgs() &&
- !checkAttributeAtMostNumArgs(S, Attr, Attr.getMaxArgs()))
+ else if (!AL.hasVariadicArg() && AL.getMaxArgs() &&
+ !checkAttributeAtMostNumArgs(S, AL, AL.getMaxArgs()))
return true;
}
+ if (S.CheckAttrTarget(AL))
+ return true;
+
return false;
}
-static void handleOpenCLAccessAttr(Sema &S, Decl *D,
- const AttributeList &Attr) {
+static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->isInvalidDecl())
return;
// Check if there is only one access qualifier.
if (D->hasAttr<OpenCLAccessAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_opencl_multiple_access_qualifiers)
+ S.Diag(AL.getLoc(), diag::err_opencl_multiple_access_qualifiers)
<< D->getSourceRange();
D->setInvalidDecl(true);
return;
@@ -5925,12 +5923,12 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D,
// OpenCL v2.0 s6.13.6 - A kernel cannot read from and write to the same pipe
// object. Using the read_write (or __read_write) qualifier with the pipe
// qualifier is a compilation error.
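+  // Illustrative (hypothetical): kernel void f(read_write pipe int p);
+  // is rejected for exactly this reason.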
- if (const ParmVarDecl *PDecl = dyn_cast<ParmVarDecl>(D)) {
+ if (const auto *PDecl = dyn_cast<ParmVarDecl>(D)) {
const Type *DeclTy = PDecl->getType().getCanonicalType().getTypePtr();
- if (Attr.getName()->getName().find("read_write") != StringRef::npos) {
+ if (AL.getName()->getName().find("read_write") != StringRef::npos) {
if (S.getLangOpts().OpenCLVersion < 200 || DeclTy->isPipeType()) {
- S.Diag(Attr.getLoc(), diag::err_opencl_invalid_read_write)
- << Attr.getName() << PDecl->getType() << DeclTy->isImageType();
+ S.Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
+ << AL.getName() << PDecl->getType() << DeclTy->isImageType();
D->setInvalidDecl(true);
return;
}
@@ -5938,7 +5936,7 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D,
}
D->addAttr(::new (S.Context) OpenCLAccessAttr(
- Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
//===----------------------------------------------------------------------===//
@@ -5949,633 +5947,659 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D,
/// the attribute applies to decls. If the attribute is a type attribute, just
/// silently ignore it if a GNU attribute.
static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
- const AttributeList &Attr,
+ const ParsedAttr &AL,
bool IncludeCXX11Attributes) {
- if (Attr.isInvalid() || Attr.getKind() == AttributeList::IgnoredAttribute)
+ if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
return;
// Ignore C++11 attributes on declarator chunks: they appertain to the type
// instead.
- if (Attr.isCXX11Attribute() && !IncludeCXX11Attributes)
+ if (AL.isCXX11Attribute() && !IncludeCXX11Attributes)
return;
// Unknown attributes are automatically warned on. Target-specific attributes
// which do not apply to the current target architecture are treated as
// though they were unknown attributes.
- if (Attr.getKind() == AttributeList::UnknownAttribute ||
- !Attr.existsInTarget(S.Context.getTargetInfo())) {
- S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute()
- ? diag::warn_unhandled_ms_attribute_ignored
- : diag::warn_unknown_attribute_ignored)
- << Attr.getName();
+ if (AL.getKind() == ParsedAttr::UnknownAttribute ||
+ !AL.existsInTarget(S.Context.getTargetInfo())) {
+ S.Diag(AL.getLoc(), AL.isDeclspecAttribute()
+ ? diag::warn_unhandled_ms_attribute_ignored
+ : diag::warn_unknown_attribute_ignored)
+ << AL.getName();
return;
}
- if (handleCommonAttributeFeatures(S, scope, D, Attr))
+ if (handleCommonAttributeFeatures(S, D, AL))
return;
- switch (Attr.getKind()) {
+ switch (AL.getKind()) {
default:
- if (!Attr.isStmtAttr()) {
+ if (!AL.isStmtAttr()) {
// Type attributes are handled elsewhere; silently move on.
- assert(Attr.isTypeAttr() && "Non-type attribute not handled");
+ assert(AL.isTypeAttr() && "Non-type attribute not handled");
break;
}
- S.Diag(Attr.getLoc(), diag::err_stmt_attribute_invalid_on_decl)
- << Attr.getName() << D->getLocation();
+ S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)
+ << AL.getName() << D->getLocation();
break;
- case AttributeList::AT_Interrupt:
- handleInterruptAttr(S, D, Attr);
+ case ParsedAttr::AT_Interrupt:
+ handleInterruptAttr(S, D, AL);
break;
- case AttributeList::AT_X86ForceAlignArgPointer:
- handleX86ForceAlignArgPointerAttr(S, D, Attr);
+ case ParsedAttr::AT_X86ForceAlignArgPointer:
+ handleX86ForceAlignArgPointerAttr(S, D, AL);
break;
- case AttributeList::AT_DLLExport:
- case AttributeList::AT_DLLImport:
- handleDLLAttr(S, D, Attr);
+ case ParsedAttr::AT_DLLExport:
+ case ParsedAttr::AT_DLLImport:
+ handleDLLAttr(S, D, AL);
break;
- case AttributeList::AT_Mips16:
+ case ParsedAttr::AT_Mips16:
handleSimpleAttributeWithExclusions<Mips16Attr, MicroMipsAttr,
- MipsInterruptAttr>(S, D, Attr);
+ MipsInterruptAttr>(S, D, AL);
break;
- case AttributeList::AT_NoMips16:
- handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
+ case ParsedAttr::AT_NoMips16:
+ handleSimpleAttribute<NoMips16Attr>(S, D, AL);
break;
- case AttributeList::AT_MicroMips:
- handleSimpleAttributeWithExclusions<MicroMipsAttr, Mips16Attr>(S, D, Attr);
+ case ParsedAttr::AT_MicroMips:
+ handleSimpleAttributeWithExclusions<MicroMipsAttr, Mips16Attr>(S, D, AL);
break;
- case AttributeList::AT_NoMicroMips:
- handleSimpleAttribute<NoMicroMipsAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoMicroMips:
+ handleSimpleAttribute<NoMicroMipsAttr>(S, D, AL);
break;
- case AttributeList::AT_MipsLongCall:
+ case ParsedAttr::AT_MipsLongCall:
handleSimpleAttributeWithExclusions<MipsLongCallAttr, MipsShortCallAttr>(
- S, D, Attr);
+ S, D, AL);
break;
- case AttributeList::AT_MipsShortCall:
+ case ParsedAttr::AT_MipsShortCall:
handleSimpleAttributeWithExclusions<MipsShortCallAttr, MipsLongCallAttr>(
- S, D, Attr);
+ S, D, AL);
+ break;
+ case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
+ handleAMDGPUFlatWorkGroupSizeAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_AMDGPUWavesPerEU:
+ handleAMDGPUWavesPerEUAttr(S, D, AL);
break;
- case AttributeList::AT_AMDGPUFlatWorkGroupSize:
- handleAMDGPUFlatWorkGroupSizeAttr(S, D, Attr);
+ case ParsedAttr::AT_AMDGPUNumSGPR:
+ handleAMDGPUNumSGPRAttr(S, D, AL);
break;
- case AttributeList::AT_AMDGPUWavesPerEU:
- handleAMDGPUWavesPerEUAttr(S, D, Attr);
+ case ParsedAttr::AT_AMDGPUNumVGPR:
+ handleAMDGPUNumVGPRAttr(S, D, AL);
break;
- case AttributeList::AT_AMDGPUNumSGPR:
- handleAMDGPUNumSGPRAttr(S, D, Attr);
+ case ParsedAttr::AT_AVRSignal:
+ handleAVRSignalAttr(S, D, AL);
break;
- case AttributeList::AT_AMDGPUNumVGPR:
- handleAMDGPUNumVGPRAttr(S, D, Attr);
+ case ParsedAttr::AT_IBAction:
+ handleSimpleAttribute<IBActionAttr>(S, D, AL);
break;
- case AttributeList::AT_AVRSignal:
- handleAVRSignalAttr(S, D, Attr);
+ case ParsedAttr::AT_IBOutlet:
+ handleIBOutlet(S, D, AL);
break;
- case AttributeList::AT_IBAction:
- handleSimpleAttribute<IBActionAttr>(S, D, Attr);
+ case ParsedAttr::AT_IBOutletCollection:
+ handleIBOutletCollection(S, D, AL);
break;
- case AttributeList::AT_IBOutlet:
- handleIBOutlet(S, D, Attr);
+ case ParsedAttr::AT_IFunc:
+ handleIFuncAttr(S, D, AL);
break;
- case AttributeList::AT_IBOutletCollection:
- handleIBOutletCollection(S, D, Attr);
+ case ParsedAttr::AT_Alias:
+ handleAliasAttr(S, D, AL);
break;
- case AttributeList::AT_IFunc:
- handleIFuncAttr(S, D, Attr);
+ case ParsedAttr::AT_Aligned:
+ handleAlignedAttr(S, D, AL);
break;
- case AttributeList::AT_Alias:
- handleAliasAttr(S, D, Attr);
+ case ParsedAttr::AT_AlignValue:
+ handleAlignValueAttr(S, D, AL);
break;
- case AttributeList::AT_Aligned:
- handleAlignedAttr(S, D, Attr);
+ case ParsedAttr::AT_AllocSize:
+ handleAllocSizeAttr(S, D, AL);
break;
- case AttributeList::AT_AlignValue:
- handleAlignValueAttr(S, D, Attr);
+ case ParsedAttr::AT_AlwaysInline:
+ handleAlwaysInlineAttr(S, D, AL);
break;
- case AttributeList::AT_AllocSize:
- handleAllocSizeAttr(S, D, Attr);
+ case ParsedAttr::AT_Artificial:
+ handleSimpleAttribute<ArtificialAttr>(S, D, AL);
break;
- case AttributeList::AT_AlwaysInline:
- handleAlwaysInlineAttr(S, D, Attr);
+ case ParsedAttr::AT_AnalyzerNoReturn:
+ handleAnalyzerNoReturnAttr(S, D, AL);
break;
- case AttributeList::AT_AnalyzerNoReturn:
- handleAnalyzerNoReturnAttr(S, D, Attr);
+ case ParsedAttr::AT_TLSModel:
+ handleTLSModelAttr(S, D, AL);
break;
- case AttributeList::AT_TLSModel:
- handleTLSModelAttr(S, D, Attr);
+ case ParsedAttr::AT_Annotate:
+ handleAnnotateAttr(S, D, AL);
break;
- case AttributeList::AT_Annotate:
- handleAnnotateAttr(S, D, Attr);
+ case ParsedAttr::AT_Availability:
+ handleAvailabilityAttr(S, D, AL);
break;
- case AttributeList::AT_Availability:
- handleAvailabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_CarriesDependency:
+ handleDependencyAttr(S, scope, D, AL);
break;
- case AttributeList::AT_CarriesDependency:
- handleDependencyAttr(S, scope, D, Attr);
+ case ParsedAttr::AT_CPUDispatch:
+ case ParsedAttr::AT_CPUSpecific:
+ handleCPUSpecificAttr(S, D, AL);
break;
- case AttributeList::AT_Common:
- handleCommonAttr(S, D, Attr);
+ case ParsedAttr::AT_Common:
+ handleCommonAttr(S, D, AL);
break;
- case AttributeList::AT_CUDAConstant:
- handleConstantAttr(S, D, Attr);
+ case ParsedAttr::AT_CUDAConstant:
+ handleConstantAttr(S, D, AL);
break;
- case AttributeList::AT_PassObjectSize:
- handlePassObjectSizeAttr(S, D, Attr);
+ case ParsedAttr::AT_PassObjectSize:
+ handlePassObjectSizeAttr(S, D, AL);
break;
- case AttributeList::AT_Constructor:
- handleConstructorAttr(S, D, Attr);
+ case ParsedAttr::AT_Constructor:
+ handleConstructorAttr(S, D, AL);
break;
- case AttributeList::AT_CXX11NoReturn:
- handleSimpleAttribute<CXX11NoReturnAttr>(S, D, Attr);
+ case ParsedAttr::AT_CXX11NoReturn:
+ handleSimpleAttribute<CXX11NoReturnAttr>(S, D, AL);
break;
- case AttributeList::AT_Deprecated:
- handleDeprecatedAttr(S, D, Attr);
+ case ParsedAttr::AT_Deprecated:
+ handleDeprecatedAttr(S, D, AL);
break;
- case AttributeList::AT_Destructor:
- handleDestructorAttr(S, D, Attr);
+ case ParsedAttr::AT_Destructor:
+ handleDestructorAttr(S, D, AL);
break;
- case AttributeList::AT_EnableIf:
- handleEnableIfAttr(S, D, Attr);
+ case ParsedAttr::AT_EnableIf:
+ handleEnableIfAttr(S, D, AL);
break;
- case AttributeList::AT_DiagnoseIf:
- handleDiagnoseIfAttr(S, D, Attr);
+ case ParsedAttr::AT_DiagnoseIf:
+ handleDiagnoseIfAttr(S, D, AL);
break;
- case AttributeList::AT_ExtVectorType:
- handleExtVectorTypeAttr(S, scope, D, Attr);
+ case ParsedAttr::AT_ExtVectorType:
+ handleExtVectorTypeAttr(S, D, AL);
break;
- case AttributeList::AT_ExternalSourceSymbol:
- handleExternalSourceSymbolAttr(S, D, Attr);
+ case ParsedAttr::AT_ExternalSourceSymbol:
+ handleExternalSourceSymbolAttr(S, D, AL);
break;
- case AttributeList::AT_MinSize:
- handleMinSizeAttr(S, D, Attr);
+ case ParsedAttr::AT_MinSize:
+ handleMinSizeAttr(S, D, AL);
break;
- case AttributeList::AT_OptimizeNone:
- handleOptimizeNoneAttr(S, D, Attr);
+ case ParsedAttr::AT_OptimizeNone:
+ handleOptimizeNoneAttr(S, D, AL);
break;
- case AttributeList::AT_FlagEnum:
- handleSimpleAttribute<FlagEnumAttr>(S, D, Attr);
+ case ParsedAttr::AT_FlagEnum:
+ handleSimpleAttribute<FlagEnumAttr>(S, D, AL);
break;
- case AttributeList::AT_EnumExtensibility:
- handleEnumExtensibilityAttr(S, D, Attr);
+ case ParsedAttr::AT_EnumExtensibility:
+ handleEnumExtensibilityAttr(S, D, AL);
break;
- case AttributeList::AT_Flatten:
- handleSimpleAttribute<FlattenAttr>(S, D, Attr);
+ case ParsedAttr::AT_Flatten:
+ handleSimpleAttribute<FlattenAttr>(S, D, AL);
break;
- case AttributeList::AT_Format:
- handleFormatAttr(S, D, Attr);
+ case ParsedAttr::AT_Format:
+ handleFormatAttr(S, D, AL);
break;
- case AttributeList::AT_FormatArg:
- handleFormatArgAttr(S, D, Attr);
+ case ParsedAttr::AT_FormatArg:
+ handleFormatArgAttr(S, D, AL);
break;
- case AttributeList::AT_CUDAGlobal:
- handleGlobalAttr(S, D, Attr);
+ case ParsedAttr::AT_CUDAGlobal:
+ handleGlobalAttr(S, D, AL);
break;
- case AttributeList::AT_CUDADevice:
+ case ParsedAttr::AT_CUDADevice:
handleSimpleAttributeWithExclusions<CUDADeviceAttr, CUDAGlobalAttr>(S, D,
- Attr);
+ AL);
break;
- case AttributeList::AT_CUDAHost:
- handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D,
- Attr);
+ case ParsedAttr::AT_CUDAHost:
+ handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D, AL);
break;
- case AttributeList::AT_GNUInline:
- handleGNUInlineAttr(S, D, Attr);
+ case ParsedAttr::AT_GNUInline:
+ handleGNUInlineAttr(S, D, AL);
break;
- case AttributeList::AT_CUDALaunchBounds:
- handleLaunchBoundsAttr(S, D, Attr);
+ case ParsedAttr::AT_CUDALaunchBounds:
+ handleLaunchBoundsAttr(S, D, AL);
break;
- case AttributeList::AT_Restrict:
- handleRestrictAttr(S, D, Attr);
+ case ParsedAttr::AT_Restrict:
+ handleRestrictAttr(S, D, AL);
break;
- case AttributeList::AT_MayAlias:
- handleSimpleAttribute<MayAliasAttr>(S, D, Attr);
+ case ParsedAttr::AT_MayAlias:
+ handleSimpleAttribute<MayAliasAttr>(S, D, AL);
break;
- case AttributeList::AT_Mode:
- handleModeAttr(S, D, Attr);
+ case ParsedAttr::AT_Mode:
+ handleModeAttr(S, D, AL);
break;
- case AttributeList::AT_NoAlias:
- handleSimpleAttribute<NoAliasAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoAlias:
+ handleSimpleAttribute<NoAliasAttr>(S, D, AL);
break;
- case AttributeList::AT_NoCommon:
- handleSimpleAttribute<NoCommonAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoCommon:
+ handleSimpleAttribute<NoCommonAttr>(S, D, AL);
break;
- case AttributeList::AT_NoSplitStack:
- handleSimpleAttribute<NoSplitStackAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoSplitStack:
+ handleSimpleAttribute<NoSplitStackAttr>(S, D, AL);
break;
- case AttributeList::AT_NonNull:
- if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(D))
- handleNonNullAttrParameter(S, PVD, Attr);
+ case ParsedAttr::AT_NonNull:
+ if (auto *PVD = dyn_cast<ParmVarDecl>(D))
+ handleNonNullAttrParameter(S, PVD, AL);
else
- handleNonNullAttr(S, D, Attr);
+ handleNonNullAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_ReturnsNonNull:
+ handleReturnsNonNullAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_NoEscape:
+ handleNoEscapeAttr(S, D, AL);
break;
- case AttributeList::AT_ReturnsNonNull:
- handleReturnsNonNullAttr(S, D, Attr);
+ case ParsedAttr::AT_AssumeAligned:
+ handleAssumeAlignedAttr(S, D, AL);
break;
- case AttributeList::AT_NoEscape:
- handleNoEscapeAttr(S, D, Attr);
+ case ParsedAttr::AT_AllocAlign:
+ handleAllocAlignAttr(S, D, AL);
break;
- case AttributeList::AT_AssumeAligned:
- handleAssumeAlignedAttr(S, D, Attr);
+ case ParsedAttr::AT_Overloadable:
+ handleSimpleAttribute<OverloadableAttr>(S, D, AL);
break;
- case AttributeList::AT_AllocAlign:
- handleAllocAlignAttr(S, D, Attr);
+ case ParsedAttr::AT_Ownership:
+ handleOwnershipAttr(S, D, AL);
break;
- case AttributeList::AT_Overloadable:
- handleSimpleAttribute<OverloadableAttr>(S, D, Attr);
+ case ParsedAttr::AT_Cold:
+ handleSimpleAttributeWithExclusions<ColdAttr, HotAttr>(S, D, AL);
break;
- case AttributeList::AT_Ownership:
- handleOwnershipAttr(S, D, Attr);
+ case ParsedAttr::AT_Hot:
+ handleSimpleAttributeWithExclusions<HotAttr, ColdAttr>(S, D, AL);
break;
- case AttributeList::AT_Cold:
- handleColdAttr(S, D, Attr);
+ case ParsedAttr::AT_Naked:
+ handleNakedAttr(S, D, AL);
break;
- case AttributeList::AT_Hot:
- handleHotAttr(S, D, Attr);
+ case ParsedAttr::AT_NoReturn:
+ handleNoReturnAttr(S, D, AL);
break;
- case AttributeList::AT_Naked:
- handleNakedAttr(S, D, Attr);
+ case ParsedAttr::AT_AnyX86NoCfCheck:
+ handleNoCfCheckAttr(S, D, AL);
break;
- case AttributeList::AT_NoReturn:
- handleNoReturnAttr(S, D, Attr);
+ case ParsedAttr::AT_NoThrow:
+ handleSimpleAttribute<NoThrowAttr>(S, D, AL);
break;
- case AttributeList::AT_NoThrow:
- handleSimpleAttribute<NoThrowAttr>(S, D, Attr);
+ case ParsedAttr::AT_CUDAShared:
+ handleSharedAttr(S, D, AL);
break;
- case AttributeList::AT_CUDAShared:
- handleSharedAttr(S, D, Attr);
+ case ParsedAttr::AT_VecReturn:
+ handleVecReturnAttr(S, D, AL);
break;
- case AttributeList::AT_VecReturn:
- handleVecReturnAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCOwnership:
+ handleObjCOwnershipAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCOwnership:
- handleObjCOwnershipAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCPreciseLifetime:
+ handleObjCPreciseLifetimeAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCPreciseLifetime:
- handleObjCPreciseLifetimeAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCReturnsInnerPointer:
+ handleObjCReturnsInnerPointerAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCReturnsInnerPointer:
- handleObjCReturnsInnerPointerAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCRequiresSuper:
+ handleObjCRequiresSuperAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCRequiresSuper:
- handleObjCRequiresSuperAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCBridge:
+ handleObjCBridgeAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCBridge:
- handleObjCBridgeAttr(S, scope, D, Attr);
+ case ParsedAttr::AT_ObjCBridgeMutable:
+ handleObjCBridgeMutableAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCBridgeMutable:
- handleObjCBridgeMutableAttr(S, scope, D, Attr);
+ case ParsedAttr::AT_ObjCBridgeRelated:
+ handleObjCBridgeRelatedAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCBridgeRelated:
- handleObjCBridgeRelatedAttr(S, scope, D, Attr);
+ case ParsedAttr::AT_ObjCDesignatedInitializer:
+ handleObjCDesignatedInitializer(S, D, AL);
break;
- case AttributeList::AT_ObjCDesignatedInitializer:
- handleObjCDesignatedInitializer(S, D, Attr);
+ case ParsedAttr::AT_ObjCRuntimeName:
+ handleObjCRuntimeName(S, D, AL);
break;
- case AttributeList::AT_ObjCRuntimeName:
- handleObjCRuntimeName(S, D, Attr);
+ case ParsedAttr::AT_ObjCRuntimeVisible:
+ handleSimpleAttribute<ObjCRuntimeVisibleAttr>(S, D, AL);
break;
- case AttributeList::AT_ObjCRuntimeVisible:
- handleSimpleAttribute<ObjCRuntimeVisibleAttr>(S, D, Attr);
+ case ParsedAttr::AT_ObjCBoxable:
+ handleObjCBoxable(S, D, AL);
break;
- case AttributeList::AT_ObjCBoxable:
- handleObjCBoxable(S, D, Attr);
+ case ParsedAttr::AT_CFAuditedTransfer:
+ handleSimpleAttributeWithExclusions<CFAuditedTransferAttr,
+ CFUnknownTransferAttr>(S, D, AL);
break;
- case AttributeList::AT_CFAuditedTransfer:
- handleCFAuditedTransferAttr(S, D, Attr);
+ case ParsedAttr::AT_CFUnknownTransfer:
+ handleSimpleAttributeWithExclusions<CFUnknownTransferAttr,
+ CFAuditedTransferAttr>(S, D, AL);
break;
- case AttributeList::AT_CFUnknownTransfer:
- handleCFUnknownTransferAttr(S, D, Attr);
+ case ParsedAttr::AT_CFConsumed:
+ case ParsedAttr::AT_NSConsumed:
+ handleNSConsumedAttr(S, D, AL);
break;
- case AttributeList::AT_CFConsumed:
- case AttributeList::AT_NSConsumed:
- handleNSConsumedAttr(S, D, Attr);
+ case ParsedAttr::AT_NSConsumesSelf:
+ handleSimpleAttribute<NSConsumesSelfAttr>(S, D, AL);
break;
- case AttributeList::AT_NSConsumesSelf:
- handleSimpleAttribute<NSConsumesSelfAttr>(S, D, Attr);
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_CFReturnsRetained:
+ handleNSReturnsRetainedAttr(S, D, AL);
break;
- case AttributeList::AT_NSReturnsAutoreleased:
- case AttributeList::AT_NSReturnsNotRetained:
- case AttributeList::AT_CFReturnsNotRetained:
- case AttributeList::AT_NSReturnsRetained:
- case AttributeList::AT_CFReturnsRetained:
- handleNSReturnsRetainedAttr(S, D, Attr);
+ case ParsedAttr::AT_WorkGroupSizeHint:
+ handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, AL);
break;
- case AttributeList::AT_WorkGroupSizeHint:
- handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, Attr);
+ case ParsedAttr::AT_ReqdWorkGroupSize:
+ handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, AL);
break;
- case AttributeList::AT_ReqdWorkGroupSize:
- handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, Attr);
+ case ParsedAttr::AT_OpenCLIntelReqdSubGroupSize:
+ handleSubGroupSize(S, D, AL);
break;
- case AttributeList::AT_OpenCLIntelReqdSubGroupSize:
- handleSubGroupSize(S, D, Attr);
+ case ParsedAttr::AT_VecTypeHint:
+ handleVecTypeHint(S, D, AL);
break;
- case AttributeList::AT_VecTypeHint:
- handleVecTypeHint(S, D, Attr);
+ case ParsedAttr::AT_RequireConstantInit:
+ handleSimpleAttribute<RequireConstantInitAttr>(S, D, AL);
break;
- case AttributeList::AT_RequireConstantInit:
- handleSimpleAttribute<RequireConstantInitAttr>(S, D, Attr);
+ case ParsedAttr::AT_InitPriority:
+ handleInitPriorityAttr(S, D, AL);
break;
- case AttributeList::AT_InitPriority:
- handleInitPriorityAttr(S, D, Attr);
+ case ParsedAttr::AT_Packed:
+ handlePackedAttr(S, D, AL);
break;
- case AttributeList::AT_Packed:
- handlePackedAttr(S, D, Attr);
+ case ParsedAttr::AT_Section:
+ handleSectionAttr(S, D, AL);
break;
- case AttributeList::AT_Section:
- handleSectionAttr(S, D, Attr);
+ case ParsedAttr::AT_CodeSeg:
+ handleCodeSegAttr(S, D, AL);
break;
- case AttributeList::AT_Target:
- handleTargetAttr(S, D, Attr);
+ case ParsedAttr::AT_Target:
+ handleTargetAttr(S, D, AL);
break;
- case AttributeList::AT_Unavailable:
- handleAttrWithMessage<UnavailableAttr>(S, D, Attr);
+ case ParsedAttr::AT_MinVectorWidth:
+ handleMinVectorWidthAttr(S, D, AL);
break;
- case AttributeList::AT_ArcWeakrefUnavailable:
- handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, Attr);
+ case ParsedAttr::AT_Unavailable:
+ handleAttrWithMessage<UnavailableAttr>(S, D, AL);
break;
- case AttributeList::AT_ObjCRootClass:
- handleSimpleAttribute<ObjCRootClassAttr>(S, D, Attr);
+ case ParsedAttr::AT_ArcWeakrefUnavailable:
+ handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, AL);
break;
- case AttributeList::AT_ObjCSubclassingRestricted:
- handleSimpleAttribute<ObjCSubclassingRestrictedAttr>(S, D, Attr);
+ case ParsedAttr::AT_ObjCRootClass:
+ handleSimpleAttribute<ObjCRootClassAttr>(S, D, AL);
break;
- case AttributeList::AT_ObjCExplicitProtocolImpl:
- handleObjCSuppresProtocolAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCSubclassingRestricted:
+ handleSimpleAttribute<ObjCSubclassingRestrictedAttr>(S, D, AL);
break;
- case AttributeList::AT_ObjCRequiresPropertyDefs:
- handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, Attr);
+ case ParsedAttr::AT_ObjCExplicitProtocolImpl:
+ handleObjCSuppresProtocolAttr(S, D, AL);
break;
- case AttributeList::AT_Unused:
- handleUnusedAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCRequiresPropertyDefs:
+ handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, AL);
break;
- case AttributeList::AT_ReturnsTwice:
- handleSimpleAttribute<ReturnsTwiceAttr>(S, D, Attr);
+ case ParsedAttr::AT_Unused:
+ handleUnusedAttr(S, D, AL);
break;
- case AttributeList::AT_NotTailCalled:
- handleNotTailCalledAttr(S, D, Attr);
+ case ParsedAttr::AT_ReturnsTwice:
+ handleSimpleAttribute<ReturnsTwiceAttr>(S, D, AL);
break;
- case AttributeList::AT_DisableTailCalls:
- handleDisableTailCallsAttr(S, D, Attr);
+ case ParsedAttr::AT_NotTailCalled:
+ handleSimpleAttributeWithExclusions<NotTailCalledAttr, AlwaysInlineAttr>(
+ S, D, AL);
break;
- case AttributeList::AT_Used:
- handleUsedAttr(S, D, Attr);
+ case ParsedAttr::AT_DisableTailCalls:
+ handleSimpleAttributeWithExclusions<DisableTailCallsAttr, NakedAttr>(S, D,
+ AL);
break;
- case AttributeList::AT_Visibility:
- handleVisibilityAttr(S, D, Attr, false);
+ case ParsedAttr::AT_Used:
+ handleSimpleAttribute<UsedAttr>(S, D, AL);
break;
- case AttributeList::AT_TypeVisibility:
- handleVisibilityAttr(S, D, Attr, true);
+ case ParsedAttr::AT_Visibility:
+ handleVisibilityAttr(S, D, AL, false);
break;
- case AttributeList::AT_WarnUnused:
- handleSimpleAttribute<WarnUnusedAttr>(S, D, Attr);
+ case ParsedAttr::AT_TypeVisibility:
+ handleVisibilityAttr(S, D, AL, true);
break;
- case AttributeList::AT_WarnUnusedResult:
- handleWarnUnusedResult(S, D, Attr);
+ case ParsedAttr::AT_WarnUnused:
+ handleSimpleAttribute<WarnUnusedAttr>(S, D, AL);
break;
- case AttributeList::AT_Weak:
- handleSimpleAttribute<WeakAttr>(S, D, Attr);
+ case ParsedAttr::AT_WarnUnusedResult:
+ handleWarnUnusedResult(S, D, AL);
break;
- case AttributeList::AT_WeakRef:
- handleWeakRefAttr(S, D, Attr);
+ case ParsedAttr::AT_Weak:
+ handleSimpleAttribute<WeakAttr>(S, D, AL);
break;
- case AttributeList::AT_WeakImport:
- handleWeakImportAttr(S, D, Attr);
+ case ParsedAttr::AT_WeakRef:
+ handleWeakRefAttr(S, D, AL);
break;
- case AttributeList::AT_TransparentUnion:
- handleTransparentUnionAttr(S, D, Attr);
+ case ParsedAttr::AT_WeakImport:
+ handleWeakImportAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCException:
- handleSimpleAttribute<ObjCExceptionAttr>(S, D, Attr);
+ case ParsedAttr::AT_TransparentUnion:
+ handleTransparentUnionAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCMethodFamily:
- handleObjCMethodFamilyAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCException:
+ handleSimpleAttribute<ObjCExceptionAttr>(S, D, AL);
break;
- case AttributeList::AT_ObjCNSObject:
- handleObjCNSObject(S, D, Attr);
+ case ParsedAttr::AT_ObjCMethodFamily:
+ handleObjCMethodFamilyAttr(S, D, AL);
break;
- case AttributeList::AT_ObjCIndependentClass:
- handleObjCIndependentClass(S, D, Attr);
+ case ParsedAttr::AT_ObjCNSObject:
+ handleObjCNSObject(S, D, AL);
break;
- case AttributeList::AT_Blocks:
- handleBlocksAttr(S, D, Attr);
+ case ParsedAttr::AT_ObjCIndependentClass:
+ handleObjCIndependentClass(S, D, AL);
break;
- case AttributeList::AT_Sentinel:
- handleSentinelAttr(S, D, Attr);
+ case ParsedAttr::AT_Blocks:
+ handleBlocksAttr(S, D, AL);
break;
- case AttributeList::AT_Const:
- handleSimpleAttribute<ConstAttr>(S, D, Attr);
+ case ParsedAttr::AT_Sentinel:
+ handleSentinelAttr(S, D, AL);
break;
- case AttributeList::AT_Pure:
- handleSimpleAttribute<PureAttr>(S, D, Attr);
+ case ParsedAttr::AT_Const:
+ handleSimpleAttribute<ConstAttr>(S, D, AL);
break;
- case AttributeList::AT_Cleanup:
- handleCleanupAttr(S, D, Attr);
+ case ParsedAttr::AT_Pure:
+ handleSimpleAttribute<PureAttr>(S, D, AL);
break;
- case AttributeList::AT_NoDebug:
- handleNoDebugAttr(S, D, Attr);
+ case ParsedAttr::AT_Cleanup:
+ handleCleanupAttr(S, D, AL);
break;
- case AttributeList::AT_NoDuplicate:
- handleSimpleAttribute<NoDuplicateAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoDebug:
+ handleNoDebugAttr(S, D, AL);
break;
- case AttributeList::AT_Convergent:
- handleSimpleAttribute<ConvergentAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoDuplicate:
+ handleSimpleAttribute<NoDuplicateAttr>(S, D, AL);
break;
- case AttributeList::AT_NoInline:
- handleSimpleAttribute<NoInlineAttr>(S, D, Attr);
+ case ParsedAttr::AT_Convergent:
+ handleSimpleAttribute<ConvergentAttr>(S, D, AL);
break;
- case AttributeList::AT_NoInstrumentFunction: // Interacts with -pg.
- handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoInline:
+ handleSimpleAttribute<NoInlineAttr>(S, D, AL);
break;
- case AttributeList::AT_StdCall:
- case AttributeList::AT_CDecl:
- case AttributeList::AT_FastCall:
- case AttributeList::AT_ThisCall:
- case AttributeList::AT_Pascal:
- case AttributeList::AT_RegCall:
- case AttributeList::AT_SwiftCall:
- case AttributeList::AT_VectorCall:
- case AttributeList::AT_MSABI:
- case AttributeList::AT_SysVABI:
- case AttributeList::AT_Pcs:
- case AttributeList::AT_IntelOclBicc:
- case AttributeList::AT_PreserveMost:
- case AttributeList::AT_PreserveAll:
- handleCallConvAttr(S, D, Attr);
+ case ParsedAttr::AT_NoInstrumentFunction: // Interacts with -pg.
+ handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, AL);
break;
- case AttributeList::AT_Suppress:
- handleSuppressAttr(S, D, Attr);
+ case ParsedAttr::AT_NoStackProtector:
+ // Interacts with -fstack-protector options.
+ handleSimpleAttribute<NoStackProtectorAttr>(S, D, AL);
break;
- case AttributeList::AT_OpenCLKernel:
- handleSimpleAttribute<OpenCLKernelAttr>(S, D, Attr);
+ case ParsedAttr::AT_StdCall:
+ case ParsedAttr::AT_CDecl:
+ case ParsedAttr::AT_FastCall:
+ case ParsedAttr::AT_ThisCall:
+ case ParsedAttr::AT_Pascal:
+ case ParsedAttr::AT_RegCall:
+ case ParsedAttr::AT_SwiftCall:
+ case ParsedAttr::AT_VectorCall:
+ case ParsedAttr::AT_MSABI:
+ case ParsedAttr::AT_SysVABI:
+ case ParsedAttr::AT_Pcs:
+ case ParsedAttr::AT_IntelOclBicc:
+ case ParsedAttr::AT_PreserveMost:
+ case ParsedAttr::AT_PreserveAll:
+ handleCallConvAttr(S, D, AL);
break;
- case AttributeList::AT_OpenCLAccess:
- handleOpenCLAccessAttr(S, D, Attr);
+ case ParsedAttr::AT_Suppress:
+ handleSuppressAttr(S, D, AL);
break;
- case AttributeList::AT_OpenCLNoSVM:
- handleOpenCLNoSVMAttr(S, D, Attr);
+ case ParsedAttr::AT_OpenCLKernel:
+ handleSimpleAttribute<OpenCLKernelAttr>(S, D, AL);
break;
- case AttributeList::AT_SwiftContext:
- handleParameterABIAttr(S, D, Attr, ParameterABI::SwiftContext);
+ case ParsedAttr::AT_OpenCLAccess:
+ handleOpenCLAccessAttr(S, D, AL);
break;
- case AttributeList::AT_SwiftErrorResult:
- handleParameterABIAttr(S, D, Attr, ParameterABI::SwiftErrorResult);
+ case ParsedAttr::AT_OpenCLNoSVM:
+ handleOpenCLNoSVMAttr(S, D, AL);
break;
- case AttributeList::AT_SwiftIndirectResult:
- handleParameterABIAttr(S, D, Attr, ParameterABI::SwiftIndirectResult);
+ case ParsedAttr::AT_SwiftContext:
+ handleParameterABIAttr(S, D, AL, ParameterABI::SwiftContext);
break;
- case AttributeList::AT_InternalLinkage:
- handleInternalLinkageAttr(S, D, Attr);
+ case ParsedAttr::AT_SwiftErrorResult:
+ handleParameterABIAttr(S, D, AL, ParameterABI::SwiftErrorResult);
break;
- case AttributeList::AT_LTOVisibilityPublic:
- handleSimpleAttribute<LTOVisibilityPublicAttr>(S, D, Attr);
+ case ParsedAttr::AT_SwiftIndirectResult:
+ handleParameterABIAttr(S, D, AL, ParameterABI::SwiftIndirectResult);
+ break;
+ case ParsedAttr::AT_InternalLinkage:
+ handleInternalLinkageAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_LTOVisibilityPublic:
+ handleSimpleAttribute<LTOVisibilityPublicAttr>(S, D, AL);
break;
// Microsoft attributes:
- case AttributeList::AT_EmptyBases:
- handleSimpleAttribute<EmptyBasesAttr>(S, D, Attr);
+ case ParsedAttr::AT_EmptyBases:
+ handleSimpleAttribute<EmptyBasesAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_LayoutVersion:
+ handleLayoutVersion(S, D, AL);
break;
- case AttributeList::AT_LayoutVersion:
- handleLayoutVersion(S, D, Attr);
+ case ParsedAttr::AT_TrivialABI:
+ handleSimpleAttribute<TrivialABIAttr>(S, D, AL);
break;
- case AttributeList::AT_MSNoVTable:
- handleSimpleAttribute<MSNoVTableAttr>(S, D, Attr);
+ case ParsedAttr::AT_MSNoVTable:
+ handleSimpleAttribute<MSNoVTableAttr>(S, D, AL);
break;
- case AttributeList::AT_MSStruct:
- handleSimpleAttribute<MSStructAttr>(S, D, Attr);
+ case ParsedAttr::AT_MSStruct:
+ handleSimpleAttribute<MSStructAttr>(S, D, AL);
break;
- case AttributeList::AT_Uuid:
- handleUuidAttr(S, D, Attr);
+ case ParsedAttr::AT_Uuid:
+ handleUuidAttr(S, D, AL);
break;
- case AttributeList::AT_MSInheritance:
- handleMSInheritanceAttr(S, D, Attr);
+ case ParsedAttr::AT_MSInheritance:
+ handleMSInheritanceAttr(S, D, AL);
break;
- case AttributeList::AT_SelectAny:
- handleSimpleAttribute<SelectAnyAttr>(S, D, Attr);
+ case ParsedAttr::AT_SelectAny:
+ handleSimpleAttribute<SelectAnyAttr>(S, D, AL);
break;
- case AttributeList::AT_Thread:
- handleDeclspecThreadAttr(S, D, Attr);
+ case ParsedAttr::AT_Thread:
+ handleDeclspecThreadAttr(S, D, AL);
break;
- case AttributeList::AT_AbiTag:
- handleAbiTagAttr(S, D, Attr);
+ case ParsedAttr::AT_AbiTag:
+ handleAbiTagAttr(S, D, AL);
break;
// Thread safety attributes:
- case AttributeList::AT_AssertExclusiveLock:
- handleAssertExclusiveLockAttr(S, D, Attr);
+ case ParsedAttr::AT_AssertExclusiveLock:
+ handleAssertExclusiveLockAttr(S, D, AL);
break;
- case AttributeList::AT_AssertSharedLock:
- handleAssertSharedLockAttr(S, D, Attr);
+ case ParsedAttr::AT_AssertSharedLock:
+ handleAssertSharedLockAttr(S, D, AL);
break;
- case AttributeList::AT_GuardedVar:
- handleSimpleAttribute<GuardedVarAttr>(S, D, Attr);
+ case ParsedAttr::AT_GuardedVar:
+ handleSimpleAttribute<GuardedVarAttr>(S, D, AL);
break;
- case AttributeList::AT_PtGuardedVar:
- handlePtGuardedVarAttr(S, D, Attr);
+ case ParsedAttr::AT_PtGuardedVar:
+ handlePtGuardedVarAttr(S, D, AL);
break;
- case AttributeList::AT_ScopedLockable:
- handleSimpleAttribute<ScopedLockableAttr>(S, D, Attr);
+ case ParsedAttr::AT_ScopedLockable:
+ handleSimpleAttribute<ScopedLockableAttr>(S, D, AL);
break;
- case AttributeList::AT_NoSanitize:
- handleNoSanitizeAttr(S, D, Attr);
+ case ParsedAttr::AT_NoSanitize:
+ handleNoSanitizeAttr(S, D, AL);
break;
- case AttributeList::AT_NoSanitizeSpecific:
- handleNoSanitizeSpecificAttr(S, D, Attr);
+ case ParsedAttr::AT_NoSanitizeSpecific:
+ handleNoSanitizeSpecificAttr(S, D, AL);
break;
- case AttributeList::AT_NoThreadSafetyAnalysis:
- handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, Attr);
+ case ParsedAttr::AT_NoThreadSafetyAnalysis:
+ handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, AL);
break;
- case AttributeList::AT_GuardedBy:
- handleGuardedByAttr(S, D, Attr);
+ case ParsedAttr::AT_GuardedBy:
+ handleGuardedByAttr(S, D, AL);
break;
- case AttributeList::AT_PtGuardedBy:
- handlePtGuardedByAttr(S, D, Attr);
+ case ParsedAttr::AT_PtGuardedBy:
+ handlePtGuardedByAttr(S, D, AL);
break;
- case AttributeList::AT_ExclusiveTrylockFunction:
- handleExclusiveTrylockFunctionAttr(S, D, Attr);
+ case ParsedAttr::AT_ExclusiveTrylockFunction:
+ handleExclusiveTrylockFunctionAttr(S, D, AL);
break;
- case AttributeList::AT_LockReturned:
- handleLockReturnedAttr(S, D, Attr);
+ case ParsedAttr::AT_LockReturned:
+ handleLockReturnedAttr(S, D, AL);
break;
- case AttributeList::AT_LocksExcluded:
- handleLocksExcludedAttr(S, D, Attr);
+ case ParsedAttr::AT_LocksExcluded:
+ handleLocksExcludedAttr(S, D, AL);
break;
- case AttributeList::AT_SharedTrylockFunction:
- handleSharedTrylockFunctionAttr(S, D, Attr);
+ case ParsedAttr::AT_SharedTrylockFunction:
+ handleSharedTrylockFunctionAttr(S, D, AL);
break;
- case AttributeList::AT_AcquiredBefore:
- handleAcquiredBeforeAttr(S, D, Attr);
+ case ParsedAttr::AT_AcquiredBefore:
+ handleAcquiredBeforeAttr(S, D, AL);
break;
- case AttributeList::AT_AcquiredAfter:
- handleAcquiredAfterAttr(S, D, Attr);
+ case ParsedAttr::AT_AcquiredAfter:
+ handleAcquiredAfterAttr(S, D, AL);
break;
// Capability analysis attributes.
- case AttributeList::AT_Capability:
- case AttributeList::AT_Lockable:
- handleCapabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_Capability:
+ case ParsedAttr::AT_Lockable:
+ handleCapabilityAttr(S, D, AL);
break;
- case AttributeList::AT_RequiresCapability:
- handleRequiresCapabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_RequiresCapability:
+ handleRequiresCapabilityAttr(S, D, AL);
break;
- case AttributeList::AT_AssertCapability:
- handleAssertCapabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_AssertCapability:
+ handleAssertCapabilityAttr(S, D, AL);
break;
- case AttributeList::AT_AcquireCapability:
- handleAcquireCapabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_AcquireCapability:
+ handleAcquireCapabilityAttr(S, D, AL);
break;
- case AttributeList::AT_ReleaseCapability:
- handleReleaseCapabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_ReleaseCapability:
+ handleReleaseCapabilityAttr(S, D, AL);
break;
- case AttributeList::AT_TryAcquireCapability:
- handleTryAcquireCapabilityAttr(S, D, Attr);
+ case ParsedAttr::AT_TryAcquireCapability:
+ handleTryAcquireCapabilityAttr(S, D, AL);
break;
// Consumed analysis attributes.
- case AttributeList::AT_Consumable:
- handleConsumableAttr(S, D, Attr);
+ case ParsedAttr::AT_Consumable:
+ handleConsumableAttr(S, D, AL);
break;
- case AttributeList::AT_ConsumableAutoCast:
- handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, Attr);
+ case ParsedAttr::AT_ConsumableAutoCast:
+ handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, AL);
break;
- case AttributeList::AT_ConsumableSetOnRead:
- handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, Attr);
+ case ParsedAttr::AT_ConsumableSetOnRead:
+ handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, AL);
break;
- case AttributeList::AT_CallableWhen:
- handleCallableWhenAttr(S, D, Attr);
+ case ParsedAttr::AT_CallableWhen:
+ handleCallableWhenAttr(S, D, AL);
break;
- case AttributeList::AT_ParamTypestate:
- handleParamTypestateAttr(S, D, Attr);
+ case ParsedAttr::AT_ParamTypestate:
+ handleParamTypestateAttr(S, D, AL);
break;
- case AttributeList::AT_ReturnTypestate:
- handleReturnTypestateAttr(S, D, Attr);
+ case ParsedAttr::AT_ReturnTypestate:
+ handleReturnTypestateAttr(S, D, AL);
break;
- case AttributeList::AT_SetTypestate:
- handleSetTypestateAttr(S, D, Attr);
+ case ParsedAttr::AT_SetTypestate:
+ handleSetTypestateAttr(S, D, AL);
break;
- case AttributeList::AT_TestTypestate:
- handleTestTypestateAttr(S, D, Attr);
+ case ParsedAttr::AT_TestTypestate:
+ handleTestTypestateAttr(S, D, AL);
break;
// Type safety attributes.
- case AttributeList::AT_ArgumentWithTypeTag:
- handleArgumentWithTypeTagAttr(S, D, Attr);
+ case ParsedAttr::AT_ArgumentWithTypeTag:
+ handleArgumentWithTypeTagAttr(S, D, AL);
break;
- case AttributeList::AT_TypeTagForDatatype:
- handleTypeTagForDatatypeAttr(S, D, Attr);
+ case ParsedAttr::AT_TypeTagForDatatype:
+ handleTypeTagForDatatypeAttr(S, D, AL);
break;
- case AttributeList::AT_AnyX86NoCallerSavedRegisters:
- handleNoCallerSavedRegsAttr(S, D, Attr);
+ case ParsedAttr::AT_AnyX86NoCallerSavedRegisters:
+ handleSimpleAttribute<AnyX86NoCallerSavedRegistersAttr>(S, D, AL);
break;
- case AttributeList::AT_RenderScriptKernel:
- handleSimpleAttribute<RenderScriptKernelAttr>(S, D, Attr);
+ case ParsedAttr::AT_RenderScriptKernel:
+ handleSimpleAttribute<RenderScriptKernelAttr>(S, D, AL);
break;
// XRay attributes.
- case AttributeList::AT_XRayInstrument:
- handleSimpleAttribute<XRayInstrumentAttr>(S, D, Attr);
+ case ParsedAttr::AT_XRayInstrument:
+ handleSimpleAttribute<XRayInstrumentAttr>(S, D, AL);
break;
- case AttributeList::AT_XRayLogArgs:
- handleXRayLogArgsAttr(S, D, Attr);
+ case ParsedAttr::AT_XRayLogArgs:
+ handleXRayLogArgsAttr(S, D, AL);
break;
}
}
@@ -6583,18 +6607,21 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
/// attribute list to the specified decl, ignoring any type attributes.
void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
- const AttributeList *AttrList,
+ const ParsedAttributesView &AttrList,
bool IncludeCXX11Attributes) {
- for (const AttributeList* l = AttrList; l; l = l->getNext())
- ProcessDeclAttribute(*this, S, D, *l, IncludeCXX11Attributes);
+ if (AttrList.empty())
+ return;
+
+ for (const ParsedAttr &AL : AttrList)
+ ProcessDeclAttribute(*this, S, D, AL, IncludeCXX11Attributes);
// FIXME: We should be able to handle these cases in TableGen.
// GCC accepts
// static int a9 __attribute__((weakref));
// but that looks really pointless. We reject it.
if (D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
- Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias)
- << cast<NamedDecl>(D);
+ Diag(AttrList.begin()->getLoc(), diag::err_attribute_weakref_without_alias)
+ << cast<NamedDecl>(D);
D->dropAttr<WeakRefAttr>();
return;
}
@@ -6605,79 +6632,83 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
// attribute must never appear as a group" for attributes like cold and hot.
if (!D->hasAttr<OpenCLKernelAttr>()) {
// These attributes cannot be applied to a non-kernel function.
- if (Attr *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
+ if (const auto *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
// FIXME: This emits a different error message than
// diag::err_attribute_wrong_decl_type + ExpectedKernelFunction.
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<WorkGroupSizeHintAttr>()) {
+ } else if (const auto *A = D->getAttr<WorkGroupSizeHintAttr>()) {
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<VecTypeHintAttr>()) {
+ } else if (const auto *A = D->getAttr<VecTypeHintAttr>()) {
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
- Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
- D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<AMDGPUWavesPerEUAttr>()) {
- Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
- D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<AMDGPUNumSGPRAttr>()) {
- Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
- D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
- Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
- D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
+ } else if (const auto *A = D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
+ } else if (!D->hasAttr<CUDAGlobalAttr>()) {
+ if (const auto *A = D->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ } else if (const auto *A = D->getAttr<AMDGPUWavesPerEUAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ } else if (const auto *A = D->getAttr<AMDGPUNumSGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ } else if (const auto *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ }
}
}
}
// Helper for delayed processing TransparentUnion attribute.
-void Sema::ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList) {
- for (const AttributeList *Attr = AttrList; Attr; Attr = Attr->getNext())
- if (Attr->getKind() == AttributeList::AT_TransparentUnion) {
- handleTransparentUnionAttr(*this, D, *Attr);
+void Sema::ProcessDeclAttributeDelayed(Decl *D,
+ const ParsedAttributesView &AttrList) {
+ for (const ParsedAttr &AL : AttrList)
+ if (AL.getKind() == ParsedAttr::AT_TransparentUnion) {
+ handleTransparentUnionAttr(*this, D, AL);
break;
}
}
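
The recurring mechanical change in this patch is visible in miniature here: the intrusive, singly linked AttributeList is replaced by an iterable ParsedAttributesView. A minimal before/after sketch, assuming a view named List:

// Editorial sketch only; not part of this patch.
// Before: pointer-chasing over an intrusive linked list.
//   for (const AttributeList *L = List; L; L = L->getNext())
//     consume(*L);
// After: range-based iteration over a lightweight view.
//   for (const ParsedAttr &AL : List)
//     consume(AL);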
// Annotation attributes are the only attributes allowed after an access
// specifier.
-bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
- const AttributeList *AttrList) {
- for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- if (l->getKind() == AttributeList::AT_Annotate) {
- ProcessDeclAttribute(*this, nullptr, ASDecl, *l, l->isCXX11Attribute());
+bool Sema::ProcessAccessDeclAttributeList(
+ AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList) {
+ for (const ParsedAttr &AL : AttrList) {
+ if (AL.getKind() == ParsedAttr::AT_Annotate) {
+ ProcessDeclAttribute(*this, nullptr, ASDecl, AL, AL.isCXX11Attribute());
} else {
- Diag(l->getLoc(), diag::err_only_annotate_after_access_spec);
+ Diag(AL.getLoc(), diag::err_only_annotate_after_access_spec);
return true;
}
}
-
return false;
}
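
As a usage note (the example declarations are hypothetical), only annotate-style attributes are accepted after an access specifier:

// Editorial example; not part of this patch.
//   class C {
//   public __attribute__((annotate("entry"))):   // OK, handled above
//   private __attribute__((unused)):             // err_only_annotate_after_access_spec
//   };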
/// checkUnusedDeclAttributes - Check a list of attributes to see if it
/// contains any decl attributes that we should warn about.
-static void checkUnusedDeclAttributes(Sema &S, const AttributeList *A) {
- for ( ; A; A = A->getNext()) {
+static void checkUnusedDeclAttributes(Sema &S, const ParsedAttributesView &A) {
+ for (const ParsedAttr &AL : A) {
// Only warn if the attribute is an unignored, non-type attribute.
- if (A->isUsedAsTypeAttr() || A->isInvalid()) continue;
- if (A->getKind() == AttributeList::IgnoredAttribute) continue;
+ if (AL.isUsedAsTypeAttr() || AL.isInvalid())
+ continue;
+ if (AL.getKind() == ParsedAttr::IgnoredAttribute)
+ continue;
- if (A->getKind() == AttributeList::UnknownAttribute) {
- S.Diag(A->getLoc(), diag::warn_unknown_attribute_ignored)
- << A->getName() << A->getRange();
+ if (AL.getKind() == ParsedAttr::UnknownAttribute) {
+ S.Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL.getName() << AL.getRange();
} else {
- S.Diag(A->getLoc(), diag::warn_attribute_not_on_decl)
- << A->getName() << A->getRange();
+ S.Diag(AL.getLoc(), diag::warn_attribute_not_on_decl)
+ << AL.getName() << AL.getRange();
}
}
}
@@ -6686,7 +6717,7 @@ static void checkUnusedDeclAttributes(Sema &S, const AttributeList *A) {
/// used to build a declaration, complain about any decl attributes
/// which might be lying around on it.
void Sema::checkUnusedDeclAttributes(Declarator &D) {
- ::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes().getList());
+ ::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes());
::checkUnusedDeclAttributes(*this, D.getAttributes());
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
::checkUnusedDeclAttributes(*this, D.getTypeObject(i).getAttrs());
@@ -6698,7 +6729,7 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc) {
assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
NamedDecl *NewD = nullptr;
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(ND)) {
FunctionDecl *NewFD;
// FIXME: Missing call to CheckFunctionDeclaration().
// FIXME: Mangling?
@@ -6718,7 +6749,7 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
// Fake up parameter variables; they are declared as if this were
// a typedef.
QualType FDTy = FD->getType();
- if (const FunctionProtoType *FT = FDTy->getAs<FunctionProtoType>()) {
+ if (const auto *FT = FDTy->getAs<FunctionProtoType>()) {
SmallVector<ParmVarDecl*, 16> Params;
for (const auto &AI : FT->param_types()) {
ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, Loc, AI);
@@ -6727,15 +6758,13 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
}
NewFD->setParams(Params);
}
- } else if (VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ } else if (auto *VD = dyn_cast<VarDecl>(ND)) {
NewD = VarDecl::Create(VD->getASTContext(), VD->getDeclContext(),
VD->getInnerLocStart(), VD->getLocation(), II,
VD->getType(), VD->getTypeSourceInfo(),
VD->getStorageClass());
- if (VD->getQualifier()) {
- VarDecl *NewVD = cast<VarDecl>(NewD);
- NewVD->setQualifierInfo(VD->getQualifierLoc());
- }
+ if (VD->getQualifier())
+ cast<VarDecl>(NewD)->setQualifierInfo(VD->getQualifierLoc());
}
return NewD;
}
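
For orientation, the pragma this helper serves (identifiers below are hypothetical):

// Editorial example; not part of this patch. Given
//   void f(void);
//   #pragma weak weak_f = f
// DeclClonePragmaWeak clones f's declaration under the alias name weak_f,
// including freshly built parameter variables for prototyped functions.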
@@ -6771,10 +6800,10 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
LoadExternalWeakUndeclaredIdentifiers();
if (!WeakUndeclaredIdentifiers.empty()) {
NamedDecl *ND = nullptr;
- if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (auto *VD = dyn_cast<VarDecl>(D))
if (VD->isExternC())
ND = VD;
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
if (FD->isExternC())
ND = FD;
if (ND) {
@@ -6795,20 +6824,19 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
/// specified in many different places, and we need to find and apply them all.
void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
// Apply decl attributes from the DeclSpec if present.
- if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes().getList())
- ProcessDeclAttributeList(S, D, Attrs);
+ if (!PD.getDeclSpec().getAttributes().empty())
+ ProcessDeclAttributeList(S, D, PD.getDeclSpec().getAttributes());
// Walk the declarator structure, applying decl attributes that were in a type
// position to the decl itself. This handles cases like:
// int *__attr__(x)** D;
// when X is a decl attribute.
for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
- if (const AttributeList *Attrs = PD.getTypeObject(i).getAttrs())
- ProcessDeclAttributeList(S, D, Attrs, /*IncludeCXX11Attributes=*/false);
+ ProcessDeclAttributeList(S, D, PD.getTypeObject(i).getAttrs(),
+ /*IncludeCXX11Attributes=*/false);
// Finally, apply any attributes on the decl itself.
- if (const AttributeList *Attrs = PD.getAttributes())
- ProcessDeclAttributeList(S, D, Attrs);
+ ProcessDeclAttributeList(S, D, PD.getAttributes());
// Apply additional attributes specified by '#pragma clang attribute'.
AddPragmaAttributes(S, D);
@@ -6817,14 +6845,14 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
/// Is the given declaration allowed to use a forbidden type?
/// If so, it'll still be annotated with an attribute that makes it
/// illegal to actually use.
-static bool isForbiddenTypeAllowed(Sema &S, Decl *decl,
+static bool isForbiddenTypeAllowed(Sema &S, Decl *D,
const DelayedDiagnostic &diag,
UnavailableAttr::ImplicitReason &reason) {
// Private ivars are always okay. Unfortunately, people don't
// always properly make their ivars private, even in system headers.
// Plus we need to make fields okay, too.
- if (!isa<FieldDecl>(decl) && !isa<ObjCPropertyDecl>(decl) &&
- !isa<FunctionDecl>(decl))
+ if (!isa<FieldDecl>(D) && !isa<ObjCPropertyDecl>(D) &&
+ !isa<FunctionDecl>(D))
return false;
// Silently accept unsupported uses of __weak in both user and system
@@ -6832,7 +6860,7 @@ static bool isForbiddenTypeAllowed(Sema &S, Decl *decl,
// -fno-objc-arc files. We do have to take some care against attempts
// to define such things; for now, we've only done that for ivars
// and properties.
- if ((isa<ObjCIvarDecl>(decl) || isa<ObjCPropertyDecl>(decl))) {
+ if ((isa<ObjCIvarDecl>(D) || isa<ObjCPropertyDecl>(D))) {
if (diag.getForbiddenTypeDiagnostic() == diag::err_arc_weak_disabled ||
diag.getForbiddenTypeDiagnostic() == diag::err_arc_weak_no_runtime) {
reason = UnavailableAttr::IR_ForbiddenWeak;
@@ -6841,7 +6869,7 @@ static bool isForbiddenTypeAllowed(Sema &S, Decl *decl,
}
// Allow all sorts of things in system headers.
- if (S.Context.getSourceManager().isInSystemHeader(decl->getLocation())) {
+ if (S.Context.getSourceManager().isInSystemHeader(D->getLocation())) {
// Currently, all the failures dealt with this way are due to ARC
// restrictions.
reason = UnavailableAttr::IR_ARCForbiddenType;
@@ -6852,30 +6880,29 @@ static bool isForbiddenTypeAllowed(Sema &S, Decl *decl,
}
/// Handle a delayed forbidden-type diagnostic.
-static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
- Decl *decl) {
- auto reason = UnavailableAttr::IR_None;
- if (decl && isForbiddenTypeAllowed(S, decl, diag, reason)) {
- assert(reason && "didn't set reason?");
- decl->addAttr(UnavailableAttr::CreateImplicit(S.Context, "", reason,
- diag.Loc));
+static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &DD,
+ Decl *D) {
+ auto Reason = UnavailableAttr::IR_None;
+ if (D && isForbiddenTypeAllowed(S, D, DD, Reason)) {
+ assert(Reason && "didn't set reason?");
+ D->addAttr(UnavailableAttr::CreateImplicit(S.Context, "", Reason, DD.Loc));
return;
}
if (S.getLangOpts().ObjCAutoRefCount)
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(decl)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// FIXME: we may want to suppress diagnostics for all
- // kind of forbidden type messages on unavailable functions.
+      // kinds of forbidden type messages on unavailable functions.
if (FD->hasAttr<UnavailableAttr>() &&
- diag.getForbiddenTypeDiagnostic() ==
- diag::err_arc_array_param_no_ownership) {
- diag.Triggered = true;
+ DD.getForbiddenTypeDiagnostic() ==
+ diag::err_arc_array_param_no_ownership) {
+ DD.Triggered = true;
return;
}
}
- S.Diag(diag.Loc, diag.getForbiddenTypeDiagnostic())
- << diag.getForbiddenTypeOperand() << diag.getForbiddenTypeArgument();
- diag.Triggered = true;
+ S.Diag(DD.Loc, DD.getForbiddenTypeDiagnostic())
+ << DD.getForbiddenTypeOperand() << DD.getForbiddenTypeArgument();
+ DD.Triggered = true;
}
static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
@@ -6918,9 +6945,9 @@ ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
// For typedefs, if the typedef declaration appears available look
// to the underlying type to see if it is more restrictive.
- while (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ while (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
if (Result == AR_Available) {
- if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ if (const auto *TT = TD->getUnderlyingType()->getAs<TagType>()) {
D = TT->getDecl();
Result = D->getAvailability(Message);
continue;
@@ -6930,7 +6957,7 @@ ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
}
// Forward class declarations get their attributes from their definition.
- if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (const auto *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
if (IDecl->getDefinition()) {
D = IDecl->getDefinition();
Result = D->getAvailability(Message);
@@ -6950,7 +6977,7 @@ ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
}
-/// \brief whether we should emit a diagnostic for \c K and \c DeclVersion in
+/// whether we should emit a diagnostic for \c K and \c DeclVersion in
/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
/// in a deprecated context, but not the other way around.
static bool ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
@@ -6973,40 +7000,24 @@ static bool ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
return false;
};
- // FIXME: This is a temporary workaround! Some existing Apple headers depends
- // on nested declarations in an @interface having the availability of the
- // interface when they really shouldn't: they are members of the enclosing
- // context, and can referenced from there.
- if (S.OriginalLexicalContext && cast<Decl>(S.OriginalLexicalContext) != Ctx) {
- auto *OrigCtx = cast<Decl>(S.OriginalLexicalContext);
- if (CheckContext(OrigCtx))
- return false;
-
- // An implementation implicitly has the availability of the interface.
- if (auto *CatOrImpl = dyn_cast<ObjCImplDecl>(OrigCtx)) {
- if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
- if (CheckContext(Interface))
- return false;
- }
- // A category implicitly has the availability of the interface.
- else if (auto *CatD = dyn_cast<ObjCCategoryDecl>(OrigCtx))
- if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
- if (CheckContext(Interface))
- return false;
- }
-
do {
if (CheckContext(Ctx))
return false;
// An implementation implicitly has the availability of the interface.
- if (auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
+    // Unless it is the "+load" method.
+ if (const auto *MethodD = dyn_cast<ObjCMethodDecl>(Ctx))
+ if (MethodD->isClassMethod() &&
+ MethodD->getSelector().getAsString() == "load")
+ return true;
+
+ if (const auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
if (CheckContext(Interface))
return false;
}
// A category implicitly has the availability of the interface.
- else if (auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
+ else if (const auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
if (CheckContext(Interface))
return false;
@@ -7076,6 +7087,45 @@ struct AttributeInsertion {
} // end anonymous namespace
+/// Tries to parse a string as an ObjC method name.
+///
+/// \param Name The string to parse. Expected to originate from an
+/// availability attribute argument.
+/// \param SlotNames The vector that will be populated with slot names. In case
+/// of unsuccessful parsing it may contain invalid data.
+/// \returns The number of method parameters if parsing was successful, None
+/// otherwise.
+static Optional<unsigned>
+tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
+ const LangOptions &LangOpts) {
+ // Accept replacements starting with - or + as valid ObjC method names.
+ if (!Name.empty() && (Name.front() == '-' || Name.front() == '+'))
+ Name = Name.drop_front(1);
+ if (Name.empty())
+ return None;
+ Name.split(SlotNames, ':');
+ unsigned NumParams;
+ if (Name.back() == ':') {
+ // Remove an empty string at the end that doesn't represent any slot.
+ SlotNames.pop_back();
+ NumParams = SlotNames.size();
+ } else {
+ if (SlotNames.size() != 1)
+ // Not a valid method name, just a colon-separated string.
+ return None;
+ NumParams = 0;
+ }
+ // Verify all slot names are valid.
+ bool AllowDollar = LangOpts.DollarIdents;
+ for (StringRef S : SlotNames) {
+ if (S.empty())
+ continue;
+ if (!isValidIdentifier(S, AllowDollar))
+ return None;
+ }
+ return NumParams;
+}
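
A few worked inputs for tryParseObjCMethodName as defined above (all strings hypothetical):

// Editorial examples; not part of this patch.
//   "-setObject:forKey:" -> SlotNames = {"setObject", "forKey"}, returns 2
//   "+load"              -> SlotNames = {"load"},                returns 0
//   "a:b"                -> returns None (colon-separated, but no trailing ':')
//   "bad name:"          -> returns None ("bad name" is not a valid identifier)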
+
/// Returns a source location in which it's appropriate to insert a new
/// attribute for the given declaration \p D.
static Optional<AttributeInsertion>
@@ -7105,14 +7155,15 @@ createAttributeInsertion(const NamedDecl *D, const SourceManager &SM,
/// \param Ctx The context that the reference occurred in
/// \param ReferringDecl The exact declaration that was referenced.
/// \param OffendingDecl A related decl to \c ReferringDecl that has an
-/// availability attribute corrisponding to \c K attached to it. Note that this
+/// availability attribute corresponding to \c K attached to it. Note that this
/// may not be the same as ReferringDecl, i.e. if an EnumDecl is annotated and
/// we refer to a member EnumConstantDecl, ReferringDecl is the EnumConstantDecl
/// and OffendingDecl is the EnumDecl.
static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
Decl *Ctx, const NamedDecl *ReferringDecl,
const NamedDecl *OffendingDecl,
- StringRef Message, SourceLocation Loc,
+ StringRef Message,
+ ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess) {
@@ -7134,6 +7185,8 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx))
return;
+ SourceLocation Loc = Locs.front();
+
// The declaration can have multiple availability attributes, we are looking
// at one of them.
const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
@@ -7177,7 +7230,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
<< OffendingDecl << /* partial */ 3;
if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
- if (auto *TD = dyn_cast<TagDecl>(Enclosing))
+ if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
if (TD->getDeclName().isEmpty()) {
S.Diag(TD->getLocation(),
diag::note_decl_unguarded_availability_silence)
@@ -7218,8 +7271,8 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
property_note_select = /* deprecated */ 0;
available_here_select_kind = /* deprecated */ 2;
- if (const auto *Attr = OffendingDecl->getAttr<DeprecatedAttr>())
- NoteLocation = Attr->getLocation();
+ if (const auto *AL = OffendingDecl->getAttr<DeprecatedAttr>())
+ NoteLocation = AL->getLocation();
break;
case AR_Unavailable:
@@ -7230,8 +7283,8 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
property_note_select = /* unavailable */ 1;
available_here_select_kind = /* unavailable */ 0;
- if (auto Attr = OffendingDecl->getAttr<UnavailableAttr>()) {
- if (Attr->isImplicit() && Attr->getImplicitReason()) {
+ if (auto AL = OffendingDecl->getAttr<UnavailableAttr>()) {
+ if (AL->isImplicit() && AL->getImplicitReason()) {
// Most of these failures are due to extra restrictions in ARC;
// reflect that in the primary diagnostic when applicable.
auto flagARCError = [&] {
@@ -7241,7 +7294,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
diag = diag::err_unavailable_in_arc;
};
- switch (Attr->getImplicitReason()) {
+ switch (AL->getImplicitReason()) {
case UnavailableAttr::IR_None: break;
case UnavailableAttr::IR_ARCForbiddenType:
@@ -7279,37 +7332,55 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
llvm_unreachable("Warning for availability of available declaration?");
}
- CharSourceRange UseRange;
- StringRef Replacement;
+ SmallVector<FixItHint, 12> FixIts;
if (K == AR_Deprecated) {
- if (auto Attr = OffendingDecl->getAttr<DeprecatedAttr>())
- Replacement = Attr->getReplacement();
- if (auto Attr = getAttrForPlatform(S.Context, OffendingDecl))
- Replacement = Attr->getReplacement();
+ StringRef Replacement;
+ if (auto AL = OffendingDecl->getAttr<DeprecatedAttr>())
+ Replacement = AL->getReplacement();
+ if (auto AL = getAttrForPlatform(S.Context, OffendingDecl))
+ Replacement = AL->getReplacement();
+ CharSourceRange UseRange;
if (!Replacement.empty())
UseRange =
CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
+ if (UseRange.isValid()) {
+ if (const auto *MethodDecl = dyn_cast<ObjCMethodDecl>(ReferringDecl)) {
+ Selector Sel = MethodDecl->getSelector();
+ SmallVector<StringRef, 12> SelectorSlotNames;
+ Optional<unsigned> NumParams = tryParseObjCMethodName(
+ Replacement, SelectorSlotNames, S.getLangOpts());
+ if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
+ assert(SelectorSlotNames.size() == Locs.size());
+ for (unsigned I = 0; I < Locs.size(); ++I) {
+ if (!Sel.getNameForSlot(I).empty()) {
+ CharSourceRange NameRange = CharSourceRange::getCharRange(
+ Locs[I], S.getLocForEndOfToken(Locs[I]));
+ FixIts.push_back(FixItHint::CreateReplacement(
+ NameRange, SelectorSlotNames[I]));
+ } else
+ FixIts.push_back(
+ FixItHint::CreateInsertion(Locs[I], SelectorSlotNames[I]));
+ }
+ } else
+ FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
+ } else
+ FixIts.push_back(FixItHint::CreateReplacement(UseRange, Replacement));
+ }
}
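
The effect of the per-slot fix-its can be sketched standalone (hypothetical selector and replacement names; not the patch's code):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // A use of a deprecated two-slot selector 'drawAt:size:' whose
      // availability attribute names the replacement "renderAt:withSize:".
      std::vector<std::string> UseSlots = {"drawAt", "size"};
      std::vector<std::string> NewSlots = {"renderAt", "withSize"};
      // One fix-it per selector slot, instead of a single replacement that
      // only rewrote the first name:
      for (size_t I = 0; I < UseSlots.size(); ++I)
        std::printf("fix-it: '%s' -> '%s'\n", UseSlots[I].c_str(),
                    NewSlots[I].c_str());
    }
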
if (!Message.empty()) {
- S.Diag(Loc, diag_message) << ReferringDecl << Message
- << (UseRange.isValid() ?
- FixItHint::CreateReplacement(UseRange, Replacement) : FixItHint());
+ S.Diag(Loc, diag_message) << ReferringDecl << Message << FixIts;
if (ObjCProperty)
S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
<< ObjCProperty->getDeclName() << property_note_select;
} else if (!UnknownObjCClass) {
- S.Diag(Loc, diag) << ReferringDecl
- << (UseRange.isValid() ?
- FixItHint::CreateReplacement(UseRange, Replacement) : FixItHint());
+ S.Diag(Loc, diag) << ReferringDecl << FixIts;
if (ObjCProperty)
S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
<< ObjCProperty->getDeclName() << property_note_select;
} else {
- S.Diag(Loc, diag_fwdclass_message) << ReferringDecl
- << (UseRange.isValid() ?
- FixItHint::CreateReplacement(UseRange, Replacement) : FixItHint());
+ S.Diag(Loc, diag_fwdclass_message) << ReferringDecl << FixIts;
S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
}
@@ -7325,8 +7396,9 @@ static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
DD.Triggered = true;
DoEmitAvailabilityWarning(
S, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityReferringDecl(),
- DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(), DD.Loc,
- DD.getUnknownObjCClass(), DD.getObjCProperty(), false);
+ DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(),
+ DD.getAvailabilitySelectorLocs(), DD.getUnknownObjCClass(),
+ DD.getObjCProperty(), false);
}
void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
@@ -7387,7 +7459,8 @@ void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
const NamedDecl *ReferringDecl,
const NamedDecl *OffendingDecl,
- StringRef Message, SourceLocation Loc,
+ StringRef Message,
+ ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess) {
@@ -7395,14 +7468,14 @@ static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
S.DelayedDiagnostics.add(
DelayedDiagnostic::makeAvailability(
- AR, Loc, ReferringDecl, OffendingDecl, UnknownObjCClass,
+ AR, Locs, ReferringDecl, OffendingDecl, UnknownObjCClass,
ObjCProperty, Message, ObjCPropertyAccess));
return;
}
Decl *Ctx = cast<Decl>(S.getCurLexicalContext());
DoEmitAvailabilityWarning(S, AR, Ctx, ReferringDecl, OffendingDecl,
- Message, Loc, UnknownObjCClass, ObjCProperty,
+ Message, Locs, UnknownObjCClass, ObjCProperty,
ObjCPropertyAccess);
}
@@ -7471,7 +7544,7 @@ public:
}
};
-/// \brief This class implements -Wunguarded-availability.
+/// This class implements -Wunguarded-availability.
///
/// This is done with a traversal of the AST of a function that makes reference
/// to a partially available declaration. Whenever we encounter an \c if of the
@@ -7642,7 +7715,7 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
SourceLocation StmtEndLoc =
SM.getExpansionRange(
(LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getLocEnd())
- .second;
+ .getEnd();
if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
return;
@@ -7681,11 +7754,11 @@ bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
if (Range.isInvalid())
return true;
- if (const TagType *TT = dyn_cast<TagType>(TyPtr)) {
+ if (const auto *TT = dyn_cast<TagType>(TyPtr)) {
TagDecl *TD = TT->getDecl();
DiagnoseDeclAvailability(TD, Range);
- } else if (const TypedefType *TD = dyn_cast<TypedefType>(TyPtr)) {
+ } else if (const auto *TD = dyn_cast<TypedefType>(TyPtr)) {
TypedefNameDecl *D = TD->getDecl();
DiagnoseDeclAvailability(D, Range);
@@ -7740,7 +7813,8 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
}
-void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
+void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
+ ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks) {
@@ -7769,7 +7843,7 @@ void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
}
const ObjCPropertyDecl *ObjCPDecl = nullptr;
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
if (PDeclResult == Result)
@@ -7777,6 +7851,6 @@ void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
}
}
- EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Loc,
+ EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Locs,
UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess);
}
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index aa26b37f444d..4cf3abdf5745 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
@@ -170,43 +171,40 @@ Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc,
if (EST == EST_None && Method->hasAttr<NoThrowAttr>())
EST = EST_BasicNoexcept;
- switch(EST) {
+ switch (EST) {
+ case EST_Unparsed:
+ case EST_Uninstantiated:
+ case EST_Unevaluated:
+ llvm_unreachable("should not see unresolved exception specs here");
+
// If this function can throw any exceptions, make a note of that.
case EST_MSAny:
case EST_None:
+ // FIXME: Whichever we see last of MSAny and None determines our result.
+ // We should make a consistent, order-independent choice here.
ClearExceptions();
ComputedEST = EST;
return;
+ case EST_NoexceptFalse:
+ ClearExceptions();
+ ComputedEST = EST_None;
+ return;
// FIXME: If the call to this decl is using any of its default arguments, we
// need to search them for potentially-throwing calls.
// If this function has a basic noexcept, it doesn't affect the outcome.
case EST_BasicNoexcept:
+ case EST_NoexceptTrue:
return;
- // If we're still at noexcept(true) and there's a nothrow() callee,
+ // If we're still at noexcept(true) and there's a throw() callee,
// change to that specification.
case EST_DynamicNone:
if (ComputedEST == EST_BasicNoexcept)
ComputedEST = EST_DynamicNone;
return;
- // Check out noexcept specs.
- case EST_ComputedNoexcept:
- {
- FunctionProtoType::NoexceptResult NR =
- Proto->getNoexceptSpec(Self->Context);
- assert(NR != FunctionProtoType::NR_NoNoexcept &&
- "Must have noexcept result for EST_ComputedNoexcept.");
- assert(NR != FunctionProtoType::NR_Dependent &&
- "Should not generate implicit declarations for dependent cases, "
- "and don't know how to handle them anyway.");
- // noexcept(false) -> no spec on the new function
- if (NR == FunctionProtoType::NR_Throw) {
- ClearExceptions();
- ComputedEST = EST_None;
- }
- // noexcept(true) won't change anything either.
- return;
- }
- default:
+ case EST_DependentNoexcept:
+ llvm_unreachable(
+ "should not generate implicit declarations for dependent cases");
+ case EST_Dynamic:
break;
}
assert(EST == EST_Dynamic && "EST case not considered earlier.");
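
As a sketch of the merge semantics in the switch above (my example; throw() requires -std=c++14 or earlier):

    struct A { A() noexcept; };        // EST_NoexceptTrue: leaves result alone
    struct B { B() throw(); };         // EST_DynamicNone: tightens a basic noexcept
    struct C { C() noexcept(false); }; // EST_NoexceptFalse: drops the spec to none
    // For the implicitly declared D::D(), which calls A(), B(), and C(), the
    // computed specification ends up as EST_None once C() is visited.
    struct D { A a; B b; C c; };
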
@@ -712,7 +710,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Diag(Decomp.getLSquareLoc(),
!getLangOpts().CPlusPlus17
? diag::ext_decomp_decl
- : D.getContext() == Declarator::ConditionContext
+ : D.getContext() == DeclaratorContext::ConditionContext
? diag::ext_decomp_decl_cond
: diag::warn_cxx14_compat_decomp_decl)
<< Decomp.getSourceRange();
@@ -1459,7 +1457,7 @@ void Sema::CheckCompleteDecompositionDeclaration(DecompositionDecl *DD) {
DD->setInvalidDecl();
}
-/// \brief Merge the exception specifications of two variable declarations.
+/// Merge the exception specifications of two variable declarations.
///
/// This is called when there's a redeclaration of a VarDecl. The function
/// checks if the redeclaration might have an exception specification and
@@ -1575,7 +1573,7 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
return true;
}
-/// \brief Get diagnostic %select index for tag kind for
+/// Get diagnostic %select index for tag kind for
/// record diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
@@ -2061,27 +2059,39 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
return true;
}
+/// Get the class that is directly named by the current context. This is the
+/// class for which an unqualified-id in this scope could name a constructor
+/// or destructor.
+///
+/// If the scope specifier denotes a class, this will be that class.
+/// If the scope specifier is empty, this will be the class whose
+/// member-specification we are currently within. Otherwise, there
+/// is no such class.
+CXXRecordDecl *Sema::getCurrentClass(Scope *, const CXXScopeSpec *SS) {
+ assert(getLangOpts().CPlusPlus && "No class names in C!");
+
+ if (SS && SS->isInvalid())
+ return nullptr;
+
+ if (SS && SS->isNotEmpty()) {
+ DeclContext *DC = computeDeclContext(*SS, true);
+ return dyn_cast_or_null<CXXRecordDecl>(DC);
+ }
+
+ return dyn_cast_or_null<CXXRecordDecl>(CurContext);
+}
+
/// isCurrentClassName - Determine whether the identifier II is the
/// name of the class type currently being defined. In the case of
/// nested classes, this will only return true if II is the name of
/// the innermost class.
-bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *,
+bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS) {
- assert(getLangOpts().CPlusPlus && "No class names in C!");
-
- CXXRecordDecl *CurDecl;
- if (SS && SS->isSet() && !SS->isInvalid()) {
- DeclContext *DC = computeDeclContext(*SS, true);
- CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC);
- } else
- CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext);
-
- if (CurDecl && CurDecl->getIdentifier())
- return &II == CurDecl->getIdentifier();
- return false;
+ CXXRecordDecl *CurDecl = getCurrentClass(S, SS);
+ return CurDecl && &II == CurDecl->getIdentifier();
}
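
What the refactored lookup answers, roughly (illustrative):

    struct Outer {
      struct Inner {
        Inner();         // unqualified 'Inner' names the current class:
      };                 // isCurrentClassName -> true, so it is a constructor
    };
    // With the scope specifier 'Outer::Inner::', getCurrentClass resolves to
    // Inner, so the trailing 'Inner' again names a constructor.
    Outer::Inner::Inner() {}
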
-/// \brief Determine whether the identifier II is a typo for the name of
+/// Determine whether the identifier II is a typo for the name of
/// the class type currently being defined. If so, update it to the identifier
/// that should have been used.
bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
@@ -2107,7 +2117,7 @@ bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
return false;
}
-/// \brief Determine whether the given class is a base class of the given
+/// Determine whether the given class is a base class of the given
/// class, including looking at dependent bases.
static bool findCircularInheritance(const CXXRecordDecl *Class,
const CXXRecordDecl *Current) {
@@ -2139,7 +2149,7 @@ static bool findCircularInheritance(const CXXRecordDecl *Class,
return false;
}
-/// \brief Check the validity of a C++ base class specifier.
+/// Check the validity of a C++ base class specifier.
///
/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
/// and returns NULL otherwise.
@@ -2233,6 +2243,19 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
CXXRecordDecl *CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
assert(CXXBaseDecl && "Base type is not a C++ type");
+ // Microsoft docs say:
+ // "If a base-class has a code_seg attribute, derived classes must have the
+ // same attribute."
+ const auto *BaseCSA = CXXBaseDecl->getAttr<CodeSegAttr>();
+ const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
+ if ((DerivedCSA || BaseCSA) &&
+ (!BaseCSA || !DerivedCSA || BaseCSA->getName() != DerivedCSA->getName())) {
+ Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
+ Diag(CXXBaseDecl->getLocation(), diag::note_base_class_specified_here)
+ << CXXBaseDecl;
+ return nullptr;
+ }
+
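
An MS-compatibility example the new check rejects (illustrative; __declspec spelling assumed):

    struct __declspec(code_seg("seg1")) Base {};
    struct __declspec(code_seg("seg2")) Bad1 : Base {}; // error: different segment
    struct Bad2 : Base {};                              // error: attribute missing
    struct __declspec(code_seg("seg1")) Ok : Base {};   // same segment: accepted
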
// A class which contains a flexible array member is not suitable for use as a
// base class:
// - If the layout determines that a base comes before another base,
@@ -2290,18 +2313,13 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
// We do not support any C++11 attributes on base-specifiers yet.
// Diagnose any attributes we see.
- if (!Attributes.empty()) {
- for (AttributeList *Attr = Attributes.getList(); Attr;
- Attr = Attr->getNext()) {
- if (Attr->isInvalid() ||
- Attr->getKind() == AttributeList::IgnoredAttribute)
- continue;
- Diag(Attr->getLoc(),
- Attr->getKind() == AttributeList::UnknownAttribute
- ? diag::warn_unknown_attribute_ignored
- : diag::err_base_specifier_attribute)
- << Attr->getName();
- }
+ for (const ParsedAttr &AL : Attributes) {
+ if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
+ continue;
+ Diag(AL.getLoc(), AL.getKind() == ParsedAttr::UnknownAttribute
+ ? diag::warn_unknown_attribute_ignored
+ : diag::err_base_specifier_attribute)
+ << AL.getName();
}
TypeSourceInfo *TInfo = nullptr;
@@ -2326,7 +2344,7 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
/// locally, there's no need to abstract the small size parameter.
typedef llvm::SmallPtrSet<QualType, 4> IndirectBaseSet;
-/// \brief Recursively add the bases of Type. Don't add Type itself.
+/// Recursively add the bases of Type. Don't add Type itself.
static void
NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set,
const QualType &Type)
@@ -2347,7 +2365,7 @@ NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set,
}
}
-/// \brief Performs the actual work of attaching the given base class
+/// Performs the actual work of attaching the given base class
/// specifiers to a C++ class.
bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases) {
@@ -2404,7 +2422,7 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
// The Microsoft extension __interface does not permit bases that
// are not themselves public interfaces.
Diag(KnownBase->getLocStart(), diag::err_invalid_base_in_interface)
- << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getName()
+ << getRecordDiagFromTagKind(RD->getTagKind()) << RD
<< RD->getSourceRange();
Invalid = true;
}
@@ -2417,9 +2435,16 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
// Attach the remaining base class specifiers to the derived class.
Class->setBases(Bases.data(), NumGoodBases);
+ // Check that any duplicate base classes are virtual.
for (unsigned idx = 0; idx < NumGoodBases; ++idx) {
// Check whether this direct base is inaccessible due to ambiguity.
QualType BaseType = Bases[idx]->getType();
+
+ // Skip all dependent types in templates being used as base specifiers.
+ // Checks below assume that the base specifier is a CXXRecord.
+ if (BaseType->isDependentType())
+ continue;
+
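
The dependent-type skip matters for bases like these (my example); duplication and ambiguity checks are deferred until instantiation, when the bases are real CXXRecords:

    template <class... Ts>
    struct Mixin : Ts... {}; // each Ts is dependent here, so it is skipped
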
CanQualType CanonicalBase = Context.getCanonicalType(BaseType)
.getUnqualifiedType();
@@ -2459,7 +2484,7 @@ void Sema::ActOnBaseSpecifiers(Decl *ClassDecl,
AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl), Bases);
}
-/// \brief Determine whether the type \p Derived is a C++ class that is
+/// Determine whether the type \p Derived is a C++ class that is
/// derived from the type \p Base.
bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base) {
if (!getLangOpts().CPlusPlus)
@@ -2486,7 +2511,7 @@ bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base) {
return DerivedRD->isDerivedFrom(BaseRD);
}
-/// \brief Determine whether the type \p Derived is a C++ class that is
+/// Determine whether the type \p Derived is a C++ class that is
/// derived from the type \p Base.
bool Sema::IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths) {
@@ -2638,7 +2663,7 @@ Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
}
-/// @brief Builds a string representing ambiguous paths from a
+/// Builds a string representing ambiguous paths from a
/// specific derived class to different subobjects of the same base
/// class.
///
@@ -2674,10 +2699,9 @@ std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
//===----------------------------------------------------------------------===//
/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
-bool Sema::ActOnAccessSpecifier(AccessSpecifier Access,
- SourceLocation ASLoc,
+bool Sema::ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
- AttributeList *Attrs) {
+ const ParsedAttributesView &Attrs) {
assert(Access != AS_none && "Invalid kind for syntactic access specifier!");
AccessSpecDecl *ASDecl = AccessSpecDecl::Create(Context, Access, CurContext,
ASLoc, ColonLoc);
@@ -2764,7 +2788,7 @@ void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
SourceLocation Loc = MD->getLocation();
SourceLocation SpellingLoc = Loc;
if (getSourceManager().isMacroArgExpansion(Loc))
- SpellingLoc = getSourceManager().getImmediateExpansionRange(Loc).first;
+ SpellingLoc = getSourceManager().getImmediateExpansionRange(Loc).getBegin();
SpellingLoc = getSourceManager().getSpellingLoc(SpellingLoc);
if (SpellingLoc.isValid() && getSourceManager().isInSystemHeader(SpellingLoc))
return;
@@ -2805,10 +2829,13 @@ static bool InitializationHasSideEffects(const FieldDecl &FD) {
return false;
}
-static AttributeList *getMSPropertyAttr(AttributeList *list) {
- for (AttributeList *it = list; it != nullptr; it = it->getNext())
- if (it->isDeclspecPropertyAttribute())
- return it;
+static const ParsedAttr *getMSPropertyAttr(const ParsedAttributesView &list) {
+ ParsedAttributesView::const_iterator Itr =
+ llvm::find_if(list, [](const ParsedAttr &AL) {
+ return AL.isDeclspecPropertyAttribute();
+ });
+ if (Itr != list.end())
+ return &*Itr;
return nullptr;
}
@@ -2855,7 +2882,7 @@ void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
if (AS_none !=
CXXRecordDecl::MergeAccess(P.Access, BaseField->getAccess())) {
Diag(Loc, diag::warn_shadow_field)
- << FieldName.getAsString() << RD->getName() << Base->getName();
+ << FieldName << RD << Base;
Diag(BaseField->getLocation(), diag::note_shadow_field);
Bases.erase(It);
}
@@ -2887,8 +2914,8 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
assert(!DS.isFriendSpecified());
bool isFunc = D.isDeclarationOfFunction();
- AttributeList *MSPropertyAttr =
- getMSPropertyAttr(D.getDeclSpec().getAttributes().getList());
+ const ParsedAttr *MSPropertyAttr =
+ getMSPropertyAttr(D.getDeclSpec().getAttributes());
if (cast<CXXRecordDecl>(CurContext)->isInterface()) {
// The Microsoft extension __interface only permits public member functions
@@ -3044,7 +3071,9 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// int X::member;
// };
if (DeclContext *DC = computeDeclContext(SS, false))
- diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc());
+ diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc(),
+ D.getName().getKind() ==
+ UnqualifiedIdKind::IK_TemplateId);
else
Diag(D.getIdentifierLoc(), diag::err_member_qualification)
<< Name << SS.getRange();
@@ -3054,7 +3083,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
if (MSPropertyAttr) {
Member = HandleMSProperty(S, cast<CXXRecordDecl>(CurContext), Loc, D,
- BitWidth, InitStyle, AS, MSPropertyAttr);
+ BitWidth, InitStyle, AS, *MSPropertyAttr);
if (!Member)
return nullptr;
isInstField = false;
@@ -3096,14 +3125,38 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
Member->setInvalidDecl();
}
+ NamedDecl *NonTemplateMember = Member;
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Member))
+ NonTemplateMember = FunTmpl->getTemplatedDecl();
+ else if (VarTemplateDecl *VarTmpl = dyn_cast<VarTemplateDecl>(Member))
+ NonTemplateMember = VarTmpl->getTemplatedDecl();
+
Member->setAccess(AS);
// If we have declared a member function template or static data member
// template, set the access of the templated declaration as well.
- if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Member))
- FunTmpl->getTemplatedDecl()->setAccess(AS);
- else if (VarTemplateDecl *VarTmpl = dyn_cast<VarTemplateDecl>(Member))
- VarTmpl->getTemplatedDecl()->setAccess(AS);
+ if (NonTemplateMember != Member)
+ NonTemplateMember->setAccess(AS);
+
+ // C++ [temp.deduct.guide]p3:
+ // A deduction guide [...] for a member class template [shall be
+ // declared] with the same access [as the template].
+ if (auto *DG = dyn_cast<CXXDeductionGuideDecl>(NonTemplateMember)) {
+ auto *TD = DG->getDeducedTemplate();
+ if (AS != TD->getAccess()) {
+ Diag(DG->getLocStart(), diag::err_deduction_guide_wrong_access);
+ Diag(TD->getLocStart(), diag::note_deduction_guide_template_access)
+ << TD->getAccess();
+ const AccessSpecDecl *LastAccessSpec = nullptr;
+ for (const auto *D : cast<CXXRecordDecl>(CurContext)->decls()) {
+ if (const auto *AccessSpec = dyn_cast<AccessSpecDecl>(D))
+ LastAccessSpec = AccessSpec;
+ }
+ assert(LastAccessSpec && "differing access with no access specifier");
+ Diag(LastAccessSpec->getLocStart(), diag::note_deduction_guide_access)
+ << AS;
+ }
+ }
}
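
An example the new C++17 [temp.deduct.guide]p3 check diagnoses (illustrative):

    class Holder {
      template <class T> struct Box { Box(T); }; // private member class template
    public:
      template <class T> Box(T) -> Box<T>; // error: guide's access differs from
                                           // the template it deduces for
    };
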
if (VS.isOverrideSpecified())
@@ -3544,7 +3597,7 @@ namespace {
}
} // namespace
-/// \brief Enter a new C++ default initializer scope. After calling this, the
+/// Enter a new C++ default initializer scope. After calling this, the
/// caller must call \ref ActOnFinishCXXInClassMemberInitializer, even if
/// parsing or instantiating the initializer failed.
void Sema::ActOnStartCXXInClassMemberInitializer() {
@@ -3553,7 +3606,7 @@ void Sema::ActOnStartCXXInClassMemberInitializer() {
PushFunctionScope();
}
-/// \brief This is invoked after parsing an in-class initializer for a
+/// This is invoked after parsing an in-class initializer for a
/// non-static C++ class member, and after instantiating an in-class initializer
/// in a class template. Such actions are deferred until the class is complete.
void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
@@ -3581,10 +3634,14 @@ void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
ExprResult Init = InitExpr;
if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) {
- InitializedEntity Entity = InitializedEntity::InitializeMember(FD);
- InitializationKind Kind = FD->getInClassInitStyle() == ICIS_ListInit
- ? InitializationKind::CreateDirectList(InitExpr->getLocStart())
- : InitializationKind::CreateCopy(InitExpr->getLocStart(), InitLoc);
+ InitializedEntity Entity =
+ InitializedEntity::InitializeMemberFromDefaultMemberInitializer(FD);
+ InitializationKind Kind =
+ FD->getInClassInitStyle() == ICIS_ListInit
+ ? InitializationKind::CreateDirectList(InitExpr->getLocStart(),
+ InitExpr->getLocStart(),
+ InitExpr->getLocEnd())
+ : InitializationKind::CreateCopy(InitExpr->getLocStart(), InitLoc);
InitializationSequence Seq(*this, Entity, Kind, InitExpr);
Init = Seq.Perform(*this, Entity, Kind, InitExpr);
if (Init.isInvalid()) {
@@ -3607,7 +3664,7 @@ void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
FD->setInClassInitializer(InitExpr);
}
-/// \brief Find the direct and/or virtual base specifiers that
+/// Find the direct and/or virtual base specifiers that
/// correspond to the given base type, for use in base initialization
/// within a constructor.
static bool FindBaseInitializer(Sema &SemaRef,
@@ -3651,7 +3708,7 @@ static bool FindBaseInitializer(Sema &SemaRef,
return DirectBaseSpec || VirtualBaseSpec;
}
-/// \brief Handle a C++ member initializer using braced-init-list syntax.
+/// Handle a C++ member initializer using braced-init-list syntax.
MemInitResult
Sema::ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
@@ -3667,7 +3724,7 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
EllipsisLoc);
}
-/// \brief Handle a C++ member initializer using parentheses syntax.
+/// Handle a C++ member initializer using parentheses syntax.
MemInitResult
Sema::ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
@@ -3710,7 +3767,7 @@ private:
}
-/// \brief Handle a C++ member initializer.
+/// Handle a C++ member initializer.
MemInitResult
Sema::BuildMemInitializer(Decl *ConstructorD,
Scope *S,
@@ -3889,53 +3946,6 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
return BuildBaseInitializer(BaseType, TInfo, Init, ClassDecl, EllipsisLoc);
}
-/// Checks a member initializer expression for cases where reference (or
-/// pointer) members are bound to by-value parameters (or their addresses).
-static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member,
- Expr *Init,
- SourceLocation IdLoc) {
- QualType MemberTy = Member->getType();
-
- // We only handle pointers and references currently.
- // FIXME: Would this be relevant for ObjC object pointers? Or block pointers?
- if (!MemberTy->isReferenceType() && !MemberTy->isPointerType())
- return;
-
- const bool IsPointer = MemberTy->isPointerType();
- if (IsPointer) {
- if (const UnaryOperator *Op
- = dyn_cast<UnaryOperator>(Init->IgnoreParenImpCasts())) {
- // The only case we're worried about with pointers requires taking the
- // address.
- if (Op->getOpcode() != UO_AddrOf)
- return;
-
- Init = Op->getSubExpr();
- } else {
- // We only handle address-of expression initializers for pointers.
- return;
- }
- }
-
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Init->IgnoreParens())) {
- // We only warn when referring to a non-reference parameter declaration.
- const ParmVarDecl *Parameter = dyn_cast<ParmVarDecl>(DRE->getDecl());
- if (!Parameter || Parameter->getType()->isReferenceType())
- return;
-
- S.Diag(Init->getExprLoc(),
- IsPointer ? diag::warn_init_ptr_member_to_parameter_addr
- : diag::warn_bind_ref_member_to_parameter)
- << Member << Parameter << Init->getSourceRange();
- } else {
- // Other initializers are fine.
- return;
- }
-
- S.Diag(Member->getLocation(), diag::note_ref_or_ptr_member_declared_here)
- << (unsigned)IsPointer;
-}
-
MemInitResult
Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
SourceLocation IdLoc) {
@@ -3979,9 +3989,10 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
: InitializedEntity::InitializeMember(IndirectMember,
nullptr);
InitializationKind Kind =
- InitList ? InitializationKind::CreateDirectList(IdLoc)
- : InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
- InitRange.getEnd());
+ InitList ? InitializationKind::CreateDirectList(
+ IdLoc, Init->getLocStart(), Init->getLocEnd())
+ : InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
+ InitRange.getEnd());
InitializationSequence InitSeq(*this, MemberEntity, Kind, Args);
ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind, Args,
@@ -3989,8 +4000,6 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
if (MemberInit.isInvalid())
return true;
- CheckForDanglingReferenceOrPointer(*this, Member, MemberInit.get(), IdLoc);
-
// C++11 [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
@@ -4033,9 +4042,10 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
QualType(ClassDecl->getTypeForDecl(), 0));
InitializationKind Kind =
- InitList ? InitializationKind::CreateDirectList(NameLoc)
- : InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
- InitRange.getEnd());
+ InitList ? InitializationKind::CreateDirectList(
+ NameLoc, Init->getLocStart(), Init->getLocEnd())
+ : InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
+ InitRange.getEnd());
InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args);
ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
Args, nullptr);
@@ -4167,9 +4177,9 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
InitializedEntity BaseEntity =
InitializedEntity::InitializeBase(Context, BaseSpec, VirtualBaseSpec);
InitializationKind Kind =
- InitList ? InitializationKind::CreateDirectList(BaseLoc)
- : InitializationKind::CreateDirect(BaseLoc, InitRange.getBegin(),
- InitRange.getEnd());
+ InitList ? InitializationKind::CreateDirectList(BaseLoc)
+ : InitializationKind::CreateDirect(BaseLoc, InitRange.getBegin(),
+ InitRange.getEnd());
InitializationSequence InitSeq(*this, BaseEntity, Kind, Args);
ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind, Args, nullptr);
if (BaseInit.isInvalid())
@@ -4321,7 +4331,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
QualType ParamType = Param->getType().getNonReferenceType();
// Suppress copying zero-width bitfields.
- if (Field->isBitField() && Field->getBitWidthValue(SemaRef.Context) == 0)
+ if (Field->isZeroLengthBitField(SemaRef.Context))
return false;
Expr *MemberExprBase =
@@ -4536,7 +4546,7 @@ struct BaseAndFieldInfo {
return !FieldRD->hasInClassInitializer();
}
- /// \brief Determine whether the given field is, or is within, a union member
+ /// Determine whether the given field is, or is within, a union member
/// that is inactive (because there was an initializer given for a different
/// member of the union, or because the union was not initialized at all).
bool isWithinInactiveUnionMember(FieldDecl *Field,
@@ -4554,7 +4564,7 @@ struct BaseAndFieldInfo {
};
}
-/// \brief Determine whether the given type is an incomplete or zero-lenfgth
+/// Determine whether the given type is an incomplete or zero-length
/// array type.
static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
if (T->isIncompleteArrayType())
@@ -4601,6 +4611,10 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
SemaRef.BuildCXXDefaultInitExpr(Info.Ctor->getLocation(), Field);
if (DIE.isInvalid())
return true;
+
+ auto Entity = InitializedEntity::InitializeMember(Field, nullptr, true);
+ SemaRef.checkInitializerLifetime(Entity, DIE.get());
+
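
The added lifetime check covers default member initializers applied by an implicit constructor; a case it can flag (illustrative):

    struct S {
      const int &R = 0; // binds to a temporary that dies with the constructor
    };
    S s;                // implicit S::S() applies the initializer: R dangles
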
CXXCtorInitializer *Init;
if (Indirect)
Init = new (SemaRef.Context)
@@ -5464,7 +5478,7 @@ static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
}
}
-static void ReferenceDllExportedMethods(Sema &S, CXXRecordDecl *Class) {
+static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
Attr *ClassAttr = getDLLAttr(Class);
if (!ClassAttr)
return;
@@ -5479,6 +5493,14 @@ static void ReferenceDllExportedMethods(Sema &S, CXXRecordDecl *Class) {
return;
for (Decl *Member : Class->decls()) {
+ // Defined static variables that are members of an exported base
+ // class must be marked dllexport too.
+ auto *VD = dyn_cast<VarDecl>(Member);
+ if (VD && Member->getAttr<DLLExportAttr>() &&
+ VD->getStorageClass() == SC_Static &&
+ TSK == TSK_ImplicitInstantiation)
+ S.MarkVariableReferenced(VD->getLocation(), VD);
+
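
My reading of the scenario the static-member marking addresses (illustrative, MS compatibility mode):

    template <class T> struct Base { static int Counter; };
    template <class T> int Base<T>::Counter = 0;
    // Exporting Derived propagates dllexport to the implicitly instantiated
    // Base<int>; its defined static member must be referenced so that a
    // definition is emitted into the DLL.
    struct __declspec(dllexport) Derived : Base<int> {};
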
auto *MD = dyn_cast<CXXMethodDecl>(Member);
if (!MD)
continue;
@@ -5506,7 +5528,7 @@ static void ReferenceDllExportedMethods(Sema &S, CXXRecordDecl *Class) {
S.MarkFunctionReferenced(Class->getLocation(), MD);
if (Trap.hasErrorOccurred()) {
S.Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
- << Class->getName() << !S.getLangOpts().CPlusPlus11;
+ << Class << !S.getLangOpts().CPlusPlus11;
break;
}
@@ -5556,7 +5578,17 @@ static void checkForMultipleExportedDefaultConstructors(Sema &S,
}
}
-/// \brief Check class-level dllimport/dllexport attribute.
+void Sema::checkClassLevelCodeSegAttribute(CXXRecordDecl *Class) {
+ // Mark any compiler-generated routines with the implicit code_seg attribute.
+ for (auto *Method : Class->methods()) {
+ if (Method->isUserProvided())
+ continue;
+ if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(Method, /*IsDefinition=*/true))
+ Method->addAttr(A);
+ }
+}
+
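
Intended effect of the new hook, sketched (illustrative):

    struct __declspec(code_seg("hot")) Widget {
      void UserProvided();  // unaffected: user-provided
      // The implicit default constructor, destructor, and copy operations are
      // not user-provided, so each gets the implicit code_seg("hot") attribute
      // and is emitted into that section.
    };
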
+/// Check class-level dllimport/dllexport attribute.
void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
Attr *ClassAttr = getDLLAttr(Class);
@@ -5606,6 +5638,13 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
// The class is either imported or exported.
const bool ClassExported = ClassAttr->getKind() == attr::DLLExport;
+ // Check if this was a dllimport attribute propagated from a derived class to
+ // a base class template specialization. We don't apply these attributes to
+ // static data members.
+ const bool PropagatedImport =
+ !ClassExported &&
+ cast<DLLImportAttr>(ClassAttr)->wasPropagatedToBaseTemplate();
+
TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind();
// Ignore explicit dllexport on explicit class template instantiation declarations.
@@ -5657,6 +5696,11 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
}
}
+ // Don't apply dllimport attributes to static data members of class template
+ // instantiations when the attribute is propagated from a derived class.
+ if (VD && PropagatedImport)
+ continue;
+
if (!cast<NamedDecl>(Member)->isExternallyVisible())
continue;
@@ -5665,6 +5709,21 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
NewAttr->setInherited(true);
Member->addAttr(NewAttr);
+
+ if (MD) {
+ // Propagate DLLAttr to friend re-declarations of MD that have already
+ // been constructed.
+ for (FunctionDecl *FD = MD->getMostRecentDecl(); FD;
+ FD = FD->getPreviousDecl()) {
+ if (FD->getFriendObjectKind() == Decl::FOK_None)
+ continue;
+ assert(!getDLLAttr(FD) &&
+ "friend re-decl should not already have a DLLAttr");
+ NewAttr = cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ NewAttr->setInherited(true);
+ FD->addAttr(NewAttr);
+ }
+ }
}
}
@@ -5672,7 +5731,7 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
DelayedDllExportClasses.push_back(Class);
}
-/// \brief Perform propagation of DLL attributes from a derived class to a
+/// Perform propagation of DLL attributes from a derived class to a
/// templated base class for MS compatibility.
void Sema::propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
@@ -5694,6 +5753,11 @@ void Sema::propagateDLLAttrToBaseClassTemplate(
NewAttr->setInherited(true);
BaseTemplateSpec->addAttr(NewAttr);
+ // If this was an import, mark that we propagated it from a derived class to
+ // a base class template specialization.
+ if (auto *ImportAttr = dyn_cast<DLLImportAttr>(NewAttr))
+ ImportAttr->setPropagatedToBaseTemplate();
+
// If the template is already instantiated, checkDLLAttributeRedeclaration()
// needs to be run again to see the new attribute. Otherwise this will
// get run whenever the template is instantiated.
@@ -5756,10 +5820,74 @@ static void DefineImplicitSpecialMember(Sema &S, CXXMethodDecl *MD,
/// Determine whether a type is permitted to be passed or returned in
/// registers, per C++ [class.temporary]p3.
-static bool computeCanPassInRegisters(Sema &S, CXXRecordDecl *D) {
+static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
+ TargetInfo::CallingConvKind CCK) {
if (D->isDependentType() || D->isInvalidDecl())
return false;
+ // Clang <= 4 used the pre-C++11 rule, which ignores move operations.
+ // The PS4 platform ABI follows the behavior of Clang 3.2.
+ if (CCK == TargetInfo::CCK_ClangABI4OrPS4)
+ return !D->hasNonTrivialDestructorForCall() &&
+ !D->hasNonTrivialCopyConstructorForCall();
+
+ if (CCK == TargetInfo::CCK_MicrosoftWin64) {
+ bool CopyCtorIsTrivial = false, CopyCtorIsTrivialForCall = false;
+ bool DtorIsTrivialForCall = false;
+
+ // If a class has at least one non-deleted, trivial copy constructor, it
+ // is passed according to the C ABI. Otherwise, it is passed indirectly.
+ //
+ // Note: This permits classes with non-trivial copy or move ctors to be
+ // passed in registers, so long as they *also* have a trivial copy ctor,
+ // which is non-conforming.
+ if (D->needsImplicitCopyConstructor()) {
+ if (!D->defaultedCopyConstructorIsDeleted()) {
+ if (D->hasTrivialCopyConstructor())
+ CopyCtorIsTrivial = true;
+ if (D->hasTrivialCopyConstructorForCall())
+ CopyCtorIsTrivialForCall = true;
+ }
+ } else {
+ for (const CXXConstructorDecl *CD : D->ctors()) {
+ if (CD->isCopyConstructor() && !CD->isDeleted()) {
+ if (CD->isTrivial())
+ CopyCtorIsTrivial = true;
+ if (CD->isTrivialForCall())
+ CopyCtorIsTrivialForCall = true;
+ }
+ }
+ }
+
+ if (D->needsImplicitDestructor()) {
+ if (!D->defaultedDestructorIsDeleted() &&
+ D->hasTrivialDestructorForCall())
+ DtorIsTrivialForCall = true;
+ } else if (const auto *DD = D->getDestructor()) {
+ if (!DD->isDeleted() && DD->isTrivialForCall())
+ DtorIsTrivialForCall = true;
+ }
+
+ // If the copy ctor and dtor are both trivial-for-calls, pass direct.
+ if (CopyCtorIsTrivialForCall && DtorIsTrivialForCall)
+ return true;
+
+ // If a class has a destructor, we'd really like to pass it indirectly
+ // because it allows us to elide copies. Unfortunately, MSVC makes that
+ // impossible for small types, which it will pass in a single register or
+ // stack slot. Most objects with dtors are large-ish, so handle that early.
+ // We can't call out all large objects as being indirect because there are
+ // multiple x64 calling conventions and the C++ ABI code shouldn't dictate
+ // how we pass large POD types.
+
+ // Note: This permits small classes with nontrivial destructors to be
+ // passed in registers, which is non-conforming.
+ if (CopyCtorIsTrivial &&
+ S.getASTContext().getTypeSize(D->getTypeForDecl()) <= 64)
+ return true;
+ return false;
+ }
+
// Per C++ [class.temporary]p3, the relevant condition is:
// each copy constructor, move constructor, and destructor of X is
// either trivial or deleted, and X has at least one non-deleted copy
@@ -5768,20 +5896,20 @@ static bool computeCanPassInRegisters(Sema &S, CXXRecordDecl *D) {
if (D->needsImplicitCopyConstructor() &&
!D->defaultedCopyConstructorIsDeleted()) {
- if (!D->hasTrivialCopyConstructor())
+ if (!D->hasTrivialCopyConstructorForCall())
return false;
HasNonDeletedCopyOrMove = true;
}
if (S.getLangOpts().CPlusPlus11 && D->needsImplicitMoveConstructor() &&
!D->defaultedMoveConstructorIsDeleted()) {
- if (!D->hasTrivialMoveConstructor())
+ if (!D->hasTrivialMoveConstructorForCall())
return false;
HasNonDeletedCopyOrMove = true;
}
if (D->needsImplicitDestructor() && !D->defaultedDestructorIsDeleted() &&
- !D->hasTrivialDestructor())
+ !D->hasTrivialDestructorForCall())
return false;
for (const CXXMethodDecl *MD : D->methods()) {
@@ -5794,14 +5922,14 @@ static bool computeCanPassInRegisters(Sema &S, CXXRecordDecl *D) {
else if (!isa<CXXDestructorDecl>(MD))
continue;
- if (!MD->isTrivial())
+ if (!MD->isTrivialForCall())
return false;
}
return HasNonDeletedCopyOrMove;
}
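
How the classification plays out for a few simple types (my examples):

    struct Plain { int X; };               // trivial copy/move/dtor: registers
    struct Indirect { ~Indirect(); };      // non-trivial dtor: passed indirectly
    struct [[clang::trivial_abi]] Handle { // dtor is trivial *for calls* only,
      ~Handle();                           // so Handle may still be passed in
      void *Ptr;                           // registers despite the user dtor
    };
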
-/// \brief Perform semantic checks on a class definition that has been
+/// Perform semantic checks on a class definition that has been
/// completed, introducing implicitly-declared members, checking for
/// abstract types, etc.
void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
@@ -5851,10 +5979,11 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
DeclContext::lookup_result R = Record->lookup(Record->getDeclName());
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
++I) {
- NamedDecl *D = *I;
- if ((isa<FieldDecl>(D) && Record->hasUserDeclaredConstructor()) ||
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+ if (((isa<FieldDecl>(D) || isa<UnresolvedUsingValueDecl>(D)) &&
+ Record->hasUserDeclaredConstructor()) ||
isa<IndirectFieldDecl>(D)) {
- Diag(D->getLocation(), diag::err_member_name_of_class)
+ Diag((*I)->getLocation(), diag::err_member_name_of_class)
<< D->getDeclName();
break;
}
@@ -5878,6 +6007,17 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
}
}
+ // See if trivial_abi has to be dropped.
+ if (Record->hasAttr<TrivialABIAttr>())
+ checkIllFormedTrivialABIStruct(*Record);
+
+ // Set HasTrivialSpecialMemberForCall if the record has attribute
+ // "trivial_abi".
+ bool HasTrivialABI = Record->hasAttr<TrivialABIAttr>();
+
+ if (HasTrivialABI)
+ Record->setHasTrivialSpecialMemberForCall();
+
bool HasMethodWithOverrideControl = false,
HasOverridingMethodWithoutOverrideControl = false;
if (!Record->isDependentType()) {
@@ -5900,12 +6040,23 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
if (!M->isImplicit() && !M->isUserProvided()) {
if (CSM != CXXInvalid) {
M->setTrivial(SpecialMemberIsTrivial(M, CSM));
-
// Inform the class that we've finished declaring this member.
Record->finishedDefaultedOrDeletedMember(M);
+ M->setTrivialForCall(
+ HasTrivialABI ||
+ SpecialMemberIsTrivial(M, CSM, TAH_ConsiderTrivialABI));
+ Record->setTrivialForCallFlags(M);
}
}
+ // Set triviality for the purpose of calls if this is a user-provided
+ // copy/move constructor or destructor.
+ if ((CSM == CXXCopyConstructor || CSM == CXXMoveConstructor ||
+ CSM == CXXDestructor) && M->isUserProvided()) {
+ M->setTrivialForCall(HasTrivialABI);
+ Record->setTrivialForCallFlags(M);
+ }
+
if (!M->isInvalidDecl() && M->isExplicitlyDefaulted() &&
M->hasAttr<DLLExportAttr>()) {
if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015) &&
@@ -5945,8 +6096,35 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
}
checkClassLevelDLLAttribute(Record);
+ checkClassLevelCodeSegAttribute(Record);
+
+ bool ClangABICompat4 =
+ Context.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver4;
+ TargetInfo::CallingConvKind CCK =
+ Context.getTargetInfo().getCallingConvKind(ClangABICompat4);
+ bool CanPass = canPassInRegisters(*this, Record, CCK);
+
+ // Do not change ArgPassingRestrictions if it has already been set to
+ // APK_CanNeverPassInRegs.
+ if (Record->getArgPassingRestrictions() != RecordDecl::APK_CanNeverPassInRegs)
+ Record->setArgPassingRestrictions(CanPass
+ ? RecordDecl::APK_CanPassInRegs
+ : RecordDecl::APK_CannotPassInRegs);
- Record->setCanPassInRegisters(computeCanPassInRegisters(*this, Record));
+ // If canPassInRegisters returns true despite the record having a non-trivial
+ // destructor, the record is destructed in the callee. This happens only when
+ // the record or one of its subobjects has a field annotated with trivial_abi
+ // or a field qualified with ObjC __strong/__weak.
+ if (Context.getTargetInfo().getCXXABI().areArgsDestroyedLeftToRightInCallee())
+ Record->setParamDestroyedInCallee(true);
+ else if (Record->hasNonTrivialDestructor())
+ Record->setParamDestroyedInCallee(CanPass);
+
+ if (getLangOpts().ForceEmitVTables) {
+ // If we want to emit all the vtables, we need to mark it as used. This
+ // is especially required for cases like vtable assumption loads.
+ MarkVTableUsed(Record->getInnerLocStart(), Record);
+ }
}
/// Look up the special member function that would be called by a special
@@ -7017,9 +7195,14 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
///
/// If \p Selected is not \c NULL, \c *Selected will be filled in with the
/// member that was most likely to be intended to be trivial, if any.
+///
+/// If \p ForCall is true, look at CXXRecord::HasTrivialSpecialMembersForCall to
+/// determine whether the special member is trivial.
static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
Sema::CXXSpecialMember CSM, unsigned Quals,
- bool ConstRHS, CXXMethodDecl **Selected) {
+ bool ConstRHS,
+ Sema::TrivialABIHandling TAH,
+ CXXMethodDecl **Selected) {
if (Selected)
*Selected = nullptr;
@@ -7060,7 +7243,9 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
// C++11 [class.dtor]p5:
// A destructor is trivial if:
// - all the direct [subobjects] have trivial destructors
- if (RD->hasTrivialDestructor())
+ if (RD->hasTrivialDestructor() ||
+ (TAH == Sema::TAH_ConsiderTrivialABI &&
+ RD->hasTrivialDestructorForCall()))
return true;
if (Selected) {
@@ -7075,7 +7260,9 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
// C++11 [class.copy]p12:
// A copy constructor is trivial if:
// - the constructor selected to copy each direct [subobject] is trivial
- if (RD->hasTrivialCopyConstructor()) {
+ if (RD->hasTrivialCopyConstructor() ||
+ (TAH == Sema::TAH_ConsiderTrivialABI &&
+ RD->hasTrivialCopyConstructorForCall())) {
if (Quals == Qualifiers::Const)
// We must either select the trivial copy constructor or reach an
// ambiguity; no need to actually perform overload resolution.
@@ -7128,6 +7315,10 @@ static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
// not supposed to!
if (Selected)
*Selected = SMOR.getMethod();
+
+ if (TAH == Sema::TAH_ConsiderTrivialABI &&
+ (CSM == Sema::CXXCopyConstructor || CSM == Sema::CXXMoveConstructor))
+ return SMOR.getMethod()->isTrivialForCall();
return SMOR.getMethod()->isTrivial();
}
@@ -7166,14 +7357,14 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
QualType SubType, bool ConstRHS,
Sema::CXXSpecialMember CSM,
TrivialSubobjectKind Kind,
- bool Diagnose) {
+ Sema::TrivialABIHandling TAH, bool Diagnose) {
CXXRecordDecl *SubRD = SubType->getAsCXXRecordDecl();
if (!SubRD)
return true;
CXXMethodDecl *Selected;
if (findTrivialSpecialMember(S, SubRD, CSM, SubType.getCVRQualifiers(),
- ConstRHS, Diagnose ? &Selected : nullptr))
+ ConstRHS, TAH, Diagnose ? &Selected : nullptr))
return true;
if (Diagnose) {
@@ -7203,7 +7394,8 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
<< Kind << SubType.getUnqualifiedType() << CSM;
// Explain why the defaulted or deleted special member isn't trivial.
- S.SpecialMemberIsTrivial(Selected, CSM, Diagnose);
+ S.SpecialMemberIsTrivial(Selected, CSM, Sema::TAH_IgnoreTrivialABI,
+ Diagnose);
}
}
@@ -7214,7 +7406,9 @@ static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
/// trivial.
static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
Sema::CXXSpecialMember CSM,
- bool ConstArg, bool Diagnose) {
+ bool ConstArg,
+ Sema::TrivialABIHandling TAH,
+ bool Diagnose) {
for (const auto *FI : RD->fields()) {
if (FI->isInvalidDecl() || FI->isUnnamedBitfield())
continue;
@@ -7224,7 +7418,7 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
// Pretend anonymous struct or union members are members of this class.
if (FI->isAnonymousStructOrUnion()) {
if (!checkTrivialClassMembers(S, FieldType->getAsCXXRecordDecl(),
- CSM, ConstArg, Diagnose))
+ CSM, ConstArg, TAH, Diagnose))
return false;
continue;
}
@@ -7252,7 +7446,7 @@ static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
bool ConstRHS = ConstArg && !FI->isMutable();
if (!checkTrivialSubobjectCall(S, FI->getLocation(), FieldType, ConstRHS,
- CSM, TSK_Field, Diagnose))
+ CSM, TSK_Field, TAH, Diagnose))
return false;
}
@@ -7266,14 +7460,15 @@ void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) {
bool ConstArg = (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment);
checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, ConstArg, CSM,
- TSK_CompleteObject, /*Diagnose*/true);
+ TSK_CompleteObject, TAH_IgnoreTrivialABI,
+ /*Diagnose*/true);
}
/// Determine whether a defaulted or deleted special member function is trivial,
/// as specified in C++11 [class.ctor]p5, C++11 [class.copy]p12,
/// C++11 [class.copy]p25, and C++11 [class.dtor]p5.
bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
- bool Diagnose) {
+ TrivialABIHandling TAH, bool Diagnose) {
assert(!MD->isUserProvided() && CSM != CXXInvalid && "not special enough");
CXXRecordDecl *RD = MD->getParent();
@@ -7350,7 +7545,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// destructors]
for (const auto &BI : RD->bases())
if (!checkTrivialSubobjectCall(*this, BI.getLocStart(), BI.getType(),
- ConstArg, CSM, TSK_BaseClass, Diagnose))
+ ConstArg, CSM, TSK_BaseClass, TAH, Diagnose))
return false;
// C++11 [class.ctor]p5, C++11 [class.dtor]p5:
@@ -7365,7 +7560,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// -- for all of the non-static data members of its class that are of class
// type (or array thereof), each such class has a trivial [default
// constructor or destructor]
- if (!checkTrivialClassMembers(*this, RD, CSM, ConstArg, Diagnose))
+ if (!checkTrivialClassMembers(*this, RD, CSM, ConstArg, TAH, Diagnose))
return false;
// C++11 [class.dtor]p5:
@@ -7477,7 +7672,7 @@ public:
};
} // end anonymous namespace
-/// \brief Add the most overriden methods from MD to Methods
+/// Add the most overridden methods from MD to Methods
static void AddMostOverridenMethods(const CXXMethodDecl *MD,
llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
if (MD->size_overridden_methods() == 0)
@@ -7487,7 +7682,7 @@ static void AddMostOverridenMethods(const CXXMethodDecl *MD,
AddMostOverridenMethods(O, Methods);
}
-/// \brief Check if a method overloads virtual methods in a base class without
+/// Check if a method overloads virtual methods in a base class without
/// overriding any.
void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods) {
@@ -7528,7 +7723,7 @@ void Sema::NoteHiddenVirtualMethods(CXXMethodDecl *MD,
}
}
-/// \brief Diagnose methods which overload virtual methods in a base class
+/// Diagnose methods which overload virtual methods in a base class
/// without overriding any.
void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
if (MD->isInvalidDecl())
@@ -7547,22 +7742,64 @@ void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
}
}
-void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
- Decl *TagDecl,
- SourceLocation LBrac,
- SourceLocation RBrac,
- AttributeList *AttrList) {
+void Sema::checkIllFormedTrivialABIStruct(CXXRecordDecl &RD) {
+ auto PrintDiagAndRemoveAttr = [&]() {
+ // No diagnostics if this is a template instantiation.
+ if (!isTemplateInstantiation(RD.getTemplateSpecializationKind()))
+ Diag(RD.getAttr<TrivialABIAttr>()->getLocation(),
+ diag::ext_cannot_use_trivial_abi) << &RD;
+ RD.dropAttr<TrivialABIAttr>();
+ };
+
+ // Ill-formed if the struct has virtual functions.
+ if (RD.isPolymorphic()) {
+ PrintDiagAndRemoveAttr();
+ return;
+ }
+
+ for (const auto &B : RD.bases()) {
+ // Ill-formed if the base class is non-trivial for the purpose of calls or a
+ // virtual base.
+ if ((!B.getType()->isDependentType() &&
+ !B.getType()->getAsCXXRecordDecl()->canPassInRegisters()) ||
+ B.isVirtual()) {
+ PrintDiagAndRemoveAttr();
+ return;
+ }
+ }
+
+ for (const auto *FD : RD.fields()) {
+ // Ill-formed if the field is an Objective-C pointer or of a type that is
+ // non-trivial for the purpose of calls.
+ QualType FT = FD->getType();
+ if (FT.getObjCLifetime() == Qualifiers::OCL_Weak) {
+ PrintDiagAndRemoveAttr();
+ return;
+ }
+
+ if (const auto *RT = FT->getBaseElementTypeUnsafe()->getAs<RecordType>())
+ if (!RT->isDependentType() &&
+ !cast<CXXRecordDecl>(RT->getDecl())->canPassInRegisters()) {
+ PrintDiagAndRemoveAttr();
+ return;
+ }
+ }
+}
+
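
Cases the new checkIllFormedTrivialABIStruct rejects, dropping the attribute (illustrative):

    struct [[clang::trivial_abi]] Poly { virtual void F(); }; // polymorphic
    struct NonTrivial { ~NonTrivial(); };
    struct [[clang::trivial_abi]] Wrap {   // field type is non-trivial for
      NonTrivial NT;                       // the purpose of calls
    };
    struct [[clang::trivial_abi]] Der : NonTrivial {}; // base non-trivial too
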
+void Sema::ActOnFinishCXXMemberSpecification(
+ Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac,
+ SourceLocation RBrac, const ParsedAttributesView &AttrList) {
if (!TagDecl)
return;
AdjustDeclIfTemplate(TagDecl);
- for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- if (l->getKind() != AttributeList::AT_Visibility)
+ for (const ParsedAttr &AL : AttrList) {
+ if (AL.getKind() != ParsedAttr::AT_Visibility)
continue;
- l->setInvalid();
- Diag(l->getLoc(), diag::warn_attribute_after_definition_ignored) <<
- l->getName();
+ AL.setInvalid();
+ Diag(AL.getLoc(), diag::warn_attribute_after_definition_ignored)
+ << AL.getName();
}
ActOnFields(S, RLoc, TagDecl, llvm::makeArrayRef(
@@ -7570,7 +7807,7 @@ void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
reinterpret_cast<Decl**>(FieldCollector->getCurFields()),
FieldCollector->getCurNumFields()), LBrac, RBrac, AttrList);
- CheckCompletedCXXClass(dyn_cast_or_null<CXXRecordDecl>(TagDecl));
+ CheckCompletedCXXClass(cast<CXXRecordDecl>(TagDecl));
}
/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
@@ -8108,7 +8345,8 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
QualType ConvType =
GetTypeFromParser(D.getName().ConversionFunctionId, &ConvTSI);
- if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
+ const DeclSpec &DS = D.getDeclSpec();
+ if (DS.hasTypeSpecifier() && !D.isInvalidType()) {
// Conversion functions don't have return types, but the parser will
// happily parse something like:
//
@@ -8118,9 +8356,18 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
//
// The return type will be changed later anyway.
Diag(D.getIdentifierLoc(), diag::err_conv_function_return_type)
- << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(DS.getTypeSpecTypeLoc())
<< SourceRange(D.getIdentifierLoc());
D.setInvalidType();
+ } else if (DS.getTypeQualifiers() && !D.isInvalidType()) {
+ // It's also plausible that the user writes type qualifiers in the wrong
+ // place, such as:
+ // struct S { const operator int(); };
+ // FIXME: we could provide a fixit to move the qualifiers onto the
+ // conversion type.
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_with_complex_decl)
+ << SourceRange(D.getIdentifierLoc()) << 0;
+ D.setInvalidType();
}
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
@@ -8233,12 +8480,12 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
- if (D.getDeclSpec().isExplicitSpecified())
- Diag(D.getDeclSpec().getExplicitSpecLoc(),
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_explicit_conversion_functions :
- diag::ext_explicit_conversion_functions)
- << SourceRange(D.getDeclSpec().getExplicitSpecLoc());
+ if (DS.isExplicitSpecified())
+ Diag(DS.getExplicitSpecLoc(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_explicit_conversion_functions
+ : diag::ext_explicit_conversion_functions)
+ << SourceRange(DS.getExplicitSpecLoc());
}
/// ActOnConversionDeclarator - Called by ActOnDeclarator to complete
@@ -8437,7 +8684,7 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
// Namespace Handling
//===----------------------------------------------------------------------===//
-/// \brief Diagnose a mismatch in 'inline' qualifiers when a namespace is
+/// Diagnose a mismatch in 'inline' qualifiers when a namespace is
/// reopened.
static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
SourceLocation Loc,
@@ -8479,14 +8726,10 @@ static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
/// ActOnStartNamespaceDef - This is called at the start of a namespace
/// definition.
-Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
- SourceLocation InlineLoc,
- SourceLocation NamespaceLoc,
- SourceLocation IdentLoc,
- IdentifierInfo *II,
- SourceLocation LBrace,
- AttributeList *AttrList,
- UsingDirectiveDecl *&UD) {
+Decl *Sema::ActOnStartNamespaceDef(
+ Scope *NamespcScope, SourceLocation InlineLoc, SourceLocation NamespaceLoc,
+ SourceLocation IdentLoc, IdentifierInfo *II, SourceLocation LBrace,
+ const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UD) {
SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc;
// For anonymous namespace, take the location of the left brace.
SourceLocation Loc = II ? IdentLoc : LBrace;
@@ -8673,7 +8916,137 @@ NamespaceDecl *Sema::lookupStdExperimentalNamespace() {
return StdExperimentalNamespaceCache;
}
-/// \brief Retrieve the special "std" namespace, which may require us to
+namespace {
+
+enum UnsupportedSTLSelect {
+ USS_InvalidMember,
+ USS_MissingMember,
+ USS_NonTrivial,
+ USS_Other
+};
+
+struct InvalidSTLDiagnoser {
+ Sema &S;
+ SourceLocation Loc;
+ QualType TyForDiags;
+
+ QualType operator()(UnsupportedSTLSelect Sel = USS_Other, StringRef Name = "",
+ const VarDecl *VD = nullptr) {
+ {
+ auto D = S.Diag(Loc, diag::err_std_compare_type_not_supported)
+ << TyForDiags << ((int)Sel);
+ if (Sel == USS_InvalidMember || Sel == USS_MissingMember) {
+ assert(!Name.empty());
+ D << Name;
+ }
+ }
+ if (Sel == USS_InvalidMember) {
+ S.Diag(VD->getLocation(), diag::note_var_declared_here)
+ << VD << VD->getSourceRange();
+ }
+ return QualType();
+ }
+};
+} // namespace
+
+QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
+ SourceLocation Loc) {
+ assert(getLangOpts().CPlusPlus &&
+ "Looking for comparison category type outside of C++.");
+
+ // Check if we've already successfully checked the comparison category type
+ // before. If so, skip checking it again.
+ ComparisonCategoryInfo *Info = Context.CompCategories.lookupInfo(Kind);
+ if (Info && FullyCheckedComparisonCategories[static_cast<unsigned>(Kind)])
+ return Info->getType();
+
+ // If lookup failed, diagnose the missing type and bail out.
+ if (!Info) {
+ std::string NameForDiags = "std::";
+ NameForDiags += ComparisonCategories::getCategoryString(Kind);
+ Diag(Loc, diag::err_implied_comparison_category_type_not_found)
+ << NameForDiags;
+ return QualType();
+ }
+
+ assert(Info->Kind == Kind);
+ assert(Info->Record);
+
+ // Update the Record decl in case we encountered a forward declaration on our
+ // first pass. FIXME: This is a bit of a hack.
+ if (Info->Record->hasDefinition())
+ Info->Record = Info->Record->getDefinition();
+
+ // Use an elaborated type for diagnostics which has a name containing the
+ // prepended 'std' namespace but not any inline namespace names.
+ QualType TyForDiags = [&]() {
+ auto *NNS =
+ NestedNameSpecifier::Create(Context, nullptr, getStdNamespace());
+ return Context.getElaboratedType(ETK_None, NNS, Info->getType());
+ }();
+
+ if (RequireCompleteType(Loc, TyForDiags, diag::err_incomplete_type))
+ return QualType();
+
+ InvalidSTLDiagnoser UnsupportedSTLError{*this, Loc, TyForDiags};
+
+ if (!Info->Record->isTriviallyCopyable())
+ return UnsupportedSTLError(USS_NonTrivial);
+
+ for (const CXXBaseSpecifier &BaseSpec : Info->Record->bases()) {
+ CXXRecordDecl *Base = BaseSpec.getType()->getAsCXXRecordDecl();
+ // Tolerate empty base classes.
+ if (Base->isEmpty())
+ continue;
+ // Reject STL implementations which have at least one non-empty base.
+ return UnsupportedSTLError();
+ }
+
+ // Check that the STL has implemented the types using a single integer field.
+ // This expectation allows better codegen for builtin operators. We require:
+ // (1) The class has exactly one field.
+ // (2) The field is an integral or enumeration type.
+ auto FIt = Info->Record->field_begin(), FEnd = Info->Record->field_end();
+ if (std::distance(FIt, FEnd) != 1 ||
+ !FIt->getType()->isIntegralOrEnumerationType()) {
+ return UnsupportedSTLError();
+ }
+
+ // Build each of the required values and store them in Info.
+ for (ComparisonCategoryResult CCR :
+ ComparisonCategories::getPossibleResultsForType(Kind)) {
+ StringRef MemName = ComparisonCategories::getResultString(CCR);
+ ComparisonCategoryInfo::ValueInfo *ValInfo = Info->lookupValueInfo(CCR);
+
+ if (!ValInfo)
+ return UnsupportedSTLError(USS_MissingMember, MemName);
+
+ VarDecl *VD = ValInfo->VD;
+ assert(VD && "should not be null!");
+
+ // Attempt to diagnose reasons why the STL definition of this type
+ // might be invalid, such as the member's initializer failing to be a
+ // constant expression.
+ // TODO: Handle more ways the lookup or result can be invalid.
+ if (!VD->isStaticDataMember() || !VD->isConstexpr() || !VD->hasInit() ||
+ !VD->checkInitIsICE())
+ return UnsupportedSTLError(USS_InvalidMember, MemName, VD);
+
+ // Attempt to evaluate the var decl as a constant expression and extract
+ // the value of its first field as an ICE. If this fails, the STL
+ // implementation is not supported.
+ if (!ValInfo->hasValidIntValue())
+ return UnsupportedSTLError();
+
+ MarkVariableReferenced(Loc, VD);
+ }
+
+ // We've successfully built the required types and expressions. Update
+ // the cache and return the newly cached value.
+ FullyCheckedComparisonCategories[static_cast<unsigned>(Kind)] = true;
+ return Info->getType();
+}
+
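For illustration, a minimal sketch of an STL comparison category type that would satisfy every check above (this is not the libc++/libstdc++ source; names and values are hypothetical): trivially copyable, no non-empty bases, exactly one integral field, and a constexpr static data member with a constant initializer for each result value.

namespace std {
class strong_ordering {
  int Value; // (1) exactly one field, (2) of integral type
  constexpr explicit strong_ordering(int V) : Value(V) {}

public:
  // Each result member Sema looks up must be a constexpr static data
  // member whose initializer is a constant expression.
  static const strong_ordering less;
  static const strong_ordering equal;
  static const strong_ordering greater;
};
inline constexpr strong_ordering strong_ordering::less{-1};
inline constexpr strong_ordering strong_ordering::equal{0};
inline constexpr strong_ordering strong_ordering::greater{1};
} // namespace std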
+/// Retrieve the special "std" namespace, which may require us to
/// implicitly define the namespace.
NamespaceDecl *Sema::getOrCreateStdNamespace() {
if (!StdNamespace) {
@@ -8816,7 +9189,7 @@ bool Sema::isInitListConstructor(const FunctionDecl *Ctor) {
return isStdInitializerList(ArgType, nullptr);
}
-/// \brief Determine whether a using statement is in a context where it will be
+/// Determine whether a using statement is in a context where it will
/// apply in all contexts.
static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
switch (CurContext->getDeclKind()) {
@@ -8871,13 +9244,11 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
return false;
}
-Decl *Sema::ActOnUsingDirective(Scope *S,
- SourceLocation UsingLoc,
- SourceLocation NamespcLoc,
- CXXScopeSpec &SS,
- SourceLocation IdentLoc,
- IdentifierInfo *NamespcName,
- AttributeList *AttrList) {
+Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc,
+ SourceLocation NamespcLoc, CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ const ParsedAttributesView &AttrList) {
assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
assert(NamespcName && "Invalid NamespcName.");
assert(IdentLoc.isValid() && "Invalid NamespcName location.");
@@ -8932,7 +9303,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
// Find enclosing context containing both using-directive and
// nominated namespace.
- DeclContext *CommonAncestor = cast<DeclContext>(NS);
+ DeclContext *CommonAncestor = NS;
while (CommonAncestor && !CommonAncestor->Encloses(CurContext))
CommonAncestor = CommonAncestor->getParent();
@@ -8969,15 +9340,12 @@ void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
S->PushUsingDirective(UDir);
}
-
-Decl *Sema::ActOnUsingDeclaration(Scope *S,
- AccessSpecifier AS,
+Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
- SourceLocation TypenameLoc,
- CXXScopeSpec &SS,
+ SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name,
SourceLocation EllipsisLoc,
- AttributeList *AttrList) {
+ const ParsedAttributesView &AttrList) {
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
if (SS.isEmpty()) {
@@ -8986,15 +9354,15 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
}
switch (Name.getKind()) {
- case UnqualifiedId::IK_ImplicitSelfParam:
- case UnqualifiedId::IK_Identifier:
- case UnqualifiedId::IK_OperatorFunctionId:
- case UnqualifiedId::IK_LiteralOperatorId:
- case UnqualifiedId::IK_ConversionFunctionId:
+ case UnqualifiedIdKind::IK_ImplicitSelfParam:
+ case UnqualifiedIdKind::IK_Identifier:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_ConversionFunctionId:
break;
- case UnqualifiedId::IK_ConstructorName:
- case UnqualifiedId::IK_ConstructorTemplateId:
+ case UnqualifiedIdKind::IK_ConstructorName:
+ case UnqualifiedIdKind::IK_ConstructorTemplateId:
// C++11 inheriting constructors.
Diag(Name.getLocStart(),
getLangOpts().CPlusPlus11 ?
@@ -9006,17 +9374,17 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
return nullptr;
- case UnqualifiedId::IK_DestructorName:
+ case UnqualifiedIdKind::IK_DestructorName:
Diag(Name.getLocStart(), diag::err_using_decl_destructor)
<< SS.getRange();
return nullptr;
- case UnqualifiedId::IK_TemplateId:
+ case UnqualifiedIdKind::IK_TemplateId:
Diag(Name.getLocStart(), diag::err_using_decl_template_id)
<< SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc);
return nullptr;
- case UnqualifiedId::IK_DeductionGuideName:
+ case UnqualifiedIdKind::IK_DeductionGuideName:
llvm_unreachable("cannot parse qualified deduction guide name");
}
@@ -9056,7 +9424,7 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
return UD;
}
-/// \brief Determine whether a using declaration considers the given
+/// Determine whether a using declaration considers the given
/// declarations as "equivalent", e.g., if they are redeclarations of
/// the same entity or are both typedefs of the same type.
static bool
@@ -9149,6 +9517,19 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
if (isa<UsingDecl>(D) || isa<UsingPackDecl>(D))
continue;
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ // C++ [class.mem]p19:
+ // If T is the name of a class, then [every named member other than
+ // a non-static data member] shall have a name different from T
+ if (RD->isInjectedClassName() && !isa<FieldDecl>(Target) &&
+ !isa<IndirectFieldDecl>(Target) &&
+ !isa<UnresolvedUsingValueDecl>(Target) &&
+ DiagnoseClassNameShadow(
+ CurContext,
+ DeclarationNameInfo(Using->getDeclName(), Using->getLocation())))
+ return true;
+ }
+
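An illustrative case the new injected-class-name check rejects (hypothetical code, per C++ [class.mem]p19):

struct A {
  void T(); // a member function named 'T'
};
struct T : A {
  using A::T; // error: introduces a member with the same name as the class;
              // only non-static data members may share the class's name
};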
if (IsEquivalentForUsingDecl(Context, D, Target)) {
if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(*I))
PrevShadow = Shadow;
@@ -9421,15 +9802,11 @@ private:
/// \param IsInstantiation - Whether this call arises from an
/// instantiation of an unresolved using declaration. We treat
/// the lookup differently for these declarations.
-NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
- SourceLocation UsingLoc,
- bool HasTypenameKeyword,
- SourceLocation TypenameLoc,
- CXXScopeSpec &SS,
- DeclarationNameInfo NameInfo,
- SourceLocation EllipsisLoc,
- AttributeList *AttrList,
- bool IsInstantiation) {
+NamedDecl *Sema::BuildUsingDeclaration(
+ Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
+ bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
+ DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
+ const ParsedAttributesView &AttrList, bool IsInstantiation) {
assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
SourceLocation IdentLoc = NameInfo.getLoc();
assert(IdentLoc.isValid() && "Invalid TargetName location.");
@@ -9995,14 +10372,11 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
return true;
}
-Decl *Sema::ActOnAliasDeclaration(Scope *S,
- AccessSpecifier AS,
+Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParamLists,
- SourceLocation UsingLoc,
- UnqualifiedId &Name,
- AttributeList *AttrList,
- TypeResult Type,
- Decl *DeclFromDeclSpec) {
+ SourceLocation UsingLoc, UnqualifiedId &Name,
+ const ParsedAttributesView &AttrList,
+ TypeResult Type, Decl *DeclFromDeclSpec) {
// Skip up to the relevant declaration scope.
while (S->isTemplateParamScope())
S = S->getParent();
@@ -10040,7 +10414,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
Previous.clear();
}
- assert(Name.Kind == UnqualifiedId::IK_Identifier &&
+ assert(Name.Kind == UnqualifiedIdKind::IK_Identifier &&
"name in alias declaration must be an identifier");
TypeAliasDecl *NewTD = TypeAliasDecl::Create(Context, CurContext, UsingLoc,
Name.StartLocation,
@@ -10378,7 +10752,7 @@ struct DeclaringSpecialMember {
}
}
- /// \brief Are we already trying to declare this special member?
+ /// Are we already trying to declare this special member?
bool isAlreadyBeingDeclared() const {
return WasAlreadyBeingDeclared;
}
@@ -10720,6 +11094,8 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
// We don't need to use SpecialMemberIsTrivial here; triviality for
// destructors is easy to compute.
Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
+ Destructor->setTrivialForCall(ClassDecl->hasAttr<TrivialABIAttr>() ||
+ ClassDecl->hasTrivialDestructorForCall());
// Note that we have declared this destructor.
++ASTContext::NumImplicitDestructorsDeclared;
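The new "trivial for call" bit is keyed off the trivial_abi attribute; a hedged sketch of the class shape it is meant for:

// Illustrative only: a class with a non-trivial destructor that opts in
// to register passing via the attribute.
struct [[clang::trivial_abi]] Handle {
  int Fd;
  ~Handle(); // non-trivial, yet treated as trivial *for calls*, so
             // Handle objects may be passed and returned in registers
};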
@@ -10784,7 +11160,7 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
}
}
-/// \brief Perform any semantic analysis which needs to be delayed until all
+/// Perform any semantic analysis which needs to be delayed until all
/// pending class member declarations have been parsed.
void Sema::ActOnFinishCXXMemberDecls() {
// If the context is an invalid C++ class, just suppress these checks.
@@ -10804,12 +11180,12 @@ void Sema::ActOnFinishCXXNonNestedClass(Decl *D) {
void Sema::referenceDLLExportedClassMethods() {
if (!DelayedDllExportClasses.empty()) {
- // Calling ReferenceDllExportedMethods might cause the current function to
+ // Calling ReferenceDllExportedMembers might cause the current function to
// be called again, so use a local copy of DelayedDllExportClasses.
SmallVector<CXXRecordDecl *, 4> WorkList;
std::swap(DelayedDllExportClasses, WorkList);
for (CXXRecordDecl *Class : WorkList)
- ReferenceDllExportedMethods(*this, Class);
+ ReferenceDllExportedMembers(*this, Class);
}
}
@@ -10843,7 +11219,7 @@ void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
}
namespace {
-/// \brief An abstract base class for all helper classes used in building the
+/// An abstract base class for all helper classes used in building the
/// copy/move operators. These classes serve as factory functions and help us
/// avoid using the same Expr* in the AST twice.
class ExprBuilder {
@@ -10990,11 +11366,11 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
Expr *From = FromB.build(S, Loc);
From = new (S.Context) UnaryOperator(From, UO_AddrOf,
S.Context.getPointerType(From->getType()),
- VK_RValue, OK_Ordinary, Loc);
+ VK_RValue, OK_Ordinary, Loc, false);
Expr *To = ToB.build(S, Loc);
To = new (S.Context) UnaryOperator(To, UO_AddrOf,
S.Context.getPointerType(To->getType()),
- VK_RValue, OK_Ordinary, Loc);
+ VK_RValue, OK_Ordinary, Loc, false);
const Type *E = T->getBaseElementTypeUnsafe();
bool NeedsCollectableMemCpy =
@@ -11028,7 +11404,7 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
return Call.getAs<Stmt>();
}
-/// \brief Builds a statement that copies/moves the given entity from \p From to
+/// Builds a statement that copies/moves the given entity from \p From to
/// \c To.
///
/// This routine is used to copy/move the members of a class with an
@@ -11233,10 +11609,12 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
BO_NE, S.Context.BoolTy,
VK_RValue, OK_Ordinary, Loc, FPOptions());
- // Create the pre-increment of the iteration variable.
- Expr *Increment
- = new (S.Context) UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc,
- SizeType, VK_LValue, OK_Ordinary, Loc);
+ // Create the pre-increment of the iteration variable. We can determine
+ // whether the increment will overflow based on the value of the array
+ // bound.
+ Expr *Increment = new (S.Context)
+ UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc, SizeType,
+ VK_LValue, OK_Ordinary, Loc, Upper.isMaxValue());
// Construct the loop that copies all elements of this array.
return S.ActOnForStmt(
@@ -11525,7 +11903,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
}
// Suppress assigning zero-width bitfields.
- if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ if (Field->isZeroLengthBitField(Context))
continue;
QualType FieldType = Field->getType().getNonReferenceType();
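A hedged illustration of the member the new isZeroLengthBitField() predicate skips:

struct S {
  int A : 3;
  int   : 0; // zero-width bit-field: pads/realigns only, holds no value,
             // so the implicit copy/move assignment never assigns it
  int B : 4;
};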
@@ -11892,7 +12270,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
}
// Suppress assigning zero-width bitfields.
- if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ if (Field->isZeroLengthBitField(Context))
continue;
QualType FieldType = Field->getType().getNonReferenceType();
@@ -12021,9 +12399,16 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
CopyConstructor->setParams(FromParam);
CopyConstructor->setTrivial(
- ClassDecl->needsOverloadResolutionForCopyConstructor()
- ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor)
- : ClassDecl->hasTrivialCopyConstructor());
+ ClassDecl->needsOverloadResolutionForCopyConstructor()
+ ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor)
+ : ClassDecl->hasTrivialCopyConstructor());
+
+ CopyConstructor->setTrivialForCall(
+ ClassDecl->hasAttr<TrivialABIAttr>() ||
+ (ClassDecl->needsOverloadResolutionForCopyConstructor()
+ ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor,
+ TAH_ConsiderTrivialABI)
+ : ClassDecl->hasTrivialCopyConstructorForCall()));
// Note that we have declared this constructor.
++ASTContext::NumImplicitCopyConstructorsDeclared;
@@ -12144,9 +12529,16 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
MoveConstructor->setParams(FromParam);
MoveConstructor->setTrivial(
- ClassDecl->needsOverloadResolutionForMoveConstructor()
- ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor)
- : ClassDecl->hasTrivialMoveConstructor());
+ ClassDecl->needsOverloadResolutionForMoveConstructor()
+ ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor)
+ : ClassDecl->hasTrivialMoveConstructor());
+
+ MoveConstructor->setTrivialForCall(
+ ClassDecl->hasAttr<TrivialABIAttr>() ||
+ (ClassDecl->needsOverloadResolutionForMoveConstructor()
+ ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor,
+ TAH_ConsiderTrivialABI)
+ : ClassDecl->hasTrivialMoveConstructorForCall()));
// Note that we have declared this constructor.
++ASTContext::NumImplicitMoveConstructorsDeclared;
@@ -12215,30 +12607,27 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLocation,
CXXConversionDecl *Conv) {
SynthesizedFunctionScope Scope(*this, Conv);
+ assert(!Conv->getReturnType()->isUndeducedType());
CXXRecordDecl *Lambda = Conv->getParent();
- CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
- // If we are defining a specialization of a conversion to function-ptr
- // cache the deduced template arguments for this specialization
- // so that we can use them to retrieve the corresponding call-operator
- // and static-invoker.
- const TemplateArgumentList *DeducedTemplateArgs = nullptr;
-
- // Retrieve the corresponding call-operator specialization.
- if (Lambda->isGenericLambda()) {
- assert(Conv->isFunctionTemplateSpecialization());
- FunctionTemplateDecl *CallOpTemplate =
- CallOp->getDescribedFunctionTemplate();
- DeducedTemplateArgs = Conv->getTemplateSpecializationArgs();
- void *InsertPos = nullptr;
- FunctionDecl *CallOpSpec = CallOpTemplate->findSpecialization(
- DeducedTemplateArgs->asArray(),
- InsertPos);
- assert(CallOpSpec &&
- "Conversion operator must have a corresponding call operator");
- CallOp = cast<CXXMethodDecl>(CallOpSpec);
+ FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
+ FunctionDecl *Invoker = Lambda->getLambdaStaticInvoker();
+
+ if (auto *TemplateArgs = Conv->getTemplateSpecializationArgs()) {
+ CallOp = InstantiateFunctionDeclaration(
+ CallOp->getDescribedFunctionTemplate(), TemplateArgs, CurrentLocation);
+ if (!CallOp)
+ return;
+
+ Invoker = InstantiateFunctionDeclaration(
+ Invoker->getDescribedFunctionTemplate(), TemplateArgs, CurrentLocation);
+ if (!Invoker)
+ return;
}
+ if (CallOp->isInvalidDecl())
+ return;
+
// Mark the call operator referenced (and add to pending instantiations
// if necessary).
// For both the conversion and static-invoker template specializations
@@ -12246,39 +12635,24 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
// to the PendingInstantiations.
MarkFunctionReferenced(CurrentLocation, CallOp);
- // Retrieve the static invoker...
- CXXMethodDecl *Invoker = Lambda->getLambdaStaticInvoker();
- // ... and get the corresponding specialization for a generic lambda.
- if (Lambda->isGenericLambda()) {
- assert(DeducedTemplateArgs &&
- "Must have deduced template arguments from Conversion Operator");
- FunctionTemplateDecl *InvokeTemplate =
- Invoker->getDescribedFunctionTemplate();
- void *InsertPos = nullptr;
- FunctionDecl *InvokeSpec = InvokeTemplate->findSpecialization(
- DeducedTemplateArgs->asArray(),
- InsertPos);
- assert(InvokeSpec &&
- "Must have a corresponding static invoker specialization");
- Invoker = cast<CXXMethodDecl>(InvokeSpec);
- }
+ // Fill in the __invoke function with a dummy implementation. IR generation
+ // will fill in the actual details. Update its type in case it contained
+ // an 'auto'.
+ Invoker->markUsed(Context);
+ Invoker->setReferenced();
+ Invoker->setType(Conv->getReturnType()->getPointeeType());
+ Invoker->setBody(new (Context) CompoundStmt(Conv->getLocation()));
+
// Construct the body of the conversion function { return __invoke; }.
Expr *FunctionRef = BuildDeclRefExpr(Invoker, Invoker->getType(),
- VK_LValue, Conv->getLocation()).get();
+ VK_LValue, Conv->getLocation()).get();
assert(FunctionRef && "Can't refer to __invoke function?");
Stmt *Return = BuildReturnStmt(Conv->getLocation(), FunctionRef).get();
Conv->setBody(CompoundStmt::Create(Context, Return, Conv->getLocation(),
Conv->getLocation()));
-
Conv->markUsed(Context);
Conv->setReferenced();
- // Fill in the __invoke function with a dummy implementation. IR generation
- // will fill in the actual details.
- Invoker->markUsed(Context);
- Invoker->setReferenced();
- Invoker->setBody(new (Context) CompoundStmt(Conv->getLocation()));
-
if (ASTMutationListener *L = getASTMutationListener()) {
L->CompletedImplicitDefinition(Conv);
L->CompletedImplicitDefinition(Invoker);
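For context, a sketch of the user code whose conversion function this path synthesizes (illustrative):

auto L = [](int X) { return X + 1; };
int (*Fp)(int) = L; // calls the lambda's 'operator int (*)(int)'; its body
                    // is effectively '{ return __invoke; }', where __invoke
                    // is the static invoker given a dummy body above
int R = Fp(41);     // dispatches to the lambda's call operator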
@@ -12339,7 +12713,7 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion(
}
}
-/// \brief Determine whether the given list arguments contains exactly one
+/// Determine whether the given list of arguments contains exactly one
/// "real" (non-default) argument.
static bool hasOneRealArgument(MultiExprArg Args) {
switch (Args.size()) {
@@ -12550,7 +12924,7 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
Diag(VD->getLocation(), diag::warn_global_destructor);
}
-/// \brief Given a constructor and the set of arguments provided for the
+/// Given a constructor and the set of arguments provided for the
/// constructor, convert the arguments and add any required default arguments
/// to form a proper call to this constructor.
///
@@ -12617,6 +12991,13 @@ CheckOperatorNewDeleteDeclarationScope(Sema &SemaRef,
return false;
}
+static QualType
+RemoveAddressSpaceFromPtr(Sema &SemaRef, const PointerType *PtrTy) {
+ QualType QTy = PtrTy->getPointeeType();
+ QTy = SemaRef.Context.removeAddrSpaceQualType(QTy);
+ return SemaRef.Context.getPointerType(QTy);
+}
+
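A hedged OpenCL C++ illustration of what the address-space stripping permits (the declaration is illustrative, assuming <cstddef> for std::size_t):

// With this change, a result or first-parameter type carrying an address
// space still matches the expected 'void *' shape, because the address
// space is removed from the pointee before the canonical-type comparison:
__global void *operator new(std::size_t Size);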
static inline bool
CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
CanQualType ExpectedResultType,
@@ -12632,6 +13013,13 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
diag::err_operator_new_delete_dependent_result_type)
<< FnDecl->getDeclName() << ExpectedResultType;
+ // OpenCL C++: the operator is valid on any address space.
+ if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
+ if (auto *PtrTy = ResultType->getAs<PointerType>()) {
+ ResultType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
+ }
+ }
+
// Check that the result type is what we expect.
if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType)
return SemaRef.Diag(FnDecl->getLocation(),
@@ -12657,6 +13045,13 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
<< FnDecl->getDeclName() << ExpectedFirstParamType;
// Check that the first parameter type is what we expect.
+ if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
+ // OpenCL C++: the operator is valid on any address space.
+ if (auto *PtrTy =
+ FnDecl->getParamDecl(0)->getType()->getAs<PointerType>()) {
+ FirstParamType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
+ }
+ }
if (SemaRef.Context.getCanonicalType(FirstParamType).getUnqualifiedType() !=
ExpectedFirstParamType)
return SemaRef.Diag(FnDecl->getLocation(), InvalidParamTypeDiag)
@@ -12966,6 +13361,7 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
ParamType->isSpecificBuiltinType(BuiltinType::LongDouble) ||
Context.hasSameType(ParamType, Context.CharTy) ||
Context.hasSameType(ParamType, Context.WideCharTy) ||
+ Context.hasSameType(ParamType, Context.Char8Ty) ||
Context.hasSameType(ParamType, Context.Char16Ty) ||
Context.hasSameType(ParamType, Context.Char32Ty)) {
} else if (const PointerType *Ptr = ParamType->getAs<PointerType>()) {
@@ -13026,10 +13422,12 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
}
QualType InnerType = PointeeType.getUnqualifiedType();
- // Only const char *, const wchar_t*, const char16_t*, and const char32_t*
- // are allowed as the first parameter to a two-parameter function
+ // Only const char *, const wchar_t*, const char8_t*, const char16_t*, and
+ // const char32_t* are allowed as the first parameter to a two-parameter
+ // function.
if (!(Context.hasSameType(InnerType, Context.CharTy) ||
Context.hasSameType(InnerType, Context.WideCharTy) ||
+ Context.hasSameType(InnerType, Context.Char8Ty) ||
Context.hasSameType(InnerType, Context.Char16Ty) ||
Context.hasSameType(InnerType, Context.Char32Ty))) {
Diag((*Param)->getSourceRange().getBegin(),
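With Char8Ty added to both lists, literal operators taking char8_t become valid; a hedged example (assumes a language mode that provides char8_t, e.g. C++2a):

#include <cstddef>
constexpr int operator""_u8(char8_t) { return 1; } // cooked character form
constexpr int operator""_u8(const char8_t *, std::size_t) { return 2; }
                                                   // string-literal form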
@@ -13136,19 +13534,18 @@ Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
}
Decl *Sema::ActOnEmptyDeclaration(Scope *S,
- AttributeList *AttrList,
+ const ParsedAttributesView &AttrList,
SourceLocation SemiLoc) {
Decl *ED = EmptyDecl::Create(Context, CurContext, SemiLoc);
// Attribute declarations appertain to the empty declaration, so we handle
// them here.
- if (AttrList)
- ProcessDeclAttributeList(S, ED, AttrList);
+ ProcessDeclAttributeList(S, ED, AttrList);
CurContext->addDecl(ED);
return ED;
}
-/// \brief Perform semantic analysis for the variable declaration that
+/// Perform semantic analysis for the variable declaration that
/// occurs within a C++ catch clause, returning the newly-created
/// variable.
VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
@@ -13404,7 +13801,7 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
return Decl;
}
-/// \brief Perform semantic analysis of the given friend type declaration.
+/// Perform semantic analysis of the given friend type declaration.
///
/// \returns The created friend declaration, or null on error.
FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
@@ -13481,10 +13878,9 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
/// templated.
Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
- CXXScopeSpec &SS,
- IdentifierInfo *Name,
+ CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
- AttributeList *Attr,
+ const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists) {
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
@@ -13597,7 +13993,6 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
return Friend;
}
-
/// Handle a friend type declaration. This works in tandem with
/// ActOnTag.
///
@@ -13625,7 +14020,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
// for a TUK_Friend.
- Declarator TheDeclarator(DS, Declarator::MemberContext);
+ Declarator TheDeclarator(DS, DeclaratorContext::MemberContext);
TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
QualType T = TSI->getType();
if (TheDeclarator.isInvalidType())
@@ -13799,7 +14194,8 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// elaborated-type-specifier, the lookup to determine whether
// the entity has been previously declared shall not consider
// any scopes outside the innermost enclosing namespace.
- bool isTemplateId = D.getName().getKind() == UnqualifiedId::IK_TemplateId;
+ bool isTemplateId =
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId;
// Find the appropriate context according to the above.
DC = CurContext;
@@ -13910,24 +14306,24 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
if (!DC->isRecord()) {
int DiagArg = -1;
switch (D.getName().getKind()) {
- case UnqualifiedId::IK_ConstructorTemplateId:
- case UnqualifiedId::IK_ConstructorName:
+ case UnqualifiedIdKind::IK_ConstructorTemplateId:
+ case UnqualifiedIdKind::IK_ConstructorName:
DiagArg = 0;
break;
- case UnqualifiedId::IK_DestructorName:
+ case UnqualifiedIdKind::IK_DestructorName:
DiagArg = 1;
break;
- case UnqualifiedId::IK_ConversionFunctionId:
+ case UnqualifiedIdKind::IK_ConversionFunctionId:
DiagArg = 2;
break;
- case UnqualifiedId::IK_DeductionGuideName:
+ case UnqualifiedIdKind::IK_DeductionGuideName:
DiagArg = 3;
break;
- case UnqualifiedId::IK_Identifier:
- case UnqualifiedId::IK_ImplicitSelfParam:
- case UnqualifiedId::IK_LiteralOperatorId:
- case UnqualifiedId::IK_OperatorFunctionId:
- case UnqualifiedId::IK_TemplateId:
+ case UnqualifiedIdKind::IK_Identifier:
+ case UnqualifiedIdKind::IK_ImplicitSelfParam:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_TemplateId:
break;
}
// This implies that it has to be an operator or function.
@@ -14168,6 +14564,16 @@ bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
}
}
+ // Virtual overrides must have the same code_seg.
+ const auto *OldCSA = Old->getAttr<CodeSegAttr>();
+ const auto *NewCSA = New->getAttr<CodeSegAttr>();
+ if ((NewCSA || OldCSA) &&
+ (!OldCSA || !NewCSA || NewCSA->getName() != OldCSA->getName())) {
+ Diag(New->getLocation(), diag::err_mismatched_code_seg_override);
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
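The kind of mismatch the new check rejects (illustrative code using the MS extension):

struct Base {
  virtual void F(); // default code segment
};
struct Derived : Base {
  __declspec(code_seg("special")) void F() override; // error: an override
                                                     // must use the same
                                                     // code_seg as its base
};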
CallingConv NewCC = NewFT->getCallConv(), OldCC = OldFT->getCallConv();
// If the calling conventions match, everything is fine
@@ -14294,7 +14700,7 @@ bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
return false;
}
-/// \brief Mark the given method pure.
+/// Mark the given method pure.
///
/// \param Method the method to be marked pure.
///
@@ -14324,7 +14730,7 @@ void Sema::ActOnPureSpecifier(Decl *D, SourceLocation ZeroLoc) {
Diag(D->getLocation(), diag::err_illegal_initializer);
}
-/// \brief Determine whether the given declaration is a global variable or
+/// Determine whether the given declaration is a global variable or
/// static data member.
static bool isNonlocalVariable(const Decl *D) {
if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(D))
@@ -14431,7 +14837,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
// Try to insert this class into the map.
LoadExternalVTableUses();
- Class = cast<CXXRecordDecl>(Class->getCanonicalDecl());
+ Class = Class->getCanonicalDecl();
std::pair<llvm::DenseMap<CXXRecordDecl *, bool>::iterator, bool>
Pos = VTablesUsed.insert(std::make_pair(Class, DefinitionRequired));
if (!Pos.second) {
@@ -14543,7 +14949,7 @@ bool Sema::DefineUsedVTables() {
// vtable for this class is required.
DefinedAnything = true;
MarkVirtualMembersReferenced(Loc, Class);
- CXXRecordDecl *Canonical = cast<CXXRecordDecl>(Class->getCanonicalDecl());
+ CXXRecordDecl *Canonical = Class->getCanonicalDecl();
if (VTablesUsed[Canonical])
Consumer.HandleVTable(Class);
@@ -14666,9 +15072,9 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
static
void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
- llvm::SmallSet<CXXConstructorDecl*, 4> &Valid,
- llvm::SmallSet<CXXConstructorDecl*, 4> &Invalid,
- llvm::SmallSet<CXXConstructorDecl*, 4> &Current,
+ llvm::SmallPtrSet<CXXConstructorDecl*, 4> &Valid,
+ llvm::SmallPtrSet<CXXConstructorDecl*, 4> &Invalid,
+ llvm::SmallPtrSet<CXXConstructorDecl*, 4> &Current,
Sema &S) {
if (Ctor->isInvalidDecl())
return;
@@ -14730,7 +15136,7 @@ void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
void Sema::CheckDelegatingCtorCycles() {
- llvm::SmallSet<CXXConstructorDecl*, 4> Valid, Invalid, Current;
+ llvm::SmallPtrSet<CXXConstructorDecl*, 4> Valid, Invalid, Current;
for (DelegatingCtorDeclsType::iterator
I = DelegatingCtorDecls.begin(ExternalSource),
@@ -14738,14 +15144,12 @@ void Sema::CheckDelegatingCtorCycles() {
I != E; ++I)
DelegatingCycleHelper(*I, Valid, Invalid, Current, *this);
- for (llvm::SmallSet<CXXConstructorDecl *, 4>::iterator CI = Invalid.begin(),
- CE = Invalid.end();
- CI != CE; ++CI)
+ for (auto CI = Invalid.begin(), CE = Invalid.end(); CI != CE; ++CI)
(*CI)->setInvalidDecl();
}
namespace {
- /// \brief AST visitor that finds references to the 'this' expression.
+ /// AST visitor that finds references to the 'this' expression.
class FindCXXThisExpr : public RecursiveASTVisitor<FindCXXThisExpr> {
Sema &S;
@@ -14815,7 +15219,9 @@ bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) {
case EST_None:
break;
- case EST_ComputedNoexcept:
+ case EST_DependentNoexcept:
+ case EST_NoexceptFalse:
+ case EST_NoexceptTrue:
if (!Finder.TraverseStmt(Proto->getNoexceptExpr()))
return true;
LLVM_FALLTHROUGH;
@@ -14912,31 +15318,17 @@ void Sema::checkExceptionSpecification(
return;
}
- if (EST == EST_ComputedNoexcept) {
- // If an error occurred, there's no expression here.
- if (NoexceptExpr) {
- assert((NoexceptExpr->isTypeDependent() ||
- NoexceptExpr->getType()->getCanonicalTypeUnqualified() ==
- Context.BoolTy) &&
- "Parser should have made sure that the expression is boolean");
- if (IsTopLevel && NoexceptExpr &&
- DiagnoseUnexpandedParameterPack(NoexceptExpr)) {
- ESI.Type = EST_BasicNoexcept;
- return;
- }
-
- if (!NoexceptExpr->isValueDependent()) {
- ExprResult Result = VerifyIntegerConstantExpression(
- NoexceptExpr, nullptr, diag::err_noexcept_needs_constant_expression,
- /*AllowFold*/ false);
- if (Result.isInvalid()) {
- ESI.Type = EST_BasicNoexcept;
- return;
- }
- NoexceptExpr = Result.get();
- }
- ESI.NoexceptExpr = NoexceptExpr;
+ if (isComputedNoexcept(EST)) {
+ assert((NoexceptExpr->isTypeDependent() ||
+ NoexceptExpr->getType()->getCanonicalTypeUnqualified() ==
+ Context.BoolTy) &&
+ "Parser should have made sure that the expression is boolean");
+ if (IsTopLevel && DiagnoseUnexpandedParameterPack(NoexceptExpr)) {
+ ESI.Type = EST_BasicNoexcept;
+ return;
}
+
+ ESI.NoexceptExpr = NoexceptExpr;
return;
}
}
@@ -14981,11 +15373,11 @@ void Sema::actOnDelayedExceptionSpecification(Decl *MethodD,
/// HandleMSProperty - Analyze a __declspec(property) field of a C++ class.
///
MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
- SourceLocation DeclStart,
- Declarator &D, Expr *BitWidth,
+ SourceLocation DeclStart, Declarator &D,
+ Expr *BitWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
- AttributeList *MSPropertyAttr) {
+ const ParsedAttr &MSPropertyAttr) {
IdentifierInfo *II = D.getIdentifier();
if (!II) {
Diag(DeclStart, diag::err_anonymous_property);
@@ -15048,7 +15440,7 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
PrevDecl = nullptr;
SourceLocation TSSL = D.getLocStart();
- const AttributeList::PropertyData &Data = MSPropertyAttr->getPropertyData();
+ const ParsedAttr::PropertyData &Data = MSPropertyAttr.getPropertyData();
MSPropertyDecl *NewPD = MSPropertyDecl::Create(
Context, Record, Loc, II, T, TInfo, TSSL, Data.GetterId, Data.SetterId);
ProcessDeclAttributes(TUScope, NewPD, D);
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index abbdc9574b8c..e1b033ea8282 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -156,23 +156,23 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
Diag(Overridden->getLocation(),
diag::note_related_result_type_overridden);
}
- if (getLangOpts().ObjCAutoRefCount) {
- Diags.setSeverity(diag::warn_nsreturns_retained_attribute_mismatch,
- diag::Severity::Error, SourceLocation());
- Diags.setSeverity(diag::warn_nsconsumed_attribute_mismatch,
- diag::Severity::Error, SourceLocation());
- }
if ((NewMethod->hasAttr<NSReturnsRetainedAttr>() !=
Overridden->hasAttr<NSReturnsRetainedAttr>())) {
Diag(NewMethod->getLocation(),
- diag::warn_nsreturns_retained_attribute_mismatch) << 1;
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_nsreturns_retained_attribute_mismatch
+ : diag::warn_nsreturns_retained_attribute_mismatch)
+ << 1;
Diag(Overridden->getLocation(), diag::note_previous_decl) << "method";
}
if ((NewMethod->hasAttr<NSReturnsNotRetainedAttr>() !=
Overridden->hasAttr<NSReturnsNotRetainedAttr>())) {
Diag(NewMethod->getLocation(),
- diag::warn_nsreturns_retained_attribute_mismatch) << 0;
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_nsreturns_retained_attribute_mismatch
+ : diag::warn_nsreturns_retained_attribute_mismatch)
+ << 0;
Diag(Overridden->getLocation(), diag::note_previous_decl) << "method";
}
@@ -185,7 +185,10 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
ParmVarDecl *newDecl = (*ni);
if (newDecl->hasAttr<NSConsumedAttr>() !=
oldDecl->hasAttr<NSConsumedAttr>()) {
- Diag(newDecl->getLocation(), diag::warn_nsconsumed_attribute_mismatch);
+ Diag(newDecl->getLocation(),
+ getLangOpts().ObjCAutoRefCount
+ ? diag::err_nsconsumed_attribute_mismatch
+ : diag::warn_nsconsumed_attribute_mismatch);
Diag(oldDecl->getLocation(), diag::note_previous_decl) << "parameter";
}
@@ -199,7 +202,7 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
}
}
-/// \brief Check a method declaration for compatibility with the Objective-C
+/// Check a method declaration for compatibility with the Objective-C
/// ARC conventions.
bool Sema::CheckARCMethodDecl(ObjCMethodDecl *method) {
ObjCMethodFamily family = method->getMethodFamily();
@@ -263,12 +266,20 @@ static void DiagnoseObjCImplementedDeprecations(Sema &S, const NamedDecl *ND,
if (!ND)
return;
bool IsCategory = false;
- AvailabilityResult Availability = ND->getAvailability();
+ StringRef RealizedPlatform;
+ AvailabilityResult Availability = ND->getAvailability(
+ /*Message=*/nullptr, /*EnclosingVersion=*/VersionTuple(),
+ &RealizedPlatform);
if (Availability != AR_Deprecated) {
if (isa<ObjCMethodDecl>(ND)) {
if (Availability != AR_Unavailable)
return;
- // Warn about implementing unavailable methods.
+ if (RealizedPlatform.empty())
+ RealizedPlatform = S.Context.getTargetInfo().getPlatformName();
+ // Warn about implementing unavailable methods, unless the
+ // unavailability is for an app extension.
+ if (RealizedPlatform.endswith("_app_extension"))
+ return;
S.Diag(ImplLoc, diag::warn_unavailable_def);
S.Diag(ND->getLocation(), diag::note_method_declared_at)
<< ND->getDeclName();
@@ -338,6 +349,13 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
if (!MDecl)
return;
+ QualType ResultType = MDecl->getReturnType();
+ if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
+ !MDecl->isInvalidDecl() &&
+ RequireCompleteType(MDecl->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result))
+ MDecl->setInvalidDecl();
+
// Allow all of Sema to see that we are entering a method definition.
PushDeclContext(FnBodyScope, MDecl);
PushFunctionScope();
@@ -930,16 +948,14 @@ static bool checkTypeParamListConsistency(Sema &S,
return false;
}
-Decl *Sema::
-ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
- IdentifierInfo *ClassName, SourceLocation ClassLoc,
- ObjCTypeParamList *typeParamList,
- IdentifierInfo *SuperName, SourceLocation SuperLoc,
- ArrayRef<ParsedType> SuperTypeArgs,
- SourceRange SuperTypeArgsRange,
- Decl * const *ProtoRefs, unsigned NumProtoRefs,
- const SourceLocation *ProtoLocs,
- SourceLocation EndProtoLoc, AttributeList *AttrList) {
+Decl *Sema::ActOnStartClassInterface(
+ Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
+ IdentifierInfo *SuperName, SourceLocation SuperLoc,
+ ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
+ Decl *const *ProtoRefs, unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
+ const ParsedAttributesView &AttrList) {
assert(ClassName && "Missing class identifier");
// Check for another declaration kind with the same name.
@@ -1024,9 +1040,8 @@ ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
IDecl->setInvalidDecl();
}
}
-
- if (AttrList)
- ProcessDeclAttributeList(TUScope, IDecl, AttrList);
+
+ ProcessDeclAttributeList(TUScope, IDecl, AttrList);
AddPragmaAttributes(TUScope, IDecl);
PushOnScopeChains(IDecl, TUScope);
@@ -1165,15 +1180,11 @@ bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
return res;
}
-Decl *
-Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
- IdentifierInfo *ProtocolName,
- SourceLocation ProtocolLoc,
- Decl * const *ProtoRefs,
- unsigned NumProtoRefs,
- const SourceLocation *ProtoLocs,
- SourceLocation EndProtoLoc,
- AttributeList *AttrList) {
+Decl *Sema::ActOnStartProtocolInterface(
+ SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
+ SourceLocation ProtocolLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
+ const ParsedAttributesView &AttrList) {
bool err = false;
// FIXME: Deal with AttrList.
assert(ProtocolName && "Missing protocol identifier");
@@ -1192,6 +1203,11 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
ProtocolLoc, AtProtoInterfaceLoc,
/*PrevDecl=*/nullptr);
+
+ // If we are using modules, add the decl to the context in order to
+ // serialize something meaningful.
+ if (getLangOpts().Modules)
+ PushOnScopeChains(PDecl, TUScope);
PDecl->startDefinition();
} else {
if (PrevDecl) {
@@ -1211,9 +1227,8 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
PushOnScopeChains(PDecl, TUScope);
PDecl->startDefinition();
}
-
- if (AttrList)
- ProcessDeclAttributeList(TUScope, PDecl, AttrList);
+
+ ProcessDeclAttributeList(TUScope, PDecl, AttrList);
AddPragmaAttributes(TUScope, PDecl);
// Merge attributes from previous declarations.
@@ -1538,20 +1553,18 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
DS.SetRangeEnd(loc);
// Form the declarator.
- Declarator D(DS, Declarator::TypeNameContext);
+ Declarator D(DS, DeclaratorContext::TypeNameContext);
// If we have a typedef of an Objective-C class type that is missing a '*',
// add the '*'.
if (type->getAs<ObjCInterfaceType>()) {
SourceLocation starLoc = getLocForEndOfToken(loc);
- ParsedAttributes parsedAttrs(attrFactory);
D.AddTypeInfo(DeclaratorChunk::getPointer(/*typeQuals=*/0, starLoc,
SourceLocation(),
SourceLocation(),
SourceLocation(),
SourceLocation(),
SourceLocation()),
- parsedAttrs,
starLoc);
// Diagnose the missing '*'.
@@ -1729,7 +1742,7 @@ void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
Sema::DeclGroupPtrTy
Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
- AttributeList *attrList) {
+ const ParsedAttributesView &attrList) {
SmallVector<Decl *, 8> DeclsInGroup;
for (const IdentifierLocPair &IdentPair : IdentList) {
IdentifierInfo *Ident = IdentPair.first;
@@ -1742,9 +1755,8 @@ Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
PushOnScopeChains(PDecl, TUScope);
CheckObjCDeclScope(PDecl);
-
- if (attrList)
- ProcessDeclAttributeList(TUScope, PDecl, attrList);
+
+ ProcessDeclAttributeList(TUScope, PDecl, attrList);
AddPragmaAttributes(TUScope, PDecl);
if (PrevDecl)
@@ -1756,17 +1768,13 @@ Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
return BuildDeclaratorGroup(DeclsInGroup);
}
-Decl *Sema::
-ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
- IdentifierInfo *ClassName, SourceLocation ClassLoc,
- ObjCTypeParamList *typeParamList,
- IdentifierInfo *CategoryName,
- SourceLocation CategoryLoc,
- Decl * const *ProtoRefs,
- unsigned NumProtoRefs,
- const SourceLocation *ProtoLocs,
- SourceLocation EndProtoLoc,
- AttributeList *AttrList) {
+Decl *Sema::ActOnStartCategoryInterface(
+ SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
+ IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
+ Decl *const *ProtoRefs, unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
+ const ParsedAttributesView &AttrList) {
ObjCCategoryDecl *CDecl;
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
@@ -1832,6 +1840,12 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
// FIXME: PushOnScopeChains?
CurContext->addDecl(CDecl);
+ // Process the attributes before looking at protocols to ensure that the
+ // availability attribute is attached to the category to provide availability
+ // checking for protocol uses.
+ ProcessDeclAttributeList(TUScope, CDecl, AttrList);
+ AddPragmaAttributes(TUScope, CDecl);
+
if (NumProtoRefs) {
diagnoseUseOfProtocols(*this, CDecl, (ObjCProtocolDecl*const*)ProtoRefs,
NumProtoRefs, ProtoLocs);
@@ -1843,10 +1857,6 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
NumProtoRefs, Context);
}
- if (AttrList)
- ProcessDeclAttributeList(TUScope, CDecl, AttrList);
- AddPragmaAttributes(TUScope, CDecl);
-
CheckObjCDeclScope(CDecl);
return ActOnObjCContainerStartDefinition(CDecl);
}
@@ -2162,17 +2172,9 @@ static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
unsigned DiagID,
NamedDecl *NeededFor = nullptr) {
// No point warning no definition of method which is 'unavailable'.
- switch (method->getAvailability()) {
- case AR_Available:
- case AR_Deprecated:
- break;
-
- // Don't warn about unavailable or not-yet-introduced methods.
- case AR_NotYetIntroduced:
- case AR_Unavailable:
+ if (method->getAvailability() == AR_Unavailable)
return;
- }
-
+
// FIXME: For now ignore 'IncompleteImpl'.
// Previously we grouped all unimplemented methods under a single
// warning, but some users strongly voiced that they would prefer
@@ -2717,7 +2719,7 @@ static void CheckProtocolMethodDefs(Sema &S,
// This is because method will be implemented in the primary class
// or one of its super class implementation.
- // Ugly, but necessary. Method declared in protcol might have
+ // Ugly, but necessary. A method declared in a protocol might
// have been synthesized due to a property declared in the class which
// uses the protocol.
if (ObjCMethodDecl *MethodInClass =
@@ -3345,7 +3347,7 @@ void Sema::addMethodToGlobalList(ObjCMethodList *List,
Previous->setNext(new (Mem) ObjCMethodList(Method));
}
-/// \brief Read the contents of the method pool for a given selector from
+/// Read the contents of the method pool for a given selector from
/// external storage.
void Sema::ReadMethodPool(Selector Sel) {
assert(ExternalSource && "We need an external AST source");
@@ -3427,7 +3429,7 @@ static bool FilterMethodsByTypeBound(ObjCMethodDecl *Method,
MethodInterface->isSuperClassOf(BoundInterface) ||
BoundInterface->isSuperClassOf(MethodInterface);
}
- llvm_unreachable("unknow method context");
+ llvm_unreachable("unknown method context");
}
/// We first select the type of the method: Instance or Factory, then collect
@@ -3859,9 +3861,9 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
assert(AtEnd.isValid() && "Invalid location for '@end'");
- ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
- Decl *ClassDecl = cast<Decl>(OCD);
-
+ auto *OCD = cast<ObjCContainerDecl>(CurContext);
+ Decl *ClassDecl = OCD;
+
bool isInterfaceDeclKind =
isa<ObjCInterfaceDecl>(ClassDecl) || isa<ObjCCategoryDecl>(ClassDecl)
|| isa<ObjCProtocolDecl>(ClassDecl);
@@ -4084,7 +4086,7 @@ CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
return (Decl::ObjCDeclQualifier) (unsigned) PQTVal;
}
-/// \brief Check whether the declared result type of the given Objective-C
+/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
///
static Sema::ResultTypeCompatibilityKind
@@ -4130,7 +4132,7 @@ class OverrideSearch {
public:
Sema &S;
ObjCMethodDecl *Method;
- llvm::SmallPtrSet<ObjCMethodDecl*, 4> Overridden;
+ llvm::SmallSetVector<ObjCMethodDecl*, 4> Overridden;
bool Recursive;
public:
@@ -4167,7 +4169,7 @@ public:
}
}
- typedef llvm::SmallPtrSetImpl<ObjCMethodDecl*>::iterator iterator;
+ typedef decltype(Overridden)::iterator iterator;
iterator begin() const { return Overridden.begin(); }
iterator end() const { return Overridden.end(); }
@@ -4335,10 +4337,6 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
// Then merge the declarations.
mergeObjCMethodDecls(ObjCMethod, overridden);
- }
-
- for (ObjCMethodDecl *overridden : overrides) {
- CheckObjCMethodOverride(ObjCMethod, overridden);
if (ObjCMethod->isImplicit() && overridden->isImplicit())
continue; // Conflicting properties are detected elsewhere.
@@ -4502,25 +4500,21 @@ static void checkObjCMethodX86VectorTypes(Sema &SemaRef,
}
Decl *Sema::ActOnMethodDeclaration(
- Scope *S,
- SourceLocation MethodLoc, SourceLocation EndLoc,
- tok::TokenKind MethodType,
- ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
- ArrayRef<SourceLocation> SelectorLocs,
- Selector Sel,
+ Scope *S, SourceLocation MethodLoc, SourceLocation EndLoc,
+ tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
+ ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
- ObjCArgInfo *ArgInfo,
- DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
- AttributeList *AttrList, tok::ObjCKeywordKind MethodDeclKind,
+ ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
+ unsigned CNumArgs, // c-style args
+ const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodDeclKind,
bool isVariadic, bool MethodDefinition) {
// Make sure we can establish a context for the method.
if (!CurContext->isObjCContainer()) {
Diag(MethodLoc, diag::err_missing_method_context);
return nullptr;
}
- ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
- Decl *ClassDecl = cast<Decl>(OCD);
+ Decl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
QualType resultDeclType;
bool HasRelatedResultType = false;
@@ -4621,8 +4615,7 @@ Decl *Sema::ActOnMethodDeclaration(
ObjCMethod->setObjCDeclQualifier(
CvtQTToAstBitMask(ReturnQT.getObjCDeclQualifier()));
- if (AttrList)
- ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList);
+ ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList);
AddPragmaAttributes(TUScope, ObjCMethod);
// Add the method now.
@@ -4726,6 +4719,17 @@ Decl *Sema::ActOnMethodDeclaration(
Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
checkObjCMethodX86VectorTypes(*this, ObjCMethod);
+ // A '+load' method cannot have availability attributes. It gets called on
+ // startup, so it has to have the availability of the deployment target.
+ if (const auto *attr = ObjCMethod->getAttr<AvailabilityAttr>()) {
+ if (ObjCMethod->isClassMethod() &&
+ ObjCMethod->getSelector().getAsString() == "load") {
+ Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
+ << 0;
+ ObjCMethod->dropAttr<AvailabilityAttr>();
+ }
+ }
+
ActOnDocumentableDecl(ObjCMethod);
return ObjCMethod;
@@ -4769,7 +4773,7 @@ void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
Context.DeepCollectObjCIvars(Class, true, Ivars);
// For each ivar, create a fresh ObjCAtDefsFieldDecl.
for (unsigned i = 0; i < Ivars.size(); i++) {
- const FieldDecl* ID = cast<FieldDecl>(Ivars[i]);
+ const FieldDecl* ID = Ivars[i];
RecordDecl *Record = dyn_cast<RecordDecl>(TagD);
Decl *FD = ObjCAtDefsFieldDecl::Create(Context, Record,
/*FIXME: StartL=*/ID->getLocation(),
@@ -4784,13 +4788,13 @@ void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
D != Decls.end(); ++D) {
FieldDecl *FD = cast<FieldDecl>(*D);
if (getLangOpts().CPlusPlus)
- PushOnScopeChains(cast<FieldDecl>(FD), S);
+ PushOnScopeChains(FD, S);
else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD))
Record->addDecl(FD);
}
}
-/// \brief Build a type-check a new Objective-C exception variable declaration.
+/// Build and type-check a new Objective-C exception variable declaration.
VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
SourceLocation StartLoc,
SourceLocation IdLoc,
@@ -4811,12 +4815,17 @@ VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
// Don't do any further checking.
} else if (T->isDependentType()) {
// Okay: we don't know what this type will instantiate to.
- } else if (!T->isObjCObjectPointerType()) {
- Invalid = true;
- Diag(IdLoc ,diag::err_catch_param_not_objc_type);
} else if (T->isObjCQualifiedIdType()) {
Invalid = true;
Diag(IdLoc, diag::err_illegal_qualifiers_on_catch_parm);
+ } else if (T->isObjCIdType()) {
+ // Okay: we don't know what this type will instantiate to.
+ } else if (!T->isObjCObjectPointerType()) {
+ Invalid = true;
+ Diag(IdLoc, diag::err_catch_param_not_objc_type);
+ } else if (!T->getAs<ObjCObjectPointerType>()->getInterfaceType()) {
+ Invalid = true;
+ Diag(IdLoc, diag::err_catch_param_not_objc_type);
}
VarDecl *New = VarDecl::Create(Context, CurContext, StartLoc, IdLoc, Id,
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 67d1b02d1fca..df5bc9b82b96 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -76,6 +76,29 @@ bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
.Default(false);
}
+ExprResult Sema::ActOnNoexceptSpec(SourceLocation NoexceptLoc,
+ Expr *NoexceptExpr,
+ ExceptionSpecificationType &EST) {
+ // FIXME: This is bogus, a noexcept expression is not a condition.
+ ExprResult Converted = CheckBooleanCondition(NoexceptLoc, NoexceptExpr);
+ if (Converted.isInvalid())
+ return Converted;
+
+ if (Converted.get()->isValueDependent()) {
+ EST = EST_DependentNoexcept;
+ return Converted;
+ }
+
+ llvm::APSInt Result;
+ Converted = VerifyIntegerConstantExpression(
+ Converted.get(), &Result,
+ diag::err_noexcept_needs_constant_expression,
+ /*AllowFold*/ false);
+ if (!Converted.isInvalid())
+ EST = !Result ? EST_NoexceptFalse : EST_NoexceptTrue;
+ return Converted;
+}
+
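How the three new specification types fall out of ActOnNoexceptSpec, sketched on hypothetical declarations:

void F() noexcept(sizeof(int) == 4); // constant, true  -> EST_NoexceptTrue
void G() noexcept(false);            // constant, false -> EST_NoexceptFalse
template <typename T>
void H() noexcept(noexcept(T()));    // value-dependent -> EST_DependentNoexcept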
/// CheckSpecifiedExceptionType - Check if the given type is valid in an
/// exception specification. Incomplete types, or pointers to incomplete types
/// other than void are not allowed.
@@ -203,8 +226,8 @@ Sema::UpdateExceptionSpec(FunctionDecl *FD,
if (auto *Listener = getASTMutationListener())
Listener->ResolvedExceptionSpec(FD);
- for (auto *Redecl : FD->redecls())
- Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
+ for (FunctionDecl *Redecl : FD->redecls())
+ Context.adjustExceptionSpec(Redecl, ESI);
}
static bool CheckEquivalentExceptionSpecImpl(
@@ -309,13 +332,19 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
+ // FIXME: What if the exceptions are described in terms of the old
+ // prototype's parameters?
ESI.Exceptions = OldProto->exceptions();
}
- if (ESI.Type == EST_ComputedNoexcept) {
- // For computed noexcept, we can't just take the expression from the old
- // prototype. It likely contains references to the old prototype's
- // parameters.
+ if (ESI.Type == EST_NoexceptFalse)
+ ESI.Type = EST_None;
+ if (ESI.Type == EST_NoexceptTrue)
+ ESI.Type = EST_BasicNoexcept;
+
+ // For dependent noexcept, we can't just take the expression from the old
+ // prototype. It likely contains references to the old prototype's parameters.
+ if (ESI.Type == EST_DependentNoexcept) {
New->setInvalidDecl();
} else {
// Update the type of the function with the appropriate exception
@@ -325,12 +354,12 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
}
- if (getLangOpts().MicrosoftExt && ESI.Type != EST_ComputedNoexcept) {
+ if (getLangOpts().MicrosoftExt && ESI.Type != EST_DependentNoexcept) {
// Allow missing exception specifications in redeclarations as an extension.
DiagID = diag::ext_ms_missing_exception_specification;
ReturnValueOnError = false;
} else if (New->isReplaceableGlobalAllocationFunction() &&
- ESI.Type != EST_ComputedNoexcept) {
+ ESI.Type != EST_DependentNoexcept) {
// Allow missing exception specifications in redeclarations as an extension,
// when declaring a replaceable global allocation function.
DiagID = diag::ext_missing_exception_specification;
@@ -367,7 +396,9 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
OS << "noexcept";
break;
- case EST_ComputedNoexcept:
+ case EST_DependentNoexcept:
+ case EST_NoexceptFalse:
+ case EST_NoexceptTrue:
OS << "noexcept(";
assert(OldProto->getNoexceptExpr() != nullptr && "Expected non-null Expr");
OldProto->getNoexceptExpr()->printPretty(OS, nullptr, getPrintingPolicy());
@@ -478,63 +509,62 @@ static bool CheckEquivalentExceptionSpecImpl(
!isUnresolvedExceptionSpec(NewEST) &&
"Shouldn't see unknown exception specifications here");
- // Shortcut the case where both have no spec.
- if (OldEST == EST_None && NewEST == EST_None)
- return false;
+ CanThrowResult OldCanThrow = Old->canThrow();
+ CanThrowResult NewCanThrow = New->canThrow();
- FunctionProtoType::NoexceptResult OldNR = Old->getNoexceptSpec(S.Context);
- FunctionProtoType::NoexceptResult NewNR = New->getNoexceptSpec(S.Context);
- if (OldNR == FunctionProtoType::NR_BadNoexcept ||
- NewNR == FunctionProtoType::NR_BadNoexcept)
+ // Any non-throwing specifications are compatible.
+ if (OldCanThrow == CT_Cannot && NewCanThrow == CT_Cannot)
return false;
- // Dependent noexcept specifiers are compatible with each other, but nothing
- // else.
- // One noexcept is compatible with another if the argument is the same
- if (OldNR == NewNR &&
- OldNR != FunctionProtoType::NR_NoNoexcept &&
- NewNR != FunctionProtoType::NR_NoNoexcept)
- return false;
- if (OldNR != NewNR &&
- OldNR != FunctionProtoType::NR_NoNoexcept &&
- NewNR != FunctionProtoType::NR_NoNoexcept) {
- S.Diag(NewLoc, DiagID);
- if (NoteID.getDiagID() != 0 && OldLoc.isValid())
- S.Diag(OldLoc, NoteID);
- return true;
+ // Any throws-anything specifications are usually compatible.
+ if (OldCanThrow == CT_Can && OldEST != EST_Dynamic &&
+ NewCanThrow == CT_Can && NewEST != EST_Dynamic) {
+ // The exception is that the absence of an exception specification only
+ // matches noexcept(false) for functions, as described above.
+ if (!AllowNoexceptAllMatchWithNoSpec &&
+ ((OldEST == EST_None && NewEST == EST_NoexceptFalse) ||
+ (OldEST == EST_NoexceptFalse && NewEST == EST_None))) {
+ // This is the disallowed case.
+ } else {
+ return false;
+ }
}
- // The MS extension throw(...) is compatible with itself.
- if (OldEST == EST_MSAny && NewEST == EST_MSAny)
- return false;
-
- // It's also compatible with no spec.
- if ((OldEST == EST_None && NewEST == EST_MSAny) ||
- (OldEST == EST_MSAny && NewEST == EST_None))
- return false;
+ // C++14 [except.spec]p3:
+ // Two exception-specifications are compatible if [...] both have the form
+ // noexcept(constant-expression) and the constant-expressions are equivalent
+ if (OldEST == EST_DependentNoexcept && NewEST == EST_DependentNoexcept) {
+ llvm::FoldingSetNodeID OldFSN, NewFSN;
+ Old->getNoexceptExpr()->Profile(OldFSN, S.Context, true);
+ New->getNoexceptExpr()->Profile(NewFSN, S.Context, true);
+ if (OldFSN == NewFSN)
+ return false;
+ }
- // It's also compatible with noexcept(false).
- if (OldEST == EST_MSAny && NewNR == FunctionProtoType::NR_Throw)
- return false;
- if (NewEST == EST_MSAny && OldNR == FunctionProtoType::NR_Throw)
- return false;
+ // Dynamic exception specifications with the same set of adjusted types
+ // are compatible.
+ if (OldEST == EST_Dynamic && NewEST == EST_Dynamic) {
+ bool Success = true;
+ // Both have a dynamic exception spec. Collect the first set, then compare
+ // to the second.
+ llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes;
+ for (const auto &I : Old->exceptions())
+ OldTypes.insert(S.Context.getCanonicalType(I).getUnqualifiedType());
+
+ for (const auto &I : New->exceptions()) {
+ CanQualType TypePtr = S.Context.getCanonicalType(I).getUnqualifiedType();
+ if (OldTypes.count(TypePtr))
+ NewTypes.insert(TypePtr);
+ else {
+ Success = false;
+ break;
+ }
+ }
- // As described above, noexcept(false) matches no spec only for functions.
- if (AllowNoexceptAllMatchWithNoSpec) {
- if (OldEST == EST_None && NewNR == FunctionProtoType::NR_Throw)
- return false;
- if (NewEST == EST_None && OldNR == FunctionProtoType::NR_Throw)
+ if (Success && OldTypes.size() == NewTypes.size())
return false;
}
- // Any non-throwing specifications are compatible.
- bool OldNonThrowing = OldNR == FunctionProtoType::NR_Nothrow ||
- OldEST == EST_DynamicNone;
- bool NewNonThrowing = NewNR == FunctionProtoType::NR_Nothrow ||
- NewEST == EST_DynamicNone;
- if (OldNonThrowing && NewNonThrowing)
- return false;
-
// As a special compatibility feature, under C++0x we accept no spec and
// throw(std::bad_alloc) as equivalent for operator new and operator new[].
// This is because the implicit declaration changed, but old code would break.
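
By way of illustration (declarations assumed, not from the patch): under the
rules above, dependent noexcept expressions are compatible only when they
profile identically, and dynamic specifications only when they name the same
set of adjusted types.

    template <typename T> void f(T) noexcept(sizeof(T) == 4);
    template <typename T> void f(T) noexcept(sizeof(T) == 4);  // OK: equivalent
    template <typename T> void g(T) noexcept(sizeof(T) == 4);
    template <typename T> void g(T) noexcept(sizeof(T) == 8);  // error

    struct A {}; struct B {};
    void h() throw(A, B);
    void h() throw(B, A);  // OK: same set of adjusted types
    void k() throw(A);
    void k() throw(A, B);  // error: sets differ
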
@@ -560,54 +590,24 @@ static bool CheckEquivalentExceptionSpecImpl(
}
}
- // At this point, the only remaining valid case is two matching dynamic
- // specifications. We return here unless both specifications are dynamic.
- if (OldEST != EST_Dynamic || NewEST != EST_Dynamic) {
- if (MissingExceptionSpecification && Old->hasExceptionSpec() &&
- !New->hasExceptionSpec()) {
- // The old type has an exception specification of some sort, but
- // the new type does not.
- *MissingExceptionSpecification = true;
-
- if (MissingEmptyExceptionSpecification && OldNonThrowing) {
- // The old type has a throw() or noexcept(true) exception specification
- // and the new type has no exception specification, and the caller asked
- // to handle this itself.
- *MissingEmptyExceptionSpecification = true;
- }
-
- return true;
+ // If the caller wants to handle the case that the new function is
+ // incompatible due to a missing exception specification, let it.
+ if (MissingExceptionSpecification && OldEST != EST_None &&
+ NewEST == EST_None) {
+ // The old type has an exception specification of some sort, but
+ // the new type does not.
+ *MissingExceptionSpecification = true;
+
+ if (MissingEmptyExceptionSpecification && OldCanThrow == CT_Cannot) {
+ // The old type has a throw() or noexcept(true) exception specification
+ // and the new type has no exception specification, and the caller asked
+ // to handle this itself.
+ *MissingEmptyExceptionSpecification = true;
}
- S.Diag(NewLoc, DiagID);
- if (NoteID.getDiagID() != 0 && OldLoc.isValid())
- S.Diag(OldLoc, NoteID);
return true;
}
- assert(OldEST == EST_Dynamic && NewEST == EST_Dynamic &&
- "Exception compatibility logic error: non-dynamic spec slipped through.");
-
- bool Success = true;
- // Both have a dynamic exception spec. Collect the first set, then compare
- // to the second.
- llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes;
- for (const auto &I : Old->exceptions())
- OldTypes.insert(S.Context.getCanonicalType(I).getUnqualifiedType());
-
- for (const auto &I : New->exceptions()) {
- CanQualType TypePtr = S.Context.getCanonicalType(I).getUnqualifiedType();
- if (OldTypes.count(TypePtr))
- NewTypes.insert(TypePtr);
- else
- Success = false;
- }
-
- Success = Success && OldTypes.size() == NewTypes.size();
-
- if (Success) {
- return false;
- }
S.Diag(NewLoc, DiagID);
if (NoteID.getDiagID() != 0 && OldLoc.isValid())
S.Diag(OldLoc, NoteID);
@@ -626,6 +626,90 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
New, NewLoc);
}
+bool Sema::handlerCanCatch(QualType HandlerType, QualType ExceptionType) {
+ // [except.handle]p3:
+ // A handler is a match for an exception object of type E if:
+
+ // HandlerType must be ExceptionType or derived from it, or pointer or
+ // reference to such types.
+ const ReferenceType *RefTy = HandlerType->getAs<ReferenceType>();
+ if (RefTy)
+ HandlerType = RefTy->getPointeeType();
+
+ // -- the handler is of type cv T or cv T& and E and T are the same type
+ if (Context.hasSameUnqualifiedType(ExceptionType, HandlerType))
+ return true;
+
+ // FIXME: ObjC pointer types?
+ if (HandlerType->isPointerType() || HandlerType->isMemberPointerType()) {
+ if (RefTy && (!HandlerType.isConstQualified() ||
+ HandlerType.isVolatileQualified()))
+ return false;
+
+ // -- the handler is of type cv T or const T& where T is a pointer or
+ // pointer to member type and E is std::nullptr_t
+ if (ExceptionType->isNullPtrType())
+ return true;
+
+ // -- the handler is of type cv T or const T& where T is a pointer or
+ // pointer to member type and E is a pointer or pointer to member type
+ // that can be converted to T by one or more of
+ // -- a qualification conversion
+ // -- a function pointer conversion
+ bool LifetimeConv;
+ QualType Result;
+ // FIXME: Should we treat the exception as catchable if a lifetime
+ // conversion is required?
+ if (IsQualificationConversion(ExceptionType, HandlerType, false,
+ LifetimeConv) ||
+ IsFunctionConversion(ExceptionType, HandlerType, Result))
+ return true;
+
+ // -- a standard pointer conversion [...]
+ if (!ExceptionType->isPointerType() || !HandlerType->isPointerType())
+ return false;
+
+ // Handle the "qualification conversion" portion.
+ Qualifiers EQuals, HQuals;
+ ExceptionType = Context.getUnqualifiedArrayType(
+ ExceptionType->getPointeeType(), EQuals);
+ HandlerType = Context.getUnqualifiedArrayType(
+ HandlerType->getPointeeType(), HQuals);
+ if (!HQuals.compatiblyIncludes(EQuals))
+ return false;
+
+ if (HandlerType->isVoidType() && ExceptionType->isObjectType())
+ return true;
+
+ // The only remaining case is a derived-to-base conversion.
+ }
+
+ // -- the handler is of type cv T or cv T& and T is an unambiguous public
+ // base class of E
+ if (!ExceptionType->isRecordType() || !HandlerType->isRecordType())
+ return false;
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (!IsDerivedFrom(SourceLocation(), ExceptionType, HandlerType, Paths) ||
+ Paths.isAmbiguous(Context.getCanonicalType(HandlerType)))
+ return false;
+
+ // Do this check from a context without privileges.
+ switch (CheckBaseClassAccess(SourceLocation(), HandlerType, ExceptionType,
+ Paths.front(),
+ /*Diagnostic*/ 0,
+ /*ForceCheck*/ true,
+ /*ForceUnprivileged*/ true)) {
+ case AR_accessible: return true;
+ case AR_inaccessible: return false;
+ case AR_dependent:
+ llvm_unreachable("access check dependent for unprivileged context");
+ case AR_delayed:
+ llvm_unreachable("access check delayed in non-declaration");
+ }
+ llvm_unreachable("unexpected access check result");
+}
+
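
A rough illustration of what handlerCanCatch accepts (examples assumed, not
part of the patch):

    struct Base {};
    struct Derived : Base {};
    void demo() {
      try { throw Derived(); } catch (const Base &) {}      // unambiguous public base
      try { throw (int *)nullptr; } catch (const void *) {} // object pointer -> cv void*
      try { throw nullptr; } catch (int *) {}               // std::nullptr_t -> pointer
    }
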
/// CheckExceptionSpecSubset - Check whether the second function type's
/// exception specification is a subset (or equivalent) of the first function
/// type. This is used by override and pointer assignment checks.
@@ -656,62 +740,32 @@ bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
return false;
ExceptionSpecificationType SuperEST = Superset->getExceptionSpecType();
-
- // If superset contains everything, we're done.
- if (SuperEST == EST_None || SuperEST == EST_MSAny)
- return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
- Subset, SubLoc);
-
- // If there are dependent noexcept specs, assume everything is fine. Unlike
- // with the equivalency check, this is safe in this case, because we don't
- // want to merge declarations. Checks after instantiation will catch any
- // omissions we make here.
- // We also shortcut checking if a noexcept expression was bad.
-
- FunctionProtoType::NoexceptResult SuperNR =Superset->getNoexceptSpec(Context);
- if (SuperNR == FunctionProtoType::NR_BadNoexcept ||
- SuperNR == FunctionProtoType::NR_Dependent)
- return false;
-
- // Another case of the superset containing everything.
- if (SuperNR == FunctionProtoType::NR_Throw)
- return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
- Subset, SubLoc);
-
ExceptionSpecificationType SubEST = Subset->getExceptionSpecType();
-
assert(!isUnresolvedExceptionSpec(SuperEST) &&
!isUnresolvedExceptionSpec(SubEST) &&
"Shouldn't see unknown exception specifications here");
- // It does not. If the subset contains everything, we've failed.
- if (SubEST == EST_None || SubEST == EST_MSAny) {
- Diag(SubLoc, DiagID);
- if (NoteID.getDiagID() != 0)
- Diag(SuperLoc, NoteID);
- return true;
- }
-
- FunctionProtoType::NoexceptResult SubNR = Subset->getNoexceptSpec(Context);
- if (SubNR == FunctionProtoType::NR_BadNoexcept ||
- SubNR == FunctionProtoType::NR_Dependent)
+ // If there are dependent noexcept specs, assume everything is fine. Unlike
+ // with the equivalency check, this is safe in this case, because we don't
+ // want to merge declarations. Checks after instantiation will catch any
+ // omissions we make here.
+ if (SuperEST == EST_DependentNoexcept || SubEST == EST_DependentNoexcept)
return false;
- // Another case of the subset containing everything.
- if (SubNR == FunctionProtoType::NR_Throw) {
- Diag(SubLoc, DiagID);
- if (NoteID.getDiagID() != 0)
- Diag(SuperLoc, NoteID);
- return true;
- }
+ CanThrowResult SuperCanThrow = Superset->canThrow();
+ CanThrowResult SubCanThrow = Subset->canThrow();
- // If the subset contains nothing, we're done.
- if (SubEST == EST_DynamicNone || SubNR == FunctionProtoType::NR_Nothrow)
+ // If the superset contains everything or the subset contains nothing, we're
+ // done.
+ if ((SuperCanThrow == CT_Can && SuperEST != EST_Dynamic) ||
+ SubCanThrow == CT_Cannot)
return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
Subset, SubLoc);
- // Otherwise, if the superset contains nothing, we've failed.
- if (SuperEST == EST_DynamicNone || SuperNR == FunctionProtoType::NR_Nothrow) {
+ // If the subset contains everything or the superset contains nothing, we've
+ // failed.
+ if ((SubCanThrow == CT_Can && SubEST != EST_Dynamic) ||
+ SuperCanThrow == CT_Cannot) {
Diag(SubLoc, DiagID);
if (NoteID.getDiagID() != 0)
Diag(SuperLoc, NoteID);
@@ -722,75 +776,23 @@ bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
"Exception spec subset: non-dynamic case slipped through.");
// Neither contains everything or nothing. Do a proper comparison.
- for (const auto &SubI : Subset->exceptions()) {
- // Take one type from the subset.
- QualType CanonicalSubT = Context.getCanonicalType(SubI);
- // Unwrap pointers and references so that we can do checks within a class
- // hierarchy. Don't unwrap member pointers; they don't have hierarchy
- // conversions on the pointee.
- bool SubIsPointer = false;
- if (const ReferenceType *RefTy = CanonicalSubT->getAs<ReferenceType>())
- CanonicalSubT = RefTy->getPointeeType();
- if (const PointerType *PtrTy = CanonicalSubT->getAs<PointerType>()) {
- CanonicalSubT = PtrTy->getPointeeType();
- SubIsPointer = true;
- }
- bool SubIsClass = CanonicalSubT->isRecordType();
- CanonicalSubT = CanonicalSubT.getLocalUnqualifiedType();
+ for (QualType SubI : Subset->exceptions()) {
+ if (const ReferenceType *RefTy = SubI->getAs<ReferenceType>())
+ SubI = RefTy->getPointeeType();
- CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
- /*DetectVirtual=*/false);
-
- bool Contained = false;
// Make sure it's in the superset.
- for (const auto &SuperI : Superset->exceptions()) {
- QualType CanonicalSuperT = Context.getCanonicalType(SuperI);
- // SubT must be SuperT or derived from it, or pointer or reference to
- // such types.
- if (const ReferenceType *RefTy = CanonicalSuperT->getAs<ReferenceType>())
- CanonicalSuperT = RefTy->getPointeeType();
- if (SubIsPointer) {
- if (const PointerType *PtrTy = CanonicalSuperT->getAs<PointerType>())
- CanonicalSuperT = PtrTy->getPointeeType();
- else {
- continue;
- }
- }
- CanonicalSuperT = CanonicalSuperT.getLocalUnqualifiedType();
- // If the types are the same, move on to the next type in the subset.
- if (CanonicalSubT == CanonicalSuperT) {
+ bool Contained = false;
+ for (QualType SuperI : Superset->exceptions()) {
+ // [except.spec]p5:
+ // the target entity shall allow at least the exceptions allowed by the
+ // source
+ //
+ // We interpret this as meaning that a handler for some target type would
+ // catch an exception of each source type.
+ if (handlerCanCatch(SuperI, SubI)) {
Contained = true;
break;
}
-
- // Otherwise we need to check the inheritance.
- if (!SubIsClass || !CanonicalSuperT->isRecordType())
- continue;
-
- Paths.clear();
- if (!IsDerivedFrom(SubLoc, CanonicalSubT, CanonicalSuperT, Paths))
- continue;
-
- if (Paths.isAmbiguous(Context.getCanonicalType(CanonicalSuperT)))
- continue;
-
- // Do this check from a context without privileges.
- switch (CheckBaseClassAccess(SourceLocation(),
- CanonicalSuperT, CanonicalSubT,
- Paths.front(),
- /*Diagnostic*/ 0,
- /*ForceCheck*/ true,
- /*ForceUnprivileged*/ true)) {
- case AR_accessible: break;
- case AR_inaccessible: continue;
- case AR_dependent:
- llvm_unreachable("access check dependent for unprivileged context");
- case AR_delayed:
- llvm_unreachable("access check delayed in non-declaration");
- }
-
- Contained = true;
- break;
}
if (!Contained) {
Diag(SubLoc, DiagID);
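
For the override check, this subset relation means roughly the following
(example assumed, compiled as C++14 where dynamic exception specifications
are still valid):

    struct B {};
    struct D : B {};
    struct X {
      virtual void f() throw(B);
      virtual void g() throw(D);
    };
    struct Y : X {
      void f() throw(D) override;  // OK: a handler for B catches D
      void g() throw(B) override;  // error: a handler for D does not catch B
    };
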
@@ -994,7 +996,7 @@ static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D) {
if (!FT)
return CT_Can;
- return FT->isNothrow(S.Context) ? CT_Cannot : CT_Can;
+ return FT->canThrow();
}
static CanThrowResult canDynamicCastThrow(const CXXDynamicCastExpr *DC) {
@@ -1262,6 +1264,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::ImaginaryLiteralClass:
case Expr::ImplicitValueInitExprClass:
case Expr::IntegerLiteralClass:
+ case Expr::FixedPointLiteralClass:
case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::ObjCEncodeExprClass:
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 4746355e9800..60abd718e228 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -37,6 +37,7 @@
#include "clang/Sema/Designator.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
@@ -47,7 +48,7 @@
using namespace clang;
using namespace sema;
-/// \brief Determine whether the use of this declaration is valid, without
+/// Determine whether the use of this declaration is valid, without
/// emitting diagnostics.
bool Sema::CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid) {
// See if this is an auto-typed variable whose initializer we are parsing.
@@ -88,7 +89,7 @@ static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) {
}
}
-/// \brief Emit a note explaining that this function is deleted.
+/// Emit a note explaining that this function is deleted.
void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
assert(Decl->isDeleted());
@@ -116,7 +117,7 @@ void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
<< Decl << true;
}
-/// \brief Determine whether a FunctionDecl was ever declared with an
+/// Determine whether a FunctionDecl was ever declared with an
/// explicit storage class.
static bool hasAnyExplicitStorageClass(const FunctionDecl *D) {
for (auto I : D->redecls()) {
@@ -126,7 +127,7 @@ static bool hasAnyExplicitStorageClass(const FunctionDecl *D) {
return false;
}
-/// \brief Check whether we're in an extern inline function and referring to a
+/// Check whether we're in an extern inline function and referring to a
/// variable or function with internal linkage (C11 6.7.4p3).
///
/// This is only a warning because we used to silently accept this code, but
@@ -189,7 +190,7 @@ void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) {
}
}
-/// \brief Determine whether the use of this declaration is valid, and
+/// Determine whether the use of this declaration is valid, and
/// emit any corresponding diagnostics.
///
/// This routine diagnoses various problems with referencing
@@ -201,10 +202,11 @@ void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) {
/// \returns true if there was an error (this declaration cannot be
/// referenced), false otherwise.
///
-bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
+bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks) {
+ SourceLocation Loc = Locs.front();
if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
// If there were any diagnostics suppressed by template argument deduction,
// emit them now.
@@ -288,7 +290,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
return true;
}
- DiagnoseAvailabilityOfDecl(D, Loc, UnknownObjCClass, ObjCPropertyAccess,
+ DiagnoseAvailabilityOfDecl(D, Locs, UnknownObjCClass, ObjCPropertyAccess,
AvoidPartialAvailabilityChecks);
DiagnoseUnusedOfDecl(*this, D, Loc);
@@ -298,7 +300,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
return false;
}
-/// \brief Retrieve the message suffix that should be added to a
+/// Retrieve the message suffix that should be added to a
/// diagnostic complaining about the given function being deleted or
/// unavailable.
std::string Sema::getDeletedOrUnavailableSuffix(const FunctionDecl *FD) {
@@ -776,6 +778,9 @@ Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
return VAK_Valid;
}
+ if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct)
+ return VAK_Invalid;
+
if (Ty.isCXX98PODType(Context))
return VAK_Valid;
@@ -837,7 +842,10 @@ void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
break;
case VAK_Invalid:
- if (Ty->isObjCObjectType())
+ if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct)
+ Diag(E->getLocStart(),
+ diag::err_cannot_pass_non_trivial_c_struct_to_vararg) << Ty << CT;
+ else if (Ty->isObjCObjectType())
DiagRuntimeBehavior(
E->getLocStart(), nullptr,
PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
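
A sketch of the new rejection (assumes ARC, under which a C struct with a
__strong member is destructed non-trivially):

    struct Holder { __strong id obj; };
    void log_all(int n, ...);
    void use(struct Holder h) {
      log_all(1, h);  // error: cannot pass non-trivial C struct to variadic function
    }
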
@@ -909,7 +917,7 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
return E;
}
-/// \brief Converts an integer to complex float type. Helper function of
+/// Converts an integer to complex float type. Helper function of
/// UsualArithmeticConversions()
///
/// \return false if the integer expression is an integer type and is
@@ -934,7 +942,7 @@ static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
return false;
}
-/// \brief Handle arithmetic conversion with complex types. Helper function of
+/// Handle arithmetic conversion with complex types. Helper function of
/// UsualArithmeticConversions()
static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
ExprResult &RHS, QualType LHSType,
@@ -990,7 +998,7 @@ static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
return ResultType;
}
-/// \brief Handle arithmetic conversion from integer to float. Helper function
+/// Handle arithmetic conversion from integer to float. Helper function
/// of UsualArithmeticConversions()
static QualType handleIntToFloatConversion(Sema &S, ExprResult &FloatExpr,
ExprResult &IntExpr,
@@ -1021,7 +1029,7 @@ static QualType handleIntToFloatConversion(Sema &S, ExprResult &FloatExpr,
return result;
}
-/// \brief Handle arithmetic conversion with floating point types. Helper
+/// Handle arithmetic conversion with floating point types. Helper
/// function of UsualArithmeticConversions()
static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
ExprResult &RHS, QualType LHSType,
@@ -1059,7 +1067,7 @@ static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
/*convertFloat=*/!IsCompAssign);
}
-/// \brief Diagnose attempts to convert between __float128 and long double if
+/// Diagnose attempts to convert between __float128 and long double if
/// there is no support for such conversion. Helper function of
/// UsualArithmeticConversions().
static bool unsupportedTypeConversion(const Sema &S, QualType LHSType,
@@ -1092,13 +1100,12 @@ static bool unsupportedTypeConversion(const Sema &S, QualType LHSType,
Float128AndLongDouble |= (LHSElemType == S.Context.LongDoubleTy &&
RHSElemType == S.Context.Float128Ty);
- /* We've handled the situation where __float128 and long double have the same
- representation. The only other allowable conversion is if long double is
- really just double.
- */
+ // We've handled the situation where __float128 and long double have the same
+ // representation. We allow all conversions for all possible long double types
+ // except PPC's double double.
return Float128AndLongDouble &&
- (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) !=
- &llvm::APFloat::IEEEdouble());
+ (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) ==
+ &llvm::APFloat::PPCDoubleDouble());
}
typedef ExprResult PerformCastFn(Sema &S, Expr *operand, QualType toType);
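
The relaxed check is target-dependent; roughly (example assumed):

    void mix() {
      __float128 q = 1.0;
      long double ld = 2.0L;
      auto r = q + ld;  // accepted unless long double is PPC double-double
    }
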
@@ -1116,7 +1123,7 @@ ExprResult doComplexIntegralCast(Sema &S, Expr *op, QualType toType) {
}
}
-/// \brief Handle integer arithmetic conversions. Helper function of
+/// Handle integer arithmetic conversions. Helper function of
/// UsualArithmeticConversions()
template <PerformCastFn doLHSCast, PerformCastFn doRHSCast>
static QualType handleIntegerConversion(Sema &S, ExprResult &LHS,
@@ -1167,7 +1174,7 @@ static QualType handleIntegerConversion(Sema &S, ExprResult &LHS,
}
}
-/// \brief Handle conversions with GCC complex int extension. Helper function
+/// Handle conversions with GCC complex int extension. Helper function
/// of UsualArithmeticConversions()
static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS,
ExprResult &RHS, QualType LHSType,
@@ -1529,6 +1536,8 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
CharTy = Context.getWideCharType();
Kind = StringLiteral::Wide;
} else if (Literal.isUTF8()) {
+ if (getLangOpts().Char8)
+ CharTy = Context.Char8Ty;
Kind = StringLiteral::UTF8;
} else if (Literal.isUTF16()) {
CharTy = Context.Char16Ty;
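
Under -fchar8_t, the change above gives u8 string literals the new element
type (illustrative):

    const char8_t *p = u8"ok";  // element type is const char8_t with -fchar8_t;
                                // const char otherwise
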
@@ -1545,17 +1554,14 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
CharTyConst.addConst();
+ CharTyConst = Context.adjustStringLiteralBaseType(CharTyConst);
+
// Get an array type for the string, according to C99 6.4.5. This includes
// the nul terminator character as well as the string length for pascal
// strings.
- QualType StrTy = Context.getConstantArrayType(CharTyConst,
- llvm::APInt(32, Literal.GetNumStringChars()+1),
- ArrayType::Normal, 0);
-
- // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
- if (getLangOpts().OpenCL) {
- StrTy = Context.getAddrSpaceQualType(StrTy, LangAS::opencl_constant);
- }
+ QualType StrTy = Context.getConstantArrayType(
+ CharTyConst, llvm::APInt(32, Literal.GetNumStringChars() + 1),
+ ArrayType::Normal, 0);
// Pass &StringTokLocs[0], StringTokLocs.size() to factory!
StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(),
@@ -1674,9 +1680,9 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
MarkDeclRefReferenced(E);
if (getLangOpts().ObjCWeak && isa<VarDecl>(D) &&
- Ty.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ Ty.getObjCLifetime() == Qualifiers::OCL_Weak && !isUnevaluatedContext() &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getLocStart()))
- recordUseOfEvaluatedWeak(E);
+ getCurFunction()->recordUseOfWeak(E);
FieldDecl *FD = dyn_cast<FieldDecl>(D);
if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
@@ -1711,7 +1717,7 @@ Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs) {
- if (Id.getKind() == UnqualifiedId::IK_TemplateId) {
+ if (Id.getKind() == UnqualifiedIdKind::IK_TemplateId) {
Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc);
Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc);
@@ -2070,19 +2076,21 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
IsAddressOfOperand, TemplateArgs);
// Perform the required lookup.
- LookupResult R(*this, NameInfo,
- (Id.getKind() == UnqualifiedId::IK_ImplicitSelfParam)
- ? LookupObjCImplicitSelfParam : LookupOrdinaryName);
- if (TemplateArgs) {
+ LookupResult R(*this, NameInfo,
+ (Id.getKind() == UnqualifiedIdKind::IK_ImplicitSelfParam)
+ ? LookupObjCImplicitSelfParam
+ : LookupOrdinaryName);
+ if (TemplateKWLoc.isValid() || TemplateArgs) {
// Lookup the template name again to correctly establish the context in
// which it was found. This is really unfortunate as we already did the
// lookup to determine that it was a template name in the first place. If
// this becomes a performance hit, we can work harder to preserve those
// results until we get here but it's likely not worth it.
bool MemberOfUnknownSpecialization;
- LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
- MemberOfUnknownSpecialization);
-
+ if (LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
+ MemberOfUnknownSpecialization, TemplateKWLoc))
+ return ExprError();
+
if (MemberOfUnknownSpecialization ||
(R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation))
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
@@ -2148,6 +2156,9 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
if (SS.isValid())
CCC->setTypoNNS(SS.getScopeRep());
}
+ // FIXME: DiagnoseEmptyLookup produces bad diagnostics if we're looking for
+ // a template name, but we happen to have always already looked up the name
+ // before we get here if it must be a template name.
if (DiagnoseEmptyLookup(S, SS, R,
CCC ? std::move(CCC) : std::move(DefaultValidator),
nullptr, None, &TE)) {
@@ -2243,7 +2254,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// In C++1y, if this is a variable template id, then check it
// in BuildTemplateIdExpr().
// The single lookup result must be a variable template declaration.
- if (Id.getKind() == UnqualifiedId::IK_TemplateId && Id.TemplateId &&
+ if (Id.getKind() == UnqualifiedIdKind::IK_TemplateId && Id.TemplateId &&
Id.TemplateId->Kind == TNK_Var_template) {
assert(R.getAsSingle<VarTemplateDecl>() &&
"There should only be one declaration found.");
@@ -2401,7 +2412,7 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo &II = Context.Idents.get("self");
UnqualifiedId SelfName;
SelfName.setIdentifier(&II, SourceLocation());
- SelfName.setKind(UnqualifiedId::IK_ImplicitSelfParam);
+ SelfName.setKind(UnqualifiedIdKind::IK_ImplicitSelfParam);
CXXScopeSpec SelfScopeSpec;
SourceLocation TemplateKWLoc;
ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc,
@@ -2425,8 +2436,9 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
IV->getLocation(), SelfExpr.get(), true, true);
if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
- if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
- recordUseOfEvaluatedWeak(Result);
+ if (!isUnevaluatedContext() &&
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
+ getCurFunction()->recordUseOfWeak(Result);
}
if (getLangOpts().ObjCAutoRefCount) {
if (CurContext->isClosure())
@@ -2470,7 +2482,7 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
return ExprResult((Expr *)nullptr);
}
-/// \brief Cast a base object to a member's actual type.
+/// Cast a base object to a member's actual type.
///
/// Logically this happens in three phases:
///
@@ -2716,12 +2728,23 @@ static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) {
return false;
}
+// Certain multiversion types should be treated as overloaded even when there is
+// only one result.
+static bool ShouldLookupResultBeMultiVersionOverload(const LookupResult &R) {
+ assert(R.isSingleResult() && "Expected only a single result");
+ const auto *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
+ return FD &&
+ (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion());
+}
+
ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R, bool NeedsADL,
bool AcceptInvalidDecl) {
// If this is a single, fully-resolved result and we don't need ADL,
// just build an ordinary singleton decl ref.
- if (!NeedsADL && R.isSingleResult() && !R.getAsSingle<FunctionTemplateDecl>())
+ if (!NeedsADL && R.isSingleResult() &&
+ !R.getAsSingle<FunctionTemplateDecl>() &&
+ !ShouldLookupResultBeMultiVersionOverload(R))
return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), R.getFoundDecl(),
R.getRepresentativeDecl(), nullptr,
AcceptInvalidDecl);
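
A sketch of why a lone cpu_dispatch/cpu_specific result still needs overload
treatment (attribute spelling as in Clang's multiversioning support; the
function itself is assumed):

    __attribute__((cpu_specific(generic))) void work(void) {}
    __attribute__((cpu_specific(avx2)))    void work(void) {}
    __attribute__((cpu_dispatch(generic, avx2))) void work(void);
    void call(void) { work(); }  // reference is resolved as an overload set
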
@@ -2729,7 +2752,7 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// We only need to check the declaration if there's exactly one
// result, because in the overloaded case the results can only be
// functions and function templates.
- if (R.isSingleResult() &&
+ if (R.isSingleResult() && !ShouldLookupResultBeMultiVersionOverload(R) &&
CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl()))
return ExprError();
@@ -2752,7 +2775,7 @@ static void
diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
ValueDecl *var, DeclContext *DC);
-/// \brief Complete semantic analysis for a reference to the given declaration.
+/// Complete semantic analysis for a reference to the given declaration.
ExprResult Sema::BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs,
@@ -2768,9 +2791,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
// Specifically diagnose references to class templates that are missing
// a template argument list.
- Diag(Loc, diag::err_template_decl_ref) << (isa<VarTemplateDecl>(D) ? 1 : 0)
- << Template << SS.getRange();
- Diag(Template->getLocation(), diag::note_template_decl_here);
+ diagnoseMissingTemplateArguments(TemplateName(Template), Loc);
return ExprError();
}
@@ -3033,8 +3054,9 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
unsigned Length = Str.length();
llvm::APInt LengthI(32, Length + 1);
- if (IT == PredefinedExpr::LFunction) {
- ResTy = Context.WideCharTy.withConst();
+ if (IT == PredefinedExpr::LFunction || IT == PredefinedExpr::LFuncSig) {
+ ResTy =
+ Context.adjustStringLiteralBaseType(Context.WideCharTy.withConst());
SmallString<32> RawChars;
ConvertUTF8ToWideString(Context.getTypeSizeInChars(ResTy).getQuantity(),
Str, RawChars);
@@ -3043,7 +3065,7 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
SL = StringLiteral::Create(Context, RawChars, StringLiteral::Wide,
/*Pascal*/ false, ResTy, Loc);
} else {
- ResTy = Context.CharTy.withConst();
+ ResTy = Context.adjustStringLiteralBaseType(Context.CharTy.withConst());
ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal,
/*IndexTypeQuals*/ 0);
SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
@@ -3063,7 +3085,8 @@ ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
case tok::kw___FUNCDNAME__: IT = PredefinedExpr::FuncDName; break; // [MS]
case tok::kw___FUNCSIG__: IT = PredefinedExpr::FuncSig; break; // [MS]
- case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break;
+ case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break; // [MS]
+ case tok::kw_L__FUNCSIG__: IT = PredefinedExpr::LFuncSig; break; // [MS]
case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
}
@@ -3085,6 +3108,8 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
QualType Ty;
if (Literal.isWide())
Ty = Context.WideCharTy; // L'x' -> wchar_t in C and C++.
+ else if (Literal.isUTF8() && getLangOpts().Char8)
+ Ty = Context.Char8Ty; // u8'x' -> char8_t when it exists.
else if (Literal.isUTF16())
Ty = Context.Char16Ty; // u'x' -> char16_t in C11 and C++11.
else if (Literal.isUTF32())
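
Correspondingly for character literals (illustrative):

    // with -fchar8_t:    u8'x' has type char8_t
    // without -fchar8_t: u8'x' has type char (in C++17)
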
@@ -3280,8 +3305,8 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// operator "" X ("n")
unsigned Length = Literal.getUDSuffixOffset();
QualType StrTy = Context.getConstantArrayType(
- Context.CharTy.withConst(), llvm::APInt(32, Length + 1),
- ArrayType::Normal, 0);
+ Context.adjustStringLiteralBaseType(Context.CharTy.withConst()),
+ llvm::APInt(32, Length + 1), ArrayType::Normal, 0);
Expr *Lit = StringLiteral::Create(
Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii,
/*Pascal*/false, StrTy, &TokLoc, 1);
@@ -3313,7 +3338,52 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
Expr *Res;
- if (Literal.isFloatingLiteral()) {
+ if (Literal.isFixedPointLiteral()) {
+ QualType Ty;
+
+ if (Literal.isAccum) {
+ if (Literal.isHalf) {
+ Ty = Context.ShortAccumTy;
+ } else if (Literal.isLong) {
+ Ty = Context.LongAccumTy;
+ } else {
+ Ty = Context.AccumTy;
+ }
+ } else if (Literal.isFract) {
+ if (Literal.isHalf) {
+ Ty = Context.ShortFractTy;
+ } else if (Literal.isLong) {
+ Ty = Context.LongFractTy;
+ } else {
+ Ty = Context.FractTy;
+ }
+ }
+
+ if (Literal.isUnsigned) Ty = Context.getCorrespondingUnsignedType(Ty);
+
+ bool isSigned = !Literal.isUnsigned;
+ unsigned scale = Context.getFixedPointScale(Ty);
+ unsigned ibits = Context.getFixedPointIBits(Ty);
+ unsigned bit_width = Context.getTypeInfo(Ty).Width;
+
+ llvm::APInt Val(bit_width, 0, isSigned);
+ bool Overflowed = Literal.GetFixedPointValue(Val, scale);
+
+ // Do not use bit_width since some types may have padding like _Fract or
+ // unsigned _Accums if PaddingOnUnsignedFixedPoint is set.
+ auto MaxVal = llvm::APInt::getMaxValue(ibits + scale).zextOrSelf(bit_width);
+ if (Literal.isFract && Val == MaxVal + 1)
+ // Clause 6.4.4 - The value of a constant shall be in the range of
+ // representable values for its type, with exception for constants of a
+ // fract type with a value of exactly 1; such a constant shall denote
+ // the maximal value for the type.
+ --Val;
+ else if (Val.ugt(MaxVal) || Overflowed)
+ Diag(Tok.getLocation(), diag::err_too_large_for_fixed_point);
+
+ Res = FixedPointLiteral::CreateFromRawInt(Context, Val, Ty,
+ Tok.getLocation(), scale);
+ } else if (Literal.isFloatingLiteral()) {
QualType Ty;
if (Literal.isHalf){
if (getOpenCLOptions().isEnabled("cl_khr_fp16"))
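
Rough examples of the fixed-point typing above (assumes the Embedded-C
suffixes under -ffixed-point):

    short _Fract sf = 0.25hr;  // h = short, r = _Fract
    _Accum a = 2.5k;           // k = _Accum
    long _Accum la = 2.5lk;
    _Fract one = 1.0r;         // exactly 1: denotes the maximal _Fract value
    _Fract big = 1.5r;         // error: too large for the fixed point type
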
@@ -3553,7 +3623,7 @@ static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T,
return false;
}
-/// \brief Check whether E is a pointer from a decayed array type (the decayed
+/// Check whether E is a pointer from a decayed array type (the decayed
/// pointer type is equal to T) and emit a warning if it is.
static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T,
Expr *E) {
@@ -3571,7 +3641,7 @@ static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T,
<< ICE->getSubExpr()->getType();
}
-/// \brief Check the constraints on expression operands to unary type expression
+/// Check the constraints on expression operands to unary type expression
/// and type traits.
///
/// Completes any types necessary and validates the constraints on the operand
@@ -3655,7 +3725,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return false;
}
-/// \brief Check the constraints on operands to unary expression and type
+/// Check the constraints on operands to unary expression and type
/// traits.
///
/// This will complete any types necessary, and validate the various constraints
@@ -3909,7 +3979,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
} while (!T.isNull() && T->isVariablyModifiedType());
}
-/// \brief Build a sizeof or alignof expression given a type operand.
+/// Build a sizeof or alignof expression given a type operand.
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
@@ -3953,7 +4023,7 @@ Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
}
-/// \brief Build a sizeof or alignof expression given an expression
+/// Build a sizeof or alignof expression given an expression
/// operand.
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
@@ -4071,7 +4141,7 @@ Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
return BuildUnaryOp(S, OpLoc, Opc, Input);
}
-/// \brief Diagnose if arithmetic on the given ObjC pointer is illegal.
+/// Diagnose if arithmetic on the given ObjC pointer is illegal.
///
/// \return true on error
static bool checkArithmeticOnObjCPointer(Sema &S,
@@ -4327,10 +4397,13 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// Per C++ core issue 1213, the result is an xvalue if either operand is
// a non-lvalue array, and an lvalue otherwise.
- if (getLangOpts().CPlusPlus11 &&
- ((LHSExp->getType()->isArrayType() && !LHSExp->isLValue()) ||
- (RHSExp->getType()->isArrayType() && !RHSExp->isLValue())))
- VK = VK_XValue;
+ if (getLangOpts().CPlusPlus11) {
+ for (auto *Op : {LHSExp, RHSExp}) {
+ Op = Op->IgnoreImplicit();
+ if (Op->getType()->isArrayType() && !Op->isLValue())
+ VK = VK_XValue;
+ }
+ }
// Perform default conversions.
if (!LHSExp->getType()->getAs<VectorType>()) {
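
Per CWG 1213 as applied above (illustrative):

    using A = int[3];
    int &&r = A{1, 2, 3}[0];  // subscripting a prvalue array yields an xvalue
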
@@ -4391,12 +4464,24 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
} else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
+ // We apply C++ DR1213 to vector subscripting too.
+ if (getLangOpts().CPlusPlus11 && LHSExp->getValueKind() == VK_RValue) {
+ ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
+ if (Materialized.isInvalid())
+ return ExprError();
+ LHSExp = Materialized.get();
+ }
VK = LHSExp->getValueKind();
if (VK != VK_RValue)
OK = OK_VectorComponent;
- // FIXME: need to deal with const...
ResultType = VTy->getElementType();
+ QualType BaseType = BaseExpr->getType();
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+ Qualifiers MemberQuals = ResultType.getQualifiers();
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ ResultType = Context.getQualifiedType(ResultType, Combined);
} else if (LHSTy->isArrayType()) {
// If we see an array that wasn't promoted by
// DefaultFunctionArrayLvalueConversion, it must be an array that
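
The qualifier propagation above resolves the old FIXME; a sketch of the
effect:

    typedef float float4 __attribute__((ext_vector_type(4)));
    void set(const float4 &v) {
      v[0] = 1.0f;  // now rejected: the element picks up the base's const
    }
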
@@ -4830,6 +4915,10 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
(!Param || !Param->hasAttr<CFConsumedAttr>()))
CFAudited = true;
+ if (Proto->getExtParameterInfo(i).isNoEscape())
+ if (auto *BE = dyn_cast<BlockExpr>(Arg->IgnoreParenNoopCasts(Context)))
+ BE->getBlockDecl()->setDoesNotEscape();
+
InitializedEntity Entity =
Param ? InitializedEntity::InitializeParameter(Context, Param,
ProtoArgType)
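
Sketch of the noescape propagation (assumes blocks plus the noescape
parameter attribute):

    void take(__attribute__((noescape)) void (^blk)(void));
    void caller(void) {
      take(^{});  // the literal's BlockDecl is marked as not escaping
    }
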
@@ -5495,7 +5584,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// CUDA: Kernel calls must be to global functions
if (FDecl && !FDecl->hasAttr<CUDAGlobalAttr>())
return ExprError(Diag(LParenLoc,diag::err_kern_call_not_global_function)
- << FDecl->getName() << Fn->getSourceRange());
+ << FDecl << Fn->getSourceRange());
// CUDA: Kernel function must have 'void' return type
if (!FuncT->getReturnType()->isVoidType())
@@ -5505,7 +5594,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// CUDA: Calls to global functions must be configured
if (FDecl && FDecl->hasAttr<CUDAGlobalAttr>())
return ExprError(Diag(LParenLoc, diag::err_global_call_not_config)
- << FDecl->getName() << Fn->getSourceRange());
+ << FDecl << Fn->getSourceRange());
}
}
@@ -5704,7 +5793,7 @@ Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
}
// Semantic analysis for initializers is done by ActOnDeclarator() and
- // CheckInitializer() - it requires knowledge of the object being intialized.
+ // CheckInitializer() - it requires knowledge of the object being initialized.
InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList,
RBraceLoc);
@@ -6238,7 +6327,7 @@ ExprResult Sema::ActOnParenListExpr(SourceLocation L,
return expr;
}
-/// \brief Emit a specialized diagnostic when one expression is a null pointer
+/// Emit a specialized diagnostic when one expression is a null pointer
/// constant and the other is not a pointer. Returns true if a diagnostic is
/// emitted.
bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
@@ -6279,7 +6368,7 @@ bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
return true;
}
-/// \brief Return false if the condition expression is valid, true otherwise.
+/// Return false if the condition expression is valid, true otherwise.
static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) {
QualType CondTy = Cond->getType();
@@ -6298,7 +6387,7 @@ static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) {
return true;
}
-/// \brief Handle when one or both operands are void type.
+/// Handle when one or both operands are void type.
static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
ExprResult &RHS) {
Expr *LHSExpr = LHS.get();
@@ -6315,7 +6404,7 @@ static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
return S.Context.VoidTy;
}
-/// \brief Return false if the NullExpr can be promoted to PointerTy,
+/// Return false if the NullExpr can be promoted to PointerTy,
/// true otherwise.
static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr,
QualType PointerTy) {
@@ -6328,7 +6417,7 @@ static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr,
return false;
}
-/// \brief Checks compatibility between two pointers and return the resulting
+/// Checks compatibility between two pointers and return the resulting
/// type.
static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
ExprResult &RHS,
@@ -6462,7 +6551,7 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
return ResultTy;
}
-/// \brief Return the resulting type when the operands are both block pointers.
+/// Return the resulting type when the operands are both block pointers.
static QualType checkConditionalBlockPointerCompatibility(Sema &S,
ExprResult &LHS,
ExprResult &RHS,
@@ -6487,7 +6576,7 @@ static QualType checkConditionalBlockPointerCompatibility(Sema &S,
return checkConditionalPointerCompatibility(S, LHS, RHS, Loc);
}
-/// \brief Return the resulting type when the operands are both pointers.
+/// Return the resulting type when the operands are both pointers.
static QualType
checkConditionalObjectPointersCompatibility(Sema &S, ExprResult &LHS,
ExprResult &RHS,
@@ -6526,7 +6615,7 @@ checkConditionalObjectPointersCompatibility(Sema &S, ExprResult &LHS,
return checkConditionalPointerCompatibility(S, LHS, RHS, Loc);
}
-/// \brief Return false if the first expression is not an integer and the second
+/// Return false if the first expression is not an integer and the second
/// expression is not a pointer, true otherwise.
static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int,
Expr* PointerExpr, SourceLocation Loc,
@@ -6546,7 +6635,7 @@ static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int,
return true;
}
-/// \brief Simple conversion between integer and floating point types.
+/// Simple conversion between integer and floating point types.
///
/// Used when handling the OpenCL conditional operator where the
/// condition is a vector while the other operands are scalar.
@@ -6601,7 +6690,7 @@ static QualType OpenCLArithmeticConversions(Sema &S, ExprResult &LHS,
(S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false);
}
-/// \brief Convert scalar operands to a vector that matches the
+/// Convert scalar operands to a vector that matches the
/// condition in length.
///
/// Used when handling the OpenCL conditional operator where the
@@ -6646,7 +6735,7 @@ OpenCLConvertScalarsToVectors(Sema &S, ExprResult &LHS, ExprResult &RHS,
return VectorTy;
}
-/// \brief Return false if this is a valid OpenCL condition vector
+/// Return false if this is a valid OpenCL condition vector
static bool checkOpenCLConditionVector(Sema &S, Expr *Cond,
SourceLocation QuestionLoc) {
// OpenCL v1.1 s6.11.6 says the elements of the vector must be of
@@ -6661,7 +6750,7 @@ static bool checkOpenCLConditionVector(Sema &S, Expr *Cond,
return true;
}
-/// \brief Return false if the vector condition type and the vector
+/// Return false if the vector condition type and the vector
/// result type are compatible.
///
/// OpenCL v1.1 s6.11.6 requires that both vector types have the same
@@ -6691,7 +6780,7 @@ static bool checkVectorResult(Sema &S, QualType CondTy, QualType VecResTy,
return false;
}
-/// \brief Return the resulting type for the conditional operator in
+/// Return the resulting type for the conditional operator in
/// OpenCL (aka "ternary selection operator", OpenCL v1.1
/// s6.3.i) when the condition is a vector type.
static QualType
@@ -6726,7 +6815,7 @@ OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond,
return OpenCLConvertScalarsToVectors(S, LHS, RHS, CondTy, QuestionLoc);
}
-/// \brief Return true if the Expr is block type
+/// Return true if the Expr is block type
static bool checkBlockType(Sema &S, const Expr *E) {
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
QualType Ty = CE->getCallee()->getType();
@@ -7049,6 +7138,10 @@ static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
E = E->IgnoreImpCasts();
E = E->IgnoreConversionOperator();
E = E->IgnoreImpCasts();
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = MTE->GetTemporaryExpr();
+ E = E->IgnoreImpCasts();
+ }
// Built-in binary operator.
if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E)) {
@@ -7096,6 +7189,8 @@ static bool ExprLooksBoolean(Expr *E) {
return OP->getOpcode() == UO_LNot;
if (E->getType()->isPointerType())
return true;
+ // FIXME: What about overloaded operator calls returning "unspecified boolean
+ // type"s (commonly pointer-to-members)?
return false;
}
@@ -7847,7 +7942,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Incompatible;
}
-/// \brief Constructs a transparent union from an expression that is
+/// Constructs a transparent union from an expression that is
/// used to initialize the transparent union.
static void ConstructTransparentUnion(Sema &S, ASTContext &C,
ExprResult &EResult, QualType UnionType,
@@ -8014,7 +8109,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (Diagnose && isa<ObjCProtocolExpr>(PRE)) {
ObjCProtocolDecl *PDecl = cast<ObjCProtocolExpr>(PRE)->getProtocol();
if (PDecl && !PDecl->hasDefinition()) {
- Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl->getName();
+ Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl;
Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
}
}
@@ -8053,18 +8148,57 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
RHS = E;
return Compatible;
}
-
+
if (ConvertRHS)
RHS = ImpCastExprToType(E, Ty, Kind);
}
return result;
}
+namespace {
+/// The original operand to an operator, prior to the application of the usual
+/// arithmetic conversions and converting the arguments of a builtin operator
+/// candidate.
+struct OriginalOperand {
+ explicit OriginalOperand(Expr *Op) : Orig(Op), Conversion(nullptr) {
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Op))
+ Op = MTE->GetTemporaryExpr();
+ if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(Op))
+ Op = BTE->getSubExpr();
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(Op)) {
+ Orig = ICE->getSubExprAsWritten();
+ Conversion = ICE->getConversionFunction();
+ }
+ }
+
+ QualType getType() const { return Orig->getType(); }
+
+ Expr *Orig;
+ NamedDecl *Conversion;
+};
+}
+
QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS) {
+ OriginalOperand OrigLHS(LHS.get()), OrigRHS(RHS.get());
+
Diag(Loc, diag::err_typecheck_invalid_operands)
- << LHS.get()->getType() << RHS.get()->getType()
+ << OrigLHS.getType() << OrigRHS.getType()
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+
+ // If a user-defined conversion was applied to either of the operands prior
+ // to applying the built-in operator rules, tell the user about it.
+ if (OrigLHS.Conversion) {
+ Diag(OrigLHS.Conversion->getLocation(),
+ diag::note_typecheck_invalid_operands_converted)
+ << 0 << LHS.get()->getType();
+ }
+ if (OrigRHS.Conversion) {
+ Diag(OrigRHS.Conversion->getLocation(),
+ diag::note_typecheck_invalid_operands_converted)
+ << 1 << RHS.get()->getType();
+ }
+
return QualType();
}
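
Illustrative diagnostic from the change above (types assumed):

    struct S { operator int(); };
    struct T { operator float *(); };
    void f(S s, T t) {
      s * t;  // error: invalid operands to binary expression ('S' and 'T')
              // notes point at each conversion function that was applied
    }
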
@@ -8215,7 +8349,7 @@ static bool canConvertIntTyToFloatTy(Sema &S, ExprResult *Int,
QualType IntTy = Int->get()->getType().getUnqualifiedType();
// Determine if the integer constant can be expressed as a floating point
- // number of the appropiate type.
+ // number of the appropriate type.
llvm::APSInt Result;
bool CstInt = Int->get()->EvaluateAsInt(Result, S.Context);
uint64_t Bits = 0;
@@ -8593,7 +8727,7 @@ QualType Sema::CheckRemainderOperands(
return compType;
}
-/// \brief Diagnose invalid arithmetic on two void pointers.
+/// Diagnose invalid arithmetic on two void pointers.
static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
Expr *LHSExpr, Expr *RHSExpr) {
S.Diag(Loc, S.getLangOpts().CPlusPlus
@@ -8603,7 +8737,7 @@ static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
<< RHSExpr->getSourceRange();
}
-/// \brief Diagnose invalid arithmetic on a void pointer.
+/// Diagnose invalid arithmetic on a void pointer.
static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc,
Expr *Pointer) {
S.Diag(Loc, S.getLangOpts().CPlusPlus
@@ -8612,7 +8746,7 @@ static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc,
<< 0 /* one pointer */ << Pointer->getSourceRange();
}
-/// \brief Diagnose invalid arithmetic on a null pointer.
+/// Diagnose invalid arithmetic on a null pointer.
///
/// If \p IsGNUIdiom is true, the operation is using the 'p = (i8*)nullptr + n'
/// idiom, which we recognize as a GNU extension.
@@ -8627,7 +8761,7 @@ static void diagnoseArithmeticOnNullPointer(Sema &S, SourceLocation Loc,
<< S.getLangOpts().CPlusPlus << Pointer->getSourceRange();
}
-/// \brief Diagnose invalid arithmetic on two function pointers.
+/// Diagnose invalid arithmetic on two function pointers.
static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
Expr *LHS, Expr *RHS) {
assert(LHS->getType()->isAnyPointerType());
@@ -8643,7 +8777,7 @@ static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
<< LHS->getSourceRange() << RHS->getSourceRange();
}
-/// \brief Diagnose invalid arithmetic on a function pointer.
+/// Diagnose invalid arithmetic on a function pointer.
static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
Expr *Pointer) {
assert(Pointer->getType()->isAnyPointerType());
@@ -8655,7 +8789,7 @@ static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
<< Pointer->getSourceRange();
}
-/// \brief Emit error if Operand is incomplete pointer type
+/// Emit error if Operand is incomplete pointer type
///
/// \returns True if pointer has incomplete type
static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
@@ -8671,7 +8805,7 @@ static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
PointeeTy, Operand->getSourceRange());
}
-/// \brief Check the validity of an arithmetic pointer operand.
+/// Check the validity of an arithmetic pointer operand.
///
/// If the operand has pointer type, this code will check for pointer types
/// which are invalid in arithmetic operations. These will be diagnosed
@@ -8702,7 +8836,7 @@ static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
return true;
}
-/// \brief Check the validity of a binary arithmetic operation w.r.t. pointer
+/// Check the validity of a binary arithmetic operation w.r.t. pointer
/// operands.
///
/// This routine will diagnose any invalid arithmetic on pointer operands much
@@ -8804,7 +8938,7 @@ static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
Self.Diag(OpLoc, diag::note_string_plus_scalar_silence);
}
-/// \brief Emit a warning when adding a char literal to a string.
+/// Emit a warning when adding a char literal to a string.
static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
Expr *LHSExpr, Expr *RHSExpr) {
const Expr *StringRefExpr = LHSExpr;
@@ -8855,7 +8989,7 @@ static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
}
}
-/// \brief Emit error when two pointers are incompatible.
+/// Emit error when two pointers are incompatible.
static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
Expr *LHSExpr, Expr *RHSExpr) {
assert(LHSExpr->getType()->isAnyPointerType());
@@ -9153,7 +9287,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
<< RHS.get()->getSourceRange();
}
-/// \brief Return the resulting type when a vector is shifted
+/// Return the resulting type when a vector is shifted
/// by a scalar or vector shift amount.
static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign) {
@@ -9299,16 +9433,6 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
return LHSType;
}
-static bool IsWithinTemplateSpecialization(Decl *D) {
- if (DeclContext *DC = D->getDeclContext()) {
- if (isa<ClassTemplateSpecializationDecl>(DC))
- return true;
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
- return FD->isFunctionTemplateSpecialization();
- }
- return false;
-}
-
/// If two different enums are compared, raise a warning.
static void checkEnumComparison(Sema &S, SourceLocation Loc, Expr *LHS,
Expr *RHS) {
@@ -9338,7 +9462,7 @@ static void checkEnumComparison(Sema &S, SourceLocation Loc, Expr *LHS,
<< LHS->getSourceRange() << RHS->getSourceRange();
}
-/// \brief Diagnose bad pointer comparisons.
+/// Diagnose bad pointer comparisons.
static void diagnoseDistinctPointerComparison(Sema &S, SourceLocation Loc,
ExprResult &LHS, ExprResult &RHS,
bool IsError) {
@@ -9348,7 +9472,7 @@ static void diagnoseDistinctPointerComparison(Sema &S, SourceLocation Loc,
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
}
-/// \brief Returns false if the pointers are converted to a composite type,
+/// Returns false if the pointers are converted to a composite type,
/// true otherwise.
static bool convertPointersToCompositeType(Sema &S, SourceLocation Loc,
ExprResult &LHS, ExprResult &RHS) {
@@ -9586,137 +9710,368 @@ static void diagnoseLogicalNotOnLHSofCheck(Sema &S, ExprResult &LHS,
// Get the decl for a simple expression: a reference to a variable,
// an implicit C++ field reference, or an implicit ObjC ivar reference.
static ValueDecl *getCompareDecl(Expr *E) {
- if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E))
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
return DR->getDecl();
- if (ObjCIvarRefExpr* Ivar = dyn_cast<ObjCIvarRefExpr>(E)) {
+ if (ObjCIvarRefExpr *Ivar = dyn_cast<ObjCIvarRefExpr>(E)) {
if (Ivar->isFreeIvar())
return Ivar->getDecl();
}
- if (MemberExpr* Mem = dyn_cast<MemberExpr>(E)) {
+ if (MemberExpr *Mem = dyn_cast<MemberExpr>(E)) {
if (Mem->isImplicitAccess())
return Mem->getMemberDecl();
}
return nullptr;
}
-// C99 6.5.8, C++ [expr.rel]
-QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, BinaryOperatorKind Opc,
- bool IsRelational) {
- checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true);
+/// Diagnose some forms of syntactically-obvious tautological comparison.
+static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc,
+ Expr *LHS, Expr *RHS,
+ BinaryOperatorKind Opc) {
+ Expr *LHSStripped = LHS->IgnoreParenImpCasts();
+ Expr *RHSStripped = RHS->IgnoreParenImpCasts();
+
+ QualType LHSType = LHS->getType();
+ QualType RHSType = RHS->getType();
+ if (LHSType->hasFloatingRepresentation() ||
+ (LHSType->isBlockPointerType() && !BinaryOperator::isEqualityOp(Opc)) ||
+ LHS->getLocStart().isMacroID() || RHS->getLocStart().isMacroID() ||
+ S.inTemplateInstantiation())
+ return;
- // Handle vector comparisons separately.
- if (LHS.get()->getType()->isVectorType() ||
- RHS.get()->getType()->isVectorType())
- return CheckVectorCompareOperands(LHS, RHS, Loc, IsRelational);
+ // Comparisons between two array types are ill-formed for operator<=>, so
+ // we shouldn't emit any additional warnings about it.
+ if (Opc == BO_Cmp && LHSType->isArrayType() && RHSType->isArrayType())
+ return;
+
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ //
+ // NOTE: Don't warn about comparison expressions resulting from macro
+ // expansion. Also don't warn about comparisons which are only self
+ // comparisons within a template instantiation. The warnings should catch
+ // obvious cases in the definition of the template anyways. The idea is to
+ // warn when the typed comparison operator will always evaluate to the same
+ // result.
+ ValueDecl *DL = getCompareDecl(LHSStripped);
+ ValueDecl *DR = getCompareDecl(RHSStripped);
+ if (DL && DR && declaresSameEntity(DL, DR)) {
+ StringRef Result;
+ switch (Opc) {
+ case BO_EQ: case BO_LE: case BO_GE:
+ Result = "true";
+ break;
+ case BO_NE: case BO_LT: case BO_GT:
+ Result = "false";
+ break;
+ case BO_Cmp:
+ Result = "'std::strong_ordering::equal'";
+ break;
+ default:
+ break;
+ }
+ S.DiagRuntimeBehavior(Loc, nullptr,
+ S.PDiag(diag::warn_comparison_always)
+ << 0 /*self-comparison*/ << !Result.empty()
+ << Result);
+ } else if (DL && DR &&
+ DL->getType()->isArrayType() && DR->getType()->isArrayType() &&
+ !DL->isWeak() && !DR->isWeak()) {
+ // What is it always going to evaluate to?
+ StringRef Result;
+ switch(Opc) {
+ case BO_EQ: // e.g. array1 == array2
+ Result = "false";
+ break;
+ case BO_NE: // e.g. array1 != array2
+ Result = "true";
+ break;
+ default: // e.g. array1 <= array2
+ // The best we can say is 'a constant'
+ break;
+ }
+ S.DiagRuntimeBehavior(Loc, nullptr,
+ S.PDiag(diag::warn_comparison_always)
+ << 1 /*array comparison*/
+ << !Result.empty() << Result);
+ }
+
+ if (isa<CastExpr>(LHSStripped))
+ LHSStripped = LHSStripped->IgnoreParenCasts();
+ if (isa<CastExpr>(RHSStripped))
+ RHSStripped = RHSStripped->IgnoreParenCasts();
+
+ // Warn about comparisons against a string constant (unless the other
+ // operand is null); the user probably wants strcmp.
+ Expr *LiteralString = nullptr;
+ Expr *LiteralStringStripped = nullptr;
+ if ((isa<StringLiteral>(LHSStripped) || isa<ObjCEncodeExpr>(LHSStripped)) &&
+ !RHSStripped->isNullPointerConstant(S.Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ LiteralString = LHS;
+ LiteralStringStripped = LHSStripped;
+ } else if ((isa<StringLiteral>(RHSStripped) ||
+ isa<ObjCEncodeExpr>(RHSStripped)) &&
+ !LHSStripped->isNullPointerConstant(S.Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ LiteralString = RHS;
+ LiteralStringStripped = RHSStripped;
+ }
+
+ if (LiteralString) {
+ S.DiagRuntimeBehavior(Loc, nullptr,
+ S.PDiag(diag::warn_stringcompare)
+ << isa<ObjCEncodeExpr>(LiteralStringStripped)
+ << LiteralString->getSourceRange());
+ }
+}
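For orientation, the kinds of user code this consolidated helper diagnoses look roughly like the following (a hypothetical sketch; only the diagnostic IDs are taken from the code above):

    int x = 0;
    if (x == x) {}        // warn_comparison_always: self-comparison, always true
    if (x < x) {}         // warn_comparison_always: self-comparison, always false

    int a[4], b[4];
    if (a == b) {}        // warn_comparison_always: distinct arrays, always false

    const char *s = "abc";
    if (s == "abc") {}    // warn_stringcompare: the user probably wants strcmp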
+
+static ImplicitConversionKind castKindToImplicitConversionKind(CastKind CK) {
+ switch (CK) {
+ default: {
+#ifndef NDEBUG
+ llvm::errs() << "unhandled cast kind: " << CastExpr::getCastKindName(CK)
+ << "\n";
+#endif
+ llvm_unreachable("unhandled cast kind");
+ }
+ case CK_UserDefinedConversion:
+ return ICK_Identity;
+ case CK_LValueToRValue:
+ return ICK_Lvalue_To_Rvalue;
+ case CK_ArrayToPointerDecay:
+ return ICK_Array_To_Pointer;
+ case CK_FunctionToPointerDecay:
+ return ICK_Function_To_Pointer;
+ case CK_IntegralCast:
+ return ICK_Integral_Conversion;
+ case CK_FloatingCast:
+ return ICK_Floating_Conversion;
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ return ICK_Floating_Integral;
+ case CK_IntegralComplexCast:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexToFloatingComplex:
+ return ICK_Complex_Conversion;
+ case CK_FloatingComplexToReal:
+ case CK_FloatingRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralRealToComplex:
+ return ICK_Complex_Real;
+ }
+}
+
+static bool checkThreeWayNarrowingConversion(Sema &S, QualType ToType, Expr *E,
+ QualType FromType,
+ SourceLocation Loc) {
+ // Check for a narrowing implicit conversion.
+ StandardConversionSequence SCS;
+ SCS.setAsIdentityConversion();
+ SCS.setToType(0, FromType);
+ SCS.setToType(1, ToType);
+ if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
+ SCS.Second = castKindToImplicitConversionKind(ICE->getCastKind());
+
+ APValue PreNarrowingValue;
+ QualType PreNarrowingType;
+ switch (SCS.getNarrowingKind(S.Context, E, PreNarrowingValue,
+ PreNarrowingType,
+ /*IgnoreFloatToIntegralConversion*/ true)) {
+ case NK_Dependent_Narrowing:
+ // Implicit conversion to a narrower type, but the expression is
+ // value-dependent so we can't tell whether it's actually narrowing.
+ case NK_Not_Narrowing:
+ return false;
+
+ case NK_Constant_Narrowing:
+ // Implicit conversion to a narrower type, and the value is a constant
+ // that cannot be represented exactly in the narrower type.
+ S.Diag(E->getLocStart(), diag::err_spaceship_argument_narrowing)
+ << /*Constant*/ 1
+ << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << ToType;
+ return true;
+
+ case NK_Variable_Narrowing:
+ // Implicit conversion to a narrower type, and the value is not a constant
+ // expression.
+ case NK_Type_Narrowing:
+ S.Diag(E->getLocStart(), diag::err_spaceship_argument_narrowing)
+ << /*Constant*/ 0 << FromType << ToType;
+ // TODO: It's not a constant expression, but what if the user intended it
+ // to be? Can we produce notes to help them figure out why it isn't?
+ return true;
+ }
+ llvm_unreachable("unhandled case in switch");
+}
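For context, a minimal sketch of what this check rejects and accepts for '<=>' (operand types are illustrative; the rule mirrors list-initialization narrowing):

    #include <compare>
    void f(int i, unsigned u) {
      auto a = i <=> u;    // error: err_spaceship_argument_narrowing; converting
                           // 'int' to 'unsigned int' is a narrowing conversion
      auto b = 1 <=> 2u;   // OK: the constant 1 is exactly representable
    }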
+
+static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ using CCT = ComparisonCategoryType;
QualType LHSType = LHS.get()->getType();
QualType RHSType = RHS.get()->getType();
+ // Dig out the original argument type and expression before implicit casts
+ // were applied. These are the types/expressions we need to check the
+ // [expr.spaceship] requirements against.
+ ExprResult LHSStripped = LHS.get()->IgnoreParenImpCasts();
+ ExprResult RHSStripped = RHS.get()->IgnoreParenImpCasts();
+ QualType LHSStrippedType = LHSStripped.get()->getType();
+ QualType RHSStrippedType = RHSStripped.get()->getType();
+
+ // C++2a [expr.spaceship]p3: If one of the operands is of type bool and the
+ // other is not, the program is ill-formed.
+ if (LHSStrippedType->isBooleanType() != RHSStrippedType->isBooleanType()) {
+ S.InvalidOperands(Loc, LHSStripped, RHSStripped);
+ return QualType();
+ }
- Expr *LHSStripped = LHS.get()->IgnoreParenImpCasts();
- Expr *RHSStripped = RHS.get()->IgnoreParenImpCasts();
+ int NumEnumArgs = (int)LHSStrippedType->isEnumeralType() +
+ RHSStrippedType->isEnumeralType();
+ if (NumEnumArgs == 1) {
+ bool LHSIsEnum = LHSStrippedType->isEnumeralType();
+ QualType OtherTy = LHSIsEnum ? RHSStrippedType : LHSStrippedType;
+ if (OtherTy->hasFloatingRepresentation()) {
+ S.InvalidOperands(Loc, LHSStripped, RHSStripped);
+ return QualType();
+ }
+ }
+ if (NumEnumArgs == 2) {
+ // C++2a [expr.spaceship]p5: If both operands have the same enumeration
+ // type E, the operator yields the result of converting the operands
+ // to the underlying type of E and applying <=> to the converted operands.
+ if (!S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType)) {
+ S.InvalidOperands(Loc, LHS, RHS);
+ return QualType();
+ }
+ QualType IntType =
+ LHSStrippedType->getAs<EnumType>()->getDecl()->getIntegerType();
+ assert(IntType->isArithmeticType());
- checkEnumComparison(*this, Loc, LHS.get(), RHS.get());
- diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc);
+ // We can't use `CK_IntegralCast` when the underlying type is 'bool', so we
+ // promote the boolean type, and all other promotable integer types, to
+ // avoid this.
+ if (IntType->isPromotableIntegerType())
+ IntType = S.Context.getPromotedIntegerType(IntType);
- if (!LHSType->hasFloatingRepresentation() &&
- !(LHSType->isBlockPointerType() && IsRelational) &&
- !LHS.get()->getLocStart().isMacroID() &&
- !RHS.get()->getLocStart().isMacroID() &&
- !inTemplateInstantiation()) {
- // For non-floating point types, check for self-comparisons of the form
- // x == x, x != x, x < x, etc. These always evaluate to a constant, and
- // often indicate logic errors in the program.
- //
- // NOTE: Don't warn about comparison expressions resulting from macro
- // expansion. Also don't warn about comparisons which are only self
- // comparisons within a template specialization. The warnings should catch
- // obvious cases in the definition of the template anyways. The idea is to
- // warn when the typed comparison operator will always evaluate to the same
- // result.
- ValueDecl *DL = getCompareDecl(LHSStripped);
- ValueDecl *DR = getCompareDecl(RHSStripped);
- if (DL && DR && DL == DR && !IsWithinTemplateSpecialization(DL)) {
- DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always)
- << 0 // self-
- << (Opc == BO_EQ
- || Opc == BO_LE
- || Opc == BO_GE));
- } else if (DL && DR && LHSType->isArrayType() && RHSType->isArrayType() &&
- !DL->getType()->isReferenceType() &&
- !DR->getType()->isReferenceType()) {
- // what is it always going to eval to?
- char always_evals_to;
- switch(Opc) {
- case BO_EQ: // e.g. array1 == array2
- always_evals_to = 0; // false
- break;
- case BO_NE: // e.g. array1 != array2
- always_evals_to = 1; // true
- break;
- default:
- // best we can say is 'a constant'
- always_evals_to = 2; // e.g. array1 <= array2
- break;
- }
- DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always)
- << 1 // array
- << always_evals_to);
- }
+ LHS = S.ImpCastExprToType(LHS.get(), IntType, CK_IntegralCast);
+ RHS = S.ImpCastExprToType(RHS.get(), IntType, CK_IntegralCast);
+ LHSType = RHSType = IntType;
+ }
+
+ // C++2a [expr.spaceship]p4: If both operands have arithmetic types, the
+ // usual arithmetic conversions are applied to the operands.
+ QualType Type = S.UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ if (Type.isNull())
+ return S.InvalidOperands(Loc, LHS, RHS);
+ assert(Type->isArithmeticType() || Type->isEnumeralType());
+
+ bool HasNarrowing = checkThreeWayNarrowingConversion(
+ S, Type, LHS.get(), LHSType, LHS.get()->getLocStart());
+ HasNarrowing |= checkThreeWayNarrowingConversion(
+ S, Type, RHS.get(), RHSType, RHS.get()->getLocStart());
+ if (HasNarrowing)
+ return QualType();
- if (isa<CastExpr>(LHSStripped))
- LHSStripped = LHSStripped->IgnoreParenCasts();
- if (isa<CastExpr>(RHSStripped))
- RHSStripped = RHSStripped->IgnoreParenCasts();
+ assert(!Type.isNull() && "composite type for <=> has not been set");
- // Warn about comparisons against a string constant (unless the other
- // operand is null), the user probably wants strcmp.
- Expr *literalString = nullptr;
- Expr *literalStringStripped = nullptr;
- if ((isa<StringLiteral>(LHSStripped) || isa<ObjCEncodeExpr>(LHSStripped)) &&
- !RHSStripped->isNullPointerConstant(Context,
- Expr::NPC_ValueDependentIsNull)) {
- literalString = LHS.get();
- literalStringStripped = LHSStripped;
- } else if ((isa<StringLiteral>(RHSStripped) ||
- isa<ObjCEncodeExpr>(RHSStripped)) &&
- !LHSStripped->isNullPointerConstant(Context,
- Expr::NPC_ValueDependentIsNull)) {
- literalString = RHS.get();
- literalStringStripped = RHSStripped;
+ auto TypeKind = [&]() {
+ if (const ComplexType *CT = Type->getAs<ComplexType>()) {
+ if (CT->getElementType()->hasFloatingRepresentation())
+ return CCT::WeakEquality;
+ return CCT::StrongEquality;
}
+ if (Type->isIntegralOrEnumerationType())
+ return CCT::StrongOrdering;
+ if (Type->hasFloatingRepresentation())
+ return CCT::PartialOrdering;
+ llvm_unreachable("other types are unimplemented");
+ }();
- if (literalString) {
- DiagRuntimeBehavior(Loc, nullptr,
- PDiag(diag::warn_stringcompare)
- << isa<ObjCEncodeExpr>(literalStringStripped)
- << literalString->getSourceRange());
- }
- }
+ return S.CheckComparisonCategoryType(TypeKind, Loc);
+}
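Roughly, the enumeration rules above play out as in this hypothetical sketch (the result types come from CheckComparisonCategoryType):

    #include <compare>
    enum E { A, B };
    enum F { C };
    auto r1 = A <=> B;      // OK: operands convert to E's underlying type;
                            // the result type is std::strong_ordering
    // auto r2 = A <=> C;   // error: two different enumeration types
    // auto r3 = A <=> 1.0; // error: one enumeration, one floating operand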
+
+static QualType checkArithmeticOrEnumeralCompare(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ if (Opc == BO_Cmp)
+ return checkArithmeticOrEnumeralThreeWayCompare(S, LHS, RHS, Loc);
// C99 6.5.8p3 / C99 6.5.9p4
- UsualArithmeticConversions(LHS, RHS);
+ QualType Type = S.UsualArithmeticConversions(LHS, RHS);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+ if (Type.isNull())
+ return S.InvalidOperands(Loc, LHS, RHS);
+ assert(Type->isArithmeticType() || Type->isEnumeralType());
- LHSType = LHS.get()->getType();
- RHSType = RHS.get()->getType();
+ checkEnumComparison(S, Loc, LHS.get(), RHS.get());
+
+ if (Type->isAnyComplexType() && BinaryOperator::isRelationalOp(Opc))
+ return S.InvalidOperands(Loc, LHS, RHS);
+
+ // Check for comparisons of floating point operands using != and ==.
+ if (Type->hasFloatingRepresentation() && BinaryOperator::isEqualityOp(Opc))
+ S.CheckFloatComparison(Loc, LHS.get(), RHS.get());
// The result of comparisons is 'bool' in C++, 'int' in C.
- QualType ResultTy = Context.getLogicalOperationType();
+ return S.Context.getLogicalOperationType();
+}
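A quick sketch of this non-three-way arithmetic path (hypothetical code; the first warning assumes -Wfloat-equal is enabled):

    void g(double d, _Complex double z) {
      if (d == 0.1) {}    // ==/!= on floating operands run CheckFloatComparison
      // bool b = z < z;  // error: relational comparison on complex operands
                          // is rejected via InvalidOperands above
    }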
- if (IsRelational) {
- if (LHSType->isRealType() && RHSType->isRealType())
- return ResultTy;
- } else {
- // Check for comparisons of floating point operands using != and ==.
- if (LHSType->hasFloatingRepresentation())
- CheckFloatComparison(Loc, LHS.get(), RHS.get());
+// C99 6.5.8, C++ [expr.rel]
+QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ bool IsRelational = BinaryOperator::isRelationalOp(Opc);
+ bool IsThreeWay = Opc == BO_Cmp;
+ auto IsAnyPointerType = [](ExprResult E) {
+ QualType Ty = E.get()->getType();
+ return Ty->isPointerType() || Ty->isMemberPointerType();
+ };
- if (LHSType->isArithmeticType() && RHSType->isArithmeticType())
- return ResultTy;
+ // C++2a [expr.spaceship]p6: If at least one of the operands is of pointer
+ // type, array-to-pointer, ..., conversions are performed on both operands to
+ // bring them to their composite type.
+ // Otherwise, all comparisons expect an rvalue, so convert to rvalue before
+ // any type-related checks.
+ if (!IsThreeWay || IsAnyPointerType(LHS) || IsAnyPointerType(RHS)) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+ } else {
+ LHS = DefaultLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ RHS = DefaultLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
}
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true);
+
+ // Handle vector comparisons separately.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorCompareOperands(LHS, RHS, Loc, Opc);
+
+ diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc);
+ diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
+
+ QualType LHSType = LHS.get()->getType();
+ QualType RHSType = RHS.get()->getType();
+ if ((LHSType->isArithmeticType() || LHSType->isEnumeralType()) &&
+ (RHSType->isArithmeticType() || RHSType->isEnumeralType()))
+ return checkArithmeticOrEnumeralCompare(*this, LHS, RHS, Loc, Opc);
+
const Expr::NullPointerConstantKind LHSNullKind =
LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
const Expr::NullPointerConstantKind RHSNullKind =
@@ -9724,6 +10079,44 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
bool LHSIsNull = LHSNullKind != Expr::NPCK_NotNull;
bool RHSIsNull = RHSNullKind != Expr::NPCK_NotNull;
+ auto computeResultTy = [&]() {
+ if (Opc != BO_Cmp)
+ return Context.getLogicalOperationType();
+ assert(getLangOpts().CPlusPlus);
+ assert(Context.hasSameType(LHS.get()->getType(), RHS.get()->getType()));
+
+ QualType CompositeTy = LHS.get()->getType();
+ assert(!CompositeTy->isReferenceType());
+
+ auto buildResultTy = [&](ComparisonCategoryType Kind) {
+ return CheckComparisonCategoryType(Kind, Loc);
+ };
+
+ // C++2a [expr.spaceship]p7: If the composite pointer type is a function
+ // pointer type, a pointer-to-member type, or std::nullptr_t, the
+ // result is of type std::strong_equality
+ if (CompositeTy->isFunctionPointerType() ||
+ CompositeTy->isMemberPointerType() || CompositeTy->isNullPtrType())
+ // FIXME: consider making the function pointer case produce
+ // strong_ordering not strong_equality, per P0946R0-Jax18 discussion
+ // and direction polls
+ return buildResultTy(ComparisonCategoryType::StrongEquality);
+
+ // C++2a [expr.spaceship]p8: If the composite pointer type is an object
+ // pointer type, p <=> q is of type std::strong_ordering.
+ if (CompositeTy->isPointerType()) {
+ // P0946R0: Comparisons between a null pointer constant and an object
+ // pointer result in std::strong_equality
+ if (LHSIsNull != RHSIsNull)
+ return buildResultTy(ComparisonCategoryType::StrongEquality);
+ return buildResultTy(ComparisonCategoryType::StrongOrdering);
+ }
+ // C++2a [expr.spaceship]p9: Otherwise, the program is ill-formed.
+ // TODO: Extend support for operator<=> to ObjC types.
+ return InvalidOperands(Loc, LHS, RHS);
+ };
+
+
if (!IsRelational && LHSIsNull != RHSIsNull) {
bool IsEquality = Opc == BO_EQ;
if (RHSIsNull)
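The pointer cases of computeResultTy above map out roughly as follows (a sketch assuming a <compare> header; note the FIXME about possibly moving function pointers to strong_ordering):

    #include <compare>
    void h(int *p, int *q, void (*f)(), void (*g)()) {
      auto a = p <=> q;        // object pointers: std::strong_ordering
      auto b = f <=> g;        // function pointers: std::strong_equality
      auto c = p <=> nullptr;  // null vs. object pointer (P0946R0):
                               // std::strong_equality
    }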
@@ -9751,29 +10144,30 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
// conformance with the C++ standard.
diagnoseFunctionPointerToVoidComparison(
*this, Loc, LHS, RHS, /*isError*/ (bool)isSFINAEContext());
-
+
if (isSFINAEContext())
return QualType();
-
+
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
- return ResultTy;
+ return computeResultTy();
}
// C++ [expr.eq]p2:
// If at least one operand is a pointer [...] bring them to their
// composite pointer type.
+ // C++ [expr.spaceship]p6
+ // If at least one of the operands is of pointer type, [...] bring them
+ // to their composite pointer type.
// C++ [expr.rel]p2:
// If both operands are pointers, [...] bring them to their composite
// pointer type.
if ((int)LHSType->isPointerType() + (int)RHSType->isPointerType() >=
(IsRelational ? 2 : 1) &&
- (!LangOpts.ObjCAutoRefCount ||
- !(LHSType->isObjCObjectPointerType() ||
- RHSType->isObjCObjectPointerType()))) {
+ (!LangOpts.ObjCAutoRefCount || !(LHSType->isObjCObjectPointerType() ||
+ RHSType->isObjCObjectPointerType()))) {
if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
return QualType();
- else
- return ResultTy;
+ return computeResultTy();
}
} else if (LHSType->isPointerType() &&
RHSType->isPointerType()) { // C99 6.5.8p2
@@ -9824,7 +10218,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
else
RHS = ImpCastExprToType(RHS.get(), LHSType, Kind);
}
- return ResultTy;
+ return computeResultTy();
}
if (getLangOpts().CPlusPlus) {
@@ -9834,11 +10228,11 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (!IsRelational && LHSIsNull && RHSIsNull) {
if (LHSType->isNullPtrType()) {
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
if (RHSType->isNullPtrType()) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
}
@@ -9847,12 +10241,12 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (!IsRelational && RHSType->isNullPtrType() &&
(LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType())) {
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
if (!IsRelational && LHSType->isNullPtrType() &&
(RHSType->isObjCObjectPointerType() || RHSType->isBlockPointerType())) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
if (IsRelational &&
@@ -9875,7 +10269,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
else
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
}
}
@@ -9888,15 +10282,8 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
return QualType();
else
- return ResultTy;
+ return computeResultTy();
}
-
- // Handle scoped enumeration types specifically, since they don't promote
- // to integers.
- if (LHS.get()->getType()->isEnumeralType() &&
- Context.hasSameUnqualifiedType(LHS.get()->getType(),
- RHS.get()->getType()))
- return ResultTy;
}
// Handle block pointer types.
@@ -9912,7 +10299,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
<< RHS.get()->getSourceRange();
}
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
- return ResultTy;
+ return computeResultTy();
}
// Allow block pointers to be compared with null pointer constants.
@@ -9936,7 +10323,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
RHS = ImpCastExprToType(RHS.get(), LHSType,
LHSType->isPointerType() ? CK_BitCast
: CK_AnyPointerToBlockPointerCast);
- return ResultTy;
+ return computeResultTy();
}
if (LHSType->isObjCObjectPointerType() ||
@@ -9969,7 +10356,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
RHS = ImpCastExprToType(E, LHSType,
LPT ? CK_BitCast :CK_CPointerToObjCPointerCast);
}
- return ResultTy;
+ return computeResultTy();
}
if (LHSType->isObjCObjectPointerType() &&
RHSType->isObjCObjectPointerType()) {
@@ -9983,7 +10370,20 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast);
else
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
- return ResultTy;
+ return computeResultTy();
+ }
+
+ if (!IsRelational && LHSType->isBlockPointerType() &&
+ RHSType->isBlockCompatibleObjCPointerType(Context)) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType,
+ CK_BlockPointerToObjCPointerCast);
+ return computeResultTy();
+ } else if (!IsRelational &&
+ LHSType->isBlockCompatibleObjCPointerType(Context) &&
+ RHSType->isBlockPointerType()) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType,
+ CK_BlockPointerToObjCPointerCast);
+ return computeResultTy();
}
}
if ((LHSType->isAnyPointerType() && RHSType->isIntegerType()) ||
@@ -10023,30 +10423,30 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
else
RHS = ImpCastExprToType(RHS.get(), LHSType,
RHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
- return ResultTy;
+ return computeResultTy();
}
// Handle block pointers.
if (!IsRelational && RHSIsNull
&& LHSType->isBlockPointerType() && RHSType->isIntegerType()) {
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
if (!IsRelational && LHSIsNull
&& LHSType->isIntegerType() && RHSType->isBlockPointerType()) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
if (getLangOpts().OpenCLVersion >= 200) {
if (LHSIsNull && RHSType->isQueueT()) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
if (LHSType->isQueueT() && RHSIsNull) {
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
- return ResultTy;
+ return computeResultTy();
}
}
@@ -10100,7 +10500,7 @@ QualType Sema::GetSignedVectorType(QualType V) {
/// types.
QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
- bool IsRelational) {
+ BinaryOperatorKind Opc) {
// Check to make sure we're operating on vectors of the same type and width,
// Allowing one side to be a scalar of element type.
QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false,
@@ -10120,22 +10520,12 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// For non-floating point types, check for self-comparisons of the form
// x == x, x != x, x < x, etc. These always evaluate to a constant, and
// often indicate logic errors in the program.
- if (!LHSType->hasFloatingRepresentation() && !inTemplateInstantiation()) {
- if (DeclRefExpr* DRL
- = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParenImpCasts()))
- if (DeclRefExpr* DRR
- = dyn_cast<DeclRefExpr>(RHS.get()->IgnoreParenImpCasts()))
- if (DRL->getDecl() == DRR->getDecl())
- DiagRuntimeBehavior(Loc, nullptr,
- PDiag(diag::warn_comparison_always)
- << 0 // self-
- << 2 // "a constant"
- );
- }
+ diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
// Check for comparisons of floating point operands using != and ==.
- if (!IsRelational && LHSType->hasFloatingRepresentation()) {
- assert (RHS.get()->getType()->hasFloatingRepresentation());
+ if (BinaryOperator::isEqualityOp(Opc) &&
+ LHSType->hasFloatingRepresentation()) {
+ assert(RHS.get()->getType()->hasFloatingRepresentation());
CheckFloatComparison(Loc, LHS.get(), RHS.get());
}
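For reference, the vector path now funnels through the same tautology helper, and element-wise comparisons still yield a signed integer vector (hypothetical example):

    typedef float float4 __attribute__((ext_vector_type(4)));
    void v(float4 a, float4 b) {
      auto m = a == b;  // element-wise; the result is a signed int vector
                        // (GetSignedVectorType); ==/!= on floating elements
                        // also run CheckFloatComparison
      (void)m;
    }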
@@ -10417,8 +10807,16 @@ static void DiagnoseConstAssignment(Sema &S, const Expr *E,
// Static fields do not inherit constness from parents.
break;
}
- break;
- } // End MemberExpr
+ break; // End MemberExpr
+ } else if (const ArraySubscriptExpr *ASE =
+ dyn_cast<ArraySubscriptExpr>(E)) {
+ E = ASE->getBase()->IgnoreParenImpCasts();
+ continue;
+ } else if (const ExtVectorElementExpr *EVE =
+ dyn_cast<ExtVectorElementExpr>(E)) {
+ E = EVE->getBase()->IgnoreParenImpCasts();
+ continue;
+ }
break;
}
@@ -10659,12 +11057,34 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation Loc,
Sema &Sema) {
+ if (Sema.inTemplateInstantiation())
+ return;
+ if (Sema.isUnevaluatedContext())
+ return;
+ if (Loc.isInvalid() || Loc.isMacroID())
+ return;
+ if (LHSExpr->getExprLoc().isMacroID() || RHSExpr->getExprLoc().isMacroID())
+ return;
+
// C / C++ fields
MemberExpr *ML = dyn_cast<MemberExpr>(LHSExpr);
MemberExpr *MR = dyn_cast<MemberExpr>(RHSExpr);
- if (ML && MR && ML->getMemberDecl() == MR->getMemberDecl()) {
- if (isa<CXXThisExpr>(ML->getBase()) && isa<CXXThisExpr>(MR->getBase()))
- Sema.Diag(Loc, diag::warn_identity_field_assign) << 0;
+ if (ML && MR) {
+ if (!(isa<CXXThisExpr>(ML->getBase()) && isa<CXXThisExpr>(MR->getBase())))
+ return;
+ const ValueDecl *LHSDecl =
+ cast<ValueDecl>(ML->getMemberDecl()->getCanonicalDecl());
+ const ValueDecl *RHSDecl =
+ cast<ValueDecl>(MR->getMemberDecl()->getCanonicalDecl());
+ if (LHSDecl != RHSDecl)
+ return;
+ if (LHSDecl->getType().isVolatileQualified())
+ return;
+ if (const ReferenceType *RefTy = LHSDecl->getType()->getAs<ReferenceType>())
+ if (RefTy->getPointeeType().isVolatileQualified())
+ return;
+
+ Sema.Diag(Loc, diag::warn_identity_field_assign) << 0;
}
// Objective-C instance variables
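For the C/C++ field case just extended above, the effect is roughly (hypothetical example):

    struct S {
      int n;
      volatile int v;
      void set() {
        n = n;   // warn_identity_field_assign: both sides name this->n
        v = v;   // not diagnosed: volatile accesses are assumed intentional
      }
    };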
@@ -11045,7 +11465,7 @@ namespace {
AO_No_Error = 4
};
}
-/// \brief Diagnose invalid operand for address of operations.
+/// Diagnose invalid operand for address of operations.
///
/// \param Type The type of operand which cannot have its address taken.
static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc,
@@ -11418,12 +11838,13 @@ static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode(
}
/// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself.
-/// This warning is only emitted for builtin assignment operations. It is also
-/// suppressed in the event of macro expansions.
+/// This warning is suppressed in the event of macro expansions.
static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
- SourceLocation OpLoc) {
+ SourceLocation OpLoc, bool IsBuiltin) {
if (S.inTemplateInstantiation())
return;
+ if (S.isUnevaluatedContext())
+ return;
if (OpLoc.isInvalid() || OpLoc.isMacroID())
return;
LHSExpr = LHSExpr->IgnoreParenImpCasts();
@@ -11446,9 +11867,10 @@ static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
if (RefTy->getPointeeType().isVolatileQualified())
return;
- S.Diag(OpLoc, diag::warn_self_assignment)
- << LHSDeclRef->getType()
- << LHSExpr->getSourceRange() << RHSExpr->getSourceRange();
+ S.Diag(OpLoc, IsBuiltin ? diag::warn_self_assignment_builtin
+ : diag::warn_self_assignment_overloaded)
+ << LHSDeclRef->getType() << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
}
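With the new IsBuiltin flag, the two diagnostics split roughly as follows (hypothetical example; the overloaded case is driven from BuildOverloadedBinOp further down):

    struct T { T &operator=(const T &); };
    void f(int a, T t) {
      a = a;   // warn_self_assignment_builtin
      t = t;   // warn_self_assignment_overloaded
    }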
/// Check if a bitwise-& is performed on an Objective-C pointer. This
@@ -11583,8 +12005,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
// C++11 5.17p9:
// The meaning of x = {v} [...] is that of x = T(v) [...]. The meaning
// of x = {} is x = T().
- InitializationKind Kind =
- InitializationKind::CreateDirectList(RHSExpr->getLocStart());
+ InitializationKind Kind = InitializationKind::CreateDirectList(
+ RHSExpr->getLocStart(), RHSExpr->getLocStart(), RHSExpr->getLocEnd());
InitializedEntity Entity =
InitializedEntity::InitializeTemporary(LHSExpr->getType());
InitializationSequence InitSeq(*this, Entity, Kind, RHSExpr);
@@ -11641,7 +12063,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
OK = LHS.get()->getObjectKind();
}
if (!ResultTy.isNull()) {
- DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+ DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc, true);
DiagnoseSelfMove(LHS.get(), RHS.get(), OpLoc);
}
RecordModifiableNonNullParam(*this, LHS.get());
@@ -11677,19 +12099,17 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
case BO_GE:
case BO_GT:
ConvertHalfVec = true;
- ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, true);
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc);
break;
case BO_EQ:
case BO_NE:
ConvertHalfVec = true;
- ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, false);
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc);
break;
case BO_Cmp:
- // FIXME: Implement proper semantic checking of '<=>'.
ConvertHalfVec = true;
- ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, true);
- if (!ResultTy.isNull())
- ResultTy = Context.VoidTy;
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc);
+ assert(ResultTy.isNull() || ResultTy->getAsCXXRecordDecl());
break;
case BO_And:
checkObjCPointerIntrospection(*this, LHS, RHS, OpLoc);
@@ -11739,7 +12159,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BO_AndAssign:
case BO_OrAssign: // fallthrough
- DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+ DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc, true);
LLVM_FALLTHROUGH;
case BO_XorAssign:
CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc);
@@ -11857,7 +12277,7 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
ParensRange);
}
-/// \brief It accepts a '&&' expr that is inside a '||' one.
+/// It accepts a '&&' expr that is inside a '||' one.
/// Emit a diagnostic together with a fixit hint that wraps the '&&' expression
/// in parentheses.
static void
@@ -11872,7 +12292,7 @@ EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
Bop->getSourceRange());
}
-/// \brief Returns true if the given expression can be evaluated as a constant
+/// Returns true if the given expression can be evaluated as a constant
/// 'true'.
static bool EvaluatesAsTrue(Sema &S, Expr *E) {
bool Res;
@@ -11880,7 +12300,7 @@ static bool EvaluatesAsTrue(Sema &S, Expr *E) {
E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res;
}
-/// \brief Returns true if the given expression can be evaluated as a constant
+/// Returns true if the given expression can be evaluated as a constant
/// 'false'.
static bool EvaluatesAsFalse(Sema &S, Expr *E) {
bool Res;
@@ -11888,7 +12308,7 @@ static bool EvaluatesAsFalse(Sema &S, Expr *E) {
E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res;
}
-/// \brief Look for '&&' in the left hand of a '||' expr.
+/// Look for '&&' in the left hand of a '||' expr.
static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc,
Expr *LHSExpr, Expr *RHSExpr) {
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(LHSExpr)) {
@@ -11910,7 +12330,7 @@ static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc,
}
}
-/// \brief Look for '&&' in the right hand of a '||' expr.
+/// Look for '&&' in the right hand of a '||' expr.
static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
Expr *LHSExpr, Expr *RHSExpr) {
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(RHSExpr)) {
@@ -11925,7 +12345,7 @@ static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
}
}
-/// \brief Look for bitwise op in the left or right hand of a bitwise op with
+/// Look for bitwise op in the left or right hand of a bitwise op with
/// lower precedence and emit a diagnostic together with a fixit hint that wraps
/// the '&' expression in parentheses.
static void DiagnoseBitwiseOpInBitwiseOp(Sema &S, BinaryOperatorKind Opc,
@@ -12038,6 +12458,21 @@ ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
BinaryOperatorKind Opc,
Expr *LHS, Expr *RHS) {
+ switch (Opc) {
+ case BO_Assign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_SubAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ DiagnoseSelfAssignment(S, LHS, RHS, OpLoc, false);
+ CheckIdentityFieldAssignment(LHS, RHS, OpLoc, S);
+ break;
+ default:
+ break;
+ }
+
// Find all of the overloaded operators visible from this
// point. We perform both an operator-name lookup from the local
// scope and an argument-dependent lookup based on the types of
@@ -12160,6 +12595,16 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
}
+static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) {
+ if (T.isNull() || T->isDependentType())
+ return false;
+
+ if (!T->isPromotableIntegerType())
+ return true;
+
+ return Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy);
+}
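In other words, an operation whose operand promotes to a strictly wider type before the arithmetic cannot overflow the operation itself. A sketch of the classification:

    #include <climits>
    void inc() {
      short s = 0;
      ++s;               // operand promotes to int: CanOverflow stays false
      int i = INT_MAX;
      ++i;               // int does not promote: CanOverflow is true
      unsigned u = 0;
      (void)-u;          // width >= int: also classified as overflowing
    }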
+
ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
Expr *InputExpr) {
@@ -12167,6 +12612,8 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
QualType resultType;
+ bool CanOverflow = false;
+
bool ConvertHalfVec = false;
if (getLangOpts().OpenCL) {
QualType Ty = InputExpr->getType();
@@ -12192,6 +12639,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
Opc == UO_PostInc,
Opc == UO_PreInc ||
Opc == UO_PreDec);
+ CanOverflow = isOverflowingIntegerType(Context, resultType);
break;
case UO_AddrOf:
resultType = CheckAddressOfOperand(Input, OpLoc);
@@ -12205,6 +12653,8 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
}
case UO_Plus:
case UO_Minus:
+ CanOverflow = Opc == UO_Minus &&
+ isOverflowingIntegerType(Context, Input.get()->getType());
Input = UsualUnaryConversions(Input.get());
if (Input.isInvalid()) return ExprError();
// Unary plus and minus require promoting an operand of half vector to a
@@ -12241,6 +12691,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
if (Input.isInvalid())
return ExprError();
resultType = Input.get()->getType();
+
if (resultType->isDependentType())
break;
// C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
@@ -12337,7 +12788,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
OK = Input.get()->getObjectKind();
break;
case UO_Coawait:
- // It's unnessesary to represent the pass-through operator co_await in the
+ // It's unnecessary to represent the pass-through operator co_await in the
// AST; just return the input expression instead.
assert(!Input.get()->getType()->isDependentType() &&
"the co_await expression must be non-dependant before "
@@ -12355,17 +12806,17 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
CheckArrayAccess(Input.get());
auto *UO = new (Context)
- UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc);
+ UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc, CanOverflow);
// Convert the result back to a half vector.
if (ConvertHalfVec)
return convertVector(UO, Context.HalfTy, *this);
return UO;
}
-/// \brief Determine whether the given expression is a qualified member
+/// Determine whether the given expression is a qualified member
/// access expression, of a form that could be turned into a pointer to member
/// with the address-of operator.
-static bool isQualifiedMemberAccess(Expr *E) {
+bool Sema::isQualifiedMemberAccess(Expr *E) {
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
if (!DRE->getQualifier())
return false;
@@ -12548,11 +12999,8 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
LastExpr = rebuiltLastStmt;
} else {
LastExpr = PerformCopyInitialization(
- InitializedEntity::InitializeResult(LPLoc,
- Ty,
- false),
- SourceLocation(),
- LastExpr);
+ InitializedEntity::InitializeStmtExprResult(LPLoc, Ty),
+ SourceLocation(), LastExpr);
}
if (LastExpr.isInvalid())
@@ -12783,7 +13231,7 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
CondExpr = CondICE.get();
CondIsTrue = condEval.getZExtValue();
- // If the condition is > zero, then the AST type is the same as the LSHExpr.
+ // If the condition is > zero, then the AST type is the same as the LHSExpr.
Expr *ActiveExpr = CondIsTrue ? LHSExpr : RHSExpr;
resType = ActiveExpr->getType();
@@ -12834,7 +13282,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope) {
assert(ParamInfo.getIdentifier() == nullptr &&
"block-id should have no identifier!");
- assert(ParamInfo.getContext() == Declarator::BlockLiteralContext);
+ assert(ParamInfo.getContext() == DeclaratorContext::BlockLiteralContext);
BlockScopeInfo *CurBlock = getCurBlock();
TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
@@ -12989,7 +13437,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
// Set the captured variables on the block.
// FIXME: Share capture structure between BlockDecl and CapturingScopeInfo!
SmallVector<BlockDecl::Capture, 4> Captures;
- for (CapturingScopeInfo::Capture &Cap : BSI->Captures) {
+ for (Capture &Cap : BSI->Captures) {
if (Cap.isThisCapture())
continue;
BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(),
@@ -13069,7 +13517,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
for (const auto &CI : Result->getBlockDecl()->captures()) {
const VarDecl *var = CI.getVariable();
if (var->getType().isDestructedType() != QualType::DK_none) {
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
break;
}
}
@@ -13335,7 +13783,6 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
DiagKind = diag::err_typecheck_incompatible_address_space;
break;
-
} else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) {
DiagKind = diag::err_typecheck_incompatible_ownership;
break;
@@ -13459,7 +13906,7 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (DiagKind == diag::warn_incompatible_qualified_id &&
PDecl && IFace && !IFace->hasDefinition())
Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id)
- << IFace->getName() << PDecl->getName();
+ << IFace << PDecl;
if (SecondType == Context.OverloadTy)
NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression,
@@ -13700,22 +14147,22 @@ ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) {
}
void
-Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
- Decl *LambdaContextDecl,
- bool IsDecltype) {
+Sema::PushExpressionEvaluationContext(
+ ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl,
+ ExpressionEvaluationContextRecord::ExpressionKind ExprContext) {
ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(), Cleanup,
- LambdaContextDecl, IsDecltype);
+ LambdaContextDecl, ExprContext);
Cleanup.reset();
if (!MaybeODRUseExprs.empty())
std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
}
void
-Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
- ReuseLambdaContextDecl_t,
- bool IsDecltype) {
+Sema::PushExpressionEvaluationContext(
+ ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
+ ExpressionEvaluationContextRecord::ExpressionKind ExprContext) {
Decl *ClosureContextDecl = ExprEvalContexts.back().ManglingContextDecl;
- PushExpressionEvaluationContext(NewContext, ClosureContextDecl, IsDecltype);
+ PushExpressionEvaluationContext(NewContext, ClosureContextDecl, ExprContext);
}
void Sema::PopExpressionEvaluationContext() {
@@ -13723,30 +14170,30 @@ void Sema::PopExpressionEvaluationContext() {
unsigned NumTypos = Rec.NumTypos;
if (!Rec.Lambdas.empty()) {
- if (Rec.isUnevaluated() || Rec.isConstantEvaluated()) {
+ using ExpressionKind = ExpressionEvaluationContextRecord::ExpressionKind;
+ if (Rec.ExprContext == ExpressionKind::EK_TemplateArgument || Rec.isUnevaluated() ||
+ (Rec.isConstantEvaluated() && !getLangOpts().CPlusPlus17)) {
unsigned D;
if (Rec.isUnevaluated()) {
// C++11 [expr.prim.lambda]p2:
// A lambda-expression shall not appear in an unevaluated operand
// (Clause 5).
D = diag::err_lambda_unevaluated_operand;
- } else {
+ } else if (Rec.isConstantEvaluated() && !getLangOpts().CPlusPlus17) {
// C++1y [expr.const]p2:
// A conditional-expression e is a core constant expression unless the
// evaluation of e, following the rules of the abstract machine, would
// evaluate [...] a lambda-expression.
D = diag::err_lambda_in_constant_expression;
- }
+ } else if (Rec.ExprContext == ExpressionKind::EK_TemplateArgument) {
+ // C++17 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear [...] in a template-argument.
+ D = diag::err_lambda_in_invalid_context;
+ } else
+ llvm_unreachable("Couldn't infer lambda error message.");
- // C++1z allows lambda expressions as core constant expressions.
- // FIXME: In C++1z, reinstate the restrictions on lambda expressions (CWG
- // 1607) from appearing within template-arguments and array-bounds that
- // are part of function-signatures. Be mindful that P0315 (Lambdas in
- // unevaluated contexts) might lift some of these restrictions in a
- // future version.
- if (!Rec.isConstantEvaluated() || !getLangOpts().CPlusPlus17)
- for (const auto *L : Rec.Lambdas)
- Diag(L->getLocStart(), D);
+ for (const auto *L : Rec.Lambdas)
+ Diag(L->getLocStart(), D);
} else {
// Mark the capture expressions odr-used. This was deferred
// during lambda expression creation.
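A sketch of the newly diagnosed template-argument case (pre-P0315 semantics):

    template <int N> struct Arr {};
    Arr<[]{ return 1; }()> a;  // err_lambda_in_invalid_context: a
                               // lambda-expression shall not appear in a
                               // template-argument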
@@ -13805,13 +14252,13 @@ static bool isEvaluatableContext(Sema &SemaRef) {
switch (SemaRef.ExprEvalContexts.back().Context) {
case Sema::ExpressionEvaluationContext::Unevaluated:
case Sema::ExpressionEvaluationContext::UnevaluatedAbstract:
- case Sema::ExpressionEvaluationContext::DiscardedStatement:
// Expressions in this context are never evaluated.
return false;
case Sema::ExpressionEvaluationContext::UnevaluatedList:
case Sema::ExpressionEvaluationContext::ConstantEvaluated:
case Sema::ExpressionEvaluationContext::PotentiallyEvaluated:
+ case Sema::ExpressionEvaluationContext::DiscardedStatement:
// Expressions in this context could be evaluated.
return true;
@@ -13855,7 +14302,7 @@ static bool isImplicitlyDefinableConstexprFunction(FunctionDecl *Func) {
(Func->isImplicitlyInstantiable() || (MD && !MD->isUserProvided()));
}
-/// \brief Mark a function referenced, and check whether it is odr-used
+/// Mark a function referenced, and check whether it is odr-used
/// (C++ [basic.def.odr]p2, C99 6.9p3)
void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse) {
@@ -14088,7 +14535,7 @@ static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDec
// Similarly to mutable captures in lambda, all the OpenMP captures by copy
// are mutable in the sense that user can change their value - they are
// private instances of the captured declarations.
- const CapturingScopeInfo::Capture &Cap = CSI->getCapture(Var);
+ const Capture &Cap = CSI->getCapture(Var);
if (Cap.isCopyCapture() &&
!(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable) &&
!(isa<CapturedRegionScopeInfo>(CSI) &&
@@ -14241,30 +14688,6 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
if (BuildAndDiagnose) {
SourceLocation VarLoc = Var->getLocation();
S.Diag(Loc, diag::warn_block_capture_autoreleasing);
- {
- auto AddAutoreleaseNote =
- S.Diag(VarLoc, diag::note_declare_parameter_autoreleasing);
- // Provide a fix-it for the '__autoreleasing' keyword at the
- // appropriate location in the variable's type.
- if (const auto *TSI = Var->getTypeSourceInfo()) {
- PointerTypeLoc PTL =
- TSI->getTypeLoc().getAsAdjusted<PointerTypeLoc>();
- if (PTL) {
- SourceLocation Loc = PTL.getPointeeLoc().getEndLoc();
- Loc = Lexer::getLocForEndOfToken(Loc, 0, S.getSourceManager(),
- S.getLangOpts());
- if (Loc.isValid()) {
- StringRef CharAtLoc = Lexer::getSourceText(
- CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(1)),
- S.getSourceManager(), S.getLangOpts());
- AddAutoreleaseNote << FixItHint::CreateInsertion(
- Loc, CharAtLoc.empty() || !isWhitespace(CharAtLoc[0])
- ? " __autoreleasing "
- : " __autoreleasing");
- }
- }
- }
- }
S.Diag(VarLoc, diag::note_declare_parameter_strong);
}
}
@@ -14272,7 +14695,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
if (HasBlocksAttr || CaptureType->isReferenceType() ||
- (S.getLangOpts().OpenMP && S.IsOpenMPCapturedDecl(Var))) {
+ (S.getLangOpts().OpenMP && S.isOpenMPCapturedDecl(Var))) {
// Block capture by reference does not change the capture or
// declaration reference types.
ByRef = true;
@@ -14332,7 +14755,7 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
}
-/// \brief Capture the given variable in the captured region.
+/// Capture the given variable in the captured region.
static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
VarDecl *Var,
SourceLocation Loc,
@@ -14345,14 +14768,14 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
bool ByRef = true;
// Using an LValue reference type is consistent with Lambdas (see below).
if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) {
- if (S.IsOpenMPCapturedDecl(Var)) {
+ if (S.isOpenMPCapturedDecl(Var)) {
bool HasConst = DeclRefType.isConstQualified();
DeclRefType = DeclRefType.getUnqualifiedType();
// Don't lose diagnostics about assignments to const.
if (HasConst)
DeclRefType.addConst();
}
- ByRef = S.IsOpenMPCapturedByRef(Var, RSI->OpenMPLevel);
+ ByRef = S.isOpenMPCapturedByRef(Var, RSI->OpenMPLevel);
}
if (ByRef)
@@ -14392,7 +14815,7 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
return true;
}
-/// \brief Create a field within the lambda class for the variable
+/// Create a field within the lambda class for the variable
/// being captured.
static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI,
QualType FieldType, QualType DeclRefType,
@@ -14410,7 +14833,7 @@ static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI,
Lambda->addDecl(Field);
}
-/// \brief Capture the given variable in the lambda.
+/// Capture the given variable in the lambda.
static bool captureInLambda(LambdaScopeInfo *LSI,
VarDecl *Var,
SourceLocation Loc,
@@ -14544,7 +14967,7 @@ bool Sema::tryCaptureVariable(
// Capture global variables if it is required to use private copy of this
// variable.
bool IsGlobal = !Var->hasLocalStorage();
- if (IsGlobal && !(LangOpts.OpenMP && IsOpenMPCapturedDecl(Var)))
+ if (IsGlobal && !(LangOpts.OpenMP && isOpenMPCapturedDecl(Var)))
return true;
Var = Var->getCanonicalDecl();
@@ -14909,7 +15332,7 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
// A reference initialized by a constant expression can never be
// odr-used, so simply ignore it.
if (!Var->getType()->isReferenceType() ||
- (SemaRef.LangOpts.OpenMP && SemaRef.IsOpenMPCapturedDecl(Var)))
+ (SemaRef.LangOpts.OpenMP && SemaRef.isOpenMPCapturedDecl(Var)))
SemaRef.MaybeODRUseExprs.insert(E);
} else if (OdrUseContext) {
MarkVarDeclODRUsed(Var, Loc, SemaRef,
@@ -14925,7 +15348,8 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
if (RefersToEnclosingScope) {
LambdaScopeInfo *const LSI =
SemaRef.getCurLambda(/*IgnoreNonLambdaCapturingScope=*/true);
- if (LSI && !LSI->CallOperator->Encloses(Var->getDeclContext())) {
+ if (LSI && (!LSI->CallOperator ||
+ !LSI->CallOperator->Encloses(Var->getDeclContext()))) {
// If a variable could potentially be odr-used, defer marking it so
// until we finish analyzing the full expression for any
// lvalue-to-rvalue
@@ -14943,7 +15367,7 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
}
}
-/// \brief Mark a variable referenced, and check whether it is odr-used
+/// Mark a variable referenced, and check whether it is odr-used
/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
/// used directly for normal expressions referring to VarDecl.
void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
@@ -14984,7 +15408,7 @@ static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
SemaRef.MarkAnyDeclReferenced(Loc, DM, MightBeOdrUse);
}
-/// \brief Perform reference-marking and odr-use handling for a DeclRefExpr.
+/// Perform reference-marking and odr-use handling for a DeclRefExpr.
void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
// TODO: update this with DR# once a defect report is filed.
// C++11 defect. The address of a pure member should not be an ODR use, even
@@ -14997,7 +15421,7 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse);
}
-/// \brief Perform reference-marking and odr-use handling for a MemberExpr.
+/// Perform reference-marking and odr-use handling for a MemberExpr.
void Sema::MarkMemberReferenced(MemberExpr *E) {
// C++11 [basic.def.odr]p2:
// A non-overloaded function whose name appears as a potentially-evaluated
@@ -15016,7 +15440,7 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, MightBeOdrUse);
}
-/// \brief Perform marking for a reference to an arbitrary declaration. It
+/// Perform marking for a reference to an arbitrary declaration. It
/// marks the declaration referenced, and performs odr-use checking for
/// functions and variables. This method should not be used when building a
/// normal expression which refers to a variable.
@@ -15079,7 +15503,7 @@ void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
}
namespace {
- /// \brief Helper class that marks all of the declarations referenced by
+ /// Helper class that marks all of the declarations referenced by
/// potentially-evaluated subexpressions as "referenced".
class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
Sema &S;
@@ -15152,7 +15576,7 @@ namespace {
};
}
-/// \brief Mark any declarations that appear within this expression or any
+/// Mark any declarations that appear within this expression or any
/// potentially-evaluated subexpressions as "referenced".
///
/// \param SkipLocalVariables If true, don't mark local variables as
@@ -15162,7 +15586,7 @@ void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E);
}
-/// \brief Emit a diagnostic that describes an effect on the run-time behavior
+/// Emit a diagnostic that describes an effect on the run-time behavior
/// of the program being compiled.
///
/// This routine emits the given diagnostic when the code currently being
@@ -15228,7 +15652,8 @@ bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
// If we're inside a decltype's expression, don't check for a valid return
// type or construct temporaries until we know whether this is the last call.
- if (ExprEvalContexts.back().IsDecltype) {
+ if (ExprEvalContexts.back().ExprContext ==
+ ExpressionEvaluationContextRecord::EK_Decltype) {
ExprEvalContexts.back().DelayedDecltypeCalls.push_back(CE);
return false;
}
@@ -15319,7 +15744,7 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
<< FixItHint::CreateReplacement(Loc, "==");
}
-/// \brief Redundant parentheses over an equality comparison can indicate
+/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) {
// Don't warn if the parens came from a macro.
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index cff9fbbf491b..a1168fa34d56 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Implements semantic analysis for C++ expressions.
+/// Implements semantic analysis for C++ expressions.
///
//===----------------------------------------------------------------------===//
@@ -42,7 +42,7 @@
using namespace clang;
using namespace sema;
-/// \brief Handle the result of the special case name lookup for inheriting
+/// Handle the result of the special case name lookup for inheriting
/// constructor declarations. 'NS::X::X' and 'NS::X<...>::X' are treated as
/// constructor names in member using declarations, even if 'X' is not the
/// name of the corresponding type.
@@ -80,6 +80,50 @@ ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
Context.getTrivialTypeSourceInfo(Type, NameLoc));
}
+ParsedType Sema::getConstructorName(IdentifierInfo &II,
+ SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec &SS,
+ bool EnteringContext) {
+ CXXRecordDecl *CurClass = getCurrentClass(S, &SS);
+ assert(CurClass && &II == CurClass->getIdentifier() &&
+ "not a constructor name");
+
+ // When naming a constructor as a member of a dependent context (e.g., in a
+ // friend declaration or an inherited constructor declaration), form an
+ // unresolved "typename" type.
+ if (CurClass->isDependentContext() && !EnteringContext) {
+ QualType T = Context.getDependentNameType(ETK_None, SS.getScopeRep(), &II);
+ return ParsedType::make(T);
+ }
+
+ if (SS.isNotEmpty() && RequireCompleteDeclContext(SS, CurClass))
+ return ParsedType();
+
+ // Find the injected-class-name declaration. Note that we make no attempt to
+ // diagnose cases where the injected-class-name is shadowed: the only
+ // declaration that can validly shadow the injected-class-name is a
+ // non-static data member, and if the class contains both a non-static data
+ // member and a constructor then it is ill-formed (we check that in
+ // CheckCompletedCXXClass).
+ CXXRecordDecl *InjectedClassName = nullptr;
+ for (NamedDecl *ND : CurClass->lookup(&II)) {
+ auto *RD = dyn_cast<CXXRecordDecl>(ND);
+ if (RD && RD->isInjectedClassName()) {
+ InjectedClassName = RD;
+ break;
+ }
+ }
+ if (!InjectedClassName && CurClass->isInvalidDecl())
+ return ParsedType();
+ assert(InjectedClassName && "couldn't find injected class name");
+
+ QualType T = Context.getTypeDeclType(InjectedClassName);
+ DiagnoseUseOfDecl(InjectedClassName, NameLoc);
+ MarkAnyDeclReferenced(NameLoc, InjectedClassName, /*OdrUse=*/false);
+
+ return ParsedType::make(T);
+}
+
ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II,
SourceLocation NameLoc,
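For context, a minimal sketch (not taken from this patch's tests) of the constructor names the new Sema::getConstructorName lookup resolves through the injected-class-name:

    struct X {
      X(int);
    };
    X::X(int) {}            // qualified constructor name, found via the
                            // injected-class-name 'X' inside X

    struct G {
      friend X::X(int);     // friend declaration naming X's constructor
    };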
@@ -356,7 +400,7 @@ ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
const UnqualifiedId &Name) {
- assert(Name.getKind() == UnqualifiedId::IK_LiteralOperatorId);
+ assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
if (!SS.isValid())
return false;
@@ -383,7 +427,7 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
llvm_unreachable("unknown nested name specifier kind");
}
-/// \brief Build a C++ typeid expression with a type operand.
+/// Build a C++ typeid expression with a type operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
@@ -408,7 +452,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceRange(TypeidLoc, RParenLoc));
}
-/// \brief Build a C++ typeid expression with an expression operand.
+/// Build a C++ typeid expression with an expression operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *E,
@@ -480,6 +524,12 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
ExprResult
Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
+ // OpenCL C++ 1.0 s2.9: typeid is not supported.
+ if (getLangOpts().OpenCLCPlusPlus) {
+ return ExprError(Diag(OpLoc, diag::err_openclcxx_not_supported)
+ << "typeid");
+ }
+
// Find the std::type_info type.
if (!getStdNamespace())
return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
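A hypothetical OpenCL C++ translation unit showing the new early rejection, which now fires before any lookup of std::type_info is attempted:

    kernel void k() {
      int x = 0;
      typeid(x);            // error: 'typeid' is not supported in OpenCL C++
    }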
@@ -560,7 +610,7 @@ getUuidAttrOfType(Sema &SemaRef, QualType QT,
}
}
-/// \brief Build a Microsoft __uuidof expression with a type operand.
+/// Build a Microsoft __uuidof expression with a type operand.
ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
@@ -580,7 +630,7 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
SourceRange(TypeidLoc, RParenLoc));
}
-/// \brief Build a Microsoft __uuidof expression with an expression operand.
+/// Build a Microsoft __uuidof expression with an expression operand.
ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *E,
@@ -695,7 +745,11 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope) {
// Don't report an error if 'throw' is used in system headers.
if (!getLangOpts().CXXExceptions &&
- !getSourceManager().isInSystemHeader(OpLoc))
+ !getSourceManager().isInSystemHeader(OpLoc) &&
+ (!getLangOpts().OpenMPIsDevice ||
+ !getLangOpts().OpenMPHostCXXExceptions ||
+ isInOpenMPTargetExecutionDirective() ||
+ isInOpenMPDeclareTargetContext()))
Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
// Exceptions aren't allowed in CUDA device code.
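A sketch of what the extra OpenMP conditions permit when compiling for a device with host C++ exceptions enabled (hypothetical pragmas, not from the patch's tests): a 'throw' in textually host-only code is left alone, while one inside declare-target or target code is still diagnosed.

    void host_helper() {
      throw 1;              // not diagnosed: host-only code on a device pass
    }
    #pragma omp declare target
    void on_device() {
      throw 1;              // diagnosed: err_exceptions_disabled
    }
    #pragma omp end declare target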
@@ -728,7 +782,7 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
// exception object
const VarDecl *NRVOVariable = nullptr;
if (IsThrownVarInScope)
- NRVOVariable = getCopyElisionCandidate(QualType(), Ex, false);
+ NRVOVariable = getCopyElisionCandidate(QualType(), Ex, CES_Strict);
InitializedEntity Entity = InitializedEntity::InitializeException(
OpLoc, ExceptionObjectTy,
@@ -1114,8 +1168,9 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
- const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ?
- *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1;
+ const int MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
+ ? *FunctionScopeIndexToStopAt
+ : FunctionScopes.size() - 1;
// Check that we can capture the *enclosing object* (referred to by '*this')
// by the capturing-entity/closure (lambda/block/etc) at
@@ -1141,7 +1196,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
unsigned NumCapturingClosures = 0;
- for (unsigned idx = MaxFunctionScopesIndex; idx != 0; idx--) {
+ for (int idx = MaxFunctionScopesIndex; idx >= 0; idx--) {
if (CapturingScopeInfo *CSI =
dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
if (CSI->CXXThisCaptureIndex != 0) {
@@ -1196,8 +1251,8 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated
// contexts.
QualType ThisTy = getCurrentThisType();
- for (unsigned idx = MaxFunctionScopesIndex; NumCapturingClosures;
- --idx, --NumCapturingClosures) {
+ for (int idx = MaxFunctionScopesIndex; NumCapturingClosures;
+ --idx, --NumCapturingClosures) {
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
Expr *ThisExpr = nullptr;
@@ -1244,11 +1299,16 @@ bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
return Class && Class->isBeingDefined();
}
+/// Parse construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
ExprResult
Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
- SourceLocation LParenLoc,
+ SourceLocation LParenOrBraceLoc,
MultiExprArg exprs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenOrBraceLoc,
+ bool ListInitialization) {
if (!TypeRep)
return ExprError();
@@ -1257,7 +1317,8 @@ Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation());
- auto Result = BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc);
+ auto Result = BuildCXXTypeConstructExpr(TInfo, LParenOrBraceLoc, exprs,
+ RParenOrBraceLoc, ListInitialization);
// Avoid creating a non-type-dependent expression that contains typos.
// Non-type-dependent expressions are liable to be discarded without
// checking for embedded typos.
@@ -1267,38 +1328,40 @@ Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
return Result;
}
-/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
-/// Can be interpreted either as function-style casting ("int(x)")
-/// or class type construction ("ClassType(x,y,z)")
-/// or creation of a value-initialized type ("int()").
ExprResult
Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
- SourceLocation LParenLoc,
+ SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
- SourceLocation RParenLoc) {
+ SourceLocation RParenOrBraceLoc,
+ bool ListInitialization) {
QualType Ty = TInfo->getType();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
- return CXXUnresolvedConstructExpr::Create(Context, TInfo, LParenLoc, Exprs,
- RParenLoc);
+ // FIXME: CXXUnresolvedConstructExpr does not model list-initialization
+ // directly. We work around this by dropping the locations of the braces.
+ SourceRange Locs = ListInitialization
+ ? SourceRange()
+ : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
+ return CXXUnresolvedConstructExpr::Create(Context, TInfo, Locs.getBegin(),
+ Exprs, Locs.getEnd());
}
- bool ListInitialization = LParenLoc.isInvalid();
assert((!ListInitialization ||
(Exprs.size() == 1 && isa<InitListExpr>(Exprs[0]))) &&
"List initialization must have initializer list as expression.");
- SourceRange FullRange = SourceRange(TyBeginLoc,
- ListInitialization ? Exprs[0]->getSourceRange().getEnd() : RParenLoc);
+ SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
InitializationKind Kind =
Exprs.size()
? ListInitialization
- ? InitializationKind::CreateDirectList(TyBeginLoc)
- : InitializationKind::CreateDirect(TyBeginLoc, LParenLoc,
- RParenLoc)
- : InitializationKind::CreateValue(TyBeginLoc, LParenLoc, RParenLoc);
+ ? InitializationKind::CreateDirectList(
+ TyBeginLoc, LParenOrBraceLoc, RParenOrBraceLoc)
+ : InitializationKind::CreateDirect(TyBeginLoc, LParenOrBraceLoc,
+ RParenOrBraceLoc)
+ : InitializationKind::CreateValue(TyBeginLoc, LParenOrBraceLoc,
+ RParenOrBraceLoc);
// C++1z [expr.type.conv]p1:
// If the type is a placeholder for a deduced class type, [...perform class
@@ -1319,7 +1382,8 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
if (Exprs.size() == 1 && !ListInitialization &&
!isa<InitListExpr>(Exprs[0])) {
Expr *Arg = Exprs[0];
- return BuildCXXFunctionalCastExpr(TInfo, Ty, LParenLoc, Arg, RParenLoc);
+ return BuildCXXFunctionalCastExpr(TInfo, Ty, LParenOrBraceLoc, Arg,
+ RParenOrBraceLoc);
}
// For an expression of the form T(), T shall not be an array type.
@@ -1367,15 +1431,18 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
// CXXTemporaryObjectExpr. It's also weird that the functional cast
// is sometimes handled by initialization and sometimes not.
QualType ResultType = Result.get()->getType();
+ SourceRange Locs = ListInitialization
+ ? SourceRange()
+ : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
Result = CXXFunctionalCastExpr::Create(
- Context, ResultType, Expr::getValueKindForType(Ty), TInfo,
- CK_NoOp, Result.get(), /*Path=*/nullptr, LParenLoc, RParenLoc);
+ Context, ResultType, Expr::getValueKindForType(Ty), TInfo, CK_NoOp,
+ Result.get(), /*Path=*/nullptr, Locs.getBegin(), Locs.getEnd());
}
return Result;
}
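The three syntactic forms BuildCXXTypeConstructExpr now distinguishes, with the paren/brace locations threaded through (illustrative only):

    int a = int(42);        // function-style cast
    int b = int{42};        // ListInitialization == true
    int c = int();          // value-initialization

    struct P { int x, y; };
    P p = P{1, 2};          // direct-list-initialization of a temporary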
-/// \brief Determine whether the given function is a non-placement
+/// Determine whether the given function is a non-placement
/// deallocation function.
static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
@@ -1430,7 +1497,7 @@ namespace {
CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
}
- operator bool() const { return FD; }
+ explicit operator bool() const { return FD; }
bool isBetterThan(const UsualDeallocFnInfo &Other, bool WantSize,
bool WantAlign) const {
@@ -1543,7 +1610,7 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
return Best && Best.HasSizeT;
}
-/// \brief Parsed a C++ 'new' expression (C++ 5.3.4).
+/// Parsed a C++ 'new' expression (C++ 5.3.4).
///
/// E.g.:
/// @code new (memory) int[size][4] @endcode
@@ -1593,9 +1660,9 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (Expr *NumElts = (Expr *)Array.NumElts) {
if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
if (getLangOpts().CPlusPlus14) {
- // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
- // shall be a converted constant expression (5.19) of type std::size_t
- // and shall evaluate to a strictly positive value.
+ // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator
+ // shall be a converted constant expression (5.19) of type std::size_t
+ // and shall evaluate to a strictly positive value.
unsigned IntWidth = Context.getTargetInfo().getIntWidth();
assert(IntWidth && "Builtin type of size 0?");
llvm::APSInt Value(IntWidth);
@@ -1728,7 +1795,9 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// - Otherwise, the new-initializer is interpreted according to the
// initialization rules of 8.5 for direct-initialization.
: initStyle == CXXNewExpr::ListInit
- ? InitializationKind::CreateDirectList(TypeRange.getBegin())
+ ? InitializationKind::CreateDirectList(TypeRange.getBegin(),
+ Initializer->getLocStart(),
+ Initializer->getLocEnd())
: InitializationKind::CreateDirect(TypeRange.getBegin(),
DirectInitRange.getBegin(),
DirectInitRange.getEnd());
@@ -1795,13 +1864,6 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
return ExprError();
- if (initStyle == CXXNewExpr::ListInit &&
- isStdInitializerList(AllocType, nullptr)) {
- Diag(AllocTypeInfo->getTypeLoc().getBeginLoc(),
- diag::warn_dangling_std_initializer_list)
- << /*at end of FE*/0 << Inits[0]->getSourceRange();
- }
-
// In ARC, infer 'retaining' for the allocated
if (getLangOpts().ObjCAutoRefCount &&
AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
@@ -1831,7 +1893,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?");
ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(),
- AA_Converting);
+ AA_Converting);
if (!ConvertedSize.isInvalid() &&
ArraySize->getType()->getAs<RecordType>())
@@ -1960,11 +2022,12 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
bool PassAlignment = getLangOpts().AlignedAllocation &&
Alignment > NewAlignment;
+ AllocationFunctionScope Scope = UseGlobal ? AFS_Global : AFS_Both;
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
FindAllocationFunctions(StartLoc,
SourceRange(PlacementLParen, PlacementRParen),
- UseGlobal, AllocType, ArraySize, PassAlignment,
+ Scope, Scope, AllocType, ArraySize, PassAlignment,
PlacementArgs, OperatorNew, OperatorDelete))
return ExprError();
@@ -2099,7 +2162,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
Range, DirectInitRange);
}
-/// \brief Checks that a type is suitable as the allocated type
+/// Checks that a type is suitable as the allocated type
/// in a new-expression.
bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R) {
@@ -2120,7 +2183,8 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
else if (AllocType->isVariablyModifiedType())
return Diag(Loc, diag::err_variably_modified_new_type)
<< AllocType;
- else if (AllocType.getAddressSpace() != LangAS::Default)
+ else if (AllocType.getAddressSpace() != LangAS::Default &&
+ !getLangOpts().OpenCLCPlusPlus)
return Diag(Loc, diag::err_address_space_qualified_new)
<< AllocType.getUnqualifiedType()
<< AllocType.getQualifiers().getAddressSpaceAttributePrintValue();
@@ -2137,12 +2201,10 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
return false;
}
-static bool
-resolveAllocationOverload(Sema &S, LookupResult &R, SourceRange Range,
- SmallVectorImpl<Expr *> &Args, bool &PassAlignment,
- FunctionDecl *&Operator,
- OverloadCandidateSet *AlignedCandidates = nullptr,
- Expr *AlignArg = nullptr) {
+static bool resolveAllocationOverload(
+ Sema &S, LookupResult &R, SourceRange Range, SmallVectorImpl<Expr *> &Args,
+ bool &PassAlignment, FunctionDecl *&Operator,
+ OverloadCandidateSet *AlignedCandidates, Expr *AlignArg, bool Diagnose) {
OverloadCandidateSet Candidates(R.getNameLoc(),
OverloadCandidateSet::CSK_Normal);
for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
@@ -2188,7 +2250,8 @@ resolveAllocationOverload(Sema &S, LookupResult &R, SourceRange Range,
AlignArg = Args[1];
Args.erase(Args.begin() + 1);
return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
- Operator, &Candidates, AlignArg);
+ Operator, &Candidates, AlignArg,
+ Diagnose);
}
// MSVC will fall back on trying to find a matching global operator new
@@ -2204,67 +2267,72 @@ resolveAllocationOverload(Sema &S, LookupResult &R, SourceRange Range,
S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
// FIXME: This will give bad diagnostics pointing at the wrong functions.
return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
- Operator, nullptr);
+ Operator, /*Candidates=*/nullptr,
+ /*AlignArg=*/nullptr, Diagnose);
}
- S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
- << R.getLookupName() << Range;
-
- // If we have aligned candidates, only note the align_val_t candidates
- // from AlignedCandidates and the non-align_val_t candidates from
- // Candidates.
- if (AlignedCandidates) {
- auto IsAligned = [](OverloadCandidate &C) {
- return C.Function->getNumParams() > 1 &&
- C.Function->getParamDecl(1)->getType()->isAlignValT();
- };
- auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
-
- // This was an overaligned allocation, so list the aligned candidates
- // first.
- Args.insert(Args.begin() + 1, AlignArg);
- AlignedCandidates->NoteCandidates(S, OCD_AllCandidates, Args, "",
- R.getNameLoc(), IsAligned);
- Args.erase(Args.begin() + 1);
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args, "", R.getNameLoc(),
- IsUnaligned);
- } else {
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ if (Diagnose) {
+ S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName() << Range;
+
+ // If we have aligned candidates, only note the align_val_t candidates
+ // from AlignedCandidates and the non-align_val_t candidates from
+ // Candidates.
+ if (AlignedCandidates) {
+ auto IsAligned = [](OverloadCandidate &C) {
+ return C.Function->getNumParams() > 1 &&
+ C.Function->getParamDecl(1)->getType()->isAlignValT();
+ };
+ auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
+
+ // This was an overaligned allocation, so list the aligned candidates
+ // first.
+ Args.insert(Args.begin() + 1, AlignArg);
+ AlignedCandidates->NoteCandidates(S, OCD_AllCandidates, Args, "",
+ R.getNameLoc(), IsAligned);
+ Args.erase(Args.begin() + 1);
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args, "", R.getNameLoc(),
+ IsUnaligned);
+ } else {
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ }
}
return true;
case OR_Ambiguous:
- S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
- << R.getLookupName() << Range;
- Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
+ if (Diagnose) {
+ S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
+ << R.getLookupName() << Range;
+ Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
+ }
return true;
case OR_Deleted: {
- S.Diag(R.getNameLoc(), diag::err_ovl_deleted_call)
- << Best->Function->isDeleted()
- << R.getLookupName()
- << S.getDeletedOrUnavailableSuffix(Best->Function)
- << Range;
- Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ if (Diagnose) {
+ S.Diag(R.getNameLoc(), diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted() << R.getLookupName()
+ << S.getDeletedOrUnavailableSuffix(Best->Function) << Range;
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ }
return true;
}
}
llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
-
-/// FindAllocationFunctions - Finds the overloads of operator new and delete
-/// that are appropriate for the allocation.
bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
- bool UseGlobal, QualType AllocType,
- bool IsArray, bool &PassAlignment,
- MultiExprArg PlaceArgs,
+ AllocationFunctionScope NewScope,
+ AllocationFunctionScope DeleteScope,
+ QualType AllocType, bool IsArray,
+ bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
- FunctionDecl *&OperatorDelete) {
+ FunctionDecl *&OperatorDelete,
+ bool Diagnose) {
// --- Choosing an allocation function ---
// C++ 5.3.4p8 - 14 & 18
- // 1) If UseGlobal is true, only look in the global scope. Else, also look
- // in the scope of the allocated class.
+ // 1) If looking in AFS_Global scope for allocation functions, only look in
+ // the global scope. Else, if AFS_Class, only look in the scope of the
+ // allocated class. If AFS_Both, look in both.
// 2) If an array size is given, look for operator new[], else look for
// operator new.
// 3) The first argument is always size_t. Append the arguments from the
@@ -2314,7 +2382,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// function's name is looked up in the global scope. Otherwise, if the
// allocated type is a class type T or array thereof, the allocation
// function's name is looked up in the scope of T.
- if (AllocElemType->isRecordType() && !UseGlobal)
+ if (AllocElemType->isRecordType() && NewScope != AFS_Global)
LookupQualifiedName(R, AllocElemType->getAsCXXRecordDecl());
// We can see ambiguity here if the allocation function is found in
@@ -2325,8 +2393,17 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// If this lookup fails to find the name, or if the allocated type is not
// a class type, the allocation function's name is looked up in the
// global scope.
- if (R.empty())
+ if (R.empty()) {
+ if (NewScope == AFS_Class)
+ return true;
+
LookupQualifiedName(R, Context.getTranslationUnitDecl());
+ }
+
+ if (getLangOpts().OpenCLCPlusPlus && R.empty()) {
+ Diag(StartLoc, diag::err_openclcxx_not_supported) << "default new";
+ return true;
+ }
assert(!R.empty() && "implicitly declared allocation functions not found");
assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
@@ -2335,7 +2412,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
R.suppressDiagnostics();
if (resolveAllocationOverload(*this, R, Range, AllocArgs, PassAlignment,
- OperatorNew))
+ OperatorNew, /*Candidates=*/nullptr,
+ /*AlignArg=*/nullptr, Diagnose))
return true;
}
@@ -2362,7 +2440,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// the allocated type is not a class type or array thereof, the
// deallocation function's name is looked up in the global scope.
LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
- if (AllocElemType->isRecordType() && !UseGlobal) {
+ if (AllocElemType->isRecordType() && DeleteScope != AFS_Global) {
CXXRecordDecl *RD
= cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl());
LookupQualifiedName(FoundDelete, RD);
@@ -2372,6 +2450,9 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool FoundGlobalDelete = FoundDelete.empty();
if (FoundDelete.empty()) {
+ if (DeleteScope == AFS_Class)
+ return true;
+
DeclareGlobalNewDelete();
LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
}
@@ -2559,6 +2640,11 @@ void Sema::DeclareGlobalNewDelete() {
if (GlobalNewDeleteDeclared)
return;
+ // OpenCL C++ 1.0 s2.9: the implicitly declared new and delete operators
+ // are not supported.
+ if (getLangOpts().OpenCLCPlusPlus)
+ return;
+
// C++ [basic.std.dynamic]p2:
// [...] The following allocation and deallocation functions (18.4) are
// implicitly declared in global scope in each translation unit of a
@@ -2845,7 +2931,7 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
}
namespace {
-/// \brief Checks whether delete-expression, and new-expression used for
+/// Checks whether a delete-expression and the new-expression used for
/// initializing the deletee have the same array form.
class MismatchingNewDeleteDetector {
public:
@@ -2868,7 +2954,7 @@ public:
: Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
HasUndefinedConstructors(false) {}
- /// \brief Checks whether pointee of a delete-expression is initialized with
+ /// Checks whether pointee of a delete-expression is initialized with
/// matching form of new-expression.
///
/// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
@@ -2879,7 +2965,7 @@ public:
/// couldn't be analyzed. If at least one constructor initializes the member
/// with matching type of new, the return value is \c NoMismatch.
MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
- /// \brief Analyzes a class member.
+ /// Analyzes a class member.
/// \param Field Class member to analyze.
/// \param DeleteWasArrayForm Array form-ness of the delete-expression used
/// for deleting the \p Field.
@@ -2892,13 +2978,13 @@ public:
private:
const bool EndOfTU;
- /// \brief Indicates that there is at least one constructor without body.
+ /// Indicates that there is at least one constructor without body.
bool HasUndefinedConstructors;
- /// \brief Returns \c CXXNewExpr from given initialization expression.
+ /// Returns \c CXXNewExpr from given initialization expression.
/// \param E Expression used for initializing pointee in delete-expression.
/// E can be a single-element \c InitListExpr consisting of new-expression.
const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
- /// \brief Returns whether member is initialized with mismatching form of
+ /// Returns whether member is initialized with mismatching form of
/// \c new either by the member initializer or in-class initialization.
///
/// If bodies of all constructors are not visible at the end of translation
@@ -2906,7 +2992,7 @@ private:
/// form of \c new, mismatch cannot be proven, and this function will return
/// \c NoMismatch.
MismatchResult analyzeMemberExpr(const MemberExpr *ME);
- /// \brief Returns whether variable is initialized with mismatching form of
+ /// Returns whether variable is initialized with mismatching form of
/// \c new.
///
/// If variable is initialized with matching form of \c new or variable is not
@@ -2914,7 +3000,7 @@ private:
/// If variable is initialized with mismatching form of \c new, returns false.
/// \param D Variable to analyze.
bool hasMatchingVarInit(const DeclRefExpr *D);
- /// \brief Checks whether the constructor initializes pointee with mismatching
+ /// Checks whether the constructor initializes pointee with mismatching
/// form of \c new.
///
/// Returns true, if member is initialized with matching form of \c new in
@@ -2923,7 +3009,7 @@ private:
/// constructor isn't defined at the point where delete-expression is seen, or
/// member isn't initialized by the constructor.
bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
- /// \brief Checks whether member is initialized with matching form of
+ /// Checks whether member is initialized with matching form of
/// \c new in member initializer list.
bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
/// Checks whether member is initialized with mismatching form of \c new by
@@ -3192,7 +3278,8 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
QualType Pointee = Type->getAs<PointerType>()->getPointeeType();
QualType PointeeElem = Context.getBaseElementType(Pointee);
- if (Pointee.getAddressSpace() != LangAS::Default)
+ if (Pointee.getAddressSpace() != LangAS::Default &&
+ !getLangOpts().OpenCLCPlusPlus)
return Diag(Ex.get()->getLocStart(),
diag::err_address_space_qualified_delete)
<< Pointee.getUnqualifiedType()
@@ -3267,6 +3354,11 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
}
if (!OperatorDelete) {
+ if (getLangOpts().OpenCLCPlusPlus) {
+ Diag(StartLoc, diag::err_openclcxx_not_supported) << "default delete";
+ return ExprError();
+ }
+
bool IsComplete = isCompleteType(StartLoc, Pointee);
bool CanProvideSize =
IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
@@ -3322,6 +3414,128 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
return Result;
}
+static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
+ bool IsDelete,
+ FunctionDecl *&Operator) {
+
+ DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
+ IsDelete ? OO_Delete : OO_New);
+
+ LookupResult R(S, NewName, TheCall->getLocStart(), Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
+ assert(!R.empty() && "implicitly declared allocation functions not found");
+ assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
+
+ // We do our own custom access checks below.
+ R.suppressDiagnostics();
+
+ SmallVector<Expr *, 8> Args(TheCall->arg_begin(), TheCall->arg_end());
+ OverloadCandidateSet Candidates(R.getNameLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
+ FnOvl != FnOvlEnd; ++FnOvl) {
+ // Even member operator new/delete are implicitly treated as
+ // static, so don't use AddMemberCandidate.
+ NamedDecl *D = (*FnOvl)->getUnderlyingDecl();
+
+ if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
+ S.AddTemplateOverloadCandidate(FnTemplate, FnOvl.getPair(),
+ /*ExplicitTemplateArgs=*/nullptr, Args,
+ Candidates,
+ /*SuppressUserConversions=*/false);
+ continue;
+ }
+
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ S.AddOverloadCandidate(Fn, FnOvl.getPair(), Args, Candidates,
+ /*SuppressUserConversions=*/false);
+ }
+
+ SourceRange Range = TheCall->getSourceRange();
+
+ // Do the resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
+ case OR_Success: {
+ // Got one!
+ FunctionDecl *FnDecl = Best->Function;
+ assert(R.getNamingClass() == nullptr &&
+ "class members should not be considered");
+
+ if (!FnDecl->isReplaceableGlobalAllocationFunction()) {
+ S.Diag(R.getNameLoc(), diag::err_builtin_operator_new_delete_not_usual)
+ << (IsDelete ? 1 : 0) << Range;
+ S.Diag(FnDecl->getLocation(), diag::note_non_usual_function_declared_here)
+ << R.getLookupName() << FnDecl->getSourceRange();
+ return true;
+ }
+
+ Operator = FnDecl;
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+ S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName() << Range;
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ return true;
+
+ case OR_Ambiguous:
+ S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
+ << R.getLookupName() << Range;
+ Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
+ return true;
+
+ case OR_Deleted: {
+ S.Diag(R.getNameLoc(), diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted() << R.getLookupName()
+ << S.getDeletedOrUnavailableSuffix(Best->Function) << Range;
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ return true;
+ }
+ }
+ llvm_unreachable("Unreachable, bad result from BestViableFunction");
+}
+
+ExprResult
+Sema::SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
+ bool IsDelete) {
+ CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
+ if (!getLangOpts().CPlusPlus) {
+ Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
+ << (IsDelete ? "__builtin_operator_delete" : "__builtin_operator_new")
+ << "C++";
+ return ExprError();
+ }
+ // CodeGen assumes it can find the global new and delete to call,
+ // so ensure that they are declared.
+ DeclareGlobalNewDelete();
+
+ FunctionDecl *OperatorNewOrDelete = nullptr;
+ if (resolveBuiltinNewDeleteOverload(*this, TheCall, IsDelete,
+ OperatorNewOrDelete))
+ return ExprError();
+ assert(OperatorNewOrDelete && "should be found");
+
+ TheCall->setType(OperatorNewOrDelete->getReturnType());
+ for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
+ QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(Context, ParamTy, false);
+ ExprResult Arg = PerformCopyInitialization(
+ Entity, TheCall->getArg(i)->getLocStart(), TheCall->getArg(i));
+ if (Arg.isInvalid())
+ return ExprError();
+ TheCall->setArg(i, Arg.get());
+ }
+ auto Callee = dyn_cast<ImplicitCastExpr>(TheCall->getCallee());
+ assert(Callee && Callee->getCastKind() == CK_BuiltinFnToFnPtr &&
+ "Callee expected to be implicit cast to a builtin function pointer");
+ Callee->setType(OperatorNewOrDelete->getType());
+
+ return TheCallResult;
+}
+
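Typical uses that the new resolution code checks; both builtins must bind to a usual (replaceable) global allocation function, or the err_builtin_operator_new_delete_not_usual diagnostic above fires (a sketch, C++ mode only):

    void *p = __builtin_operator_new(32);   // resolves to the usual ::operator new
    __builtin_operator_delete(p);           // resolves to the matching ::operator delete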
void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
@@ -3378,7 +3592,7 @@ Sema::ConditionResult Sema::ActOnConditionVariable(Decl *ConditionVar,
CK == ConditionKind::ConstexprIf);
}
-/// \brief Check the use of the given variable as a C++ condition in an if,
+/// Check the use of the given variable as a C++ condition in an if,
/// while, do-while, or switch statement.
ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
@@ -3548,6 +3762,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence &ICS,
AssignmentAction Action,
CheckedConversionKind CCK) {
+ // C++ [over.match.oper]p7: [...] operands of class type are converted [...]
+ if (CCK == CCK_ForBuiltinOverloadedOp && !From->getType()->isRecordType())
+ return From;
+
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion: {
ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard,
@@ -3607,6 +3825,12 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From = CastArg.get();
+ // C++ [over.match.oper]p7:
+ // [...] the second standard conversion sequence of a user-defined
+ // conversion sequence is not applied.
+ if (CCK == CCK_ForBuiltinOverloadedOp)
+ return From;
+
return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
AA_Converting, CCK);
}
@@ -4070,14 +4294,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// If this conversion sequence succeeded and involved implicitly converting a
// _Nullable type to a _Nonnull one, complain.
- if (CCK == CCK_ImplicitConversion)
+ if (!isCast(CCK))
diagnoseNullableToNonnullConversion(ToType, InitialFromType,
From->getLocStart());
return From;
}
-/// \brief Check the completeness of a type in a unary type trait.
+/// Check the completeness of a type in a unary type trait.
///
/// If the particular type trait requires a complete type, tries to complete
/// it. If completing the type fails, a diagnostic is emitted and false
@@ -4227,7 +4451,7 @@ static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
const FunctionProtoType *CPT =
Operator->getType()->getAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
- if (!CPT || !CPT->isNothrow(C))
+ if (!CPT || !CPT->isNothrow())
return false;
}
}
@@ -4475,7 +4699,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
const FunctionProtoType *CPT =
Destructor->getType()->getAs<FunctionProtoType>();
CPT = Self.ResolveExceptionSpec(KeyLoc, CPT);
- if (!CPT || !CPT->isNothrow(C))
+ if (!CPT || !CPT->isNothrow())
return false;
}
}
@@ -4568,7 +4792,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
// TODO: check whether evaluating default arguments can throw.
// For now, we'll be conservative and assume that they can throw.
- if (!CPT->isNothrow(C) || CPT->getNumParams() > 1)
+ if (!CPT->isNothrow() || CPT->getNumParams() > 1)
return false;
}
}
@@ -4607,7 +4831,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
// FIXME: check whether evaluating default arguments can throw.
// For now, we'll be conservative and assume that they can throw.
- if (!CPT->isNothrow(C) || CPT->getNumParams() > 0)
+ if (!CPT->isNothrow() || CPT->getNumParams() > 0)
return false;
}
}
@@ -4645,11 +4869,14 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (Kind <= UTT_Last)
return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType());
- if (Kind <= BTT_Last)
+ // Evaluate BTT_ReferenceBindsToTemporary alongside the IsConstructible
+ // traits to avoid duplication.
+ if (Kind <= BTT_Last && Kind != BTT_ReferenceBindsToTemporary)
return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(),
Args[1]->getType(), RParenLoc);
switch (Kind) {
+ case clang::BTT_ReferenceBindsToTemporary:
case clang::TT_IsConstructible:
case clang::TT_IsNothrowConstructible:
case clang::TT_IsTriviallyConstructible: {
@@ -4726,6 +4953,13 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (Kind == clang::TT_IsConstructible)
return true;
+ if (Kind == clang::BTT_ReferenceBindsToTemporary) {
+ if (!T->isReferenceType())
+ return false;
+
+ return !Init.isDirectReferenceBinding();
+ }
+
if (Kind == clang::TT_IsNothrowConstructible)
return S.canThrow(Result.get()) == CT_Cannot;
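Illustrative uses of the trait as it is now evaluated through the construction machinery above:

    static_assert(__reference_binds_to_temporary(const int &, long), "");
    static_assert(!__reference_binds_to_temporary(const int &, int &), "");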
@@ -5231,8 +5465,9 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
case RQ_LValue:
if (!isIndirect && !LHS.get()->Classify(Context).isLValue()) {
- // C++2a allows functions with ref-qualifier & if they are also 'const'.
- if (Proto->isConst())
+ // C++2a allows functions with ref-qualifier & if their cv-qualifier-seq
+ // is (exactly) 'const'.
+ if (Proto->isConst() && !Proto->isVolatile())
Diag(Loc, getLangOpts().CPlusPlus2a
? diag::warn_cxx17_compat_pointer_to_const_ref_member_on_rvalue
: diag::ext_pointer_to_const_ref_member_on_rvalue);
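What the tightened check accepts versus rejects (a sketch, not from the patch's tests):

    struct S {
      void f() const &;
      void g() const volatile &;
    };
    void use(void (S::*pf)() const &, void (S::*pg)() const volatile &) {
      (S().*pf)();          // OK in C++2a: cv-qualifier-seq is exactly 'const'
      (S().*pg)();          // still rejected: 'const volatile' does not qualify
    }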
@@ -5269,7 +5504,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
return Result;
}
-/// \brief Try to convert a type to another according to C++11 5.16p3.
+/// Try to convert a type to another according to C++11 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
@@ -5294,7 +5529,7 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
// constraint that in the conversion the reference must bind directly to
// an lvalue.
// -- If E2 is an xvalue: E1 can be converted to match E2 if E1 can be
- // implicitly conveted to the type "rvalue reference to R2", subject to
+ // implicitly converted to the type "rvalue reference to R2", subject to
// the constraint that the reference must bind directly.
if (To->isLValue() || To->isXValue()) {
QualType T = To->isLValue() ? Self.Context.getLValueReferenceType(ToType)
@@ -5363,7 +5598,7 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
return false;
}
-/// \brief Try to find a common type for two according to C++0x 5.16p5.
+/// Try to find a common type for two according to C++0x 5.16p5.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, overload resolution is used to find a
@@ -5425,7 +5660,7 @@ static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS
return true;
}
-/// \brief Perform an "extended" implicit conversion as returned by
+/// Perform an "extended" implicit conversion as returned by
/// TryClassUnification.
static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
@@ -5441,7 +5676,7 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
return false;
}
-/// \brief Check the operands of ?: under C++ semantics.
+/// Check the operands of ?: under C++ semantics.
///
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
/// extension. In this case, LHS == Cond. (But they're not aliases.)
@@ -5745,27 +5980,23 @@ mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
if (EST2 == EST_None) return ESI2;
if (EST1 == EST_MSAny) return ESI1;
if (EST2 == EST_MSAny) return ESI2;
+ if (EST1 == EST_NoexceptFalse) return ESI1;
+ if (EST2 == EST_NoexceptFalse) return ESI2;
// If either of them is non-throwing, the result is the other.
if (EST1 == EST_DynamicNone) return ESI2;
if (EST2 == EST_DynamicNone) return ESI1;
if (EST1 == EST_BasicNoexcept) return ESI2;
if (EST2 == EST_BasicNoexcept) return ESI1;
+ if (EST1 == EST_NoexceptTrue) return ESI2;
+ if (EST2 == EST_NoexceptTrue) return ESI1;
- // If either of them is a non-value-dependent computed noexcept, that
- // determines the result.
- if (EST2 == EST_ComputedNoexcept && ESI2.NoexceptExpr &&
- !ESI2.NoexceptExpr->isValueDependent())
- return !ESI2.NoexceptExpr->EvaluateKnownConstInt(S.Context) ? ESI2 : ESI1;
- if (EST1 == EST_ComputedNoexcept && ESI1.NoexceptExpr &&
- !ESI1.NoexceptExpr->isValueDependent())
- return !ESI1.NoexceptExpr->EvaluateKnownConstInt(S.Context) ? ESI1 : ESI2;
// If we're left with value-dependent computed noexcept expressions, we're
// stuck. Before C++17, we can just drop the exception specification entirely,
// since it's not actually part of the canonical type. And this should never
// happen in C++17, because it would mean we were computing the composite
// pointer type of dependent types, which should never happen.
- if (EST1 == EST_ComputedNoexcept || EST2 == EST_ComputedNoexcept) {
+ if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
assert(!S.getLangOpts().CPlusPlus17 &&
"computing composite pointer type of dependent types");
return FunctionProtoType::ExceptionSpecInfo();
@@ -5778,7 +6009,9 @@ mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
case EST_DynamicNone:
case EST_MSAny:
case EST_BasicNoexcept:
- case EST_ComputedNoexcept:
+ case EST_DependentNoexcept:
+ case EST_NoexceptFalse:
+ case EST_NoexceptTrue:
llvm_unreachable("handled above");
case EST_Dynamic: {
@@ -5805,7 +6038,7 @@ mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
llvm_unreachable("invalid ExceptionSpecificationType");
}
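Effect of the new merge order on composite pointer types for the conditional operator (illustrative):

    void f() noexcept;
    void g() noexcept(false);
    void h() noexcept;
    void test(bool b) {
      auto p1 = b ? &f : &g;   // void (*)(): the noexcept(false) spec wins
      auto p2 = b ? &f : &h;   // void (*)() noexcept: both sides non-throwing
    }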
-/// \brief Find a merged pointer type and convert the two expressions to it.
+/// Find a merged pointer type and convert the two expressions to it.
///
/// This finds the composite pointer type (or member pointer type) for @p E1
/// and @p E2 according to C++1z 5p14. It converts both expressions to this
@@ -6195,7 +6428,8 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
if (RD->isInvalidDecl() || RD->isDependentContext())
return E;
- bool IsDecltype = ExprEvalContexts.back().IsDecltype;
+ bool IsDecltype = ExprEvalContexts.back().ExprContext ==
+ ExpressionEvaluationContextRecord::EK_Decltype;
CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(RD);
if (Destructor) {
@@ -6277,7 +6511,9 @@ Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
/// are omitted for the 'topmost' call in the decltype expression. If the
/// topmost call bound a temporary, strip that temporary off the expression.
ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
- assert(ExprEvalContexts.back().IsDecltype && "not in a decltype expression");
+ assert(ExprEvalContexts.back().ExprContext ==
+ ExpressionEvaluationContextRecord::EK_Decltype &&
+ "not in a decltype expression");
// C++11 [expr.call]p11:
// If a function call is a prvalue of object type,
@@ -6319,7 +6555,8 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
TopBind = nullptr;
// Disable the special decltype handling now.
- ExprEvalContexts.back().IsDecltype = false;
+ ExprEvalContexts.back().ExprContext =
+ ExpressionEvaluationContextRecord::EK_Other;
// In MS mode, don't perform any extra checking of call return types within a
// decltype expression.
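Why the EK_Decltype context matters: the topmost call in a decltype operand never materializes its result, so an incomplete return type needs no destructor lookup (a sketch):

    struct Incomplete;
    Incomplete make();             // never defined
    using T = decltype(make());    // OK: no temporary is created here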
@@ -6572,7 +6809,7 @@ static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
return false;
}
-/// \brief Check if it's ok to try and recover dot pseudo destructor calls on
+/// Check if it's ok to try and recover dot pseudo destructor calls on
/// pointer objects.
static bool
canRecoverDotPseudoDestructorCallsOnPointerObjects(Sema &SemaRef,
@@ -6716,11 +6953,11 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName) {
- assert((FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
- FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
+ assert((FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
+ FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
"Invalid first type name in pseudo-destructor");
- assert((SecondTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
- SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
+ assert((SecondTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
+ SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) &&
"Invalid second type name in pseudo-destructor");
QualType ObjectType;
@@ -6742,7 +6979,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
QualType DestructedType;
TypeSourceInfo *DestructedTypeInfo = nullptr;
PseudoDestructorTypeStorage Destructed;
- if (SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) {
+ if (SecondTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
ParsedType T = getTypeName(*SecondTypeName.Identifier,
SecondTypeName.StartLocation,
S, &SS, true, false, ObjectTypePtrForLookup,
@@ -6800,9 +7037,9 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
// Convert the name of the scope type (the type prior to '::') into a type.
TypeSourceInfo *ScopeTypeInfo = nullptr;
QualType ScopeType;
- if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_TemplateId ||
FirstTypeName.Identifier) {
- if (FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) {
+ if (FirstTypeName.getKind() == UnqualifiedIdKind::IK_Identifier) {
ParsedType T = getTypeName(*FirstTypeName.Identifier,
FirstTypeName.StartLocation,
S, &SS, true, false, ObjectTypePtrForLookup,
@@ -6877,10 +7114,17 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates) {
+ // Convert the expression to match the conversion function's implicit object
+ // parameter.
+ ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
+ FoundDecl, Method);
+ if (Exp.isInvalid())
+ return true;
+
if (Method->getParent()->isLambda() &&
Method->getConversionType()->isBlockPointerType()) {
// This is a lambda conversion to block pointer; check if the argument
- // is a LambdaExpr.
+ // was a LambdaExpr.
Expr *SubE = E;
CastExpr *CE = dyn_cast<CastExpr>(SubE);
if (CE && CE->getCastKind() == CK_NoOp)
@@ -6897,22 +7141,16 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
DiagnosticErrorTrap Trap(Diags);
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
- ExprResult Exp = BuildBlockForLambdaConversion(E->getExprLoc(),
- E->getExprLoc(),
- Method, E);
+ ExprResult BlockExp = BuildBlockForLambdaConversion(
+ Exp.get()->getExprLoc(), Exp.get()->getExprLoc(), Method, Exp.get());
PopExpressionEvaluationContext();
- if (Exp.isInvalid())
- Diag(E->getExprLoc(), diag::note_lambda_to_block_conv);
- return Exp;
+ if (BlockExp.isInvalid())
+ Diag(Exp.get()->getExprLoc(), diag::note_lambda_to_block_conv);
+ return BlockExp;
}
}
- ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
- FoundDecl, Method);
- if (Exp.isInvalid())
- return true;
-
MemberExpr *ME = new (Context) MemberExpr(
Exp.get(), /*IsArrow=*/false, SourceLocation(), Method, SourceLocation(),
Context.BoundMemberTy, VK_RValue, OK_Ordinary);
@@ -7123,7 +7361,7 @@ static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
return !IsVariableAConstantExpression(Var, Context);
}
-/// \brief Check if the current lambda has any potential captures
+/// Check if the current lambda has any potential captures
/// that must be captured by any of its enclosing lambdas that are ready to
/// capture. If there is a lambda that can capture a nested
/// potential-capture, go ahead and do so. Also, check to see if any
@@ -7146,9 +7384,6 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent();
- ArrayRef<const FunctionScopeInfo *> FunctionScopesArrayRef(
- S.FunctionScopes.data(), S.FunctionScopes.size());
-
// All the potentially captureable variables in the current nested
// lambda (within a generic outer lambda), must be captured by an
// outer lambda that is enclosed within a non-dependent context.
@@ -7177,7 +7412,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
// capture the variable in that lambda (and all its enclosing lambdas).
if (const Optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
- FunctionScopesArrayRef, Var, S)) {
+ S.FunctionScopes, Var, S)) {
const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S,
&FunctionScopeIndexOfCapturableLambda);
@@ -7213,7 +7448,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
// 'this' in that lambda (and all its enclosing lambdas).
if (const Optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
- FunctionScopesArrayRef, /*0 is 'this'*/ nullptr, S)) {
+ S.FunctionScopes, /*0 is 'this'*/ nullptr, S)) {
const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation,
/*Explicit*/ false, /*BuildAndDiagnose*/ true,
@@ -7306,13 +7541,12 @@ class TransformTypos : public TreeTransform<TransformTypos> {
llvm::SmallDenseMap<TypoExpr *, ExprResult, 2> TransformCache;
llvm::SmallDenseMap<OverloadExpr *, Expr *, 4> OverloadResolution;
- /// \brief Emit diagnostics for all of the TypoExprs encountered.
+ /// Emit diagnostics for all of the TypoExprs encountered.
/// If the TypoExprs were successfully corrected, then the diagnostics should
/// suggest the corrections. Otherwise the diagnostics will not suggest
/// anything (having been passed an empty TypoCorrection).
void EmitAllDiagnostics() {
- for (auto E : TypoExprs) {
- TypoExpr *TE = cast<TypoExpr>(E);
+ for (TypoExpr *TE : TypoExprs) {
auto &State = SemaRef.getTypoExprState(TE);
if (State.DiagHandler) {
TypoCorrection TC = State.Consumer->getCurrentCorrection();
@@ -7333,7 +7567,7 @@ class TransformTypos : public TreeTransform<TransformTypos> {
}
}
- /// \brief If corrections for the first TypoExpr have been exhausted for a
+ /// If corrections for the first TypoExpr have been exhausted for a
/// given combination of the other TypoExprs, retry those corrections against
/// the next combination of substitutions for the other TypoExprs by advancing
/// to the next potential correction of the second TypoExpr. For the second
@@ -7504,12 +7738,8 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
if (E && !ExprEvalContexts.empty() && ExprEvalContexts.back().NumTypos &&
(E->isTypeDependent() || E->isValueDependent() ||
E->isInstantiationDependent())) {
- auto TyposInContext = ExprEvalContexts.back().NumTypos;
- assert(TyposInContext < ~0U && "Recursive call of CorrectDelayedTyposInExpr");
- ExprEvalContexts.back().NumTypos = ~0U;
auto TyposResolved = DelayedTypos.size();
auto Result = TransformTypos(*this, InitDecl, Filter).Transform(E);
- ExprEvalContexts.back().NumTypos = TyposInContext;
TyposResolved -= DelayedTypos.size();
if (Result.isInvalid() || Result.get() != E) {
ExprEvalContexts.back().NumTypos -= TyposResolved;
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index dd516ea3b428..3a8fee862c91 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -640,6 +640,7 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
const RecordType *RTy,
SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, bool HasTemplateArgs,
+ SourceLocation TemplateKWLoc,
TypoExpr *&TE) {
SourceRange BaseRange = BaseExpr ? BaseExpr->getSourceRange() : SourceRange();
RecordDecl *RDecl = RTy->getDecl();
@@ -649,13 +650,13 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
BaseRange))
return true;
- if (HasTemplateArgs) {
+ if (HasTemplateArgs || TemplateKWLoc.isValid()) {
// LookupTemplateName doesn't expect these both to exist simultaneously.
QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
bool MOUS;
- SemaRef.LookupTemplateName(R, nullptr, SS, ObjectType, false, MOUS);
- return false;
+ return SemaRef.LookupTemplateName(R, nullptr, SS, ObjectType, false, MOUS,
+ TemplateKWLoc);
}
DeclContext *DC = RDecl;
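Member accesses that now reach LookupTemplateName because a valid TemplateKWLoc alone is enough, even before template arguments are seen (illustrative):

    template <typename T> struct Holder {
      template <typename U> U get();
    };
    template <typename T> int peek(Holder<T> &h) {
      return h.template get<int>();   // TemplateKWLoc is valid here
    }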
@@ -733,7 +734,8 @@ static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
ExprResult &BaseExpr, bool &IsArrow,
SourceLocation OpLoc, CXXScopeSpec &SS,
- Decl *ObjCImpDecl, bool HasTemplateArgs);
+ Decl *ObjCImpDecl, bool HasTemplateArgs,
+ SourceLocation TemplateKWLoc);
ExprResult
Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
@@ -759,9 +761,9 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
TypoExpr *TE = nullptr;
QualType RecordTy = BaseType;
if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
- if (LookupMemberExprInRecord(*this, R, nullptr,
- RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
- SS, TemplateArgs != nullptr, TE))
+ if (LookupMemberExprInRecord(
+ *this, R, nullptr, RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
+ SS, TemplateArgs != nullptr, TemplateKWLoc, TE))
return ExprError();
if (TE)
return TE;
@@ -769,10 +771,10 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
// Explicit member accesses.
} else {
ExprResult BaseResult = Base;
- ExprResult Result = LookupMemberExpr(
- *this, R, BaseResult, IsArrow, OpLoc, SS,
- ExtraArgs ? ExtraArgs->ObjCImpDecl : nullptr,
- TemplateArgs != nullptr);
+ ExprResult Result =
+ LookupMemberExpr(*this, R, BaseResult, IsArrow, OpLoc, SS,
+ ExtraArgs ? ExtraArgs->ObjCImpDecl : nullptr,
+ TemplateArgs != nullptr, TemplateKWLoc);
if (BaseResult.isInvalid())
return ExprError();
@@ -802,16 +804,13 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
Expr *baseObjectExpr,
SourceLocation opLoc) {
// First, build the expression that refers to the base object.
-
- bool baseObjectIsPointer = false;
- Qualifiers baseQuals;
-
+
// Case 1: the base of the indirect field is not a field.
VarDecl *baseVariable = indirectField->getVarDecl();
CXXScopeSpec EmptySS;
if (baseVariable) {
assert(baseVariable->getType()->isRecordType());
-
+
// In principle we could have a member access expression that
// accesses an anonymous struct/union that's a static member of
// the base object's class. However, under the current standard,
@@ -824,68 +823,37 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
ExprResult result
= BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
if (result.isInvalid()) return ExprError();
-
- baseObjectExpr = result.get();
- baseObjectIsPointer = false;
- baseQuals = baseObjectExpr->getType().getQualifiers();
-
- // Case 2: the base of the indirect field is a field and the user
- // wrote a member expression.
- } else if (baseObjectExpr) {
- // The caller provided the base object expression. Determine
- // whether its a pointer and whether it adds any qualifiers to the
- // anonymous struct/union fields we're looking into.
- QualType objectType = baseObjectExpr->getType();
-
- if (const PointerType *ptr = objectType->getAs<PointerType>()) {
- baseObjectIsPointer = true;
- objectType = ptr->getPointeeType();
- } else {
- baseObjectIsPointer = false;
- }
- baseQuals = objectType.getQualifiers();
-
- // Case 3: the base of the indirect field is a field and we should
- // build an implicit member access.
- } else {
- // We've found a member of an anonymous struct/union that is
- // inside a non-anonymous struct/union, so in a well-formed
- // program our base object expression is "this".
- QualType ThisTy = getCurrentThisType();
- if (ThisTy.isNull()) {
- Diag(loc, diag::err_invalid_member_use_in_static_method)
- << indirectField->getDeclName();
- return ExprError();
- }
-
- // Our base object expression is "this".
- CheckCXXThisCapture(loc);
- baseObjectExpr
- = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true);
- baseObjectIsPointer = true;
- baseQuals = ThisTy->castAs<PointerType>()->getPointeeType().getQualifiers();
+
+ baseObjectExpr = result.get();
}
-
+
+ assert((baseVariable || baseObjectExpr) &&
+ "referencing anonymous struct/union without a base variable or "
+ "expression");
+
// Build the implicit member references to the field of the
// anonymous struct/union.
Expr *result = baseObjectExpr;
IndirectFieldDecl::chain_iterator
FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
-
- // Build the first member access in the chain with full information.
+
+ // Case 2: the base of the indirect field is a field and the user
+ // wrote a member expression.
if (!baseVariable) {
FieldDecl *field = cast<FieldDecl>(*FI);
-
+
+ bool baseObjectIsPointer = baseObjectExpr->getType()->isPointerType();
+
// Make a nameInfo that properly uses the anonymous name.
DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
- result = BuildFieldReferenceExpr(result, baseObjectIsPointer,
- SourceLocation(), EmptySS, field,
- foundDecl, memberNameInfo).get();
+ // Build the first member access in the chain with full information.
+ result =
+ BuildFieldReferenceExpr(result, baseObjectIsPointer, SourceLocation(),
+ SS, field, foundDecl, memberNameInfo)
+ .get();
if (!result)
return ExprError();
-
- // FIXME: check qualified member access
}
// In all cases, we should now skip the first declaration in the chain.
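
For context, a hedged sketch of what BuildAnonymousStructUnionMemberReference
handles: members of an anonymous struct/union are named through an
IndirectFieldDecl chain, with the base being either a variable, an explicit
member expression, or (when neither is written) an implicit 'this':

    struct S {
      struct {    // anonymous struct: 'x' is reached via an IndirectFieldDecl
        int x;    // whose chain is { <anonymous field>, x }
      };
      int get() { return x; }        // implicit 'this' as the base expression
    };
    int read(S *s) { return s->x; }  // explicit pointer base (IsArrow case)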
@@ -922,7 +890,7 @@ BuildMSPropertyRefExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
NameInfo.getLoc());
}
-/// \brief Build a MemberExpr AST node.
+/// Build a MemberExpr AST node.
static MemberExpr *BuildMemberExpr(
Sema &SemaRef, ASTContext &C, Expr *Base, bool isArrow,
SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
@@ -937,7 +905,7 @@ static MemberExpr *BuildMemberExpr(
return E;
}
-/// \brief Determine if the given scope is within a function-try-block handler.
+/// Determine if the given scope is within a function-try-block handler.
static bool IsInFnTryBlockHandler(const Scope *S) {
// Walk the scope stack until finding a FnTryCatchScope, or leave the
// function scope. If a FnTryCatchScope is found, check whether the TryScope
@@ -954,16 +922,12 @@ getVarTemplateSpecialization(Sema &S, VarTemplateDecl *VarTempl,
const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo,
SourceLocation TemplateKWLoc) {
-
if (!TemplateArgs) {
- S.Diag(MemberNameInfo.getBeginLoc(), diag::err_template_decl_ref)
- << /*Variable template*/ 1 << MemberNameInfo.getName()
- << MemberNameInfo.getSourceRange();
-
- S.Diag(VarTempl->getLocation(), diag::note_template_decl_here);
-
+ S.diagnoseMissingTemplateArguments(TemplateName(VarTempl),
+ MemberNameInfo.getBeginLoc());
return nullptr;
}
+
DeclResult VDecl = S.CheckVarTemplateId(
VarTempl, TemplateKWLoc, MemberNameInfo.getLoc(), *TemplateArgs);
if (VDecl.isInvalid())
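
The replaced hand-rolled diagnostic corresponds to naming a variable template
member without template arguments; diagnoseMissingTemplateArguments now emits
the error and note from one helper. A hedged example of code reaching this
path:

    struct X {
      template <typename T> static constexpr T zero = T();
    };
    int f(X x) {
      // return x.zero;   // error: variable template needs template arguments
      return x.zero<int>; // OK: CheckVarTemplateId builds the specialization
    }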
@@ -1264,7 +1228,8 @@ Sema::PerformMemberExprBaseConversion(Expr *Base, bool IsArrow) {
static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
ExprResult &BaseExpr, bool &IsArrow,
SourceLocation OpLoc, CXXScopeSpec &SS,
- Decl *ObjCImpDecl, bool HasTemplateArgs) {
+ Decl *ObjCImpDecl, bool HasTemplateArgs,
+ SourceLocation TemplateKWLoc) {
assert(BaseExpr.get() && "no base expression");
// Perform default conversions.
@@ -1314,8 +1279,8 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// Handle field access to simple records.
if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
TypoExpr *TE = nullptr;
- if (LookupMemberExprInRecord(S, R, BaseExpr.get(), RTy,
- OpLoc, IsArrow, SS, HasTemplateArgs, TE))
+ if (LookupMemberExprInRecord(S, R, BaseExpr.get(), RTy, OpLoc, IsArrow, SS,
+ HasTemplateArgs, TemplateKWLoc, TE))
return ExprError();
// Returning valid-but-null is how we indicate to the caller that
@@ -1353,7 +1318,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
OpLoc, S.Context.getObjCClassType());
if (ShouldTryAgainWithRedefinitionType(S, BaseExpr))
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
goto fail;
}
@@ -1479,8 +1444,9 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
IsArrow);
if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
- if (!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, MemberLoc))
- S.recordUseOfEvaluatedWeak(Result);
+ if (!S.isUnevaluatedContext() &&
+ !S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, MemberLoc))
+ S.getCurFunction()->recordUseOfWeak(Result);
}
return Result;
@@ -1524,9 +1490,6 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
}
if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
- // Check the use of this method.
- if (S.DiagnoseUseOfDecl(OMD, MemberLoc))
- return ExprError();
Selector SetterSel =
SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(),
S.PP.getSelectorTable(),
@@ -1546,7 +1509,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// use the 'id' redefinition in this case.
if (IsArrow && ShouldTryAgainWithRedefinitionType(S, BaseExpr))
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
return ExprError(S.Diag(MemberLoc, diag::err_property_not_found)
<< MemberName << BaseType);
@@ -1559,7 +1522,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (!MD) {
if (ShouldTryAgainWithRedefinitionType(S, BaseExpr))
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
goto fail;
}
@@ -1567,6 +1530,9 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// Also must look for a getter name which uses property syntax.
Selector Sel = S.PP.getSelectorTable().getNullarySelector(Member);
ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ if (!IFace)
+ goto fail;
+
ObjCMethodDecl *Getter;
if ((Getter = IFace->lookupClassMethod(Sel))) {
// Check the use of this method.
@@ -1598,7 +1564,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (ShouldTryAgainWithRedefinitionType(S, BaseExpr))
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
return ExprError(S.Diag(MemberLoc, diag::err_property_not_found)
<< MemberName << BaseType);
@@ -1623,10 +1589,14 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
else
VK = BaseExpr.get()->getValueKind();
}
+
QualType ret = CheckExtVectorComponent(S, BaseType, VK, OpLoc,
Member, MemberLoc);
if (ret.isNull())
return ExprError();
+ Qualifiers BaseQ =
+ S.Context.getCanonicalType(BaseExpr.get()->getType()).getQualifiers();
+ ret = S.Context.getQualifiedType(ret, BaseQ);
return new (S.Context)
ExtVectorElementExpr(ret, VK, BaseExpr.get(), *Member, MemberLoc);
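
The added lines stop a vector swizzle from dropping the base expression's
cv-qualifiers: the element type is now qualified like the base. A sketch of
the affected construct (illustrative; assumes Clang's ext_vector_type
extension):

    typedef float float4 __attribute__((ext_vector_type(4)));
    void f(const float4 *p) {
      // With the base's 'const' propagated onto the element type, a write
      // through the swizzle is expected to be diagnosed.
      (*p).x = 1.0f;
    }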
@@ -1639,7 +1609,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
BaseExpr = S.ImpCastExprToType(
BaseExpr.get(), S.Context.getObjCSelRedefinitionType(), CK_BitCast);
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
}
// Failure cases.
@@ -1662,7 +1632,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// Recurse as an -> access.
IsArrow = true;
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
}
}
@@ -1676,7 +1646,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
return ExprError();
BaseExpr = S.DefaultFunctionArrayConversion(BaseExpr.get());
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
+ ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
}
S.Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
@@ -1707,7 +1677,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
// Warn about the explicit constructor calls Microsoft extension.
if (getLangOpts().MicrosoftExt &&
- Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Id.getKind() == UnqualifiedIdKind::IK_ConstructorName)
Diag(Id.getSourceRange().getBegin(),
diag::ext_ms_explicit_constructor_call);
@@ -1805,7 +1775,7 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
if (getLangOpts().OpenMP && IsArrow &&
!CurContext->isDependentContext() &&
isa<CXXThisExpr>(Base.get()->IgnoreParenImpCasts())) {
- if (auto *PrivateCopy = IsOpenMPCapturedDecl(Field)) {
+ if (auto *PrivateCopy = isOpenMPCapturedDecl(Field)) {
return getOpenMPCapturedExpr(PrivateCopy, VK, OK,
MemberNameInfo.getLoc());
}
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index cd0c2c47ae4c..bf0ffeba06b2 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -141,7 +141,7 @@ ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
return new (Context) ObjCStringLiteral(S, Ty, AtLoc);
}
-/// \brief Emits an error if the given method does not exist, or if the return
+/// Emits an error if the given method does not exist, or if the return
/// type is not an Objective-C object.
static bool validateBoxingMethod(Sema &S, SourceLocation Loc,
const ObjCInterfaceDecl *Class,
@@ -165,7 +165,7 @@ static bool validateBoxingMethod(Sema &S, SourceLocation Loc,
return true;
}
-/// \brief Maps ObjCLiteralKind to NSClassIdKindKind
+/// Maps ObjCLiteralKind to NSClassIdKindKind
static NSAPI::NSClassIdKindKind ClassKindFromLiteralKind(
Sema::ObjCLiteralKind LiteralKind) {
switch (LiteralKind) {
@@ -189,7 +189,7 @@ static NSAPI::NSClassIdKindKind ClassKindFromLiteralKind(
llvm_unreachable("LiteralKind can't be converted into a ClassKind");
}
-/// \brief Validates ObjCInterfaceDecl availability.
+/// Validates ObjCInterfaceDecl availability.
/// ObjCInterfaceDecl, used to create ObjC literals, should be defined
/// if clang is not in debugger mode.
static bool ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
@@ -211,7 +211,7 @@ static bool ValidateObjCLiteralInterfaceDecl(Sema &S, ObjCInterfaceDecl *Decl,
return true;
}
-/// \brief Looks up ObjCInterfaceDecl of a given NSClassIdKindKind.
+/// Looks up ObjCInterfaceDecl of a given NSClassIdKindKind.
/// Used to create ObjC literals, such as NSDictionary (@{}),
/// NSArray (@[]) and Boxed Expressions (@())
static ObjCInterfaceDecl *LookupObjCInterfaceDeclForLiteral(Sema &S,
@@ -236,7 +236,7 @@ static ObjCInterfaceDecl *LookupObjCInterfaceDeclForLiteral(Sema &S,
return ID;
}
-/// \brief Retrieve the NSNumber factory method that should be used to create
+/// Retrieve the NSNumber factory method that should be used to create
/// an Objective-C literal for the given type.
static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
QualType NumberType,
@@ -379,7 +379,7 @@ ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc,
return BuildObjCNumericLiteral(AtLoc, Inner.get());
}
-/// \brief Check that the given expression is a valid element of an Objective-C
+/// Check that the given expression is a valid element of an Objective-C
/// collection literal.
static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
QualType T,
@@ -1357,6 +1357,11 @@ QualType Sema::getMessageSendResultType(QualType ReceiverType,
if (isClassMessage)
return resultType;
+ // There is nothing left to do if the result type cannot have a nullability
+ // specifier.
+ if (!resultType->canHaveNullability())
+ return resultType;
+
// Map the nullability of the result into a table index.
unsigned receiverNullabilityIdx = 0;
if (auto nullability = ReceiverType->getNullability(Context))
@@ -1613,6 +1618,11 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
ParmVarDecl *param = Method->parameters()[i];
assert(argExpr && "CheckMessageArgumentTypes(): missing expression");
+ if (param->hasAttr<NoEscapeAttr>())
+ if (auto *BE = dyn_cast<BlockExpr>(
+ argExpr->IgnoreParenNoopCasts(Context)))
+ BE->getBlockDecl()->setDoesNotEscape();
+
// Strip the unbridged-cast placeholder expression off unless it's
// a consumed argument.
if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
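
The new check marks a block literal passed directly for a parameter annotated
with noescape as non-escaping, so later phases need not treat its captures as
escaping. This hunk covers Objective-C message arguments; the sketch below
uses a plain function for brevity, on the assumption that the same semantic
model applies (requires Clang's blocks extension, -fblocks):

    void apply(__attribute__((noescape)) void (^fn)(void));
    void caller(void) {
      int x = 0;
      // The BlockExpr argument maps to a parameter carrying NoEscapeAttr,
      // so setDoesNotEscape() is called on its BlockDecl.
      apply(^{ (void)x; });
    }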
@@ -2319,7 +2329,7 @@ static void checkFoundationAPI(Sema &S, SourceLocation Loc,
}
}
-/// \brief Diagnose use of %s directive in an NSString which is being passed
+/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
@@ -2358,7 +2368,7 @@ DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
}
}
-/// \brief Build an Objective-C class message expression.
+/// Build an Objective-C class message expression.
///
/// This routine takes care of both normal class messages and
/// class messages to the superclass.
@@ -2403,11 +2413,12 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
<< FixItHint::CreateInsertion(Loc, "[");
LBracLoc = Loc;
}
- SourceLocation SelLoc;
+ ArrayRef<SourceLocation> SelectorSlotLocs;
if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
- SelLoc = SelectorLocs.front();
+ SelectorSlotLocs = SelectorLocs;
else
- SelLoc = Loc;
+ SelectorSlotLocs = Loc;
+ SourceLocation SelLoc = SelectorSlotLocs.front();
if (ReceiverType->isDependentType()) {
// If the receiver type is dependent, we can't type-check anything
@@ -2432,7 +2443,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
assert(Class && "We don't know which class we're messaging?");
// objc++ diagnoses during typename annotation.
if (!getLangOpts().CPlusPlus)
- (void)DiagnoseUseOfDecl(Class, SelLoc);
+ (void)DiagnoseUseOfDecl(Class, SelectorSlotLocs);
// Find the method we are messaging.
if (!Method) {
SourceRange TypeRange
@@ -2457,7 +2468,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
if (!Method)
Method = Class->lookupPrivateClassMethod(Sel);
- if (Method && DiagnoseUseOfDecl(Method, SelLoc))
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
return ExprError();
}
@@ -2581,7 +2592,7 @@ static bool isMethodDeclaredInRootProtocol(Sema &S, const ObjCMethodDecl *M) {
return false;
}
-/// \brief Build an Objective-C instance message expression.
+/// Build an Objective-C instance message expression.
///
/// This routine takes care of both normal instance messages and
/// instance messages to the superclass instance.
@@ -2627,11 +2638,12 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
SourceRange RecRange =
SuperLoc.isValid()? SuperLoc : Receiver->getSourceRange();
- SourceLocation SelLoc;
+ ArrayRef<SourceLocation> SelectorSlotLocs;
if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
- SelLoc = SelectorLocs.front();
+ SelectorSlotLocs = SelectorLocs;
else
- SelLoc = Loc;
+ SelectorSlotLocs = Loc;
+ SourceLocation SelLoc = SelectorSlotLocs.front();
if (LBracLoc.isInvalid()) {
Diag(Loc, diag::err_missing_open_square_message_send)
@@ -2743,7 +2755,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!AreMultipleMethodsInGlobalPool(Sel, Method,
SourceRange(LBracLoc, RBracLoc),
receiverIsIdLike, Methods))
- DiagnoseUseOfDecl(Method, SelLoc);
+ DiagnoseUseOfDecl(Method, SelectorSlotLocs);
}
} else if (ReceiverType->isObjCClassOrClassKindOfType() ||
ReceiverType->isObjCQualifiedClassType()) {
@@ -2775,7 +2787,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (!Method)
Method = ClassDecl->lookupPrivateClassMethod(Sel);
}
- if (Method && DiagnoseUseOfDecl(Method, SelLoc))
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
return ExprError();
}
if (!Method) {
@@ -2792,7 +2804,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// to select a better one.
Method = Methods[0];
- // If we find an instance method, emit waring.
+ // If we find an instance method, emit warning.
if (Method->isInstanceMethod()) {
if (const ObjCInterfaceDecl *ID =
dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) {
@@ -2822,7 +2834,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Method = LookupMethodInQualifiedType(Sel, QIdTy, true);
if (!Method)
Method = LookupMethodInQualifiedType(Sel, QIdTy, false);
- if (Method && DiagnoseUseOfDecl(Method, SelLoc))
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
return ExprError();
} else if (const ObjCObjectPointerType *OCIType
= ReceiverType->getAsObjCInterfacePointerType()) {
@@ -2897,7 +2909,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
}
- if (Method && DiagnoseUseOfDecl(Method, SelLoc, forwardClass))
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs, forwardClass))
return ExprError();
} else {
// Reject other random receiver types (e.g. structs).
@@ -3491,6 +3503,7 @@ static void addFixitForObjCARCConversion(Sema &S,
// We handle C-style and implicit casts here.
switch (CCK) {
case Sema::CCK_ImplicitConversion:
+ case Sema::CCK_ForBuiltinOverloadedOp:
case Sema::CCK_CStyleCast:
case Sema::CCK_OtherCast:
break;
@@ -3644,11 +3657,13 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
SourceLocation afterLParen = S.getLocForEndOfToken(castRange.getBegin());
SourceLocation noteLoc = afterLParen.isValid() ? afterLParen : loc;
+ unsigned convKindForDiag = Sema::isCast(CCK) ? 0 : 1;
+
// Bridge from an ARC type to a CF type.
if (castACTC == ACTC_retainable && isAnyRetainable(exprACTC)) {
S.Diag(loc, diag::err_arc_cast_requires_bridge)
- << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << convKindForDiag
<< 2 // of C pointer type
<< castExprType
<< unsigned(castType->isBlockPointerType()) // to ObjC|block type
@@ -3690,7 +3705,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
bool br = S.isKnownName("CFBridgingRetain");
S.Diag(loc, diag::err_arc_cast_requires_bridge)
- << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << convKindForDiag
<< unsigned(castExprType->isBlockPointerType()) // of ObjC|block type
<< castExprType
<< 2 // to C pointer type
@@ -3727,7 +3742,7 @@ diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
}
S.Diag(loc, diag::err_arc_mismatched_cast)
- << (CCK != Sema::CCK_ImplicitConversion)
+ << !convKindForDiag
<< srcKind << castExprType << castType
<< castRange << castExpr->getSourceRange();
}
@@ -4180,7 +4195,7 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
if (exprACTC == ACTC_indirectRetainable && castACTC == ACTC_voidPtr)
return ACR_okay;
if (castACTC == ACTC_indirectRetainable && exprACTC == ACTC_voidPtr &&
- CCK != CCK_ImplicitConversion)
+ isCast(CCK))
return ACR_okay;
switch (ARCCastChecker(Context, exprACTC, castACTC, false).Visit(castExpr)) {
@@ -4205,8 +4220,7 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// If this is a non-implicit cast from id or block type to a
// CoreFoundation type, delay complaining in case the cast is used
// in an acceptable context.
- if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) &&
- CCK != CCK_ImplicitConversion)
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) && isCast(CCK))
return ACR_unbridged;
// Issue a diagnostic about a missing @-sign when implicit casting a cstring
@@ -4276,9 +4290,9 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
} else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) {
assert(uo->getOpcode() == UO_Extension);
Expr *sub = stripARCUnbridgedCast(uo->getSubExpr());
- return new (Context) UnaryOperator(sub, UO_Extension, sub->getType(),
- sub->getValueKind(), sub->getObjectKind(),
- uo->getOperatorLoc());
+ return new (Context)
+ UnaryOperator(sub, UO_Extension, sub->getType(), sub->getValueKind(),
+ sub->getObjectKind(), uo->getOperatorLoc(), false);
} else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
assert(!gse->isResultDependent());
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index 011051da58e5..3ee5ec4a4929 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Designator.h"
@@ -32,7 +33,7 @@ using namespace clang;
// Sema Initialization Checking
//===----------------------------------------------------------------------===//
-/// \brief Check whether T is compatible with a wide character type (wchar_t,
+/// Check whether T is compatible with a wide character type (wchar_t,
/// char16_t or char32_t).
static bool IsWideCharCompatible(QualType T, ASTContext &Context) {
if (Context.typesAreCompatible(Context.getWideCharType(), T))
@@ -49,10 +50,12 @@ enum StringInitFailureKind {
SIF_NarrowStringIntoWideChar,
SIF_WideStringIntoChar,
SIF_IncompatWideStringIntoWideChar,
+ SIF_UTF8StringIntoPlainChar,
+ SIF_PlainStringIntoUTF8Char,
SIF_Other
};
-/// \brief Check whether the array of type AT can be initialized by the Init
+/// Check whether the array of type AT can be initialized by the Init
/// expression by means of string initialization. Returns SIF_None if so,
/// otherwise returns a StringInitFailureKind that describes why the
/// initialization would not work.
@@ -77,12 +80,21 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
Context.getCanonicalType(AT->getElementType()).getUnqualifiedType();
switch (SL->getKind()) {
- case StringLiteral::Ascii:
case StringLiteral::UTF8:
+ // char8_t array can be initialized with a UTF-8 string.
+ if (ElemTy->isChar8Type())
+ return SIF_None;
+ LLVM_FALLTHROUGH;
+ case StringLiteral::Ascii:
// char array can be initialized with a narrow string.
// Only allow char x[] = "foo"; not char x[] = L"foo";
if (ElemTy->isCharType())
- return SIF_None;
+ return (SL->getKind() == StringLiteral::UTF8 &&
+ Context.getLangOpts().Char8)
+ ? SIF_UTF8StringIntoPlainChar
+ : SIF_None;
+ if (ElemTy->isChar8Type())
+ return SIF_PlainStringIntoUTF8Char;
if (IsWideCharCompatible(ElemTy, Context))
return SIF_NarrowStringIntoWideChar;
return SIF_Other;
@@ -94,7 +106,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
case StringLiteral::UTF16:
if (Context.typesAreCompatible(Context.Char16Ty, ElemTy))
return SIF_None;
- if (ElemTy->isCharType())
+ if (ElemTy->isCharType() || ElemTy->isChar8Type())
return SIF_WideStringIntoChar;
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
@@ -102,7 +114,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
case StringLiteral::UTF32:
if (Context.typesAreCompatible(Context.Char32Ty, ElemTy))
return SIF_None;
- if (ElemTy->isCharType())
+ if (ElemTy->isCharType() || ElemTy->isChar8Type())
return SIF_WideStringIntoChar;
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
@@ -110,7 +122,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
case StringLiteral::Wide:
if (Context.typesAreCompatible(Context.getWideCharType(), ElemTy))
return SIF_None;
- if (ElemTy->isCharType())
+ if (ElemTy->isCharType() || ElemTy->isChar8Type())
return SIF_WideStringIntoChar;
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
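
Together these hunks implement the C++2a char8_t string-initialization rules:
once char8_t is a distinct type, a u8 literal no longer initializes a plain
char array, and ordinary or wide literals cannot initialize a char8_t array.
Illustrative examples (assuming a mode where LangOpts.Char8 is set):

    char    a[] = u8"text";  // error: SIF_UTF8StringIntoPlainChar
    char8_t b[] = "text";    // error: SIF_PlainStringIntoUTF8Char
    char8_t c[] = u8"text";  // OK: char8_t array from a UTF-8 string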
@@ -206,7 +218,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
namespace {
-/// @brief Semantic checking for initializer lists.
+/// Semantic checking for initializer lists.
///
/// The InitListChecker class contains a set of routines that each
/// handle the initialization of a certain kind of entity, e.g.,
@@ -352,6 +364,7 @@ class InitListChecker {
bool FillWithNoInit = false);
void FillInEmptyInitializations(const InitializedEntity &Entity,
InitListExpr *ILE, bool &RequiresSecondPass,
+ InitListExpr *OuterILE, unsigned OuterIndex,
bool FillWithNoInit = false);
bool CheckFlexibleArrayInit(const InitializedEntity &Entity,
Expr *InitExpr, FieldDecl *Field,
@@ -365,7 +378,7 @@ public:
bool TreatUnavailableAsInvalid);
bool HadError() { return hadError; }
- // @brief Retrieves the fully-structured initializer list used for
+ // Retrieves the fully-structured initializer list used for
// semantic analysis and code generation.
InitListExpr *getFullyStructuredList() const { return FullyStructuredList; }
};
@@ -517,12 +530,13 @@ void InitListChecker::FillInEmptyInitForBase(
ILE->setInit(Init, BaseInit.getAs<Expr>());
} else if (InitListExpr *InnerILE =
dyn_cast<InitListExpr>(ILE->getInit(Init))) {
- FillInEmptyInitializations(BaseEntity, InnerILE,
- RequiresSecondPass, FillWithNoInit);
+ FillInEmptyInitializations(BaseEntity, InnerILE, RequiresSecondPass,
+ ILE, Init, FillWithNoInit);
} else if (DesignatedInitUpdateExpr *InnerDIUE =
dyn_cast<DesignatedInitUpdateExpr>(ILE->getInit(Init))) {
FillInEmptyInitializations(BaseEntity, InnerDIUE->getUpdater(),
- RequiresSecondPass, /*FillWithNoInit =*/true);
+ RequiresSecondPass, ILE, Init,
+ /*FillWithNoInit =*/true);
}
}
@@ -559,6 +573,7 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
hadError = true;
return;
}
+ SemaRef.checkInitializerLifetime(MemberEntity, DIE.get());
if (Init < NumInits)
ILE->setInit(Init, DIE.get());
else {
@@ -605,24 +620,43 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
} else if (InitListExpr *InnerILE
= dyn_cast<InitListExpr>(ILE->getInit(Init)))
FillInEmptyInitializations(MemberEntity, InnerILE,
- RequiresSecondPass, FillWithNoInit);
+ RequiresSecondPass, ILE, Init, FillWithNoInit);
else if (DesignatedInitUpdateExpr *InnerDIUE
= dyn_cast<DesignatedInitUpdateExpr>(ILE->getInit(Init)))
FillInEmptyInitializations(MemberEntity, InnerDIUE->getUpdater(),
- RequiresSecondPass, /*FillWithNoInit =*/ true);
+ RequiresSecondPass, ILE, Init,
+ /*FillWithNoInit =*/true);
}
/// Recursively replaces NULL values within the given initializer list
/// with expressions that perform value-initialization of the
-/// appropriate type.
+/// appropriate type, and finishes off the InitListExpr formation.
void
InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
InitListExpr *ILE,
bool &RequiresSecondPass,
+ InitListExpr *OuterILE,
+ unsigned OuterIndex,
bool FillWithNoInit) {
assert((ILE->getType() != SemaRef.Context.VoidTy) &&
"Should not have void type");
+ // If this is a nested initializer list, we might have changed its contents
+ // (and therefore some of its properties, such as instantiation-dependence)
+ // while filling it in. Inform the outer initializer list so that its state
+ // can be updated to match.
+ // FIXME: We should fully build the inner initializers before constructing
+ // the outer InitListExpr instead of mutating AST nodes after they have
+ // been used as subexpressions of other nodes.
+ struct UpdateOuterILEWithUpdatedInit {
+ InitListExpr *Outer;
+ unsigned OuterIndex;
+ ~UpdateOuterILEWithUpdatedInit() {
+ if (Outer)
+ Outer->setInit(OuterIndex, Outer->getInit(OuterIndex));
+ }
+ } UpdateOuterRAII = {OuterILE, OuterIndex};
+
// A transparent ILE is not performing aggregate initialization and should
// not be filled in.
if (ILE->isTransparent())
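
The new OuterILE/OuterIndex parameters let a nested initializer list notify
its parent when filling changes the child's properties (for instance its
dependence bits): the small RAII struct re-runs setInit on the parent so any
cached state is recomputed. A hedged example of an initializer whose inner
list is mutated during the fill:

    struct P { int x, y; };
    struct Q { P p; int z; };
    // The inner {1} gains a value-initialized 'y' (and Q a value-initialized
    // 'z') during FillInEmptyInitializations; UpdateOuterILEWithUpdatedInit
    // then re-notifies the outer list via Outer->setInit(OuterIndex, ...).
    Q q = { {1} };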
@@ -719,6 +753,9 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
ElementEntity.getKind() == InitializedEntity::EK_VectorElement)
ElementEntity.setElementIndex(Init);
+ if (Init >= NumInits && ILE->hasArrayFiller())
+ return;
+
Expr *InitExpr = (Init < NumInits ? ILE->getInit(Init) : nullptr);
if (!InitExpr && Init < NumInits && ILE->hasArrayFiller())
ILE->setInit(Init, ILE->getArrayFiller());
@@ -769,11 +806,12 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
} else if (InitListExpr *InnerILE
= dyn_cast_or_null<InitListExpr>(InitExpr))
FillInEmptyInitializations(ElementEntity, InnerILE, RequiresSecondPass,
- FillWithNoInit);
+ ILE, Init, FillWithNoInit);
else if (DesignatedInitUpdateExpr *InnerDIUE
= dyn_cast_or_null<DesignatedInitUpdateExpr>(InitExpr))
FillInEmptyInitializations(ElementEntity, InnerDIUE->getUpdater(),
- RequiresSecondPass, /*FillWithNoInit =*/ true);
+ RequiresSecondPass, ILE, Init,
+ /*FillWithNoInit =*/true);
}
}
@@ -795,10 +833,11 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
if (!hadError && !VerifyOnly) {
bool RequiresSecondPass = false;
- FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass);
+ FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass,
+ /*OuterILE=*/nullptr, /*OuterIndex=*/0);
if (RequiresSecondPass && !hadError)
FillInEmptyInitializations(Entity, FullyStructuredList,
- RequiresSecondPass);
+ RequiresSecondPass, nullptr, 0);
}
}
@@ -982,6 +1021,7 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
case InitializedEntity::EK_Binding:
+ case InitializedEntity::EK_StmtExprResult:
llvm_unreachable("unexpected braced scalar init");
}
@@ -1162,10 +1202,12 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
if (!hadError && !VerifyOnly) {
bool RequiresSecondPass = false;
FillInEmptyInitializations(Entity, InnerStructuredList,
- RequiresSecondPass);
+ RequiresSecondPass, StructuredList,
+ StructuredIndex);
if (RequiresSecondPass && !hadError)
FillInEmptyInitializations(Entity, InnerStructuredList,
- RequiresSecondPass);
+ RequiresSecondPass, StructuredList,
+ StructuredIndex);
}
++StructuredIndex;
++Index;
@@ -1989,7 +2031,7 @@ void InitListChecker::CheckStructUnionTypes(
StructuredList, StructuredIndex);
}
-/// \brief Expand a field designator that refers to a member of an
+/// Expand a field designator that refers to a member of an
/// anonymous struct or union into a series of field designators that
/// refers to the field within the appropriate subobject.
///
@@ -2053,7 +2095,7 @@ class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
} // end anonymous namespace
-/// @brief Check the well-formedness of a C99 designated initializer.
+/// Check the well-formedness of a C99 designated initializer.
///
/// Determines whether the designated initializer @p DIE, which
/// resides at the given @p Index within the initializer list @p
@@ -2960,6 +3002,7 @@ DeclarationName InitializedEntity::getName() const {
return DeclarationName(Capture.VarID);
case EK_Result:
+ case EK_StmtExprResult:
case EK_Exception:
case EK_New:
case EK_Temporary:
@@ -2990,6 +3033,7 @@ ValueDecl *InitializedEntity::getDecl() const {
return reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
case EK_Result:
+ case EK_StmtExprResult:
case EK_Exception:
case EK_New:
case EK_Temporary:
@@ -3015,6 +3059,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Exception:
return LocAndNRVO.NRVO;
+ case EK_StmtExprResult:
case EK_Variable:
case EK_Parameter:
case EK_Parameter_CF_Audited:
@@ -3050,6 +3095,7 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
case EK_Parameter_CF_Audited: OS << "CF audited function Parameter";
break;
case EK_Result: OS << "Result"; break;
+ case EK_StmtExprResult: OS << "StmtExprResult"; break;
case EK_Exception: OS << "Exception"; break;
case EK_Member: OS << "Member"; break;
case EK_Binding: OS << "Binding"; break;
@@ -3160,6 +3206,8 @@ bool InitializationSequence::isAmbiguous() const {
case FK_NarrowStringIntoWideCharArray:
case FK_WideStringIntoCharArray:
case FK_IncompatWideStringIntoWideChar:
+ case FK_PlainStringIntoUTF8Char:
+ case FK_UTF8StringIntoPlainChar:
case FK_AddressOfOverloadFailed: // FIXME: Could do better
case FK_NonConstLValueReferenceBindingToTemporary:
case FK_NonConstLValueReferenceBindingToBitfield:
@@ -3492,7 +3540,8 @@ static void MaybeProduceObjCObject(Sema &S,
/// retainable type, then returns need to immediately retain the
/// object. If an autorelease is required, it will be done at the
/// last instant.
- } else if (Entity.getKind() == InitializedEntity::EK_Result) {
+ } else if (Entity.getKind() == InitializedEntity::EK_Result ||
+ Entity.getKind() == InitializedEntity::EK_StmtExprResult) {
if (!Entity.getType()->isObjCRetainableType())
return;
@@ -3507,7 +3556,7 @@ static void TryListInitialization(Sema &S,
InitializationSequence &Sequence,
bool TreatUnavailableAsInvalid);
-/// \brief When initializing from init list via constructor, handle
+/// When initializing from init list via constructor, handle
/// initialization of an object of type std::initializer_list<T>.
///
/// \return true if we have handled initialization of an object of type
@@ -3533,8 +3582,8 @@ static bool TryInitializerListConstruction(Sema &S,
clang::ArrayType::Normal, 0);
InitializedEntity HiddenArray =
InitializedEntity::InitializeTemporary(ArrayType);
- InitializationKind Kind =
- InitializationKind::CreateDirectList(List->getExprLoc());
+ InitializationKind Kind = InitializationKind::CreateDirectList(
+ List->getExprLoc(), List->getLocStart(), List->getLocEnd());
TryListInitialization(S, HiddenArray, Kind, List, Sequence,
TreatUnavailableAsInvalid);
if (Sequence)
@@ -3668,7 +3717,7 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
return CandidateSet.BestViableFunction(S, DeclLoc, Best);
}
-/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
+/// Attempt initialization by constructor (C++ [dcl.init]), which
/// enumerates the constructors of the initialized entity and performs overload
/// resolution to select the best.
/// \param DestType The destination class type.
@@ -3885,7 +3934,7 @@ static void TryValueInitialization(Sema &S,
InitializationSequence &Sequence,
InitListExpr *InitList = nullptr);
-/// \brief Attempt list initialization of a reference.
+/// Attempt list initialization of a reference.
static void TryReferenceListInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -3959,7 +4008,7 @@ static void TryReferenceListInitialization(Sema &S,
}
}
-/// \brief Attempt list initialization (C++0x [dcl.init.list])
+/// Attempt list initialization (C++0x [dcl.init.list])
static void TryListInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4154,7 +4203,7 @@ static void TryListInitialization(Sema &S,
Sequence.AddListInitializationStep(DestType);
}
-/// \brief Try a reference initialization that involves calling a conversion
+/// Try a reference initialization that involves calling a conversion
/// function.
static OverloadingResult TryRefInitWithConversionFunction(
Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind,
@@ -4183,9 +4232,11 @@ static OverloadingResult TryRefInitWithConversionFunction(
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
CandidateSet.clear(OverloadCandidateSet::CSK_InitByUserDefinedConversion);
- // Determine whether we are allowed to call explicit constructors or
- // explicit conversion operators.
- bool AllowExplicit = Kind.AllowExplicit();
+ // Determine whether we are allowed to call explicit conversion operators.
+ // Note that none of [over.match.copy], [over.match.conv], nor
+ // [over.match.ref] permit an explicit constructor to be chosen when
+ // initializing a reference, not even for direct-initialization.
+ bool AllowExplicitCtors = false;
bool AllowExplicitConvs = Kind.allowExplicitConversionFunctionsInRefBinding();
const RecordType *T1RecordType = nullptr;
@@ -4201,7 +4252,7 @@ static OverloadingResult TryRefInitWithConversionFunction(
continue;
if (!Info.Constructor->isInvalidDecl() &&
- Info.Constructor->isConvertingConstructor(AllowExplicit)) {
+ Info.Constructor->isConvertingConstructor(AllowExplicitCtors)) {
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr,
@@ -4344,7 +4395,7 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
const InitializedEntity &Entity,
Expr *CurInitExpr);
-/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
+/// Attempt reference initialization (C++0x [dcl.init.ref])
static void TryReferenceInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4371,13 +4422,13 @@ static void TryReferenceInitialization(Sema &S,
}
/// Determine whether an expression is a non-referenceable glvalue (one to
-/// which a reference can never bind). Attemting to bind a reference to
+/// which a reference can never bind). Attempting to bind a reference to
/// such a glvalue will always create a temporary.
static bool isNonReferenceableGLValue(Expr *E) {
return E->refersToBitField() || E->refersToVectorElement();
}
-/// \brief Reference initialization without resolving overloaded functions.
+/// Reference initialization without resolving overloaded functions.
static void TryReferenceInitializationCore(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4637,7 +4688,7 @@ static void TryReferenceInitializationCore(Sema &S,
Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
}
-/// \brief Attempt character array initialization from a string literal
+/// Attempt character array initialization from a string literal
/// (C++ [dcl.init.string], C99 6.7.8).
static void TryStringLiteralInitialization(Sema &S,
const InitializedEntity &Entity,
@@ -4647,7 +4698,7 @@ static void TryStringLiteralInitialization(Sema &S,
Sequence.AddStringInitStep(Entity.getType());
}
-/// \brief Attempt value initialization (C++ [dcl.init]p7).
+/// Attempt value initialization (C++ [dcl.init]p7).
static void TryValueInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4725,7 +4776,7 @@ static void TryValueInitialization(Sema &S,
Sequence.AddZeroInitializationStep(Entity.getType());
}
-/// \brief Attempt default initialization (C++ [dcl.init]p6).
+/// Attempt default initialization (C++ [dcl.init]p6).
static void TryDefaultInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4764,7 +4815,7 @@ static void TryDefaultInitialization(Sema &S,
}
}
-/// \brief Attempt a user-defined conversion between two types (C++ [dcl.init]),
+/// Attempt a user-defined conversion between two types (C++ [dcl.init]),
/// which enumerates all conversion functions and performs overload resolution
/// to select the best.
static void TryUserDefinedConversion(Sema &S,
@@ -5043,7 +5094,7 @@ static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) {
<< src->getSourceRange();
}
-/// \brief Determine whether we have compatible array types for the
+/// Determine whether we have compatible array types for the
/// purposes of GNU by-copy array initialization.
static bool hasCompatibleArrayTypes(ASTContext &Context, const ArrayType *Dest,
const ArrayType *Source) {
@@ -5337,6 +5388,12 @@ void InitializationSequence::InitializeFrom(Sema &S,
case SIF_IncompatWideStringIntoWideChar:
SetFailed(FK_IncompatWideStringIntoWideChar);
return;
+ case SIF_PlainStringIntoUTF8Char:
+ SetFailed(FK_PlainStringIntoUTF8Char);
+ return;
+ case SIF_UTF8StringIntoPlainChar:
+ SetFailed(FK_UTF8StringIntoPlainChar);
+ return;
case SIF_Other:
break;
}
@@ -5582,6 +5639,7 @@ getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
return !Diagnose ? Sema::AA_Passing : Sema::AA_Passing_CFAudited;
case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_StmtExprResult: // FIXME: Not quite right.
return Sema::AA_Returning;
case InitializedEntity::EK_Temporary:
@@ -5604,13 +5662,14 @@ getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
llvm_unreachable("Invalid EntityKind!");
}
-/// \brief Whether we should bind a created object as a temporary when
+/// Whether we should bind a created object as a temporary when
/// initializing the given entity.
static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
switch (Entity.getKind()) {
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Member:
case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_StmtExprResult:
case InitializedEntity::EK_New:
case InitializedEntity::EK_Variable:
case InitializedEntity::EK_Base:
@@ -5635,11 +5694,12 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
llvm_unreachable("missed an InitializedEntity kind?");
}
-/// \brief Whether the given entity, when initialized with an object
+/// Whether the given entity, when initialized with an object
/// created for that initialization, requires destruction.
static bool shouldDestroyEntity(const InitializedEntity &Entity) {
switch (Entity.getKind()) {
case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_StmtExprResult:
case InitializedEntity::EK_New:
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
@@ -5666,11 +5726,12 @@ static bool shouldDestroyEntity(const InitializedEntity &Entity) {
llvm_unreachable("missed an InitializedEntity kind?");
}
-/// \brief Get the location at which initialization diagnostics should appear.
+/// Get the location at which initialization diagnostics should appear.
static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
Expr *Initializer) {
switch (Entity.getKind()) {
case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_StmtExprResult:
return Entity.getReturnLoc();
case InitializedEntity::EK_Exception:
@@ -5702,7 +5763,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
llvm_unreachable("missed an InitializedEntity kind?");
}
-/// \brief Make a (potentially elidable) temporary copy of the object
+/// Make a (potentially elidable) temporary copy of the object
/// provided by the given initializer by calling the appropriate copy
/// constructor.
///
@@ -5867,7 +5928,7 @@ static ExprResult CopyObject(Sema &S,
return CurInit;
}
-/// \brief Check whether elidable copy construction for binding a reference to
+/// Check whether elidable copy construction for binding a reference to
/// a temporary would have succeeded if we were building in C++98 mode, for
/// -Wc++98-compat.
static void CheckCXX98CompatAccessibleCopy(Sema &S,
@@ -6031,10 +6092,7 @@ PerformConstructorInitialization(Sema &S,
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
if (!TSInfo)
TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
- SourceRange ParenOrBraceRange =
- (Kind.getKind() == InitializationKind::IK_DirectList)
- ? SourceRange(LBraceLoc, RBraceLoc)
- : Kind.getParenRange();
+ SourceRange ParenOrBraceRange = Kind.getParenOrBraceRange();
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
@@ -6068,7 +6126,7 @@ PerformConstructorInitialization(Sema &S,
if (IsListInitialization)
ParenOrBraceRange = SourceRange(LBraceLoc, RBraceLoc);
else if (Kind.getKind() == InitializationKind::IK_Direct)
- ParenOrBraceRange = Kind.getParenRange();
+ ParenOrBraceRange = Kind.getParenOrBraceRange();
// If the entity allows NRVO, mark the construction as elidable
// unconditionally.
@@ -6109,90 +6167,96 @@ PerformConstructorInitialization(Sema &S,
return CurInit;
}
-/// Determine whether the specified InitializedEntity definitely has a lifetime
-/// longer than the current full-expression. Conservatively returns false if
-/// it's unclear.
-static bool
-InitializedEntityOutlivesFullExpression(const InitializedEntity &Entity) {
- const InitializedEntity *Top = &Entity;
- while (Top->getParent())
- Top = Top->getParent();
-
- switch (Top->getKind()) {
- case InitializedEntity::EK_Variable:
- case InitializedEntity::EK_Result:
- case InitializedEntity::EK_Exception:
- case InitializedEntity::EK_Member:
- case InitializedEntity::EK_Binding:
- case InitializedEntity::EK_New:
- case InitializedEntity::EK_Base:
- case InitializedEntity::EK_Delegating:
- return true;
-
- case InitializedEntity::EK_ArrayElement:
- case InitializedEntity::EK_VectorElement:
- case InitializedEntity::EK_BlockElement:
- case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
- case InitializedEntity::EK_ComplexElement:
- // Could not determine what the full initialization is. Assume it might not
- // outlive the full-expression.
- return false;
-
- case InitializedEntity::EK_Parameter:
- case InitializedEntity::EK_Parameter_CF_Audited:
- case InitializedEntity::EK_Temporary:
- case InitializedEntity::EK_LambdaCapture:
- case InitializedEntity::EK_CompoundLiteralInit:
- case InitializedEntity::EK_RelatedResult:
- // The entity being initialized might not outlive the full-expression.
- return false;
- }
-
- llvm_unreachable("unknown entity kind");
+namespace {
+enum LifetimeKind {
+ /// The lifetime of a temporary bound to this entity ends at the end of the
+ /// full-expression, and that's (probably) fine.
+ LK_FullExpression,
+
+ /// The lifetime of a temporary bound to this entity is extended to the
+ /// lifetime of the entity itself.
+ LK_Extended,
+
+ /// The lifetime of a temporary bound to this entity probably ends too soon,
+ /// because the entity is allocated in a new-expression.
+ LK_New,
+
+ /// The lifetime of a temporary bound to this entity ends too soon, because
+ /// the entity is a return object.
+ LK_Return,
+
+ /// The lifetime of a temporary bound to this entity ends too soon, because
+ /// the entity is the result of a statement expression.
+ LK_StmtExprResult,
+
+ /// This is a mem-initializer: if it would extend a temporary (other than via
+ /// a default member initializer), the program is ill-formed.
+ LK_MemInitializer,
+};
+using LifetimeResult =
+ llvm::PointerIntPair<const InitializedEntity *, 3, LifetimeKind>;
}
/// Determine the declaration which an initialized entity ultimately refers to,
/// for the purpose of lifetime-extending a temporary bound to a reference in
/// the initialization of \p Entity.
-static const InitializedEntity *getEntityForTemporaryLifetimeExtension(
+static LifetimeResult getEntityLifetime(
const InitializedEntity *Entity,
- const InitializedEntity *FallbackDecl = nullptr) {
+ const InitializedEntity *InitField = nullptr) {
// C++11 [class.temporary]p5:
switch (Entity->getKind()) {
case InitializedEntity::EK_Variable:
// The temporary [...] persists for the lifetime of the reference
- return Entity;
+ return {Entity, LK_Extended};
case InitializedEntity::EK_Member:
// For subobjects, we look at the complete object.
if (Entity->getParent())
- return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
- Entity);
+ return getEntityLifetime(Entity->getParent(), Entity);
// except:
- // -- A temporary bound to a reference member in a constructor's
- // ctor-initializer persists until the constructor exits.
- return Entity;
+ // C++17 [class.base.init]p8:
+ // A temporary expression bound to a reference member in a
+ // mem-initializer is ill-formed.
+ // C++17 [class.base.init]p11:
+ // A temporary expression bound to a reference member from a
+ // default member initializer is ill-formed.
+ //
+ // The context of p11 and its example suggest that it's only the use of a
+ // default member initializer from a constructor that makes the program
+ // ill-formed, not its mere existence, and that it can even be used by
+ // aggregate initialization.
+ return {Entity, Entity->isDefaultMemberInitializer() ? LK_Extended
+ : LK_MemInitializer};
case InitializedEntity::EK_Binding:
// Per [dcl.decomp]p3, the binding is treated as a variable of reference
// type.
- return Entity;
+ return {Entity, LK_Extended};
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
// -- A temporary bound to a reference parameter in a function call
// persists until the completion of the full-expression containing
// the call.
+ return {nullptr, LK_FullExpression};
+
case InitializedEntity::EK_Result:
// -- The lifetime of a temporary bound to the returned value in a
// function return statement is not extended; the temporary is
// destroyed at the end of the full-expression in the return statement.
+ return {nullptr, LK_Return};
+
+ case InitializedEntity::EK_StmtExprResult:
+ // FIXME: Should we lifetime-extend through the result of a statement
+ // expression?
+ return {nullptr, LK_StmtExprResult};
+
case InitializedEntity::EK_New:
// -- A temporary bound to a reference in a new-initializer persists
// until the completion of the full-expression containing the
// new-initializer.
- return nullptr;
+ return {nullptr, LK_New};
case InitializedEntity::EK_Temporary:
case InitializedEntity::EK_CompoundLiteralInit:
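
The enum replaces the old boolean "outlives the full-expression" query with a
classification that drives both lifetime extension and the new diagnostics.
Hedged one-line examples for the main kinds:

    const int &ok = 1 + 1;          // LK_Extended: lives as long as 'ok'
    const int &bad() { return 2; }  // LK_Return: temporary dies at the return
    struct S {
      const int &r;
      S() : r(3) {}                 // LK_MemInitializer: ill-formed binding
    };
    // LK_New and LK_StmtExprResult similarly flag temporaries bound in a
    // new-initializer or produced by a GNU statement expression.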
@@ -6200,56 +6264,122 @@ static const InitializedEntity *getEntityForTemporaryLifetimeExtension(
// We don't yet know the storage duration of the surrounding temporary.
// Assume it's got full-expression duration for now, it will patch up our
// storage duration if that's not correct.
- return nullptr;
+ return {nullptr, LK_FullExpression};
case InitializedEntity::EK_ArrayElement:
// For subobjects, we look at the complete object.
- return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
- FallbackDecl);
+ return getEntityLifetime(Entity->getParent(), InitField);
case InitializedEntity::EK_Base:
// For subobjects, we look at the complete object.
if (Entity->getParent())
- return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
- Entity);
- LLVM_FALLTHROUGH;
+ return getEntityLifetime(Entity->getParent(), InitField);
+ return {InitField, LK_MemInitializer};
+
case InitializedEntity::EK_Delegating:
// We can reach this case for aggregate initialization in a constructor:
// struct A { int &&r; };
// struct B : A { B() : A{0} {} };
- // In this case, use the innermost field decl as the context.
- return FallbackDecl;
+ // In this case, use the outermost field decl as the context.
+ return {InitField, LK_MemInitializer};
case InitializedEntity::EK_BlockElement:
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
case InitializedEntity::EK_LambdaCapture:
- case InitializedEntity::EK_Exception:
case InitializedEntity::EK_VectorElement:
case InitializedEntity::EK_ComplexElement:
- return nullptr;
+ return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_Exception:
+ // FIXME: Can we diagnose lifetime problems with exceptions?
+ return {nullptr, LK_FullExpression};
}
llvm_unreachable("unknown entity kind");
}
-static void performLifetimeExtension(Expr *Init,
- const InitializedEntity *ExtendingEntity);
+namespace {
+enum ReferenceKind {
+ /// Lifetime would be extended by a reference binding to a temporary.
+ RK_ReferenceBinding,
+ /// Lifetime would be extended by a std::initializer_list object binding to
+ /// its backing array.
+ RK_StdInitializerList,
+};
+
+/// A temporary or local variable. This will be one of:
+/// * A MaterializeTemporaryExpr.
+/// * A DeclRefExpr whose declaration is a local.
+/// * An AddrLabelExpr.
+/// * A BlockExpr for a block with captures.
+using Local = Expr*;
+
+/// Expressions we stepped over when looking for the local state. Any steps
+/// that would inhibit lifetime extension or take us out of subexpressions of
+/// the initializer are included.
+struct IndirectLocalPathEntry {
+ enum EntryKind {
+ DefaultInit,
+ AddressOf,
+ VarInit,
+ LValToRVal,
+ } Kind;
+ Expr *E;
+ Decl *D = nullptr;
+ IndirectLocalPathEntry() {}
+ IndirectLocalPathEntry(EntryKind K, Expr *E) : Kind(K), E(E) {}
+ IndirectLocalPathEntry(EntryKind K, Expr *E, Decl *D) : Kind(K), E(E), D(D) {}
+};
+
+using IndirectLocalPath = llvm::SmallVectorImpl<IndirectLocalPathEntry>;
+
+struct RevertToOldSizeRAII {
+ IndirectLocalPath &Path;
+ unsigned OldSize = Path.size();
+ RevertToOldSizeRAII(IndirectLocalPath &Path) : Path(Path) {}
+ ~RevertToOldSizeRAII() { Path.resize(OldSize); }
+};
+
+using LocalVisitor = llvm::function_ref<bool(IndirectLocalPath &Path, Local L,
+ ReferenceKind RK)>;
+}
+
+static bool isVarOnPath(IndirectLocalPath &Path, VarDecl *VD) {
+ for (auto E : Path)
+ if (E.Kind == IndirectLocalPathEntry::VarInit && E.D == VD)
+ return true;
+ return false;
+}
+
+static bool pathContainsInit(IndirectLocalPath &Path) {
+ return std::any_of(Path.begin(), Path.end(), [=](IndirectLocalPathEntry E) {
+ return E.Kind == IndirectLocalPathEntry::DefaultInit ||
+ E.Kind == IndirectLocalPathEntry::VarInit;
+ });
+}
+
+static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
+ Expr *Init, LocalVisitor Visit,
+ bool RevisitSubinits);
+
+/// Visit the locals that would be reachable through a reference bound to the
+/// glvalue expression \c Init.
+static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
+ Expr *Init, ReferenceKind RK,
+ LocalVisitor Visit) {
+ RevertToOldSizeRAII RAII(Path);
-/// Update a glvalue expression that is used as the initializer of a reference
-/// to note that its lifetime is extended.
-/// \return \c true if any temporary had its lifetime extended.
-static bool
-performReferenceExtension(Expr *Init,
- const InitializedEntity *ExtendingEntity) {
// Walk past any constructs which we can lifetime-extend across.
Expr *Old;
do {
Old = Init;
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
+ Init = EWC->getSubExpr();
+
if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
- if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
- // This is just redundant braces around an initializer. Step over it.
+ // If this is just redundant braces around an initializer, step over it.
+ if (ILE->isTransparent())
Init = ILE->getInit(0);
- }
}
// Step over any subobject adjustments; we may have a materialized
@@ -6263,43 +6393,134 @@ performReferenceExtension(Expr *Init,
Init = CE->getSubExpr();
// Per the current approach for DR1299, look through array element access
- // when performing lifetime extension.
- if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Init))
+ // on array glvalues when performing lifetime extension.
+ if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Init)) {
Init = ASE->getBase();
+ auto *ICE = dyn_cast<ImplicitCastExpr>(Init);
+ if (ICE && ICE->getCastKind() == CK_ArrayToPointerDecay)
+ Init = ICE->getSubExpr();
+ else
+ // We can't lifetime extend through this but we might still find some
+ // retained temporaries.
+ return visitLocalsRetainedByInitializer(Path, Init, Visit, true);
+ }
+
+ // Step into CXXDefaultInitExprs so we can diagnose cases where a
+ // constructor inherits one as an implicit mem-initializer.
+ if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(Init)) {
+ Path.push_back(
+ {IndirectLocalPathEntry::DefaultInit, DIE, DIE->getField()});
+ Init = DIE->getExpr();
+ }
} while (Init != Old);
- if (MaterializeTemporaryExpr *ME = dyn_cast<MaterializeTemporaryExpr>(Init)) {
- // Update the storage duration of the materialized temporary.
- // FIXME: Rebuild the expression instead of mutating it.
- ME->setExtendingDecl(ExtendingEntity->getDecl(),
- ExtendingEntity->allocateManglingNumber());
- performLifetimeExtension(ME->GetTemporaryExpr(), ExtendingEntity);
- return true;
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Init)) {
+ if (Visit(Path, Local(MTE), RK))
+ visitLocalsRetainedByInitializer(Path, MTE->GetTemporaryExpr(), Visit,
+ true);
+ }
+
+ switch (Init->getStmtClass()) {
+ case Stmt::DeclRefExprClass: {
+ // If we find the name of a local non-reference parameter, we could have a
+ // lifetime problem.
+ auto *DRE = cast<DeclRefExpr>(Init);
+ auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (VD && VD->hasLocalStorage() &&
+ !DRE->refersToEnclosingVariableOrCapture()) {
+ if (!VD->getType()->isReferenceType()) {
+ Visit(Path, Local(DRE), RK);
+ } else if (isa<ParmVarDecl>(DRE->getDecl())) {
+ // The lifetime of a reference parameter is unknown; assume it's OK
+ // for now.
+ break;
+ } else if (VD->getInit() && !isVarOnPath(Path, VD)) {
+ Path.push_back({IndirectLocalPathEntry::VarInit, DRE, VD});
+ visitLocalsRetainedByReferenceBinding(Path, VD->getInit(),
+ RK_ReferenceBinding, Visit);
+ }
+ }
+ break;
}
- return false;
+ case Stmt::UnaryOperatorClass: {
+ // The only unary operator that makes sense to handle here
+ // is Deref. All others don't resolve to a "name." This includes
+ // handling all sorts of rvalues passed to a unary operator.
+ const UnaryOperator *U = cast<UnaryOperator>(Init);
+ if (U->getOpcode() == UO_Deref)
+ visitLocalsRetainedByInitializer(Path, U->getSubExpr(), Visit, true);
+ break;
+ }
+
+ case Stmt::OMPArraySectionExprClass: {
+ visitLocalsRetainedByInitializer(
+ Path, cast<OMPArraySectionExpr>(Init)->getBase(), Visit, true);
+ break;
+ }
+
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryConditionalOperatorClass: {
+ auto *C = cast<AbstractConditionalOperator>(Init);
+ if (!C->getTrueExpr()->getType()->isVoidType())
+ visitLocalsRetainedByReferenceBinding(Path, C->getTrueExpr(), RK, Visit);
+ if (!C->getFalseExpr()->getType()->isVoidType())
+ visitLocalsRetainedByReferenceBinding(Path, C->getFalseExpr(), RK, Visit);
+ break;
+ }
+
+ // FIXME: Visit the left-hand side of an -> or ->*.
+
+ default:
+ break;
+ }
}
-/// Update a prvalue expression that is going to be materialized as a
-/// lifetime-extended temporary.
-static void performLifetimeExtension(Expr *Init,
- const InitializedEntity *ExtendingEntity) {
+/// Visit the locals that would be reachable through an object initialized by
+/// the prvalue expression \c Init.
+static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
+ Expr *Init, LocalVisitor Visit,
+ bool RevisitSubinits) {
+ RevertToOldSizeRAII RAII(Path);
+
+ // Step into CXXDefaultInitExprs so we can diagnose cases where a
+ // constructor inherits one as an implicit mem-initializer.
+ if (auto *DIE = dyn_cast<CXXDefaultInitExpr>(Init)) {
+ Path.push_back({IndirectLocalPathEntry::DefaultInit, DIE, DIE->getField()});
+ Init = DIE->getExpr();
+ }
+
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
+ Init = EWC->getSubExpr();
+
// Dig out the expression which constructs the extended temporary.
Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
if (CXXBindTemporaryExpr *BTE = dyn_cast<CXXBindTemporaryExpr>(Init))
Init = BTE->getSubExpr();
- if (CXXStdInitializerListExpr *ILE =
- dyn_cast<CXXStdInitializerListExpr>(Init)) {
- performReferenceExtension(ILE->getSubExpr(), ExtendingEntity);
- return;
- }
+ // C++17 [dcl.init.list]p6:
+ // initializing an initializer_list object from the array extends the
+ // lifetime of the array exactly like binding a reference to a temporary.
+ if (auto *ILE = dyn_cast<CXXStdInitializerListExpr>(Init))
+ return visitLocalsRetainedByReferenceBinding(Path, ILE->getSubExpr(),
+ RK_StdInitializerList, Visit);
if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ // We already visited the elements of this initializer list while
+ // performing the initialization. Don't visit them again unless we've
+ // changed the lifetime of the initialized entity.
+ if (!RevisitSubinits)
+ return;
+
+ if (ILE->isTransparent())
+ return visitLocalsRetainedByInitializer(Path, ILE->getInit(0), Visit,
+ RevisitSubinits);
+
if (ILE->getType()->isArrayType()) {
for (unsigned I = 0, N = ILE->getNumInits(); I != N; ++I)
- performLifetimeExtension(ILE->getInit(I), ExtendingEntity);
+ visitLocalsRetainedByInitializer(Path, ILE->getInit(I), Visit,
+ RevisitSubinits);
return;
}
@@ -6311,7 +6532,8 @@ static void performLifetimeExtension(Expr *Init,
// bound to temporaries, those temporaries are also lifetime-extended.
if (RD->isUnion() && ILE->getInitializedFieldInUnion() &&
ILE->getInitializedFieldInUnion()->getType()->isReferenceType())
- performReferenceExtension(ILE->getInit(0), ExtendingEntity);
+ visitLocalsRetainedByReferenceBinding(Path, ILE->getInit(0),
+ RK_ReferenceBinding, Visit);
else {
unsigned Index = 0;
for (const auto *I : RD->fields()) {
@@ -6321,51 +6543,365 @@ static void performLifetimeExtension(Expr *Init,
continue;
Expr *SubInit = ILE->getInit(Index);
if (I->getType()->isReferenceType())
- performReferenceExtension(SubInit, ExtendingEntity);
- else if (isa<InitListExpr>(SubInit) ||
- isa<CXXStdInitializerListExpr>(SubInit))
- // This may be either aggregate-initialization of a member or
- // initialization of a std::initializer_list object. Either way,
+ visitLocalsRetainedByReferenceBinding(Path, SubInit,
+ RK_ReferenceBinding, Visit);
+ else
+ // This might be either aggregate-initialization of a member or
+ // initialization of a std::initializer_list object. Regardless,
// we should recursively lifetime-extend that initializer.
- performLifetimeExtension(SubInit, ExtendingEntity);
+ visitLocalsRetainedByInitializer(Path, SubInit, Visit,
+ RevisitSubinits);
++Index;
}
}
}
+ return;
}
-}
-static void warnOnLifetimeExtension(Sema &S, const InitializedEntity &Entity,
- const Expr *Init, bool IsInitializerList,
- const ValueDecl *ExtendingDecl) {
- // Warn if a field lifetime-extends a temporary.
- if (isa<FieldDecl>(ExtendingDecl)) {
- if (IsInitializerList) {
- S.Diag(Init->getExprLoc(), diag::warn_dangling_std_initializer_list)
- << /*at end of constructor*/true;
+ // Step over value-preserving rvalue casts.
+ while (auto *CE = dyn_cast<CastExpr>(Init)) {
+ switch (CE->getCastKind()) {
+ case CK_LValueToRValue:
+ // If we can match the lvalue to a const object, we can look at its
+ // initializer.
+ Path.push_back({IndirectLocalPathEntry::LValToRVal, CE});
+ return visitLocalsRetainedByReferenceBinding(
+ Path, Init, RK_ReferenceBinding,
+ [&](IndirectLocalPath &Path, Local L, ReferenceKind RK) -> bool {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
+ auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (VD && VD->getType().isConstQualified() && VD->getInit()) {
+ Path.push_back({IndirectLocalPathEntry::VarInit, DRE, VD});
+ visitLocalsRetainedByInitializer(Path, VD->getInit(), Visit, true);
+ }
+ } else if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(L)) {
+ if (MTE->getType().isConstQualified())
+ visitLocalsRetainedByInitializer(Path, MTE->GetTemporaryExpr(),
+ Visit, true);
+ }
+ return false;
+ });
+
+ // We assume that objects can be retained by pointers cast to integers,
+ // but not if the integer is cast to floating-point type or to _Complex.
+ // We assume that casts to 'bool' do not preserve enough information to
+ // retain a local object.
+ case CK_NoOp:
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_AddressSpaceConversion:
+ break;
+
+ case CK_ArrayToPointerDecay:
+ // Model array-to-pointer decay as taking the address of the array
+ // lvalue.
+ Path.push_back({IndirectLocalPathEntry::AddressOf, CE});
+ return visitLocalsRetainedByReferenceBinding(Path, CE->getSubExpr(),
+ RK_ReferenceBinding, Visit);
+
+ default:
return;
}
- bool IsSubobjectMember = false;
- for (const InitializedEntity *Ent = Entity.getParent(); Ent;
- Ent = Ent->getParent()) {
- if (Ent->getKind() != InitializedEntity::EK_Base) {
- IsSubobjectMember = true;
+ Init = CE->getSubExpr();
+ }
+
+ Init = Init->IgnoreParens();
+ switch (Init->getStmtClass()) {
+ case Stmt::UnaryOperatorClass: {
+ auto *UO = cast<UnaryOperator>(Init);
+ // If the initializer is the address of a local, we could have a lifetime
+ // problem.
+ if (UO->getOpcode() == UO_AddrOf) {
+ // If this is &rvalue, then it's ill-formed and we have already diagnosed
+ // it. Don't produce a redundant warning about the lifetime of the
+ // temporary.
+ if (isa<MaterializeTemporaryExpr>(UO->getSubExpr()))
+ return;
+
+ Path.push_back({IndirectLocalPathEntry::AddressOf, UO});
+ visitLocalsRetainedByReferenceBinding(Path, UO->getSubExpr(),
+ RK_ReferenceBinding, Visit);
+ }
+ break;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ // Handle pointer arithmetic.
+ auto *BO = cast<BinaryOperator>(Init);
+ BinaryOperatorKind BOK = BO->getOpcode();
+ if (!BO->getType()->isPointerType() || (BOK != BO_Add && BOK != BO_Sub))
+ break;
+
+ if (BO->getLHS()->getType()->isPointerType())
+ visitLocalsRetainedByInitializer(Path, BO->getLHS(), Visit, true);
+ else if (BO->getRHS()->getType()->isPointerType())
+ visitLocalsRetainedByInitializer(Path, BO->getRHS(), Visit, true);
+ break;
+ }
+
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryConditionalOperatorClass: {
+ auto *C = cast<AbstractConditionalOperator>(Init);
+ // In C++, we can have a throw-expression operand, which has 'void' type
+ // and isn't interesting from a lifetime perspective.
+ if (!C->getTrueExpr()->getType()->isVoidType())
+ visitLocalsRetainedByInitializer(Path, C->getTrueExpr(), Visit, true);
+ if (!C->getFalseExpr()->getType()->isVoidType())
+ visitLocalsRetainedByInitializer(Path, C->getFalseExpr(), Visit, true);
+ break;
+ }
+
+ case Stmt::BlockExprClass:
+ if (cast<BlockExpr>(Init)->getBlockDecl()->hasCaptures()) {
+ // This is a local block, whose lifetime is that of the function.
+ Visit(Path, Local(cast<BlockExpr>(Init)), RK_ReferenceBinding);
+ }
+ break;
+
+ case Stmt::AddrLabelExprClass:
+ // We want to warn if the address of a label would escape the function.
+ Visit(Path, Local(cast<AddrLabelExpr>(Init)), RK_ReferenceBinding);
+ break;
+
+ default:
+ break;
+ }
+}
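A rough illustration of two of the initializer forms this function walks, the address-of and conditional-operator cases (assumed example, not from the patch):

    int *f(bool b) {
      int x = 0, y = 0;
      // Both arms of the conditional are visited; each '&' pushes an
      // AddressOf path entry and ultimately reaches a local DeclRefExpr,
      // so returning this value warns about the escaping locals.
      return b ? &x : &y;
    }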
+
+/// Determine whether this is an indirect path to a temporary that we are
+/// supposed to lifetime-extend along (but don't).
+static bool shouldLifetimeExtendThroughPath(const IndirectLocalPath &Path) {
+ for (auto Elem : Path) {
+ if (Elem.Kind != IndirectLocalPathEntry::DefaultInit)
+ return false;
+ }
+ return true;
+}
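The predicate accepts paths made up only of DefaultInit entries, i.e. the DR1815 case; a sketch (illustrative, not from the patch):

    struct A {
      const int &r = 42; // default member initializer binds a temporary
    };
    // Using the default initializer in aggregate initialization should
    // lifetime-extend the temporary per DR1815; the caller diagnoses this
    // as unsupported (warn_unsupported_lifetime_extension) instead.
    A a{};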
+
+/// Find the range for the first interesting entry in the path at or after I.
+static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
+ Expr *E) {
+ for (unsigned N = Path.size(); I != N; ++I) {
+ switch (Path[I].Kind) {
+ case IndirectLocalPathEntry::AddressOf:
+ case IndirectLocalPathEntry::LValToRVal:
+ // These exist primarily to mark the path as not permitting or
+ // supporting lifetime extension.
+ break;
+
+ case IndirectLocalPathEntry::DefaultInit:
+ case IndirectLocalPathEntry::VarInit:
+ return Path[I].E->getSourceRange();
+ }
+ }
+ return E->getSourceRange();
+}
+
+void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
+ Expr *Init) {
+ LifetimeResult LR = getEntityLifetime(&Entity);
+ LifetimeKind LK = LR.getInt();
+ const InitializedEntity *ExtendingEntity = LR.getPointer();
+
+ // If this entity doesn't have an interesting lifetime, don't bother looking
+ // for temporaries within its initializer.
+ if (LK == LK_FullExpression)
+ return;
+
+ auto TemporaryVisitor = [&](IndirectLocalPath &Path, Local L,
+ ReferenceKind RK) -> bool {
+ SourceRange DiagRange = nextPathEntryRange(Path, 0, L);
+ SourceLocation DiagLoc = DiagRange.getBegin();
+
+ switch (LK) {
+ case LK_FullExpression:
+ llvm_unreachable("already handled this");
+
+ case LK_Extended: {
+ auto *MTE = dyn_cast<MaterializeTemporaryExpr>(L);
+ if (!MTE) {
+ // The initialized entity has lifetime beyond the full-expression,
+ // and the local entity does too, so don't warn.
+ //
+ // FIXME: We should consider warning if a static / thread storage
+ // duration variable retains an automatic storage duration local.
+ return false;
+ }
+
+ // Lifetime-extend the temporary.
+ if (Path.empty()) {
+ // Update the storage duration of the materialized temporary.
+ // FIXME: Rebuild the expression instead of mutating it.
+ MTE->setExtendingDecl(ExtendingEntity->getDecl(),
+ ExtendingEntity->allocateManglingNumber());
+ // Also visit the temporaries lifetime-extended by this initializer.
+ return true;
+ }
+
+ if (shouldLifetimeExtendThroughPath(Path)) {
+ // We're supposed to lifetime-extend the temporary along this path (per
+ // the resolution of DR1815), but we don't support that yet.
+ //
+ // FIXME: Properly handle this situation. Perhaps the easiest approach
+ // would be to clone the initializer expression on each use that would
+ // lifetime extend its temporaries.
+ Diag(DiagLoc, diag::warn_unsupported_lifetime_extension)
+ << RK << DiagRange;
+ } else {
+ // If the path goes through the initialization of a variable or field,
+ // it can't possibly reach a temporary created in this full-expression.
+ // We will have already diagnosed any problems with the initializer.
+ if (pathContainsInit(Path))
+ return false;
+
+ Diag(DiagLoc, diag::warn_dangling_variable)
+ << RK << !Entity.getParent() << ExtendingEntity->getDecl()
+ << Init->isGLValue() << DiagRange;
+ }
+ break;
+ }
+
+ case LK_MemInitializer: {
+ if (isa<MaterializeTemporaryExpr>(L)) {
+ // Under C++ DR1696, if a mem-initializer (or a default member
+ // initializer used in the absence of one) would lifetime-extend a
+ // temporary, the program is ill-formed.
+ if (auto *ExtendingDecl =
+ ExtendingEntity ? ExtendingEntity->getDecl() : nullptr) {
+ bool IsSubobjectMember = ExtendingEntity != &Entity;
+ Diag(DiagLoc, shouldLifetimeExtendThroughPath(Path)
+ ? diag::err_dangling_member
+ : diag::warn_dangling_member)
+ << ExtendingDecl << IsSubobjectMember << RK << DiagRange;
+ // Don't bother adding a note pointing to the field if we're inside
+ // its default member initializer; our primary diagnostic points to
+ // the same place in that case.
+ if (Path.empty() ||
+ Path.back().Kind != IndirectLocalPathEntry::DefaultInit) {
+ Diag(ExtendingDecl->getLocation(),
+ diag::note_lifetime_extending_member_declared_here)
+ << RK << IsSubobjectMember;
+ }
+ } else {
+ // We have a mem-initializer but no particular field within it; this
+ // is either a base-class initializer or a delegating initializer directly
+ // initializing the base class from something that doesn't live long
+ // enough.
+ //
+ // FIXME: Warn on this.
+ return false;
+ }
+ } else {
+ // Paths via a default initializer can only occur during error recovery
+ // (there's no other way that a default initializer can refer to a
+ // local). Don't produce a bogus warning on those cases.
+ if (pathContainsInit(Path))
+ return false;
+
+ auto *DRE = dyn_cast<DeclRefExpr>(L);
+ auto *VD = DRE ? dyn_cast<VarDecl>(DRE->getDecl()) : nullptr;
+ if (!VD) {
+ // A member was initialized to a local block.
+ // FIXME: Warn on this.
+ return false;
+ }
+
+ if (auto *Member =
+ ExtendingEntity ? ExtendingEntity->getDecl() : nullptr) {
+ bool IsPointer = Member->getType()->isAnyPointerType();
+ Diag(DiagLoc, IsPointer ? diag::warn_init_ptr_member_to_parameter_addr
+ : diag::warn_bind_ref_member_to_parameter)
+ << Member << VD << isa<ParmVarDecl>(VD) << DiagRange;
+ Diag(Member->getLocation(),
+ diag::note_ref_or_ptr_member_declared_here)
+ << (unsigned)IsPointer;
+ }
+ }
+ break;
+ }
+
+ case LK_New:
+ if (isa<MaterializeTemporaryExpr>(L)) {
+ Diag(DiagLoc, RK == RK_ReferenceBinding
+ ? diag::warn_new_dangling_reference
+ : diag::warn_new_dangling_initializer_list)
+ << !Entity.getParent() << DiagRange;
+ } else {
+ // We can't determine if the allocation outlives the local declaration.
+ return false;
+ }
+ break;
+
+ case LK_Return:
+ case LK_StmtExprResult:
+ if (auto *DRE = dyn_cast<DeclRefExpr>(L)) {
+ // We can't determine if the local variable outlives the statement
+ // expression.
+ if (LK == LK_StmtExprResult)
+ return false;
+ Diag(DiagLoc, diag::warn_ret_stack_addr_ref)
+ << Entity.getType()->isReferenceType() << DRE->getDecl()
+ << isa<ParmVarDecl>(DRE->getDecl()) << DiagRange;
+ } else if (isa<BlockExpr>(L)) {
+ Diag(DiagLoc, diag::err_ret_local_block) << DiagRange;
+ } else if (isa<AddrLabelExpr>(L)) {
+ Diag(DiagLoc, diag::warn_ret_addr_label) << DiagRange;
+ } else {
+ Diag(DiagLoc, diag::warn_ret_local_temp_addr_ref)
+ << Entity.getType()->isReferenceType() << DiagRange;
+ }
+ break;
+ }
+
+ for (unsigned I = 0; I != Path.size(); ++I) {
+ auto Elem = Path[I];
+
+ switch (Elem.Kind) {
+ case IndirectLocalPathEntry::AddressOf:
+ case IndirectLocalPathEntry::LValToRVal:
+ // These exist primarily to mark the path as not permitting or
+ // supporting lifetime extension.
+ break;
+
+ case IndirectLocalPathEntry::DefaultInit: {
+ auto *FD = cast<FieldDecl>(Elem.D);
+ Diag(FD->getLocation(), diag::note_init_with_default_member_initalizer)
+ << FD << nextPathEntryRange(Path, I + 1, L);
+ break;
+ }
+
+ case IndirectLocalPathEntry::VarInit:
+ const VarDecl *VD = cast<VarDecl>(Elem.D);
+ Diag(VD->getLocation(), diag::note_local_var_initializer)
+ << VD->getType()->isReferenceType() << VD->getDeclName()
+ << nextPathEntryRange(Path, I + 1, L);
break;
}
}
- S.Diag(Init->getExprLoc(),
- diag::warn_bind_ref_member_to_temporary)
- << ExtendingDecl << Init->getSourceRange()
- << IsSubobjectMember << IsInitializerList;
- if (IsSubobjectMember)
- S.Diag(ExtendingDecl->getLocation(),
- diag::note_ref_subobject_of_member_declared_here);
- else
- S.Diag(ExtendingDecl->getLocation(),
- diag::note_ref_or_ptr_member_declared_here)
- << /*is pointer*/false;
- }
+
+ // We didn't lifetime-extend, so don't go any further; we don't need more
+ // warnings or errors on inner temporaries within this one's initializer.
+ return false;
+ };
+
+ llvm::SmallVector<IndirectLocalPathEntry, 8> Path;
+ if (Init->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, Init, RK_ReferenceBinding,
+ TemporaryVisitor);
+ else
+ visitLocalsRetainedByInitializer(Path, Init, TemporaryVisitor, false);
}
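A minimal pair showing the two outcomes of this entry point, assuming default warning flags (illustrative only):

    const int &ok = 42;  // LK_Extended with an empty path: the temporary
                         // int is materialized and lifetime-extended.
    int *bad() {
      int local = 0;
      return &local;     // LK_Return through an AddressOf entry: warns
    }                    // about returning the address of a local.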
static void DiagnoseNarrowingInInitList(Sema &S,
@@ -6402,13 +6938,7 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
// Find the std::move call and get the argument.
const CallExpr *CE = dyn_cast<CallExpr>(InitExpr->IgnoreParens());
- if (!CE || CE->getNumArgs() != 1)
- return;
-
- const FunctionDecl *MoveFunction = CE->getDirectCallee();
- if (!MoveFunction || !MoveFunction->isInStdNamespace() ||
- !MoveFunction->getIdentifier() ||
- !MoveFunction->getIdentifier()->isStr("move"))
+ if (!CE || !CE->isCallToStdMove())
return;
const Expr *Arg = CE->getArg(0)->IgnoreImplicit();
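The new isCallToStdMove() call replaces the open-coded namespace/name test; the construct being matched looks like this (a sketch, not from the patch):

    #include <string>
    #include <utility>

    std::string make() {
      std::string s = "hello";
      // std::move on a returned local defeats copy elision; the caller of
      // this helper uses the matched call to emit the move warnings.
      return std::move(s);
    }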
@@ -6465,7 +6995,7 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
// macro only if it is at the beginning of the macro.
while (ArgLoc.isMacroID() &&
S.getSourceManager().isAtStartOfImmediateMacroExpansion(ArgLoc)) {
- ArgLoc = S.getSourceManager().getImmediateExpansionRange(ArgLoc).first;
+ ArgLoc = S.getSourceManager().getImmediateExpansionRange(ArgLoc).getBegin();
}
if (LParen.isMacroID())
@@ -6594,7 +7124,7 @@ InitializationSequence::Perform(Sema &S,
if (Kind.getKind() == InitializationKind::IK_Direct &&
!Kind.isExplicitCast()) {
// Rebuild the ParenListExpr.
- SourceRange ParenRange = Kind.getParenRange();
+ SourceRange ParenRange = Kind.getParenOrBraceRange();
return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(),
Args);
}
@@ -6633,20 +7163,6 @@ InitializationSequence::Perform(Sema &S,
return ExprError();
}
- // Diagnose cases where we initialize a pointer to an array temporary, and the
- // pointer obviously outlives the temporary.
- if (Args.size() == 1 && Args[0]->getType()->isArrayType() &&
- Entity.getType()->isPointerType() &&
- InitializedEntityOutlivesFullExpression(Entity)) {
- const Expr *Init = Args[0]->skipRValueSubobjectAdjustments();
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Init))
- Init = MTE->GetTemporaryExpr();
- Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context);
- if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary)
- S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay)
- << Init->getSourceRange();
- }
-
QualType DestType = Entity.getType().getNonReferenceType();
// FIXME: Ugly hack around the fact that Entity.getType() is not
// the same as Entity.getDecl()->getType() in cases involving type merging,
@@ -6797,16 +7313,6 @@ InitializationSequence::Perform(Sema &S,
}
}
- // Even though we didn't materialize a temporary, the binding may still
- // extend the lifetime of a temporary. This happens if we bind a reference
- // to the result of a cast to reference type.
- if (const InitializedEntity *ExtendingEntity =
- getEntityForTemporaryLifetimeExtension(&Entity))
- if (performReferenceExtension(CurInit.get(), ExtendingEntity))
- warnOnLifetimeExtension(S, Entity, CurInit.get(),
- /*IsInitializerList=*/false,
- ExtendingEntity->getDecl());
-
CheckForNullPointerDereference(S, CurInit.get());
break;
@@ -6821,23 +7327,13 @@ InitializationSequence::Perform(Sema &S,
// Materialize the temporary into memory.
MaterializeTemporaryExpr *MTE = S.CreateMaterializeTemporaryExpr(
Step->Type, CurInit.get(), Entity.getType()->isLValueReferenceType());
-
- // Maybe lifetime-extend the temporary's subobjects to match the
- // entity's lifetime.
- if (const InitializedEntity *ExtendingEntity =
- getEntityForTemporaryLifetimeExtension(&Entity))
- if (performReferenceExtension(MTE, ExtendingEntity))
- warnOnLifetimeExtension(S, Entity, CurInit.get(),
- /*IsInitializerList=*/false,
- ExtendingEntity->getDecl());
+ CurInit = MTE;
// If we're extending this temporary to automatic storage duration -- we
// need to register its cleanup during the full-expression's cleanups.
if (MTE->getStorageDuration() == SD_Automatic &&
MTE->getType().isDestructedType())
S.Cleanup.setExprNeedsCleanups(true);
-
- CurInit = MTE;
break;
}
@@ -6910,16 +7406,6 @@ InitializationSequence::Perform(Sema &S,
if (S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation()))
return ExprError();
- // FIXME: Should we move this initialization into a separate
- // derived-to-base conversion? I believe the answer is "no", because
- // we don't want to turn off access control here for c-style casts.
- CurInit = S.PerformObjectArgumentInitialization(CurInit.get(),
- /*Qualifier=*/nullptr,
- FoundFn, Conversion);
- if (CurInit.isInvalid())
- return ExprError();
-
- // Build the actual call to the conversion function.
CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion,
HadMultipleCandidates);
if (CurInit.isInvalid())
@@ -7114,14 +7600,17 @@ InitializationSequence::Perform(Sema &S,
bool IsStdInitListInit =
Step->Kind == SK_StdInitializerListConstructorCall;
Expr *Source = CurInit.get();
+ SourceRange Range = Kind.hasParenOrBraceRange()
+ ? Kind.getParenOrBraceRange()
+ : SourceRange();
CurInit = PerformConstructorInitialization(
S, UseTemporary ? TempEntity : Entity, Kind,
Source ? MultiExprArg(Source) : Args, *Step,
ConstructorInitRequiresZeroInit,
/*IsListInitialization*/ IsStdInitListInit,
/*IsStdInitListInitialization*/ IsStdInitListInit,
- /*LBraceLoc*/ SourceLocation(),
- /*RBraceLoc*/ SourceLocation());
+ /*LBraceLoc*/ Range.getBegin(),
+ /*RBraceLoc*/ Range.getEnd());
break;
}
@@ -7277,15 +7766,6 @@ InitializationSequence::Perform(Sema &S,
CurInit.get()->getType(), CurInit.get(),
/*BoundToLvalueReference=*/false);
- // Maybe lifetime-extend the array temporary's subobjects to match the
- // entity's lifetime.
- if (const InitializedEntity *ExtendingEntity =
- getEntityForTemporaryLifetimeExtension(&Entity))
- if (performReferenceExtension(MTE, ExtendingEntity))
- warnOnLifetimeExtension(S, Entity, CurInit.get(),
- /*IsInitializerList=*/true,
- ExtendingEntity->getDecl());
-
// Wrap it in a construction of a std::initializer_list<T>.
CurInit = new (S.Context) CXXStdInitializerListExpr(Step->Type, MTE);
@@ -7407,6 +7887,11 @@ InitializationSequence::Perform(Sema &S,
}
}
+ // Check whether the initializer has a shorter lifetime than the initialized
+ // entity, and if not, either lifetime-extend or warn as appropriate.
+ if (auto *Init = CurInit.get())
+ S.checkInitializerLifetime(Entity, Init);
+
// Diagnose non-fatal problems with the completed initialization.
if (Entity.getKind() == InitializedEntity::EK_Member &&
cast<FieldDecl>(Entity.getDecl())->isBitField())
@@ -7524,6 +8009,19 @@ bool InitializationSequence::Diagnose(Sema &S,
if (!Failed())
return false;
+ // When we want to diagnose only one element of a braced-init-list,
+ // we need to factor it out.
+ Expr *OnlyArg;
+ if (Args.size() == 1) {
+ auto *List = dyn_cast<InitListExpr>(Args[0]);
+ if (List && List->getNumInits() == 1)
+ OnlyArg = List->getInit(0);
+ else
+ OnlyArg = Args[0];
+ } else
+ OnlyArg = nullptr;
+
QualType DestType = Entity.getType();
switch (Failure) {
case FK_TooManyInitsForReference:
@@ -7566,6 +8064,17 @@ bool InitializationSequence::Diagnose(Sema &S,
S.Diag(Kind.getLocation(),
diag::err_array_init_incompat_wide_string_into_wchar);
break;
+ case FK_PlainStringIntoUTF8Char:
+ S.Diag(Kind.getLocation(),
+ diag::err_array_init_plain_string_into_char8_t);
+ S.Diag(Args.front()->getLocStart(),
+ diag::note_array_init_plain_string_into_char8_t)
+ << FixItHint::CreateInsertion(Args.front()->getLocStart(), "u8");
+ break;
+ case FK_UTF8StringIntoPlainChar:
+ S.Diag(Kind.getLocation(),
+ diag::err_array_init_utf8_string_into_char);
+ break;
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
S.Diag(Kind.getLocation(),
@@ -7573,7 +8082,7 @@ bool InitializationSequence::Diagnose(Sema &S,
? diag::err_array_init_different_type
: diag::err_array_init_non_constant_array))
<< DestType.getNonReferenceType()
- << Args[0]->getType()
+ << OnlyArg->getType()
<< Args[0]->getSourceRange();
break;
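The two new failure kinds above correspond to the C++2a char8_t initialization rules; roughly (illustrative, assuming a mode with char8_t enabled):

    char8_t ok[]  = u8"text"; // fine
    char8_t bad[] = "text";   // FK_PlainStringIntoUTF8Char; the note
                              // suggests inserting the u8 prefix
    char plain[]  = u8"text"; // FK_UTF8StringIntoPlainChar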
@@ -7584,7 +8093,7 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_AddressOfOverloadFailed: {
DeclAccessPair Found;
- S.ResolveAddressOfOverloadedFunction(Args[0],
+ S.ResolveAddressOfOverloadedFunction(OnlyArg,
DestType.getNonReferenceType(),
true,
Found);
@@ -7592,9 +8101,9 @@ bool InitializationSequence::Diagnose(Sema &S,
}
case FK_AddressOfUnaddressableFunction: {
- auto *FD = cast<FunctionDecl>(cast<DeclRefExpr>(Args[0])->getDecl());
+ auto *FD = cast<FunctionDecl>(cast<DeclRefExpr>(OnlyArg)->getDecl());
S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
- Args[0]->getLocStart());
+ OnlyArg->getLocStart());
break;
}
@@ -7604,11 +8113,11 @@ bool InitializationSequence::Diagnose(Sema &S,
case OR_Ambiguous:
if (Failure == FK_UserConversionOverloadFailed)
S.Diag(Kind.getLocation(), diag::err_typecheck_ambiguous_condition)
- << Args[0]->getType() << DestType
+ << OnlyArg->getType() << DestType
<< Args[0]->getSourceRange();
else
S.Diag(Kind.getLocation(), diag::err_ref_init_ambiguous)
- << DestType << Args[0]->getType()
+ << DestType << OnlyArg->getType()
<< Args[0]->getSourceRange();
FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args);
@@ -7618,10 +8127,10 @@ bool InitializationSequence::Diagnose(Sema &S,
if (!S.RequireCompleteType(Kind.getLocation(),
DestType.getNonReferenceType(),
diag::err_typecheck_nonviable_condition_incomplete,
- Args[0]->getType(), Args[0]->getSourceRange()))
+ OnlyArg->getType(), Args[0]->getSourceRange()))
S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition)
<< (Entity.getKind() == InitializedEntity::EK_Result)
- << Args[0]->getType() << Args[0]->getSourceRange()
+ << OnlyArg->getType() << Args[0]->getSourceRange()
<< DestType.getNonReferenceType();
FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args);
@@ -7629,7 +8138,7 @@ bool InitializationSequence::Diagnose(Sema &S,
case OR_Deleted: {
S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
- << Args[0]->getType() << DestType.getNonReferenceType()
+ << OnlyArg->getType() << DestType.getNonReferenceType()
<< Args[0]->getSourceRange();
OverloadCandidateSet::iterator Best;
OverloadingResult Ovl
@@ -7665,7 +8174,7 @@ bool InitializationSequence::Diagnose(Sema &S,
: diag::err_lvalue_reference_bind_to_unrelated)
<< DestType.getNonReferenceType().isVolatileQualified()
<< DestType.getNonReferenceType()
- << Args[0]->getType()
+ << OnlyArg->getType()
<< Args[0]->getSourceRange();
break;
@@ -7690,12 +8199,12 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_RValueReferenceBindingToLValue:
S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
- << DestType.getNonReferenceType() << Args[0]->getType()
+ << DestType.getNonReferenceType() << OnlyArg->getType()
<< Args[0]->getSourceRange();
break;
case FK_ReferenceInitDropsQualifiers: {
- QualType SourceType = Args[0]->getType();
+ QualType SourceType = OnlyArg->getType();
QualType NonRefType = DestType.getNonReferenceType();
Qualifiers DroppedQualifiers =
SourceType.getQualifiers() - NonRefType.getQualifiers();
@@ -7711,18 +8220,18 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_ReferenceInitFailed:
S.Diag(Kind.getLocation(), diag::err_reference_bind_failed)
<< DestType.getNonReferenceType()
- << Args[0]->isLValue()
- << Args[0]->getType()
+ << OnlyArg->isLValue()
+ << OnlyArg->getType()
<< Args[0]->getSourceRange();
emitBadConversionNotes(S, Entity, Args[0]);
break;
case FK_ConversionFailed: {
- QualType FromType = Args[0]->getType();
+ QualType FromType = OnlyArg->getType();
PartialDiagnostic PDiag = S.PDiag(diag::err_init_conversion_failed)
<< (int)Entity.getKind()
<< DestType
- << Args[0]->isLValue()
+ << OnlyArg->isLValue()
<< FromType
<< Args[0]->getSourceRange();
S.HandleFunctionTypeMismatch(PDiag, FromType, DestType);
@@ -7975,6 +8484,14 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "incompatible wide string into wide char array";
break;
+ case FK_PlainStringIntoUTF8Char:
+ OS << "plain string literal into char8_t array";
+ break;
+
+ case FK_UTF8StringIntoPlainChar:
+ OS << "u8 string literal into char array";
+ break;
+
case FK_ArrayTypeMismatch:
OS << "array type mismatch";
break;
@@ -8265,6 +8782,11 @@ void InitializationSequence::dump() const {
dump(llvm::errs());
}
+static bool NarrowingErrs(const LangOptions &L) {
+ return L.CPlusPlus11 &&
+ (!L.MicrosoftExt || L.isCompatibleWithMSVC(LangOptions::MSVC2015));
+}
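That is, narrowing in a braced initializer is a hard error from C++11 on, except under MSVC compatibility older than MSVC 2015, where it stays a warning; for example (illustrative):

    int a = 7.5; // plain initialization: implicit conversion, no narrowing error
    int b{7.5};  // list-initialization: narrowing; an error when
                 // NarrowingErrs() is true, a warning otherwise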
+
static void DiagnoseNarrowingInInitList(Sema &S,
const ImplicitConversionSequence &ICS,
QualType PreNarrowingType,
@@ -8298,35 +8820,34 @@ static void DiagnoseNarrowingInInitList(Sema &S,
// This was a floating-to-integer conversion, which is always considered a
// narrowing conversion even if the value is a constant and can be
// represented exactly as an integer.
- S.Diag(PostInit->getLocStart(),
- (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11)
- ? diag::warn_init_list_type_narrowing
- : diag::ext_init_list_type_narrowing)
- << PostInit->getSourceRange()
- << PreNarrowingType.getLocalUnqualifiedType()
- << EntityType.getLocalUnqualifiedType();
+ S.Diag(PostInit->getLocStart(), NarrowingErrs(S.getLangOpts())
+ ? diag::ext_init_list_type_narrowing
+ : diag::warn_init_list_type_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
break;
case NK_Constant_Narrowing:
// A constant value was narrowed.
S.Diag(PostInit->getLocStart(),
- (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11)
- ? diag::warn_init_list_constant_narrowing
- : diag::ext_init_list_constant_narrowing)
- << PostInit->getSourceRange()
- << ConstantValue.getAsString(S.getASTContext(), ConstantType)
- << EntityType.getLocalUnqualifiedType();
+ NarrowingErrs(S.getLangOpts())
+ ? diag::ext_init_list_constant_narrowing
+ : diag::warn_init_list_constant_narrowing)
+ << PostInit->getSourceRange()
+ << ConstantValue.getAsString(S.getASTContext(), ConstantType)
+ << EntityType.getLocalUnqualifiedType();
break;
case NK_Variable_Narrowing:
// A variable's value may have been narrowed.
S.Diag(PostInit->getLocStart(),
- (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11)
- ? diag::warn_init_list_variable_narrowing
- : diag::ext_init_list_variable_narrowing)
- << PostInit->getSourceRange()
- << PreNarrowingType.getLocalUnqualifiedType()
- << EntityType.getLocalUnqualifiedType();
+ NarrowingErrs(S.getLangOpts())
+ ? diag::ext_init_list_variable_narrowing
+ : diag::warn_init_list_variable_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
break;
}
@@ -8599,6 +9120,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
Expr *E = ListInit->getInit(0);
auto *RD = E->getType()->getAsCXXRecordDecl();
if (!isa<InitListExpr>(E) && RD &&
+ isCompleteType(Kind.getLocation(), E->getType()) &&
isOrIsDerivedFromSpecializationOf(RD, Template))
TryListConstructors = false;
}
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index cbfc330ca60b..a42b2e827e9b 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -24,7 +24,7 @@
using namespace clang;
using namespace sema;
-/// \brief Examines the FunctionScopeInfo stack to determine the nearest
+/// Examines the FunctionScopeInfo stack to determine the nearest
/// enclosing lambda (to the current lambda) that is 'capture-ready' for
/// the variable referenced in the current lambda (i.e. \p VarToCapture).
/// If successful, returns the index into Sema's FunctionScopeInfo stack
@@ -135,7 +135,7 @@ getStackIndexOfNearestEnclosingCaptureReadyLambda(
return NoLambdaIsCaptureReady;
}
-/// \brief Examines the FunctionScopeInfo stack to determine the nearest
+/// Examines the FunctionScopeInfo stack to determine the nearest
/// enclosing lambda (to the current lambda) that is 'capture-capable' for
/// the variable referenced in the current lambda (i.e. \p VarToCapture).
/// If successful, returns the index into Sema's FunctionScopeInfo stack
@@ -263,7 +263,7 @@ CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
return Class;
}
-/// \brief Determine whether the given context is or is enclosed in an inline
+/// Determine whether the given context is or is enclosed in an inline
/// function.
static bool isInInlineFunction(const DeclContext *DC) {
while (!DC->isFileContext()) {
@@ -692,9 +692,7 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
}
// Third case: only one return statement. Don't bother doing extra work!
- SmallVectorImpl<ReturnStmt*>::iterator I = CSI.Returns.begin(),
- E = CSI.Returns.end();
- if (I+1 == E)
+ if (CSI.Returns.size() == 1)
return;
// General case: many return statements.
@@ -703,15 +701,22 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
// We require the return types to strictly match here.
// Note that we've already done the required promotions as part of
// processing the return statement.
- for (; I != E; ++I) {
- const ReturnStmt *RS = *I;
+ for (const ReturnStmt *RS : CSI.Returns) {
const Expr *RetE = RS->getRetValue();
QualType ReturnType =
(RetE ? RetE->getType() : Context.VoidTy).getUnqualifiedType();
if (Context.getCanonicalFunctionResultType(ReturnType) ==
- Context.getCanonicalFunctionResultType(CSI.ReturnType))
+ Context.getCanonicalFunctionResultType(CSI.ReturnType)) {
+ // Use the return type with the strictest possible nullability annotation.
+ auto RetTyNullability = ReturnType->getNullability(Ctx);
+ auto BlockNullability = CSI.ReturnType->getNullability(Ctx);
+ if (BlockNullability &&
+ (!RetTyNullability ||
+ hasWeakerNullability(*RetTyNullability, *BlockNullability)))
+ CSI.ReturnType = ReturnType;
continue;
+ }
// FIXME: This is a poor diagnostic for ReturnStmts without expressions.
// TODO: It's possible that the *first* return is the divergent one.
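For reference, the general case this loop handles looks like the following (a sketch, not part of the patch):

    auto same = [](bool b) {
      if (b) return 1;
      return 2;    // same canonical return type: accepted by the loop
    };
    auto diverges = [](bool b) { // error: the return types diverge here
      if (b) return 1;
      return 2.0;
    };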
@@ -904,6 +909,14 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
ParamInfo.getDeclSpec().isConstexprSpecified());
if (ExplicitParams)
CheckCXXDefaultArguments(Method);
+
+ // This represents the function body for the lambda function; check whether
+ // we have to apply optnone due to a pragma.
+ AddRangeBasedOptnone(Method);
+
+ // A code_seg attribute on the lambda applies to its method.
+ if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(Method, /*IsDefinition=*/true))
+ Method->addAttr(A);
// Attributes on the lambda apply to the method.
ProcessDeclAttributes(CurScope, Method, ParamInfo);
@@ -984,6 +997,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
CheckCXXThisCapture(C->Loc, /*Explicit=*/true, /*BuildAndDiagnose*/ true,
/*FunctionScopeIndexToStopAtPtr*/ nullptr,
C->Kind == LCK_StarThis);
+ if (!LSI->Captures.empty())
+ LSI->ExplicitCaptureRanges[LSI->Captures.size() - 1] = C->ExplicitRange;
continue;
}
@@ -1130,6 +1145,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
TryCapture_ExplicitByVal;
tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
}
+ if (!LSI->Captures.empty())
+ LSI->ExplicitCaptureRanges[LSI->Captures.size() - 1] = C->ExplicitRange;
}
finishLambdaExplicitCaptures(LSI);
@@ -1161,13 +1178,31 @@ void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
Class->setInvalidDecl();
SmallVector<Decl*, 4> Fields(Class->fields());
ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(),
- SourceLocation(), nullptr);
+ SourceLocation(), ParsedAttributesView());
CheckCompletedCXXClass(Class);
PopFunctionScopeInfo();
}
-/// \brief Add a lambda's conversion to function pointer, as described in
+QualType Sema::getLambdaConversionFunctionResultType(
+ const FunctionProtoType *CallOpProto) {
+ // The function type inside the pointer type is the same as the call
+ // operator with some tweaks. The calling convention is the default free
+ // function convention, and the type qualifications are lost.
+ const FunctionProtoType::ExtProtoInfo CallOpExtInfo =
+ CallOpProto->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo InvokerExtInfo = CallOpExtInfo;
+ CallingConv CC = Context.getDefaultCallingConvention(
+ CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
+ InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC);
+ InvokerExtInfo.TypeQuals = 0;
+ assert(InvokerExtInfo.RefQualifier == RQ_None &&
+ "Lambda's call operator should not have a reference qualifier");
+ return Context.getFunctionType(CallOpProto->getReturnType(),
+ CallOpProto->getParamTypes(), InvokerExtInfo);
+}
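This helper now centralizes the type computation shared by the function-pointer and block-pointer conversions below; its result is the function type behind conversions such as (illustrative):

    auto l = [](int x) { return x + 1; };
    // The conversion yields 'int (*)(int)': the call operator's signature
    // with the default free-function calling convention and no qualifiers.
    int (*fp)(int) = l;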
+
+/// Add a lambda's conversion to function pointer, as described in
/// C++11 [expr.prim.lambda]p6.
static void addFunctionPointerConversion(Sema &S,
SourceRange IntroducerRange,
@@ -1182,25 +1217,9 @@ static void addFunctionPointerConversion(Sema &S,
return;
// Add the conversion to function pointer.
- const FunctionProtoType *CallOpProto =
- CallOperator->getType()->getAs<FunctionProtoType>();
- const FunctionProtoType::ExtProtoInfo CallOpExtInfo =
- CallOpProto->getExtProtoInfo();
- QualType PtrToFunctionTy;
- QualType InvokerFunctionTy;
- {
- FunctionProtoType::ExtProtoInfo InvokerExtInfo = CallOpExtInfo;
- CallingConv CC = S.Context.getDefaultCallingConvention(
- CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
- InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC);
- InvokerExtInfo.TypeQuals = 0;
- assert(InvokerExtInfo.RefQualifier == RQ_None &&
- "Lambda's call operator should not have a reference qualifier");
- InvokerFunctionTy =
- S.Context.getFunctionType(CallOpProto->getReturnType(),
- CallOpProto->getParamTypes(), InvokerExtInfo);
- PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy);
- }
+ QualType InvokerFunctionTy = S.getLambdaConversionFunctionResultType(
+ CallOperator->getType()->castAs<FunctionProtoType>());
+ QualType PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy);
// Create the type of the conversion function.
FunctionProtoType::ExtProtoInfo ConvExtInfo(
@@ -1352,24 +1371,13 @@ static void addFunctionPointerConversion(Sema &S,
Class->addDecl(Invoke);
}
-/// \brief Add a lambda's conversion to block pointer.
+/// Add a lambda's conversion to block pointer.
static void addBlockPointerConversion(Sema &S,
SourceRange IntroducerRange,
CXXRecordDecl *Class,
CXXMethodDecl *CallOperator) {
- const FunctionProtoType *Proto =
- CallOperator->getType()->getAs<FunctionProtoType>();
-
- // The function type inside the block pointer type is the same as the call
- // operator with some tweaks. The calling convention is the default free
- // function convention, and the type qualifications are lost.
- FunctionProtoType::ExtProtoInfo BlockEPI = Proto->getExtProtoInfo();
- BlockEPI.ExtInfo =
- BlockEPI.ExtInfo.withCallingConv(S.Context.getDefaultCallingConvention(
- Proto->isVariadic(), /*IsCXXMethod=*/false));
- BlockEPI.TypeQuals = 0;
- QualType FunctionTy = S.Context.getFunctionType(
- Proto->getReturnType(), Proto->getParamTypes(), BlockEPI);
+ QualType FunctionTy = S.getLambdaConversionFunctionResultType(
+ CallOperator->getType()->castAs<FunctionProtoType>());
QualType BlockPtrTy = S.Context.getBlockPointerType(FunctionTy);
FunctionProtoType::ExtProtoInfo ConversionEPI(
@@ -1397,8 +1405,9 @@ static void addBlockPointerConversion(Sema &S,
Class->addDecl(Conversion);
}
-static ExprResult performLambdaVarCaptureInitialization(
- Sema &S, const LambdaScopeInfo::Capture &Capture, FieldDecl *Field) {
+static ExprResult performLambdaVarCaptureInitialization(Sema &S,
+ const Capture &Capture,
+ FieldDecl *Field) {
assert(Capture.isVariableCapture() && "not a variable capture");
auto *Var = Capture.getVariable();
@@ -1452,7 +1461,7 @@ mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) {
llvm_unreachable("Unknown implicit capture style");
}
-bool Sema::CaptureHasSideEffects(const LambdaScopeInfo::Capture &From) {
+bool Sema::CaptureHasSideEffects(const Capture &From) {
if (!From.isVLATypeCapture()) {
Expr *Init = From.getInitExpr();
if (Init && Init->HasSideEffects(Context))
@@ -1477,12 +1486,13 @@ bool Sema::CaptureHasSideEffects(const LambdaScopeInfo::Capture &From) {
return false;
}
-void Sema::DiagnoseUnusedLambdaCapture(const LambdaScopeInfo::Capture &From) {
+bool Sema::DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
+ const Capture &From) {
if (CaptureHasSideEffects(From))
- return;
+ return false;
if (From.isVLATypeCapture())
- return;
+ return false;
auto diag = Diag(From.getLocation(), diag::warn_unused_lambda_capture);
if (From.isThisCapture())
@@ -1490,6 +1500,8 @@ void Sema::DiagnoseUnusedLambdaCapture(const LambdaScopeInfo::Capture &From) {
else
diag << From.getVariable();
diag << From.isNonODRUsed();
+ diag << FixItHint::CreateRemoval(CaptureRange);
+ return true;
}
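The function now reports whether it warned and attaches a removal fix-it over the supplied range; the diagnosed pattern looks like this (illustrative):

    int x = 0, y = 0;
    auto f = [x, y] { return x; }; // warning: lambda capture 'y' is not
                                   // used; the fix-it removes ", y"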
ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
@@ -1531,22 +1543,64 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// Translate captures.
auto CurField = Class->field_begin();
+ // True if a used capture or a capture-default appears before the current capture.
+ bool CurHasPreviousCapture = CaptureDefault != LCD_None;
+ SourceLocation PrevCaptureLoc = CurHasPreviousCapture ?
+ CaptureDefaultLoc : IntroducerRange.getBegin();
+
for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I, ++CurField) {
- const LambdaScopeInfo::Capture &From = LSI->Captures[I];
+ const Capture &From = LSI->Captures[I];
+
assert(!From.isBlockCapture() && "Cannot capture __block variables");
bool IsImplicit = I >= LSI->NumExplicitCaptures;
+ // Use source ranges of explicit captures for fixits where available.
+ SourceRange CaptureRange = LSI->ExplicitCaptureRanges[I];
+
// Warn about unused explicit captures.
+ bool IsCaptureUsed = true;
if (!CurContext->isDependentContext() && !IsImplicit && !From.isODRUsed()) {
// Initialized captures that are non-ODR used may not be eliminated.
bool NonODRUsedInitCapture =
IsGenericLambda && From.isNonODRUsed() && From.getInitExpr();
- if (!NonODRUsedInitCapture)
- DiagnoseUnusedLambdaCapture(From);
+ if (!NonODRUsedInitCapture) {
+ bool IsLast = (I + 1) == LSI->NumExplicitCaptures;
+ SourceRange FixItRange;
+ if (CaptureRange.isValid()) {
+ if (!CurHasPreviousCapture && !IsLast) {
+ // If there are no captures preceding this capture, remove the
+ // following comma.
+ FixItRange = SourceRange(CaptureRange.getBegin(),
+ getLocForEndOfToken(CaptureRange.getEnd()));
+ } else {
+ // Otherwise, remove this capture and the comma following the last used capture.
+ FixItRange = SourceRange(getLocForEndOfToken(PrevCaptureLoc),
+ CaptureRange.getEnd());
+ }
+ }
+
+ IsCaptureUsed = !DiagnoseUnusedLambdaCapture(FixItRange, From);
+ }
+ }
+
+ if (CaptureRange.isValid()) {
+ CurHasPreviousCapture |= IsCaptureUsed;
+ PrevCaptureLoc = CaptureRange.getEnd();
}
// Handle 'this' capture.
if (From.isThisCapture()) {
+ // Capturing 'this' implicitly with a default of '[=]' is deprecated,
+ // because it results in a reference capture. Don't warn prior to
+ // C++2a; there's nothing that can be done about it before then.
+ if (getLangOpts().CPlusPlus2a && IsImplicit &&
+ CaptureDefault == LCD_ByCopy) {
+ Diag(From.getLocation(), diag::warn_deprecated_this_capture);
+ Diag(CaptureDefaultLoc, diag::note_deprecated_this_capture)
+ << FixItHint::CreateInsertion(
+ getLocForEndOfToken(CaptureDefaultLoc), ", this");
+ }
+
Captures.push_back(
LambdaCapture(From.getLocation(), IsImplicit,
From.isCopyCapture() ? LCK_StarThis : LCK_This));
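A sketch of the deprecated pattern this diagnoses under C++2a (illustrative, not from the patch):

    struct W {
      int n = 0;
      auto get() {
        // '[=]' implicitly captures 'this' (a pointer copy, so the object
        // is effectively referenced); deprecated in C++2a. The fix-it
        // suggests writing '[=, this]'.
        return [=] { return n; };
      }
    };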
@@ -1596,7 +1650,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// Finalize the lambda class.
SmallVector<Decl*, 4> Fields(Class->fields());
ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(),
- SourceLocation(), nullptr);
+ SourceLocation(), ParsedAttributesView());
CheckCompletedCXXClass(Class);
}
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index a9db973851df..2732112c00b6 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -187,7 +187,7 @@ namespace {
}
void done() {
- std::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator());
+ llvm::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator());
}
typedef ListTy::const_iterator const_iterator;
@@ -356,7 +356,7 @@ static DeclContext *getContextForScopeMatching(Decl *D) {
return D->getDeclContext()->getRedeclContext();
}
-/// \brief Determine whether \p D is a better lookup result than \p Existing,
+/// Determine whether \p D is a better lookup result than \p Existing,
/// given that they declare the same entity.
static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
NamedDecl *D, NamedDecl *Existing) {
@@ -669,7 +669,7 @@ LLVM_DUMP_METHOD void LookupResult::dump() {
D->dump();
}
-/// \brief Lookup a builtin function, when name lookup would otherwise
+/// Lookup a builtin function, when name lookup would otherwise
/// fail.
static bool LookupBuiltin(Sema &S, LookupResult &R) {
Sema::LookupNameKind NameKind = R.getLookupKind();
@@ -713,7 +713,7 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
return false;
}
-/// \brief Determine whether we can declare a special member function within
+/// Determine whether we can declare a special member function within
/// the class at this point.
static bool CanDeclareSpecialMemberFunction(const CXXRecordDecl *Class) {
// We need to have a definition for the class.
@@ -755,7 +755,7 @@ void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) {
DeclareImplicitDestructor(Class);
}
-/// \brief Determine whether this is the name of an implicitly-declared
+/// Determine whether this is the name of an implicitly-declared
/// special member function.
static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) {
switch (Name.getNameKind()) {
@@ -773,7 +773,7 @@ static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) {
return false;
}
-/// \brief If there are any implicit member functions with the given name
+/// If there are any implicit member functions with the given name
/// that need to be declared in the given declaration context, do so.
static void DeclareImplicitMemberFunctionsWithName(Sema &S,
DeclarationName Name,
@@ -1354,7 +1354,7 @@ void Sema::makeMergedDefinitionVisible(NamedDecl *ND) {
makeMergedDefinitionVisible(Param);
}
-/// \brief Find the module in which the given declaration was defined.
+/// Find the module in which the given declaration was defined.
static Module *getDefiningModule(Sema &S, Decl *Entity) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Entity)) {
// If this function was instantiated from a template, the defining module is
@@ -1452,6 +1452,8 @@ template<typename Filter>
static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules,
Filter F) {
+ bool HasFilteredRedecls = false;
+
for (auto *Redecl : D->redecls()) {
auto *R = cast<NamedDecl>(Redecl);
if (!F(R))
@@ -1460,6 +1462,8 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
if (S.isVisible(R))
return true;
+ HasFilteredRedecls = true;
+
if (Modules) {
Modules->push_back(R->getOwningModule());
const auto &Merged = S.Context.getModulesWithMergedDefinition(R);
@@ -1467,7 +1471,11 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
}
}
- return false;
+ // Only return false if there is at least one redecl that is not filtered out.
+ if (HasFilteredRedecls)
+ return false;
+
+ return true;
}
bool Sema::hasVisibleExplicitSpecialization(
@@ -1497,11 +1505,9 @@ bool Sema::hasVisibleMemberSpecialization(
// class definition?
return D->getLexicalDeclContext()->isFileContext();
});
-
- return false;
}
-/// \brief Determine whether a declaration is visible to name lookup.
+/// Determine whether a declaration is visible to name lookup.
///
/// This routine determines whether the declaration D is visible in the current
/// lookup context, taking into account the current template instantiation
@@ -1648,7 +1654,7 @@ bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
return New->isExternallyDeclarable();
}
-/// \brief Retrieve the visible declaration corresponding to D, if any.
+/// Retrieve the visible declaration corresponding to D, if any.
///
/// This routine determines whether the declaration D is visible in the current
/// module, with the current imports. If not, it checks whether any
@@ -1656,7 +1662,8 @@ bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
///
/// \returns D, or a visible previous declaration of D, whichever is more recent
/// and visible. If no declaration of D is visible, returns null.
-static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
+static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
+ unsigned IDNS) {
assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
for (auto RD : D->redecls()) {
@@ -1668,7 +1675,8 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
// FIXME: This is wrong in the case where the previous declaration is not
// visible in the same scope as D. This needs to be done much more
// carefully.
- if (LookupResult::isVisible(SemaRef, ND))
+ if (ND->isInIdentifierNamespace(IDNS) &&
+ LookupResult::isVisible(SemaRef, ND))
return ND;
}
@@ -1693,17 +1701,18 @@ NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
auto *Key = ND->getCanonicalDecl();
if (auto *Acceptable = getSema().VisibleNamespaceCache.lookup(Key))
return Acceptable;
- auto *Acceptable =
- isVisible(getSema(), Key) ? Key : findAcceptableDecl(getSema(), Key);
+ auto *Acceptable = isVisible(getSema(), Key)
+ ? Key
+ : findAcceptableDecl(getSema(), Key, IDNS);
if (Acceptable)
getSema().VisibleNamespaceCache.insert(std::make_pair(Key, Acceptable));
return Acceptable;
}
- return findAcceptableDecl(getSema(), D);
+ return findAcceptableDecl(getSema(), D, IDNS);
}
-/// @brief Perform unqualified name lookup starting from a given
+/// Perform unqualified name lookup starting from a given
/// scope.
///
/// Unqualified name lookup (C++ [basic.lookup.unqual], C99 6.2.1) is
@@ -1841,7 +1850,7 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
return (ExternalSource && ExternalSource->LookupUnqualified(R, S));
}
-/// @brief Perform qualified name lookup in the namespaces nominated by
+/// Perform qualified name lookup in the namespaces nominated by
/// using directives by the given context.
///
/// C++98 [namespace.qual]p2:
@@ -1947,7 +1956,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
return Found;
}
-/// \brief Callback that looks for any member of a class with the given name.
+/// Callback that looks for any member of a class with the given name.
static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path, DeclarationName Name) {
RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
@@ -1956,7 +1965,7 @@ static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
return !Path.Decls.empty();
}
-/// \brief Determine whether the given set of member declarations contains only
+/// Determine whether the given set of member declarations contains only
/// static members, nested types, and enumerators.
template<typename InputIterator>
static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) {
@@ -1988,7 +1997,7 @@ static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) {
return false;
}
-/// \brief Perform qualified name lookup into a given context.
+/// Perform qualified name lookup into a given context.
///
/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
/// names when the context of those names is explicit specified, e.g.,
@@ -2212,7 +2221,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
return true;
}
-/// \brief Performs qualified name lookup or special type of lookup for
+/// Performs qualified name lookup or special type of lookup for
/// "__super::" scope specifier.
///
/// This routine is a convenience overload meant to be called from contexts
@@ -2237,7 +2246,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
return LookupQualifiedName(R, LookupCtx);
}
-/// @brief Performs name lookup for a name that was parsed in the
+/// Performs name lookup for a name that was parsed in the
/// source code, and may contain a C++ scope specifier.
///
/// This routine is a convenience routine meant to be called from
@@ -2291,7 +2300,7 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
return LookupName(R, S, AllowBuiltinCreation);
}
-/// \brief Perform qualified name lookup into all base classes of the given
+/// Perform qualified name lookup into all base classes of the given
/// class.
///
/// \param R captures both the lookup criteria and any lookup results found.
@@ -2309,7 +2318,7 @@ bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(
BaseSpec.getType()->castAs<RecordType>()->getDecl());
LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind());
- Result.setBaseObjectType(Context.getRecordType(Class));
+ Result.setBaseObjectType(Context.getRecordType(Class));
LookupQualifiedName(Result, RD);
// Copy the lookup results into the target, merging the base's access into
@@ -2329,7 +2338,7 @@ bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) {
return !R.empty();
}
-/// \brief Produce a diagnostic describing the ambiguity that resulted
+/// Produce a diagnostic describing the ambiguity that resulted
/// from name lookup.
///
/// \param Result The result of the ambiguous lookup to be diagnosed.
@@ -2445,7 +2454,7 @@ static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces,
Namespaces.insert(Ctx->getPrimaryContext());
}
-// \brief Add the associated classes and namespaces for argument-dependent
+// Add the associated classes and namespaces for argument-dependent
// lookup that involves a template argument (C++ [basic.lookup.koenig]p2).
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
@@ -2495,7 +2504,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
}
}
-// \brief Add the associated classes and namespaces for
+// Add the associated classes and namespaces for
// argument-dependent lookup with an argument of class type
// (C++ [basic.lookup.koenig]p2).
static void
@@ -2590,7 +2599,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
}
}
-// \brief Add the associated classes and namespaces for
+// Add the associated classes and namespaces for
// argument-dependent lookup with an argument of type T
// (C++ [basic.lookup.koenig]p2).
static void
@@ -2754,7 +2763,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
}
}
-/// \brief Find the associated classes and namespaces for
+/// Find the associated classes and namespaces for
/// argument-dependent lookup for a call with the given set of
/// arguments.
///
@@ -2821,7 +2830,7 @@ NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
return R.getAsSingle<NamedDecl>();
}
-/// \brief Find the protocol with the given name, if any.
+/// Find the protocol with the given name, if any.
ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
SourceLocation IdLoc,
RedeclarationKind Redecl) {
@@ -3048,7 +3057,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
return *Result;
}
-/// \brief Look up the default constructor for the given class.
+/// Look up the default constructor for the given class.
CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
SpecialMemberOverloadResult Result =
LookupSpecialMember(Class, CXXDefaultConstructor, false, false, false,
@@ -3057,7 +3066,7 @@ CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
return cast_or_null<CXXConstructorDecl>(Result.getMethod());
}
-/// \brief Look up the copying constructor for the given class.
+/// Look up the copying constructor for the given class.
CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals) {
assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
@@ -3069,7 +3078,7 @@ CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
return cast_or_null<CXXConstructorDecl>(Result.getMethod());
}
-/// \brief Look up the moving constructor for the given class.
+/// Look up the moving constructor for the given class.
CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals) {
SpecialMemberOverloadResult Result =
@@ -3079,7 +3088,7 @@ CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class,
return cast_or_null<CXXConstructorDecl>(Result.getMethod());
}
-/// \brief Look up the constructors for the given class.
+/// Look up the constructors for the given class.
DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
// If the implicit constructors have not yet been declared, do so now.
if (CanDeclareSpecialMemberFunction(Class)) {
@@ -3096,7 +3105,7 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
return Class->lookup(Name);
}
-/// \brief Look up the copying assignment operator for the given class.
+/// Look up the copying assignment operator for the given class.
CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
unsigned Quals, bool RValueThis,
unsigned ThisQuals) {
@@ -3113,7 +3122,7 @@ CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
return Result.getMethod();
}
-/// \brief Look up the moving assignment operator for the given class.
+/// Look up the moving assignment operator for the given class.
CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
unsigned Quals,
bool RValueThis,
@@ -3129,7 +3138,7 @@ CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
return Result.getMethod();
}
-/// \brief Look for the destructor of the given class.
+/// Look for the destructor of the given class.
///
/// During semantic analysis, this routine should be used in lieu of
/// CXXRecordDecl::getDestructor().
@@ -3329,6 +3338,23 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
// lookup (11.4).
DeclContext::lookup_result R = NS->lookup(Name);
for (auto *D : R) {
+ auto *Underlying = D;
+ if (auto *USD = dyn_cast<UsingShadowDecl>(D))
+ Underlying = USD->getTargetDecl();
+
+ if (!isa<FunctionDecl>(Underlying) &&
+ !isa<FunctionTemplateDecl>(Underlying))
+ continue;
+
+ if (!isVisible(D)) {
+ D = findAcceptableDecl(
+ *this, D, (Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend));
+ if (!D)
+ continue;
+ if (auto *USD = dyn_cast<UsingShadowDecl>(D))
+ Underlying = USD->getTargetDecl();
+ }
+
// If the only declaration here is an ordinary friend, consider
// it only if it was declared in an associated class.
if ((D->getIdentifierNamespace() & Decl::IDNS_Ordinary) == 0) {
@@ -3350,22 +3376,6 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
continue;
}
- auto *Underlying = D;
- if (auto *USD = dyn_cast<UsingShadowDecl>(D))
- Underlying = USD->getTargetDecl();
-
- if (!isa<FunctionDecl>(Underlying) &&
- !isa<FunctionTemplateDecl>(Underlying))
- continue;
-
- if (!isVisible(D)) {
- D = findAcceptableDecl(*this, D);
- if (!D)
- continue;
- if (auto *USD = dyn_cast<UsingShadowDecl>(D))
- Underlying = USD->getTargetDecl();
- }
-
// FIXME: Preserve D as the FoundDecl.
Result.insert(Underlying);
}
@@ -3385,26 +3395,26 @@ class ShadowContextRAII;
class VisibleDeclsRecord {
public:
- /// \brief An entry in the shadow map, which is optimized to store a
+ /// An entry in the shadow map, which is optimized to store a
/// single declaration (the common case) but can also store a list
/// of declarations.
typedef llvm::TinyPtrVector<NamedDecl*> ShadowMapEntry;
private:
- /// \brief A mapping from declaration names to the declarations that have
+ /// A mapping from declaration names to the declarations that have
/// this name within a particular scope.
typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
- /// \brief A list of shadow maps, which is used to model name hiding.
+ /// A list of shadow maps, which is used to model name hiding.
std::list<ShadowMap> ShadowMaps;
- /// \brief The declaration contexts we have already visited.
+ /// The declaration contexts we have already visited.
llvm::SmallPtrSet<DeclContext *, 8> VisitedContexts;
friend class ShadowContextRAII;
public:
- /// \brief Determine whether we have already visited this context
+ /// Determine whether we have already visited this context
/// (and, if not, note that we are going to visit that context now).
bool visitedContext(DeclContext *Ctx) {
return !VisitedContexts.insert(Ctx).second;
@@ -3414,20 +3424,20 @@ public:
return VisitedContexts.count(Ctx);
}
- /// \brief Determine whether the given declaration is hidden in the
+ /// Determine whether the given declaration is hidden in the
/// current scope.
///
/// \returns the declaration that hides the given declaration, or
/// NULL if no such declaration exists.
NamedDecl *checkHidden(NamedDecl *ND);
- /// \brief Add a declaration to the current shadow map.
+ /// Add a declaration to the current shadow map.
void add(NamedDecl *ND) {
ShadowMaps.back()[ND->getDeclName()].push_back(ND);
}
};
-/// \brief RAII object that records when we've entered a shadow context.
+/// RAII object that records when we've entered a shadow context.
class ShadowContextRAII {
VisibleDeclsRecord &Visible;
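
ShadowContextRAII is a scope guard over the record's shadow-map list: constructing one opens a fresh ShadowMap, destroying it pops the map, so name hiding follows lexical nesting. Its remaining members are elided from this hunk; a sketch of the assumed shape:

    class ShadowContextRAII {
      VisibleDeclsRecord &Visible;
    public:
      explicit ShadowContextRAII(VisibleDeclsRecord &Visible) : Visible(Visible) {
        Visible.ShadowMaps.emplace_back(); // enter a new shadowing scope
      }
      ~ShadowContextRAII() { Visible.ShadowMaps.pop_back(); } // leave it
    };
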
@@ -3494,7 +3504,8 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
bool InBaseClass,
VisibleDeclConsumer &Consumer,
VisibleDeclsRecord &Visited,
- bool IncludeDependentBases = false) {
+ bool IncludeDependentBases,
+ bool LoadExternal) {
if (!Ctx)
return;
@@ -3502,6 +3513,8 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
if (Visited.visitedContext(Ctx->getPrimaryContext()))
return;
+ Consumer.EnteredContext(Ctx);
+
// Outside C++, lookup results for the TU live on identifiers.
if (isa<TranslationUnitDecl>(Ctx) &&
!Result.getSema().getLangOpts().CPlusPlus) {
@@ -3509,11 +3522,12 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
auto &Idents = S.Context.Idents;
// Ensure all external identifiers are in the identifier table.
- if (IdentifierInfoLookup *External = Idents.getExternalIdentifierLookup()) {
- std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers());
- for (StringRef Name = Iter->Next(); !Name.empty(); Name = Iter->Next())
- Idents.get(Name);
- }
+ if (LoadExternal)
+ if (IdentifierInfoLookup *External = Idents.getExternalIdentifierLookup()) {
+ std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers());
+ for (StringRef Name = Iter->Next(); !Name.empty(); Name = Iter->Next())
+ Idents.get(Name);
+ }
// Walk all lookup results in the TU for each identifier.
for (const auto &Ident : Idents) {
@@ -3535,8 +3549,13 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
Result.getSema().ForceDeclarationOfImplicitMembers(Class);
+ // We sometimes skip loading namespace-level results (they tend to be huge).
+ bool Load = LoadExternal ||
+ !(isa<TranslationUnitDecl>(Ctx) || isa<NamespaceDecl>(Ctx));
// Enumerate all of the results in this context.
- for (DeclContextLookupResult R : Ctx->lookups()) {
+ for (DeclContextLookupResult R :
+ Load ? Ctx->lookups()
+ : Ctx->noload_lookups(/*PreserveInternalState=*/false)) {
for (auto *D : R) {
if (auto *ND = Result.getAcceptableDecl(D)) {
Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
@@ -3553,7 +3572,7 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
continue;
LookupVisibleDecls(I->getNominatedNamespace(), Result,
QualifiedNameLookup, InBaseClass, Consumer, Visited,
- IncludeDependentBases);
+ IncludeDependentBases, LoadExternal);
}
}
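
When LoadExternal is false, the Load flag above still permits loading for class and other non-namespace contexts; only translation-unit and namespace scopes, where external result sets are largest, fall back to Ctx->noload_lookups(), which walks only declarations already in memory. The choice in isolation (the helper is hypothetical):

    // Visit a context's lookup results, optionally without deserializing
    // external declarations (from PCH/modules).
    static void forEachLookupResult(DeclContext *Ctx, bool LoadExternal,
                                    llvm::function_ref<void(NamedDecl *)> Fn) {
      for (DeclContextLookupResult R :
           LoadExternal ? Ctx->lookups()
                        : Ctx->noload_lookups(/*PreserveInternalState=*/false))
        for (NamedDecl *D : R)
          Fn(D);
    }
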
@@ -3610,7 +3629,7 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
// Find results in this base class (and its bases).
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(RD, Result, QualifiedNameLookup, true, Consumer,
- Visited, IncludeDependentBases);
+ Visited, IncludeDependentBases, LoadExternal);
}
}
@@ -3619,22 +3638,23 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
// Traverse categories.
for (auto *Cat : IFace->visible_categories()) {
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(Cat, Result, QualifiedNameLookup, false,
- Consumer, Visited);
+ LookupVisibleDecls(Cat, Result, QualifiedNameLookup, false, Consumer,
+ Visited, IncludeDependentBases, LoadExternal);
}
// Traverse protocols.
for (auto *I : IFace->all_referenced_protocols()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
- Visited);
+ Visited, IncludeDependentBases, LoadExternal);
}
// Traverse the superclass.
if (IFace->getSuperClass()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(IFace->getSuperClass(), Result, QualifiedNameLookup,
- true, Consumer, Visited);
+ true, Consumer, Visited, IncludeDependentBases,
+ LoadExternal);
}
// If there is an implementation, traverse it. We do this to find
@@ -3642,26 +3662,28 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
if (IFace->getImplementation()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(IFace->getImplementation(), Result,
- QualifiedNameLookup, InBaseClass, Consumer, Visited);
+ QualifiedNameLookup, InBaseClass, Consumer, Visited,
+ IncludeDependentBases, LoadExternal);
}
} else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
for (auto *I : Protocol->protocols()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
- Visited);
+ Visited, IncludeDependentBases, LoadExternal);
}
} else if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx)) {
for (auto *I : Category->protocols()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
- Visited);
+ Visited, IncludeDependentBases, LoadExternal);
}
// If there is an implementation, traverse it.
if (Category->getImplementation()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(Category->getImplementation(), Result,
- QualifiedNameLookup, true, Consumer, Visited);
+ QualifiedNameLookup, true, Consumer, Visited,
+ IncludeDependentBases, LoadExternal);
}
}
}
@@ -3669,7 +3691,8 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
static void LookupVisibleDecls(Scope *S, LookupResult &Result,
UnqualUsingDirectiveSet &UDirs,
VisibleDeclConsumer &Consumer,
- VisibleDeclsRecord &Visited) {
+ VisibleDeclsRecord &Visited,
+ bool LoadExternal) {
if (!S)
return;
@@ -3708,7 +3731,8 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
Result.getNameLoc(), Sema::LookupMemberName);
if (ObjCInterfaceDecl *IFace = Method->getClassInterface()) {
LookupVisibleDecls(IFace, IvarResult, /*QualifiedNameLookup=*/false,
- /*InBaseClass=*/false, Consumer, Visited);
+ /*InBaseClass=*/false, Consumer, Visited,
+ /*IncludeDependentBases=*/false, LoadExternal);
}
}
@@ -3722,7 +3746,8 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
continue;
LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/false,
- /*InBaseClass=*/false, Consumer, Visited);
+ /*InBaseClass=*/false, Consumer, Visited,
+ /*IncludeDependentBases=*/false, LoadExternal);
}
} else if (!S->getParent()) {
// Look into the translation unit scope. We walk through the translation
@@ -3736,7 +3761,8 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
// in DeclContexts unless we have to" optimization), we can eliminate this.
Entity = Result.getSema().Context.getTranslationUnitDecl();
LookupVisibleDecls(Entity, Result, /*QualifiedNameLookup=*/false,
- /*InBaseClass=*/false, Consumer, Visited);
+ /*InBaseClass=*/false, Consumer, Visited,
+ /*IncludeDependentBases=*/false, LoadExternal);
}
if (Entity) {
@@ -3745,17 +3771,19 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(Entity))
LookupVisibleDecls(const_cast<DeclContext *>(UUE.getNominatedNamespace()),
Result, /*QualifiedNameLookup=*/false,
- /*InBaseClass=*/false, Consumer, Visited);
+ /*InBaseClass=*/false, Consumer, Visited,
+ /*IncludeDependentBases=*/false, LoadExternal);
}
// Lookup names in the parent scope.
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(S->getParent(), Result, UDirs, Consumer, Visited);
+ LookupVisibleDecls(S->getParent(), Result, UDirs, Consumer, Visited,
+ LoadExternal);
}
void Sema::LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
- bool IncludeGlobalScope) {
+ bool IncludeGlobalScope, bool LoadExternal) {
// Determine the set of using directives available during
// unqualified name lookup.
Scope *Initial = S;
@@ -3776,13 +3804,13 @@ void Sema::LookupVisibleDecls(Scope *S, LookupNameKind Kind,
if (!IncludeGlobalScope)
Visited.visitedContext(Context.getTranslationUnitDecl());
ShadowContextRAII Shadow(Visited);
- ::LookupVisibleDecls(Initial, Result, UDirs, Consumer, Visited);
+ ::LookupVisibleDecls(Initial, Result, UDirs, Consumer, Visited, LoadExternal);
}
void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope,
- bool IncludeDependentBases) {
+ bool IncludeDependentBases, bool LoadExternal) {
LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
Result.setAllowHidden(Consumer.includeHiddenDecls());
VisibleDeclsRecord Visited;
@@ -3791,7 +3819,7 @@ void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
ShadowContextRAII Shadow(Visited);
::LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/true,
/*InBaseClass=*/false, Consumer, Visited,
- IncludeDependentBases);
+ IncludeDependentBases, LoadExternal);
}
/// LookupOrCreateLabel - Do a name lookup of a label with the specified name.
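
Both public LookupVisibleDecls entry points now thread LoadExternal through, so callers that can tolerate missing external names (for instance a fast code-completion pass over a large module set) can opt out of deserialization. A hypothetical call site, with MyConsumer standing in for a VisibleDeclConsumer implementation:

    MyConsumer Consumer; // hypothetical VisibleDeclConsumer subclass
    SemaRef.LookupVisibleDecls(S, Sema::LookupOrdinaryName, Consumer,
                               /*IncludeGlobalScope=*/true,
                               /*LoadExternal=*/false);
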
@@ -3846,7 +3874,7 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
bool isObjCIvarLookup,
bool FindHidden);
-/// \brief Check whether the declarations found for a typo correction are
+/// Check whether the declarations found for a typo correction are
/// visible. Set the correction's RequiresImport flag to true if none of the
/// declarations are visible, false otherwise.
static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
@@ -3865,17 +3893,13 @@ static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
bool AnyVisibleDecls = !NewDecls.empty();
for (/**/; DI != DE; ++DI) {
- NamedDecl *VisibleDecl = *DI;
- if (!LookupResult::isVisible(SemaRef, *DI))
- VisibleDecl = findAcceptableDecl(SemaRef, *DI);
-
- if (VisibleDecl) {
+ if (LookupResult::isVisible(SemaRef, *DI)) {
if (!AnyVisibleDecls) {
// Found a visible decl, discard all hidden ones.
AnyVisibleDecls = true;
NewDecls.clear();
}
- NewDecls.push_back(VisibleDecl);
+ NewDecls.push_back(*DI);
} else if (!AnyVisibleDecls && !(*DI)->isModulePrivate())
NewDecls.push_back(*DI);
}
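
With the findAcceptableDecl call dropped, the loop is a plain keep-the-best partition: hidden declarations accumulate only until the first visible one appears, at which point the list is cleared and only visible declarations are kept. The same idiom in generic form (a sketch with illustrative names, omitting the module-private check):

    // Keep all items until the first "good" one, then keep only good ones.
    template <typename T, typename Pred>
    llvm::SmallVector<T, 8> preferGood(llvm::ArrayRef<T> Items, Pred IsGood) {
      llvm::SmallVector<T, 8> Kept;
      bool AnyGood = false;
      for (const T &Item : Items) {
        bool Good = IsGood(Item);
        if (Good && !AnyGood) {
          AnyGood = true;
          Kept.clear(); // discard the not-good items collected so far
        }
        if (Good || !AnyGood)
          Kept.push_back(Item);
      }
      return Kept;
    }
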
@@ -3945,8 +3969,7 @@ void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
// Only consider visible declarations and declarations from modules with
// names that exactly match.
- if (!LookupResult::isVisible(SemaRef, ND) && Name != Typo &&
- !findAcceptableDecl(SemaRef, ND))
+ if (!LookupResult::isVisible(SemaRef, ND) && Name != Typo)
return;
FoundName(Name->getName());
@@ -4337,7 +4360,7 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
DistanceMap[NumSpecifiers].push_back(SI);
}
-/// \brief Perform name lookup for a possible result for typo correction.
+/// Perform name lookup for a possible result for typo correction.
static void LookupPotentialTypoResult(Sema &SemaRef,
LookupResult &Res,
IdentifierInfo *Name,
@@ -4391,7 +4414,7 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
}
}
-/// \brief Add keywords to the consumer as possible typo corrections.
+/// Add keywords to the consumer as possible typo corrections.
static void AddKeywordsToConsumer(Sema &SemaRef,
TypoCorrectionConsumer &Consumer,
Scope *S, CorrectionCandidateCallback &CCC,
@@ -4442,7 +4465,7 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
}
}
- if (SemaRef.getLangOpts().GNUMode)
+ if (SemaRef.getLangOpts().GNUKeywords)
Consumer.addKeywordResult("typeof");
} else if (CCC.WantFunctionLikeCasts) {
static const char *const CastableTypeSpecs[] = {
@@ -4512,7 +4535,8 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
if (S && S->getContinueParent())
Consumer.addKeywordResult("continue");
- if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ if (SemaRef.getCurFunction() &&
+ !SemaRef.getCurFunction()->SwitchStack.empty()) {
Consumer.addKeywordResult("case");
Consumer.addKeywordResult("default");
}
@@ -4681,7 +4705,7 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
return Consumer;
}
-/// \brief Try to "correct" a typo in the source code by finding
+/// Try to "correct" a typo in the source code by finding
/// visible declarations whose names are similar to the name that was
/// present in the source code.
///
@@ -4810,7 +4834,7 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure && !SecondBestTC);
}
-/// \brief Try to "correct" a typo in the source code by finding
+/// Try to "correct" a typo in the source code by finding
/// visible declarations whose names are similar to the name that was
/// present in the source code.
///
@@ -4966,6 +4990,8 @@ bool FunctionCallFilterCCC::ValidateCandidate(const TypoCorrection &candidate) {
// determine if it is a pointer or reference to a function. If so,
// check against the number of arguments expected for the pointee.
QualType ValType = cast<ValueDecl>(ND)->getType();
+ if (ValType.isNull())
+ continue;
if (ValType->isAnyPointerType() || ValType->isReferenceType())
ValType = ValType->getPointeeType();
if (const FunctionProtoType *FPT = ValType->getAs<FunctionProtoType>())
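
ValType can be a null QualType here (for example on an invalid declaration), and QualType's operator-> asserts on the null state, so the isNull() check must precede any type predicate. The guard as a standalone fragment (given some ValueDecl *VD):

    QualType T = VD->getType();
    if (T.isNull())
      return;                  // invalid/absent type: nothing to inspect
    if (T->isAnyPointerType() || T->isReferenceType())
      T = T->getPointeeType(); // safe: T is known non-null here
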
@@ -5047,7 +5073,7 @@ void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
Recover);
}
-/// \brief Get a "quoted.h" or <angled.h> include path to use in a diagnostic
+/// Get a "quoted.h" or <angled.h> include path to use in a diagnostic
/// suggesting the addition of a #include of the specified file.
static std::string getIncludeStringForHeader(Preprocessor &PP,
const FileEntry *E) {
@@ -5126,7 +5152,7 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]);
}
-/// \brief Diagnose a successfully-corrected typo. Separated from the correction
+/// Diagnose a successfully-corrected typo. Separated from the correction
/// itself to allow external validation of the result, etc.
///
/// \param Correction The result of performing typo correction.
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index ea5b1da46f32..2983ec51f49e 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -104,7 +104,7 @@ static void checkPropertyDeclWithOwnership(Sema &S,
<< propertyLifetime;
}
-/// \brief Check this Objective-C property against a property declared in the
+/// Check this Objective-C property against a property declared in the
/// given protocol.
static void
CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
@@ -618,7 +618,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
TInfo = Context.getTrivialTypeSourceInfo(T, TLoc);
}
- DeclContext *DC = cast<DeclContext>(CDecl);
+ DeclContext *DC = CDecl;
ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC,
FD.D.getIdentifierLoc(),
PropertyId, AtLoc,
@@ -897,14 +897,24 @@ SelectPropertyForSynthesisFromProtocols(Sema &S, SourceLocation AtLoc,
: HasUnexpectedAttribute;
Mismatches.push_back({Prop, Kind, AttributeName});
};
- if (isIncompatiblePropertyAttribute(OriginalAttributes, Attr,
+ // The ownership might be incompatible unless the property has no explicit
+ // ownership.
+ bool HasOwnership = (Attr & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
+ ObjCPropertyDecl::OBJC_PR_weak)) != 0;
+ if (HasOwnership &&
+ isIncompatiblePropertyAttribute(OriginalAttributes, Attr,
ObjCPropertyDecl::OBJC_PR_copy)) {
Diag(OriginalAttributes & ObjCPropertyDecl::OBJC_PR_copy, "copy");
continue;
}
- if (areIncompatiblePropertyAttributes(
- OriginalAttributes, Attr, ObjCPropertyDecl::OBJC_PR_retain |
- ObjCPropertyDecl::OBJC_PR_strong)) {
+ if (HasOwnership && areIncompatiblePropertyAttributes(
+ OriginalAttributes, Attr,
+ ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong)) {
Diag(OriginalAttributes & (ObjCPropertyDecl::OBJC_PR_retain |
ObjCPropertyDecl::OBJC_PR_strong),
"retain (or strong)");
@@ -1819,7 +1829,7 @@ static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl,
return false;
}
-/// \brief Default synthesizes all properties which must be synthesized
+/// Default synthesizes all properties which must be synthesized
/// in class's \@implementation.
void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
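
In the SelectPropertyForSynthesisFromProtocols hunk above, HasOwnership folds the six Objective-C ownership attributes into one mask so the copy and retain/strong incompatibility diagnostics fire only when the property states an ownership of its own. The mask test factored out (the helper name is hypothetical; the enumerators are the ones used in the hunk):

    static bool hasExplicitOwnership(unsigned Attributes) {
      const unsigned OwnershipMask =
          ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong |
          ObjCPropertyDecl::OBJC_PR_copy | ObjCPropertyDecl::OBJC_PR_assign |
          ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
          ObjCPropertyDecl::OBJC_PR_weak;
      return (Attributes & OwnershipMask) != 0;
    }
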
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index 24b58e8fd12b..e1a4c420d402 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
/// \file
-/// \brief This file implements semantic analysis for OpenMP directives and
+/// This file implements semantic analysis for OpenMP directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
@@ -35,17 +35,17 @@ using namespace clang;
// Stack of data-sharing attributes for variables
//===----------------------------------------------------------------------===//
-static Expr *CheckMapClauseExpressionBase(
+static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
OpenMPClauseKind CKind, bool NoDiagnose);
namespace {
-/// \brief Default data sharing attributes, which can be applied to directive.
+/// Default data sharing attributes, which can be applied to directive.
enum DefaultDataSharingAttributes {
- DSA_unspecified = 0, /// \brief Data sharing attribute not specified.
- DSA_none = 1 << 0, /// \brief Default data sharing attribute 'none'.
- DSA_shared = 1 << 1, /// \brief Default data sharing attribute 'shared'.
+ DSA_unspecified = 0, /// Data sharing attribute not specified.
+ DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
+ DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
};
/// Attributes of the defaultmap clause.
@@ -54,51 +54,53 @@ enum DefaultMapAttributes {
DMA_tofrom_scalar, /// Default mapping is 'tofrom:scalar'.
};
-/// \brief Stack for tracking declarations used in OpenMP directives and
+/// Stack for tracking declarations used in OpenMP directives and
/// clauses and their data-sharing attributes.
-class DSAStackTy final {
+class DSAStackTy {
public:
- struct DSAVarData final {
+ struct DSAVarData {
OpenMPDirectiveKind DKind = OMPD_unknown;
OpenMPClauseKind CKind = OMPC_unknown;
- Expr *RefExpr = nullptr;
+ const Expr *RefExpr = nullptr;
DeclRefExpr *PrivateCopy = nullptr;
SourceLocation ImplicitDSALoc;
DSAVarData() = default;
- DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, Expr *RefExpr,
- DeclRefExpr *PrivateCopy, SourceLocation ImplicitDSALoc)
+ DSAVarData(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind,
+ const Expr *RefExpr, DeclRefExpr *PrivateCopy,
+ SourceLocation ImplicitDSALoc)
: DKind(DKind), CKind(CKind), RefExpr(RefExpr),
PrivateCopy(PrivateCopy), ImplicitDSALoc(ImplicitDSALoc) {}
};
- typedef llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>
- OperatorOffsetTy;
+ using OperatorOffsetTy =
+ llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
private:
- struct DSAInfo final {
+ struct DSAInfo {
OpenMPClauseKind Attributes = OMPC_unknown;
/// Pointer to a reference expression and a flag which shows that the
/// variable is marked as lastprivate(true) or not (false).
- llvm::PointerIntPair<Expr *, 1, bool> RefExpr;
+ llvm::PointerIntPair<const Expr *, 1, bool> RefExpr;
DeclRefExpr *PrivateCopy = nullptr;
};
- typedef llvm::DenseMap<ValueDecl *, DSAInfo> DeclSAMapTy;
- typedef llvm::DenseMap<ValueDecl *, Expr *> AlignedMapTy;
- typedef std::pair<unsigned, VarDecl *> LCDeclInfo;
- typedef llvm::DenseMap<ValueDecl *, LCDeclInfo> LoopControlVariablesMapTy;
+ using DeclSAMapTy = llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8>;
+ using AlignedMapTy = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 8>;
+ using LCDeclInfo = std::pair<unsigned, VarDecl *>;
+ using LoopControlVariablesMapTy =
+ llvm::SmallDenseMap<const ValueDecl *, LCDeclInfo, 8>;
/// Struct that associates a component with the clause kind where they are
/// found.
struct MappedExprComponentTy {
OMPClauseMappableExprCommon::MappableExprComponentLists Components;
OpenMPClauseKind Kind = OMPC_unknown;
};
- typedef llvm::DenseMap<ValueDecl *, MappedExprComponentTy>
- MappedExprComponentsTy;
- typedef llvm::StringMap<std::pair<OMPCriticalDirective *, llvm::APSInt>>
- CriticalsWithHintsTy;
- typedef llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>
- DoacrossDependMapTy;
+ using MappedExprComponentsTy =
+ llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
+ using CriticalsWithHintsTy =
+ llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
+ using DoacrossDependMapTy =
+ llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
struct ReductionData {
- typedef llvm::PointerEmbeddedInt<BinaryOperatorKind, 16> BOKPtrType;
+ using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
SourceRange ReductionRange;
llvm::PointerUnion<const Expr *, BOKPtrType> ReductionOp;
ReductionData() = default;
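
ReductionOp packs two alternatives into a single pointer-sized field: llvm::PointerEmbeddedInt stores the BinaryOperatorKind inside the bits of a pointer, and llvm::PointerUnion then discriminates between that and a user-written reduction-identifier expression. Reading it back looks roughly like this (a sketch using the usual PointerUnion API):

    if (ReductionOp.is<const Expr *>()) {
      const Expr *Id = ReductionOp.get<const Expr *>(); // declared reduction id
      (void)Id;
    } else {
      BinaryOperatorKind BOK = ReductionOp.get<BOKPtrType>(); // builtin operator
      (void)BOK;
    }
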
@@ -111,9 +113,10 @@ private:
ReductionOp = RefExpr;
}
};
- typedef llvm::DenseMap<ValueDecl *, ReductionData> DeclReductionMapTy;
+ using DeclReductionMapTy =
+ llvm::SmallDenseMap<const ValueDecl *, ReductionData, 4>;
- struct SharingMapTy final {
+ struct SharingMapTy {
DeclSAMapTy SharingMap;
DeclReductionMapTy ReductionMap;
AlignedMapTy AlignedMap;
@@ -131,10 +134,10 @@ private:
/// get the data (loop counters etc.) about enclosing loop-based construct.
/// This data is required during codegen.
DoacrossDependMapTy DoacrossDepends;
- /// \brief first argument (Expr *) contains optional argument of the
+ /// First argument (Expr *) contains the optional argument of the
/// 'ordered' clause; the second one is true if the region has an 'ordered'
/// clause, false otherwise.
- llvm::PointerIntPair<Expr *, 1, bool> OrderedRegion;
+ llvm::PointerIntPair<const Expr *, 1, bool> OrderedRegion;
bool NowaitRegion = false;
bool CancelRegion = false;
unsigned AssociatedLoops = 1;
@@ -148,25 +151,25 @@ private:
SharingMapTy() = default;
};
- typedef SmallVector<SharingMapTy, 4> StackTy;
+ using StackTy = SmallVector<SharingMapTy, 4>;
- /// \brief Stack of used declaration and their data-sharing attributes.
+ /// Stack of used declaration and their data-sharing attributes.
DeclSAMapTy Threadprivates;
const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
- /// \brief true, if check for DSA must be from parent directive, false, if
+ /// True if the check for DSA must be from the parent directive, false if
/// from the current directive.
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
Sema &SemaRef;
bool ForceCapturing = false;
CriticalsWithHintsTy Criticals;
- typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
+ using iterator = StackTy::const_reverse_iterator;
- DSAVarData getDSA(StackTy::reverse_iterator &Iter, ValueDecl *D);
+ DSAVarData getDSA(iterator &Iter, ValueDecl *D) const;
- /// \brief Checks if the variable is a local for OpenMP region.
- bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
+ /// Checks if the variable is local for the OpenMP region.
+ bool isOpenMPLocal(VarDecl *D, iterator Iter) const;
bool isStackEmpty() const {
return Stack.empty() ||
@@ -223,59 +226,61 @@ public:
}
}
- void addCriticalWithHint(OMPCriticalDirective *D, llvm::APSInt Hint) {
- Criticals[D->getDirectiveName().getAsString()] = std::make_pair(D, Hint);
+ void addCriticalWithHint(const OMPCriticalDirective *D, llvm::APSInt Hint) {
+ Criticals.try_emplace(D->getDirectiveName().getAsString(), D, Hint);
}
- const std::pair<OMPCriticalDirective *, llvm::APSInt>
+ const std::pair<const OMPCriticalDirective *, llvm::APSInt>
getCriticalWithHint(const DeclarationNameInfo &Name) const {
auto I = Criticals.find(Name.getAsString());
if (I != Criticals.end())
return I->second;
return std::make_pair(nullptr, llvm::APSInt());
}
- /// \brief If 'aligned' declaration for given variable \a D was not seen yet,
+ /// If an 'aligned' declaration for the given variable \a D was not seen yet,
/// add it and return NULL; otherwise return the previous occurrence's expression
/// for diagnostics.
- Expr *addUniqueAligned(ValueDecl *D, Expr *NewDE);
+ const Expr *addUniqueAligned(const ValueDecl *D, const Expr *NewDE);
- /// \brief Register specified variable as loop control variable.
- void addLoopControlVariable(ValueDecl *D, VarDecl *Capture);
- /// \brief Check if the specified variable is a loop control variable for
+ /// Register specified variable as loop control variable.
+ void addLoopControlVariable(const ValueDecl *D, VarDecl *Capture);
+ /// Check if the specified variable is a loop control variable for
/// current region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
- LCDeclInfo isLoopControlVariable(ValueDecl *D);
- /// \brief Check if the specified variable is a loop control variable for
+ const LCDeclInfo isLoopControlVariable(const ValueDecl *D) const;
+ /// Check if the specified variable is a loop control variable for
/// parent region.
/// \return The index of the loop control variable in the list of associated
/// for-loops (from outer to inner).
- LCDeclInfo isParentLoopControlVariable(ValueDecl *D);
- /// \brief Get the loop control variable for the I-th loop (or nullptr) in
+ const LCDeclInfo isParentLoopControlVariable(const ValueDecl *D) const;
+ /// Get the loop control variable for the I-th loop (or nullptr) in
/// parent directive.
- ValueDecl *getParentLoopControlVariable(unsigned I);
+ const ValueDecl *getParentLoopControlVariable(unsigned I) const;
- /// \brief Adds explicit data sharing attribute to the specified declaration.
- void addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
+ /// Adds explicit data sharing attribute to the specified declaration.
+ void addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy = nullptr);
/// Adds additional information for the reduction items with the reduction id
/// represented as an operator.
- void addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
+ void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK);
/// Adds additional information for the reduction items with the reduction id
/// represented as reduction identifier.
- void addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
+ void addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
const Expr *ReductionRef);
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
- DSAVarData getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
- BinaryOperatorKind &BOK,
- Expr *&TaskgroupDescriptor);
+ const DSAVarData
+ getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
+ BinaryOperatorKind &BOK,
+ Expr *&TaskgroupDescriptor) const;
/// Returns the location and reduction operation from the innermost parent
/// region for the given \p D.
- DSAVarData getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
- const Expr *&ReductionRef,
- Expr *&TaskgroupDescriptor);
+ const DSAVarData
+ getTopMostTaskgroupReductionData(const ValueDecl *D, SourceRange &SR,
+ const Expr *&ReductionRef,
+ Expr *&TaskgroupDescriptor) const;
/// Return reduction reference expression for the current taskgroup.
Expr *getTaskgroupReductionRef() const {
assert(Stack.back().first.back().Directive == OMPD_taskgroup &&
@@ -285,74 +290,75 @@ public:
}
/// Checks if the given \p VD declaration is actually a taskgroup reduction
/// descriptor variable at the \p Level of OpenMP regions.
- bool isTaskgroupReductionRef(ValueDecl *VD, unsigned Level) const {
+ bool isTaskgroupReductionRef(const ValueDecl *VD, unsigned Level) const {
return Stack.back().first[Level].TaskgroupReductionRef &&
cast<DeclRefExpr>(Stack.back().first[Level].TaskgroupReductionRef)
->getDecl() == VD;
}
- /// \brief Returns data sharing attributes from top of the stack for the
+ /// Returns the data-sharing attributes from the top of the stack for the
/// specified declaration.
- DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
- /// \brief Returns data-sharing attributes for the specified declaration.
- DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent);
- /// \brief Checks if the specified variables has data-sharing attributes which
+ const DSAVarData getTopDSA(ValueDecl *D, bool FromParent);
+ /// Returns data-sharing attributes for the specified declaration.
+ const DSAVarData getImplicitDSA(ValueDecl *D, bool FromParent) const;
+ /// Checks if the specified variable has data-sharing attributes which
/// match the specified \a CPred predicate in any directive which matches \a DPred
/// predicate.
- DSAVarData hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
- const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
- bool FromParent);
- /// \brief Checks if the specified variables has data-sharing attributes which
+ const DSAVarData
+ hasDSA(ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
+ bool FromParent) const;
+ /// Checks if the specified variable has data-sharing attributes which
/// match the specified \a CPred predicate in any innermost directive which
/// matches \a DPred predicate.
- DSAVarData
+ const DSAVarData
hasInnermostDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
- const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
- bool FromParent);
- /// \brief Checks if the specified variables has explicit data-sharing
+ const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
+ bool FromParent) const;
+ /// Checks if the specified variable has explicit data-sharing
/// attributes which match the specified \a CPred predicate at the specified
/// OpenMP region.
- bool hasExplicitDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
- unsigned Level, bool NotLastprivate = false);
+ bool hasExplicitDSA(const ValueDecl *D,
+ const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ unsigned Level, bool NotLastprivate = false) const;
- /// \brief Returns true if the directive at level \Level matches in the
+ /// Returns true if the directive at level \a Level matches the
/// specified \a DPred predicate.
bool hasExplicitDirective(
- const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
- unsigned Level);
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
+ unsigned Level) const;
- /// \brief Finds a directive which matches specified \a DPred predicate.
- bool hasDirective(const llvm::function_ref<bool(OpenMPDirectiveKind,
- const DeclarationNameInfo &,
- SourceLocation)> &DPred,
- bool FromParent);
+ /// Finds a directive which matches specified \a DPred predicate.
+ bool hasDirective(
+ const llvm::function_ref<bool(
+ OpenMPDirectiveKind, const DeclarationNameInfo &, SourceLocation)>
+ DPred,
+ bool FromParent) const;
- /// \brief Returns currently analyzed directive.
+ /// Returns currently analyzed directive.
OpenMPDirectiveKind getCurrentDirective() const {
return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
}
- /// \brief Returns directive kind at specified level.
+ /// Returns directive kind at specified level.
OpenMPDirectiveKind getDirective(unsigned Level) const {
assert(!isStackEmpty() && "No directive at specified level.");
return Stack.back().first[Level].Directive;
}
- /// \brief Returns parent directive.
+ /// Returns parent directive.
OpenMPDirectiveKind getParentDirective() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return OMPD_unknown;
return std::next(Stack.back().first.rbegin())->Directive;
}
- /// \brief Set default data sharing attribute to none.
+ /// Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_none;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
- /// \brief Set default data sharing attribute to shared.
+ /// Set default data sharing attribute to shared.
void setDefaultDSAShared(SourceLocation Loc) {
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_shared;
@@ -385,66 +391,66 @@ public:
: Stack.back().first.back().DefaultMapAttrLoc;
}
- /// \brief Checks if the specified variable is a threadprivate.
+ /// Checks if the specified variable is a threadprivate.
bool isThreadPrivate(VarDecl *D) {
- DSAVarData DVar = getTopDSA(D, false);
+ const DSAVarData DVar = getTopDSA(D, false);
return isOpenMPThreadPrivate(DVar.CKind);
}
- /// \brief Marks current region as ordered (it has an 'ordered' clause).
- void setOrderedRegion(bool IsOrdered, Expr *Param) {
+ /// Marks current region as ordered (it has an 'ordered' clause).
+ void setOrderedRegion(bool IsOrdered, const Expr *Param) {
assert(!isStackEmpty());
Stack.back().first.back().OrderedRegion.setInt(IsOrdered);
Stack.back().first.back().OrderedRegion.setPointer(Param);
}
- /// \brief Returns true, if parent region is ordered (has associated
+ /// Returns true if the parent region is ordered (has an associated
/// 'ordered' clause), false otherwise.
bool isParentOrderedRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getInt();
}
- /// \brief Returns optional parameter for the ordered region.
- Expr *getParentOrderedRegionParam() const {
+ /// Returns optional parameter for the ordered region.
+ const Expr *getParentOrderedRegionParam() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return nullptr;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getPointer();
}
- /// \brief Marks current region as nowait (it has a 'nowait' clause).
+ /// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
assert(!isStackEmpty());
Stack.back().first.back().NowaitRegion = IsNowait;
}
- /// \brief Returns true, if parent region is nowait (has associated
+ /// Returns true if the parent region is nowait (has an associated
/// 'nowait' clause), false otherwise.
bool isParentNowaitRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->NowaitRegion;
}
- /// \brief Marks parent region as cancel region.
+ /// Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (!isStackEmpty() && Stack.back().first.size() > 1) {
auto &StackElemRef = *std::next(Stack.back().first.rbegin());
StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
}
}
- /// \brief Return true if current region has inner cancel construct.
+ /// Return true if current region has inner cancel construct.
bool isCancelRegion() const {
return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
}
- /// \brief Set collapse value for the region.
+ /// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
assert(!isStackEmpty());
Stack.back().first.back().AssociatedLoops = Val;
}
- /// \brief Return collapse value for region.
+ /// Return collapse value for region.
unsigned getAssociatedLoops() const {
return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
}
- /// \brief Marks current target region as one with closely nested teams
+ /// Marks current target region as one with closely nested teams
/// region.
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
if (!isStackEmpty() && Stack.back().first.size() > 1) {
@@ -452,11 +458,11 @@ public:
TeamsRegionLoc;
}
}
- /// \brief Returns true, if current region has closely nested teams region.
+ /// Returns true, if current region has closely nested teams region.
bool hasInnerTeamsRegion() const {
return getInnerTeamsRegionLoc().isValid();
}
- /// \brief Returns location of the nested teams region (if any).
+ /// Returns location of the nested teams region (if any).
SourceLocation getInnerTeamsRegionLoc() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().InnerTeamsRegionLoc;
@@ -465,10 +471,7 @@ public:
Scope *getCurScope() const {
return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
}
- Scope *getCurScope() {
- return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
- }
- SourceLocation getConstructLoc() {
+ SourceLocation getConstructLoc() const {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().ConstructLoc;
}
@@ -476,10 +479,11 @@ public:
/// Do the check specified in \a Check to all component lists and return true
/// if any issue is found.
bool checkMappableExprComponentListsForDecl(
- ValueDecl *VD, bool CurrentRegionOnly,
+ const ValueDecl *VD, bool CurrentRegionOnly,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
- OpenMPClauseKind)> &Check) {
+ OpenMPClauseKind)>
+ Check) const {
if (isStackEmpty())
return false;
auto SI = Stack.back().first.rbegin();
@@ -488,16 +492,16 @@ public:
if (SI == SE)
return false;
- if (CurrentRegionOnly) {
+ if (CurrentRegionOnly)
SE = std::next(SI);
- } else {
- ++SI;
- }
+ else
+ std::advance(SI, 1);
for (; SI != SE; ++SI) {
auto MI = SI->MappedExprComponents.find(VD);
if (MI != SI->MappedExprComponents.end())
- for (auto &L : MI->second.Components)
+ for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
+ MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
}
@@ -507,10 +511,11 @@ public:
/// Do the check specified in \a Check to all component lists at a given level
/// and return true if any issue is found.
bool checkMappableExprComponentListsForDeclAtLevel(
- ValueDecl *VD, unsigned Level,
+ const ValueDecl *VD, unsigned Level,
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
- OpenMPClauseKind)> &Check) {
+ OpenMPClauseKind)>
+ Check) const {
if (isStackEmpty())
return false;
@@ -522,7 +527,8 @@ public:
auto MI = StartI->MappedExprComponents.find(VD);
if (MI != StartI->MappedExprComponents.end())
- for (auto &L : MI->second.Components)
+ for (OMPClauseMappableExprCommon::MappableExprComponentListRef L :
+ MI->second.Components)
if (Check(L, MI->second.Kind))
return true;
return false;
@@ -531,12 +537,13 @@ public:
/// Create a new mappable expression component list associated with a given
/// declaration and initialize it with the provided list of components.
void addMappableExpressionComponents(
- ValueDecl *VD,
+ const ValueDecl *VD,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPClauseKind WhereFoundClauseKind) {
assert(!isStackEmpty() &&
"Not expecting to retrieve components from an empty stack!");
- auto &MEC = Stack.back().first.back().MappedExprComponents[VD];
+ MappedExprComponentTy &MEC =
+ Stack.back().first.back().MappedExprComponents[VD];
// Create new entry and append the new components there.
MEC.Components.resize(MEC.Components.size() + 1);
MEC.Components.back().append(Components.begin(), Components.end());
@@ -547,18 +554,19 @@ public:
assert(!isStackEmpty());
return Stack.back().first.size() - 1;
}
- void addDoacrossDependClause(OMPDependClause *C, OperatorOffsetTy &OpsOffs) {
+ void addDoacrossDependClause(OMPDependClause *C,
+ const OperatorOffsetTy &OpsOffs) {
assert(!isStackEmpty() && Stack.back().first.size() > 1);
- auto &StackElem = *std::next(Stack.back().first.rbegin());
+ SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
assert(isOpenMPWorksharingDirective(StackElem.Directive));
- StackElem.DoacrossDepends.insert({C, OpsOffs});
+ StackElem.DoacrossDepends.try_emplace(C, OpsOffs);
}
llvm::iterator_range<DoacrossDependMapTy::const_iterator>
getDoacrossDependClauses() const {
assert(!isStackEmpty());
- auto &StackElem = Stack.back().first.back();
+ const SharingMapTy &StackElem = Stack.back().first.back();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
- auto &Ref = StackElem.DoacrossDepends;
+ const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(StackElem.DoacrossDepends.end(),
@@ -569,29 +577,34 @@ bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
}
+
} // namespace
-static Expr *getExprAsWritten(Expr *E) {
- if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
+static const Expr *getExprAsWritten(const Expr *E) {
+ if (const auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
E = ExprTemp->getSubExpr();
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
E = MTE->GetTemporaryExpr();
- while (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
+ while (const auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
E = Binder->getSubExpr();
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
+ if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExprAsWritten();
return E->IgnoreParens();
}
-static ValueDecl *getCanonicalDecl(ValueDecl *D) {
- if (auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
- if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
+static Expr *getExprAsWritten(Expr *E) {
+ return const_cast<Expr *>(getExprAsWritten(const_cast<const Expr *>(E)));
+}
+
+static const ValueDecl *getCanonicalDecl(const ValueDecl *D) {
+ if (const auto *CED = dyn_cast<OMPCapturedExprDecl>(D))
+ if (const auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
D = ME->getMemberDecl();
- auto *VD = dyn_cast<VarDecl>(D);
- auto *FD = dyn_cast<FieldDecl>(D);
+ const auto *VD = dyn_cast<VarDecl>(D);
+ const auto *FD = dyn_cast<FieldDecl>(D);
if (VD != nullptr) {
VD = VD->getCanonicalDecl();
D = VD;
@@ -603,11 +616,16 @@ static ValueDecl *getCanonicalDecl(ValueDecl *D) {
return D;
}
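
The const-correctness migration keeps a single implementation via the standard forwarding idiom: the const overload holds the logic and the non-const overload const_casts through it, as getExprAsWritten does here and getCanonicalDecl does in the next hunk. In general form (Tree/Node are illustrative):

    struct Tree; struct Node;
    const Node *find(const Tree &T, int Key);   // the one real implementation
    Node *find(Tree &T, int Key) {
      return const_cast<Node *>(find(static_cast<const Tree &>(T), Key));
    }
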
-DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
- ValueDecl *D) {
+static ValueDecl *getCanonicalDecl(ValueDecl *D) {
+ return const_cast<ValueDecl *>(
+ getCanonicalDecl(const_cast<const ValueDecl *>(D)));
+}
+
+DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
+ ValueDecl *D) const {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
- auto *FD = dyn_cast<FieldDecl>(D);
+ const auto *FD = dyn_cast<FieldDecl>(D);
DSAVarData DVar;
if (isStackEmpty() || Iter == Stack.back().first.rend()) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
@@ -615,7 +633,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
// File-scope or namespace-scope variables referenced in called routines
// in the region are shared unless they appear in a threadprivate
// directive.
- if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(D))
+ if (VD && !VD->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(VD))
DVar.CKind = OMPC_shared;
// OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced
@@ -646,9 +664,10 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
// Explicitly specified attributes and local variables with predetermined
// attributes.
if (Iter->SharingMap.count(D)) {
- DVar.RefExpr = Iter->SharingMap[D].RefExpr.getPointer();
- DVar.PrivateCopy = Iter->SharingMap[D].PrivateCopy;
- DVar.CKind = Iter->SharingMap[D].Attributes;
+ const DSAInfo &Data = Iter->SharingMap.lookup(D);
+ DVar.RefExpr = Data.RefExpr.getPointer();
+ DVar.PrivateCopy = Data.PrivateCopy;
+ DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
}
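
Using SharingMap.lookup(D) rather than SharingMap[D] is what allows getDSA and the other accessors to become const: DenseMap::operator[] inserts a default-constructed entry for a missing key, whereas lookup() returns a value-initialized copy and never mutates the map. Contrast:

    llvm::SmallDenseMap<const ValueDecl *, DSAInfo, 8> Map;
    DSAInfo ByLookup = Map.lookup(D); // default DSAInfo if D is absent; no insert
    DSAInfo &ByIndex = Map[D];        // inserts a default entry if D is absent
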
@@ -683,7 +702,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
// bound to the current team is shared.
if (isOpenMPTaskingDirective(DVar.DKind)) {
DSAVarData DVarTemp;
- auto I = Iter, E = Stack.back().first.rend();
+ iterator I = Iter, E = Stack.back().first.rend();
do {
++I;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
@@ -711,74 +730,75 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
return getDSA(++Iter, D);
}
-Expr *DSAStackTy::addUniqueAligned(ValueDecl *D, Expr *NewDE) {
+const Expr *DSAStackTy::addUniqueAligned(const ValueDecl *D,
+ const Expr *NewDE) {
assert(!isStackEmpty() && "Data sharing attributes stack is empty");
D = getCanonicalDecl(D);
- auto &StackElem = Stack.back().first.back();
+ SharingMapTy &StackElem = Stack.back().first.back();
auto It = StackElem.AlignedMap.find(D);
if (It == StackElem.AlignedMap.end()) {
assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
StackElem.AlignedMap[D] = NewDE;
return nullptr;
- } else {
- assert(It->second && "Unexpected nullptr expr in the aligned map");
- return It->second;
}
- return nullptr;
+ assert(It->second && "Unexpected nullptr expr in the aligned map");
+ return It->second;
}
-void DSAStackTy::addLoopControlVariable(ValueDecl *D, VarDecl *Capture) {
+void DSAStackTy::addLoopControlVariable(const ValueDecl *D, VarDecl *Capture) {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
- auto &StackElem = Stack.back().first.back();
- StackElem.LCVMap.insert(
- {D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture)});
+ SharingMapTy &StackElem = Stack.back().first.back();
+ StackElem.LCVMap.try_emplace(
+ D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture));
}
-DSAStackTy::LCDeclInfo DSAStackTy::isLoopControlVariable(ValueDecl *D) {
+const DSAStackTy::LCDeclInfo
+DSAStackTy::isLoopControlVariable(const ValueDecl *D) const {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
- auto &StackElem = Stack.back().first.back();
+ const SharingMapTy &StackElem = Stack.back().first.back();
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
-DSAStackTy::LCDeclInfo DSAStackTy::isParentLoopControlVariable(ValueDecl *D) {
+const DSAStackTy::LCDeclInfo
+DSAStackTy::isParentLoopControlVariable(const ValueDecl *D) const {
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
- auto &StackElem = *std::next(Stack.back().first.rbegin());
+ const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
-ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) {
+const ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) const {
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
- auto &StackElem = *std::next(Stack.back().first.rbegin());
+ const SharingMapTy &StackElem = *std::next(Stack.back().first.rbegin());
if (StackElem.LCVMap.size() < I)
return nullptr;
- for (auto &Pair : StackElem.LCVMap)
+ for (const auto &Pair : StackElem.LCVMap)
if (Pair.second.first == I)
return Pair.first;
return nullptr;
}
-void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
+void DSAStackTy::addDSA(const ValueDecl *D, const Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
- auto &Data = Threadprivates[D];
+ DSAInfo &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
} else {
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
- auto &Data = Stack.back().first.back().SharingMap[D];
+ DSAInfo &Data = Stack.back().first.back().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
@@ -793,7 +813,8 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
- auto &Data = Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
+ DSAInfo &Data =
+ Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
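
The try_emplace calls introduced in this file (Criticals, LCVMap, DoacrossDepends) construct the value in place and only when the key is absent, avoiding both the temporary pair of insert() and the double hashing of operator[] followed by assignment. Note that, unlike the old Criticals[...] = ... form, an existing entry is left untouched. Behaviorally:

    llvm::DenseMap<int, std::string> M;
    auto R1 = M.try_emplace(1, "first");  // inserted: R1.second == true
    auto R2 = M.try_emplace(1, "second"); // key exists: no-op, R2.second == false
    assert(R1.second && !R2.second && M.lookup(1) == "first");
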
@@ -801,13 +822,14 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
}
}
-/// \brief Build a variable declaration for OpenMP loop iteration variable.
+/// Build a variable declaration for OpenMP loop iteration variable.
static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
- StringRef Name, const AttrVec *Attrs = nullptr) {
+ StringRef Name, const AttrVec *Attrs = nullptr,
+ DeclRefExpr *OrigRef = nullptr) {
DeclContext *DC = SemaRef.CurContext;
IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
- VarDecl *Decl =
+ auto *Decl =
VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None);
if (Attrs) {
for (specific_attr_iterator<AlignedAttr> I(Attrs->begin()), E(Attrs->end());
@@ -815,6 +837,10 @@ static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type,
Decl->addAttr(*I);
}
Decl->setImplicit();
+ if (OrigRef) {
+ Decl->addAttr(
+ OMPReferencedVarAttr::CreateImplicit(SemaRef.Context, OrigRef));
+ }
return Decl;
}
@@ -828,14 +854,14 @@ static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty,
VK_LValue);
}
-void DSAStackTy::addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
+void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
BinaryOperatorKind BOK) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
- auto &ReductionData = Stack.back().first.back().ReductionMap[D];
+ ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
Stack.back().first.back().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
@@ -844,21 +870,21 @@ void DSAStackTy::addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
Expr *&TaskgroupReductionRef =
Stack.back().first.back().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
- auto *VD = buildVarDecl(SemaRef, SR.getBegin(),
- SemaRef.Context.VoidPtrTy, ".task_red.");
+ VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
+ SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
-void DSAStackTy::addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
+void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
const Expr *ReductionRef) {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
assert(
Stack.back().first.back().SharingMap[D].Attributes == OMPC_reduction &&
"Additional reduction info may be specified only for reduction items.");
- auto &ReductionData = Stack.back().first.back().ReductionMap[D];
+ ReductionData &ReductionData = Stack.back().first.back().ReductionMap[D];
assert(ReductionData.ReductionRange.isInvalid() &&
Stack.back().first.back().Directive == OMPD_taskgroup &&
"Additional reduction info may be specified only once for reduction "
@@ -867,28 +893,27 @@ void DSAStackTy::addTaskgroupReductionData(ValueDecl *D, SourceRange SR,
Expr *&TaskgroupReductionRef =
Stack.back().first.back().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
- auto *VD = buildVarDecl(SemaRef, SR.getBegin(), SemaRef.Context.VoidPtrTy,
- ".task_red.");
+ VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
+ SemaRef.Context.VoidPtrTy, ".task_red.");
TaskgroupReductionRef =
buildDeclRefExpr(SemaRef, VD, SemaRef.Context.VoidPtrTy, SR.getBegin());
}
}
-DSAStackTy::DSAVarData
-DSAStackTy::getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
- BinaryOperatorKind &BOK,
- Expr *&TaskgroupDescriptor) {
+const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
+ const ValueDecl *D, SourceRange &SR, BinaryOperatorKind &BOK,
+ Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
if (Stack.back().first.empty())
return DSAVarData();
- for (auto I = std::next(Stack.back().first.rbegin(), 1),
- E = Stack.back().first.rend();
+ for (iterator I = std::next(Stack.back().first.rbegin(), 1),
+ E = Stack.back().first.rend();
I != E; std::advance(I, 1)) {
- auto &Data = I->SharingMap[D];
+ const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
- auto &ReductionData = I->ReductionMap[D];
+ const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
@@ -904,21 +929,20 @@ DSAStackTy::getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
return DSAVarData();
}
-DSAStackTy::DSAVarData
-DSAStackTy::getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
- const Expr *&ReductionRef,
- Expr *&TaskgroupDescriptor) {
+const DSAStackTy::DSAVarData DSAStackTy::getTopMostTaskgroupReductionData(
+ const ValueDecl *D, SourceRange &SR, const Expr *&ReductionRef,
+ Expr *&TaskgroupDescriptor) const {
D = getCanonicalDecl(D);
assert(!isStackEmpty() && "Data-sharing attributes stack is empty.");
if (Stack.back().first.empty())
return DSAVarData();
- for (auto I = std::next(Stack.back().first.rbegin(), 1),
- E = Stack.back().first.rend();
+ for (iterator I = std::next(Stack.back().first.rbegin(), 1),
+ E = Stack.back().first.rend();
I != E; std::advance(I, 1)) {
- auto &Data = I->SharingMap[D];
+ const DSAInfo &Data = I->SharingMap.lookup(D);
if (Data.Attributes != OMPC_reduction || I->Directive != OMPD_taskgroup)
continue;
- auto &ReductionData = I->ReductionMap[D];
+ const ReductionData &ReductionData = I->ReductionMap.lookup(D);
if (!ReductionData.ReductionOp ||
!ReductionData.ReductionOp.is<const Expr *>())
return DSAVarData();
@@ -934,12 +958,13 @@ DSAStackTy::getTopMostTaskgroupReductionData(ValueDecl *D, SourceRange &SR,
return DSAVarData();
}
-bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
+bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
D = D->getCanonicalDecl();
- if (!isStackEmpty() && Stack.back().first.size() > 1) {
- reverse_iterator I = Iter, E = Stack.back().first.rend();
+ if (!isStackEmpty()) {
+ iterator I = Iter, E = Stack.back().first.rend();
Scope *TopScope = nullptr;
- while (I != E && !isParallelOrTaskRegion(I->Directive))
+ while (I != E && !isParallelOrTaskRegion(I->Directive) &&
+ !isOpenMPTargetExecutionDirective(I->Directive))
++I;
if (I == E)
return false;
@@ -952,35 +977,80 @@ bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
return false;
}
-DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
+const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
+ bool FromParent) {
D = getCanonicalDecl(D);
DSAVarData DVar;
+ auto *VD = dyn_cast<VarDecl>(D);
+ auto TI = Threadprivates.find(D);
+ if (TI != Threadprivates.end()) {
+ DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
+ DVar.CKind = OMPC_threadprivate;
+ return DVar;
+ }
+ if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
+ DVar.RefExpr = buildDeclRefExpr(
+ SemaRef, VD, D->getType().getNonReferenceType(),
+ VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
+ DVar.CKind = OMPC_threadprivate;
+ addDSA(D, DVar.RefExpr, OMPC_threadprivate);
+ return DVar;
+ }
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.1]
// Variables appearing in threadprivate directives are threadprivate.
- auto *VD = dyn_cast<VarDecl>(D);
if ((VD && VD->getTLSKind() != VarDecl::TLS_None &&
!(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
SemaRef.getLangOpts().OpenMPUseTLS &&
SemaRef.getASTContext().getTargetInfo().isTLSSupported())) ||
(VD && VD->getStorageClass() == SC_Register &&
VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())) {
- addDSA(D, buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
- D->getLocation()),
- OMPC_threadprivate);
- }
- auto TI = Threadprivates.find(D);
- if (TI != Threadprivates.end()) {
- DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
- DVar.CKind = OMPC_threadprivate;
- return DVar;
- } else if (VD && VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
DVar.RefExpr = buildDeclRefExpr(
- SemaRef, VD, D->getType().getNonReferenceType(),
- VD->getAttr<OMPThreadPrivateDeclAttr>()->getLocation());
+ SemaRef, VD, D->getType().getNonReferenceType(), D->getLocation());
DVar.CKind = OMPC_threadprivate;
addDSA(D, DVar.RefExpr, OMPC_threadprivate);
+ return DVar;
+ }
+ if (SemaRef.getLangOpts().OpenMPCUDAMode && VD &&
+ VD->isLocalVarDeclOrParm() && !isStackEmpty() &&
+ !isLoopControlVariable(D).first) {
+ iterator IterTarget =
+ std::find_if(Stack.back().first.rbegin(), Stack.back().first.rend(),
+ [](const SharingMapTy &Data) {
+ return isOpenMPTargetExecutionDirective(Data.Directive);
+ });
+ if (IterTarget != Stack.back().first.rend()) {
+ iterator ParentIterTarget = std::next(IterTarget, 1);
+ for (iterator Iter = Stack.back().first.rbegin();
+ Iter != ParentIterTarget; std::advance(Iter, 1)) {
+ if (isOpenMPLocal(VD, Iter)) {
+ DVar.RefExpr =
+ buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
+ D->getLocation());
+ DVar.CKind = OMPC_threadprivate;
+ return DVar;
+ }
+ }
+ if (!isClauseParsingMode() || IterTarget != Stack.back().first.rbegin()) {
+ auto DSAIter = IterTarget->SharingMap.find(D);
+ if (DSAIter != IterTarget->SharingMap.end() &&
+ isOpenMPPrivate(DSAIter->getSecond().Attributes)) {
+ DVar.RefExpr = DSAIter->getSecond().RefExpr.getPointer();
+ DVar.CKind = OMPC_threadprivate;
+ return DVar;
+ }
+ iterator End = Stack.back().first.rend();
+ if (!SemaRef.isOpenMPCapturedByRef(
+ D, std::distance(ParentIterTarget, End))) {
+ DVar.RefExpr =
+ buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
+ IterTarget->ConstructLoc);
+ DVar.CKind = OMPC_threadprivate;
+ return DVar;
+ }
+ }
+ }
}
if (isStackEmpty())
@@ -994,7 +1064,7 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
// in a Construct, C/C++, predetermined, p.7]
// Variables with static storage duration that are declared in a scope
// inside the construct are shared.
- auto &&MatchesAlways = [](OpenMPDirectiveKind) -> bool { return true; };
+ auto &&MatchesAlways = [](OpenMPDirectiveKind) { return true; };
if (VD && VD->isStaticDataMember()) {
DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways, FromParent);
if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
@@ -1011,21 +1081,21 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
// in a Construct, C/C++, predetermined, p.6]
// Variables with const qualified type having no mutable member are
// shared.
- CXXRecordDecl *RD =
+ const CXXRecordDecl *RD =
SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
- if (auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
- if (auto *CTD = CTSD->getSpecializedTemplate())
+ if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
+ if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
RD = CTD->getTemplatedDecl();
if (IsConstant &&
!(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
RD->hasMutableFields())) {
// Variables with const-qualified type having no mutable member may be
// listed in a firstprivate clause, even if they are static data members.
- DSAVarData DVarTemp = hasDSA(
- D, [](OpenMPClauseKind C) -> bool { return C == OMPC_firstprivate; },
- MatchesAlways, FromParent);
+ DSAVarData DVarTemp =
+ hasDSA(D, [](OpenMPClauseKind C) { return C == OMPC_firstprivate; },
+ MatchesAlways, FromParent);
if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
- return DVar;
+ return DVarTemp;
DVar.CKind = OMPC_shared;
return DVar;
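  // Illustrative, not part of this patch: a const-qualified variable with no
  // mutable members is predetermined shared, yet may still be listed in a
  // firstprivate clause:
  //
  //   const int N = 100;
  //   #pragma omp parallel firstprivate(N) // OK: each thread copies N
  //   { /* ... */ }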
@@ -1033,14 +1103,16 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
// Explicitly specified attributes and local variables with predetermined
// attributes.
- auto I = Stack.back().first.rbegin();
- auto EndI = Stack.back().first.rend();
+ iterator I = Stack.back().first.rbegin();
+ iterator EndI = Stack.back().first.rend();
if (FromParent && I != EndI)
std::advance(I, 1);
- if (I->SharingMap.count(D)) {
- DVar.RefExpr = I->SharingMap[D].RefExpr.getPointer();
- DVar.PrivateCopy = I->SharingMap[D].PrivateCopy;
- DVar.CKind = I->SharingMap[D].Attributes;
+ auto It = I->SharingMap.find(D);
+ if (It != I->SharingMap.end()) {
+ const DSAInfo &Data = It->getSecond();
+ DVar.RefExpr = Data.RefExpr.getPointer();
+ DVar.PrivateCopy = Data.PrivateCopy;
+ DVar.CKind = Data.Attributes;
DVar.ImplicitDSALoc = I->DefaultAttrLoc;
DVar.DKind = I->Directive;
}
@@ -1048,36 +1120,36 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
return DVar;
}
-DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
- bool FromParent) {
+const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
+ bool FromParent) const {
if (isStackEmpty()) {
- StackTy::reverse_iterator I;
+ iterator I;
return getDSA(I, D);
}
D = getCanonicalDecl(D);
- auto StartI = Stack.back().first.rbegin();
- auto EndI = Stack.back().first.rend();
+ iterator StartI = Stack.back().first.rbegin();
+ iterator EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
std::advance(StartI, 1);
return getDSA(StartI, D);
}
-DSAStackTy::DSAVarData
+const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
- const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
- bool FromParent) {
+ const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
+ bool FromParent) const {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
- auto I = Stack.back().first.rbegin();
- auto EndI = Stack.back().first.rend();
+ iterator I = Stack.back().first.rbegin();
+ iterator EndI = Stack.back().first.rend();
if (FromParent && I != EndI)
std::advance(I, 1);
for (; I != EndI; std::advance(I, 1)) {
if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
continue;
- auto NewI = I;
+ iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
if (I == NewI && CPred(DVar.CKind))
return DVar;
@@ -1085,27 +1157,27 @@ DSAStackTy::hasDSA(ValueDecl *D,
return {};
}
-DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
- ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
- const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
- bool FromParent) {
+const DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
+ ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
+ bool FromParent) const {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
- auto StartI = Stack.back().first.rbegin();
- auto EndI = Stack.back().first.rend();
+ iterator StartI = Stack.back().first.rbegin();
+ iterator EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
std::advance(StartI, 1);
if (StartI == EndI || !DPred(StartI->Directive))
return {};
- auto NewI = StartI;
+ iterator NewI = StartI;
DSAVarData DVar = getDSA(NewI, D);
return (NewI == StartI && CPred(DVar.CKind)) ? DVar : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
- ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
- unsigned Level, bool NotLastprivate) {
+ const ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> CPred,
+ unsigned Level, bool NotLastprivate) const {
if (isStackEmpty())
return false;
D = getCanonicalDecl(D);
@@ -1114,15 +1186,16 @@ bool DSAStackTy::hasExplicitDSA(
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
- return (StartI->SharingMap.count(D) > 0) &&
- StartI->SharingMap[D].RefExpr.getPointer() &&
- CPred(StartI->SharingMap[D].Attributes) &&
- (!NotLastprivate || !StartI->SharingMap[D].RefExpr.getInt());
+ auto I = StartI->SharingMap.find(D);
+ return (I != StartI->SharingMap.end()) &&
+ I->getSecond().RefExpr.getPointer() &&
+ CPred(I->getSecond().Attributes) &&
+ (!NotLastprivate || !I->getSecond().RefExpr.getInt());
}
bool DSAStackTy::hasExplicitDirective(
- const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
- unsigned Level) {
+ const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
+ unsigned Level) const {
if (isStackEmpty())
return false;
auto StartI = Stack.back().first.begin();
@@ -1136,8 +1209,8 @@ bool DSAStackTy::hasExplicitDirective(
bool DSAStackTy::hasDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind,
const DeclarationNameInfo &, SourceLocation)>
- &DPred,
- bool FromParent) {
+ DPred,
+ bool FromParent) const {
// We look only in the enclosing region.
if (isStackEmpty())
return false;
@@ -1166,15 +1239,26 @@ void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
-bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
+static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+isDeclareTargetDeclaration(const ValueDecl *VD) {
+ for (const Decl *D : VD->redecls()) {
+ if (!D->hasAttrs())
+ continue;
+ if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
+ return Attr->getMapType();
+ }
+ return llvm::None;
+}
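// Illustrative usage, not part of this patch, of a declaration the helper
// above recognizes:
//
//   #pragma omp declare target
//   int DeviceGlobal; // enclosed in 'declare target': not captured below
//   #pragma omp end declare target
//
//   void Use() {
//   #pragma omp target
//     DeviceGlobal = 1; // no implicit capture/map of DeviceGlobal needed
//   }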
+
+bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
- auto &Ctx = getASTContext();
+ ASTContext &Ctx = getASTContext();
bool IsByRef = true;
// Find the directive that is associated with the provided scope.
D = cast<ValueDecl>(D->getCanonicalDecl());
- auto Ty = D->getType();
+ QualType Ty = D->getType();
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level)) {
// This table summarizes how a given variable should be passed to the device
@@ -1241,7 +1325,9 @@ bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
bool IsVariableAssociatedWithSection = false;
DSAStack->checkMappableExprComponentListsForDeclAtLevel(
- D, Level, [&](OMPClauseMappableExprCommon::MappableExprComponentListRef
+ D, Level,
+ [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef
MapExprComponents,
OpenMPClauseKind WhereFoundClauseKind) {
// Only the map clause information influences how a variable is
@@ -1331,20 +1417,22 @@ bool Sema::isInOpenMPTargetExecutionDirective() const {
false);
}
-VarDecl *Sema::IsOpenMPCapturedDecl(ValueDecl *D) {
+VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
// If we are attempting to capture a global variable in a directive with
  // 'target', we return the declaration so that this global is also mapped to
  // the device.
//
- // FIXME: If the declaration is enclosed in a 'declare target' directive,
- // then it should not be captured. Therefore, an extra check has to be
- // inserted here once support for 'declare target' is added.
- //
auto *VD = dyn_cast<VarDecl>(D);
- if (VD && !VD->hasLocalStorage() && isInOpenMPTargetExecutionDirective())
+ if (VD && !VD->hasLocalStorage() && isInOpenMPTargetExecutionDirective()) {
+    // If the declaration is enclosed in a 'declare target' directive,
+    // then it should not be captured.
+ if (isDeclareTargetDeclaration(VD))
+ return nullptr;
return VD;
+ }
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
(!DSAStack->isClauseParsingMode() ||
@@ -1355,12 +1443,13 @@ VarDecl *Sema::IsOpenMPCapturedDecl(ValueDecl *D) {
isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
- auto DVarPrivate = DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
+ DSAStackTy::DSAVarData DVarPrivate =
+ DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
- DVarPrivate = DSAStack->hasDSA(
- D, isOpenMPPrivate, [](OpenMPDirectiveKind) -> bool { return true; },
- DSAStack->isClauseParsingMode());
+ DVarPrivate = DSAStack->hasDSA(D, isOpenMPPrivate,
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
if (DVarPrivate.CKind != OMPC_unknown)
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
@@ -1374,11 +1463,10 @@ void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
FunctionScopesIndex -= Regions.size();
}
-bool Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level) {
+bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
return DSAStack->hasExplicitDSA(
- D, [](OpenMPClauseKind K) -> bool { return K == OMPC_private; },
- Level) ||
+ D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
(DSAStack->isClauseParsingMode() &&
DSAStack->getClauseParsingMode() == OMPC_private) ||
// Consider taskgroup reduction descriptor variable a private to avoid
@@ -1389,7 +1477,8 @@ bool Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level) {
DSAStack->isTaskgroupReductionRef(D, Level));
}
-void Sema::setOpenMPCaptureKind(FieldDecl *FD, ValueDecl *D, unsigned Level) {
+void Sema::setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D,
+ unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
OpenMPClauseKind OMPC = OMPC_unknown;
@@ -1414,7 +1503,11 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, ValueDecl *D, unsigned Level) {
}
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
NewLevel)) {
- OMPC = OMPC_firstprivate;
+ OMPC = OMPC_map;
+ if (D->getType()->isScalarType() &&
+ DSAStack->getDefaultDMAAtLevel(NewLevel) !=
+ DefaultMapAttributes::DMA_tofrom_scalar)
+ OMPC = OMPC_firstprivate;
break;
}
}
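// User-visible effect, sketched (not part of this patch): a scalar referenced
// in a target region is captured firstprivate unless defaultmap requests a
// tofrom mapping for scalars:
//
//   int S = 0;
//   #pragma omp target defaultmap(tofrom : scalar)
//   S = 1; // 'S' is mapped tofrom; without the clause it is firstprivate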
@@ -1422,11 +1515,12 @@ void Sema::setOpenMPCaptureKind(FieldDecl *FD, ValueDecl *D, unsigned Level) {
FD->addAttr(OMPCaptureKindAttr::CreateImplicit(Context, OMPC));
}
-bool Sema::isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level) {
+bool Sema::isOpenMPTargetCapturedDecl(const ValueDecl *D,
+ unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
// Return true if the current level is no longer enclosed in a target region.
- auto *VD = dyn_cast<VarDecl>(D);
+ const auto *VD = dyn_cast<VarDecl>(D);
return VD && !VD->hasLocalStorage() &&
DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective,
Level);
@@ -1456,28 +1550,29 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// clause requires an accessible, unambiguous default constructor for the
// class type, unless the list item is also specified in a firstprivate
// clause.
- if (auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
- for (auto *C : D->clauses()) {
+ if (const auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
+ for (OMPClause *C : D->clauses()) {
if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
SmallVector<Expr *, 8> PrivateCopies;
- for (auto *DE : Clause->varlists()) {
+ for (Expr *DE : Clause->varlists()) {
if (DE->isValueDependent() || DE->isTypeDependent()) {
PrivateCopies.push_back(nullptr);
continue;
}
auto *DRE = cast<DeclRefExpr>(DE->IgnoreParens());
- VarDecl *VD = cast<VarDecl>(DRE->getDecl());
+ auto *VD = cast<VarDecl>(DRE->getDecl());
QualType Type = VD->getType().getNonReferenceType();
- auto DVar = DSAStack->getTopDSA(VD, false);
+ const DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(VD, /*FromParent=*/false);
if (DVar.CKind == OMPC_lastprivate) {
// Generate helper private variable and initialize it with the
// default value. The address of the original variable is replaced
// by the address of the new private variable in CodeGen. This new
// variable is not added to IdResolver, so the code in the OpenMP
          // region uses the original variable for proper diagnostics.
- auto *VDPrivate = buildVarDecl(
+ VarDecl *VDPrivate = buildVarDecl(
*this, DE->getExprLoc(), Type.getUnqualifiedType(),
- VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr);
+ VD->getName(), VD->hasAttrs() ? &VD->getAttrs() : nullptr, DRE);
ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl())
continue;
@@ -1507,7 +1602,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
namespace {
-class VarDeclFilterCCC : public CorrectionCandidateCallback {
+class VarDeclFilterCCC final : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
@@ -1515,7 +1610,7 @@ public:
explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
- if (auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
return VD->hasGlobalStorage() &&
SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
@@ -1524,7 +1619,7 @@ public:
}
};
-class VarOrFuncDeclFilterCCC : public CorrectionCandidateCallback {
+class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
private:
Sema &SemaRef;
@@ -1568,12 +1663,10 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
<< Id.getName();
return ExprError();
}
- } else {
- if (!(VD = Lookup.getAsSingle<VarDecl>())) {
- Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
- Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
- return ExprError();
- }
+ } else if (!(VD = Lookup.getAsSingle<VarDecl>())) {
+ Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName();
+ Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at);
+ return ExprError();
}
Lookup.suppressDiagnostics();
@@ -1591,7 +1684,7 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
}
VarDecl *CanonicalVD = VD->getCanonicalDecl();
- NamedDecl *ND = cast<NamedDecl>(CanonicalVD);
+ NamedDecl *ND = CanonicalVD;
// OpenMP [2.9.2, Restrictions, C/C++, p.2]
// A threadprivate directive for file-scope variables must appear outside
// any definition or declaration.
@@ -1679,12 +1772,13 @@ Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
}
namespace {
-class LocalVarRefChecker : public ConstStmtVisitor<LocalVarRefChecker, bool> {
+class LocalVarRefChecker final
+ : public ConstStmtVisitor<LocalVarRefChecker, bool> {
Sema &SemaRef;
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
- if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
SemaRef.Diag(E->getLocStart(),
diag::err_omp_local_var_in_threadprivate_init)
@@ -1697,7 +1791,7 @@ public:
return false;
}
bool VisitStmt(const Stmt *S) {
- for (auto Child : S->children()) {
+ for (const Stmt *Child : S->children()) {
if (Child && Visit(Child))
return true;
}
@@ -1710,9 +1804,9 @@ public:
OMPThreadPrivateDecl *
Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
SmallVector<Expr *, 8> Vars;
- for (auto &RefExpr : VarList) {
- DeclRefExpr *DE = cast<DeclRefExpr>(RefExpr);
- VarDecl *VD = cast<VarDecl>(DE->getDecl());
+ for (Expr *RefExpr : VarList) {
+ auto *DE = cast<DeclRefExpr>(RefExpr);
+ auto *VD = cast<VarDecl>(DE->getDecl());
SourceLocation ILoc = DE->getExprLoc();
// Mark variable as used.
@@ -1766,7 +1860,7 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
// Check if initial value of threadprivate variable reference variable with
// local storage (it is not supported by runtime).
- if (auto Init = VD->getAnyInitializer()) {
+ if (const Expr *Init = VD->getAnyInitializer()) {
LocalVarRefChecker Checker(*this);
if (Checker.Visit(Init))
continue;
@@ -1776,7 +1870,7 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
DSAStack->addDSA(VD, DE, OMPC_threadprivate);
VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
Context, SourceRange(Loc, Loc)));
- if (auto *ML = Context.getASTMutationListener())
+ if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPThreadPrivate(VD);
}
OMPThreadPrivateDecl *D = nullptr;
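  // Sketch (not part of this patch) of an initializer the checker above
  // rejects:
  //
  //   void F() {
  //     int Local = 0;
  //     static int S = Local; // error: initializer references a local
  //     #pragma omp threadprivate(S)
  //   }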
@@ -1788,8 +1882,9 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
return D;
}
-static void ReportOriginalDSA(Sema &SemaRef, DSAStackTy *Stack,
- const ValueDecl *D, DSAStackTy::DSAVarData DVar,
+static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
+ const ValueDecl *D,
+ const DSAStackTy::DSAVarData &DVar,
bool IsLoopIterVar = false) {
if (DVar.RefExpr) {
SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
@@ -1845,15 +1940,15 @@ static void ReportOriginalDSA(Sema &SemaRef, DSAStackTy *Stack,
}
namespace {
-class DSAAttrChecker : public StmtVisitor<DSAAttrChecker, void> {
+class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
DSAStackTy *Stack;
Sema &SemaRef;
- bool ErrorFound;
- CapturedStmt *CS;
- llvm::SmallVector<Expr *, 8> ImplicitFirstprivate;
- llvm::SmallVector<Expr *, 8> ImplicitMap;
- llvm::DenseMap<ValueDecl *, Expr *> VarsWithInheritedDSA;
- llvm::DenseSet<ValueDecl *> ImplicitDeclarations;
+ bool ErrorFound = false;
+ CapturedStmt *CS = nullptr;
+ llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
+ llvm::SmallVector<Expr *, 4> ImplicitMap;
+ Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
+ llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
public:
void VisitDeclRefExpr(DeclRefExpr *E) {
@@ -1866,17 +1961,20 @@ public:
if (VD->hasLocalStorage() && !CS->capturesVariable(VD))
return;
- auto DVar = Stack->getTopDSA(VD, false);
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
    // Check if the variable has an explicit DSA set; stop the analysis if so.
if (DVar.RefExpr || !ImplicitDeclarations.insert(VD).second)
return;
    // Skip internally declared static variables, unless they are marked as
    // 'declare target link'.
- if (VD->hasGlobalStorage() && !CS->capturesVariable(VD))
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ isDeclareTargetDeclaration(VD);
+ if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
+ (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
return;
- auto ELoc = E->getExprLoc();
- auto DKind = Stack->getCurrentDirective();
+ SourceLocation ELoc = E->getExprLoc();
+ OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
// The default(none) clause requires that each variable that is referenced
// in the construct, and does not have a predetermined data-sharing
// attribute, must have its data-sharing attribute explicitly determined
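    // For example (sketch, not part of this patch):
    //
    //   void G() {
    //     int X = 0;
    //     #pragma omp parallel default(none) // error: 'X' referenced in the
    //     X++;                               // region but not listed
    //   }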
@@ -1919,7 +2017,7 @@ public:
IsFirstprivate =
IsFirstprivate ||
(VD->getType().getNonReferenceType()->isScalarType() &&
- Stack->getDefaultDMA() != DMA_tofrom_scalar);
+ Stack->getDefaultDMA() != DMA_tofrom_scalar && !Res);
if (IsFirstprivate)
ImplicitFirstprivate.emplace_back(E);
else
@@ -1933,8 +2031,8 @@ public:
// enclosing worksharing or parallel construct may not be accessed in an
// explicit task.
DVar = Stack->hasInnermostDSA(
- VD, [](OpenMPClauseKind C) -> bool { return C == OMPC_reduction; },
- [](OpenMPDirectiveKind K) -> bool {
+ VD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
+ [](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
},
@@ -1942,12 +2040,12 @@ public:
if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
ErrorFound = true;
SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
- ReportOriginalDSA(SemaRef, Stack, VD, DVar);
+ reportOriginalDsa(SemaRef, Stack, VD, DVar);
return;
}
// Define implicit data-sharing attributes for task.
- DVar = Stack->getImplicitDSA(VD, false);
+ DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
!Stack->isLoopControlVariable(VD).first)
ImplicitFirstprivate.push_back(E);
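    // Sketch of the reduction-in-task restriction enforced above (not part of
    // this patch):
    //
    //   int R = 0;
    //   #pragma omp parallel reduction(+ : R)
    //   {
    //     #pragma omp task // error: reduction item of the innermost
    //     R++;             // enclosing parallel accessed in an explicit task
    //   }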
@@ -1962,7 +2060,7 @@ public:
if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
if (!FD)
return;
- auto DVar = Stack->getTopDSA(FD, false);
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
      // Check if the variable has an explicit DSA set; stop the analysis if
      // so.
if (DVar.RefExpr || !ImplicitDeclarations.insert(FD).second)
@@ -1990,14 +2088,14 @@ public:
return;
}
- auto ELoc = E->getExprLoc();
+ SourceLocation ELoc = E->getExprLoc();
// OpenMP [2.9.3.6, Restrictions, p.2]
// A list item that appears in a reduction clause of the innermost
// enclosing worksharing or parallel construct may not be accessed in
// an explicit task.
DVar = Stack->hasInnermostDSA(
- FD, [](OpenMPClauseKind C) -> bool { return C == OMPC_reduction; },
- [](OpenMPDirectiveKind K) -> bool {
+ FD, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
+ [](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) || isOpenMPTeamsDirective(K);
},
@@ -2005,12 +2103,12 @@ public:
if (isOpenMPTaskingDirective(DKind) && DVar.CKind == OMPC_reduction) {
ErrorFound = true;
SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
- ReportOriginalDSA(SemaRef, Stack, FD, DVar);
+ reportOriginalDsa(SemaRef, Stack, FD, DVar);
return;
}
// Define implicit data-sharing attributes for task.
- DVar = Stack->getImplicitDSA(FD, false);
+ DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
!Stack->isLoopControlVariable(FD).first)
ImplicitFirstprivate.push_back(E);
@@ -2018,10 +2116,10 @@ public:
}
if (isOpenMPTargetExecutionDirective(DKind)) {
OMPClauseMappableExprCommon::MappableExprComponentList CurComponents;
- if (!CheckMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
+ if (!checkMapClauseExpressionBase(SemaRef, E, CurComponents, OMPC_map,
/*NoDiagnose=*/true))
return;
- auto *VD = cast<ValueDecl>(
+ const auto *VD = cast<ValueDecl>(
CurComponents.back().getAssociatedDeclaration()->getCanonicalDecl());
if (!Stack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
@@ -2041,8 +2139,8 @@ public:
CCI->getAssociatedExpression())))
return false;
- Decl *CCD = CCI->getAssociatedDeclaration();
- Decl *SCD = SC.getAssociatedDeclaration();
+ const Decl *CCD = CCI->getAssociatedDeclaration();
+ const Decl *SCD = SC.getAssociatedDeclaration();
CCD = CCD ? CCD->getCanonicalDecl() : nullptr;
SCD = SCD ? SCD->getCanonicalDecl() : nullptr;
if (SCD != CCD)
@@ -2055,18 +2153,19 @@ public:
})) {
Visit(E->getBase());
}
- } else
+ } else {
Visit(E->getBase());
+ }
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
- for (auto *C : S->clauses()) {
+ for (OMPClause *C : S->clauses()) {
// Skip analysis of arguments of implicitly defined firstprivate clause
// for task|target directives.
// Skip analysis of arguments of implicitly defined map clause for target
// directives.
if (C && !((isa<OMPFirstprivateClause>(C) || isa<OMPMapClause>(C)) &&
C->isImplicit())) {
- for (auto *CC : C->children()) {
+ for (Stmt *CC : C->children()) {
if (CC)
Visit(CC);
}
@@ -2074,18 +2173,18 @@ public:
}
}
void VisitStmt(Stmt *S) {
- for (auto *C : S->children()) {
+ for (Stmt *C : S->children()) {
if (C && !isa<OMPExecutableDirective>(C))
Visit(C);
}
}
- bool isErrorFound() { return ErrorFound; }
+ bool isErrorFound() const { return ErrorFound; }
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
}
ArrayRef<Expr *> getImplicitMap() const { return ImplicitMap; }
- llvm::DenseMap<ValueDecl *, Expr *> &getVarsWithInheritedDSA() {
+ const Sema::VarsWithInheritedDSAType &getVarsWithInheritedDSA() const {
return VarsWithInheritedDSA;
}
@@ -2103,7 +2202,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType Params[] = {
@@ -2121,15 +2220,37 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+      // Mark this captured region as inlined, because we don't use the
+      // outlined function directly.
+ getCurCapturedRegion()->TheCapturedDecl->addAttr(
+ AlwaysInlineAttr::CreateImplicit(
+ Context, AlwaysInlineAttr::Keyword_forceinline));
Sema::CapturedParamNameType ParamsTarget[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'target' with no implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTarget);
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType KmpInt32PtrTy =
- Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -2141,6 +2262,37 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
ParamsTeamsOrParallel);
break;
}
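  // Conceptual sketch, not part of this patch: a combined construct such as
  //
  //   #pragma omp target parallel for
  //   for (int I = 0; I < N; ++I) ...
  //
  // is now captured as nested regions: an outer task-like region carrying the
  // ".global_tid."/".part_id."/".copy_fn." parameters above (so the target
  // task can be deferred), then 'target', then the 'parallel' region.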
+ case OMPD_target:
+ case OMPD_target_simd: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+      // Mark this captured region as inlined, because we don't use the
+      // outlined function directly.
+ getCurCapturedRegion()->TheCapturedDecl->addAttr(
+ AlwaysInlineAttr::CreateImplicit(
+ Context, AlwaysInlineAttr::Keyword_forceinline));
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ std::make_pair(StringRef(), QualType()));
+ break;
+ }
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
@@ -2154,9 +2306,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_distribute_simd:
case OMPD_ordered:
case OMPD_atomic:
- case OMPD_target_data:
- case OMPD_target:
- case OMPD_target_simd: {
+ case OMPD_target_data: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -2165,17 +2315,21 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
break;
}
case OMPD_task: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", Context.getPointerType(KmpInt32Ty)),
- std::make_pair(".privates.", Context.VoidPtrTy.withConst()),
- std::make_pair(".copy_fn.",
- Context.getPointerType(CopyFnType).withConst()),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -2185,35 +2339,40 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
+ Context, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_taskloop:
case OMPD_taskloop_simd: {
QualType KmpInt32Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
+ Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
+ .withConst();
QualType KmpUInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
+ Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0)
+ .withConst();
QualType KmpInt64Ty =
- Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
+ Context.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1)
+ .withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", Context.getPointerType(KmpInt32Ty)),
- std::make_pair(".privates.",
- Context.VoidPtrTy.withConst().withRestrict()),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
std::make_pair(
".copy_fn.",
Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(".lb.", KmpUInt64Ty),
- std::make_pair(".ub.", KmpUInt64Ty), std::make_pair(".st.", KmpInt64Ty),
+ std::make_pair(".ub.", KmpUInt64Ty),
+ std::make_pair(".st.", KmpInt64Ty),
std::make_pair(".liter.", KmpInt32Ty),
- std::make_pair(".reductions.",
- Context.VoidPtrTy.withConst().withRestrict()),
+ std::make_pair(".reductions.", VoidPtrTy),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
@@ -2222,30 +2381,86 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
+ Context, AlwaysInlineAttr::Keyword_forceinline));
break;
}
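  // Illustrative, not part of this patch: the ".lb."/".ub."/".st."/".liter."
  // parameters mirror the per-task bounds the runtime passes for a taskloop:
  //
  //   #pragma omp taskloop grainsize(4)
  //   for (long I = 0; I < N; ++I)
  //     Work(I); // hypothetical helper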
case OMPD_distribute_parallel_for_simd:
- case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
+ std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+ break;
+ }
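  // Sketch, not part of this patch: ".previous.lb."/".previous.ub." forward
  // the chunk bounds computed by 'distribute' to the inner 'parallel for':
  //
  //   #pragma omp teams distribute parallel for
  //   for (int I = 0; I < N; ++I)
  //     A[I] = I; // hypothetical array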
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+
+ QualType Args[] = {VoidPtrTy};
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
+ std::make_pair(".global_tid.", KmpInt32Ty),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
+ std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ Params);
+      // Mark this captured region as inlined, because we don't use the
+      // outlined function directly.
+ getCurCapturedRegion()->TheCapturedDecl->addAttr(
+ AlwaysInlineAttr::CreateImplicit(
+ Context, AlwaysInlineAttr::Keyword_forceinline));
+ Sema::CapturedParamNameType ParamsTarget[] = {
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ // Start a captured region for 'target' with no implicit parameters.
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ ParamsTarget);
+
+ Sema::CapturedParamNameType ParamsTeams[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType()),
- std::make_pair(".previous.ub.", Context.getSizeType()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
+    // Start a captured region for 'teams', passing the thread-id parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- Params);
+ ParamsTeams);
+
+ Sema::CapturedParamNameType ParamsParallel[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
+ std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ // Start a captured region for 'teams' or 'parallel'. Both regions have
+ // the same implicit parameters.
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ ParamsParallel);
break;
}
+
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
@@ -2261,8 +2476,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Sema::CapturedParamNameType ParamsParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
- std::make_pair(".previous.lb.", Context.getSizeType()),
- std::make_pair(".previous.ub.", Context.getSizeType()),
+ std::make_pair(".previous.lb.", Context.getSizeType().withConst()),
+ std::make_pair(".previous.ub.", Context.getSizeType().withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
// Start a captured region for 'teams' or 'parallel'. Both regions have
@@ -2274,17 +2489,21 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target_update:
case OMPD_target_enter_data:
case OMPD_target_exit_data: {
- QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
- QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()};
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType VoidPtrTy = Context.VoidPtrTy.withConst().withRestrict();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+ QualType Args[] = {VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = true;
QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI);
Sema::CapturedParamNameType Params[] = {
std::make_pair(".global_tid.", KmpInt32Ty),
- std::make_pair(".part_id.", Context.getPointerType(KmpInt32Ty)),
- std::make_pair(".privates.", Context.VoidPtrTy.withConst()),
- std::make_pair(".copy_fn.",
- Context.getPointerType(CopyFnType).withConst()),
+ std::make_pair(".part_id.", KmpInt32PtrTy),
+ std::make_pair(".privates.", VoidPtrTy),
+ std::make_pair(
+ ".copy_fn.",
+ Context.getPointerType(CopyFnType).withConst().withRestrict()),
std::make_pair(".task_t.", Context.VoidPtrTy.withConst()),
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -2294,7 +2513,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange()));
+ Context, AlwaysInlineAttr::Keyword_forceinline));
break;
}
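  // Usage sketch for the directives handled here (not part of this patch):
  //
  //   #pragma omp target enter data map(to : A[0 : N])
  //   Compute(); // hypothetical work using the device copy of A
  //   #pragma omp target exit data map(from : A[0 : N])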
case OMPD_threadprivate:
@@ -2343,7 +2562,7 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
CaptureExpr->getLocStart());
if (!WithInit)
- CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C, SourceRange()));
+ CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
S.CurContext->addHiddenDecl(CED);
S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
return CED;
@@ -2352,12 +2571,11 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
bool WithInit) {
OMPCapturedExprDecl *CD;
- if (auto *VD = S.IsOpenMPCapturedDecl(D)) {
+ if (VarDecl *VD = S.isOpenMPCapturedDecl(D))
CD = cast<OMPCapturedExprDecl>(VD);
- } else {
+ else
CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
/*AsExpression=*/false);
- }
return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
@@ -2395,7 +2613,7 @@ class CaptureRegionUnwinderRAII {
private:
Sema &S;
bool &ErrorFound;
- OpenMPDirectiveKind DKind;
+ OpenMPDirectiveKind DKind = OMPD_unknown;
public:
CaptureRegionUnwinderRAII(Sema &S, bool &ErrorFound,
@@ -2425,16 +2643,16 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
getOpenMPCaptureRegions(CaptureRegions, DSAStack->getCurrentDirective());
OMPOrderedClause *OC = nullptr;
OMPScheduleClause *SC = nullptr;
- SmallVector<OMPLinearClause *, 4> LCs;
- SmallVector<OMPClauseWithPreInit *, 8> PICs;
+ SmallVector<const OMPLinearClause *, 4> LCs;
+ SmallVector<const OMPClauseWithPreInit *, 4> PICs;
// This is required for proper codegen.
- for (auto *Clause : Clauses) {
+ for (OMPClause *Clause : Clauses) {
if (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
Clause->getClauseKind() == OMPC_in_reduction) {
// Capture taskgroup task_reduction descriptors inside the tasking regions
// with the corresponding in_reduction items.
auto *IRC = cast<OMPInReductionClause>(Clause);
- for (auto *E : IRC->taskgroup_descriptors())
+ for (Expr *E : IRC->taskgroup_descriptors())
if (E)
MarkDeclarationsReferencedInExpr(E);
}
@@ -2445,7 +2663,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
Clause->getClauseKind() == OMPC_copyin)) {
DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin);
// Mark all variables in private list clauses as used in inner region.
- for (auto *VarRef : Clause->children()) {
+ for (Stmt *VarRef : Clause->children()) {
if (auto *E = cast_or_null<Expr>(VarRef)) {
MarkDeclarationsReferencedInExpr(E);
}
@@ -2456,7 +2674,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
if (auto *C = OMPClauseWithPreInit::get(Clause))
PICs.push_back(C);
if (auto *C = OMPClauseWithPostUpdate::get(Clause)) {
- if (auto *E = C->getPostUpdateExpr())
+ if (Expr *E = C->getPostUpdateExpr())
MarkDeclarationsReferencedInExpr(E);
}
}
@@ -2483,7 +2701,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ErrorFound = true;
}
if (!LCs.empty() && OC && OC->getNumForLoops()) {
- for (auto *C : LCs) {
+ for (const OMPLinearClause *C : LCs) {
Diag(C->getLocStart(), diag::err_omp_linear_ordered)
<< SourceRange(OC->getLocStart(), OC->getLocEnd());
}
@@ -2505,7 +2723,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// Required for proper codegen of combined directives.
// TODO: add processing for other clauses.
if (ThisCaptureRegion != OMPD_unknown) {
- for (auto *C : PICs) {
+ for (const clang::OMPClauseWithPreInit *C : PICs) {
OpenMPDirectiveKind CaptureRegion = C->getCaptureRegion();
// Find the particular capture region for the clause if the
// directive is a combined one with multiple capture regions.
@@ -2515,7 +2733,7 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
if (CaptureRegion == ThisCaptureRegion ||
CaptureRegion == OMPD_unknown) {
if (auto *DS = cast_or_null<DeclStmt>(C->getPreInitStmt())) {
- for (auto *D : DS->decls())
+ for (Decl *D : DS->decls())
MarkVariableReferenced(D->getLocation(), cast<VarDecl>(D));
}
}
@@ -2542,14 +2760,14 @@ static bool checkCancelRegion(Sema &SemaRef, OpenMPDirectiveKind CurrentRegion,
return true;
}
-static bool checkNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
+static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
OpenMPDirectiveKind CurrentRegion,
const DeclarationNameInfo &CurrentName,
OpenMPDirectiveKind CancelRegion,
SourceLocation StartLoc) {
if (Stack->getCurScope()) {
- auto ParentRegion = Stack->getParentDirective();
- auto OffendingRegion = ParentRegion;
+ OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
+ OpenMPDirectiveKind OffendingRegion = ParentRegion;
bool NestingProhibited = false;
bool CloseNesting = true;
bool OrphanSeen = false;
@@ -2642,12 +2860,12 @@ static bool checkNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
bool DeadLock = Stack->hasDirective(
[CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
const DeclarationNameInfo &DNI,
- SourceLocation Loc) -> bool {
+ SourceLocation Loc) {
if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
PreviousCriticalLoc = Loc;
return true;
- } else
- return false;
+ }
+ return false;
},
false /* skip top directive */);
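    // Sketch of the self-nesting this check rejects (not part of this patch):
    //
    //   #pragma omp critical(L)
    //   {
    //     #pragma omp critical(L) // error: would deadlock on the same name
    //     ;
    //   }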
if (DeadLock) {
@@ -2732,12 +2950,12 @@ static bool checkNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// target region, the behavior is unspecified.
NestingProhibited = Stack->hasDirective(
[&OffendingRegion](OpenMPDirectiveKind K, const DeclarationNameInfo &,
- SourceLocation) -> bool {
+ SourceLocation) {
if (isOpenMPTargetExecutionDirective(K)) {
OffendingRegion = K;
return true;
- } else
- return false;
+ }
+ return false;
},
false /* don't skip top directive */);
CloseNesting = false;
@@ -2765,7 +2983,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
SmallVector<const OMPIfClause *, OMPC_unknown + 1> FoundNameModifiers(
OMPD_unknown + 1);
SmallVector<SourceLocation, 4> NameModifierLoc;
- for (const auto *C : Clauses) {
+ for (const OMPClause *C : Clauses) {
if (const auto *IC = dyn_cast_or_null<OMPIfClause>(C)) {
// At most one if clause without a directive-name-modifier can appear on
// the directive.
@@ -2831,7 +3049,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
diag::err_omp_unnamed_if_clause)
<< (TotalAllowedNum > 1) << Values;
}
- for (auto Loc : NameModifierLoc) {
+ for (SourceLocation Loc : NameModifierLoc) {
S.Diag(Loc, diag::note_omp_previous_named_if_clause);
}
ErrorFound = true;
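    // Sketch (not part of this patch): at most one 'if' may omit the
    // directive-name-modifier, and each modifier may appear at most once:
    //
    //   #pragma omp target parallel if(target : C1) if(parallel : C2) // OK
    //   #pragma omp target parallel if(C1) if(C2) // error: unnamed repeated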
@@ -2851,7 +3069,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
return StmtError();
llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
- llvm::DenseMap<ValueDecl *, Expr *> VarsWithInheritedDSA;
+ VarsWithInheritedDSAType VarsWithInheritedDSA;
bool ErrorFound = false;
ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
if (AStmt && !CurContext->isDependentContext()) {
@@ -2875,9 +3093,9 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
SmallVector<Expr *, 4> ImplicitMaps(DSAChecker.getImplicitMap().begin(),
DSAChecker.getImplicitMap().end());
// Mark taskgroup task_reduction descriptors as implicitly firstprivate.
- for (auto *C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *IRC = dyn_cast<OMPInReductionClause>(C)) {
- for (auto *E : IRC->taskgroup_descriptors())
+ for (Expr *E : IRC->taskgroup_descriptors())
if (E)
ImplicitFirstprivates.emplace_back(E);
}
@@ -2889,8 +3107,9 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ClausesWithImplicit.push_back(Implicit);
ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() !=
ImplicitFirstprivates.size();
- } else
+ } else {
ErrorFound = true;
+ }
}
if (!ImplicitMaps.empty()) {
if (OMPClause *Implicit = ActOnOpenMPMapClause(
@@ -2900,8 +3119,9 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ClausesWithImplicit.emplace_back(Implicit);
ErrorFound |=
cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
- } else
+ } else {
ErrorFound = true;
+ }
}
}
@@ -3153,7 +3373,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
llvm_unreachable("Unknown OpenMP directive");
}
- for (auto P : VarsWithInheritedDSA) {
+ for (const auto &P : VarsWithInheritedDSA) {
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
}
@@ -3183,7 +3403,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd);
return DG;
}
- auto *ADecl = DG.get().getSingleDecl();
+ Decl *ADecl = DG.get().getSingleDecl();
if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
ADecl = FTD->getTemplatedDecl();
@@ -3205,16 +3425,16 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
// The uniform clause declares one or more arguments to have an invariant
// value for all concurrent invocations of the function in the execution of a
// single SIMD loop.
- llvm::DenseMap<Decl *, Expr *> UniformedArgs;
- Expr *UniformedLinearThis = nullptr;
- for (auto *E : Uniforms) {
+ llvm::DenseMap<const Decl *, const Expr *> UniformedArgs;
+ const Expr *UniformedLinearThis = nullptr;
+ for (const Expr *E : Uniforms) {
E = E->IgnoreParenImpCasts();
- if (auto *DRE = dyn_cast<DeclRefExpr>(E))
- if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl()))
if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
FD->getParamDecl(PVD->getFunctionScopeIndex())
->getCanonicalDecl() == PVD->getCanonicalDecl()) {
- UniformedArgs.insert(std::make_pair(PVD->getCanonicalDecl(), E));
+ UniformedArgs.try_emplace(PVD->getCanonicalDecl(), E);
continue;
}
if (isa<CXXThisExpr>(E)) {
@@ -3232,13 +3452,13 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
// function in any of the linear, aligned, or uniform clauses.
// The type of list items appearing in the aligned clause must be array,
// pointer, reference to array, or reference to pointer.
- llvm::DenseMap<Decl *, Expr *> AlignedArgs;
- Expr *AlignedThis = nullptr;
- for (auto *E : Aligneds) {
+ llvm::DenseMap<const Decl *, const Expr *> AlignedArgs;
+ const Expr *AlignedThis = nullptr;
+ for (const Expr *E : Aligneds) {
E = E->IgnoreParenImpCasts();
- if (auto *DRE = dyn_cast<DeclRefExpr>(E))
- if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
- auto *CanonPVD = PVD->getCanonicalDecl();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ const VarDecl *CanonPVD = PVD->getCanonicalDecl();
if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
FD->getParamDecl(PVD->getFunctionScopeIndex())
->getCanonicalDecl() == CanonPVD) {
@@ -3283,8 +3503,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
// positive integer expression. If no optional parameter is specified,
// implementation-defined default alignments for SIMD instructions on the
// target platforms are assumed.
- SmallVector<Expr *, 4> NewAligns;
- for (auto *E : Alignments) {
+ SmallVector<const Expr *, 4> NewAligns;
+ for (Expr *E : Alignments) {
ExprResult Align;
if (E)
Align = VerifyPositiveIntegerConstantInClause(E, OMPC_aligned);
@@ -3299,16 +3519,16 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
// When a linear-step expression is specified in a linear clause it must be
// either a constant integer expression or an integer-typed parameter that is
// specified in a uniform clause on the directive.
- llvm::DenseMap<Decl *, Expr *> LinearArgs;
+ llvm::DenseMap<const Decl *, const Expr *> LinearArgs;
const bool IsUniformedThis = UniformedLinearThis != nullptr;
auto MI = LinModifiers.begin();
- for (auto *E : Linears) {
+ for (const Expr *E : Linears) {
auto LinKind = static_cast<OpenMPLinearClauseKind>(*MI);
++MI;
E = E->IgnoreParenImpCasts();
- if (auto *DRE = dyn_cast<DeclRefExpr>(E))
- if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
- auto *CanonPVD = PVD->getCanonicalDecl();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ const VarDecl *CanonPVD = PVD->getCanonicalDecl();
if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
FD->getParamDecl(PVD->getFunctionScopeIndex())
->getCanonicalDecl() == CanonPVD) {
@@ -3368,25 +3588,25 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareSimdDirective(
Expr *Step = nullptr;
Expr *NewStep = nullptr;
SmallVector<Expr *, 4> NewSteps;
- for (auto *E : Steps) {
+ for (Expr *E : Steps) {
// Skip the same step expression; it was checked already.
if (Step == E || !E) {
NewSteps.push_back(E ? NewStep : nullptr);
continue;
}
Step = E;
- if (auto *DRE = dyn_cast<DeclRefExpr>(Step))
- if (auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
- auto *CanonPVD = PVD->getCanonicalDecl();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Step))
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ const VarDecl *CanonPVD = PVD->getCanonicalDecl();
if (UniformedArgs.count(CanonPVD) == 0) {
Diag(Step->getExprLoc(), diag::err_omp_expected_uniform_param)
<< Step->getSourceRange();
} else if (E->isValueDependent() || E->isTypeDependent() ||
E->isInstantiationDependent() ||
E->containsUnexpandedParameterPack() ||
- CanonPVD->getType()->hasIntegerRepresentation())
+ CanonPVD->getType()->hasIntegerRepresentation()) {
NewSteps.push_back(Step);
- else {
+ } else {
Diag(Step->getExprLoc(), diag::err_omp_expected_int_param)
<< Step->getSourceRange();
}
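For orientation, this is what the clauses validated above look like in user code (an illustrative sketch, not taken from this patch): `p` is uniform (invariant across concurrent invocations), `i` is linear, `p` is asserted aligned, and a linear step may itself be a uniform integer parameter, which is exactly the case the step checks above handle.

  #pragma omp declare simd uniform(p) linear(i : 1) aligned(p : 64)
  float scale(float *p, int i);          // p invariant, i linear, p aligned

  #pragma omp declare simd uniform(s) linear(i : s)
  float gather(float *p, int i, int s);  // linear step s must be uniform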
@@ -3421,7 +3641,7 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -3429,108 +3649,110 @@ StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
DSAStack->isCancelRegion());
}
namespace {
-/// \brief Helper class for checking canonical form of the OpenMP loops and
+/// Helper class for checking canonical form of the OpenMP loops and
/// extracting iteration space of each loop in the loop nest, that will be used
/// for IR generation.
class OpenMPIterationSpaceChecker {
- /// \brief Reference to Sema.
+ /// Reference to Sema.
Sema &SemaRef;
- /// \brief A location for diagnostics (when there is no some better location).
+ /// A location for diagnostics (when there is no better location).
SourceLocation DefaultLoc;
- /// \brief A location for diagnostics (when increment is not compatible).
+ /// A location for diagnostics (when increment is not compatible).
SourceLocation ConditionLoc;
- /// \brief A source location for referring to loop init later.
+ /// A source range for referring to the loop init later.
SourceRange InitSrcRange;
- /// \brief A source location for referring to condition later.
+ /// A source range for referring to the loop condition later.
SourceRange ConditionSrcRange;
- /// \brief A source location for referring to increment later.
+ /// A source range for referring to the loop increment later.
SourceRange IncrementSrcRange;
- /// \brief Loop variable.
+ /// Loop variable.
ValueDecl *LCDecl = nullptr;
- /// \brief Reference to loop variable.
+ /// Reference to loop variable.
Expr *LCRef = nullptr;
- /// \brief Lower bound (initializer for the var).
+ /// Lower bound (initializer for the var).
Expr *LB = nullptr;
- /// \brief Upper bound.
+ /// Upper bound.
Expr *UB = nullptr;
- /// \brief Loop step (increment).
+ /// Loop step (increment).
Expr *Step = nullptr;
- /// \brief This flag is true when condition is one of:
+ /// This flag is true when condition is one of:
/// Var < UB
/// Var <= UB
/// UB > Var
/// UB >= Var
bool TestIsLessOp = false;
- /// \brief This flag is true when condition is strict ( < or > ).
+ /// This flag is true when condition is strict ( < or > ).
bool TestIsStrictOp = false;
- /// \brief This flag is true when step is subtracted on each iteration.
+ /// This flag is true when step is subtracted on each iteration.
bool SubtractStep = false;
public:
OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc)
: SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
- /// \brief Check init-expr for canonical loop form and save loop counter
+ /// Check init-expr for canonical loop form and save loop counter
/// variable - #Var and its initialization value - #LB.
- bool CheckInit(Stmt *S, bool EmitDiags = true);
- /// \brief Check test-expr for canonical form, save upper-bound (#UB), flags
+ bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
+ /// Check test-expr for canonical form, save upper-bound (#UB), flags
/// for less/greater and for strict/non-strict comparison.
- bool CheckCond(Expr *S);
- /// \brief Check incr-expr for canonical loop form and return true if it
+ bool checkAndSetCond(Expr *S);
+ /// Check incr-expr for canonical loop form and return true if it
/// does not conform, otherwise save loop step (#Step).
- bool CheckInc(Expr *S);
- /// \brief Return the loop counter variable.
- ValueDecl *GetLoopDecl() const { return LCDecl; }
- /// \brief Return the reference expression to loop counter variable.
- Expr *GetLoopDeclRefExpr() const { return LCRef; }
- /// \brief Source range of the loop init.
- SourceRange GetInitSrcRange() const { return InitSrcRange; }
- /// \brief Source range of the loop condition.
- SourceRange GetConditionSrcRange() const { return ConditionSrcRange; }
- /// \brief Source range of the loop increment.
- SourceRange GetIncrementSrcRange() const { return IncrementSrcRange; }
- /// \brief True if the step should be subtracted.
- bool ShouldSubtractStep() const { return SubtractStep; }
- /// \brief Build the expression to calculate the number of iterations.
+ bool checkAndSetInc(Expr *S);
+ /// Return the loop counter variable.
+ ValueDecl *getLoopDecl() const { return LCDecl; }
+ /// Return the reference expression to loop counter variable.
+ Expr *getLoopDeclRefExpr() const { return LCRef; }
+ /// Source range of the loop init.
+ SourceRange getInitSrcRange() const { return InitSrcRange; }
+ /// Source range of the loop condition.
+ SourceRange getConditionSrcRange() const { return ConditionSrcRange; }
+ /// Source range of the loop increment.
+ SourceRange getIncrementSrcRange() const { return IncrementSrcRange; }
+ /// True if the step should be subtracted.
+ bool shouldSubtractStep() const { return SubtractStep; }
+ /// Build the expression to calculate the number of iterations.
+ Expr *buildNumIterations(
+ Scope *S, const bool LimitedType,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
+ /// Build the precondition expression for the loops.
Expr *
- BuildNumIterations(Scope *S, const bool LimitedType,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) const;
- /// \brief Build the precondition expression for the loops.
- Expr *BuildPreCond(Scope *S, Expr *Cond,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) const;
- /// \brief Build reference expression to the counter be used for codegen.
- DeclRefExpr *BuildCounterVar(llvm::MapVector<Expr *, DeclRefExpr *> &Captures,
- DSAStackTy &DSA) const;
- /// \brief Build reference expression to the private counter be used for
+ buildPreCond(Scope *S, Expr *Cond,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const;
+ /// Build reference expression to the counter to be used for codegen.
+ DeclRefExpr *
+ buildCounterVar(llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
+ DSAStackTy &DSA) const;
+ /// Build reference expression to the private counter to be used for
/// codegen.
- Expr *BuildPrivateCounterVar() const;
- /// \brief Build initialization of the counter be used for codegen.
- Expr *BuildCounterInit() const;
- /// \brief Build step of the counter be used for codegen.
- Expr *BuildCounterStep() const;
- /// \brief Return true if any expression is dependent.
- bool Dependent() const;
+ Expr *buildPrivateCounterVar() const;
+ /// Build initialization of the counter to be used for codegen.
+ Expr *buildCounterInit() const;
+ /// Build step of the counter to be used for codegen.
+ Expr *buildCounterStep() const;
+ /// Return true if any expression is dependent.
+ bool dependent() const;
private:
- /// \brief Check the right-hand side of an assignment in the increment
+ /// Check the right-hand side of an assignment in the increment
/// expression.
- bool CheckIncRHS(Expr *RHS);
- /// \brief Helper to set loop counter variable and its initializer.
- bool SetLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
- /// \brief Helper to set upper bound.
- bool SetUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
+ bool checkAndSetIncRHS(Expr *RHS);
+ /// Helper to set loop counter variable and its initializer.
+ bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
+ /// Helper to set upper bound.
+ bool setUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
SourceLocation SL);
- /// \brief Helper to set loop increment.
- bool SetStep(Expr *NewStep, bool Subtract);
+ /// Helper to set loop increment.
+ bool setStep(Expr *NewStep, bool Subtract);
};
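As a concrete reference for the checker above (an illustrative example, not part of the patch), a loop the renamed checkAndSet* methods accept has the canonical init; test; incr shape, with each part feeding one of the fields documented in the class:

  #pragma omp simd
  for (int i = 0; i < n; i += 2)  // init -> LB; test -> UB, TestIsLessOp,
    a[i] = b[i];                  // TestIsStrictOp; incr -> Step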
-bool OpenMPIterationSpaceChecker::Dependent() const {
+bool OpenMPIterationSpaceChecker::dependent() const {
if (!LCDecl) {
assert(!LB && !UB && !Step);
return false;
@@ -3540,7 +3762,7 @@ bool OpenMPIterationSpaceChecker::Dependent() const {
(Step && Step->isValueDependent());
}
-bool OpenMPIterationSpaceChecker::SetLCDeclAndLB(ValueDecl *NewLCDecl,
+bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
Expr *NewLCRefExpr,
Expr *NewLB) {
// State consistency checking to ensure correct usage.
@@ -3560,7 +3782,7 @@ bool OpenMPIterationSpaceChecker::SetLCDeclAndLB(ValueDecl *NewLCDecl,
return false;
}
-bool OpenMPIterationSpaceChecker::SetUB(Expr *NewUB, bool LessOp, bool StrictOp,
+bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, bool LessOp, bool StrictOp,
SourceRange SR, SourceLocation SL) {
// State consistency checking to ensure correct usage.
assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
@@ -3575,7 +3797,7 @@ bool OpenMPIterationSpaceChecker::SetUB(Expr *NewUB, bool LessOp, bool StrictOp,
return false;
}
-bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
+bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
// State consistency checking to ensure correct usage.
assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
if (!NewStep)
@@ -3632,7 +3854,7 @@ bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
return false;
}
-bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
+bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
// Check init-expr for canonical loop form and save loop counter
// variable - #Var and its initialization value - #LB.
// OpenMP [2.6] Canonical loop form. init-expr may be one of the following:
@@ -3656,17 +3878,17 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
S = E->IgnoreParens();
if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->getOpcode() == BO_Assign) {
- auto *LHS = BO->getLHS()->IgnoreParens();
+ Expr *LHS = BO->getLHS()->IgnoreParens();
if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
- return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
- return SetLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
+ return setLCDeclAndLB(DRE->getDecl(), DRE, BO->getRHS());
}
if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
if (ME->isArrow() &&
isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
- return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
}
}
} else if (auto *DS = dyn_cast<DeclStmt>(S)) {
@@ -3678,28 +3900,28 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
SemaRef.Diag(S->getLocStart(),
diag::ext_omp_loop_not_canonical_init)
<< S->getSourceRange();
- return SetLCDeclAndLB(Var, nullptr, Var->getInit());
+ return setLCDeclAndLB(Var, nullptr, Var->getInit());
}
}
}
} else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getOperator() == OO_Equal) {
- auto *LHS = CE->getArg(0);
+ Expr *LHS = CE->getArg(0);
if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
- return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
- return SetLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
+ return setLCDeclAndLB(DRE->getDecl(), DRE, CE->getArg(1));
}
if (auto *ME = dyn_cast<MemberExpr>(LHS)) {
if (ME->isArrow() &&
isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
- return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
+ return setLCDeclAndLB(ME->getMemberDecl(), ME, CE->getArg(1));
}
}
}
- if (Dependent() || SemaRef.CurContext->isDependentContext())
+ if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
if (EmitDiags) {
SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init)
@@ -3708,29 +3930,29 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
return true;
}
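A sketch of the init-expr shapes the rewritten checkAndSetInit recognizes, per the branches above (assignment through a BinaryOperator or an overloaded operator=, or a single initialized declaration):

  // i = 0;            -> BinaryOperator with BO_Assign
  // int i = 0;        -> DeclStmt declaring one initialized VarDecl
  // it = v.begin();   -> CXXOperatorCallExpr with OO_Equal (C++ iterators)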
-/// \brief Ignore parenthesizes, implicit casts, copy constructor and return the
+/// Ignore parentheses, implicit casts, and copy constructors, and return the
/// variable (which may be the loop variable) if possible.
-static const ValueDecl *GetInitLCDecl(Expr *E) {
+static const ValueDecl *getInitLCDecl(const Expr *E) {
if (!E)
return nullptr;
E = getExprAsWritten(E);
- if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
+ if (const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E))
if (const CXXConstructorDecl *Ctor = CE->getConstructor())
if ((Ctor->isCopyOrMoveConstructor() ||
Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) &&
CE->getNumArgs() > 0 && CE->getArg(0) != nullptr)
E = CE->getArg(0)->IgnoreParenImpCasts();
- if (auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
- if (auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
return getCanonicalDecl(VD);
}
- if (auto *ME = dyn_cast_or_null<MemberExpr>(E))
+ if (const auto *ME = dyn_cast_or_null<MemberExpr>(E))
if (ME->isArrow() && isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()))
return getCanonicalDecl(ME->getMemberDecl());
return nullptr;
}
-bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
+bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
// Check test-expr for canonical form, save upper-bound UB, flags for
// less/greater and for strict/non-strict comparison.
// OpenMP [2.6] Canonical loop form. Test-expr may be one of the following:
@@ -3745,13 +3967,13 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
SourceLocation CondLoc = S->getLocStart();
if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->isRelationalOp()) {
- if (GetInitLCDecl(BO->getLHS()) == LCDecl)
- return SetUB(BO->getRHS(),
+ if (getInitLCDecl(BO->getLHS()) == LCDecl)
+ return setUB(BO->getRHS(),
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
BO->getSourceRange(), BO->getOperatorLoc());
- if (GetInitLCDecl(BO->getRHS()) == LCDecl)
- return SetUB(BO->getLHS(),
+ if (getInitLCDecl(BO->getRHS()) == LCDecl)
+ return setUB(BO->getLHS(),
(BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
BO->getSourceRange(), BO->getOperatorLoc());
@@ -3764,12 +3986,12 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
case OO_GreaterEqual:
case OO_Less:
case OO_LessEqual:
- if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return SetUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
+ if (getInitLCDecl(CE->getArg(0)) == LCDecl)
+ return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
CE->getOperatorLoc());
- if (GetInitLCDecl(CE->getArg(1)) == LCDecl)
- return SetUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
+ if (getInitLCDecl(CE->getArg(1)) == LCDecl)
+ return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
CE->getOperatorLoc());
break;
@@ -3778,14 +4000,14 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
}
}
}
- if (Dependent() || SemaRef.CurContext->isDependentContext())
+ if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
<< S->getSourceRange() << LCDecl;
return true;
}
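Illustrative test-expr forms accepted by checkAndSetCond; the counter may appear on either side, and setUB records the direction and strictness flags accordingly:

  // i < n;   i <= n;   -> counter on the LHS, TestIsLessOp = true
  // n > i;   n >= i;   -> counter on the RHS, operands swapped by setUB
  // i > n;   i >= n;   -> descending loops, TestIsLessOp = false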
-bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
+bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
// RHS of canonical loop form increment can be:
// var + incr
// incr + var
@@ -3795,28 +4017,28 @@ bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
if (BO->isAdditiveOp()) {
bool IsAdd = BO->getOpcode() == BO_Add;
- if (GetInitLCDecl(BO->getLHS()) == LCDecl)
- return SetStep(BO->getRHS(), !IsAdd);
- if (IsAdd && GetInitLCDecl(BO->getRHS()) == LCDecl)
- return SetStep(BO->getLHS(), false);
+ if (getInitLCDecl(BO->getLHS()) == LCDecl)
+ return setStep(BO->getRHS(), !IsAdd);
+ if (IsAdd && getInitLCDecl(BO->getRHS()) == LCDecl)
+ return setStep(BO->getLHS(), /*Subtract=*/false);
}
} else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
bool IsAdd = CE->getOperator() == OO_Plus;
if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
- if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return SetStep(CE->getArg(1), !IsAdd);
- if (IsAdd && GetInitLCDecl(CE->getArg(1)) == LCDecl)
- return SetStep(CE->getArg(0), false);
+ if (getInitLCDecl(CE->getArg(0)) == LCDecl)
+ return setStep(CE->getArg(1), !IsAdd);
+ if (IsAdd && getInitLCDecl(CE->getArg(1)) == LCDecl)
+ return setStep(CE->getArg(0), /*Subtract=*/false);
}
}
- if (Dependent() || SemaRef.CurContext->isDependentContext())
+ if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
SemaRef.Diag(RHS->getLocStart(), diag::err_omp_loop_not_canonical_incr)
<< RHS->getSourceRange() << LCDecl;
return true;
}
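Taken together with checkAndSetInc below, the increment shapes that end up in setStep look like this (a sketch; `st` stands for any step expression):

  // ++i;  i++;  --i;  i--;    -> step becomes +1 or -1
  // i += st;  i -= st;        -> compound assignment
  // i = i + st;  i = st + i;  -> plain assignment, RHS handled above
  // i = i - st;               -> subtraction only with the counter first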
-bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
+bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
// Check incr-expr for canonical loop form and return true if it
// does not conform.
// OpenMP [2.6] Canonical loop form. incr-expr may be one of the following:
@@ -3842,22 +4064,22 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
S = S->IgnoreParens();
if (auto *UO = dyn_cast<UnaryOperator>(S)) {
if (UO->isIncrementDecrementOp() &&
- GetInitLCDecl(UO->getSubExpr()) == LCDecl)
- return SetStep(SemaRef
+ getInitLCDecl(UO->getSubExpr()) == LCDecl)
+ return setStep(SemaRef
.ActOnIntegerConstant(UO->getLocStart(),
(UO->isDecrementOp() ? -1 : 1))
.get(),
- false);
+ /*Subtract=*/false);
} else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
switch (BO->getOpcode()) {
case BO_AddAssign:
case BO_SubAssign:
- if (GetInitLCDecl(BO->getLHS()) == LCDecl)
- return SetStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
+ if (getInitLCDecl(BO->getLHS()) == LCDecl)
+ return setStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign);
break;
case BO_Assign:
- if (GetInitLCDecl(BO->getLHS()) == LCDecl)
- return CheckIncRHS(BO->getRHS());
+ if (getInitLCDecl(BO->getLHS()) == LCDecl)
+ return checkAndSetIncRHS(BO->getRHS());
break;
default:
break;
@@ -3866,28 +4088,28 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
switch (CE->getOperator()) {
case OO_PlusPlus:
case OO_MinusMinus:
- if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return SetStep(SemaRef
+ if (getInitLCDecl(CE->getArg(0)) == LCDecl)
+ return setStep(SemaRef
.ActOnIntegerConstant(
CE->getLocStart(),
((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
.get(),
- false);
+ /*Subtract=*/false);
break;
case OO_PlusEqual:
case OO_MinusEqual:
- if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return SetStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
+ if (getInitLCDecl(CE->getArg(0)) == LCDecl)
+ return setStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual);
break;
case OO_Equal:
- if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return CheckIncRHS(CE->getArg(1));
+ if (getInitLCDecl(CE->getArg(0)) == LCDecl)
+ return checkAndSetIncRHS(CE->getArg(1));
break;
default:
break;
}
}
- if (Dependent() || SemaRef.CurContext->isDependentContext())
+ if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_incr)
<< S->getSourceRange() << LCDecl;
@@ -3896,7 +4118,7 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
static ExprResult
tryBuildCapture(Sema &SemaRef, Expr *Capture,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
if (SemaRef.CurContext->isDependentContext())
return ExprResult(Capture);
if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
@@ -3912,17 +4134,17 @@ tryBuildCapture(Sema &SemaRef, Expr *Capture,
return Res;
}
-/// \brief Build the expression to calculate the number of iterations.
-Expr *OpenMPIterationSpaceChecker::BuildNumIterations(
+/// Build the expression to calculate the number of iterations.
+Expr *OpenMPIterationSpaceChecker::buildNumIterations(
Scope *S, const bool LimitedType,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) const {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
ExprResult Diff;
- auto VarType = LCDecl->getType().getNonReferenceType();
+ QualType VarType = LCDecl->getType().getNonReferenceType();
if (VarType->isIntegerType() || VarType->isPointerType() ||
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
- auto *UBExpr = TestIsLessOp ? UB : LB;
- auto *LBExpr = TestIsLessOp ? LB : UB;
+ Expr *UBExpr = TestIsLessOp ? UB : LB;
+ Expr *LBExpr = TestIsLessOp ? LB : UB;
Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
if (!Upper || !Lower)
@@ -3951,7 +4173,7 @@ Expr *OpenMPIterationSpaceChecker::BuildNumIterations(
return nullptr;
// Upper - Lower [- 1] + Step
- auto NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
if (!NewStep.isUsable())
return nullptr;
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get());
@@ -3970,7 +4192,7 @@ Expr *OpenMPIterationSpaceChecker::BuildNumIterations(
// OpenMP runtime requires 32-bit or 64-bit loop variables.
QualType Type = Diff.get()->getType();
- auto &C = SemaRef.Context;
+ ASTContext &C = SemaRef.Context;
bool UseVarType = VarType->hasIntegerRepresentation() &&
C.getTypeSize(Type) > C.getTypeSize(VarType);
if (!Type->isIntegerType() || UseVarType) {
@@ -4009,22 +4231,23 @@ Expr *OpenMPIterationSpaceChecker::BuildNumIterations(
return Diff.get();
}
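A worked example of the count computed by buildNumIterations; the final division by the step happens later in the same routine, outside the lines shown in this hunk:

  // for (i = 0; i < 10; i += 3):
  //   Upper - Lower         = 10
  //   - 1 (strict test)     =  9
  //   + Step                = 12
  //   / Step                =  4 iterations (i = 0, 3, 6, 9)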
-Expr *OpenMPIterationSpaceChecker::BuildPreCond(
+Expr *OpenMPIterationSpaceChecker::buildPreCond(
Scope *S, Expr *Cond,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) const {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) const {
// Try to build LB <op> UB, where <op> is <, >, <=, or >=.
bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics();
SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
- auto NewLB = tryBuildCapture(SemaRef, LB, Captures);
- auto NewUB = tryBuildCapture(SemaRef, UB, Captures);
+ ExprResult NewLB = tryBuildCapture(SemaRef, LB, Captures);
+ ExprResult NewUB = tryBuildCapture(SemaRef, UB, Captures);
if (!NewLB.isUsable() || !NewUB.isUsable())
return nullptr;
- auto CondExpr = SemaRef.BuildBinOp(
- S, DefaultLoc, TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
- : (TestIsStrictOp ? BO_GT : BO_GE),
- NewLB.get(), NewUB.get());
+ ExprResult CondExpr =
+ SemaRef.BuildBinOp(S, DefaultLoc,
+ TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
+ : (TestIsStrictOp ? BO_GT : BO_GE),
+ NewLB.get(), NewUB.get());
if (CondExpr.isUsable()) {
if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
SemaRef.Context.BoolTy))
@@ -4037,15 +4260,16 @@ Expr *OpenMPIterationSpaceChecker::BuildPreCond(
return CondExpr.isUsable() ? CondExpr.get() : Cond;
}
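For example (illustrative), the `LB <op> UB` precondition built here guards that the loop runs at least once:

  // for (i = 0; i < n; ++i)   ->  precondition: 0 < n
  // for (i = n; i >= 0; --i)  ->  precondition: n >= 0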
-/// \brief Build reference expression to the counter be used for codegen.
-DeclRefExpr *OpenMPIterationSpaceChecker::BuildCounterVar(
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures, DSAStackTy &DSA) const {
+/// Build reference expression to the counter to be used for codegen.
+DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, DSAStackTy &DSA) const {
auto *VD = dyn_cast<VarDecl>(LCDecl);
if (!VD) {
- VD = SemaRef.IsOpenMPCapturedDecl(LCDecl);
- auto *Ref = buildDeclRefExpr(
+ VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
+ DeclRefExpr *Ref = buildDeclRefExpr(
SemaRef, VD, VD->getType().getNonReferenceType(), DefaultLoc);
- DSAStackTy::DSAVarData Data = DSA.getTopDSA(LCDecl, /*FromParent=*/false);
+ const DSAStackTy::DSAVarData Data =
+ DSA.getTopDSA(LCDecl, /*FromParent=*/false);
// If the loop control decl is explicitly marked as private, do not mark it
// as captured again.
if (!isOpenMPPrivate(Data.CKind) || !Data.RefExpr)
@@ -4056,12 +4280,15 @@ DeclRefExpr *OpenMPIterationSpaceChecker::BuildCounterVar(
DefaultLoc);
}
-Expr *OpenMPIterationSpaceChecker::BuildPrivateCounterVar() const {
+Expr *OpenMPIterationSpaceChecker::buildPrivateCounterVar() const {
if (LCDecl && !LCDecl->isInvalidDecl()) {
- auto Type = LCDecl->getType().getNonReferenceType();
- auto *PrivateVar =
- buildVarDecl(SemaRef, DefaultLoc, Type, LCDecl->getName(),
- LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr);
+ QualType Type = LCDecl->getType().getNonReferenceType();
+ VarDecl *PrivateVar = buildVarDecl(
+ SemaRef, DefaultLoc, Type, LCDecl->getName(),
+ LCDecl->hasAttrs() ? &LCDecl->getAttrs() : nullptr,
+ isa<VarDecl>(LCDecl)
+ ? buildDeclRefExpr(SemaRef, cast<VarDecl>(LCDecl), Type, DefaultLoc)
+ : nullptr);
if (PrivateVar->isInvalidDecl())
return nullptr;
return buildDeclRefExpr(SemaRef, PrivateVar, Type, DefaultLoc);
@@ -4069,35 +4296,35 @@ Expr *OpenMPIterationSpaceChecker::BuildPrivateCounterVar() const {
return nullptr;
}
-/// \brief Build initialization of the counter to be used for codegen.
-Expr *OpenMPIterationSpaceChecker::BuildCounterInit() const { return LB; }
+/// Build initialization of the counter to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::buildCounterInit() const { return LB; }
-/// \brief Build step of the counter be used for codegen.
-Expr *OpenMPIterationSpaceChecker::BuildCounterStep() const { return Step; }
+/// Build step of the counter to be used for codegen.
+Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
-/// \brief Iteration space of a single for loop.
+/// Iteration space of a single for loop.
struct LoopIterationSpace final {
- /// \brief Condition of the loop.
+ /// Condition of the loop.
Expr *PreCond = nullptr;
- /// \brief This expression calculates the number of iterations in the loop.
+ /// This expression calculates the number of iterations in the loop.
/// It is always possible to calculate it before starting the loop.
Expr *NumIterations = nullptr;
- /// \brief The loop counter variable.
+ /// The loop counter variable.
Expr *CounterVar = nullptr;
- /// \brief Private loop counter variable.
+ /// Private loop counter variable.
Expr *PrivateCounterVar = nullptr;
- /// \brief This is initializer for the initial value of #CounterVar.
+ /// This is the initializer for the initial value of #CounterVar.
Expr *CounterInit = nullptr;
- /// \brief This is step for the #CounterVar used to generate its update:
+ /// This is the step for #CounterVar, used to generate its update:
/// #CounterVar = #CounterInit + #CounterStep * CurrentIteration.
Expr *CounterStep = nullptr;
- /// \brief Should step be subtracted?
+ /// Should step be subtracted?
bool Subtract = false;
- /// \brief Source range of the loop init.
+ /// Source range of the loop init.
SourceRange InitSrcRange;
- /// \brief Source range of the loop condition.
+ /// Source range of the loop condition.
SourceRange CondSrcRange;
- /// \brief Source range of the loop increment.
+ /// Source range of the loop increment.
SourceRange IncSrcRange;
};
@@ -4110,15 +4337,15 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
if (AssociatedLoops > 0 &&
isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
OpenMPIterationSpaceChecker ISC(*this, ForLoc);
- if (!ISC.CheckInit(Init, /*EmitDiags=*/false)) {
- if (auto *D = ISC.GetLoopDecl()) {
+ if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
+ if (ValueDecl *D = ISC.getLoopDecl()) {
auto *VD = dyn_cast<VarDecl>(D);
if (!VD) {
- if (auto *Private = IsOpenMPCapturedDecl(D))
+ if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
VD = Private;
- else {
- auto *Ref = buildCapture(*this, D, ISC.GetLoopDeclRefExpr(),
- /*WithInit=*/false);
+ } else {
+ DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
+ /*WithInit=*/false);
VD = cast<VarDecl>(Ref->getDecl());
}
}
@@ -4129,15 +4356,15 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
}
}
-/// \brief Called on a for stmt to check and extract its iteration space
+/// Called on a for stmt to check and extract its iteration space
/// for further processing (such as collapsing).
-static bool CheckOpenMPIterationSpace(
+static bool checkOpenMPIterationSpace(
OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA,
+ Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
LoopIterationSpace &ResultIterSpace,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
// OpenMP [2.6, Canonical Loop Form]
// for (init-expr; test-expr; incr-expr) structured-block
auto *For = dyn_cast_or_null<ForStmt>(S);
@@ -4168,22 +4395,22 @@ static bool CheckOpenMPIterationSpace(
OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc());
// Check init.
- auto Init = For->getInit();
- if (ISC.CheckInit(Init))
+ Stmt *Init = For->getInit();
+ if (ISC.checkAndSetInit(Init))
return true;
bool HasErrors = false;
// Check loop variable's type.
- if (auto *LCDecl = ISC.GetLoopDecl()) {
- auto *LoopDeclRefExpr = ISC.GetLoopDeclRefExpr();
+ if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
+ Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
// OpenMP [2.6, Canonical Loop Form]
// Var is one of the following:
// A variable of signed or unsigned integer type.
// For C++, a variable of a random access iterator type.
// For C, a variable of a pointer type.
- auto VarType = LCDecl->getType().getNonReferenceType();
+ QualType VarType = LCDecl->getType().getNonReferenceType();
if (!VarType->isDependentType() && !VarType->isIntegerType() &&
!VarType->isPointerType() &&
!(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
@@ -4214,7 +4441,7 @@ static bool CheckOpenMPIterationSpace(
DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
// If LoopVarRefExpr is nullptr it means the corresponding loop variable is
// declared in the loop and it is predetermined as a private.
- auto PredeterminedCKind =
+ OpenMPClauseKind PredeterminedCKind =
isOpenMPSimdDirective(DKind)
? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
: OMPC_private;
@@ -4230,7 +4457,7 @@ static bool CheckOpenMPIterationSpace(
<< getOpenMPClauseName(PredeterminedCKind);
if (DVar.RefExpr == nullptr)
DVar.CKind = PredeterminedCKind;
- ReportOriginalDSA(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
+ reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
HasErrors = true;
} else if (LoopDeclRefExpr != nullptr) {
// Make the loop iteration variable private (for worksharing constructs),
@@ -4247,31 +4474,31 @@ static bool CheckOpenMPIterationSpace(
assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
// Check test-expr.
- HasErrors |= ISC.CheckCond(For->getCond());
+ HasErrors |= ISC.checkAndSetCond(For->getCond());
// Check incr-expr.
- HasErrors |= ISC.CheckInc(For->getInc());
+ HasErrors |= ISC.checkAndSetInc(For->getInc());
}
- if (ISC.Dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
+ if (ISC.dependent() || SemaRef.CurContext->isDependentContext() || HasErrors)
return HasErrors;
// Build the loop's iteration space representation.
ResultIterSpace.PreCond =
- ISC.BuildPreCond(DSA.getCurScope(), For->getCond(), Captures);
- ResultIterSpace.NumIterations = ISC.BuildNumIterations(
+ ISC.buildPreCond(DSA.getCurScope(), For->getCond(), Captures);
+ ResultIterSpace.NumIterations = ISC.buildNumIterations(
DSA.getCurScope(),
(isOpenMPWorksharingDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind)),
Captures);
- ResultIterSpace.CounterVar = ISC.BuildCounterVar(Captures, DSA);
- ResultIterSpace.PrivateCounterVar = ISC.BuildPrivateCounterVar();
- ResultIterSpace.CounterInit = ISC.BuildCounterInit();
- ResultIterSpace.CounterStep = ISC.BuildCounterStep();
- ResultIterSpace.InitSrcRange = ISC.GetInitSrcRange();
- ResultIterSpace.CondSrcRange = ISC.GetConditionSrcRange();
- ResultIterSpace.IncSrcRange = ISC.GetIncrementSrcRange();
- ResultIterSpace.Subtract = ISC.ShouldSubtractStep();
+ ResultIterSpace.CounterVar = ISC.buildCounterVar(Captures, DSA);
+ ResultIterSpace.PrivateCounterVar = ISC.buildPrivateCounterVar();
+ ResultIterSpace.CounterInit = ISC.buildCounterInit();
+ ResultIterSpace.CounterStep = ISC.buildCounterStep();
+ ResultIterSpace.InitSrcRange = ISC.getInitSrcRange();
+ ResultIterSpace.CondSrcRange = ISC.getConditionSrcRange();
+ ResultIterSpace.IncSrcRange = ISC.getIncrementSrcRange();
+ ResultIterSpace.Subtract = ISC.shouldSubtractStep();
HasErrors |= (ResultIterSpace.PreCond == nullptr ||
ResultIterSpace.NumIterations == nullptr ||
@@ -4283,13 +4510,13 @@ static bool CheckOpenMPIterationSpace(
return HasErrors;
}
-/// \brief Build 'VarRef = Start.
+/// Build 'VarRef = Start'.
static ExprResult
-BuildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
+buildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
ExprResult Start,
- llvm::MapVector<Expr *, DeclRefExpr *> &Captures) {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
// Build 'VarRef = Start'.
- auto NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
+ ExprResult NewStart = tryBuildCapture(SemaRef, Start.get(), Captures);
if (!NewStart.isUsable())
return ExprError();
if (!SemaRef.Context.hasSameType(NewStart.get()->getType(),
@@ -4301,17 +4528,16 @@ BuildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
return ExprError();
}
- auto Init =
+ ExprResult Init =
SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get());
return Init;
}
-/// \brief Build 'VarRef = Start + Iter * Step'.
-static ExprResult
-BuildCounterUpdate(Sema &SemaRef, Scope *S, SourceLocation Loc,
- ExprResult VarRef, ExprResult Start, ExprResult Iter,
- ExprResult Step, bool Subtract,
- llvm::MapVector<Expr *, DeclRefExpr *> *Captures = nullptr) {
+/// Build 'VarRef = Start + Iter * Step'.
+static ExprResult buildCounterUpdate(
+ Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef,
+ ExprResult Start, ExprResult Iter, ExprResult Step, bool Subtract,
+ llvm::MapVector<const Expr *, DeclRefExpr *> *Captures = nullptr) {
// Add parentheses (for debugging purposes only).
Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get());
if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() ||
@@ -4378,12 +4604,12 @@ BuildCounterUpdate(Sema &SemaRef, Scope *S, SourceLocation Loc,
return Update;
}
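Conceptually, a sketch of the formula named in the comment above, with the Subtract flag applied:

  // value of the counter at iteration Iter:
  //   VarRef = Subtract ? Start - Iter * Step
  //                     : Start + Iter * Step;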
-/// \brief Convert integer expression \a E to make it have at least \a Bits
+/// Convert integer expression \a E to make it have at least \a Bits
/// bits.
-static ExprResult WidenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
+static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
if (E == nullptr)
return ExprError();
- auto &C = SemaRef.Context;
+ ASTContext &C = SemaRef.Context;
QualType OldType = E->getType();
unsigned HasBits = C.getTypeSize(OldType);
if (HasBits >= Bits)
@@ -4394,9 +4620,9 @@ static ExprResult WidenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
true);
}
-/// \brief Check if the given expression \a E is a constant integer that fits
+/// Check if the given expression \a E is a constant integer that fits
/// into \a Bits bits.
-static bool FitsInto(unsigned Bits, bool Signed, Expr *E, Sema &SemaRef) {
+static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
if (E == nullptr)
return false;
llvm::APSInt Result;
@@ -4419,10 +4645,10 @@ static Stmt *buildPreInits(ASTContext &Context,
/// Build preinits statement for the given declarations.
static Stmt *
buildPreInits(ASTContext &Context,
- const llvm::MapVector<Expr *, DeclRefExpr *> &Captures) {
+ const llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
if (!Captures.empty()) {
SmallVector<Decl *, 16> PreInits;
- for (auto &Pair : Captures)
+ for (const auto &Pair : Captures)
PreInits.push_back(Pair.second->getDecl());
return buildPreInits(Context, PreInits);
}
@@ -4433,7 +4659,7 @@ buildPreInits(ASTContext &Context,
static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
Expr *PostUpdate = nullptr;
if (!PostUpdates.empty()) {
- for (auto *E : PostUpdates) {
+ for (Expr *E : PostUpdates) {
Expr *ConvE = S.BuildCStyleCastExpr(
E->getExprLoc(),
S.Context.getTrivialTypeSourceInfo(S.Context.VoidTy),
@@ -4449,14 +4675,14 @@ static Expr *buildPostUpdate(Sema &S, ArrayRef<Expr *> PostUpdates) {
return PostUpdate;
}
-/// \brief Called on a for stmt to check itself and nested loops (if any).
+/// Called on a for stmt to check itself and nested loops (if any).
/// \return Returns 0 if one of the collapsed stmts is not a canonical for
/// loop, and the number of collapsed loops otherwise.
static unsigned
-CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
+checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
DSAStackTy &DSA,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA,
+ Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
OMPLoopDirective::HelperExprs &Built) {
unsigned NestedLoopCount = 1;
if (CollapseLoopCountExpr) {
@@ -4482,12 +4708,12 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
// This is a helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
SmallVector<LoopIterationSpace, 4> IterSpaces;
IterSpaces.resize(NestedLoopCount);
Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
- if (CheckOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
+ if (checkOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
NestedLoopCount, CollapseLoopCountExpr,
OrderedLoopCountExpr, VarsWithImplicitDSA,
IterSpaces[Cnt], Captures))
@@ -4537,26 +4763,28 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Precondition tests if there is at least one iteration (all conditions are
// true).
auto PreCond = ExprResult(IterSpaces[0].PreCond);
- auto N0 = IterSpaces[0].NumIterations;
- ExprResult LastIteration32 = WidenIterationCount(
- 32 /* Bits */, SemaRef
- .PerformImplicitConversion(
- N0->IgnoreImpCasts(), N0->getType(),
- Sema::AA_Converting, /*AllowExplicit=*/true)
- .get(),
- SemaRef);
- ExprResult LastIteration64 = WidenIterationCount(
- 64 /* Bits */, SemaRef
- .PerformImplicitConversion(
- N0->IgnoreImpCasts(), N0->getType(),
- Sema::AA_Converting, /*AllowExplicit=*/true)
- .get(),
+ Expr *N0 = IterSpaces[0].NumIterations;
+ ExprResult LastIteration32 =
+ widenIterationCount(/*Bits=*/32,
+ SemaRef
+ .PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
+ .get(),
+ SemaRef);
+ ExprResult LastIteration64 = widenIterationCount(
+ /*Bits=*/64,
+ SemaRef
+ .PerformImplicitConversion(N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
+ .get(),
SemaRef);
if (!LastIteration32.isUsable() || !LastIteration64.isUsable())
return NestedLoopCount;
- auto &C = SemaRef.Context;
+ ASTContext &C = SemaRef.Context;
bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32;
Scope *CurScope = DSA.getCurScope();
@@ -4566,7 +4794,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
PreCond.get(), IterSpaces[Cnt].PreCond);
}
- auto N = IterSpaces[Cnt].NumIterations;
+ Expr *N = IterSpaces[Cnt].NumIterations;
SourceLocation Loc = N->getExprLoc();
AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
if (LastIteration32.isUsable())
@@ -4592,8 +4820,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
if (LastIteration32.isUsable() &&
C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
(AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
- FitsInto(
- 32 /* Bits */,
+ fitsInto(
+ /*Bits=*/32,
LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
LastIteration64.get(), SemaRef)))
LastIteration = LastIteration32;
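As an illustration of the widening logic above (`work` and `bound` are placeholder names): even for a narrow counter the iteration count is computed in at least 32 bits, and the 64-bit variant is kept only when the 32-bit one might not fit, which is what fitsInto decides.

  #pragma omp for
  for (short i = 0; i < bound; ++i)  // 16-bit counter, but the iteration
    work(i);                         // count is built as a 32-bit value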
@@ -4681,7 +4909,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
UB.get(), LastIteration.get());
ExprResult CondOp = SemaRef.ActOnConditionalOp(
- InitLoc, InitLoc, IsUBGreater.get(), LastIteration.get(), UB.get());
+ LastIteration.get()->getExprLoc(), InitLoc, IsUBGreater.get(),
+ LastIteration.get(), UB.get());
EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
CondOp.get());
EUB = SemaRef.ActOnFinishFullExpr(EUB.get());
@@ -4691,7 +4920,6 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// enclosing region. E.g. in 'distribute parallel for' the bounds obtained
// by scheduling 'distribute' have to be passed to the schedule of 'for'.
if (isOpenMPLoopBoundSharingDirective(DKind)) {
-
// Lower bound variable, initialized with zero.
VarDecl *CombLBDecl =
buildVarDecl(SemaRef, InitLoc, VType, ".omp.comb.lb");
@@ -4716,7 +4944,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
CombCondOp.get());
CombEUB = SemaRef.ActOnFinishFullExpr(CombEUB.get());
- auto *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
+ const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
// We expect to have at least 2 more parameters than the 'parallel'
// directive does - the lower and upper bounds of the previous schedule.
assert(CD->getNumParams() >= 4 &&
@@ -4724,8 +4952,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Set the proper type for the bounds given what we learned from the
// enclosed loops.
- auto *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
- auto *PrevUBDecl = CD->getParam(/*PrevUB=*/3);
+ ImplicitParamDecl *PrevLBDecl = CD->getParam(/*PrevLB=*/2);
+ ImplicitParamDecl *PrevUBDecl = CD->getParam(/*PrevUB=*/3);
// Previous lower and upper bounds are obtained from the region
// parameters.
@@ -4764,7 +4992,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
// Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
- SourceLocation CondLoc;
+ SourceLocation CondLoc = AStmt->getLocStart();
ExprResult Cond =
(isOpenMPWorksharingDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
@@ -4777,7 +5005,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
}
// Loop increment (IV = IV + 1)
- SourceLocation IncLoc;
+ SourceLocation IncLoc = AStmt->getLocStart();
ExprResult Inc =
SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
@@ -4844,7 +5072,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// directive with for as IV = IV + ST; ensure upper bound expression based
// on PrevUB instead of NumIterations - used to implement 'for' when found
// in combination with 'distribute', like in 'distribute parallel for'
- SourceLocation DistIncLoc;
+ SourceLocation DistIncLoc = AStmt->getLocStart();
ExprResult DistCond, DistInc, PrevEUB;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
@@ -4860,7 +5088,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Build expression: UB = min(UB, prevUB) for #for in composite or combined
// construct
- SourceLocation DistEUBLoc;
+ SourceLocation DistEUBLoc = AStmt->getLocStart();
ExprResult IsUBGreater =
SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
ExprResult CondOp = SemaRef.ActOnConditionalOp(
@@ -4905,16 +5133,16 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
- auto *CounterVar = buildDeclRefExpr(SemaRef, VD, IS.CounterVar->getType(),
- IS.CounterVar->getExprLoc(),
- /*RefersToCapture=*/true);
- ExprResult Init = BuildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
+ DeclRefExpr *CounterVar = buildDeclRefExpr(
+ SemaRef, VD, IS.CounterVar->getType(), IS.CounterVar->getExprLoc(),
+ /*RefersToCapture=*/true);
+ ExprResult Init = buildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar,
IS.CounterInit, Captures);
if (!Init.isUsable()) {
HasErrors = true;
break;
}
- ExprResult Update = BuildCounterUpdate(
+ ExprResult Update = buildCounterUpdate(
SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, Iter,
IS.CounterStep, IS.Subtract, &Captures);
if (!Update.isUsable()) {
@@ -4923,7 +5151,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
// Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step
- ExprResult Final = BuildCounterUpdate(
+ ExprResult Final = buildCounterUpdate(
SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit,
IS.NumIterations, IS.CounterStep, IS.Subtract, &Captures);
if (!Final.isUsable()) {
@@ -4996,10 +5224,10 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Expr *CounterVal = SemaRef.DefaultLvalueConversion(IV.get()).get();
// Fill data for doacross depend clauses.
- for (auto Pair : DSA.getDoacrossDependClauses()) {
- if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
+ for (const auto &Pair : DSA.getDoacrossDependClauses()) {
+ if (Pair.first->getDependencyKind() == OMPC_DEPEND_source) {
Pair.first->setCounterValue(CounterVal);
- else {
+ } else {
if (NestedLoopCount != Pair.second.size() ||
NestedLoopCount != LoopMultipliers.size() + 1) {
// Erroneous case - clause has some problems.
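For reference, the doacross clauses whose counter values are filled in here look like this in user code (illustrative; `use` and `set` are placeholder functions). The sink vector names one entry per collapsed loop, which is what the size check above enforces:

  #pragma omp for ordered(2)
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < m; ++j) {
  #pragma omp ordered depend(sink : i - 1, j)
      use(i - 1, j);
  #pragma omp ordered depend(source)
      set(i, j);
    }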
@@ -5064,10 +5292,10 @@ static Expr *getOrderedNumberExpr(ArrayRef<OMPClause *> Clauses) {
static bool checkSimdlenSafelenSpecified(Sema &S,
const ArrayRef<OMPClause *> Clauses) {
- OMPSafelenClause *Safelen = nullptr;
- OMPSimdlenClause *Simdlen = nullptr;
+ const OMPSafelenClause *Safelen = nullptr;
+ const OMPSimdlenClause *Simdlen = nullptr;
- for (auto *Clause : Clauses) {
+ for (const OMPClause *Clause : Clauses) {
if (Clause->getClauseKind() == OMPC_safelen)
Safelen = cast<OMPSafelenClause>(Clause);
else if (Clause->getClauseKind() == OMPC_simdlen)
@@ -5078,8 +5306,8 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
if (Simdlen && Safelen) {
llvm::APSInt SimdlenRes, SafelenRes;
- auto SimdlenLength = Simdlen->getSimdlen();
- auto SafelenLength = Safelen->getSafelen();
+ const Expr *SimdlenLength = Simdlen->getSimdlen();
+ const Expr *SafelenLength = Safelen->getSafelen();
if (SimdlenLength->isValueDependent() || SimdlenLength->isTypeDependent() ||
SimdlenLength->isInstantiationDependent() ||
SimdlenLength->containsUnexpandedParameterPack())
@@ -5104,10 +5332,10 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
return false;
}
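Assuming the usual OpenMP restriction that simdlen must not exceed safelen, the check above accepts the first directive and rejects the second:

  #pragma omp simd simdlen(8) safelen(16)    // accepted
  for (int i = 0; i < n; ++i) a[i] += b[i];

  // #pragma omp simd simdlen(16) safelen(8) // rejected by this check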
-StmtResult Sema::ActOnOpenMPSimdDirective(
- ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+StmtResult
+Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -5115,7 +5343,7 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_simd, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -5126,7 +5354,7 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -5138,15 +5366,15 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
Clauses, AStmt, B);
}
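Example of the collapse case handled by checkOpenMPLoop above (illustrative): with collapse(2) the two nested canonical loops are analyzed and flattened into a single iteration space of n * m iterations.

  #pragma omp simd collapse(2)
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < m; ++j)
      a[i][j] = 0;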
-StmtResult Sema::ActOnOpenMPForDirective(
- ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+StmtResult
+Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -5154,7 +5382,7 @@ StmtResult Sema::ActOnOpenMPForDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_for, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -5165,7 +5393,7 @@ StmtResult Sema::ActOnOpenMPForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -5174,15 +5402,14 @@ StmtResult Sema::ActOnOpenMPForDirective(
}
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
Clauses, AStmt, B, DSAStack->isCancelRegion());
}
StmtResult Sema::ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -5191,7 +5418,7 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_for_simd, getCollapseNumberExpr(Clauses),
getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -5202,7 +5429,7 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -5214,7 +5441,7 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
Clauses, AStmt, B);
}
@@ -5251,7 +5478,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
DSAStack->isCancelRegion());
@@ -5265,7 +5492,7 @@ StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DSAStack->setParentCancelRegion(DSAStack->isCancelRegion());
return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt,
@@ -5281,13 +5508,13 @@ StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
// OpenMP [2.7.3, single Construct, Restrictions]
// The copyprivate clause must not be used with the nowait clause.
- OMPClause *Nowait = nullptr;
- OMPClause *Copyprivate = nullptr;
- for (auto *Clause : Clauses) {
+ const OMPClause *Nowait = nullptr;
+ const OMPClause *Copyprivate = nullptr;
+ for (const OMPClause *Clause : Clauses) {
if (Clause->getClauseKind() == OMPC_nowait)
Nowait = Clause;
else if (Clause->getClauseKind() == OMPC_copyprivate)
@@ -5311,7 +5538,7 @@ StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
}
@@ -5328,7 +5555,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
llvm::APSInt Hint;
SourceLocation HintLoc;
bool DependentHint = false;
- for (auto *C : Clauses) {
+ for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_hint) {
if (!DirName.getName()) {
Diag(C->getLocStart(), diag::err_omp_hint_clause_no_name);
@@ -5336,9 +5563,9 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
}
Expr *E = cast<OMPHintClause>(C)->getHint();
if (E->isTypeDependent() || E->isValueDependent() ||
- E->isInstantiationDependent())
+ E->isInstantiationDependent()) {
DependentHint = true;
- else {
+ } else {
Hint = E->EvaluateKnownConstInt(Context);
HintLoc = C->getLocStart();
}
@@ -5346,26 +5573,27 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
}
if (ErrorFound)
return StmtError();
- auto Pair = DSAStack->getCriticalWithHint(DirName);
+ const auto Pair = DSAStack->getCriticalWithHint(DirName);
if (Pair.first && DirName.getName() && !DependentHint) {
if (llvm::APSInt::compareValues(Hint, Pair.second) != 0) {
Diag(StartLoc, diag::err_omp_critical_with_hint);
- if (HintLoc.isValid()) {
+ if (HintLoc.isValid())
Diag(HintLoc, diag::note_omp_critical_hint_here)
<< 0 << Hint.toString(/*Radix=*/10, /*Signed=*/false);
- } else
+ else
Diag(StartLoc, diag::note_omp_critical_no_hint) << 0;
- if (auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
+ if (const auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
Diag(C->getLocStart(), diag::note_omp_critical_hint_here)
<< 1
<< C->getHint()->EvaluateKnownConstInt(Context).toString(
/*Radix=*/10, /*Signed=*/false);
- } else
+ } else {
Diag(Pair.first->getLocStart(), diag::note_omp_critical_no_hint) << 1;
+ }
}
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
auto *Dir = OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc,
Clauses, AStmt);
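To illustrate the hint consistency check above: every 'critical' region that shares a name must agree on the hint value, so a hypothetical fragment like this one (assuming the OpenMP 4.5 omp_lock_hint_* constants from <omp.h>) draws err_omp_critical_with_hint:

    #include <omp.h>
    void bump(int &counter) {
    #pragma omp critical(lock) hint(omp_lock_hint_uncontended)
      ++counter;
    #pragma omp critical(lock) hint(omp_lock_hint_contended)  // mismatched hint: diagnosed
      --counter;
    }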
@@ -5376,12 +5604,11 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
StmtResult Sema::ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -5393,7 +5620,7 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_parallel_for, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_parallel_for, getCollapseNumberExpr(Clauses),
getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -5404,7 +5631,7 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -5413,7 +5640,7 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
}
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPParallelForDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
@@ -5421,12 +5648,11 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -5438,7 +5664,7 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_parallel_for_simd, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_parallel_for_simd, getCollapseNumberExpr(Clauses),
getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -5446,7 +5672,7 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -5458,7 +5684,7 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPParallelForSimdDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
@@ -5496,7 +5722,7 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPParallelSectionsDirective::Create(
Context, StartLoc, EndLoc, Clauses, AStmt, DSAStack->isCancelRegion());
@@ -5516,7 +5742,7 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
DSAStack->isCancelRegion());
@@ -5546,7 +5772,7 @@ StmtResult Sema::ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTaskgroupDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt,
@@ -5564,13 +5790,13 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc) {
- OMPClause *DependFound = nullptr;
- OMPClause *DependSourceClause = nullptr;
- OMPClause *DependSinkClause = nullptr;
+ const OMPClause *DependFound = nullptr;
+ const OMPClause *DependSourceClause = nullptr;
+ const OMPClause *DependSinkClause = nullptr;
bool ErrorFound = false;
- OMPThreadsClause *TC = nullptr;
- OMPSIMDClause *SC = nullptr;
- for (auto *C : Clauses) {
+ const OMPThreadsClause *TC = nullptr;
+ const OMPSIMDClause *SC = nullptr;
+ for (const OMPClause *C : Clauses) {
if (auto *DC = dyn_cast<OMPDependClause>(C)) {
DependFound = C;
if (DC->getDependencyKind() == OMPC_DEPEND_source) {
@@ -5579,8 +5805,9 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
<< getOpenMPDirectiveName(OMPD_ordered)
<< getOpenMPClauseName(OMPC_depend) << 2;
ErrorFound = true;
- } else
+ } else {
DependSourceClause = C;
+ }
if (DependSinkClause) {
Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
<< 0;
@@ -5594,10 +5821,11 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
}
DependSinkClause = C;
}
- } else if (C->getClauseKind() == OMPC_threads)
+ } else if (C->getClauseKind() == OMPC_threads) {
TC = cast<OMPThreadsClause>(C);
- else if (C->getClauseKind() == OMPC_simd)
+ } else if (C->getClauseKind() == OMPC_simd) {
SC = cast<OMPSIMDClause>(C);
+ }
}
if (!ErrorFound && !SC &&
isOpenMPSimdDirective(DSAStack->getParentDirective())) {
@@ -5615,7 +5843,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
diag::err_omp_ordered_directive_without_param);
ErrorFound = true;
} else if (TC || Clauses.empty()) {
- if (auto *Param = DSAStack->getParentOrderedRegionParam()) {
+ if (const Expr *Param = DSAStack->getParentOrderedRegionParam()) {
SourceLocation ErrLoc = TC ? TC->getLocStart() : StartLoc;
Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
<< (TC != nullptr);
@@ -5629,57 +5857,57 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
if (AStmt) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
}
return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}
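The depend(source)/depend(sink) bookkeeping above serves doacross loops; a well-formed sketch (illustrative) pairs one source with matching sinks inside a loop marked ordered(1):

    void doacross(int n, float *a, const float *b) {
    #pragma omp for ordered(1)
      for (int i = 1; i < n; ++i) {
    #pragma omp ordered depend(sink : i - 1)  // wait until iteration i-1 completes
        a[i] = a[i - 1] + b[i];
    #pragma omp ordered depend(source)        // signal completion of this iteration
      }
    }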
namespace {
-/// \brief Helper class for checking expression in 'omp atomic [update]'
+/// Helper class for checking expressions in an 'omp atomic [update]'
/// construct.
class OpenMPAtomicUpdateChecker {
- /// \brief Error results for atomic update expressions.
+ /// Error results for atomic update expressions.
enum ExprAnalysisErrorCode {
- /// \brief A statement is not an expression statement.
+ /// A statement is not an expression statement.
NotAnExpression,
- /// \brief Expression is not builtin binary or unary operation.
+ /// Expression is not builtin binary or unary operation.
NotABinaryOrUnaryExpression,
- /// \brief Unary operation is not post-/pre- increment/decrement operation.
+ /// Unary operation is not post-/pre- increment/decrement operation.
NotAnUnaryIncDecExpression,
- /// \brief An expression is not of scalar type.
+ /// An expression is not of scalar type.
NotAScalarType,
- /// \brief A binary operation is not an assignment operation.
+ /// A binary operation is not an assignment operation.
NotAnAssignmentOp,
- /// \brief RHS part of the binary operation is not a binary expression.
+ /// RHS part of the binary operation is not a binary expression.
NotABinaryExpression,
- /// \brief RHS part is not additive/multiplicative/shift/biwise binary
+    /// RHS part is not additive/multiplicative/shift/bitwise binary
/// expression.
NotABinaryOperator,
- /// \brief RHS binary operation does not have reference to the updated LHS
+    /// RHS binary operation does not refer to the updated LHS
/// part.
NotAnUpdateExpression,
- /// \brief No errors is found.
+    /// No error is found.
NoError
};
- /// \brief Reference to Sema.
+ /// Reference to Sema.
Sema &SemaRef;
- /// \brief A location for note diagnostics (when error is found).
+ /// A location for note diagnostics (when error is found).
SourceLocation NoteLoc;
- /// \brief 'x' lvalue part of the source atomic expression.
+ /// 'x' lvalue part of the source atomic expression.
Expr *X;
- /// \brief 'expr' rvalue part of the source atomic expression.
+ /// 'expr' rvalue part of the source atomic expression.
Expr *E;
- /// \brief Helper expression of the form
+ /// Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *UpdateExpr;
- /// \brief Is 'x' a LHS in a RHS part of full update expression. It is
+  /// Is 'x' the LHS in the RHS part of the full update expression? This is
/// important for non-associative operations.
bool IsXLHSInRHSPart;
BinaryOperatorKind Op;
SourceLocation OpLoc;
- /// \brief true if the source expression is a postfix unary operation, false
+ /// true if the source expression is a postfix unary operation, false
/// if it is a prefix unary operation.
bool IsPostfixUpdate;
@@ -5687,7 +5915,7 @@ public:
OpenMPAtomicUpdateChecker(Sema &SemaRef)
: SemaRef(SemaRef), X(nullptr), E(nullptr), UpdateExpr(nullptr),
IsXLHSInRHSPart(false), Op(BO_PtrMemD), IsPostfixUpdate(false) {}
- /// \brief Check specified statement that it is suitable for 'atomic update'
+  /// Check that the specified statement is suitable for 'atomic update'
/// constructs and extract 'x', 'expr' and Operation from the original
/// expression. If DiagId and NoteId == 0, then only check is performed
/// without error notification.
@@ -5695,19 +5923,19 @@ public:
/// \param NoteId Diagnostic note for the main error message.
/// \return true if statement is not an update expression, false otherwise.
bool checkStatement(Stmt *S, unsigned DiagId = 0, unsigned NoteId = 0);
- /// \brief Return the 'x' lvalue part of the source atomic expression.
+ /// Return the 'x' lvalue part of the source atomic expression.
Expr *getX() const { return X; }
- /// \brief Return the 'expr' rvalue part of the source atomic expression.
+ /// Return the 'expr' rvalue part of the source atomic expression.
Expr *getExpr() const { return E; }
- /// \brief Return the update expression used in calculation of the updated
+  /// Return the update expression used in the calculation of the updated
/// value. Always has form 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *getUpdateExpr() const { return UpdateExpr; }
- /// \brief Return true if 'x' is LHS in RHS part of full update expression,
+  /// Return true if 'x' is the LHS in the RHS part of the full update expression,
/// false otherwise.
bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
- /// \brief true if the source expression is a postfix unary operation, false
+  /// Return true if the source expression is a postfix unary operation, false
/// if it is a prefix unary operation.
bool isPostfixUpdate() const { return IsPostfixUpdate; }
@@ -5727,15 +5955,15 @@ bool OpenMPAtomicUpdateChecker::checkBinaryOperation(
// x = expr binop x;
if (AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getLHS();
- if (auto *AtomicInnerBinOp = dyn_cast<BinaryOperator>(
+ if (const auto *AtomicInnerBinOp = dyn_cast<BinaryOperator>(
AtomicBinOp->getRHS()->IgnoreParenImpCasts())) {
if (AtomicInnerBinOp->isMultiplicativeOp() ||
AtomicInnerBinOp->isAdditiveOp() || AtomicInnerBinOp->isShiftOp() ||
AtomicInnerBinOp->isBitwiseOp()) {
Op = AtomicInnerBinOp->getOpcode();
OpLoc = AtomicInnerBinOp->getOperatorLoc();
- auto *LHS = AtomicInnerBinOp->getLHS();
- auto *RHS = AtomicInnerBinOp->getRHS();
+ Expr *LHS = AtomicInnerBinOp->getLHS();
+ Expr *RHS = AtomicInnerBinOp->getRHS();
llvm::FoldingSetNodeID XId, LHSId, RHSId;
X->IgnoreParenImpCasts()->Profile(XId, SemaRef.getASTContext(),
/*Canonical=*/true);
@@ -5779,7 +6007,8 @@ bool OpenMPAtomicUpdateChecker::checkBinaryOperation(
SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange;
SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
return true;
- } else if (SemaRef.CurContext->isDependentContext())
+ }
+ if (SemaRef.CurContext->isDependentContext())
E = X = UpdateExpr = nullptr;
return ErrorFound != NoError;
}
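The accepted shapes reduce to the two forms named in the comments above; a sketch (illustrative, scalar operands assumed):

    void updates(int x, int expr) {
    #pragma omp atomic update
      x = x + expr;   // 'x' is the LHS of the RHS: IsXLHSInRHSPart = true
    #pragma omp atomic update
      x = expr - x;   // 'x' is the RHS of the RHS: IsXLHSInRHSPart = false;
                      // the flag matters because '-' is non-associative
    }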
@@ -5801,7 +6030,7 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
AtomicBody = AtomicBody->IgnoreParenImpCasts();
if (AtomicBody->getType()->isScalarType() ||
AtomicBody->isInstantiationDependent()) {
- if (auto *AtomicCompAssignOp = dyn_cast<CompoundAssignOperator>(
+ if (const auto *AtomicCompAssignOp = dyn_cast<CompoundAssignOperator>(
AtomicBody->IgnoreParenImpCasts())) {
// Check for Compound Assignment Operation
Op = BinaryOperator::getOpForCompoundAssignment(
@@ -5815,7 +6044,7 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
// Check for Binary Operation
if (checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
return true;
- } else if (auto *AtomicUnaryOp = dyn_cast<UnaryOperator>(
+ } else if (const auto *AtomicUnaryOp = dyn_cast<UnaryOperator>(
AtomicBody->IgnoreParenImpCasts())) {
// Check for Unary Operation
if (AtomicUnaryOp->isIncrementDecrementOp()) {
@@ -5851,7 +6080,8 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange;
SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
return true;
- } else if (SemaRef.CurContext->isDependentContext())
+ }
+ if (SemaRef.CurContext->isDependentContext())
E = X = UpdateExpr = nullptr;
if (ErrorFound == NoError && E && X) {
// Build an update expression of form 'OpaqueValueExpr(x) binop
@@ -5861,7 +6091,7 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_RValue);
auto *OVEExpr = new (SemaRef.getASTContext())
OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_RValue);
- auto Update =
+ ExprResult Update =
SemaRef.CreateBuiltinBinOp(OpLoc, Op, IsXLHSInRHSPart ? OVEX : OVEExpr,
IsXLHSInRHSPart ? OVEExpr : OVEX);
if (Update.isInvalid())
@@ -5890,7 +6120,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
OpenMPClauseKind AtomicKind = OMPC_unknown;
SourceLocation AtomicKindLoc;
- for (auto *C : Clauses) {
+ for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
C->getClauseKind() == OMPC_update ||
C->getClauseKind() == OMPC_capture) {
@@ -5906,7 +6136,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
}
- auto Body = CS->getCapturedStmt();
+ Stmt *Body = CS->getCapturedStmt();
if (auto *EWC = dyn_cast<ExprWithCleanups>(Body))
Body = EWC->getSubExpr();
@@ -5950,8 +6180,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange ErrorRange, NoteRange;
// If clause is read:
// v = x;
- if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
- auto *AtomicBinOp =
+ if (const auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ const auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
@@ -5959,7 +6189,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
(V->isInstantiationDependent() || V->getType()->isScalarType())) {
if (!X->isLValue() || !V->isLValue()) {
- auto NotLValueExpr = X->isLValue() ? V : X;
+ const Expr *NotLValueExpr = X->isLValue() ? V : X;
ErrorFound = NotAnLValue;
ErrorLoc = AtomicBinOp->getExprLoc();
ErrorRange = AtomicBinOp->getSourceRange();
@@ -5968,7 +6198,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
} else if (!X->isInstantiationDependent() ||
!V->isInstantiationDependent()) {
- auto NotScalarExpr =
+ const Expr *NotScalarExpr =
(X->isInstantiationDependent() || X->getType()->isScalarType())
? V
: X;
@@ -5998,7 +6228,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
<< NoteRange;
return StmtError();
- } else if (CurContext->isDependentContext())
+ }
+ if (CurContext->isDependentContext())
V = X = nullptr;
} else if (AtomicKind == OMPC_write) {
enum {
@@ -6012,8 +6243,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange ErrorRange, NoteRange;
// If clause is write:
// x = expr;
- if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
- auto *AtomicBinOp =
+ if (const auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ const auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getLHS();
@@ -6029,7 +6260,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
} else if (!X->isInstantiationDependent() ||
!E->isInstantiationDependent()) {
- auto NotScalarExpr =
+ const Expr *NotScalarExpr =
(X->isInstantiationDependent() || X->getType()->isScalarType())
? E
: X;
@@ -6059,7 +6290,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
<< NoteRange;
return StmtError();
- } else if (CurContext->isDependentContext())
+ }
+ if (CurContext->isDependentContext())
E = X = nullptr;
} else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) {
// If clause is update:
@@ -6093,7 +6325,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
} ErrorFound = NoError;
SourceLocation ErrorLoc, NoteLoc;
SourceRange ErrorRange, NoteRange;
- if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ if (const auto *AtomicBody = dyn_cast<Expr>(Body)) {
// If clause is a capture:
// v = x++;
// v = x--;
@@ -6102,7 +6334,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// v = x binop= expr;
// v = x = x binop expr;
// v = x = expr binop x;
- auto *AtomicBinOp =
+ const auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
V = AtomicBinOp->getLHS();
@@ -6131,9 +6363,9 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
<< ErrorRange;
Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
return StmtError();
- } else if (CurContext->isDependentContext()) {
- UE = V = E = X = nullptr;
}
+ if (CurContext->isDependentContext())
+ UE = V = E = X = nullptr;
} else {
// If clause is a capture:
// { v = x; x = expr; }
@@ -6154,8 +6386,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (auto *CS = dyn_cast<CompoundStmt>(Body)) {
// Check that this is { expr1; expr2; }
if (CS->size() == 2) {
- auto *First = CS->body_front();
- auto *Second = CS->body_back();
+ Stmt *First = CS->body_front();
+ Stmt *Second = CS->body_back();
if (auto *EWC = dyn_cast<ExprWithCleanups>(First))
First = EWC->getSubExpr()->IgnoreParenImpCasts();
if (auto *EWC = dyn_cast<ExprWithCleanups>(Second))
@@ -6177,7 +6409,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// { v = x; x = x binop expr; }
// { v = x; x = expr binop x; }
// Check that the first expression has form v = x.
- auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
+ Expr *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
llvm::FoldingSetNodeID XId, PossibleXId;
Checker.getX()->Profile(XId, Context, /*Canonical=*/true);
PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true);
@@ -6207,7 +6439,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// { x = x binop expr; v = x; }
// { x = expr binop x; v = x; }
// Check that the second expression has form v = x.
- auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
+ Expr *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
llvm::FoldingSetNodeID XId, PossibleXId;
Checker.getX()->Profile(XId, Context, /*Canonical=*/true);
PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true);
@@ -6248,9 +6480,9 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SecondBinOp ? SecondBinOp->getSourceRange()
: SourceRange(ErrorLoc, ErrorLoc);
} else {
- auto *PossibleXRHSInFirst =
+ Expr *PossibleXRHSInFirst =
FirstBinOp->getRHS()->IgnoreParenImpCasts();
- auto *PossibleXLHSInSecond =
+ Expr *PossibleXLHSInSecond =
SecondBinOp->getLHS()->IgnoreParenImpCasts();
llvm::FoldingSetNodeID X1Id, X2Id;
PossibleXRHSInFirst->Profile(X1Id, Context,
@@ -6293,13 +6525,13 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
<< ErrorRange;
Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
return StmtError();
- } else if (CurContext->isDependentContext()) {
- UE = V = E = X = nullptr;
}
+ if (CurContext->isDependentContext())
+ UE = V = E = X = nullptr;
}
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
X, V, E, UE, IsXLHSInRHSPart,
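For orientation, the capture forms enumerated in the comments above correspond to source like this sketch (illustrative):

    void capture(int &v, int &x, int expr) {
    #pragma omp atomic capture
      v = x++;                // expression form: update and capture in one statement
    #pragma omp atomic capture
      { v = x; x += expr; }   // compound form: { v = x; x binop= expr; }
    }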
@@ -6313,25 +6545,35 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
// OpenMP [2.16, Nesting of Regions]
// If specified, a teams construct must be contained within a target
// construct. That target construct must contain no statements or directives
// outside of the teams construct.
if (DSAStack->hasInnerTeamsRegion()) {
- auto S = AStmt->IgnoreContainers(/*IgnoreCaptured*/ true);
+ const Stmt *S = CS->IgnoreContainers(/*IgnoreCaptured=*/true);
bool OMPTeamsFound = true;
- if (auto *CS = dyn_cast<CompoundStmt>(S)) {
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
auto I = CS->body_begin();
while (I != CS->body_end()) {
- auto *OED = dyn_cast<OMPExecutableDirective>(*I);
+ const auto *OED = dyn_cast<OMPExecutableDirective>(*I);
if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
OMPTeamsFound = false;
break;
@@ -6341,7 +6583,7 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
assert(I != CS->body_end() && "Not found statement");
S = *I;
} else {
- auto *OED = dyn_cast<OMPExecutableDirective>(S);
+ const auto *OED = dyn_cast<OMPExecutableDirective>(S);
OMPTeamsFound = OED && isOpenMPTeamsDirective(OED->getDirectiveKind());
}
if (!OMPTeamsFound) {
@@ -6354,7 +6596,7 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
}
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}
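The restriction enforced above follows the quoted rule directly: if a target region contains a teams construct, teams must be the only thing inside it. A conforming sketch (illustrative):

    void offload(int n, float *a) {
    #pragma omp target
    #pragma omp teams   // OK: the sole construct of the target region; any
      {                 // sibling statement next to it would be diagnosed
        for (int i = 0; i < n; ++i)
          a[i] = 0.0f;
      }
    }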
@@ -6366,15 +6608,25 @@ Sema::ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetParallelDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt);
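The new unwrapping loop reflects that a combined construct is captured once per component region; for 'target parallel' there are two capture levels, and each nested CapturedDecl must be marked nothrow. Conceptually (a sketch of the presumed AST shape, not patch code):

    // AStmt for '#pragma omp target parallel':
    //   CapturedStmt             <- 'target' capture level (level 2)
    //     CapturedStmt           <- 'parallel' capture level (level 1)
    //       <structured block>
    // The loop peels one CapturedStmt per level and calls setNothrow() on each.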
@@ -6382,12 +6634,11 @@ Sema::ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6409,7 +6660,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_target_parallel_for, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_target_parallel_for, getCollapseNumberExpr(Clauses),
getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -6420,7 +6671,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -6429,7 +6680,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
}
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetParallelForDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt,
B, DSAStack->isCancelRegion());
@@ -6466,7 +6717,7 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetDataDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt);
@@ -6479,7 +6730,7 @@ Sema::ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6516,7 +6767,7 @@ Sema::ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6553,7 +6804,7 @@ StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6585,7 +6836,7 @@ StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6593,7 +6844,7 @@ StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DSAStack->setParentTeamsRegionLoc(StartLoc);
@@ -6635,9 +6886,9 @@ StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
static bool checkGrainsizeNumTasksClauses(Sema &S,
ArrayRef<OMPClause *> Clauses) {
- OMPClause *PrevClause = nullptr;
+ const OMPClause *PrevClause = nullptr;
bool ErrorFound = false;
- for (auto *C : Clauses) {
+ for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_grainsize ||
C->getClauseKind() == OMPC_num_tasks) {
if (!PrevClause)
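checkGrainsizeNumTasksClauses rejects mixing the two taskloop scheduling clauses; a fragment that would be diagnosed (illustrative):

    void work(int n, float *a) {
    #pragma omp taskloop grainsize(4) num_tasks(8)  // error: mutually exclusive clauses
      for (int i = 0; i < n; ++i)
        a[i] = 0.0f;
    }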
@@ -6659,9 +6910,9 @@ static bool checkGrainsizeNumTasksClauses(Sema &S,
static bool checkReductionClauseWithNogroup(Sema &S,
ArrayRef<OMPClause *> Clauses) {
- OMPClause *ReductionClause = nullptr;
- OMPClause *NogroupClause = nullptr;
- for (auto *C : Clauses) {
+ const OMPClause *ReductionClause = nullptr;
+ const OMPClause *NogroupClause = nullptr;
+ for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_reduction) {
ReductionClause = C;
if (NogroupClause)
@@ -6686,8 +6937,7 @@ static bool checkReductionClauseWithNogroup(Sema &S,
StmtResult Sema::ActOnOpenMPTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -6696,7 +6946,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_taskloop, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_taskloop, getCollapseNumberExpr(Clauses),
/*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -6716,15 +6966,14 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
if (checkReductionClauseWithNogroup(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTaskLoopDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -6733,7 +6982,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_taskloop_simd, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_taskloop_simd, getCollapseNumberExpr(Clauses),
/*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -6744,7 +6993,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -6766,15 +7015,14 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTaskLoopSimdDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
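checkSimdlenSafelenSpecified, used just above, enforces the rule that simdlen must not exceed safelen when both clauses are present; an offending sketch (illustrative):

    void vec(int n, float *a) {
    #pragma omp taskloop simd simdlen(8) safelen(4)  // error: simdlen must be <= safelen
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    }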
StmtResult Sema::ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -6783,7 +7031,7 @@ StmtResult Sema::ActOnOpenMPDistributeDirective(
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_distribute, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_distribute, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, AStmt,
*this, *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -6792,19 +7040,18 @@ StmtResult Sema::ActOnOpenMPDistributeDirective(
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPDistributeDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6826,7 +7073,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_distribute_parallel_for, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -6836,7 +7083,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
@@ -6844,12 +7091,11 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6871,7 +7117,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -6883,7 +7129,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -6895,19 +7141,18 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPDistributeParallelForSimdDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6929,7 +7174,7 @@ StmtResult Sema::ActOnOpenMPDistributeSimdDirective(
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_distribute_simd, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_distribute_simd, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this,
*DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -6940,7 +7185,7 @@ StmtResult Sema::ActOnOpenMPDistributeSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -6952,19 +7197,18 @@ StmtResult Sema::ActOnOpenMPDistributeSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPDistributeSimdDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6985,7 +7229,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_parallel_for_simd, getCollapseNumberExpr(Clauses),
getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -6997,7 +7241,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -7008,19 +7252,18 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetParallelForSimdDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPTargetSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7042,7 +7285,7 @@ StmtResult Sema::ActOnOpenMPTargetSimdDirective(
// In presence of clause 'collapse' with number of loops, it will define the
// nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_target_simd, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_target_simd, getCollapseNumberExpr(Clauses),
getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -7053,7 +7296,7 @@ StmtResult Sema::ActOnOpenMPTargetSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -7065,19 +7308,18 @@ StmtResult Sema::ActOnOpenMPTargetSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetSimdDirective::Create(Context, StartLoc, EndLoc,
NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7099,7 +7341,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
- CheckOpenMPLoop(OMPD_teams_distribute, getCollapseNumberExpr(Clauses),
+ checkOpenMPLoop(OMPD_teams_distribute, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this,
*DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
@@ -7108,7 +7350,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp teams distribute loop exprs were not built");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DSAStack->setParentTeamsRegionLoc(StartLoc);
@@ -7118,12 +7360,11 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7146,7 +7387,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_teams_distribute_simd, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -7159,7 +7400,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -7171,7 +7412,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DSAStack->setParentTeamsRegionLoc(StartLoc);
@@ -7181,12 +7422,11 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7209,7 +7449,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- auto NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_teams_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -7222,7 +7462,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -7234,7 +7474,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DSAStack->setParentTeamsRegionLoc(StartLoc);
@@ -7244,12 +7484,11 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7272,7 +7511,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- unsigned NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -7283,7 +7522,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp for loop exprs were not built");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DSAStack->setParentTeamsRegionLoc(StartLoc);
@@ -7299,7 +7538,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7317,7 +7556,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
}
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt);
@@ -7325,12 +7564,11 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -7352,7 +7590,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- auto NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_teams_distribute, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -7362,33 +7600,42 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp target teams distribute loop exprs were not built");
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel =
+ getOpenMPCaptureLevels(OMPD_target_teams_distribute_parallel_for);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
OMPLoopDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
- auto NestedLoopCount = CheckOpenMPLoop(
- OMPD_target_teams_distribute_parallel_for,
- getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ unsigned NestedLoopCount = checkOpenMPLoop(
+ OMPD_target_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
@@ -7396,7 +7643,18 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
assert((CurContext->isDependentContext() || B.builtAll()) &&
"omp target teams distribute parallel for loop exprs were not built");
- getCurFunction()->setHasBranchProtectedScope();
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (OMPClause *C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeParallelForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
DSAStack->isCancelRegion());
@@ -7404,27 +7662,37 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(
+ OMPD_target_teams_distribute_parallel_for_simd);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
OMPLoopDirective::HelperExprs B;
  // If the 'collapse' clause is present with a number of loops, it defines
  // the number of associated nested loops.
- auto NestedLoopCount = CheckOpenMPLoop(
- OMPD_target_teams_distribute_parallel_for_simd,
- getCollapseNumberExpr(Clauses),
- nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_target_teams_distribute_parallel_for_simd,
+ getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, CS, *this,
+ *DSAStack, VarsWithImplicitDSA, B);
if (NestedLoopCount == 0)
return StmtError();
@@ -7434,7 +7702,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -7446,15 +7714,14 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeParallelForSimdDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc,
- llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
@@ -7480,7 +7747,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
OMPLoopDirective::HelperExprs B;
  // If the 'collapse' clause is present with a number of loops, it defines
  // the number of associated nested loops.
- auto NestedLoopCount = CheckOpenMPLoop(
+ unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_teams_distribute_simd, getCollapseNumberExpr(Clauses),
nullptr /*ordered not a clause on distribute*/, CS, *this, *DSAStack,
VarsWithImplicitDSA, B);
@@ -7492,7 +7759,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
- for (auto C : Clauses) {
+ for (OMPClause *C : Clauses) {
if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
@@ -7504,7 +7771,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
if (checkSimdlenSafelenSpecified(*this, Clauses))
return StmtError();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return OMPTargetTeamsDistributeSimdDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
@@ -7612,13 +7879,18 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
// If this clause applies to the nested 'parallel' region, capture within
// the 'target' region, otherwise do not capture.
if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
CaptureRegion = OMPD_target;
break;
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ // If this clause applies to the nested 'parallel' region, capture within
+ // the 'teams' region, otherwise do not capture.
+ if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
+ CaptureRegion = OMPD_teams;
+ break;
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
CaptureRegion = OMPD_teams;
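
With a directive-name modifier, an 'if' clause applies to one component
region only; the hunk above moves the capture point for the combined
target+teams forms from the 'target' region to the 'teams' region.
Illustrative usage, assuming n and work() exist:

    #pragma omp target teams distribute parallel for if(parallel: n > 1024)
      for (int i = 0; i < n; ++i)
        work(i);
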
@@ -7682,12 +7954,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
CaptureRegion = OMPD_teams;
break;
case OMPD_parallel:
@@ -7870,20 +8142,16 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
case OMPC_schedule:
switch (DKind) {
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- CaptureRegion = OMPD_target;
- break;
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- CaptureRegion = OMPD_teams;
- break;
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
CaptureRegion = OMPD_parallel;
break;
case OMPD_for:
@@ -7941,18 +8209,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_teams_distribute_parallel_for_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
- CaptureRegion = OMPD_teams;
- break;
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
- CaptureRegion = OMPD_target;
+ CaptureRegion = OMPD_teams;
break;
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
- CaptureRegion = OMPD_parallel;
- break;
case OMPD_distribute:
case OMPD_distribute_simd:
// Do not capture thread_limit-clause expressions.
@@ -8007,19 +8271,19 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_update:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
- CaptureRegion = OMPD_task;
- break;
+ case OMPD_target:
+ case OMPD_target_simd:
case OMPD_target_teams:
+ case OMPD_target_parallel:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ CaptureRegion = OMPD_task;
+ break;
case OMPD_target_data:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
// Do not capture device-clause expressions.
break;
case OMPD_teams_distribute_parallel_for:
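
The effect of the hunk above is that the device-clause expression of the
remaining 'target' forms is now captured in the implicit 'task' region that
encloses the target construct, while 'target data' still does not capture
it. A hedged usage sketch (dev, n, and work() are assumed):

    #pragma omp target teams distribute parallel for device(dev + 1)
      for (int i = 0; i < n; ++i)
        work(i);
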
@@ -8137,7 +8401,7 @@ OMPClause *Sema::ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
getOpenMPCaptureRegionForClause(DKind, OMPC_if, NameModifier);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
@@ -8209,7 +8473,7 @@ ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
}
-static bool IsNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef,
+static bool isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef,
OpenMPClauseKind CKind,
bool StrictlyPositive) {
if (!ValExpr->isTypeDependent() && !ValExpr->isValueDependent() &&
@@ -8245,7 +8509,7 @@ OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
// OpenMP [2.5, Restrictions]
// The num_threads expression must evaluate to a positive integer value.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_threads,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_threads,
/*StrictlyPositive=*/true))
return nullptr;
@@ -8254,7 +8518,7 @@ OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
getOpenMPCaptureRegionForClause(DKind, OMPC_num_threads);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
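
When a num_threads expression has to be evaluated outside the region it
configures, tryBuildCapture wraps its subexpressions in OMPCapturedExprDecls
and buildPreInits packages those declarations into HelperValStmt, emitted
before the directive. A rough sketch of the resulting lowering (names are
illustrative, not the actual generated identifiers):

    // .captured.nthreads = <num_threads expression>;  // pre-init statement
    // #pragma omp parallel num_threads(.captured.nthreads)
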
@@ -8352,8 +8616,9 @@ OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
if (NumForLoopsResult.isInvalid())
return nullptr;
NumForLoops = NumForLoopsResult.get();
- } else
+ } else {
NumForLoops = nullptr;
+ }
DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops);
return new (Context)
OMPOrderedClause(NumForLoops, StartLoc, LParenLoc, EndLoc);
@@ -8431,24 +8696,23 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static std::string
getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
ArrayRef<unsigned> Exclude = llvm::None) {
- std::string Values;
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
unsigned Bound = Last >= 2 ? Last - 2 : 0;
unsigned Skipped = Exclude.size();
auto S = Exclude.begin(), E = Exclude.end();
- for (unsigned i = First; i < Last; ++i) {
- if (std::find(S, E, i) != E) {
+ for (unsigned I = First; I < Last; ++I) {
+ if (std::find(S, E, I) != E) {
--Skipped;
continue;
}
- Values += "'";
- Values += getOpenMPSimpleClauseTypeName(K, i);
- Values += "'";
- if (i == Bound - Skipped)
- Values += " or ";
- else if (i != Bound + 1 - Skipped)
- Values += ", ";
+ Out << "'" << getOpenMPSimpleClauseTypeName(K, I) << "'";
+ if (I == Bound - Skipped)
+ Out << " or ";
+ else if (I != Bound + 1 - Skipped)
+ Out << ", ";
}
- return Values;
+ return Out.str();
}
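
The rewrite above replaces repeated std::string concatenation with the usual
LLVM small-buffer stream idiom: a stack-allocated SmallString backs a
raw_svector_ostream, so short value lists never touch the heap. A
self-contained sketch of the same idiom (joinQuoted is a hypothetical
helper, not part of this patch):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    static std::string joinQuoted(llvm::ArrayRef<llvm::StringRef> Items) {
      llvm::SmallString<256> Buffer;
      llvm::raw_svector_ostream Out(Buffer);
      for (size_t I = 0, E = Items.size(); I != E; ++I) {
        Out << "'" << Items[I] << "'";
        if (I + 2 == E)
          Out << " or ";   // before the last element
        else if (I + 1 != E)
          Out << ", ";
      }
      return Out.str().str();
    }
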
OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
@@ -8682,7 +8946,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
OMPD_unknown &&
!CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
@@ -8986,13 +9250,13 @@ getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
} IsArrayExpr = NoArrayExpr;
if (AllowArraySection) {
if (auto *ASE = dyn_cast_or_null<ArraySubscriptExpr>(RefExpr)) {
- auto *Base = ASE->getBase()->IgnoreParenImpCasts();
+ Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
Base = TempASE->getBase()->IgnoreParenImpCasts();
RefExpr = Base;
IsArrayExpr = ArraySubscript;
} else if (auto *OASE = dyn_cast_or_null<OMPArraySectionExpr>(RefExpr)) {
- auto *Base = OASE->getBase()->IgnoreParenImpCasts();
+ Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
Base = TempOASE->getBase()->IgnoreParenImpCasts();
while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
@@ -9010,10 +9274,10 @@ getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
(S.getCurrentThisType().isNull() || !ME ||
!isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
!isa<FieldDecl>(ME->getMemberDecl()))) {
- if (IsArrayExpr != NoArrayExpr)
+ if (IsArrayExpr != NoArrayExpr) {
S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
<< ERange;
- else {
+ } else {
S.Diag(ELoc,
AllowArraySection
? diag::err_omp_expected_var_name_member_expr_or_array_item
@@ -9032,7 +9296,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> PrivateCopies;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP private clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -9064,15 +9328,15 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// listed below. For these exceptions only, listing a predetermined
// variable in a data-sharing attribute clause is allowed and overrides
// the variable's predetermined data-sharing attributes.
- DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private) {
Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_private);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
- auto CurrDir = DSAStack->getCurrentDirective();
+ OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
// Variably modified types are not supported for tasks.
if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
isOpenMPTaskingDirective(CurrDir)) {
@@ -9091,14 +9355,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
- CurrDir == OMPD_target_teams ||
- CurrDir == OMPD_target_teams_distribute ||
- CurrDir == OMPD_target_teams_distribute_parallel_for ||
- CurrDir == OMPD_target_teams_distribute_parallel_for_simd ||
- CurrDir == OMPD_target_teams_distribute_simd ||
- CurrDir == OMPD_target_parallel_for_simd ||
- CurrDir == OMPD_target_parallel_for) {
+ if (isOpenMPTargetExecutionDirective(CurrDir)) {
OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
@@ -9111,7 +9368,7 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
<< getOpenMPClauseName(OMPC_private)
<< getOpenMPClauseName(ConflictKind)
<< getOpenMPDirectiveName(CurrDir);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
}
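
isOpenMPTargetExecutionDirective() now stands in for the open-coded list of
combined 'target' forms. The restriction being enforced rejects code like
the following (illustrative):

    int x = 0;
    #pragma omp target map(tofrom: x) private(x) // error: x appears in both a
    x = 1;                                       // map clause and a
                                                 // data-sharing clause
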
@@ -9126,12 +9383,14 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// IdResolver, so the code in the OpenMP region uses original variable for
// proper diagnostics.
Type = Type.getUnqualifiedType();
- auto VDPrivate = buildVarDecl(*this, ELoc, Type, D->getName(),
- D->hasAttrs() ? &D->getAttrs() : nullptr);
+ VarDecl *VDPrivate =
+ buildVarDecl(*this, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr,
+ VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
ActOnUninitializedDecl(VDPrivate);
if (VDPrivate->isInvalidDecl())
continue;
- auto VDPrivateRefExpr = buildDeclRefExpr(
+ DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
*this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
DeclRefExpr *Ref = nullptr;
@@ -9156,7 +9415,7 @@ class DiagsUninitializedSeveretyRAII {
private:
DiagnosticsEngine &Diags;
SourceLocation SavedLoc;
- bool IsIgnored;
+ bool IsIgnored = false;
public:
DiagsUninitializedSeveretyRAII(DiagnosticsEngine &Diags, SourceLocation Loc,
@@ -9184,9 +9443,9 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SmallVector<Decl *, 4> ExprCaptures;
bool IsImplicitClause =
StartLoc.isInvalid() && LParenLoc.isInvalid() && EndLoc.isInvalid();
- auto ImplicitClauseLoc = DSAStack->getConstructLoc();
+ SourceLocation ImplicitClauseLoc = DSAStack->getConstructLoc();
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP firstprivate clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -9218,12 +9477,13 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a private
// clause requires an accessible, unambiguous copy constructor for the
// class type.
- auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();
+ QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType();
    // If an implicit firstprivate variable was found, it was checked already.
DSAStackTy::DSAVarData TopDVar;
if (!IsImplicitClause) {
- DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/false);
TopDVar = DVar;
OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
bool IsConstant = ElemType.isConstant(Context);
@@ -9241,7 +9501,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_firstprivate);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
@@ -9261,7 +9521,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_firstprivate);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
@@ -9292,7 +9552,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_firstprivate)
<< getOpenMPClauseName(OMPC_shared);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
}
@@ -9309,8 +9569,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// from the worksharing construct.
if (isOpenMPTaskingDirective(CurrDir)) {
DVar = DSAStack->hasInnermostDSA(
- D, [](OpenMPClauseKind C) -> bool { return C == OMPC_reduction; },
- [](OpenMPDirectiveKind K) -> bool {
+ D, [](OpenMPClauseKind C) { return C == OMPC_reduction; },
+ [](OpenMPDirectiveKind K) {
return isOpenMPParallelDirective(K) ||
isOpenMPWorksharingDirective(K) ||
isOpenMPTeamsDirective(K);
@@ -9322,7 +9582,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
isOpenMPTeamsDirective(DVar.DKind))) {
Diag(ELoc, diag::err_omp_parallel_reduction_in_task_firstprivate)
<< getOpenMPDirectiveName(DVar.DKind);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
}
@@ -9334,8 +9594,9 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
- [&](OMPClauseMappableExprCommon::MappableExprComponentListRef,
- OpenMPClauseKind WhereFoundClauseKind) -> bool {
+ [&ConflictKind](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind WhereFoundClauseKind) {
ConflictKind = WhereFoundClauseKind;
return true;
})) {
@@ -9343,7 +9604,7 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
<< getOpenMPClauseName(OMPC_firstprivate)
<< getOpenMPClauseName(ConflictKind)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
}
@@ -9365,8 +9626,10 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
}
Type = Type.getUnqualifiedType();
- auto VDPrivate = buildVarDecl(*this, ELoc, Type, D->getName(),
- D->hasAttrs() ? &D->getAttrs() : nullptr);
+ VarDecl *VDPrivate =
+ buildVarDecl(*this, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr,
+ VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
// Generate helper private variable and initialize it with the value of the
// original variable. The address of the original variable is replaced by
// the address of the new private variable in the CodeGen. This new variable
@@ -9376,13 +9639,13 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
    // For arrays, generate an initializer for a single element and replace it
    // with the original array element in CodeGen.
if (Type->isArrayType()) {
- auto VDInit =
+ VarDecl *VDInit =
buildVarDecl(*this, RefExpr->getExprLoc(), ElemType, D->getName());
VDInitRefExpr = buildDeclRefExpr(*this, VDInit, ElemType, ELoc);
- auto Init = DefaultLvalueConversion(VDInitRefExpr).get();
+ Expr *Init = DefaultLvalueConversion(VDInitRefExpr).get();
ElemType = ElemType.getUnqualifiedType();
- auto *VDInitTemp = buildVarDecl(*this, RefExpr->getExprLoc(), ElemType,
- ".firstprivate.temp");
+ VarDecl *VDInitTemp = buildVarDecl(*this, RefExpr->getExprLoc(), ElemType,
+ ".firstprivate.temp");
InitializedEntity Entity =
InitializedEntity::InitializeVariable(VDInitTemp);
InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc);
@@ -9396,8 +9659,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// Remove temp variable declaration.
Context.Deallocate(VDInitTemp);
} else {
- auto *VDInit = buildVarDecl(*this, RefExpr->getExprLoc(), Type,
- ".firstprivate.temp");
+ VarDecl *VDInit = buildVarDecl(*this, RefExpr->getExprLoc(), Type,
+ ".firstprivate.temp");
VDInitRefExpr = buildDeclRefExpr(*this, VDInit, RefExpr->getType(),
RefExpr->getExprLoc());
AddInitializerToDecl(VDPrivate,
@@ -9412,16 +9675,16 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
continue;
}
CurContext->addDecl(VDPrivate);
- auto VDPrivateRefExpr = buildDeclRefExpr(
+ DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
*this, VDPrivate, RefExpr->getType().getUnqualifiedType(),
RefExpr->getExprLoc());
DeclRefExpr *Ref = nullptr;
if (!VD && !CurContext->isDependentContext()) {
- if (TopDVar.CKind == OMPC_lastprivate)
+ if (TopDVar.CKind == OMPC_lastprivate) {
Ref = TopDVar.PrivateCopy;
- else {
+ } else {
Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
- if (!IsOpenMPCapturedDecl(D))
+ if (!isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
}
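
For array list items the initializer is built once for a single element and
re-expanded per element in CodeGen, which is what the '.firstprivate.temp'
machinery above arranges. The observable semantics, illustratively:

    int a[4] = {1, 2, 3, 4};
    #pragma omp task firstprivate(a)
    a[0] += 10;   // updates the task's element-wise copy; the original
                  // array is unchanged
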
@@ -9451,7 +9714,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SmallVector<Expr *, 8> AssignmentOps;
SmallVector<Decl *, 4> ExprCaptures;
SmallVector<Expr *, 4> ExprPostUpdates;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -9488,7 +9751,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
// OpenMP 4.5 [2.10.8, Distribute Construct, p.3]
// A list item may appear in a firstprivate or lastprivate clause but not
// both.
- DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_lastprivate &&
(isOpenMPDistributeDirective(CurrDir) ||
DVar.CKind != OMPC_firstprivate) &&
@@ -9496,7 +9759,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_lastprivate);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
@@ -9515,7 +9778,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_lastprivate)
<< getOpenMPClauseName(OMPC_shared);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
}
@@ -9529,19 +9792,19 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
// lastprivate clause requires an accessible, unambiguous copy assignment
// operator for the class type.
Type = Context.getBaseElementType(Type).getNonReferenceType();
- auto *SrcVD = buildVarDecl(*this, ERange.getBegin(),
- Type.getUnqualifiedType(), ".lastprivate.src",
- D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PseudoSrcExpr =
+ VarDecl *SrcVD = buildVarDecl(*this, ERange.getBegin(),
+ Type.getUnqualifiedType(), ".lastprivate.src",
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ DeclRefExpr *PseudoSrcExpr =
buildDeclRefExpr(*this, SrcVD, Type.getUnqualifiedType(), ELoc);
- auto *DstVD =
+ VarDecl *DstVD =
buildVarDecl(*this, ERange.getBegin(), Type, ".lastprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
+ DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
    // For arrays, generate an assignment operation for a single element and
    // replace it with the original array element in CodeGen.
- auto AssignmentOp = BuildBinOp(/*S=*/nullptr, ELoc, BO_Assign,
- PseudoDstExpr, PseudoSrcExpr);
+ ExprResult AssignmentOp = BuildBinOp(/*S=*/nullptr, ELoc, BO_Assign,
+ PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), ELoc,
@@ -9551,15 +9814,15 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
DeclRefExpr *Ref = nullptr;
if (!VD && !CurContext->isDependentContext()) {
- if (TopDVar.CKind == OMPC_firstprivate)
+ if (TopDVar.CKind == OMPC_firstprivate) {
Ref = TopDVar.PrivateCopy;
- else {
+ } else {
Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
- if (!IsOpenMPCapturedDecl(D))
+ if (!isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
if (TopDVar.CKind == OMPC_firstprivate ||
- (!IsOpenMPCapturedDecl(D) &&
+ (!isOpenMPCapturedDecl(D) &&
Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>())) {
ExprResult RefRes = DefaultLvalueConversion(Ref);
if (!RefRes.isUsable())
@@ -9596,7 +9859,7 @@ OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
    assert(RefExpr && "NULL expr in OpenMP shared clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -9618,17 +9881,17 @@ OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
// listed below. For these exceptions only, listing a predetermined
// variable in a data-sharing attribute clause is allowed and overrides
// the variable's predetermined data-sharing attributes.
- DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_shared &&
DVar.RefExpr) {
Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_shared);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
DeclRefExpr *Ref = nullptr;
- if (!VD && IsOpenMPCapturedDecl(D) && !CurContext->isDependentContext())
+ if (!VD && isOpenMPCapturedDecl(D) && !CurContext->isDependentContext())
Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_shared, Ref);
Vars.push_back((VD || !Ref || CurContext->isDependentContext())
@@ -9648,23 +9911,21 @@ class DSARefChecker : public StmtVisitor<DSARefChecker, bool> {
public:
bool VisitDeclRefExpr(DeclRefExpr *E) {
- if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
- DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, false);
+ if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
if (DVar.CKind == OMPC_shared && !DVar.RefExpr)
return false;
if (DVar.CKind != OMPC_unknown)
return true;
DSAStackTy::DSAVarData DVarPrivate = Stack->hasDSA(
- VD, isOpenMPPrivate, [](OpenMPDirectiveKind) -> bool { return true; },
+ VD, isOpenMPPrivate, [](OpenMPDirectiveKind) { return true; },
/*FromParent=*/true);
- if (DVarPrivate.CKind != OMPC_unknown)
- return true;
- return false;
+ return DVarPrivate.CKind != OMPC_unknown;
}
return false;
}
bool VisitStmt(Stmt *S) {
- for (auto Child : S->children()) {
+ for (Stmt *Child : S->children()) {
if (Child && Visit(Child))
return true;
}
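
DSARefChecker is an instance of the usual short-circuiting StmtVisitor scan:
handle the node kinds of interest and let the VisitStmt fallback recurse
into children until the first hit. A minimal self-contained sketch of the
pattern (FindDeclRef is hypothetical, not part of this patch):

    #include "clang/AST/Expr.h"
    #include "clang/AST/StmtVisitor.h"

    class FindDeclRef : public clang::StmtVisitor<FindDeclRef, bool> {
      const clang::ValueDecl *Target;

    public:
      explicit FindDeclRef(const clang::ValueDecl *D) : Target(D) {}

      bool VisitDeclRefExpr(clang::DeclRefExpr *E) {
        return E->getDecl() == Target; // found a reference to Target
      }

      bool VisitStmt(clang::Stmt *S) {
        for (clang::Stmt *Child : S->children())
          if (Child && Visit(Child)) // short-circuit on the first match
            return true;
        return false;
      }
    };
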
@@ -9679,8 +9940,8 @@ namespace {
// DeclRefExpr to specified OMPCapturedExprDecl.
class TransformExprToCaptures : public TreeTransform<TransformExprToCaptures> {
typedef TreeTransform<TransformExprToCaptures> BaseTransform;
- ValueDecl *Field;
- DeclRefExpr *CapturedExpr;
+ ValueDecl *Field = nullptr;
+ DeclRefExpr *CapturedExpr = nullptr;
public:
TransformExprToCaptures(Sema &SemaRef, ValueDecl *FieldDecl)
@@ -9698,12 +9959,12 @@ public:
};
} // namespace
-template <typename T>
-static T filterLookupForUDR(SmallVectorImpl<UnresolvedSet<8>> &Lookups,
- const llvm::function_ref<T(ValueDecl *)> &Gen) {
- for (auto &Set : Lookups) {
+template <typename T, typename U>
+static T filterLookupForUDR(SmallVectorImpl<U> &Lookups,
+ const llvm::function_ref<T(ValueDecl *)> Gen) {
+ for (U &Set : Lookups) {
for (auto *D : Set) {
- if (auto Res = Gen(cast<ValueDecl>(D)))
+ if (T Res = Gen(cast<ValueDecl>(D)))
return Res;
}
}
@@ -9722,7 +9983,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
LookupResult Lookup(SemaRef, ReductionId, Sema::LookupOMPReductionName);
Lookup.suppressDiagnostics();
while (S && SemaRef.LookupParsedName(Lookup, S, &ReductionIdScopeSpec)) {
- auto *D = Lookup.getRepresentativeDecl();
+ NamedDecl *D = Lookup.getRepresentativeDecl();
do {
S = S->getParent();
} while (S && !S->isDeclScope(D));
@@ -9736,7 +9997,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
cast_or_null<UnresolvedLookupExpr>(UnresolvedReduction)) {
Lookups.push_back(UnresolvedSet<8>());
Decl *PrevD = nullptr;
- for (auto *D : ULE->decls()) {
+ for (NamedDecl *D : ULE->decls()) {
if (D == PrevD)
Lookups.push_back(UnresolvedSet<8>());
else if (auto *DRD = cast<OMPDeclareReductionDecl>(D))
@@ -9747,14 +10008,14 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
if (SemaRef.CurContext->isDependentContext() || Ty->isDependentType() ||
Ty->isInstantiationDependentType() ||
Ty->containsUnexpandedParameterPack() ||
- filterLookupForUDR<bool>(Lookups, [](ValueDecl *D) -> bool {
+ filterLookupForUDR<bool>(Lookups, [](ValueDecl *D) {
return !D->isInvalidDecl() &&
(D->getType()->isDependentType() ||
D->getType()->isInstantiationDependentType() ||
D->getType()->containsUnexpandedParameterPack());
})) {
UnresolvedSet<8> ResSet;
- for (auto &Set : Lookups) {
+ for (const UnresolvedSet<8> &Set : Lookups) {
ResSet.append(Set.begin(), Set.end());
// The last item marks the end of all declarations at the specified scope.
ResSet.addDecl(Set[Set.size() - 1]);
@@ -9856,7 +10117,7 @@ struct ReductionData {
};
} // namespace
-static bool CheckOMPArraySectionConstantForReduction(
+static bool checkOMPArraySectionConstantForReduction(
ASTContext &Context, const OMPArraySectionExpr *OASE, bool &SingleElement,
SmallVectorImpl<llvm::APSInt> &ArraySizes) {
const Expr *Length = OASE->getLength();
@@ -9918,14 +10179,14 @@ static bool CheckOMPArraySectionConstantForReduction(
return true;
}
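
checkOMPArraySectionConstantForReduction decides whether every length in an
array-section list item is a compile-time constant; if so, the private
reduction copy can use a ConstantArrayType instead of a VLA. Illustrative
inputs (a, n, and f() are assumed):

    #pragma omp parallel for reduction(+: a[0:8])  // constant length:
    for (int i = 0; i < n; ++i)                    // fixed-size private copy
      a[i % 8] += f(i);
    // with reduction(+: a[0:n]) the length is a runtime value and the
    // private copy falls back to a variably-sized array
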
-static bool ActOnOMPReductionKindClause(
+static bool actOnOMPReductionKindClause(
Sema &S, DSAStackTy *Stack, OpenMPClauseKind ClauseKind,
ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions, ReductionData &RD) {
- auto DN = ReductionId.getName();
- auto OOK = DN.getCXXOverloadedOperator();
+ DeclarationName DN = ReductionId.getName();
+ OverloadedOperatorKind OOK = DN.getCXXOverloadedOperator();
BinaryOperatorKind BOK = BO_Comma;
ASTContext &Context = S.Context;
@@ -9999,7 +10260,7 @@ static bool ActOnOMPReductionKindClause(
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Unexpected reduction identifier");
case OO_None:
- if (auto *II = DN.getAsIdentifierInfo()) {
+ if (IdentifierInfo *II = DN.getAsIdentifierInfo()) {
if (II->isStr("max"))
BOK = BO_GT;
else if (II->isStr("min"))
@@ -10016,7 +10277,7 @@ static bool ActOnOMPReductionKindClause(
auto IR = UnresolvedReductions.begin(), ER = UnresolvedReductions.end();
bool FirstIter = true;
- for (auto RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "nullptr expr in OpenMP reduction clause.");
// OpenMP [2.1, C/C++]
// A list item is a variable or array section, subject to the restrictions
@@ -10057,23 +10318,25 @@ static bool ActOnOMPReductionKindClause(
QualType Type;
auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr->IgnoreParens());
auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr->IgnoreParens());
- if (ASE)
+ if (ASE) {
Type = ASE->getType().getNonReferenceType();
- else if (OASE) {
- auto BaseType = OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
- if (auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ } else if (OASE) {
+ QualType BaseType =
+ OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
Type = ATy->getElementType();
else
Type = BaseType->getPointeeType();
Type = Type.getNonReferenceType();
- } else
+ } else {
Type = Context.getBaseElementType(D->getType().getNonReferenceType());
+ }
auto *VD = dyn_cast<VarDecl>(D);
// OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
// A variable that appears in a private clause must not have an incomplete
// type or a reference type.
- if (S.RequireCompleteType(ELoc, Type,
+ if (S.RequireCompleteType(ELoc, D->getType(),
diag::err_omp_reduction_incomplete_type))
continue;
// OpenMP [2.14.3.6, reduction clause, Restrictions]
@@ -10117,19 +10380,19 @@ static bool ActOnOMPReductionKindClause(
// Any number of reduction clauses can be specified on the directive,
// but a list item can appear only once in the reduction clauses for that
// directive.
- DSAStackTy::DSAVarData DVar;
- DVar = Stack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(D, /*FromParent=*/false);
if (DVar.CKind == OMPC_reduction) {
S.Diag(ELoc, diag::err_omp_once_referenced)
<< getOpenMPClauseName(ClauseKind);
if (DVar.RefExpr)
S.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_referenced);
continue;
- } else if (DVar.CKind != OMPC_unknown) {
+ }
+ if (DVar.CKind != OMPC_unknown) {
S.Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_reduction);
- ReportOriginalDSA(S, Stack, D, DVar);
+ reportOriginalDsa(S, Stack, D, DVar);
continue;
}
@@ -10146,7 +10409,7 @@ static bool ActOnOMPReductionKindClause(
S.Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_reduction)
<< getOpenMPClauseName(OMPC_shared);
- ReportOriginalDSA(S, Stack, D, DVar);
+ reportOriginalDsa(S, Stack, D, DVar);
continue;
}
}
@@ -10212,11 +10475,11 @@ static bool ActOnOMPReductionKindClause(
}
Type = Type.getNonLValueExprType(Context).getUnqualifiedType();
- auto *LHSVD = buildVarDecl(S, ELoc, Type, ".reduction.lhs",
- D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *RHSVD = buildVarDecl(S, ELoc, Type, D->getName(),
- D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto PrivateTy = Type;
+ VarDecl *LHSVD = buildVarDecl(S, ELoc, Type, ".reduction.lhs",
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ VarDecl *RHSVD = buildVarDecl(S, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ QualType PrivateTy = Type;
    // See if we can determine constant lengths for all array sections and
    // avoid the VLA.
@@ -10224,15 +10487,14 @@ static bool ActOnOMPReductionKindClause(
if (OASE) {
bool SingleElement;
llvm::SmallVector<llvm::APSInt, 4> ArraySizes;
- ConstantLengthOASE = CheckOMPArraySectionConstantForReduction(
+ ConstantLengthOASE = checkOMPArraySectionConstantForReduction(
Context, OASE, SingleElement, ArraySizes);
// If we don't have a single element, we must emit a constant array type.
if (ConstantLengthOASE && !SingleElement) {
- for (auto &Size : ArraySizes) {
+ for (llvm::APSInt &Size : ArraySizes)
PrivateTy = Context.getConstantArrayType(
PrivateTy, Size, ArrayType::Normal, /*IndexTypeQuals=*/0);
- }
}
}
@@ -10255,15 +10517,18 @@ static bool ActOnOMPReductionKindClause(
new (Context) OpaqueValueExpr(ELoc, Context.getSizeType(), VK_RValue),
ArrayType::Normal, /*IndexTypeQuals=*/0, SourceRange());
} else if (!ASE && !OASE &&
- Context.getAsArrayType(D->getType().getNonReferenceType()))
+ Context.getAsArrayType(D->getType().getNonReferenceType())) {
PrivateTy = D->getType().getNonReferenceType();
+ }
// Private copy.
- auto *PrivateVD = buildVarDecl(S, ELoc, PrivateTy, D->getName(),
- D->hasAttrs() ? &D->getAttrs() : nullptr);
+ VarDecl *PrivateVD =
+ buildVarDecl(S, ELoc, PrivateTy, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr,
+ VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
// Add initializer for private variable.
Expr *Init = nullptr;
- auto *LHSDRE = buildDeclRefExpr(S, LHSVD, Type, ELoc);
- auto *RHSDRE = buildDeclRefExpr(S, RHSVD, Type, ELoc);
+ DeclRefExpr *LHSDRE = buildDeclRefExpr(S, LHSVD, Type, ELoc);
+ DeclRefExpr *RHSDRE = buildDeclRefExpr(S, RHSVD, Type, ELoc);
if (DeclareReductionRef.isUsable()) {
auto *DRDRef = DeclareReductionRef.getAs<DeclRefExpr>();
auto *DRD = cast<OMPDeclareReductionDecl>(DRDRef->getDecl());
@@ -10301,7 +10566,7 @@ static bool ActOnOMPReductionKindClause(
Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true,
Type, ELoc);
} else if (Type->isScalarType()) {
- auto Size = Context.getTypeSize(Type);
+ uint64_t Size = Context.getTypeSize(Type);
QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/0);
llvm::APInt InitValue = llvm::APInt::getAllOnesValue(Size);
Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
@@ -10322,7 +10587,7 @@ static bool ActOnOMPReductionKindClause(
// the reduction list item type'.
if (Type->isIntegerType() || Type->isPointerType()) {
bool IsSigned = Type->hasSignedIntegerRepresentation();
- auto Size = Context.getTypeSize(Type);
+ uint64_t Size = Context.getTypeSize(Type);
QualType IntTy =
Context.getIntTypeForBitwidth(Size, /*Signed=*/IsSigned);
llvm::APInt InitValue =
@@ -10333,7 +10598,7 @@ static bool ActOnOMPReductionKindClause(
Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
if (Type->isPointerType()) {
// Cast to pointer type.
- auto CastExpr = S.BuildCStyleCastExpr(
+ ExprResult CastExpr = S.BuildCStyleCastExpr(
ELoc, Context.getTrivialTypeSourceInfo(Type, ELoc), ELoc, Init);
if (CastExpr.isInvalid())
continue;
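
For min/max reductions the private copy is seeded with the identity element
of the operation: the least (respectively greatest) representable value of
the integer type, which the APInt calls above construct. A sketch of those
identities (helper names are assumed, not Clang's):

    #include "llvm/ADT/APInt.h"

    // Identity for 'max': the least value of the type, so any element wins.
    llvm::APInt maxIdentity(unsigned Bits, bool IsSigned) {
      return IsSigned ? llvm::APInt::getSignedMinValue(Bits)
                      : llvm::APInt::getMinValue(Bits); // zero
    }

    // Identity for 'min': the greatest value of the type.
    llvm::APInt minIdentity(unsigned Bits, bool IsSigned) {
      return IsSigned ? llvm::APInt::getSignedMaxValue(Bits)
                      : llvm::APInt::getMaxValue(Bits); // all ones
    }
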
@@ -10394,7 +10659,7 @@ static bool ActOnOMPReductionKindClause(
// codegen.
PrivateVD->setInit(RHSVD->getInit());
PrivateVD->setInitStyle(RHSVD->getInitStyle());
- auto *PrivateDRE = buildDeclRefExpr(S, PrivateVD, PrivateTy, ELoc);
+ DeclRefExpr *PrivateDRE = buildDeclRefExpr(S, PrivateVD, PrivateTy, ELoc);
ExprResult ReductionOp;
if (DeclareReductionRef.isUsable()) {
QualType RedTy = DeclareReductionRef.get()->getType();
@@ -10506,7 +10771,7 @@ static bool ActOnOMPReductionKindClause(
} else {
VarsExpr = Ref = buildCapture(S, D, SimpleRefExpr, /*WithInit=*/false);
}
- if (!S.IsOpenMPCapturedDecl(D)) {
+ if (!S.isOpenMPCapturedDecl(D)) {
RD.ExprCaptures.emplace_back(Ref->getDecl());
if (Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>()) {
ExprResult RefRes = S.DefaultLvalueConversion(Ref);
@@ -10551,8 +10816,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
ReductionData RD(VarList.size());
-
- if (ActOnOMPReductionKindClause(*this, DSAStack, OMPC_reduction, VarList,
+ if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_reduction, VarList,
StartLoc, LParenLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId,
UnresolvedReductions, RD))
@@ -10572,10 +10836,9 @@ OMPClause *Sema::ActOnOpenMPTaskReductionClause(
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
ReductionData RD(VarList.size());
-
- if (ActOnOMPReductionKindClause(*this, DSAStack, OMPC_task_reduction,
- VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionIdScopeSpec, ReductionId,
+ if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_task_reduction, VarList,
+ StartLoc, LParenLoc, ColonLoc, EndLoc,
+ ReductionIdScopeSpec, ReductionId,
UnresolvedReductions, RD))
return nullptr;
@@ -10593,8 +10856,7 @@ OMPClause *Sema::ActOnOpenMPInReductionClause(
CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions) {
ReductionData RD(VarList.size());
-
- if (ActOnOMPReductionKindClause(*this, DSAStack, OMPC_in_reduction, VarList,
+ if (actOnOMPReductionKindClause(*this, DSAStack, OMPC_in_reduction, VarList,
StartLoc, LParenLoc, ColonLoc, EndLoc,
ReductionIdScopeSpec, ReductionId,
UnresolvedReductions, RD))
@@ -10618,10 +10880,10 @@ bool Sema::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
return false;
}
-bool Sema::CheckOpenMPLinearDecl(ValueDecl *D, SourceLocation ELoc,
+bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind,
QualType Type) {
- auto *VD = dyn_cast_or_null<VarDecl>(D);
+ const auto *VD = dyn_cast_or_null<VarDecl>(D);
// A variable must not have an incomplete type or a reference type.
if (RequireCompleteType(ELoc, Type, diag::err_omp_linear_incomplete_type))
return true;
@@ -10678,7 +10940,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
SmallVector<Expr *, 4> ExprPostUpdates;
if (CheckOpenMPLinearModifier(LinKind, LinLoc))
LinKind = OMPC_LINEAR_val;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP linear clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -10702,11 +10964,11 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
// A list-item cannot appear in more than one linear clause.
// A list-item that appears in a linear clause cannot appear in any
// other data-sharing attribute clause.
- DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
if (DVar.RefExpr) {
Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_linear);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
@@ -10715,16 +10977,18 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
Type = Type.getNonReferenceType().getUnqualifiedType().getCanonicalType();
// Build private copy of original var.
- auto *Private = buildVarDecl(*this, ELoc, Type, D->getName(),
- D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PrivateRef = buildDeclRefExpr(*this, Private, Type, ELoc);
+ VarDecl *Private =
+ buildVarDecl(*this, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr,
+ VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
+ DeclRefExpr *PrivateRef = buildDeclRefExpr(*this, Private, Type, ELoc);
// Build var to save initial value.
VarDecl *Init = buildVarDecl(*this, ELoc, Type, ".linear.start");
Expr *InitExpr;
DeclRefExpr *Ref = nullptr;
if (!VD && !CurContext->isDependentContext()) {
Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
- if (!IsOpenMPCapturedDecl(D)) {
+ if (!isOpenMPCapturedDecl(D)) {
ExprCaptures.push_back(Ref->getDecl());
if (Ref->getDecl()->hasAttr<OMPCaptureNoInitAttr>()) {
ExprResult RefRes = DefaultLvalueConversion(Ref);
@@ -10746,7 +11010,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
InitExpr = VD ? SimpleRefExpr : Ref;
AddInitializerToDecl(Init, DefaultLvalueConversion(InitExpr).get(),
/*DirectInit=*/false);
- auto InitRef = buildDeclRefExpr(*this, Init, Type, ELoc);
+ DeclRefExpr *InitRef = buildDeclRefExpr(*this, Init, Type, ELoc);
DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_linear, Ref);
Vars.push_back((VD || CurContext->isDependentContext())
@@ -10810,16 +11074,15 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *CalcStep = Clause.getCalcStep();
// OpenMP [2.14.3.7, linear clause]
// If linear-step is not specified it is assumed to be 1.
- if (Step == nullptr)
+ if (!Step)
Step = SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
- else if (CalcStep) {
+ else if (CalcStep)
Step = cast<BinaryOperator>(CalcStep)->getLHS();
- }
bool HasErrors = false;
auto CurInit = Clause.inits().begin();
auto CurPrivate = Clause.privates().begin();
- auto LinKind = Clause.getModifier();
- for (auto &RefExpr : Clause.varlists()) {
+ OpenMPLinearClauseKind LinKind = Clause.getModifier();
+ for (Expr *RefExpr : Clause.varlists()) {
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -10860,22 +11123,22 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
// Build update: Var = InitExpr + IV * Step
ExprResult Update;
- if (!Info.first) {
+ if (!Info.first)
Update =
- BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), *CurPrivate,
+ buildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), *CurPrivate,
InitExpr, IV, Step, /* Subtract */ false);
- } else
+ else
Update = *CurPrivate;
Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getLocStart(),
/*DiscardedValue=*/true);
// Build final: Var = InitExpr + NumIterations * Step
ExprResult Final;
- if (!Info.first) {
- Final = BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), CapturedRef,
- InitExpr, NumIterations, Step,
- /* Subtract */ false);
- } else
+ if (!Info.first)
+ Final =
+ buildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), CapturedRef,
+ InitExpr, NumIterations, Step, /*Subtract=*/false);
+ else
Final = *CurPrivate;
Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getLocStart(),
/*DiscardedValue=*/true);
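
The two expressions built above implement the linear-clause semantics
directly: each private copy is updated as Var = InitExpr + IV * Step, and
after the last iteration the original item receives
Var = InitExpr + NumIterations * Step. Observable behavior, illustratively
(use() is a placeholder):

    int x = 10;
    #pragma omp simd linear(x : 2)
    for (int i = 0; i < 8; ++i)
      use(x);               // iteration i sees x == 10 + i * 2
    // after the loop: x == 10 + 8 * 2 == 26
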
@@ -10899,9 +11162,8 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
OMPClause *Sema::ActOnOpenMPAlignedClause(
ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
-
SmallVector<Expr *, 8> Vars;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
    assert(RefExpr && "NULL expr in OpenMP aligned clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -10938,7 +11200,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
// OpenMP [2.8.1, simd construct, Restrictions]
// A list-item cannot appear in more than one aligned clause.
- if (Expr *PrevRef = DSAStack->addUniqueAligned(D, SimpleRefExpr)) {
+ if (const Expr *PrevRef = DSAStack->addUniqueAligned(D, SimpleRefExpr)) {
Diag(ELoc, diag::err_omp_aligned_twice) << 0 << ERange;
Diag(PrevRef->getExprLoc(), diag::note_omp_explicit_dsa)
<< getOpenMPClauseName(OMPC_aligned);
@@ -10946,7 +11208,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
}
DeclRefExpr *Ref = nullptr;
- if (!VD && IsOpenMPCapturedDecl(D))
+ if (!VD && isOpenMPCapturedDecl(D))
Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
Vars.push_back(DefaultFunctionArrayConversion(
(VD || !Ref) ? RefExpr->IgnoreParens() : Ref)
@@ -10980,7 +11242,7 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SmallVector<Expr *, 8> SrcExprs;
SmallVector<Expr *, 8> DstExprs;
SmallVector<Expr *, 8> AssignmentOps;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP copyin clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
@@ -10996,7 +11258,7 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
// A list item is a variable name.
// OpenMP [2.14.4.1, Restrictions, p.1]
// A list item that appears in a copyin clause must be threadprivate.
- DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr);
+ auto *DE = dyn_cast<DeclRefExpr>(RefExpr);
if (!DE || !isa<VarDecl>(DE->getDecl())) {
Diag(ELoc, diag::err_omp_expected_var_name_member_expr)
<< 0 << RefExpr->getSourceRange();
@@ -11004,7 +11266,7 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
}
Decl *D = DE->getDecl();
- VarDecl *VD = cast<VarDecl>(D);
+ auto *VD = cast<VarDecl>(D);
QualType Type = VD->getType();
if (Type->isDependentType() || Type->isInstantiationDependentType()) {
@@ -11029,21 +11291,22 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
// A variable of class type (or array thereof) that appears in a
// copyin clause requires an accessible, unambiguous copy assignment
// operator for the class type.
- auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();
- auto *SrcVD =
+ QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType();
+ VarDecl *SrcVD =
buildVarDecl(*this, DE->getLocStart(), ElemType.getUnqualifiedType(),
".copyin.src", VD->hasAttrs() ? &VD->getAttrs() : nullptr);
- auto *PseudoSrcExpr = buildDeclRefExpr(
+ DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(
*this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc());
- auto *DstVD =
+ VarDecl *DstVD =
buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst",
VD->hasAttrs() ? &VD->getAttrs() : nullptr);
- auto *PseudoDstExpr =
+ DeclRefExpr *PseudoDstExpr =
buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc());
    // For arrays, generate an assignment operation for a single element and
    // replace it with the original array element in CodeGen.
- auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign,
- PseudoDstExpr, PseudoSrcExpr);
+ ExprResult AssignmentOp =
+ BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, PseudoDstExpr,
+ PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
@@ -11073,7 +11336,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SmallVector<Expr *, 8> SrcExprs;
SmallVector<Expr *, 8> DstExprs;
SmallVector<Expr *, 8> AssignmentOps;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
    assert(RefExpr && "NULL expr in OpenMP copyprivate clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -11098,13 +11361,14 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
// A list item that appears in a copyprivate clause may not appear in a
// private or firstprivate clause on the single construct.
if (!VD || !DSAStack->isThreadPrivate(VD)) {
- auto DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/false);
if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_copyprivate &&
DVar.RefExpr) {
Diag(ELoc, diag::err_omp_wrong_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_copyprivate);
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
@@ -11117,7 +11381,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_required_access)
<< getOpenMPClauseName(OMPC_copyprivate)
<< "threadprivate or private in the enclosing context";
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
}
@@ -11143,16 +11407,16 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
// operator for the class type.
Type = Context.getBaseElementType(Type.getNonReferenceType())
.getUnqualifiedType();
- auto *SrcVD =
+ VarDecl *SrcVD =
buildVarDecl(*this, RefExpr->getLocStart(), Type, ".copyprivate.src",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PseudoSrcExpr = buildDeclRefExpr(*this, SrcVD, Type, ELoc);
- auto *DstVD =
+ DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(*this, SrcVD, Type, ELoc);
+ VarDecl *DstVD =
buildVarDecl(*this, RefExpr->getLocStart(), Type, ".copyprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
- auto AssignmentOp = BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
- PseudoDstExpr, PseudoSrcExpr);
+ DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
+ ExprResult AssignmentOp = BuildBinOp(
+ DSAStack->getCurScope(), ELoc, BO_Assign, PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), ELoc,
@@ -11162,7 +11426,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
// No need to mark vars as copyprivate, they are already threadprivate or
// implicitly private.
- assert(VD || IsOpenMPCapturedDecl(D));
+ assert(VD || isOpenMPCapturedDecl(D));
Vars.push_back(
VD ? RefExpr->IgnoreParens()
: buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false));
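
copyprivate broadcasts the executing thread's value through the generated
pseudo source/destination assignment above. Illustrative usage (compute()
is a placeholder):

    int x;
    #pragma omp parallel private(x)
    {
      #pragma omp single copyprivate(x)
      x = compute();   // every thread's private x receives this value
      // all threads observe the broadcast value of x past this point
    }
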
@@ -11214,143 +11478,139 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
llvm::APSInt DepCounter(/*BitWidth=*/32);
llvm::APSInt TotalDepCount(/*BitWidth=*/32);
if (DepKind == OMPC_DEPEND_sink) {
- if (auto *OrderedCountExpr = DSAStack->getParentOrderedRegionParam()) {
+ if (const Expr *OrderedCountExpr = DSAStack->getParentOrderedRegionParam()) {
TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
TotalDepCount.setIsUnsigned(/*Val=*/true);
}
}
- if ((DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) ||
- DSAStack->getParentOrderedRegionParam()) {
- for (auto &RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
- if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ for (Expr *RefExpr : VarList) {
+    assert(RefExpr && "NULL expr in OpenMP depend clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
+ if (DepKind == OMPC_DEPEND_sink) {
+ if (DSAStack->getParentOrderedRegionParam() &&
+ DepCounter >= TotalDepCount) {
+ Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
+ continue;
+ }
+ ++DepCounter;
+ // OpenMP [2.13.9, Summary]
+ // depend(dependence-type : vec), where dependence-type is:
+ // 'sink' and where vec is the iteration vector, which has the form:
+ // x1 [+- d1], x2 [+- d2 ], . . . , xn [+- dn]
+ // where n is the value specified by the ordered clause in the loop
+ // directive, xi denotes the loop iteration variable of the i-th nested
+ // loop associated with the loop directive, and di is a constant
+ // non-negative integer.
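+      // For example, under '#pragma omp for ordered(2)' with loop variables
+      // i and j, 'depend(sink: i-1, j)' names the (i-1, j) iteration
+      // (illustrative example of the vec form described above).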
+ if (CurContext->isDependentContext()) {
// It will be analyzed later.
Vars.push_back(RefExpr);
continue;
}
+ SimpleExpr = SimpleExpr->IgnoreImplicit();
+ OverloadedOperatorKind OOK = OO_None;
+ SourceLocation OOLoc;
+ Expr *LHS = SimpleExpr;
+ Expr *RHS = nullptr;
+ if (auto *BO = dyn_cast<BinaryOperator>(SimpleExpr)) {
+ OOK = BinaryOperator::getOverloadedOperator(BO->getOpcode());
+ OOLoc = BO->getOperatorLoc();
+ LHS = BO->getLHS()->IgnoreParenImpCasts();
+ RHS = BO->getRHS()->IgnoreParenImpCasts();
+ } else if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(SimpleExpr)) {
+ OOK = OCE->getOperator();
+ OOLoc = OCE->getOperatorLoc();
+ LHS = OCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
+ RHS = OCE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
+ } else if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SimpleExpr)) {
+ OOK = MCE->getMethodDecl()
+ ->getNameInfo()
+ .getName()
+ .getCXXOverloadedOperator();
+ OOLoc = MCE->getCallee()->getExprLoc();
+ LHS = MCE->getImplicitObjectArgument()->IgnoreParenImpCasts();
+ RHS = MCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
+ }
+ SourceLocation ELoc;
+ SourceRange ERange;
+ auto Res = getPrivateItem(*this, LHS, ELoc, ERange,
+ /*AllowArraySection=*/false);
+ if (Res.second) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ }
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
- SourceLocation ELoc = RefExpr->getExprLoc();
- auto *SimpleExpr = RefExpr->IgnoreParenCasts();
- if (DepKind == OMPC_DEPEND_sink) {
- if (DepCounter >= TotalDepCount) {
- Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
- continue;
- }
- ++DepCounter;
- // OpenMP [2.13.9, Summary]
- // depend(dependence-type : vec), where dependence-type is:
- // 'sink' and where vec is the iteration vector, which has the form:
- // x1 [+- d1], x2 [+- d2 ], . . . , xn [+- dn]
- // where n is the value specified by the ordered clause in the loop
- // directive, xi denotes the loop iteration variable of the i-th nested
- // loop associated with the loop directive, and di is a constant
- // non-negative integer.
- if (CurContext->isDependentContext()) {
- // It will be analyzed later.
- Vars.push_back(RefExpr);
- continue;
- }
- SimpleExpr = SimpleExpr->IgnoreImplicit();
- OverloadedOperatorKind OOK = OO_None;
- SourceLocation OOLoc;
- Expr *LHS = SimpleExpr;
- Expr *RHS = nullptr;
- if (auto *BO = dyn_cast<BinaryOperator>(SimpleExpr)) {
- OOK = BinaryOperator::getOverloadedOperator(BO->getOpcode());
- OOLoc = BO->getOperatorLoc();
- LHS = BO->getLHS()->IgnoreParenImpCasts();
- RHS = BO->getRHS()->IgnoreParenImpCasts();
- } else if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(SimpleExpr)) {
- OOK = OCE->getOperator();
- OOLoc = OCE->getOperatorLoc();
- LHS = OCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
- RHS = OCE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
- } else if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SimpleExpr)) {
- OOK = MCE->getMethodDecl()
- ->getNameInfo()
- .getName()
- .getCXXOverloadedOperator();
- OOLoc = MCE->getCallee()->getExprLoc();
- LHS = MCE->getImplicitObjectArgument()->IgnoreParenImpCasts();
- RHS = MCE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
- }
- SourceLocation ELoc;
- SourceRange ERange;
- auto Res = getPrivateItem(*this, LHS, ELoc, ERange,
- /*AllowArraySection=*/false);
- if (Res.second) {
- // It will be analyzed later.
- Vars.push_back(RefExpr);
- }
- ValueDecl *D = Res.first;
- if (!D)
- continue;
-
- if (OOK != OO_Plus && OOK != OO_Minus && (RHS || OOK != OO_None)) {
- Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
- continue;
- }
- if (RHS) {
- ExprResult RHSRes = VerifyPositiveIntegerConstantInClause(
- RHS, OMPC_depend, /*StrictlyPositive=*/false);
- if (RHSRes.isInvalid())
- continue;
- }
- if (!CurContext->isDependentContext() &&
- DSAStack->getParentOrderedRegionParam() &&
- DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
- ValueDecl* VD = DSAStack->getParentLoopControlVariable(
- DepCounter.getZExtValue());
- if (VD) {
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
- << 1 << VD;
- } else {
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration) << 0;
- }
- continue;
- }
- OpsOffs.push_back({RHS, OOK});
- } else {
- auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
- (ASE &&
- !ASE->getBase()
- ->getType()
- .getNonReferenceType()
- ->isPointerType() &&
- !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << RefExpr->getSourceRange();
- continue;
- }
- bool Suppress = getDiagnostics().getSuppressAllDiagnostics();
- getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
- ExprResult Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
- RefExpr->IgnoreParenImpCasts());
- getDiagnostics().setSuppressAllDiagnostics(Suppress);
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr)) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << RefExpr->getSourceRange();
+ if (OOK != OO_Plus && OOK != OO_Minus && (RHS || OOK != OO_None)) {
+ Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
+ continue;
+ }
+ if (RHS) {
+ ExprResult RHSRes = VerifyPositiveIntegerConstantInClause(
+ RHS, OMPC_depend, /*StrictlyPositive=*/false);
+ if (RHSRes.isInvalid())
continue;
- }
}
- Vars.push_back(RefExpr->IgnoreParenImpCasts());
+ if (!CurContext->isDependentContext() &&
+ DSAStack->getParentOrderedRegionParam() &&
+ DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
+ const ValueDecl *VD =
+ DSAStack->getParentLoopControlVariable(DepCounter.getZExtValue());
+ if (VD)
+ Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 1 << VD;
+ else
+ Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration) << 0;
+ continue;
+ }
+ OpsOffs.emplace_back(RHS, OOK);
+ } else {
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (ASE &&
+ !ASE->getBase()->getType().getNonReferenceType()->isPointerType() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << RefExpr->getSourceRange();
+ continue;
+ }
+ bool Suppress = getDiagnostics().getSuppressAllDiagnostics();
+ getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true);
+ ExprResult Res =
+ CreateBuiltinUnaryOp(ELoc, UO_AddrOf, RefExpr->IgnoreParenImpCasts());
+ getDiagnostics().setSuppressAllDiagnostics(Suppress);
+ if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr)) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << RefExpr->getSourceRange();
+ continue;
+ }
}
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
+ }
- if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
- TotalDepCount > VarList.size() &&
- DSAStack->getParentOrderedRegionParam() &&
- DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
- Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration) << 1
- << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
- }
- if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
- Vars.empty())
- return nullptr;
+ if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
+ TotalDepCount > VarList.size() &&
+ DSAStack->getParentOrderedRegionParam() &&
+ DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
+ Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 1 << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
}
+ if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
+ Vars.empty())
+ return nullptr;
+
auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
DepKind, DepLoc, ColonLoc, Vars);
- if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source)
+ if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
+ DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
return C;
}
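
A minimal doacross sketch of what the sink/source checks above enforce (the loop nest, bounds, and array are illustrative, not from this patch): each sink entry must name a loop iteration variable of the ordered loop nest, optionally offset by a constant through + or -.

#pragma omp parallel for ordered(2)
for (int i = 1; i < N; ++i) {
  for (int j = 1; j < M; ++j) {
    #pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
    a[i][j] = a[i - 1][j] + a[i][j - 1];  // waits on the two sink iterations
    #pragma omp ordered depend(source)    // publishes iteration (i, j)
  }
}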
@@ -11363,7 +11623,7 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
// OpenMP [2.9.1, Restrictions]
// The device expression must evaluate to a non-negative integer value.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_device,
/*StrictlyPositive=*/false))
return nullptr;
@@ -11372,46 +11632,50 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
getOpenMPCaptureRegionForClause(DKind, OMPC_device);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
- return new (Context)
- OMPDeviceClause(ValExpr, HelperValStmt, StartLoc, LParenLoc, EndLoc);
+ return new (Context) OMPDeviceClause(ValExpr, HelperValStmt, CaptureRegion,
+ StartLoc, LParenLoc, EndLoc);
}
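
A sketch of the clause being validated here (the value is illustrative): the device expression must be a non-negative integer, and on combined constructs it is now evaluated in the region recorded by CaptureRegion.

int dev = 1;                    // any non-negative integer expression
#pragma omp target device(dev)  // device(-1) would be diagnosed
{ /* offloaded region */ }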
-static bool CheckTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
- DSAStackTy *Stack, QualType QTy) {
+static bool checkTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
+ DSAStackTy *Stack, QualType QTy,
+ bool FullCheck = true) {
NamedDecl *ND;
if (QTy->isIncompleteType(&ND)) {
SemaRef.Diag(SL, diag::err_incomplete_type) << QTy << SR;
return false;
}
+ if (FullCheck && !SemaRef.CurContext->isDependentContext() &&
+ !QTy.isTrivialType(SemaRef.Context))
+ SemaRef.Diag(SL, diag::warn_omp_non_trivial_type_mapped) << QTy << SR;
return true;
}
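
The new FullCheck path only warns, since bitwise-copying a non-trivial type to a device is suspicious but not ill-formed. A case that would trigger warn_omp_non_trivial_type_mapped (the struct is illustrative):

struct S { S(const S &); ~S(); int *P; };  // non-trivial copy ctor and dtor
S Obj;
#pragma omp target map(tofrom : Obj)       // mapped as raw bytes: warning
{ }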
-/// \brief Return true if it can be proven that the provided array expression
+/// Return true if it can be proven that the provided array expression
/// (array section or array subscript) does NOT specify the whole size of the
/// array whose base type is \a BaseQTy.
-static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
+static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
const Expr *E,
QualType BaseQTy) {
- auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
// If this is an array subscript, it refers to the whole size if the size of
// the dimension is constant and equals 1. Also, an array section assumes the
// format of an array subscript if no colon is used.
if (isa<ArraySubscriptExpr>(E) || (OASE && OASE->getColonLoc().isInvalid())) {
- if (auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
+ if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
return ATy->getSize().getSExtValue() != 1;
// Size can't be evaluated statically.
return false;
}
assert(OASE && "Expecting array section if not an array subscript.");
- auto *LowerBound = OASE->getLowerBound();
- auto *Length = OASE->getLength();
+ const Expr *LowerBound = OASE->getLowerBound();
+ const Expr *Length = OASE->getLength();
  // If there is a lower bound that does not evaluate to zero, we are not
// covering the whole dimension.
@@ -11434,7 +11698,7 @@ static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// We can only check if the length is the same as the size of the dimension
// if we have a constant array.
- auto *CATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr());
+ const auto *CATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr());
if (!CATy)
return false;
@@ -11448,10 +11712,10 @@ static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// Return true if it can be proven that the provided array expression (array
// section or array subscript) does NOT specify a single element of the array
// whose base type is \a BaseQTy.
-static bool CheckArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
+static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
const Expr *E,
QualType BaseQTy) {
- auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
  // An array subscript always refers to a single element. Also, an array section
// assumes the format of an array subscript if no colon is used.
@@ -11459,13 +11723,13 @@ static bool CheckArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
return false;
assert(OASE && "Expecting array section if not an array subscript.");
- auto *Length = OASE->getLength();
+ const Expr *Length = OASE->getLength();
// If we don't have a length we have to check if the array has unitary size
// for this dimension. Also, we should always expect a length if the base type
  // is a pointer.
if (!Length) {
- if (auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
+ if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
return ATy->getSize().getSExtValue() != 1;
// We cannot assume anything.
return false;
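
Taken together, the two predicates classify array expressions roughly as follows (array and bounds are illustrative):

int A[10];
// A[0:10] -> cannot be proven partial; whole-size sections stay allowed
// A[2:4]  -> nonzero lower bound: provably not the whole dimension
// A[3]    -> a subscript is a single element unless the dimension is 1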
@@ -11483,7 +11747,7 @@ static bool CheckArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
// cannot be determined and do all the necessary checks to see if the expression
// is valid as a standalone mappable expression. In the process, record all the
// components of the expression.
-static Expr *CheckMapClauseExpressionBase(
+static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
OpenMPClauseKind CKind, bool NoDiagnose) {
@@ -11509,7 +11773,7 @@ static Expr *CheckMapClauseExpressionBase(
//
// We want to retrieve the member expression 'this->S';
- Expr *RelevantExpr = nullptr;
+ const Expr *RelevantExpr = nullptr;
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.2]
// If a list item is an array section, it must specify contiguous storage.
@@ -11548,7 +11812,7 @@ static Expr *CheckMapClauseExpressionBase(
// Record the component.
CurComponents.emplace_back(CurE, CurE->getDecl());
} else if (auto *CurE = dyn_cast<MemberExpr>(E)) {
- auto *BaseE = CurE->getBase()->IgnoreParenImpCasts();
+ Expr *BaseE = CurE->getBase()->IgnoreParenImpCasts();
if (isa<CXXThisExpr>(BaseE))
// We found a base expression: this->Val.
@@ -11592,15 +11856,13 @@ static Expr *CheckMapClauseExpressionBase(
// A list item cannot be a variable that is a member of a structure with
// a union type.
//
- if (auto *RT = CurType->getAs<RecordType>()) {
- if (RT->isUnionType()) {
- if (!NoDiagnose) {
- SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
- << CurE->getSourceRange();
- return nullptr;
- }
- continue;
+ if (CurType->isUnionType()) {
+ if (!NoDiagnose) {
+ SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
+ << CurE->getSourceRange();
+ return nullptr;
}
+ continue;
}
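
The union restriction rejects list items like the following (types are illustrative); with NoDiagnose set, the caller merely skips the component:

union U { int I; float F; };
struct T { U Storage; } Obj;
#pragma omp target map(tofrom : Obj.Storage.I)  // member of a union: rejected
{ }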
// If we got a member expression, we should not expect any array section
@@ -11630,7 +11892,7 @@ static Expr *CheckMapClauseExpressionBase(
// If we got an array subscript that express the whole dimension we
// can have any array expressions before. If it only expressing part of
// the dimension, we can only have unitary-size array expressions.
- if (CheckArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE,
+ if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE,
E->getType()))
AllowWholeSizeArraySection = false;
@@ -11658,9 +11920,9 @@ static Expr *CheckMapClauseExpressionBase(
}
bool NotWhole =
- CheckArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE, CurType);
+ checkArrayExpressionDoesNotReferToWholeSize(SemaRef, CurE, CurType);
bool NotUnity =
- CheckArrayExpressionDoesNotReferToUnitySize(SemaRef, CurE, CurType);
+ checkArrayExpressionDoesNotReferToUnitySize(SemaRef, CurE, CurType);
if (AllowWholeSizeArraySection) {
// Any array section is currently allowed. Allowing a whole size array
@@ -11698,8 +11960,8 @@ static Expr *CheckMapClauseExpressionBase(
// Return true if expression E associated with value VD has conflicts with other
// map information.
-static bool CheckMapConflicts(
- Sema &SemaRef, DSAStackTy *DSAS, ValueDecl *VD, Expr *E,
+static bool checkMapConflicts(
+ Sema &SemaRef, DSAStackTy *DSAS, const ValueDecl *VD, const Expr *E,
bool CurrentRegionOnly,
OMPClauseMappableExprCommon::MappableExprComponentListRef CurComponents,
OpenMPClauseKind CKind) {
@@ -11721,17 +11983,19 @@ static bool CheckMapConflicts(
bool FoundError = DSAS->checkMappableExprComponentListsForDecl(
VD, CurrentRegionOnly,
- [&](OMPClauseMappableExprCommon::MappableExprComponentListRef
- StackComponents,
- OpenMPClauseKind) -> bool {
-
+ [&IsEnclosedByDataEnvironmentExpr, &SemaRef, VD, CurrentRegionOnly, ELoc,
+ ERange, CKind, &EnclosingExpr,
+ CurComponents](OMPClauseMappableExprCommon::MappableExprComponentListRef
+ StackComponents,
+ OpenMPClauseKind) {
assert(!StackComponents.empty() &&
"Map clause expression with no components!");
assert(StackComponents.back().getAssociatedDeclaration() == VD &&
"Map clause expression with unexpected base!");
+ (void)VD;
// The whole expression in the stack.
- auto *RE = StackComponents.front().getAssociatedExpression();
+ const Expr *RE = StackComponents.front().getAssociatedExpression();
// Expressions must start from the same base. Here we detect at which
// point both expressions diverge from each other and see if we can
@@ -11774,17 +12038,17 @@ static bool CheckMapConflicts(
// If they are, the maps completely overlap, which is legal.
for (; SI != SE; ++SI) {
QualType Type;
- if (auto *ASE =
+ if (const auto *ASE =
dyn_cast<ArraySubscriptExpr>(SI->getAssociatedExpression())) {
Type = ASE->getBase()->IgnoreParenImpCasts()->getType();
- } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(
+ } else if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(
SI->getAssociatedExpression())) {
- auto *E = OASE->getBase()->IgnoreParenImpCasts();
+ const Expr *E = OASE->getBase()->IgnoreParenImpCasts();
Type =
OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
}
if (Type.isNull() || Type->isAnyPointerType() ||
- CheckArrayExpressionDoesNotReferToWholeSize(
+ checkArrayExpressionDoesNotReferToWholeSize(
SemaRef, SI->getAssociatedExpression(), Type))
break;
}
@@ -11797,9 +12061,9 @@ static bool CheckMapConflicts(
// other, it means they are sharing storage.
if (CI == CE && SI == SE) {
if (CurrentRegionOnly) {
- if (CKind == OMPC_map)
+ if (CKind == OMPC_map) {
SemaRef.Diag(ELoc, diag::err_omp_map_shared_storage) << ERange;
- else {
+ } else {
assert(CKind == OMPC_to || CKind == OMPC_from);
SemaRef.Diag(ELoc, diag::err_omp_once_referenced_in_target_update)
<< ERange;
@@ -11807,12 +12071,11 @@ static bool CheckMapConflicts(
SemaRef.Diag(RE->getExprLoc(), diag::note_used_here)
<< RE->getSourceRange();
return true;
- } else {
- // If we find the same expression in the enclosing data environment,
- // that is legal.
- IsEnclosedByDataEnvironmentExpr = true;
- return false;
}
+ // If we find the same expression in the enclosing data environment,
+ // that is legal.
+ IsEnclosedByDataEnvironmentExpr = true;
+ return false;
}
QualType DerivedType =
@@ -11842,14 +12105,21 @@ static bool CheckMapConflicts(
DerivedLoc,
diag::err_omp_pointer_mapped_along_with_derived_section)
<< DerivedLoc;
- } else {
+ SemaRef.Diag(RE->getExprLoc(), diag::note_used_here)
+ << RE->getSourceRange();
+ return true;
+ }
+ if (CI->getAssociatedExpression()->getStmtClass() !=
+ SI->getAssociatedExpression()->getStmtClass() ||
+ CI->getAssociatedDeclaration()->getCanonicalDecl() ==
+ SI->getAssociatedDeclaration()->getCanonicalDecl()) {
assert(CI != CE && SI != SE);
- SemaRef.Diag(DerivedLoc, diag::err_omp_same_pointer_derreferenced)
+ SemaRef.Diag(DerivedLoc, diag::err_omp_same_pointer_dereferenced)
<< DerivedLoc;
+ SemaRef.Diag(RE->getExprLoc(), diag::note_used_here)
+ << RE->getSourceRange();
+ return true;
}
- SemaRef.Diag(RE->getExprLoc(), diag::note_used_here)
- << RE->getSourceRange();
- return true;
}
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.4]
@@ -11858,9 +12128,9 @@ static bool CheckMapConflicts(
//
// An expression is a subset of the other.
if (CurrentRegionOnly && (CI == CE || SI == SE)) {
- if (CKind == OMPC_map)
+ if (CKind == OMPC_map) {
SemaRef.Diag(ELoc, diag::err_omp_map_shared_storage) << ERange;
- else {
+ } else {
assert(CKind == OMPC_to || CKind == OMPC_from);
SemaRef.Diag(ELoc, diag::err_omp_once_referenced_in_target_update)
<< ERange;
@@ -11912,7 +12182,7 @@ static bool CheckMapConflicts(
namespace {
// Utility struct that gathers all the related lists associated with a mappable
// expression.
-struct MappableVarListInfo final {
+struct MappableVarListInfo {
// The list of expressions.
ArrayRef<Expr *> VarList;
// The list of processed expressions.
@@ -11952,11 +12222,11 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// In the end we should have the same amount of declarations and component
// lists.
- for (auto &RE : MVLI.VarList) {
+ for (Expr *RE : MVLI.VarList) {
assert(RE && "Null expr in omp to/from/map clause");
SourceLocation ELoc = RE->getExprLoc();
- auto *VE = RE->IgnoreParenLValueCasts();
+ const Expr *VE = RE->IgnoreParenLValueCasts();
if (VE->isValueDependent() || VE->isTypeDependent() ||
VE->isInstantiationDependent() ||
@@ -11967,7 +12237,7 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
continue;
}
- auto *SimpleExpr = RE->IgnoreParenCasts();
+ Expr *SimpleExpr = RE->IgnoreParenCasts();
if (!RE->IgnoreParenImpCasts()->isLValue()) {
SemaRef.Diag(ELoc,
@@ -11981,8 +12251,8 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// Obtain the array or member expression bases if required. Also, fill the
// components array with all the components identified in the process.
- auto *BE = CheckMapClauseExpressionBase(SemaRef, SimpleExpr, CurComponents,
- CKind, /*NoDiagnose=*/false);
+ const Expr *BE = checkMapClauseExpressionBase(
+ SemaRef, SimpleExpr, CurComponents, CKind, /*NoDiagnose=*/false);
if (!BE)
continue;
@@ -11999,7 +12269,7 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
"Expecting components to have associated only canonical declarations.");
auto *VD = dyn_cast<VarDecl>(CurDeclaration);
- auto *FD = dyn_cast<FieldDecl>(CurDeclaration);
+ const auto *FD = dyn_cast<FieldDecl>(CurDeclaration);
assert((VD || FD) && "Only variables or fields are expected here!");
(void)FD;
@@ -12009,10 +12279,10 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// OpenMP 4.5 [2.10.5, target update Construct]
// threadprivate variables cannot appear in a from clause.
if (VD && DSAS->isThreadPrivate(VD)) {
- auto DVar = DSAS->getTopDSA(VD, false);
+ DSAStackTy::DSAVarData DVar = DSAS->getTopDSA(VD, /*FromParent=*/false);
SemaRef.Diag(ELoc, diag::err_omp_threadprivate_in_clause)
<< getOpenMPClauseName(CKind);
- ReportOriginalDSA(SemaRef, DSAS, VD, DVar);
+ reportOriginalDsa(SemaRef, DSAS, VD, DVar);
continue;
}
@@ -12024,11 +12294,11 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// with the current construct separately from the enclosing data
// environment, because the restrictions are different. We only have to
// check conflicts across regions for the map clauses.
- if (CheckMapConflicts(SemaRef, DSAS, CurDeclaration, SimpleExpr,
+ if (checkMapConflicts(SemaRef, DSAS, CurDeclaration, SimpleExpr,
/*CurrentRegionOnly=*/true, CurComponents, CKind))
break;
if (CKind == OMPC_map &&
- CheckMapConflicts(SemaRef, DSAS, CurDeclaration, SimpleExpr,
+ checkMapConflicts(SemaRef, DSAS, CurDeclaration, SimpleExpr,
/*CurrentRegionOnly=*/false, CurComponents, CKind))
break;
@@ -12036,13 +12306,20 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
// If the type of a list item is a reference to a type T then the type will
// be considered to be T for all purposes of this clause.
- QualType Type = CurDeclaration->getType().getNonReferenceType();
+ auto I = llvm::find_if(
+ CurComponents,
+ [](const OMPClauseMappableExprCommon::MappableComponent &MC) {
+ return MC.getAssociatedDeclaration();
+ });
+ assert(I != CurComponents.end() && "Null decl on map clause.");
+ QualType Type =
+ I->getAssociatedDeclaration()->getType().getNonReferenceType();
// OpenMP 4.5 [2.10.5, target update Construct, Restrictions, p.4]
// A list item in a to or from clause must have a mappable type.
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.9]
// A list item must have a mappable type.
- if (!CheckTypeMappable(VE->getExprLoc(), VE->getSourceRange(), SemaRef,
+ if (!checkTypeMappable(VE->getExprLoc(), VE->getSourceRange(), SemaRef,
DSAS, Type))
continue;
@@ -12078,18 +12355,14 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if ((DKind == OMPD_target || DKind == OMPD_target_teams ||
- DKind == OMPD_target_teams_distribute ||
- DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd) && VD) {
- auto DVar = DSAS->getTopDSA(VD, false);
+ if (VD && isOpenMPTargetExecutionDirective(DKind)) {
+ DSAStackTy::DSAVarData DVar = DSAS->getTopDSA(VD, /*FromParent=*/false);
if (isOpenMPPrivate(DVar.CKind)) {
SemaRef.Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_map)
<< getOpenMPDirectiveName(DSAS->getCurrentDirective());
- ReportOriginalDSA(SemaRef, DSAS, CurDeclaration, DVar);
+ reportOriginalDsa(SemaRef, DSAS, CurDeclaration, DVar);
continue;
}
}
@@ -12190,14 +12463,14 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
FilterLookupForScope(Lookup, DC, S, /*ConsiderLinkage=*/false,
/*AllowInlineNamespace=*/false);
llvm::DenseMap<OMPDeclareReductionDecl *, bool> UsedAsPrevious;
- auto Filter = Lookup.makeFilter();
+ LookupResult::Filter Filter = Lookup.makeFilter();
while (Filter.hasNext()) {
auto *PrevDecl = cast<OMPDeclareReductionDecl>(Filter.next());
if (InCompoundScope) {
auto I = UsedAsPrevious.find(PrevDecl);
if (I == UsedAsPrevious.end())
UsedAsPrevious[PrevDecl] = false;
- if (auto *D = PrevDecl->getPrevDeclInScope())
+ if (OMPDeclareReductionDecl *D = PrevDecl->getPrevDeclInScope())
UsedAsPrevious[D] = true;
}
PreviousRedeclTypes[PrevDecl->getType().getCanonicalType()] =
@@ -12205,7 +12478,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
}
Filter.done();
if (InCompoundScope) {
- for (auto &PrevData : UsedAsPrevious) {
+ for (const auto &PrevData : UsedAsPrevious) {
if (!PrevData.second) {
PrevDRD = PrevData.first;
break;
@@ -12221,8 +12494,8 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveStart(
PrevDRDInScope = PrevDRDInScope->getPrevDeclInScope();
} while (PrevDRDInScope != nullptr);
}
- for (auto &TyData : ReductionTypes) {
- auto I = PreviousRedeclTypes.find(TyData.first.getCanonicalType());
+ for (const auto &TyData : ReductionTypes) {
+ const auto I = PreviousRedeclTypes.find(TyData.first.getCanonicalType());
bool Invalid = false;
if (I != PreviousRedeclTypes.end()) {
Diag(TyData.second, diag::err_omp_declare_reduction_redefinition)
@@ -12251,7 +12524,7 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
// Enter new function scope.
PushFunctionScope();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
getCurFunction()->setHasOMPDeclareReductionCombiner();
if (S != nullptr)
@@ -12269,7 +12542,7 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
// reference. C lang does not support references, so pass all parameters as
// pointers.
// Create 'T omp_in;' variable.
- auto *OmpInParm =
+ VarDecl *OmpInParm =
buildVarDecl(*this, D->getLocation(), ReductionType, "omp_in");
// Create 'T* omp_parm;T omp_out;'. All references to 'omp_out' will
  // be replaced by '*omp_parm' during codegen. This is required because 'omp_out'
@@ -12277,7 +12550,7 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
// reference. C lang does not support references, so pass all parameters as
// pointers.
// Create 'T omp_out;' variable.
- auto *OmpOutParm =
+ VarDecl *OmpOutParm =
buildVarDecl(*this, D->getLocation(), ReductionType, "omp_out");
if (S != nullptr) {
PushOnScopeChains(OmpInParm, S);
@@ -12307,7 +12580,7 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
// Enter new function scope.
PushFunctionScope();
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
if (S != nullptr)
PushDeclContext(S, DRD);
@@ -12324,7 +12597,7 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
// reference. C lang does not support references, so pass all parameters as
// pointers.
// Create 'T omp_priv;' variable.
- auto *OmpPrivParm =
+ VarDecl *OmpPrivParm =
buildVarDecl(*this, D->getLocation(), ReductionType, "omp_priv");
// Create 'T* omp_parm;T omp_orig;'. All references to 'omp_orig' will
  // be replaced by '*omp_parm' during codegen. This is required because 'omp_orig'
@@ -12332,7 +12605,7 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
// reference. C lang does not support references, so pass all parameters as
// pointers.
// Create 'T omp_orig;' variable.
- auto *OmpOrigParm =
+ VarDecl *OmpOrigParm =
buildVarDecl(*this, D->getLocation(), ReductionType, "omp_orig");
if (S != nullptr) {
PushOnScopeChains(OmpPrivParm, S);
@@ -12367,13 +12640,14 @@ void Sema::ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid) {
- for (auto *D : DeclReductions.get()) {
+ for (Decl *D : DeclReductions.get()) {
if (IsValid) {
- auto *DRD = cast<OMPDeclareReductionDecl>(D);
- if (S != nullptr)
- PushOnScopeChains(DRD, S, /*AddToContext=*/false);
- } else
+ if (S)
+ PushOnScopeChains(cast<OMPDeclareReductionDecl>(D), S,
+ /*AddToContext=*/false);
+ } else {
D->setInvalidDecl();
+ }
}
return DeclReductions;
}
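
For context, a typical declare reduction that exercises the omp_in/omp_out combiner and omp_priv initializer scaffolding built above (the merge reduction itself is illustrative):

#pragma omp declare reduction(merge : std::vector<int> :              \
    omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))      \
    initializer(omp_priv = std::vector<int>())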
@@ -12387,7 +12661,7 @@ OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
  // OpenMP [teams Construct, Restrictions]
// The num_teams expression must evaluate to a positive integer value.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_teams,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_teams,
/*StrictlyPositive=*/true))
return nullptr;
@@ -12396,7 +12670,7 @@ OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
getOpenMPCaptureRegionForClause(DKind, OMPC_num_teams);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
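
A sketch of the clauses this and the following function validate (values are illustrative): both expressions must be strictly positive, and on combined target constructs they are captured through the helper pre-init statement.

#pragma omp target teams num_teams(4) thread_limit(64)
{ /* num_teams(0) or thread_limit(0) would be diagnosed */ }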
@@ -12414,7 +12688,7 @@ OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
  // OpenMP [teams Construct, Restrictions]
// The thread_limit expression must evaluate to a positive integer value.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_thread_limit,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_thread_limit,
/*StrictlyPositive=*/true))
return nullptr;
@@ -12423,7 +12697,7 @@ OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
getOpenMPCaptureRegionForClause(DKind, OMPC_thread_limit);
if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
@@ -12440,7 +12714,7 @@ OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
  // OpenMP [2.9.1, task Construct]
// The priority-value is a non-negative numerical scalar expression.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_priority,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_priority,
/*StrictlyPositive=*/false))
return nullptr;
@@ -12456,7 +12730,7 @@ OMPClause *Sema::ActOnOpenMPGrainsizeClause(Expr *Grainsize,
  // OpenMP [2.9.2, taskloop Construct]
// The parameter of the grainsize clause must be a positive integer
// expression.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
/*StrictlyPositive=*/true))
return nullptr;
@@ -12472,7 +12746,7 @@ OMPClause *Sema::ActOnOpenMPNumTasksClause(Expr *NumTasks,
  // OpenMP [2.9.2, taskloop Construct]
// The parameter of the num_tasks clause must be a positive integer
// expression.
- if (!IsNonNegativeIntegerValue(ValExpr, *this, OMPC_num_tasks,
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_num_tasks,
/*StrictlyPositive=*/true))
return nullptr;
@@ -12534,7 +12808,7 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
OMPD_unknown &&
!CurContext->isDependentContext()) {
ValExpr = MakeFullExpr(ValExpr).get();
- llvm::MapVector<Expr *, DeclRefExpr *> Captures;
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
HelperValStmt = buildPreInits(Context, Captures);
}
@@ -12599,7 +12873,6 @@ bool Sema::ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc) {
void Sema::ActOnFinishOpenMPDeclareTargetDirective() {
assert(IsInOpenMPDeclareTargetContext &&
"Unexpected ActOnFinishOpenMPDeclareTargetDirective");
-
IsInOpenMPDeclareTargetContext = false;
}
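
The start/finish pair brackets a declare target region such as this (names are illustrative):

#pragma omp declare target
int Counter;                      // a device copy is created
int bump() { return ++Counter; }  // compiled for the device too
#pragma omp end declare target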
@@ -12634,9 +12907,8 @@ void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
if (isa<VarDecl>(ND) || isa<FunctionDecl>(ND)) {
if (!SameDirectiveDecls.insert(cast<NamedDecl>(ND->getCanonicalDecl())))
Diag(Id.getLoc(), diag::err_omp_declare_target_multiple) << Id.getName();
-
if (!ND->hasAttr<OMPDeclareTargetDeclAttr>()) {
- Attr *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT);
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT);
ND->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
@@ -12645,15 +12917,16 @@ void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
Diag(Id.getLoc(), diag::err_omp_declare_target_to_and_link)
<< Id.getName();
}
- } else
+ } else {
Diag(Id.getLoc(), diag::err_omp_invalid_target_decl) << Id.getName();
+ }
}
static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
Sema &SemaRef, Decl *D) {
if (!D)
return;
- Decl *LD = nullptr;
+ const Decl *LD = nullptr;
if (isa<TagDecl>(D)) {
LD = cast<TagDecl>(D)->getDefinition();
} else if (isa<VarDecl>(D)) {
@@ -12662,56 +12935,63 @@ static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
    // If this is an implicit variable, that is legal and we do not need to
    // do anything.
if (cast<VarDecl>(D)->isImplicit()) {
- Attr *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
D->addAttr(A);
if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
return;
}
-
- } else if (isa<FunctionDecl>(D)) {
+ } else if (const auto *F = dyn_cast<FunctionDecl>(D)) {
const FunctionDecl *FD = nullptr;
- if (cast<FunctionDecl>(D)->hasBody(FD))
- LD = const_cast<FunctionDecl *>(FD);
-
- // If the definition is associated with the current declaration in the
- // target region (it can be e.g. a lambda) that is legal and we do not need
- // to do anything else.
- if (LD == D) {
- Attr *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
- D->addAttr(A);
- if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
- return;
+ if (cast<FunctionDecl>(D)->hasBody(FD)) {
+ LD = FD;
+      // If the definition is associated with the current declaration in the
+      // target region (it can be, e.g., a lambda), that is legal and we do
+      // not need to do anything else.
+ if (LD == D) {
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
+ D->addAttr(A);
+ if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
+ ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
+ return;
+ }
+ } else if (F->isFunctionTemplateSpecialization() &&
+ F->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ // Check if the function is implicitly instantiated from the template
+ // defined in the declare target region.
+ const FunctionTemplateDecl *FTD = F->getPrimaryTemplate();
+ if (FTD && FTD->hasAttr<OMPDeclareTargetDeclAttr>())
+ return;
}
}
if (!LD)
LD = D;
if (LD && !LD->hasAttr<OMPDeclareTargetDeclAttr>() &&
- (isa<VarDecl>(LD) || isa<FunctionDecl>(LD))) {
+ ((isa<VarDecl>(LD) && !isa<ParmVarDecl>(LD)) || isa<FunctionDecl>(LD))) {
// Outlined declaration is not declared target.
- if (LD->isOutOfLine()) {
- SemaRef.Diag(LD->getLocation(), diag::warn_omp_not_in_target_context);
- SemaRef.Diag(SL, diag::note_used_here) << SR;
- } else {
- DeclContext *DC = LD->getDeclContext();
- while (DC) {
- if (isa<FunctionDecl>(DC) &&
- cast<FunctionDecl>(DC)->hasAttr<OMPDeclareTargetDeclAttr>())
- break;
- DC = DC->getParent();
- }
- if (DC)
- return;
+ if (!isa<FunctionDecl>(LD)) {
+ if (LD->isOutOfLine()) {
+ SemaRef.Diag(LD->getLocation(), diag::warn_omp_not_in_target_context);
+ SemaRef.Diag(SL, diag::note_used_here) << SR;
+ } else {
+ const DeclContext *DC = LD->getDeclContext();
+ while (DC &&
+ (!isa<FunctionDecl>(DC) ||
+ !cast<FunctionDecl>(DC)->hasAttr<OMPDeclareTargetDeclAttr>()))
+ DC = DC->getParent();
+ if (DC)
+ return;
- // Is not declared in target context.
- SemaRef.Diag(LD->getLocation(), diag::warn_omp_not_in_target_context);
- SemaRef.Diag(SL, diag::note_used_here) << SR;
+ // Is not declared in target context.
+ SemaRef.Diag(LD->getLocation(), diag::warn_omp_not_in_target_context);
+ SemaRef.Diag(SL, diag::note_used_here) << SR;
+ }
}
// Mark decl as declared target to prevent further diagnostic.
- Attr *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
D->addAttr(A);
if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
@@ -12722,11 +13002,9 @@ static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
static bool checkValueDeclInTarget(SourceLocation SL, SourceRange SR,
Sema &SemaRef, DSAStackTy *Stack,
ValueDecl *VD) {
- if (VD->hasAttr<OMPDeclareTargetDeclAttr>())
- return true;
- if (!CheckTypeMappable(SL, SR, SemaRef, Stack, VD->getType()))
- return false;
- return true;
+ return VD->hasAttr<OMPDeclareTargetDeclAttr>() ||
+ checkTypeMappable(SL, SR, SemaRef, Stack, VD->getType(),
+ /*FullCheck=*/false);
}
void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
@@ -12735,22 +13013,27 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
return;
SourceRange SR = E ? E->getSourceRange() : D->getSourceRange();
SourceLocation SL = E ? E->getLocStart() : D->getLocation();
- // 2.10.6: threadprivate variable cannot appear in a declare target directive.
- if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ // Only global variables can be marked as declare target.
+ if (VD->isLocalVarDeclOrParm())
+ return;
+ // 2.10.6: threadprivate variable cannot appear in a declare target
+ // directive.
if (DSAStack->isThreadPrivate(VD)) {
Diag(SL, diag::err_omp_threadprivate_in_target);
- ReportOriginalDSA(*this, DSAStack, VD, DSAStack->getTopDSA(VD, false));
+ reportOriginalDsa(*this, DSAStack, VD, DSAStack->getTopDSA(VD, false));
return;
}
}
- if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ if (auto *VD = dyn_cast<ValueDecl>(D)) {
    // Any problem with a variable declared with an incomplete type will be
    // reported as normal, so there is no need to check it here.
if ((E || !VD->getType()->isIncompleteType()) &&
!checkValueDeclInTarget(SL, SR, *this, DSAStack, VD)) {
// Mark decl as declared target to prevent further diagnostic.
- if (isa<VarDecl>(VD) || isa<FunctionDecl>(VD)) {
- Attr *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ if (isa<VarDecl>(VD) || isa<FunctionDecl>(VD) ||
+ isa<FunctionTemplateDecl>(VD)) {
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
Context, OMPDeclareTargetDeclAttr::MT_To);
VD->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
@@ -12759,7 +13042,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
return;
}
}
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->hasAttr<OMPDeclareTargetDeclAttr>() &&
(FD->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() ==
OMPDeclareTargetDeclAttr::MT_Link)) {
@@ -12769,11 +13052,22 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
return;
}
}
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D)) {
+ if (FTD->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ (FTD->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() ==
+ OMPDeclareTargetDeclAttr::MT_Link)) {
+ assert(IdLoc.isValid() && "Source location is expected");
+ Diag(IdLoc, diag::err_omp_function_in_link_clause);
+ Diag(FTD->getLocation(), diag::note_defined_here) << FTD;
+ return;
+ }
+ }
if (!E) {
// Checking declaration inside declare target region.
if (!D->hasAttr<OMPDeclareTargetDeclAttr>() &&
- (isa<VarDecl>(D) || isa<FunctionDecl>(D))) {
- Attr *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ (isa<VarDecl>(D) || isa<FunctionDecl>(D) ||
+ isa<FunctionTemplateDecl>(D))) {
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
Context, OMPDeclareTargetDeclAttr::MT_To);
D->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
@@ -12820,7 +13114,7 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SmallVector<Expr *, 8> PrivateCopies;
SmallVector<Expr *, 8> Inits;
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP use_device_ptr clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -12849,20 +13143,22 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
}
// Build the private variable and the expression that refers to it.
- auto VDPrivate = buildVarDecl(*this, ELoc, Type, D->getName(),
- D->hasAttrs() ? &D->getAttrs() : nullptr);
+ auto VDPrivate =
+ buildVarDecl(*this, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr,
+ VD ? cast<DeclRefExpr>(SimpleRefExpr) : nullptr);
if (VDPrivate->isInvalidDecl())
continue;
CurContext->addDecl(VDPrivate);
- auto VDPrivateRefExpr = buildDeclRefExpr(
+ DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
*this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
// Add temporary variable to initialize the private copy of the pointer.
- auto *VDInit =
+ VarDecl *VDInit =
buildVarDecl(*this, RefExpr->getExprLoc(), Type, ".devptr.temp");
- auto *VDInitRefExpr = buildDeclRefExpr(*this, VDInit, RefExpr->getType(),
- RefExpr->getExprLoc());
+ DeclRefExpr *VDInitRefExpr = buildDeclRefExpr(
+ *this, VDInit, RefExpr->getType(), RefExpr->getExprLoc());
AddInitializerToDecl(VDPrivate,
DefaultLvalueConversion(VDInitRefExpr).get(),
/*DirectInit=*/false);
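
A sketch of the transformation this sets up (N and the device call are hypothetical): inside the region, the private copy of p is initialized from the .devptr.temp temporary holding the device address.

int *p = (int *)malloc(N * sizeof(int));
#pragma omp target data map(tofrom : p[0:N]) use_device_ptr(p)
{
  launch_on_device(p);  // hypothetical: p here is the device pointer
}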
@@ -12902,7 +13198,7 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
MappableVarListInfo MVLI(VarList);
- for (auto &RefExpr : VarList) {
+ for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP is_device_ptr clause.");
SourceLocation ELoc;
SourceRange ERange;
@@ -12927,17 +13223,17 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
// Check if the declaration in the clause does not show up in any data
// sharing attribute.
- auto DVar = DSAStack->getTopDSA(D, false);
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
if (isOpenMPPrivate(DVar.CKind)) {
Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(DVar.CKind)
<< getOpenMPClauseName(OMPC_is_device_ptr)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- ReportOriginalDSA(*this, DSAStack, D, DVar);
+ reportOriginalDsa(*this, DSAStack, D, DVar);
continue;
}
- Expr *ConflictExpr;
+ const Expr *ConflictExpr;
if (DSAStack->checkMappableExprComponentListsForDecl(
D, /*CurrentRegionOnly=*/true,
[&ConflictExpr](
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 2144845f4dd3..08af485ef4c7 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -223,6 +223,7 @@ bool StandardConversionSequence::isPointerConversionToBool() const {
// a pointer.
if (getToType(1)->isBooleanType() &&
(getFromType()->isPointerType() ||
+ getFromType()->isMemberPointerType() ||
getFromType()->isObjCObjectPointerType() ||
getFromType()->isBlockPointerType() ||
getFromType()->isNullPtrType() ||
@@ -288,11 +289,11 @@ static const Expr *IgnoreNarrowingConversion(const Expr *Converted) {
/// value of the expression prior to the narrowing conversion.
/// \param ConstantType If this is an NK_Constant_Narrowing conversion, the
/// type of the expression prior to the narrowing conversion.
-NarrowingKind
-StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
- const Expr *Converted,
- APValue &ConstantValue,
- QualType &ConstantType) const {
+/// \param IgnoreFloatToIntegralConversion If true, type-narrowing conversions
+/// from floating point types to integral types should be ignored.
+NarrowingKind StandardConversionSequence::getNarrowingKind(
+ ASTContext &Ctx, const Expr *Converted, APValue &ConstantValue,
+ QualType &ConstantType, bool IgnoreFloatToIntegralConversion) const {
assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++");
// C++11 [dcl.init.list]p7:
@@ -327,7 +328,10 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
FloatingIntegralConversion:
if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) {
return NK_Type_Narrowing;
- } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
+ } else if (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isRealFloatingType()) {
+ if (IgnoreFloatToIntegralConversion)
+ return NK_Not_Narrowing;
llvm::APSInt IntConstantValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
assert(Initializer && "Unknown conversion expression");
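
A worked example of the constant/variable split in this branch, assuming the usual list-initialization rules (names are illustrative):

const int K = 1;
int V = K;
float F1{K};           // constant, value preserved exactly: NK_Not_Narrowing
float F2{0x20000001};  // constant, loses precision in float: NK_Constant_Narrowing
float F3{V};           // not a constant expression: NK_Variable_Narrowing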
@@ -580,7 +584,7 @@ namespace {
};
}
-/// \brief Convert from Sema's representation of template deduction information
+/// Convert from Sema's representation of template deduction information
/// to the form used in overload-candidate information.
DeductionFailureInfo
clang::MakeDeductionFailureInfo(ASTContext &Context,
@@ -625,6 +629,8 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
break;
}
+ case Sema::TDK_IncompletePack:
+ // FIXME: It's slightly wasteful to allocate two TemplateArguments for this.
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified: {
// FIXME: Should allocate from normal heap so that we can free this later.
@@ -667,6 +673,7 @@ void DeductionFailureInfo::Destroy() {
case Sema::TDK_NonDependentConversionFailure:
break;
+ case Sema::TDK_IncompletePack:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
case Sema::TDK_DeducedMismatch:
@@ -716,6 +723,7 @@ TemplateParameter DeductionFailureInfo::getTemplateParameter() {
case Sema::TDK_InvalidExplicitArguments:
return TemplateParameter::getFromOpaqueValue(Data);
+ case Sema::TDK_IncompletePack:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
return static_cast<DFIParamWithArguments*>(Data)->Param;
@@ -736,6 +744,7 @@ TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_Incomplete:
+ case Sema::TDK_IncompletePack:
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
@@ -773,6 +782,7 @@ const TemplateArgument *DeductionFailureInfo::getFirstArg() {
case Sema::TDK_NonDependentConversionFailure:
return nullptr;
+ case Sema::TDK_IncompletePack:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
case Sema::TDK_DeducedMismatch:
@@ -794,6 +804,7 @@ const TemplateArgument *DeductionFailureInfo::getSecondArg() {
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
+ case Sema::TDK_IncompletePack:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_InvalidExplicitArguments:
@@ -997,6 +1008,13 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
Match = *I;
return Ovl_Match;
}
+
+ // Builtins that have custom typechecking or have a reference should
+ // not be overloadable or redeclarable.
+ if (!getASTContext().canBuiltinBeRedeclared(OldF)) {
+ Match = *I;
+ return Ovl_NonFunction;
+ }
} else if (isa<UsingDecl>(OldD) || isa<UsingPackDecl>(OldD)) {
// We can overload with these, which can show up when doing
// redeclaration checks for UsingDecls.
@@ -1182,7 +1200,7 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return false;
}
-/// \brief Checks availability of the function depending on the current
+/// Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \arg FD is unavailable and current context is inside
@@ -1200,7 +1218,7 @@ bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
return true;
}
-/// \brief Tries a user-defined conversion from From to ToType.
+/// Tries a user-defined conversion from From to ToType.
///
/// Produces an implicit conversion sequence for when a standard conversion
/// is not an option. See TryImplicitConversion for more information.
@@ -1412,7 +1430,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return PerformImplicitConversion(From, ToType, ICS, Action);
}
-/// \brief Determine whether the conversion from FromType to ToType is a valid
+/// Determine whether the conversion from FromType to ToType is a valid
/// conversion that strips "noexcept" or "noreturn" off the nested function
/// type.
bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
@@ -1473,12 +1491,10 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
// Drop 'noexcept' if not present in target type.
if (const auto *FromFPT = dyn_cast<FunctionProtoType>(FromFn)) {
const auto *ToFPT = cast<FunctionProtoType>(ToFn);
- if (FromFPT->isNothrow(Context) && !ToFPT->isNothrow(Context)) {
+ if (FromFPT->isNothrow() && !ToFPT->isNothrow()) {
FromFn = cast<FunctionType>(
- Context.getFunctionType(FromFPT->getReturnType(),
- FromFPT->getParamTypes(),
- FromFPT->getExtProtoInfo().withExceptionSpec(
- FunctionProtoType::ExceptionSpecInfo()))
+ Context.getFunctionTypeWithExceptionSpec(QualType(FromFPT, 0),
+ EST_None)
.getTypePtr());
Changed = true;
}
@@ -1511,7 +1527,7 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
return true;
}
-/// \brief Determine whether the conversion from FromType to ToType is a valid
+/// Determine whether the conversion from FromType to ToType is a valid
/// vector conversion.
///
/// \param ICK Will be set to the vector conversion kind, if this is a vector
@@ -1766,8 +1782,8 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
(FromType == S.Context.LongDoubleTy &&
ToType == S.Context.Float128Ty));
if (Float128AndLongDouble &&
- (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) !=
- &llvm::APFloat::IEEEdouble()))
+ (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) ==
+ &llvm::APFloat::PPCDoubleDouble()))
return false;
}
// Floating point conversions (C++ 4.8).
@@ -1998,6 +2014,14 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
isCompleteType(From->getLocStart(), FromType))
return Context.hasSameUnqualifiedType(
ToType, FromEnumType->getDecl()->getPromotionType());
+
+ // C++ [conv.prom]p5:
+ // If the bit-field has an enumerated type, it is treated as any other
+ // value of that type for promotion purposes.
+ //
+ // ... so do not fall through into the bit-field checks below in C++.
+ if (getLangOpts().CPlusPlus)
+ return false;
}
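
The new early return changes behavior for enum bit-fields in C++ (a sketch; the struct is illustrative): promotion now follows the enum's promotion type instead of falling into the bit-field width rules below.

enum E { A, B, C };
struct S { E BF : 2; };
int f(S s) { return s.BF + 1; }  // in C++, s.BF promotes like any other E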
// C++0x [conv.prom]p2:
@@ -2045,6 +2069,11 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// other value of that type for promotion purposes (C++ 4.5p3).
// FIXME: We should delay checking of bit-fields until we actually perform the
// conversion.
+ //
+ // FIXME: In C, only bit-fields of types _Bool, int, or unsigned int may be
+ // promoted, per C11 6.3.1.1/2. We promote all bit-fields (including enum
+ // bit-fields and those whose underlying type is larger than int) for GCC
+ // compatibility.
if (From) {
if (FieldDecl *MemberDecl = From->getSourceBitField()) {
llvm::APSInt BitWidth;
@@ -2111,7 +2140,7 @@ bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
return false;
}
-/// \brief Determine if a conversion is a complex promotion.
+/// Determine if a conversion is a complex promotion.
///
/// A complex promotion is defined as a complex -> complex conversion
/// where the conversion between the underlying real types is a
@@ -2345,7 +2374,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
return false;
}
-/// \brief Adopt the given qualifiers for the given type.
+/// Adopt the given qualifiers for the given type.
static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
Qualifiers TQs = T.getQualifiers();
@@ -2533,7 +2562,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
return false;
}
-/// \brief Determine whether this is an Objective-C writeback conversion,
+/// Determine whether this is an Objective-C writeback conversion,
/// used for parameter passing when performing automatic reference counting.
///
/// \param FromType The type we're converting from.
@@ -2593,7 +2622,7 @@ bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
IncompatibleObjC))
return false;
- /// \brief Construct the type we're converting to, which is a pointer to
+ /// Construct the type we're converting to, which is a pointer to
/// __autoreleasing pointee.
FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
ConvertedType = Context.getPointerType(FromPointee);
@@ -2803,9 +2832,9 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
// Handle exception specification differences on canonical type (in C++17
// onwards).
if (cast<FunctionProtoType>(FromFunction->getCanonicalTypeUnqualified())
- ->isNothrow(Context) !=
+ ->isNothrow() !=
cast<FunctionProtoType>(ToFunction->getCanonicalTypeUnqualified())
- ->isNothrow(Context)) {
+ ->isNothrow()) {
PDiag << ft_noexcept;
return;
}
@@ -3065,7 +3094,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
// in multi-level pointers, subject to the following rules: [...]
bool PreviousToQualsIncludeConst = true;
bool UnwrappedAnyPointer = false;
- while (Context.UnwrapSimilarPointerTypes(FromType, ToType)) {
+ while (Context.UnwrapSimilarTypes(FromType, ToType)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
// conversion. Then, if all is well, we unwrap one more level of
@@ -3121,6 +3150,15 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
= PreviousToQualsIncludeConst && ToQuals.hasConst();
}
+  // Allow address space promotion by the language rules implemented in
+  // Type::Qualifiers::isAddressSpaceSupersetOf.
+ Qualifiers FromQuals = FromType.getQualifiers();
+ Qualifiers ToQuals = ToType.getQualifiers();
+ if (!ToQuals.isAddressSpaceSupersetOf(FromQuals) &&
+ !FromQuals.isAddressSpaceSupersetOf(ToQuals)) {
+ return false;
+ }
+
// We are left with FromType and ToType being the pointee types
// after unwrapping the original FromType and ToType the same number
// of types. If we unwrapped any pointers, and if FromType and
@@ -3129,7 +3167,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
return UnwrappedAnyPointer && Context.hasSameUnqualifiedType(FromType,ToType);
}
-/// \brief - Determine whether this is a conversion from a scalar type to an
+/// - Determine whether this is a conversion from a scalar type to an
/// atomic type.
///
/// If successful, updates \c SCS's second and third steps in the conversion
@@ -3472,7 +3510,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
return true;
}
-/// \brief Compare the user-defined conversion functions or constructors
+/// Compare the user-defined conversion functions or constructors
/// of two user-defined conversion sequences to determine whether any ordering
/// is possible.
static ImplicitConversionSequence::CompareKind
@@ -3620,16 +3658,6 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
return Result;
}
-static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) {
- while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
- Qualifiers Quals;
- T1 = Context.getUnqualifiedArrayType(T1, Quals);
- T2 = Context.getUnqualifiedArrayType(T2, Quals);
- }
-
- return Context.hasSameUnqualifiedType(T1, T2);
-}
-
// Per 13.3.3.2p3, compare the given standard conversion sequences to
// determine if one is a proper subset of the other.
static ImplicitConversionSequence::CompareKind
@@ -3653,7 +3681,7 @@ compareStandardConversionSubsets(ASTContext &Context,
Result = ImplicitConversionSequence::Worse;
else
return ImplicitConversionSequence::Indistinguishable;
- } else if (!hasSimilarType(Context, SCS1.getToType(1), SCS2.getToType(1)))
+ } else if (!Context.hasSimilarType(SCS1.getToType(1), SCS2.getToType(1)))
return ImplicitConversionSequence::Indistinguishable;
if (SCS1.Third == SCS2.Third) {
@@ -3674,7 +3702,7 @@ compareStandardConversionSubsets(ASTContext &Context,
return ImplicitConversionSequence::Indistinguishable;
}
-/// \brief Determine whether one of the given reference bindings is better
+/// Determine whether one of the given reference bindings is better
/// than the other based on what kind of bindings they are.
static bool
isBetterReferenceBindingKind(const StandardConversionSequence &SCS1,
@@ -3927,7 +3955,7 @@ CompareQualificationConversions(Sema &S,
: ImplicitConversionSequence::Better;
}
- while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ while (S.Context.UnwrapSimilarTypes(T1, T2)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
// conversion. Then, if all is well, we unwrap one more level of
@@ -4199,7 +4227,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
return ImplicitConversionSequence::Indistinguishable;
}
-/// \brief Determine whether the given type is valid, e.g., it is not an invalid
+/// Determine whether the given type is valid, e.g., it is not an invalid
/// C++ class.
static bool isTypeValid(QualType T) {
if (CXXRecordDecl *Record = T->getAsCXXRecordDecl())
@@ -4302,7 +4330,7 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
return Ref_Related;
}
-/// \brief Look for a user-defined conversion to a value reference-compatible
+/// Look for a user-defined conversion to a value reference-compatible
/// with DeclType. Return true if something definite is found.
static bool
FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
@@ -4429,7 +4457,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
llvm_unreachable("Invalid OverloadResult!");
}
-/// \brief Compute an implicit conversion sequence for reference
+/// Compute an implicit conversion sequence for reference
/// initialization.
static ImplicitConversionSequence
TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
@@ -5137,6 +5165,13 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
FromRecordType = From->getType();
DestType = ImplicitParamRecordType;
FromClassification = From->Classify(Context);
+
+ // When performing member access on an rvalue, materialize a temporary.
+ if (From->isRValue()) {
+ From = CreateMaterializeTemporaryExpr(FromRecordType, From,
+ Method->getRefQualifier() !=
+ RefQualifierKind::RQ_RValue);
+ }
}
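The added block materializes a temporary for the implicit object argument; a hedged illustration (S and make are hypothetical names) of a call that now goes through a materialized temporary:

```cpp
struct S {
  int f() { return 42; }
};
S make();

int g() {
  // The object expression is a prvalue; Sema now materializes a
  // temporary before binding it as the implicit object argument of S::f.
  return make().f();
}
```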
// Note that we always use the true parent context when performing
@@ -5145,7 +5180,8 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
*this, From->getLocStart(), From->getType(), FromClassification, Method,
Method->getParent());
if (ICS.isBad()) {
- if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) {
+ switch (ICS.Bad.Kind) {
+ case BadConversionSequence::bad_qualifiers: {
Qualifiers FromQs = FromRecordType.getQualifiers();
Qualifiers ToQs = DestType.getQualifiers();
unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
@@ -5158,10 +5194,28 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
<< Method->getDeclName();
return ExprError();
}
+ break;
+ }
+
+ case BadConversionSequence::lvalue_ref_to_rvalue:
+ case BadConversionSequence::rvalue_ref_to_lvalue: {
+ bool IsRValueQualified =
+ Method->getRefQualifier() == RefQualifierKind::RQ_RValue;
+ Diag(From->getLocStart(), diag::err_member_function_call_bad_ref)
+ << Method->getDeclName() << FromClassification.isRValue()
+ << IsRValueQualified;
+ Diag(Method->getLocation(), diag::note_previous_decl)
+ << Method->getDeclName();
+ return ExprError();
+ }
+
+ case BadConversionSequence::no_conversion:
+ case BadConversionSequence::unrelated_class:
+ break;
}
return Diag(From->getLocStart(),
- diag::err_implicit_object_parameter_init)
+ diag::err_member_function_call_bad_type)
<< ImplicitParamRecordType << FromRecordType << From->getSourceRange();
}
@@ -5380,10 +5434,11 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
Eval.Diag = &Notes;
+ Expr::ConstExprUsage Usage = CCE == Sema::CCEK_TemplateArg
+ ? Expr::EvaluateForMangling
+ : Expr::EvaluateForCodeGen;
- if ((T->isReferenceType()
- ? !Result.get()->EvaluateAsLValue(Eval, S.Context)
- : !Result.get()->EvaluateAsRValue(Eval, S.Context)) ||
+ if (!Result.get()->EvaluateAsConstantExpr(Eval, Usage, S.Context) ||
(RequireInt && !Eval.Val.isInt())) {
// The expression can't be folded, so we can't keep it at this position in
// the AST.
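EvaluateAsConstantExpr now takes a usage mode: template arguments must produce manglable values, while other converted constant expressions only need to survive code generation. A minimal sketch of both contexts, assuming nothing beyond standard C++11:

```cpp
template <int *P> struct X {};
int n;
X<&n> x; // template argument: the constant must be manglable

enum class E { a = 1 };
int f(E e) {
  switch (e) {
  case E::a: // case value: converted constant expression of type E
    return 0;
  }
  return 1;
}
```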
@@ -5622,7 +5677,7 @@ collectViableConversionCandidates(Sema &SemaRef, Expr *From, QualType ToType,
}
}
-/// \brief Attempt to convert the given expression to a type which is accepted
+/// Attempt to convert the given expression to a type which is accepted
/// by the given converter.
///
/// This routine will attempt to convert an expression of class type to a
@@ -5940,6 +5995,13 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
+ if (Function->isMultiVersion() && Function->hasAttr<TargetAttr>() &&
+ !Function->getAttr<TargetAttr>()->isDefaultVersion()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_non_default_multiversion_function;
+ return;
+ }
+
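Non-default target-multiversion candidates are marked non-viable here, so overload resolution pins calls to the default version and runtime dispatch happens later; a hedged sketch using Clang's target attribute:

```cpp
// Only the "default" version participates as a viable candidate; the
// resolver selects the avx2 body at load time, not during Sema.
__attribute__((target("default"))) int f() { return 0; }
__attribute__((target("avx2")))    int f() { return 2; }

int call() { return f(); } // binds against the default version
```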
if (Constructor) {
// C++ [class.copy]p3:
// A member function template is never instantiated to perform the copy
@@ -6220,12 +6282,17 @@ convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
if (!Function->isVariadic() && Args.size() < Function->getNumParams()) {
for (unsigned i = Args.size(), e = Function->getNumParams(); i != e; ++i) {
ParmVarDecl *P = Function->getParamDecl(i);
- ExprResult R = S.PerformCopyInitialization(
- InitializedEntity::InitializeParameter(S.Context,
- Function->getParamDecl(i)),
- SourceLocation(),
- P->hasUninstantiatedDefaultArg() ? P->getUninstantiatedDefaultArg()
- : P->getDefaultArg());
+ Expr *DefArg = P->hasUninstantiatedDefaultArg()
+ ? P->getUninstantiatedDefaultArg()
+ : P->getDefaultArg();
+ // This can only happen in code completion, i.e. when PartialOverloading
+ // is true.
+ if (!DefArg)
+ return false;
+ ExprResult R =
+ S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ S.Context, Function->getParamDecl(i)),
+ SourceLocation(), DefArg);
if (R.isInvalid())
return false;
ConvertedArgs.push_back(R.get());
@@ -6336,67 +6403,65 @@ bool Sema::diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
});
}
-/// \brief Add all of the function declarations in the given function set to
+/// Add all of the function declarations in the given function set to
/// the overload candidate set.
void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args,
- OverloadCandidateSet& CandidateSet,
+ OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs,
bool SuppressUserConversions,
bool PartialOverloading,
bool FirstArgumentIsBase) {
for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) {
NamedDecl *D = F.getDecl()->getUnderlyingDecl();
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- ArrayRef<Expr *> FunctionArgs = Args;
- if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic()) {
- QualType ObjectType;
- Expr::Classification ObjectClassification;
- if (Args.size() > 0) {
- if (Expr *E = Args[0]) {
- // Use the explit base to restrict the lookup:
- ObjectType = E->getType();
- ObjectClassification = E->Classify(Context);
- } // .. else there is an implit base.
- FunctionArgs = Args.slice(1);
- }
- AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
- cast<CXXMethodDecl>(FD)->getParent(), ObjectType,
- ObjectClassification, FunctionArgs, CandidateSet,
- SuppressUserConversions, PartialOverloading);
- } else {
- // Slice the first argument (which is the base) when we access
- // static method as non-static
- if (Args.size() > 0 && (!Args[0] || (FirstArgumentIsBase && isa<CXXMethodDecl>(FD) &&
- !isa<CXXConstructorDecl>(FD)))) {
- assert(cast<CXXMethodDecl>(FD)->isStatic());
- FunctionArgs = Args.slice(1);
- }
- AddOverloadCandidate(FD, F.getPair(), FunctionArgs, CandidateSet,
- SuppressUserConversions, PartialOverloading);
- }
- } else {
- FunctionTemplateDecl *FunTmpl = cast<FunctionTemplateDecl>(D);
- if (isa<CXXMethodDecl>(FunTmpl->getTemplatedDecl()) &&
- !cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl())->isStatic()) {
- QualType ObjectType;
- Expr::Classification ObjectClassification;
+ ArrayRef<Expr *> FunctionArgs = Args;
+
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ FunctionDecl *FD =
+ FunTmpl ? FunTmpl->getTemplatedDecl() : cast<FunctionDecl>(D);
+
+ if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic()) {
+ QualType ObjectType;
+ Expr::Classification ObjectClassification;
+ if (Args.size() > 0) {
if (Expr *E = Args[0]) {
- // Use the explit base to restrict the lookup:
+ // Use the explicit base to restrict the lookup:
ObjectType = E->getType();
ObjectClassification = E->Classify(Context);
- } // .. else there is an implit base.
+ } // .. else there is an implicit base.
+ FunctionArgs = Args.slice(1);
+ }
+ if (FunTmpl) {
AddMethodTemplateCandidate(
FunTmpl, F.getPair(),
cast<CXXRecordDecl>(FunTmpl->getDeclContext()),
ExplicitTemplateArgs, ObjectType, ObjectClassification,
- Args.slice(1), CandidateSet, SuppressUserConversions,
+ FunctionArgs, CandidateSet, SuppressUserConversions,
PartialOverloading);
} else {
- AddTemplateOverloadCandidate(FunTmpl, F.getPair(),
- ExplicitTemplateArgs, Args,
- CandidateSet, SuppressUserConversions,
- PartialOverloading);
+ AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
+ cast<CXXMethodDecl>(FD)->getParent(), ObjectType,
+ ObjectClassification, FunctionArgs, CandidateSet,
+ SuppressUserConversions, PartialOverloading);
+ }
+ } else {
+ // This branch handles both standalone functions and static methods.
+
+ // Slice the first argument (which is the base) when we access
+ // static method as non-static.
+ if (Args.size() > 0 &&
+ (!Args[0] || (FirstArgumentIsBase && isa<CXXMethodDecl>(FD) &&
+ !isa<CXXConstructorDecl>(FD)))) {
+ assert(cast<CXXMethodDecl>(FD)->isStatic());
+ FunctionArgs = Args.slice(1);
+ }
+ if (FunTmpl) {
+ AddTemplateOverloadCandidate(
+ FunTmpl, F.getPair(), ExplicitTemplateArgs, FunctionArgs,
+ CandidateSet, SuppressUserConversions, PartialOverloading);
+ } else {
+ AddOverloadCandidate(FD, F.getPair(), FunctionArgs, CandidateSet,
+ SuppressUserConversions, PartialOverloading);
}
}
}
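The restructured loop computes FD up front so methods and method templates share the object-argument handling, and static members get their base argument sliced in one place; for example (S is a hypothetical class):

```cpp
struct S {
  static void f(int);
  void g(int);
};

void test(S s) {
  s.f(1); // static member: the explicit base is sliced off the
          // argument list before the candidate is added
  s.g(1); // non-static: the base becomes the implicit object argument
}
```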
@@ -6564,9 +6629,15 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
+
+ if (Method->isMultiVersion() && Method->hasAttr<TargetAttr>() &&
+ !Method->getAttr<TargetAttr>()->isDefaultVersion()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_non_default_multiversion_function;
+ }
}
-/// \brief Add a C++ member function template as a candidate to the candidate
+/// Add a C++ member function template as a candidate to the candidate
/// set, using template argument deduction to produce an appropriate member
/// function template specialization.
void
@@ -6634,7 +6705,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
Conversions);
}
-/// \brief Add a C++ function template specialization as a candidate
+/// Add a C++ function template specialization as a candidate
/// in the candidate set, using template argument deduction to produce
/// an appropriate function template specialization.
void
@@ -6967,9 +7038,15 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
+
+ if (Conversion->isMultiVersion() && Conversion->hasAttr<TargetAttr>() &&
+ !Conversion->getAttr<TargetAttr>()->isDefaultVersion()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_non_default_multiversion_function;
+ }
}
-/// \brief Adds a conversion function template specialization
+/// Adds a conversion function template specialization
/// candidate to the overload set, using template argument deduction
/// to deduce the template arguments of the conversion function
/// template from the type that we are converting to (C++
@@ -7124,7 +7201,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
-/// \brief Add overload candidates for overloaded operators that are
+/// Add overload candidates for overloaded operators that are
/// member functions.
///
/// Add the overloaded operator candidates that are member functions
@@ -7260,18 +7337,18 @@ class BuiltinCandidateTypeSet {
/// used in the built-in candidates.
TypeSet EnumerationTypes;
- /// \brief The set of vector types that will be used in the built-in
+ /// The set of vector types that will be used in the built-in
/// candidates.
TypeSet VectorTypes;
- /// \brief A flag indicating non-record types are viable candidates
+ /// A flag indicating non-record types are viable candidates
bool HasNonRecordTypes;
- /// \brief A flag indicating whether either arithmetic or enumeration types
+ /// A flag indicating whether either arithmetic or enumeration types
/// were present in the candidate set.
bool HasArithmeticOrEnumeralTypes;
- /// \brief A flag indicating whether the nullptr type was present in the
+ /// A flag indicating whether the nullptr type was present in the
/// candidate set.
bool HasNullPtrType;
@@ -7524,7 +7601,7 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
}
}
-/// \brief Helper function for AddBuiltinOperatorCandidates() that adds
+/// Helper function for AddBuiltinOperatorCandidates() that adds
/// the volatile- and non-volatile-qualified assignment operators for the
/// given type to the candidate set.
static void AddBuiltinAssignmentOperatorCandidates(Sema &S,
@@ -7602,7 +7679,7 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
namespace {
-/// \brief Helper class to manage the addition of builtin operator overload
+/// Helper class to manage the addition of builtin operator overload
/// candidates. It provides shared state and utility methods used throughout
/// the process, as well as a helper method to add each group of builtin
/// operator overloads from the standard to a candidate set.
@@ -7658,6 +7735,8 @@ class BuiltinOperatorOverloadBuilder {
ArithmeticTypes.push_back(S.Context.BoolTy);
ArithmeticTypes.push_back(S.Context.CharTy);
ArithmeticTypes.push_back(S.Context.WCharTy);
+ if (S.Context.getLangOpts().Char8)
+ ArithmeticTypes.push_back(S.Context.Char8Ty);
ArithmeticTypes.push_back(S.Context.Char16Ty);
ArithmeticTypes.push_back(S.Context.Char32Ty);
ArithmeticTypes.push_back(S.Context.SignedCharTy);
@@ -7673,7 +7752,7 @@ class BuiltinOperatorOverloadBuilder {
"Enough inline storage for all arithmetic types.");
}
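With char8_t enabled (C++2a, or -fchar8_t), the new entry lets the builtin operator candidates cover the type like the other character types; a hedged example:

```cpp
// Requires -std=c++2a (or -fchar8_t).
bool eq(char8_t a, char8_t b) {
  return a == b; // resolved against the builtin candidates, which now
                 // enumerate char8_t alongside char16_t and char32_t
}
```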
- /// \brief Helper method to factor out the common pattern of adding overloads
+ /// Helper method to factor out the common pattern of adding overloads
/// for '++' and '--' builtin operators.
void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy,
bool HasVolatile,
@@ -7733,11 +7812,13 @@ public:
InitArithmeticTypes();
}
+ // Increment is deprecated for bool since C++17.
+ //
// C++ [over.built]p3:
//
- // For every pair (T, VQ), where T is an arithmetic type, and VQ
- // is either volatile or empty, there exist candidate operator
- // functions of the form
+ // For every pair (T, VQ), where T is an arithmetic type other
+ // than bool, and VQ is either volatile or empty, there exist
+ // candidate operator functions of the form
//
// VQ T& operator++(VQ T&);
// T operator++(VQ T&, int);
@@ -7754,10 +7835,16 @@ public:
if (!HasArithmeticOrEnumeralCandidateType)
return;
- for (unsigned Arith = (Op == OO_PlusPlus? 0 : 1);
- Arith < NumArithmeticTypes; ++Arith) {
+ for (unsigned Arith = 0; Arith < NumArithmeticTypes; ++Arith) {
+ const auto TypeOfT = ArithmeticTypes[Arith];
+ if (TypeOfT == S.Context.BoolTy) {
+ if (Op == OO_MinusMinus)
+ continue;
+ if (Op == OO_PlusPlus && S.getLangOpts().CPlusPlus17)
+ continue;
+ }
addPlusPlusMinusMinusStyleOverloads(
- ArithmeticTypes[Arith],
+ TypeOfT,
VisibleTypeConversionsQuals.hasVolatile(),
VisibleTypeConversionsQuals.hasRestrict());
}
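The explicit bool checks replace the old trick of starting the loop at index 1 for '--': bool never had a builtin '--', and C++17 removes its '++' as well. Concretely:

```cpp
void test(bool b) {
  ++b; // deprecated before C++17; ill-formed in C++17 (no candidate added)
  // --b; // always ill-formed: no builtin 'operator--' for bool
}
```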
@@ -7929,7 +8016,8 @@ public:
// bool operator>=(T, T);
// bool operator==(T, T);
// bool operator!=(T, T);
- void addRelationalPointerOrEnumeralOverloads() {
+ // R operator<=>(T, T);
+ void addGenericBinaryPointerOrEnumeralOverloads() {
// C++ [over.match.oper]p3:
// [...]the built-in candidates include all of the candidate operator
// functions defined in 13.6 that, compared to the given operator, [...]
@@ -8002,7 +8090,6 @@ public:
UserDefinedBinaryOperators.count(std::make_pair(CanonType,
CanonType)))
continue;
-
QualType ParamTypes[2] = { *Enum, *Enum };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
@@ -8120,6 +8207,41 @@ public:
}
}
+ // C++2a [over.built]p14:
+ //
+ // For every integral type T there exists a candidate operator function
+ // of the form
+ //
+ // std::strong_ordering operator<=>(T, T)
+ //
+ // C++2a [over.built]p15:
+ //
+ // For every pair of floating-point types L and R, there exists a candidate
+ // operator function of the form
+ //
+ // std::partial_ordering operator<=>(L, R);
+ //
+ // FIXME: The current specification for integral types doesn't play nice with
+ // the direction of p0946r0, which allows mixed integral and unscoped-enum
+ // comparisons. Under the current spec this can lead to ambiguity during
+ // overload resolution. For example:
+ //
+ // enum A : int {a};
+ // auto x = (a <=> (long)42);
+ //
+ // error: call is ambiguous for arguments 'A' and 'long'.
+ // note: candidate operator<=>(int, int)
+ // note: candidate operator<=>(long, long)
+ //
+ // To avoid this error, this function deviates from the specification and adds
+ // the mixed overloads `operator<=>(L, R)` where L and R are promoted
+ // arithmetic types (the same as the generic relational overloads).
+ //
+ // For now this function acts as a placeholder.
+ void addThreeWayArithmeticOverloads() {
+ addGenericBinaryArithmeticOverloads();
+ }
+
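Once these candidates exist, <=> on arithmetic operands resolves like the other relational operators; a minimal sketch (needs a C++2a compiler with <compare>):

```cpp
#include <compare>

auto i = (1 <=> 2);     // integral operands: std::strong_ordering::less
auto f = (1.0 <=> 2.0); // floating-point: std::partial_ordering::less
```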
// C++ [over.built]p17:
//
// For every pair of promoted integral types L and R, there
@@ -8688,12 +8810,14 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Greater:
case OO_LessEqual:
case OO_GreaterEqual:
- OpBuilder.addRelationalPointerOrEnumeralOverloads();
+ OpBuilder.addGenericBinaryPointerOrEnumeralOverloads();
OpBuilder.addGenericBinaryArithmeticOverloads();
break;
case OO_Spaceship:
- llvm_unreachable("<=> expressions not supported yet");
+ OpBuilder.addGenericBinaryPointerOrEnumeralOverloads();
+ OpBuilder.addThreeWayArithmeticOverloads();
+ break;
case OO_Percent:
case OO_Caret:
@@ -8764,7 +8888,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
}
}
-/// \brief Add function candidates found via argument-dependent lookup
+/// Add function candidates found via argument-dependent lookup
/// to the set of overloading candidates.
///
/// This routine performs argument-dependent name lookup based on the
@@ -8870,6 +8994,47 @@ static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
return Cand1I == Cand1Attrs.end() ? Comparison::Equal : Comparison::Better;
}
+static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2) {
+ if (!Cand1.Function || !Cand1.Function->isMultiVersion() || !Cand2.Function ||
+ !Cand2.Function->isMultiVersion())
+ return false;
+
+ // If this is a cpu_dispatch/cpu_specific multiversion situation, prefer
+ // cpu_dispatch, else arbitrarily based on the identifiers.
+ bool Cand1CPUDisp = Cand1.Function->hasAttr<CPUDispatchAttr>();
+ bool Cand2CPUDisp = Cand2.Function->hasAttr<CPUDispatchAttr>();
+ const auto *Cand1CPUSpec = Cand1.Function->getAttr<CPUSpecificAttr>();
+ const auto *Cand2CPUSpec = Cand2.Function->getAttr<CPUSpecificAttr>();
+
+ if (!Cand1CPUDisp && !Cand2CPUDisp && !Cand1CPUSpec && !Cand2CPUSpec)
+ return false;
+
+ if (Cand1CPUDisp && !Cand2CPUDisp)
+ return true;
+ if (Cand2CPUDisp && !Cand1CPUDisp)
+ return false;
+
+ if (Cand1CPUSpec && Cand2CPUSpec) {
+ if (Cand1CPUSpec->cpus_size() != Cand2CPUSpec->cpus_size())
+ return Cand1CPUSpec->cpus_size() < Cand2CPUSpec->cpus_size();
+
+ std::pair<CPUSpecificAttr::cpus_iterator, CPUSpecificAttr::cpus_iterator>
+ FirstDiff = std::mismatch(
+ Cand1CPUSpec->cpus_begin(), Cand1CPUSpec->cpus_end(),
+ Cand2CPUSpec->cpus_begin(),
+ [](const IdentifierInfo *LHS, const IdentifierInfo *RHS) {
+ return LHS->getName() == RHS->getName();
+ });
+
+ assert(FirstDiff.first != Cand1CPUSpec->cpus_end() &&
+ "Two different cpu-specific versions should not have the same "
+ "identifier list, otherwise they'd be the same decl!");
+ return (*FirstDiff.first)->getName() < (*FirstDiff.second)->getName();
+ }
+ llvm_unreachable("No way to get here unless both had cpu_dispatch");
+}
+
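The tie-breaker prefers a cpu_dispatch resolver over its cpu_specific bodies; a hedged sketch of the attribute forms it orders (work is a hypothetical name):

```cpp
// Each cpu_specific declaration is one variant; cpu_dispatch names the
// dispatcher. When both appear as candidates, the dispatcher is "better".
__attribute__((cpu_specific(ivybridge))) void work();
__attribute__((cpu_specific(generic)))   void work();
__attribute__((cpu_dispatch(ivybridge, generic))) void work();

void run() { work(); } // resolves to the cpu_dispatch candidate
```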
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(
@@ -9067,7 +9232,10 @@ bool clang::isBetterOverloadCandidate(
functionHasPassObjectSizeParams(Cand1.Function);
bool HasPS2 = Cand2.Function != nullptr &&
functionHasPassObjectSizeParams(Cand2.Function);
- return HasPS1 != HasPS2 && HasPS1;
+ if (HasPS1 != HasPS2 && HasPS1)
+ return true;
+
+ return isBetterMultiversionCandidate(Cand1, Cand2);
}
/// Determine whether two declarations are "equivalent" for the purposes of
@@ -9137,7 +9305,7 @@ void Sema::diagnoseEquivalentInternalLinkageDeclarations(
}
}
-/// \brief Computes the best viable function (C++ 13.3.3)
+/// Computes the best viable function (C++ 13.3.3)
/// within an overload candidate set.
///
/// \param Loc The location of the function name (or operator symbol) for
@@ -9228,66 +9396,77 @@ enum OverloadCandidateKind {
oc_function,
oc_method,
oc_constructor,
- oc_function_template,
- oc_method_template,
- oc_constructor_template,
oc_implicit_default_constructor,
oc_implicit_copy_constructor,
oc_implicit_move_constructor,
oc_implicit_copy_assignment,
oc_implicit_move_assignment,
- oc_inherited_constructor,
- oc_inherited_constructor_template
+ oc_inherited_constructor
+};
+
+enum OverloadCandidateSelect {
+ ocs_non_template,
+ ocs_template,
+ ocs_described_template,
};
-static OverloadCandidateKind
+static std::pair<OverloadCandidateKind, OverloadCandidateSelect>
ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
std::string &Description) {
- bool isTemplate = false;
+ bool isTemplate = Fn->isTemplateDecl() || Found->isTemplateDecl();
if (FunctionTemplateDecl *FunTmpl = Fn->getPrimaryTemplate()) {
isTemplate = true;
Description = S.getTemplateArgumentBindingsText(
- FunTmpl->getTemplateParameters(), *Fn->getTemplateSpecializationArgs());
+ FunTmpl->getTemplateParameters(), *Fn->getTemplateSpecializationArgs());
}
- if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
- if (!Ctor->isImplicit()) {
- if (isa<ConstructorUsingShadowDecl>(Found))
- return isTemplate ? oc_inherited_constructor_template
- : oc_inherited_constructor;
- else
- return isTemplate ? oc_constructor_template : oc_constructor;
- }
+ OverloadCandidateSelect Select = [&]() {
+ if (!Description.empty())
+ return ocs_described_template;
+ return isTemplate ? ocs_template : ocs_non_template;
+ }();
- if (Ctor->isDefaultConstructor())
- return oc_implicit_default_constructor;
+ OverloadCandidateKind Kind = [&]() {
+ if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ if (!Ctor->isImplicit()) {
+ if (isa<ConstructorUsingShadowDecl>(Found))
+ return oc_inherited_constructor;
+ else
+ return oc_constructor;
+ }
- if (Ctor->isMoveConstructor())
- return oc_implicit_move_constructor;
+ if (Ctor->isDefaultConstructor())
+ return oc_implicit_default_constructor;
- assert(Ctor->isCopyConstructor() &&
- "unexpected sort of implicit constructor");
- return oc_implicit_copy_constructor;
- }
+ if (Ctor->isMoveConstructor())
+ return oc_implicit_move_constructor;
- if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
- // This actually gets spelled 'candidate function' for now, but
- // it doesn't hurt to split it out.
- if (!Meth->isImplicit())
- return isTemplate ? oc_method_template : oc_method;
+ assert(Ctor->isCopyConstructor() &&
+ "unexpected sort of implicit constructor");
+ return oc_implicit_copy_constructor;
+ }
- if (Meth->isMoveAssignmentOperator())
- return oc_implicit_move_assignment;
+ if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
+ // This actually gets spelled 'candidate function' for now, but
+ // it doesn't hurt to split it out.
+ if (!Meth->isImplicit())
+ return oc_method;
- if (Meth->isCopyAssignmentOperator())
- return oc_implicit_copy_assignment;
+ if (Meth->isMoveAssignmentOperator())
+ return oc_implicit_move_assignment;
- assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
- return oc_method;
- }
+ if (Meth->isCopyAssignmentOperator())
+ return oc_implicit_copy_assignment;
+
+ assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
+ return oc_method;
+ }
- return isTemplate ? oc_function_template : oc_function;
+ return oc_function;
+ }();
+
+ return std::make_pair(Kind, Select);
}
void MaybeEmitInheritedConstructorNote(Sema &S, Decl *FoundDecl) {
@@ -9313,7 +9492,7 @@ static bool isFunctionAlwaysEnabled(const ASTContext &Ctx,
return true;
}
-/// \brief Returns true if we can take the address of the function.
+/// Returns true if we can take the address of the function.
///
/// \param Complain - If true, we'll emit a diagnostic
/// \param InOverloadResolution - For the purposes of emitting a diagnostic, are
@@ -9375,11 +9554,16 @@ void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType, bool TakingAddress) {
if (TakingAddress && !checkAddressOfCandidateIsAvailable(*this, Fn))
return;
+ if (Fn->isMultiVersion() && Fn->hasAttr<TargetAttr>() &&
+ !Fn->getAttr<TargetAttr>()->isDefaultVersion())
+ return;
std::string FnDesc;
- OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Found, Fn, FnDesc);
+ std::pair<OverloadCandidateKind, OverloadCandidateSelect> KSPair =
+ ClassifyOverloadCandidate(*this, Found, Fn, FnDesc);
PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
- << (unsigned) K << Fn << FnDesc;
+ << (unsigned)KSPair.first << (unsigned)KSPair.second
+ << Fn << FnDesc;
HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
Diag(Fn->getLocation(), PD);
@@ -9453,7 +9637,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
}
std::string FnDesc;
- OverloadCandidateKind FnKind =
+ std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Cand->FoundDecl, Fn, FnDesc);
Expr *FromExpr = Conv.Bad.FromExpr;
@@ -9468,9 +9652,9 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
DeclarationName Name = cast<OverloadExpr>(E)->getName();
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_overload)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << ToTy << Name << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << ToTy
+ << Name << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9497,43 +9681,38 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromQs.getAddressSpace() != ToQs.getAddressSpace()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy
- << FromQs.getAddressSpaceAttributePrintValue()
- << ToQs.getAddressSpaceAttributePrintValue()
- << (unsigned) isObjectArgument << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << ToTy << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy
- << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
- << (unsigned) isObjectArgument << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
+ << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy
- << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
- << (unsigned) isObjectArgument << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
+ << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (FromQs.hasUnaligned() != ToQs.hasUnaligned()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_unaligned)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << FromQs.hasUnaligned() << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << FromQs.hasUnaligned() << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9543,14 +9722,14 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (isObjectArgument) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << (CVR - 1);
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << (CVR - 1);
} else {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << (CVR - 1) << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << (CVR - 1) << I + 1;
}
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
@@ -9560,9 +9739,9 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
// telling the user that it has type void is not useful.
if (FromExpr && isa<InitListExpr>(FromExpr)) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_list_argument)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << ToTy << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9576,10 +9755,10 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (TempFromTy->isIncompleteType()) {
// Emit the generic diagnostic and, optionally, add the hints to it.
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv_incomplete)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << ToTy << (unsigned) isObjectArgument << I+1
- << (unsigned) (Cand->Fix.Kind);
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << ToTy << (unsigned)isObjectArgument << I + 1
+ << (unsigned)(Cand->Fix.Kind);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
@@ -9617,21 +9796,19 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
ToTy.getNonReferenceType().getCanonicalType() ==
FromTy.getNonReferenceType().getCanonicalType()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_lvalue)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << (unsigned) isObjectArgument << I + 1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (unsigned)isObjectArgument << I + 1
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange());
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
}
if (BaseToDerivedConversion) {
- S.Diag(Fn->getLocation(),
- diag::note_ovl_candidate_bad_base_to_derived_conv)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << (BaseToDerivedConversion - 1)
- << FromTy << ToTy << I+1;
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_base_to_derived_conv)
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << (BaseToDerivedConversion - 1) << FromTy << ToTy << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9642,9 +9819,9 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
Qualifiers ToQs = CToTy.getQualifiers();
if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
- << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
+ << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9656,10 +9833,10 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
// Emit the generic diagnostic and, optionally, add the hints to it.
PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
- FDiag << (unsigned) FnKind << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << ToTy << (unsigned) isObjectArgument << I + 1
- << (unsigned) (Cand->Fix.Kind);
+ FDiag << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
+ << ToTy << (unsigned)isObjectArgument << I + 1
+ << (unsigned)(Cand->Fix.Kind);
// If we can fix the conversion, suggest the FixIts.
for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
@@ -9732,17 +9909,18 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
}
std::string Description;
- OverloadCandidateKind FnKind =
+ std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Found, Fn, Description);
if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
- << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != nullptr)
- << mode << Fn->getParamDecl(0) << NumFormalArgs;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
+ << Description << mode << Fn->getParamDecl(0) << NumFormalArgs;
else
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
- << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != nullptr)
- << mode << modeCount << NumFormalArgs;
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
+ << Description << mode << modeCount << NumFormalArgs;
+
MaybeEmitInheritedConstructorNote(S, Found);
}
@@ -9783,6 +9961,17 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
}
+ case Sema::TDK_IncompletePack: {
+ assert(ParamD && "no parameter found for incomplete deduction result");
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_incomplete_deduction_pack)
+ << ParamD->getDeclName()
+ << (DeductionFailure.getFirstArg()->pack_size() + 1)
+ << *DeductionFailure.getFirstArg();
+ MaybeEmitInheritedConstructorNote(S, Found);
+ return;
+ }
+
case Sema::TDK_Underqualified: {
assert(ParamD && "no parameter found for bad qualifiers deduction result");
TemplateTypeParmDecl *TParam = cast<TemplateTypeParmDecl>(ParamD);
@@ -10017,11 +10206,13 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
CalleeTarget = S.IdentifyCUDATarget(Callee);
std::string FnDesc;
- OverloadCandidateKind FnKind =
+ std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Cand->FoundDecl, Callee, FnDesc);
S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
- << (unsigned)FnKind << CalleeTarget << CallerTarget;
+ << (unsigned)FnKindPair.first << (unsigned)ocs_non_template
+ << FnDesc /* Ignored */
+ << CalleeTarget << CallerTarget;
// This could be an implicit constructor for which we could not infer the
// target due to a collision. Diagnose that case.
@@ -10030,7 +10221,7 @@ static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
CXXRecordDecl *ParentClass = Meth->getParent();
Sema::CXXSpecialMember CSM;
- switch (FnKind) {
+ switch (FnKindPair.first) {
default:
return;
case oc_implicit_default_constructor:
@@ -10102,12 +10293,12 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
if (Cand->Viable) {
if (Fn->isDeleted() || S.isFunctionConsideredUnavailable(Fn)) {
std::string FnDesc;
- OverloadCandidateKind FnKind =
- ClassifyOverloadCandidate(S, Cand->FoundDecl, Fn, FnDesc);
+ std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
+ ClassifyOverloadCandidate(S, Cand->FoundDecl, Fn, FnDesc);
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
- << FnKind << FnDesc
- << (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10176,6 +10367,9 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
assert(!Available);
break;
}
+ case ovl_non_default_multiversion_function:
+ // Do nothing, these should simply be ignored.
+ break;
}
}
@@ -10257,6 +10451,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
case Sema::TDK_Invalid:
case Sema::TDK_Incomplete:
+ case Sema::TDK_IncompletePack:
return 1;
case Sema::TDK_Underqualified:
@@ -10493,9 +10688,8 @@ static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
}
}
-/// PrintOverloadCandidates - When overload resolution fails, prints
-/// diagnostic messages containing the candidates in the candidate
-/// set.
+/// When overload resolution fails, prints diagnostic messages containing the
+/// candidates in the candidate set.
void OverloadCandidateSet::NoteCandidates(
Sema &S, OverloadCandidateDisplayKind OCD, ArrayRef<Expr *> Args,
StringRef Opc, SourceLocation OpLoc,
@@ -10642,8 +10836,8 @@ void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
// in general, want to list every possible builtin candidate.
}
- std::sort(Cands.begin(), Cands.end(),
- CompareTemplateSpecCandidatesForDisplay(S));
+ llvm::sort(Cands.begin(), Cands.end(),
+ CompareTemplateSpecCandidatesForDisplay(S));
// FIXME: Perhaps rename OverloadsShown and getShowOverloads()
// for generalization purposes (?).
@@ -10912,6 +11106,11 @@ private:
if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
if (!Caller->isImplicit() && !S.IsAllowedCUDACall(Caller, FunDecl))
return false;
+ if (FunDecl->isMultiVersion()) {
+ const auto *TA = FunDecl->getAttr<TargetAttr>();
+ if (TA && !TA->isDefaultVersion())
+ return false;
+ }
// If any candidate has a placeholder return type, trigger its deduction
// now.
@@ -10993,9 +11192,9 @@ private:
MatchesCopy.begin(), MatchesCopy.end(), FailedCandidates,
SourceExpr->getLocStart(), S.PDiag(),
S.PDiag(diag::err_addr_ovl_ambiguous)
- << Matches[0].second->getDeclName(),
+ << Matches[0].second->getDeclName(),
S.PDiag(diag::note_ovl_candidate)
- << (unsigned)oc_function_template,
+ << (unsigned)oc_function << (unsigned)ocs_described_template,
Complain, TargetFunctionType);
if (Result != MatchesCopy.end()) {
@@ -11157,7 +11356,7 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
return Fn;
}
-/// \brief Given an expression that refers to an overloaded function, try to
+/// Given an expression that refers to an overloaded function, try to
/// resolve that function to a single function that can have its address taken.
/// This will modify `Pair` iff it returns non-null.
///
@@ -11193,7 +11392,7 @@ Sema::resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
return Result;
}
-/// \brief Given an overloaded function, tries to turn it into a non-overloaded
+/// Given an overloaded function, tries to turn it into a non-overloaded
/// function reference using resolveAddressOfOnlyViableOverloadCandidate. This
/// will perform access checks, diagnose the use of the resultant decl, and, if
/// requested, potentially perform a function-to-pointer decay.
@@ -11207,7 +11406,8 @@ bool Sema::resolveAndFixAddressOfOnlyViableOverloadCandidate(
DeclAccessPair DAP;
FunctionDecl *Found = resolveAddressOfOnlyViableOverloadCandidate(E, DAP);
- if (!Found)
+ if (!Found || Found->isCPUDispatchMultiVersion() ||
+ Found->isCPUSpecificMultiVersion())
return false;
// Emitting multiple diagnostics for a function that is both inaccessible and
@@ -11223,7 +11423,7 @@ bool Sema::resolveAndFixAddressOfOnlyViableOverloadCandidate(
return true;
}
-/// \brief Given an expression that refers to an overloaded function, try to
+/// Given an expression that refers to an overloaded function, try to
/// resolve that overloaded function expression down to a single function.
///
/// This routine can only resolve template-ids that refer to a single function
@@ -11309,9 +11509,6 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
return Matched;
}
-
-
-
// Resolve and fix an overloaded expression that can be resolved
// because it identifies a single function template specialization.
//
@@ -11395,7 +11592,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
return true;
}
-/// \brief Add a single candidate to the overload set.
+/// Add a single candidate to the overload set.
static void AddOverloadedCallCandidate(Sema &S,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
@@ -11434,7 +11631,7 @@ static void AddOverloadedCallCandidate(Sema &S,
assert(!KnownValid && "unhandled case in overloaded call candidate");
}
-/// \brief Add the overload candidates named by callee and/or found by argument
+/// Add the overload candidates named by callee and/or found by argument
/// dependent lookup to the given overload set.
void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
@@ -11726,7 +11923,7 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
RParenLoc);
}
-/// \brief Constructs and populates an OverloadedCandidateSet from
+/// Constructs and populates an OverloadedCandidateSet from
/// the given function.
/// \returns true when the ExprResult output parameter has been set.
bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
@@ -11866,7 +12063,7 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
<< Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
- // We emitted an error for the unvailable/deleted function call but keep
+ // We emitted an error for the unavailable/deleted function call but keep
// the call in the AST.
FunctionDecl *FDecl = (*Best)->Function;
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
@@ -11932,7 +12129,7 @@ static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
(Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
}
-/// \brief Create a unary operation that may resolve to an overloaded
+/// Create a unary operation that may resolve to an overloaded
/// operator.
///
/// \param OpLoc The location of the operator itself (e.g., '*').
@@ -11978,7 +12175,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (Input->isTypeDependent()) {
if (Fns.empty())
return new (Context) UnaryOperator(Input, Opc, Context.DependentTy,
- VK_RValue, OK_Ordinary, OpLoc);
+ VK_RValue, OK_Ordinary, OpLoc, false);
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
UnresolvedLookupExpr *Fn
@@ -12077,7 +12274,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
// break out so that we will build the appropriate built-in
// operator node.
ExprResult InputRes = PerformImplicitConversion(
- Input, Best->BuiltinParamTypes[0], Best->Conversions[0], AA_Passing);
+ Input, Best->BuiltinParamTypes[0], Best->Conversions[0], AA_Passing,
+ CCK_ForBuiltinOverloadedOp);
if (InputRes.isInvalid())
return ExprError();
Input = InputRes.get();
@@ -12123,7 +12321,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
}
-/// \brief Create a binary operation that may resolve to an overloaded
+/// Create a binary operation that may resolve to an overloaded
/// operator.
///
/// \param OpLoc The location of the operator itself (e.g., '+').
@@ -12321,16 +12519,16 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
// operator node.
- ExprResult ArgsRes0 =
- PerformImplicitConversion(Args[0], Best->BuiltinParamTypes[0],
- Best->Conversions[0], AA_Passing);
+ ExprResult ArgsRes0 = PerformImplicitConversion(
+ Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0],
+ AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes0.isInvalid())
return ExprError();
Args[0] = ArgsRes0.get();
- ExprResult ArgsRes1 =
- PerformImplicitConversion(Args[1], Best->BuiltinParamTypes[1],
- Best->Conversions[1], AA_Passing);
+ ExprResult ArgsRes1 = PerformImplicitConversion(
+ Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1],
+ AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes1.isInvalid())
return ExprError();
Args[1] = ArgsRes1.get();
@@ -12346,7 +12544,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Opc == BO_Comma)
break;
- // For class as left operand for assignment or compound assigment
+ // For class as left operand for assignment or compound assignment
// operator do not fall through to handling in built-in, but report that
// no overloaded assignment operator found
ExprResult Result = ExprError();
@@ -12533,16 +12731,16 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
// operator node.
- ExprResult ArgsRes0 =
- PerformImplicitConversion(Args[0], Best->BuiltinParamTypes[0],
- Best->Conversions[0], AA_Passing);
+ ExprResult ArgsRes0 = PerformImplicitConversion(
+ Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0],
+ AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes0.isInvalid())
return ExprError();
Args[0] = ArgsRes0.get();
- ExprResult ArgsRes1 =
- PerformImplicitConversion(Args[1], Best->BuiltinParamTypes[1],
- Best->Conversions[1], AA_Passing);
+ ExprResult ArgsRes1 = PerformImplicitConversion(
+ Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1],
+ AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes1.isInvalid())
return ExprError();
Args[1] = ArgsRes1.get();
@@ -13508,7 +13706,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
VK_RValue, OK_Ordinary,
- UnOp->getOperatorLoc());
+ UnOp->getOperatorLoc(), false);
}
}
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
@@ -13519,7 +13717,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
Context.getPointerType(SubExpr->getType()),
VK_RValue, OK_Ordinary,
- UnOp->getOperatorLoc());
+ UnOp->getOperatorLoc(), false);
}
// C++ [except.spec]p17:
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index 58980be64a30..4484e9b3513b 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -132,7 +132,8 @@ namespace {
uop->getType(),
uop->getValueKind(),
uop->getObjectKind(),
- uop->getOperatorLoc());
+ uop->getOperatorLoc(),
+ uop->canOverflow());
}
if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
@@ -189,11 +190,12 @@ namespace {
Sema &S;
unsigned ResultIndex;
SourceLocation GenericLoc;
+ bool IsUnique;
SmallVector<Expr *, 4> Semantics;
- PseudoOpBuilder(Sema &S, SourceLocation genericLoc)
+ PseudoOpBuilder(Sema &S, SourceLocation genericLoc, bool IsUnique)
: S(S), ResultIndex(PseudoObjectExpr::NoResult),
- GenericLoc(genericLoc) {}
+ GenericLoc(genericLoc), IsUnique(IsUnique) {}
virtual ~PseudoOpBuilder() {}
@@ -207,6 +209,9 @@ namespace {
assert(ResultIndex == PseudoObjectExpr::NoResult);
ResultIndex = Semantics.size();
Semantics.push_back(resultExpr);
+ // An OVE is not unique if it is used as the result expression.
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(Semantics.back()))
+ OVE->setIsUnique(false);
}
ExprResult buildRValueOperation(Expr *op);
@@ -226,6 +231,9 @@ namespace {
void setResultToLastSemantic() {
assert(ResultIndex == PseudoObjectExpr::NoResult);
ResultIndex = Semantics.size() - 1;
+ // An OVE is not unique if it is used as the result expression.
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(Semantics.back()))
+ OVE->setIsUnique(false);
}
/// Return true if assignments have a non-void result.
@@ -245,7 +253,7 @@ namespace {
virtual ExprResult buildGet() = 0;
virtual ExprResult buildSet(Expr *, SourceLocation,
bool captureSetValueAsResult) = 0;
- /// \brief Should the result of an assignment be the formal result of the
+ /// Should the result of an assignment be the formal result of the
/// setter call or the value that was passed to the setter?
///
/// Different pseudo-object language features use different language rules
@@ -273,10 +281,10 @@ namespace {
Selector GetterSelector;
public:
- ObjCPropertyOpBuilder(Sema &S, ObjCPropertyRefExpr *refExpr) :
- PseudoOpBuilder(S, refExpr->getLocation()), RefExpr(refExpr),
- SyntacticRefExpr(nullptr), InstanceReceiver(nullptr), Getter(nullptr),
- Setter(nullptr) {
+ ObjCPropertyOpBuilder(Sema &S, ObjCPropertyRefExpr *refExpr, bool IsUnique)
+ : PseudoOpBuilder(S, refExpr->getLocation(), IsUnique),
+ RefExpr(refExpr), SyntacticRefExpr(nullptr),
+ InstanceReceiver(nullptr), Getter(nullptr), Setter(nullptr) {
}
ExprResult buildRValueOperation(Expr *op);
@@ -313,11 +321,10 @@ namespace {
Selector AtIndexSetterSelector;
public:
- ObjCSubscriptOpBuilder(Sema &S, ObjCSubscriptRefExpr *refExpr) :
- PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
- RefExpr(refExpr),
- InstanceBase(nullptr), InstanceKey(nullptr),
- AtIndexGetter(nullptr), AtIndexSetter(nullptr) {}
+ ObjCSubscriptOpBuilder(Sema &S, ObjCSubscriptRefExpr *refExpr, bool IsUnique)
+ : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin(), IsUnique),
+ RefExpr(refExpr), InstanceBase(nullptr), InstanceKey(nullptr),
+ AtIndexGetter(nullptr), AtIndexSetter(nullptr) {}
ExprResult buildRValueOperation(Expr *op);
ExprResult buildAssignmentOperation(Scope *Sc,
@@ -341,11 +348,11 @@ namespace {
MSPropertyRefExpr *getBaseMSProperty(MSPropertySubscriptExpr *E);
public:
- MSPropertyOpBuilder(Sema &S, MSPropertyRefExpr *refExpr) :
- PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
- RefExpr(refExpr), InstanceBase(nullptr) {}
- MSPropertyOpBuilder(Sema &S, MSPropertySubscriptExpr *refExpr)
- : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ MSPropertyOpBuilder(Sema &S, MSPropertyRefExpr *refExpr, bool IsUnique)
+ : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin(), IsUnique),
+ RefExpr(refExpr), InstanceBase(nullptr) {}
+ MSPropertyOpBuilder(Sema &S, MSPropertySubscriptExpr *refExpr, bool IsUnique)
+ : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin(), IsUnique),
InstanceBase(nullptr) {
RefExpr = getBaseMSProperty(refExpr);
}
@@ -364,7 +371,9 @@ OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) {
new (S.Context) OpaqueValueExpr(GenericLoc, e->getType(),
e->getValueKind(), e->getObjectKind(),
e);
-
+ if (IsUnique)
+ captured->setIsUnique(true);
+
// Make sure we bind that in the semantics.
addSemanticExpr(captured);
return captured;
@@ -396,6 +405,8 @@ OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) {
if (e == Semantics[index]) break;
}
ResultIndex = index;
+ // An OVE is not unique if it is used as the result expression.
+ cast<OpaqueValueExpr>(e)->setIsUnique(false);
return cast<OpaqueValueExpr>(e);
}
@@ -527,9 +538,12 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
(result.get()->isTypeDependent() || CanCaptureValue(result.get())))
setResultToLastSemantic();
- UnaryOperator *syntactic =
- new (S.Context) UnaryOperator(syntacticOp, opcode, resultType,
- VK_LValue, OK_Ordinary, opcLoc);
+ UnaryOperator *syntactic = new (S.Context) UnaryOperator(
+ syntacticOp, opcode, resultType, VK_LValue, OK_Ordinary, opcLoc,
+ !resultType->isDependentType()
+ ? S.Context.getTypeSize(resultType) >=
+ S.Context.getTypeSize(S.Context.IntTy)
+ : false);
return complete(syntactic);
}
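The new trailing argument records whether the rewritten '++'/'--' can itself overflow: operands narrower than int are promoted first, so only results at least as wide as int can overflow. For instance:

```cpp
void test(short s, int i) {
  ++s; // computed in int after promotion: the '++' itself cannot overflow
  ++i; // can overflow: signed wraparound here would be undefined behavior
}
```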
@@ -961,11 +975,11 @@ ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
}
ExprResult ObjCPropertyOpBuilder::complete(Expr *SyntacticForm) {
- if (isWeakProperty() &&
+ if (isWeakProperty() && !S.isUnevaluatedContext() &&
!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
SyntacticForm->getLocStart()))
- S.recordUseOfEvaluatedWeak(SyntacticRefExpr,
- SyntacticRefExpr->isMessagingGetter());
+ S.getCurFunction()->recordUseOfWeak(SyntacticRefExpr,
+ SyntacticRefExpr->isMessagingGetter());
return PseudoOpBuilder::complete(SyntacticForm);
}
@@ -1524,20 +1538,20 @@ ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
Expr *opaqueRef = E->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr);
+ ObjCPropertyOpBuilder builder(*this, refExpr, true);
return builder.buildRValueOperation(E);
}
else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr);
+ ObjCSubscriptOpBuilder builder(*this, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr);
+ MSPropertyOpBuilder builder(*this, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertySubscriptExpr *RefExpr =
dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr);
+ MSPropertyOpBuilder Builder(*this, RefExpr, true);
return Builder.buildRValueOperation(E);
} else {
llvm_unreachable("unknown pseudo-object kind!");
@@ -1550,24 +1564,24 @@ ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
return new (Context) UnaryOperator(op, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc);
+ VK_RValue, OK_Ordinary, opcLoc, false);
assert(UnaryOperator::isIncrementDecrementOp(opcode));
Expr *opaqueRef = op->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr);
+ ObjCPropertyOpBuilder builder(*this, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
Diag(opcLoc, diag::err_illegal_container_subscripting_op);
return ExprError();
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr);
+ MSPropertyOpBuilder builder(*this, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr);
+ MSPropertyOpBuilder Builder(*this, RefExpr, false);
return Builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else {
llvm_unreachable("unknown pseudo-object kind!");
@@ -1590,22 +1604,23 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
RHS = result.get();
}
+ bool IsSimpleAssign = opcode == BO_Assign;
Expr *opaqueRef = LHS->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr);
+ ObjCPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr);
+ ObjCSubscriptOpBuilder builder(*this, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr);
+ MSPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr);
+ MSPropertyOpBuilder Builder(*this, RefExpr, IsSimpleAssign);
return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else {
llvm_unreachable("unknown pseudo-object kind!");
@@ -1633,9 +1648,9 @@ Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
- return new (Context) UnaryOperator(op, uop->getOpcode(), uop->getType(),
- uop->getValueKind(), uop->getObjectKind(),
- uop->getOperatorLoc());
+ return new (Context) UnaryOperator(
+ op, uop->getOpcode(), uop->getType(), uop->getValueKind(),
+ uop->getObjectKind(), uop->getOperatorLoc(), uop->canOverflow());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index 4474d62949a2..b2f9783d44f1 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -14,6 +14,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
@@ -120,7 +121,7 @@ void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
}
}
-/// \brief Diagnose unused comparisons, both builtin and overloaded operators.
+/// Diagnose unused comparisons, both builtin and overloaded operators.
/// For '==' and '!=', suggest fixits for '=' or '|='.
///
/// Adding a cast to void (or other expression wrappers) will prevent the
@@ -336,8 +337,8 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
DiagRuntimeBehavior(Loc, nullptr, PDiag(DiagID) << R1 << R2);
}
-void Sema::ActOnStartOfCompoundStmt() {
- PushCompoundScope();
+void Sema::ActOnStartOfCompoundStmt(bool IsStmtExpr) {
+ PushCompoundScope(IsStmtExpr);
}
void Sema::ActOnFinishOfCompoundStmt() {
@@ -391,65 +392,79 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
return CompoundStmt::Create(Context, Elts, L, R);
}
+ExprResult
+Sema::ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val) {
+ if (!Val.get())
+ return Val;
+
+ if (DiagnoseUnexpandedParameterPack(Val.get()))
+ return ExprError();
+
+ // If we're not inside a switch, let the 'case' statement handling diagnose
+ // this. Just clean up after the expression as best we can.
+ if (!getCurFunction()->SwitchStack.empty()) {
+ Expr *CondExpr =
+ getCurFunction()->SwitchStack.back().getPointer()->getCond();
+ if (!CondExpr)
+ return ExprError();
+ QualType CondType = CondExpr->getType();
+
+ auto CheckAndFinish = [&](Expr *E) {
+ if (CondType->isDependentType() || E->isTypeDependent())
+ return ExprResult(E);
+
+ if (getLangOpts().CPlusPlus11) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ llvm::APSInt TempVal;
+ return CheckConvertedConstantExpression(E, CondType, TempVal,
+ CCEK_CaseValue);
+ }
+
+ ExprResult ER = E;
+ if (!E->isValueDependent())
+ ER = VerifyIntegerConstantExpression(E);
+ if (!ER.isInvalid())
+ ER = DefaultLvalueConversion(ER.get());
+ if (!ER.isInvalid())
+ ER = ImpCastExprToType(ER.get(), CondType, CK_IntegralCast);
+ return ER;
+ };
+
+ ExprResult Converted = CorrectDelayedTyposInExpr(Val, CheckAndFinish);
+ if (Converted.get() == Val.get())
+ Converted = CheckAndFinish(Val.get());
+ if (Converted.isInvalid())
+ return ExprError();
+ Val = Converted;
+ }
+
+ return ActOnFinishFullExpr(Val.get(), Val.get()->getExprLoc(), false,
+ getLangOpts().CPlusPlus11);
+}
+
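A minimal sketch (not part of the patch) of the C++11 rule the CheckAndFinish lambda above enforces: a case value must be a converted constant expression of the promoted type of the switch condition, so for example a scoped enumeration rejects plain integer labels:

enum class Color { Red, Green };
void paint(Color c) {
  switch (c) {
  case Color::Red:  // OK: converted constant expression of type Color
    break;
  // case 0:        // ill-formed in C++11: int does not implicitly convert
  //   break;       // to the scoped enumeration type of the condition
  }
}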
StmtResult
-Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
- SourceLocation DotDotDotLoc, Expr *RHSVal,
+Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
+ SourceLocation DotDotDotLoc, ExprResult RHSVal,
SourceLocation ColonLoc) {
- assert(LHSVal && "missing expression in case statement");
+ assert((LHSVal.isInvalid() || LHSVal.get()) && "missing LHS value");
+ assert((DotDotDotLoc.isInvalid() ? RHSVal.isUnset()
+ : RHSVal.isInvalid() || RHSVal.get()) &&
+ "missing RHS value");
if (getCurFunction()->SwitchStack.empty()) {
Diag(CaseLoc, diag::err_case_not_in_switch);
return StmtError();
}
- ExprResult LHS =
- CorrectDelayedTyposInExpr(LHSVal, [this](class Expr *E) {
- if (!getLangOpts().CPlusPlus11)
- return VerifyIntegerConstantExpression(E);
- if (Expr *CondExpr =
- getCurFunction()->SwitchStack.back()->getCond()) {
- QualType CondType = CondExpr->getType();
- llvm::APSInt TempVal;
- return CheckConvertedConstantExpression(E, CondType, TempVal,
- CCEK_CaseValue);
- }
- return ExprError();
- });
- if (LHS.isInvalid())
+ if (LHSVal.isInvalid() || RHSVal.isInvalid()) {
+ getCurFunction()->SwitchStack.back().setInt(true);
return StmtError();
- LHSVal = LHS.get();
-
- if (!getLangOpts().CPlusPlus11) {
- // C99 6.8.4.2p3: The expression shall be an integer constant.
- // However, GCC allows any evaluatable integer expression.
- if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent()) {
- LHSVal = VerifyIntegerConstantExpression(LHSVal).get();
- if (!LHSVal)
- return StmtError();
- }
-
- // GCC extension: The expression shall be an integer constant.
-
- if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent()) {
- RHSVal = VerifyIntegerConstantExpression(RHSVal).get();
- // Recover from an error by just forgetting about it.
- }
}
- LHS = ActOnFinishFullExpr(LHSVal, LHSVal->getExprLoc(), false,
- getLangOpts().CPlusPlus11);
- if (LHS.isInvalid())
- return StmtError();
-
- auto RHS = RHSVal ? ActOnFinishFullExpr(RHSVal, RHSVal->getExprLoc(), false,
- getLangOpts().CPlusPlus11)
- : ExprResult();
- if (RHS.isInvalid())
- return StmtError();
-
CaseStmt *CS = new (Context)
- CaseStmt(LHS.get(), RHS.get(), CaseLoc, DotDotDotLoc, ColonLoc);
- getCurFunction()->SwitchStack.back()->addSwitchCase(CS);
+ CaseStmt(LHSVal.get(), RHSVal.get(), CaseLoc, DotDotDotLoc, ColonLoc);
+ getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS);
return CS;
}
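For reference, a sketch (not taken from the patch) of the two statement shapes ActOnCaseStmt now receives as ExprResults:

void f(int n) {
  switch (n) {
  case 1:        // LHSVal only; RHSVal is unset
    break;
  case 2 ... 4:  // GNU case range: DotDotDotLoc and RHSVal are both set
    break;
  }
}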
@@ -472,7 +487,7 @@ Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
}
DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
- getCurFunction()->SwitchStack.back()->addSwitchCase(DS);
+ getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(DS);
return DS;
}
@@ -556,7 +571,7 @@ StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
return StmtError();
if (IsConstexpr || isa<ObjCAvailabilityCheckExpr>(Cond.get().second))
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
DiagnoseUnusedExprResult(thenStmt);
DiagnoseUnusedExprResult(elseStmt);
@@ -678,20 +693,44 @@ ExprResult Sema::CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond) {
if (CondResult.isInvalid())
return ExprError();
+ // FIXME: PerformContextualImplicitConversion doesn't always tell us if it
+ // failed and produced a diagnostic.
+ Cond = CondResult.get();
+ if (!Cond->isTypeDependent() &&
+ !Cond->getType()->isIntegralOrEnumerationType())
+ return ExprError();
+
// C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr.
- return UsualUnaryConversions(CondResult.get());
+ return UsualUnaryConversions(Cond);
}
StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt, ConditionResult Cond) {
- if (Cond.isInvalid())
- return StmtError();
+ Expr *CondExpr = Cond.get().second;
+ assert((Cond.isInvalid() || CondExpr) && "switch with no condition");
+
+ if (CondExpr && !CondExpr->isTypeDependent()) {
+ // We have already converted the expression to an integral or enumeration
+ // type, when we parsed the switch condition. If we don't have an
+ // appropriate type now, enter the switch scope but remember that it's
+ // invalid.
+ assert(CondExpr->getType()->isIntegralOrEnumerationType() &&
+ "invalid condition type");
+ if (CondExpr->isKnownToHaveBooleanValue()) {
+ // switch(bool_expr) {...} is often a programmer error, e.g.
+ // switch(n && mask) { ... } // Doh - should be "n & mask".
+ // One can always use an if statement instead of switch(bool_expr).
+ Diag(SwitchLoc, diag::warn_bool_switch_condition)
+ << CondExpr->getSourceRange();
+ }
+ }
- getCurFunction()->setHasBranchIntoScope();
+ setFunctionHasBranchIntoScope();
SwitchStmt *SS = new (Context)
- SwitchStmt(Context, InitStmt, Cond.get().first, Cond.get().second);
- getCurFunction()->SwitchStack.push_back(SS);
+ SwitchStmt(Context, InitStmt, Cond.get().first, CondExpr);
+ getCurFunction()->SwitchStack.push_back(
+ FunctionScopeInfo::SwitchInfo(SS, false));
return SS;
}
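A sketch (not from the patch) of the warn_bool_switch_condition case that is now diagnosed as soon as the switch is started rather than when it is finished:

void g(unsigned n, unsigned mask) {
  switch (n && mask) {  // warning: switch condition has boolean value
  case 0:               // the author almost certainly meant 'n & mask'
    break;
  case 1:
    break;
  }
}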
@@ -704,6 +743,10 @@ static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
/// type.
static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val,
unsigned UnpromotedWidth, bool UnpromotedSign) {
+ // In C++11 onwards, this is checked by the language rules.
+ if (S.getLangOpts().CPlusPlus11)
+ return;
+
// If the case value was signed and negative and the switch expression is
// unsigned, don't bother to warn: this is implementation-defined behavior.
// FIXME: Introduce a second, default-ignored warning for this case?
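A sketch of what checkCaseValue still diagnoses in C and C++03; C++11 and later now return early above, because case values go through converted-constant-expression checking instead:

void h(char c) {
  switch (c) {
  case 300:  // warning: overflow converting case value to switch condition
    break;   // type; 300 is checked against the pre-promotion width of 'c'
  }
}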
@@ -758,7 +801,7 @@ static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
static void checkEnumTypesInSwitchStmt(Sema &S, const Expr *Cond,
const Expr *Case) {
- QualType CondType = GetTypeBeforeIntegralPromotion(Cond);
+ QualType CondType = Cond->getType();
QualType CaseType = Case->getType();
const EnumType *CondEnumType = CondType->getAs<EnumType>();
@@ -786,7 +829,8 @@ StmtResult
Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
Stmt *BodyStmt) {
SwitchStmt *SS = cast<SwitchStmt>(Switch);
- assert(SS == getCurFunction()->SwitchStack.back() &&
+ bool CaseListIsIncomplete = getCurFunction()->SwitchStack.back().getInt();
+ assert(SS == getCurFunction()->SwitchStack.back().getPointer() &&
"switch stack missing push/pop!");
getCurFunction()->SwitchStack.pop_back();
@@ -799,10 +843,6 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
QualType CondType = CondExpr->getType();
- const Expr *CondExprBeforePromotion = CondExpr;
- QualType CondTypeBeforePromotion =
- GetTypeBeforeIntegralPromotion(CondExprBeforePromotion);
-
// C++ 6.4.2.p2:
// Integral promotions are performed (on the switch condition).
//
@@ -810,21 +850,9 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// type (before the promotion) doesn't make sense, even when it can
// be represented by the promoted type. Therefore we need to find
// the pre-promotion type of the switch condition.
- if (!CondExpr->isTypeDependent()) {
- // We have already converted the expression to an integral or enumeration
- // type, when we started the switch statement. If we don't have an
- // appropriate type now, just return an error.
- if (!CondType->isIntegralOrEnumerationType())
- return StmtError();
-
- if (CondExpr->isKnownToHaveBooleanValue()) {
- // switch(bool_expr) {...} is often a programmer error, e.g.
- // switch(n && mask) { ... } // Doh - should be "n & mask".
- // One can always use an if statement instead of switch(bool_expr).
- Diag(SwitchLoc, diag::warn_bool_switch_condition)
- << CondExpr->getSourceRange();
- }
- }
+ const Expr *CondExprBeforePromotion = CondExpr;
+ QualType CondTypeBeforePromotion =
+ GetTypeBeforeIntegralPromotion(CondExprBeforePromotion);
// Get the bitwidth of the switched-on value after promotions. We must
// convert the integer case values to this width before comparison.
@@ -877,50 +905,32 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
Expr *Lo = CS->getLHS();
- if (Lo->isTypeDependent() || Lo->isValueDependent()) {
+ if (Lo->isValueDependent()) {
HasDependentValue = true;
break;
}
- checkEnumTypesInSwitchStmt(*this, CondExpr, Lo);
-
- llvm::APSInt LoVal;
-
- if (getLangOpts().CPlusPlus11) {
- // C++11 [stmt.switch]p2: the constant-expression shall be a converted
- // constant expression of the promoted type of the switch condition.
- ExprResult ConvLo =
- CheckConvertedConstantExpression(Lo, CondType, LoVal, CCEK_CaseValue);
- if (ConvLo.isInvalid()) {
- CaseListIsErroneous = true;
- continue;
- }
- Lo = ConvLo.get();
- } else {
- // We already verified that the expression has a i-c-e value (C99
- // 6.8.4.2p3) - get that value now.
- LoVal = Lo->EvaluateKnownConstInt(Context);
-
- // If the LHS is not the same type as the condition, insert an implicit
- // cast.
- Lo = DefaultLvalueConversion(Lo).get();
- Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).get();
- }
+ // We already verified that the expression has a constant value;
+ // get that value (prior to conversions).
+ const Expr *LoBeforePromotion = Lo;
+ GetTypeBeforeIntegralPromotion(LoBeforePromotion);
+ llvm::APSInt LoVal = LoBeforePromotion->EvaluateKnownConstInt(Context);
// Check the unconverted value is within the range of possible values of
// the switch expression.
checkCaseValue(*this, Lo->getLocStart(), LoVal,
CondWidthBeforePromotion, CondIsSignedBeforePromotion);
+ // FIXME: This duplicates the check performed for warn_not_in_enum below.
+ checkEnumTypesInSwitchStmt(*this, CondExprBeforePromotion,
+ LoBeforePromotion);
+
// Convert the value to the same width/sign as the condition.
AdjustAPSInt(LoVal, CondWidth, CondIsSigned);
- CS->setLHS(Lo);
-
// If this is a case range, remember it in CaseRanges, otherwise CaseVals.
if (CS->getRHS()) {
- if (CS->getRHS()->isTypeDependent() ||
- CS->getRHS()->isValueDependent()) {
+ if (CS->getRHS()->isValueDependent()) {
HasDependentValue = true;
break;
}
@@ -1001,27 +1011,10 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
llvm::APSInt &LoVal = CaseRanges[i].first;
CaseStmt *CR = CaseRanges[i].second;
Expr *Hi = CR->getRHS();
- llvm::APSInt HiVal;
-
- if (getLangOpts().CPlusPlus11) {
- // C++11 [stmt.switch]p2: the constant-expression shall be a converted
- // constant expression of the promoted type of the switch condition.
- ExprResult ConvHi =
- CheckConvertedConstantExpression(Hi, CondType, HiVal,
- CCEK_CaseValue);
- if (ConvHi.isInvalid()) {
- CaseListIsErroneous = true;
- continue;
- }
- Hi = ConvHi.get();
- } else {
- HiVal = Hi->EvaluateKnownConstInt(Context);
- // If the RHS is not the same type as the condition, insert an
- // implicit cast.
- Hi = DefaultLvalueConversion(Hi).get();
- Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).get();
- }
+ const Expr *HiBeforePromotion = Hi;
+ GetTypeBeforeIntegralPromotion(HiBeforePromotion);
+ llvm::APSInt HiVal = HiBeforePromotion->EvaluateKnownConstInt(Context);
// Check the unconverted value is within the range of possible values of
// the switch expression.
@@ -1031,8 +1024,6 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Convert the value to the same width/sign as the condition.
AdjustAPSInt(HiVal, CondWidth, CondIsSigned);
- CR->setRHS(Hi);
-
// If the low value is bigger than the high value, the case is empty.
if (LoVal > HiVal) {
Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
@@ -1103,7 +1094,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
}
// Complain if we have a constant condition and we didn't find a match.
- if (!CaseListIsErroneous && ShouldCheckConstantCond) {
+ if (!CaseListIsErroneous && !CaseListIsIncomplete &&
+ ShouldCheckConstantCond) {
// TODO: it would be nice if we printed enums as enums, chars as
// chars, etc.
Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition)
@@ -1119,8 +1111,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();
// If switch has default case, then ignore it.
- if (!CaseListIsErroneous && !HasConstantCond && ET &&
- ET->getDecl()->isCompleteDefinition()) {
+ if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond &&
+ ET && ET->getDecl()->isCompleteDefinition()) {
const EnumDecl *ED = ET->getDecl();
EnumValsTy EnumVals;
@@ -1872,7 +1864,7 @@ StmtResult
Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc) {
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
ExprResult CollectionExprResult =
CheckObjCForCollectionOperand(ForLoc, collection);
@@ -2024,7 +2016,7 @@ void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
/// Build a variable declaration for a for-range statement.
VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc,
- QualType Type, const char *Name) {
+ QualType Type, StringRef Name) {
DeclContext *DC = SemaRef.CurContext;
IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
@@ -2093,10 +2085,12 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
}
// Build auto && __range = range-init
+ // Divide by 2, since the variables are in the inner scope (loop body).
+ const auto DepthStr = std::to_string(S->getDepth() / 2);
SourceLocation RangeLoc = Range->getLocStart();
VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
Context.getAutoRRefDeductType(),
- "__range");
+ std::string("__range") + DepthStr);
if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
diag::err_for_range_deduction_failure)) {
LoopVar->setInvalidDecl();
@@ -2118,7 +2112,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
DS, RParenLoc, Kind);
}
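The DepthStr suffix keeps the hidden variables of nested range-based for loops distinct; a sketch of the desugaring, with the exact numeric suffixes depending on scope depth (the values below are assumed for illustration):

void sum(int (&m)[2][3]) {
  for (auto &row : m)   // auto &&__range1 = m;
    for (int x : row)   // auto &&__range2 = row; a distinct hidden variable
      (void)x;
}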
-/// \brief Create the initialization, compare, and increment steps for
+/// Create the initialization, compare, and increment steps for
/// the range-based for loop expression.
/// This function does not handle array-based for loops,
/// which are created in Sema::BuildCXXForRangeStmt.
@@ -2339,10 +2333,12 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
return StmtError();
// Build auto __begin = begin-expr, __end = end-expr.
+ // Divide by 2, since the variables are in the inner scope (loop body).
+ const auto DepthStr = std::to_string(S->getDepth() / 2);
VarDecl *BeginVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
- "__begin");
+ std::string("__begin") + DepthStr);
VarDecl *EndVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
- "__end");
+ std::string("__end") + DepthStr);
// Build begin-expr and end-expr and attach to __begin and __end variables.
ExprResult BeginExpr, EndExpr;
@@ -2386,7 +2382,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
// FIXME: This results in codegen generating IR that recalculates the
// run-time number of elements (as opposed to just using the IR Value
// that corresponds to the run-time value of each bound that was
- // generated when the array was created.) If this proves too embarassing
+ // generated when the array was created.) If this proves too embarrassing
// even for unoptimized IR, consider passing a magic-value/cookie to
// codegen that then knows to simply use that initial llvm::Value (that
// corresponds to the bound at time of array creation) within
@@ -2656,7 +2652,7 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
if (ReturnsReference) {
// Loop variable creates a temporary. Suggest either to go with
- // non-reference loop variable to indiciate a copy is made, or
+ // non-reference loop variable to indicate a copy is made, or
// the correct time to bind a const reference.
SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy)
<< VD << VariableType << E->getType();
@@ -2717,7 +2713,7 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
/// DiagnoseForRangeVariableCopies - Diagnose three cases and fixes for them.
/// 1) for (const foo &x : foos) where foos only returns a copy. Suggest
/// using "const foo x" to show that a copy is made
-/// 2) for (const bar &x : foos) where bar is a temporary intialized by bar.
+/// 2) for (const bar &x : foos) where bar is a temporary initialized by bar.
/// Suggest either "const bar x" to keep the copying or "const foo& x" to
/// prevent the copy.
/// 3) for (const foo x : foos) where x is constructed from a reference foo.
@@ -2779,7 +2775,7 @@ StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl) {
- getCurFunction()->setHasBranchIntoScope();
+ setFunctionHasBranchIntoScope();
TheDecl->markUsed(Context);
return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}
@@ -2806,7 +2802,7 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
return StmtError();
E = ExprRes.get();
- getCurFunction()->setHasIndirectGoto();
+ setFunctionHasIndirectGoto();
return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}
@@ -2846,7 +2842,7 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
return new (Context) BreakStmt(BreakLoc);
}
-/// \brief Determine whether the given expression is a candidate for
+/// Determine whether the given expression is a candidate for
/// copy elision in either a return statement or a throw expression.
///
/// \param ReturnType If we're determining the copy elision candidate for
@@ -2857,7 +2853,7 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
/// \param E The expression being returned from the function or block, or
/// being thrown.
///
-/// \param AllowParamOrMoveConstructible Whether we allow function parameters or
+/// \param CESK Whether we allow function parameters or
/// id-expressions that could be moved out of the function to be considered NRVO
/// candidates. C++ prohibits these for NRVO itself, but we re-use this logic to
/// determine whether we should try to move as part of a return or throw (which
@@ -2866,10 +2862,7 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
/// \returns The NRVO candidate variable, if the return statement may use the
/// NRVO, or NULL if there is no such candidate.
VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType, Expr *E,
- bool AllowParamOrMoveConstructible) {
- if (!getLangOpts().CPlusPlus)
- return nullptr;
-
+ CopyElisionSemanticsKind CESK) {
// - in a return statement in a function [where] ...
// ... the expression is the name of a non-volatile automatic object ...
DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
@@ -2879,13 +2872,13 @@ VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType, Expr *E,
if (!VD)
return nullptr;
- if (isCopyElisionCandidate(ReturnType, VD, AllowParamOrMoveConstructible))
+ if (isCopyElisionCandidate(ReturnType, VD, CESK))
return VD;
return nullptr;
}
bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
- bool AllowParamOrMoveConstructible) {
+ CopyElisionSemanticsKind CESK) {
QualType VDType = VD->getType();
// - in a return statement in a function with ...
// ... a class return type ...
@@ -2894,16 +2887,17 @@ bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
return false;
// ... the same cv-unqualified type as the function return type ...
// When considering moving this expression out, allow dissimilar types.
- if (!AllowParamOrMoveConstructible && !VDType->isDependentType() &&
+ if (!(CESK & CES_AllowDifferentTypes) && !VDType->isDependentType() &&
!Context.hasSameUnqualifiedType(ReturnType, VDType))
return false;
}
// ...object (other than a function or catch-clause parameter)...
if (VD->getKind() != Decl::Var &&
- !(AllowParamOrMoveConstructible && VD->getKind() == Decl::ParmVar))
+ !((CESK & CES_AllowParameters) && VD->getKind() == Decl::ParmVar))
+ return false;
+ if (!(CESK & CES_AllowExceptionVariables) && VD->isExceptionVariable())
return false;
- if (VD->isExceptionVariable()) return false;
// ...automatic...
if (!VD->hasLocalStorage()) return false;
@@ -2913,7 +2907,7 @@ bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
// variable will no longer be used.
if (VD->hasAttr<BlocksAttr>()) return false;
- if (AllowParamOrMoveConstructible)
+ if (CESK & CES_AllowDifferentTypes)
return true;
// ...non-volatile...
@@ -2928,7 +2922,95 @@ bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
return true;
}
-/// \brief Perform the initialization of a potentially-movable value, which
+/// Try to perform the initialization of a potentially-movable value,
+/// which is the operand to a return or throw statement.
+///
+/// This routine implements C++14 [class.copy]p32, which attempts to treat
+/// returned lvalues as rvalues in certain cases (to prefer move construction),
+/// then falls back to treating them as lvalues if that failed.
+///
+/// \param ConvertingConstructorsOnly If true, follow [class.copy]p32 and reject
+/// resolutions that find non-constructors, such as derived-to-base conversions
+/// or `operator T()&&` member functions. If false, do consider such
+/// conversion sequences.
+///
+/// \param Res We will fill this in if move-initialization was possible.
+/// If move-initialization is not possible, such that we must fall back to
+/// treating the operand as an lvalue, we will leave Res in its original
+/// invalid state.
+static void TryMoveInitialization(Sema& S,
+ const InitializedEntity &Entity,
+ const VarDecl *NRVOCandidate,
+ QualType ResultType,
+ Expr *&Value,
+ bool ConvertingConstructorsOnly,
+ ExprResult &Res) {
+ ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
+ CK_NoOp, Value, VK_XValue);
+
+ Expr *InitExpr = &AsRvalue;
+
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ Value->getLocStart(), Value->getLocStart());
+
+ InitializationSequence Seq(S, Entity, Kind, InitExpr);
+
+ if (!Seq)
+ return;
+
+ for (const InitializationSequence::Step &Step : Seq.steps()) {
+ if (Step.Kind != InitializationSequence::SK_ConstructorInitialization &&
+ Step.Kind != InitializationSequence::SK_UserConversion)
+ continue;
+
+ FunctionDecl *FD = Step.Function.Function;
+ if (ConvertingConstructorsOnly) {
+ if (isa<CXXConstructorDecl>(FD)) {
+ // C++14 [class.copy]p32:
+ // [...] If the first overload resolution fails or was not performed,
+ // or if the type of the first parameter of the selected constructor
+ // is not an rvalue reference to the object's type (possibly
+ // cv-qualified), overload resolution is performed again, considering
+ // the object as an lvalue.
+ const RValueReferenceType *RRefType =
+ FD->getParamDecl(0)->getType()->getAs<RValueReferenceType>();
+ if (!RRefType)
+ break;
+ if (!S.Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
+ NRVOCandidate->getType()))
+ break;
+ } else {
+ continue;
+ }
+ } else {
+ if (isa<CXXConstructorDecl>(FD)) {
+ // Check that overload resolution selected a constructor taking an
+ // rvalue reference. If it selected an lvalue reference, then we
+ // didn't need to cast this thing to an rvalue in the first place.
+ if (!isa<RValueReferenceType>(FD->getParamDecl(0)->getType()))
+ break;
+ } else if (isa<CXXMethodDecl>(FD)) {
+ // Check that overload resolution selected a conversion operator
+ // taking an rvalue reference.
+ if (cast<CXXMethodDecl>(FD)->getRefQualifier() != RQ_RValue)
+ break;
+ } else {
+ continue;
+ }
+ }
+
+ // Promote "AsRvalue" to the heap, since we now need this
+ // expression node to persist.
+ Value = ImplicitCastExpr::Create(S.Context, Value->getType(), CK_NoOp,
+ Value, nullptr, VK_XValue);
+
+ // Complete type-checking the initialization of the return type
+ // using the constructor we found.
+ Res = Seq.Perform(S, Entity, Kind, Value);
+ }
+}
+
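A sketch (not part of the patch) of the two-phase resolution this helper performs for a return statement:

#include <string>
std::string make() {
  std::string s("hello");
  return s;  // 's' is first treated as an xvalue, so the move constructor is
}            // selected; only if that resolution failed would the copy be used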
+/// Perform the initialization of a potentially-movable value, which
/// is the result of return value.
///
/// This routine implements C++14 [class.copy]p32, which attempts to treat
@@ -2951,52 +3033,82 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
// were designated by an rvalue.
ExprResult Res = ExprError();
- if (AllowNRVO && !NRVOCandidate)
- NRVOCandidate = getCopyElisionCandidate(ResultType, Value, true);
-
- if (AllowNRVO && NRVOCandidate) {
- ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
- CK_NoOp, Value, VK_XValue);
-
- Expr *InitExpr = &AsRvalue;
-
- InitializationKind Kind = InitializationKind::CreateCopy(
- Value->getLocStart(), Value->getLocStart());
-
- InitializationSequence Seq(*this, Entity, Kind, InitExpr);
- if (Seq) {
- for (const InitializationSequence::Step &Step : Seq.steps()) {
- if (!(Step.Kind ==
- InitializationSequence::SK_ConstructorInitialization ||
- (Step.Kind == InitializationSequence::SK_UserConversion &&
- isa<CXXConstructorDecl>(Step.Function.Function))))
- continue;
-
- CXXConstructorDecl *Constructor =
- cast<CXXConstructorDecl>(Step.Function.Function);
-
- const RValueReferenceType *RRefType
- = Constructor->getParamDecl(0)->getType()
- ->getAs<RValueReferenceType>();
-
- // [...] If the first overload resolution fails or was not performed, or
- // if the type of the first parameter of the selected constructor is not
- // an rvalue reference to the object's type (possibly cv-qualified),
- // overload resolution is performed again, considering the object as an
- // lvalue.
- if (!RRefType ||
- !Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
- NRVOCandidate->getType()))
- break;
+ if (AllowNRVO) {
+ bool AffectedByCWG1579 = false;
+
+ if (!NRVOCandidate) {
+ NRVOCandidate = getCopyElisionCandidate(ResultType, Value, CES_Default);
+ if (NRVOCandidate &&
+ !getDiagnostics().isIgnored(diag::warn_return_std_move_in_cxx11,
+ Value->getExprLoc())) {
+ const VarDecl *NRVOCandidateInCXX11 =
+ getCopyElisionCandidate(ResultType, Value, CES_FormerDefault);
+ AffectedByCWG1579 = (!NRVOCandidateInCXX11);
+ }
+ }
- // Promote "AsRvalue" to the heap, since we now need this
- // expression node to persist.
- Value = ImplicitCastExpr::Create(Context, Value->getType(), CK_NoOp,
- Value, nullptr, VK_XValue);
+ if (NRVOCandidate) {
+ TryMoveInitialization(*this, Entity, NRVOCandidate, ResultType, Value,
+ true, Res);
+ }
- // Complete type-checking the initialization of the return type
- // using the constructor we found.
- Res = Seq.Perform(*this, Entity, Kind, Value);
+ if (!Res.isInvalid() && AffectedByCWG1579) {
+ QualType QT = NRVOCandidate->getType();
+ if (QT.getNonReferenceType()
+ .getUnqualifiedType()
+ .isTriviallyCopyableType(Context)) {
+ // Adding 'std::move' around a trivially copyable variable is probably
+ // pointless. Don't suggest it.
+ } else {
+ // Common cases for this are returning unique_ptr<Derived> from a
+ // function of return type unique_ptr<Base>, or returning T from a
+ // function of return type Expected<T>. This is totally fine in a
+ // post-CWG1579 world, but was not fine before.
+ assert(!ResultType.isNull());
+ SmallString<32> Str;
+ Str += "std::move(";
+ Str += NRVOCandidate->getDeclName().getAsString();
+ Str += ")";
+ Diag(Value->getExprLoc(), diag::warn_return_std_move_in_cxx11)
+ << Value->getSourceRange()
+ << NRVOCandidate->getDeclName() << ResultType << QT;
+ Diag(Value->getExprLoc(), diag::note_add_std_move_in_cxx11)
+ << FixItHint::CreateReplacement(Value->getSourceRange(), Str);
+ }
+ } else if (Res.isInvalid() &&
+ !getDiagnostics().isIgnored(diag::warn_return_std_move,
+ Value->getExprLoc())) {
+ const VarDecl *FakeNRVOCandidate =
+ getCopyElisionCandidate(QualType(), Value, CES_AsIfByStdMove);
+ if (FakeNRVOCandidate) {
+ QualType QT = FakeNRVOCandidate->getType();
+ if (QT->isLValueReferenceType()) {
+ // Adding 'std::move' around an lvalue reference variable's name is
+ // dangerous. Don't suggest it.
+ } else if (QT.getNonReferenceType()
+ .getUnqualifiedType()
+ .isTriviallyCopyableType(Context)) {
+ // Adding 'std::move' around a trivially copyable variable is probably
+ // pointless. Don't suggest it.
+ } else {
+ ExprResult FakeRes = ExprError();
+ Expr *FakeValue = Value;
+ TryMoveInitialization(*this, Entity, FakeNRVOCandidate, ResultType,
+ FakeValue, false, FakeRes);
+ if (!FakeRes.isInvalid()) {
+ bool IsThrow =
+ (Entity.getKind() == InitializedEntity::EK_Exception);
+ SmallString<32> Str;
+ Str += "std::move(";
+ Str += FakeNRVOCandidate->getDeclName().getAsString();
+ Str += ")";
+ Diag(Value->getExprLoc(), diag::warn_return_std_move)
+ << Value->getSourceRange()
+ << FakeNRVOCandidate->getDeclName() << IsThrow;
+ Diag(Value->getExprLoc(), diag::note_add_std_move)
+ << FixItHint::CreateReplacement(Value->getSourceRange(), Str);
+ }
+ }
}
}
}
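Sketches of the code patterns the two new diagnostics target (illustrative, not taken from the patch's tests):

#include <memory>
struct Base { Base(); Base(const Base &); Base(Base &&); };
struct Derived : Base {};

Base slice() {
  Derived d;
  return d;  // -Wreturn-std-move: treating 'd' as an rvalue selects
}            // Base(Base&&), which is not an rvalue reference to Derived, so
             // the copy is used; the fix-it suggests 'return std::move(d);'

std::unique_ptr<Base> pass(std::unique_ptr<Derived> p) {
  return p;  // -Wreturn-std-move-in-c++11: implicitly moves after CWG1579,
}            // but pre-DR compilers copy (and fail to build); the fix-it
             // suggests std::move for portability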
@@ -3010,7 +3122,7 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
return Res;
}
-/// \brief Determine whether the declared return type of the specified function
+/// Determine whether the declared return type of the specified function
/// contains 'auto'.
static bool hasDeducedReturnType(FunctionDecl *FD) {
const FunctionProtoType *FPT =
@@ -3144,7 +3256,7 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// In C++ the return statement is handled via a copy initialization.
// the C version of which boils down to CheckSingleAssignmentConstraints.
- NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
FnRetType,
NRVOCandidate != nullptr);
@@ -3157,7 +3269,7 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
RetValExp = Res.get();
CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc);
} else {
- NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
}
if (RetValExp) {
@@ -3182,7 +3294,7 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
namespace {
-/// \brief Marks all typedefs in all local classes in a type referenced.
+/// Marks all typedefs in all local classes in a type referenced.
///
/// In a function like
/// auto f() {
@@ -3228,6 +3340,12 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr,
AutoType *AT) {
+ // If this is the conversion function for a lambda, we choose to deduce its

+ // type from the corresponding call operator, not from the synthesized return
+ // statement within it. See Sema::DeduceReturnType.
+ if (isLambdaConversionOperator(FD))
+ return false;
+
TypeLoc OrigResultType = getReturnTypeLoc(FD);
QualType Deduced;
@@ -3521,7 +3639,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// In C++ the return statement is handled via a copy initialization,
// the C version of which boils down to CheckSingleAssignmentConstraints.
if (RetValExp)
- NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
if (!HasDependentReturnType && !RetValExp->isTypeDependent()) {
// we have a non-void function with an expression, continue checking
InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
@@ -3596,7 +3714,7 @@ Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
if (!getLangOpts().ObjCExceptions)
Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
unsigned NumCatchStmts = CatchStmts.size();
return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
NumCatchStmts, Finally);
@@ -3687,7 +3805,7 @@ StmtResult
Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr,
Stmt *SyncBody) {
// We can't jump into or indirect-jump out of a @synchronized block.
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody);
}
@@ -3703,7 +3821,7 @@ Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
StmtResult
Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) {
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body);
}
@@ -3815,7 +3933,11 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers) {
// Don't report an error if 'try' is used in system headers.
if (!getLangOpts().CXXExceptions &&
- !getSourceManager().isInSystemHeader(TryLoc))
+ !getSourceManager().isInSystemHeader(TryLoc) &&
+ (!getLangOpts().OpenMPIsDevice ||
+ !getLangOpts().OpenMPHostCXXExceptions ||
+ isInOpenMPTargetExecutionDirective() ||
+ isInOpenMPDeclareTargetContext()))
Diag(TryLoc, diag::err_exceptions_disabled) << "try";
// Exceptions aren't allowed in CUDA device code.
@@ -4029,32 +4151,29 @@ Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc,
return RD;
}
-static void buildCapturedStmtCaptureList(
- SmallVectorImpl<CapturedStmt::Capture> &Captures,
- SmallVectorImpl<Expr *> &CaptureInits,
- ArrayRef<CapturingScopeInfo::Capture> Candidates) {
-
- typedef ArrayRef<CapturingScopeInfo::Capture>::const_iterator CaptureIter;
- for (CaptureIter Cap = Candidates.begin(); Cap != Candidates.end(); ++Cap) {
-
- if (Cap->isThisCapture()) {
- Captures.push_back(CapturedStmt::Capture(Cap->getLocation(),
+static void
+buildCapturedStmtCaptureList(SmallVectorImpl<CapturedStmt::Capture> &Captures,
+ SmallVectorImpl<Expr *> &CaptureInits,
+ ArrayRef<sema::Capture> Candidates) {
+ for (const sema::Capture &Cap : Candidates) {
+ if (Cap.isThisCapture()) {
+ Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
CapturedStmt::VCK_This));
- CaptureInits.push_back(Cap->getInitExpr());
+ CaptureInits.push_back(Cap.getInitExpr());
continue;
- } else if (Cap->isVLATypeCapture()) {
+ } else if (Cap.isVLATypeCapture()) {
Captures.push_back(
- CapturedStmt::Capture(Cap->getLocation(), CapturedStmt::VCK_VLAType));
+ CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_VLAType));
CaptureInits.push_back(nullptr);
continue;
}
- Captures.push_back(CapturedStmt::Capture(Cap->getLocation(),
- Cap->isReferenceCapture()
+ Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
+ Cap.isReferenceCapture()
? CapturedStmt::VCK_ByRef
: CapturedStmt::VCK_ByCopy,
- Cap->getVariable()));
- CaptureInits.push_back(Cap->getInitExpr());
+ Cap.getVariable()));
+ CaptureInits.push_back(Cap.getInitExpr());
}
}
@@ -4104,7 +4223,9 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
assert(!ContextIsFound &&
"null type has been found already for '__context' parameter");
IdentifierInfo *ParamName = &Context.Idents.get("__context");
- QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
+ QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD))
+ .withConst()
+ .withRestrict();
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
ImplicitParamDecl::CapturedContext);
@@ -4153,7 +4274,7 @@ void Sema::ActOnCapturedRegionError() {
SmallVector<Decl*, 4> Fields(Record->fields());
ActOnFields(/*Scope=*/nullptr, Record->getLocation(), Record, Fields,
- SourceLocation(), SourceLocation(), /*AttributeList=*/nullptr);
+ SourceLocation(), SourceLocation(), ParsedAttributesView());
PopDeclContext();
PopFunctionScopeInfo();
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index fc1cc7bbe544..7e26b71c0482 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -109,7 +109,7 @@ static bool CheckNakedParmReference(Expr *E, Sema &S) {
return false;
}
-/// \brief Returns true if given expression is not compatible with inline
+/// Returns true if given expression is not compatible with inline
/// assembly's memory constraint; false otherwise.
static bool checkExprMemoryConstraintCompat(Sema &S, Expr *E,
TargetInfo::ConstraintInfo &Info,
@@ -793,7 +793,7 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc) {
bool IsSimple = (NumOutputs != 0 || NumInputs != 0);
- getCurFunction()->setHasBranchProtectedScope();
+ setFunctionHasBranchProtectedScope();
MSAsmStmt *NS =
new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
/*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs,
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index e55e20c2827f..e39a65c6ce0c 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -23,7 +23,7 @@
using namespace clang;
using namespace sema;
-static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A,
+static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
FallThroughAttr Attr(A.getRange(), S.Context,
A.getAttributeSpellingListIndex());
@@ -53,7 +53,7 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A,
return ::new (S.Context) auto(Attr);
}
-static Attr *handleSuppressAttr(Sema &S, Stmt *St, const AttributeList &A,
+static Attr *handleSuppressAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
if (A.getNumArgs() < 1) {
S.Diag(A.getLoc(), diag::err_attribute_too_few_arguments)
@@ -78,7 +78,7 @@ static Attr *handleSuppressAttr(Sema &S, Stmt *St, const AttributeList &A,
DiagnosticIdentifiers.size(), A.getAttributeSpellingListIndex());
}
-static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A,
+static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange) {
IdentifierLoc *PragmaNameLoc = A.getArgAsIdent(0);
IdentifierLoc *OptionLoc = A.getArgAsIdent(1);
@@ -246,7 +246,7 @@ CheckForIncompatibleAttributes(Sema &S,
}
}
-static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const AttributeList &A,
+static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
// Although the feature was introduced only in OpenCL C v2.0 s6.11.5, it's
// useful for OpenCL 1.x too and doesn't require HW support.
@@ -288,21 +288,21 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const AttributeList &A,
return OpenCLUnrollHintAttr::CreateImplicit(S.Context, UnrollFactor);
}
-static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A,
+static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
switch (A.getKind()) {
- case AttributeList::UnknownAttribute:
+ case ParsedAttr::UnknownAttribute:
S.Diag(A.getLoc(), A.isDeclspecAttribute() ?
diag::warn_unhandled_ms_attribute_ignored :
diag::warn_unknown_attribute_ignored) << A.getName();
return nullptr;
- case AttributeList::AT_FallThrough:
+ case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
- case AttributeList::AT_LoopHint:
+ case ParsedAttr::AT_LoopHint:
return handleLoopHintAttr(S, St, A, Range);
- case AttributeList::AT_OpenCLUnrollHint:
+ case ParsedAttr::AT_OpenCLUnrollHint:
return handleOpenCLUnrollHint(S, St, A, Range);
- case AttributeList::AT_Suppress:
+ case ParsedAttr::AT_Suppress:
return handleSuppressAttr(S, St, A, Range);
default:
// if we're here, then we parsed a known attribute, but didn't recognize
@@ -313,11 +313,12 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A,
}
}
-StmtResult Sema::ProcessStmtAttributes(Stmt *S, AttributeList *AttrList,
+StmtResult Sema::ProcessStmtAttributes(Stmt *S,
+ const ParsedAttributesView &AttrList,
SourceRange Range) {
SmallVector<const Attr*, 8> Attrs;
- for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- if (Attr *a = ProcessStmtAttribute(*this, S, *l, Range))
+ for (const ParsedAttr &AL : AttrList) {
+ if (Attr *a = ProcessStmtAttribute(*this, S, AL, Range))
Attrs.push_back(a);
}
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index d94cb0d0f485..dd1163267119 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -46,7 +46,7 @@ clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
}
namespace clang {
-/// \brief [temp.constr.decl]p2: A template's associated constraints are
+/// [temp.constr.decl]p2: A template's associated constraints are
/// defined as a single constraint-expression derived from the introduced
/// constraint-expressions [ ... ].
///
@@ -65,7 +65,7 @@ static Expr *clang::formAssociatedConstraints(TemplateParameterList *Params,
return Params->getRequiresClause();
}
-/// \brief Determine whether the declaration found is acceptable as the name
+/// Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
/// returns NULL.
static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
@@ -105,6 +105,12 @@ static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
return nullptr;
}
+ // 'using Dependent::foo;' can resolve to a template name.
+ // 'using typename Dependent::foo;' cannot (not even if 'foo' is an
+ // injected-class-name).
+ if (isa<UnresolvedUsingValueDecl>(D))
+ return D;
+
return nullptr;
}
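A sketch of the new case (names invented for illustration):

template <typename T> struct BaseT {
  template <typename U> void go(U);
};
template <typename T> struct DerivedT : BaseT<T> {
  using BaseT<T>::go;         // UnresolvedUsingValueDecl: may name a template
  void run() { go<int>(0); }  // 'go<int>' can now be parsed as a template-id
};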
@@ -158,7 +164,7 @@ bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
TemplateNameKind Sema::isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
- UnqualifiedId &Name,
+ const UnqualifiedId &Name,
ParsedType ObjectTypePtr,
bool EnteringContext,
TemplateTy &TemplateResult,
@@ -169,16 +175,16 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
MemberOfUnknownSpecialization = false;
switch (Name.getKind()) {
- case UnqualifiedId::IK_Identifier:
+ case UnqualifiedIdKind::IK_Identifier:
TName = DeclarationName(Name.Identifier);
break;
- case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
TName = Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator);
break;
- case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
TName = Context.DeclarationNames.getCXXLiteralOperatorName(Name.Identifier);
break;
@@ -189,8 +195,9 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
QualType ObjectType = ObjectTypePtr.get();
LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName);
- LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
- MemberOfUnknownSpecialization);
+ if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
+ MemberOfUnknownSpecialization))
+ return TNK_Non_template;
if (R.empty()) return TNK_Non_template;
if (R.isAmbiguous()) {
// Suppress diagnostics; we'll redo this lookup later.
@@ -213,6 +220,10 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
// We'll do this lookup again later.
R.suppressDiagnostics();
+ } else if (isa<UnresolvedUsingValueDecl>((*R.begin())->getUnderlyingDecl())) {
+ // We don't yet know whether this is a template-name or not.
+ MemberOfUnknownSpecialization = true;
+ return TNK_Non_template;
} else {
TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl());
@@ -252,8 +263,10 @@ bool Sema::isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
// syntactic form of a deduction guide is enough to identify it even
// if we can't look up the template name at all.
LookupResult R(*this, DeclarationName(&Name), NameLoc, LookupOrdinaryName);
- LookupTemplateName(R, S, SS, /*ObjectType*/QualType(),
- /*EnteringContext*/false, MemberOfUnknownSpecialization);
+ if (LookupTemplateName(R, S, SS, /*ObjectType*/ QualType(),
+ /*EnteringContext*/ false,
+ MemberOfUnknownSpecialization))
+ return false;
if (R.empty()) return false;
if (R.isAmbiguous()) {
@@ -298,39 +311,40 @@ bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
return true;
}
-void Sema::LookupTemplateName(LookupResult &Found,
+bool Sema::LookupTemplateName(LookupResult &Found,
Scope *S, CXXScopeSpec &SS,
QualType ObjectType,
bool EnteringContext,
- bool &MemberOfUnknownSpecialization) {
+ bool &MemberOfUnknownSpecialization,
+ SourceLocation TemplateKWLoc) {
// Determine where to perform name lookup
MemberOfUnknownSpecialization = false;
DeclContext *LookupCtx = nullptr;
- bool isDependent = false;
+ bool IsDependent = false;
if (!ObjectType.isNull()) {
// This nested-name-specifier occurs in a member access expression, e.g.,
// x->B::f, and we are looking into the type of the object.
assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
LookupCtx = computeDeclContext(ObjectType);
- isDependent = ObjectType->isDependentType();
- assert((isDependent || !ObjectType->isIncompleteType() ||
+ IsDependent = !LookupCtx;
+ assert((IsDependent || !ObjectType->isIncompleteType() ||
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
// Template names cannot appear inside an Objective-C class or object type.
if (ObjectType->isObjCObjectOrInterfaceType()) {
Found.clear();
- return;
+ return false;
}
} else if (SS.isSet()) {
// This nested-name-specifier occurs after another nested-name-specifier,
// so look into the context associated with the prior nested-name-specifier.
LookupCtx = computeDeclContext(SS, EnteringContext);
- isDependent = isDependentScopeSpecifier(SS);
+ IsDependent = !LookupCtx;
// The declaration context must be complete.
if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
- return;
+ return true;
}
bool ObjectTypeSearchedInScope = false;
@@ -341,34 +355,43 @@ void Sema::LookupTemplateName(LookupResult &Found,
// expression or the declaration context associated with a prior
// nested-name-specifier.
LookupQualifiedName(Found, LookupCtx);
- if (!ObjectType.isNull() && Found.empty()) {
- // C++ [basic.lookup.classref]p1:
- // In a class member access expression (5.2.5), if the . or -> token is
- // immediately followed by an identifier followed by a <, the
- // identifier must be looked up to determine whether the < is the
- // beginning of a template argument list (14.2) or a less-than operator.
- // The identifier is first looked up in the class of the object
- // expression. If the identifier is not found, it is then looked up in
- // the context of the entire postfix-expression and shall name a class
- // or function template.
- if (S) LookupName(Found, S);
- ObjectTypeSearchedInScope = true;
+
+ // FIXME: The C++ standard does not clearly specify what happens in the
+ // case where the object type is dependent, and implementations vary. In
+ // Clang, we treat a name after a . or -> as a template-name if lookup
+ // finds a non-dependent member or member of the current instantiation that
+ // is a type template, or finds no such members and lookup in the context
+ // of the postfix-expression finds a type template. In the latter case, the
+ // name is nonetheless dependent, and we may resolve it to a member of an
+ // unknown specialization when we come to instantiate the template.
+ IsDependent |= Found.wasNotFoundInCurrentInstantiation();
+ }
+
+ if (!SS.isSet() && (ObjectType.isNull() || Found.empty())) {
+ // C++ [basic.lookup.classref]p1:
+ // In a class member access expression (5.2.5), if the . or -> token is
+ // immediately followed by an identifier followed by a <, the
+ // identifier must be looked up to determine whether the < is the
+ // beginning of a template argument list (14.2) or a less-than operator.
+ // The identifier is first looked up in the class of the object
+ // expression. If the identifier is not found, it is then looked up in
+ // the context of the entire postfix-expression and shall name a class
+ // template.
+ if (S)
+ LookupName(Found, S);
+
+ if (!ObjectType.isNull()) {
+ // FIXME: We should filter out all non-type templates here, particularly
+ // variable templates and concepts. But the exclusion of alias templates
+ // and template template parameters is a wording defect.
AllowFunctionTemplatesInLookup = false;
+ ObjectTypeSearchedInScope = true;
}
- } else if (isDependent && (!S || ObjectType.isNull())) {
- // We cannot look into a dependent object type or nested nme
- // specifier.
- MemberOfUnknownSpecialization = true;
- return;
- } else {
- // Perform unqualified name lookup in the current scope.
- LookupName(Found, S);
- if (!ObjectType.isNull())
- AllowFunctionTemplatesInLookup = false;
+ IsDependent |= Found.wasNotFoundInCurrentInstantiation();
}
- if (Found.empty() && !isDependent) {
+ if (Found.empty() && !IsDependent) {
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
Found.clear();
@@ -402,11 +425,27 @@ void Sema::LookupTemplateName(LookupResult &Found,
}
}
+ NamedDecl *ExampleLookupResult =
+ Found.empty() ? nullptr : Found.getRepresentativeDecl();
FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
if (Found.empty()) {
- if (isDependent)
+ if (IsDependent) {
MemberOfUnknownSpecialization = true;
- return;
+ return false;
+ }
+
+ // If a 'template' keyword was used, a lookup that finds only non-template
+ // names is an error.
+ if (ExampleLookupResult && TemplateKWLoc.isValid()) {
+ Diag(Found.getNameLoc(), diag::err_template_kw_refers_to_non_template)
+ << Found.getLookupName() << SS.getRange();
+ Diag(ExampleLookupResult->getUnderlyingDecl()->getLocation(),
+ diag::note_template_kw_refers_to_non_template)
+ << Found.getLookupName();
+ return true;
+ }
+
+ return false;
}
if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope &&
@@ -453,6 +492,8 @@ void Sema::LookupTemplateName(LookupResult &Found,
}
}
}
+
+ return false;
}
void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
@@ -467,20 +508,41 @@ void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
DeclContext *LookupCtx = nullptr;
NamedDecl *Found = nullptr;
+ bool MissingTemplateKeyword = false;
// Figure out what name we looked up.
- if (auto *ME = dyn_cast<MemberExpr>(TemplateName.get())) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(TemplateName.get())) {
+ NameInfo = DRE->getNameInfo();
+ SS.Adopt(DRE->getQualifierLoc());
+ LookupKind = LookupOrdinaryName;
+ Found = DRE->getFoundDecl();
+ } else if (auto *ME = dyn_cast<MemberExpr>(TemplateName.get())) {
NameInfo = ME->getMemberNameInfo();
SS.Adopt(ME->getQualifierLoc());
LookupKind = LookupMemberName;
LookupCtx = ME->getBase()->getType()->getAsCXXRecordDecl();
Found = ME->getMemberDecl();
+ } else if (auto *DSDRE =
+ dyn_cast<DependentScopeDeclRefExpr>(TemplateName.get())) {
+ NameInfo = DSDRE->getNameInfo();
+ SS.Adopt(DSDRE->getQualifierLoc());
+ MissingTemplateKeyword = true;
+ } else if (auto *DSME =
+ dyn_cast<CXXDependentScopeMemberExpr>(TemplateName.get())) {
+ NameInfo = DSME->getMemberNameInfo();
+ SS.Adopt(DSME->getQualifierLoc());
+ MissingTemplateKeyword = true;
} else {
- auto *DRE = cast<DeclRefExpr>(TemplateName.get());
- NameInfo = DRE->getNameInfo();
- SS.Adopt(DRE->getQualifierLoc());
- LookupKind = LookupOrdinaryName;
- Found = DRE->getFoundDecl();
+ llvm_unreachable("unexpected kind of potential template name");
+ }
+
+ // If this is a dependent-scope lookup, diagnose that the 'template' keyword
+ // was missing.
+ if (MissingTemplateKeyword) {
+ Diag(NameInfo.getLocStart(), diag::err_template_kw_missing)
+ << "" << NameInfo.getName().getAsString()
+ << SourceRange(Less, Greater);
+ return;
}
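A sketch of the recovery this enables (hypothetical names):

template <typename T> void call(T t) {
  t.template go<int>();  // required spelling: 'go' is a dependent member
  // t.go<int>();        // previously produced puzzling errors about '<';
}                        // now diagnosed as a missing 'template' keyword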
// Try to correct the name by looking for templates and C++ named casts.
@@ -765,7 +827,7 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
llvm_unreachable("Unhandled parsed template argument");
}
-/// \brief Translates template arguments as provided by the parser
+/// Translates template arguments as provided by the parser
/// into template arguments used by semantic analysis.
void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
TemplateArgumentListInfo &TemplateArgs) {
@@ -783,6 +845,56 @@ static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S,
SemaRef.DiagnoseTemplateParameterShadow(Loc, PrevDecl);
}
+/// Convert a parsed type into a parsed template argument. This is mostly
+/// trivial, except that we may have parsed a C++17 deduced class template
+/// specialization type, in which case we should form a template template
+/// argument instead of a type template argument.
+ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
+ TypeSourceInfo *TInfo;
+ QualType T = GetTypeFromParser(ParsedType.get(), &TInfo);
+ if (T.isNull())
+ return ParsedTemplateArgument();
+ assert(TInfo && "template argument with no location");
+
+ // If we might have formed a deduced template specialization type, convert
+ // it to a template template argument.
+ if (getLangOpts().CPlusPlus17) {
+ TypeLoc TL = TInfo->getTypeLoc();
+ SourceLocation EllipsisLoc;
+ if (auto PET = TL.getAs<PackExpansionTypeLoc>()) {
+ EllipsisLoc = PET.getEllipsisLoc();
+ TL = PET.getPatternLoc();
+ }
+
+ CXXScopeSpec SS;
+ if (auto ET = TL.getAs<ElaboratedTypeLoc>()) {
+ SS.Adopt(ET.getQualifierLoc());
+ TL = ET.getNamedTypeLoc();
+ }
+
+ if (auto DTST = TL.getAs<DeducedTemplateSpecializationTypeLoc>()) {
+ TemplateName Name = DTST.getTypePtr()->getTemplateName();
+ if (SS.isSet())
+ Name = Context.getQualifiedTemplateName(SS.getScopeRep(),
+ /*HasTemplateKeyword*/ false,
+ Name.getAsTemplateDecl());
+ ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
+ DTST.getTemplateNameLoc());
+ if (EllipsisLoc.isValid())
+ Result = Result.getTemplatePackExpansion(EllipsisLoc);
+ return Result;
+ }
+ }
+
+ // This is a normal type template argument. Note, if the type template
+ // argument is an injected-class-name for a template, it has a dual nature
+ // and can be used as either a type or a template. We handle that in
+ // convertTypeTemplateArgumentToTemplate.
+ return ParsedTemplateArgument(ParsedTemplateArgument::Type,
+ ParsedType.get().getAsOpaquePtr(),
+ TInfo->getTypeLoc().getLocStart());
+}
+
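// Illustration (hypothetical names Pair/Holder): a hedged sketch of the
// C++17 case handled by ActOnTemplateTypeArgument above.
//
//   template<typename T> struct Pair { Pair(T, T); };
//   template<template<typename> class TT> struct Holder {};
//
//   // In C++17, 'Pair' in a type position can parse as a deduced class
//   // template specialization type; the code above converts it back into a
//   // template template argument so it still matches the parameter 'TT'.
//   Holder<Pair> h;
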
/// ActOnTypeParameter - Called when a C++ template type parameter
/// (e.g., "typename T") has been parsed. Typename specifies whether
/// the keyword "typename" was used to declare the type parameter
@@ -854,7 +966,7 @@ NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
return Param;
}
-/// \brief Check that the type of a non-type template parameter is
+/// Check that the type of a non-type template parameter is
/// well-formed.
///
/// \returns the (possibly-promoted) parameter type if valid;
@@ -933,9 +1045,9 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
auto CheckValidDeclSpecifiers = [this, &D] {
// C++ [temp.param]
// p1
- // template-parameter:
- // ...
- // parameter-declaration
+ // template-parameter:
+ // ...
+ // parameter-declaration
// p2
// ... A storage class shall not be specified in a template-parameter
// declaration.
@@ -1151,17 +1263,13 @@ static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
T->setQualifierInfo(SS.getWithLocInContext(T->getASTContext()));
}
-DeclResult
-Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
- SourceLocation KWLoc, CXXScopeSpec &SS,
- IdentifierInfo *Name, SourceLocation NameLoc,
- AttributeList *Attr,
- TemplateParameterList *TemplateParams,
- AccessSpecifier AS, SourceLocation ModulePrivateLoc,
- SourceLocation FriendLoc,
- unsigned NumOuterTemplateParamLists,
- TemplateParameterList** OuterTemplateParamLists,
- SkipBodyInfo *SkipBody) {
+DeclResult Sema::CheckClassTemplate(
+ Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
+ CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
+ const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
+ AccessSpecifier AS, SourceLocation ModulePrivateLoc,
+ SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
+ TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody) {
assert(TemplateParams && TemplateParams->size() > 0 &&
"No template parameters");
assert(TUK != TUK_Reference && "Can only declare or define class templates");
@@ -1212,7 +1320,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
Invalid = true;
} else if (TUK != TUK_Friend && TUK != TUK_Reference)
- diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc);
+ diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc, false);
LookupQualifiedName(Previous, SemanticContext);
} else {
@@ -1501,8 +1609,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (TUK == TUK_Definition)
NewClass->startDefinition();
- if (Attr)
- ProcessDeclAttributeList(S, NewClass, Attr);
+ ProcessDeclAttributeList(S, NewClass, Attr);
if (PrevClassTemplate)
mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl());
@@ -1885,6 +1992,8 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
// FIXME: Add a kind for this to give more meaningful diagnostics. But can
// this substitution process actually fail?
InstantiatingTemplate BuildingDeductionGuides(*this, Loc, Template);
+ if (BuildingDeductionGuides.isInvalid())
+ return;
// Convert declared constructors into deduction guide templates.
// FIXME: Skip constructors for which deduction must necessarily fail (those
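
// Illustration (hypothetical name Box): a hedged sketch of what the guarded
// code produces; each constructor becomes an implicit deduction guide.
//
//   template<typename T> struct Box {
//     Box(T);
//     template<typename U> Box(U, T);
//   };
//
//   // Roughly the guides declared here:
//   //   template<typename T> Box(T) -> Box<T>;
//   //   template<typename T, typename U> Box(U, T) -> Box<T>;
//   Box b(42);  // deduces Box<int>
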
@@ -1925,7 +2034,7 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
->setIsCopyDeductionCandidate();
}
-/// \brief Diagnose the presence of a default template argument on a
+/// Diagnose the presence of a default template argument on a
/// template parameter, which is ill-formed in certain contexts.
///
/// \returns true if the default template argument should be dropped.
@@ -1981,7 +2090,7 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
llvm_unreachable("Invalid TemplateParamListContext!");
}
-/// \brief Check for unexpanded parameter packs within the template parameters
+/// Check for unexpanded parameter packs within the template parameters
/// of a template template parameter, recursively.
static bool DiagnoseUnexpandedParameterPacks(Sema &S,
TemplateTemplateParmDecl *TTP) {
@@ -2012,7 +2121,7 @@ static bool DiagnoseUnexpandedParameterPacks(Sema &S,
return false;
}
-/// \brief Checks the validity of a template parameter list, possibly
+/// Checks the validity of a template parameter list, possibly
/// considering the template parameter list from a previous
/// declaration.
///
@@ -2379,7 +2488,7 @@ static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
return SourceRange();
}
-/// \brief Match the given template parameter lists to the given scope
+/// Match the given template parameter lists to the given scope
/// specifier, returning the template parameter list that applies to the
/// name.
///
@@ -3476,7 +3585,7 @@ noteNonDeducibleParameters(Sema &S, TemplateParameterList *TemplateParams,
const llvm::SmallBitVector &DeducibleParams) {
for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
if (!DeducibleParams[I]) {
- NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
+ NamedDecl *Param = TemplateParams->getParam(I);
if (Param->getDeclName())
S.Diag(Param->getLocation(), diag::note_non_deducible_parameter)
<< Param->getDeclName();
@@ -3557,7 +3666,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
TemplateParameterList *TemplateParams, StorageClass SC,
bool IsPartialSpecialization) {
// D must be variable template id.
- assert(D.getName().getKind() == UnqualifiedId::IK_TemplateId &&
+ assert(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId &&
"Variable template specialization is declared with a template it.");
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
@@ -3754,7 +3863,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
}
namespace {
-/// \brief A partial specialization whose template arguments have matched
+/// A partial specialization whose template arguments have matched
/// a given template-id.
struct PartialSpecMatchResult {
VarTemplatePartialSpecializationDecl *Partial;
@@ -3938,6 +4047,16 @@ Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
/*FoundD=*/nullptr, TemplateArgs);
}
+void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
+ SourceLocation Loc) {
+ Diag(Loc, diag::err_template_missing_args)
+ << (int)getTemplateNameKindForDiagnostics(Name) << Name;
+ if (TemplateDecl *TD = Name.getAsTemplateDecl()) {
+ Diag(TD->getLocation(), diag::note_template_decl_here)
+ << TD->getTemplateParameters()->getSourceRange();
+ }
+}
+
ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
@@ -3957,11 +4076,23 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
assert(!R.empty() && "empty lookup results when building templateid");
assert(!R.isAmbiguous() && "ambiguous lookup when building templateid");
+ // Non-function templates require a template argument list.
+ if (auto *TD = R.getAsSingle<TemplateDecl>()) {
+ if (!TemplateArgs && !isa<FunctionTemplateDecl>(TD)) {
+ diagnoseMissingTemplateArguments(TemplateName(TD), R.getNameLoc());
+ return ExprError();
+ }
+ }
+
+ auto AnyDependentArguments = [&]() -> bool {
+ bool InstantiationDependent;
+ return TemplateArgs &&
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ *TemplateArgs, InstantiationDependent);
+ };
+
// In C++1y, check variable template ids.
- bool InstantiationDependent;
- if (R.getAsSingle<VarTemplateDecl>() &&
- !TemplateSpecializationType::anyDependentTemplateArguments(
- *TemplateArgs, InstantiationDependent)) {
+ if (R.getAsSingle<VarTemplateDecl>() && !AnyDependentArguments()) {
return CheckVarTemplateId(SS, R.getLookupNameInfo(),
R.getAsSingle<VarTemplateDecl>(),
TemplateKWLoc, TemplateArgs);
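
// Illustration (hypothetical name zero): a hedged example of the new
// non-function-template check above.
//
//   template<typename T> constexpr T zero = T();
//
//   int a = zero<int>;  // OK
//   int b = zero;       // error: a variable template used without a template
//                       // argument list, now reported through
//                       // diagnoseMissingTemplateArguments
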
@@ -3997,15 +4128,17 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
bool MemberOfUnknownSpecialization;
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- LookupTemplateName(R, (Scope*)nullptr, SS, QualType(), /*Entering*/ false,
- MemberOfUnknownSpecialization);
+ if (LookupTemplateName(R, (Scope *)nullptr, SS, QualType(),
+ /*Entering*/false, MemberOfUnknownSpecialization,
+ TemplateKWLoc))
+ return ExprError();
if (R.isAmbiguous())
return ExprError();
if (R.empty()) {
- Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_non_template)
- << NameInfo.getName() << SS.getRange();
+ Diag(NameInfo.getLoc(), diag::err_no_member)
+ << NameInfo.getName() << DC << SS.getRange();
return ExprError();
}
@@ -4020,7 +4153,7 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
-/// \brief Form a dependent template name.
+/// Form a dependent template name.
///
/// This action forms a dependent template name given the template
/// name and its (presumably dependent) scope specifier. For
@@ -4030,7 +4163,7 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
- UnqualifiedId &Name,
+ const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Result,
@@ -4068,24 +4201,27 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
ObjectType, EnteringContext, Result,
MemberOfUnknownSpecialization);
- if (TNK == TNK_Non_template && LookupCtx->isDependentContext() &&
- isa<CXXRecordDecl>(LookupCtx) &&
- (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() ||
- cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases())) {
+ if (TNK == TNK_Non_template && MemberOfUnknownSpecialization) {
// This is a dependent template. Handle it below.
} else if (TNK == TNK_Non_template) {
- Diag(Name.getLocStart(),
- diag::err_template_kw_refers_to_non_template)
- << GetNameFromUnqualifiedId(Name).getName()
- << Name.getSourceRange()
- << TemplateKWLoc;
+ // Do the lookup again to determine if this is a "nothing found" case or
+ // a "not a template" case. FIXME: Refactor isTemplateName so we don't
+ // need to do this.
+ DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
+ LookupResult R(*this, DNI.getName(), Name.getLocStart(),
+ LookupOrdinaryName);
+ bool MOUS;
+ if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext,
+ MOUS, TemplateKWLoc))
+ Diag(Name.getLocStart(), diag::err_no_member)
+ << DNI.getName() << LookupCtx << SS.getRange();
return TNK_Non_template;
} else {
// We found something; return it.
auto *LookupRD = dyn_cast<CXXRecordDecl>(LookupCtx);
if (!AllowInjectedClassName && SS.isSet() && LookupRD &&
- Name.getKind() == UnqualifiedId::IK_Identifier && Name.Identifier &&
- LookupRD->getIdentifier() == Name.Identifier) {
+ Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
+ Name.Identifier && LookupRD->getIdentifier() == Name.Identifier) {
// C++14 [class.qual]p2:
// In a lookup in which function names are not ignored and the
// nested-name-specifier nominates a class C, if the name specified
@@ -4107,17 +4243,17 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
NestedNameSpecifier *Qualifier = SS.getScopeRep();
switch (Name.getKind()) {
- case UnqualifiedId::IK_Identifier:
+ case UnqualifiedIdKind::IK_Identifier:
Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.Identifier));
return TNK_Dependent_template_name;
- case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.OperatorFunctionId.Operator));
return TNK_Function_template;
- case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
llvm_unreachable("literal operator id cannot have a dependent scope");
default:
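
// Illustration (hypothetical names; exact wording may differ): a hedged
// sketch of code the reworked lookup diagnostic above is meant for.
//
//   template<typename T> struct Cont {
//     void m() {
//       // Lookup into the current instantiation can actually run, so a
//       // failed lookup is now reported as a missing member rather than as
//       // a name that "does not refer to a template".
//       this->template missing<int>();  // error: no member named 'missing'
//     }
//   };
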
@@ -4148,16 +4284,13 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
ArgType = Arg.getAsType();
TSI = AL.getTypeSourceInfo();
break;
- case TemplateArgument::Template: {
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
// We have a template type parameter but the template argument
// is a template without any arguments.
SourceRange SR = AL.getSourceRange();
- TemplateName Name = Arg.getAsTemplate();
- Diag(SR.getBegin(), diag::err_template_missing_args)
- << (int)getTemplateNameKindForDiagnostics(Name) << Name << SR;
- if (TemplateDecl *Decl = Name.getAsTemplateDecl())
- Diag(Decl->getLocation(), diag::note_template_decl_here);
-
+ TemplateName Name = Arg.getAsTemplateOrTemplatePattern();
+ diagnoseMissingTemplateArguments(Name, SR.getEnd());
return true;
}
case TemplateArgument::Expression: {
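
// Illustration (hypothetical names List/Use): a hedged sketch of the
// Template/TemplateExpansion case above.
//
//   template<typename T> struct List {};
//   template<typename T> struct Use {};
//
//   Use<List> u;  // error: the type parameter 'T' is given the template
//                 // 'List' with no argument list; the shared helper
//                 // diagnoseMissingTemplateArguments emits the diagnostic
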
@@ -4250,7 +4383,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
return false;
}
-/// \brief Substitute template arguments into the default template argument for
+/// Substitute template arguments into the default template argument for
/// the given template type parameter.
///
/// \param SemaRef the semantic analysis object for which we are performing
@@ -4306,7 +4439,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
return ArgType;
}
-/// \brief Substitute template arguments into the default template argument for
+/// Substitute template arguments into the default template argument for
/// the given non-type template parameter.
///
/// \param SemaRef the semantic analysis object for which we are performing
@@ -4354,7 +4487,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists);
}
-/// \brief Substitute template arguments into the default template argument for
+/// Substitute template arguments into the default template argument for
/// the given template template parameter.
///
/// \param SemaRef the semantic analysis object for which we are performing
@@ -4418,7 +4551,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
TemplateArgLists);
}
-/// \brief If the given template parameter has a default template
+/// If the given template parameter has a default template
/// argument, substitute into that default template argument and
/// return the corresponding template argument.
TemplateArgumentLoc
@@ -4519,7 +4652,7 @@ static TemplateArgumentLoc convertTypeTemplateArgumentToTemplate(TypeLoc TLoc) {
return TemplateArgumentLoc();
}
-/// \brief Check that the given template argument corresponds to the given
+/// Check that the given template argument corresponds to the given
/// template parameter.
///
/// \param Param The template parameter against which the argument will be
@@ -4566,6 +4699,8 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
if (NTTP->isParameterPack() && NTTP->isExpandedParameterPack())
NTTPType = NTTP->getExpansionType(ArgumentPackIndex);
+ // FIXME: Do we need to substitute into parameters here if they're
+ // instantiation-dependent but not dependent?
if (NTTPType->isDependentType() &&
!isa<TemplateTemplateParmDecl>(Template) &&
!Template->getDeclContext()->isDependentContext()) {
@@ -4597,11 +4732,15 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
case TemplateArgument::Expression: {
TemplateArgument Result;
+ unsigned CurSFINAEErrors = NumSFINAEErrors;
ExprResult Res =
CheckTemplateArgument(NTTP, NTTPType, Arg.getArgument().getAsExpr(),
Result, CTAK);
if (Res.isInvalid())
return true;
+ // If the current template argument causes an error, give up now.
+ if (CurSFINAEErrors < NumSFINAEErrors)
+ return true;
// If the resulting expression is new, then use it in place of the
// old expression in the template argument.
@@ -4705,9 +4844,15 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// Check template template parameters.
TemplateTemplateParmDecl *TempParm = cast<TemplateTemplateParmDecl>(Param);
+ TemplateParameterList *Params = TempParm->getTemplateParameters();
+ if (TempParm->isExpandedParameterPack())
+ Params = TempParm->getExpansionTemplateParameters(ArgumentPackIndex);
+
// Substitute into the template parameter list of the template
// template parameter, since previously-supplied template arguments
// may appear within the template template parameter.
+ //
+ // FIXME: Skip this if the parameters aren't instantiation-dependent.
{
// Set up a template instantiation context.
LocalInstantiationScope Scope(*this);
@@ -4718,10 +4863,9 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
return true;
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
- TempParm = cast_or_null<TemplateTemplateParmDecl>(
- SubstDecl(TempParm, CurContext,
- MultiLevelTemplateArgumentList(TemplateArgs)));
- if (!TempParm)
+ Params = SubstTemplateParams(Params, CurContext,
+ MultiLevelTemplateArgumentList(TemplateArgs));
+ if (!Params)
return true;
}
@@ -4742,7 +4886,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- if (CheckTemplateArgument(TempParm, Arg, ArgumentPackIndex))
+ if (CheckTemplateTemplateArgument(Params, Arg))
return true;
Converted.push_back(Arg.getArgument());
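
// Illustration (hypothetical names Apply/IntWrap): a hedged example of why
// the substituted parameter list 'Params' matters.
//
//   template<typename T, template<T> class TT> struct Apply {};
//   template<int N> struct IntWrap {};
//
//   // Checking 'IntWrap' against 'TT' first substitutes T = int into TT's
//   // own parameter list (template<T> class -> template<int> class), and
//   // only then compares the lists.
//   Apply<int, IntWrap> a;
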
@@ -4770,28 +4914,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
return false;
}
-/// \brief Diagnose an arity mismatch in the
-static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
- SourceLocation TemplateLoc,
- TemplateArgumentListInfo &TemplateArgs) {
- TemplateParameterList *Params = Template->getTemplateParameters();
- unsigned NumParams = Params->size();
- unsigned NumArgs = TemplateArgs.size();
-
- SourceRange Range;
- if (NumArgs > NumParams)
- Range = SourceRange(TemplateArgs[NumParams].getLocation(),
- TemplateArgs.getRAngleLoc());
- S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
- << (NumArgs > NumParams)
- << (int)S.getTemplateNameKindForDiagnostics(TemplateName(Template))
- << Template << Range;
- S.Diag(Template->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
- return true;
-}
-
-/// \brief Check whether the template parameter is a pack expansion, and if so,
+/// Check whether the template parameter is a pack expansion, and if so,
/// determine the number of parameters produced by that expansion. For instance:
///
/// \code
@@ -4844,10 +4967,18 @@ static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
// FIXME: If there's a more recent default argument that *is* visible,
// diagnose that it was declared too late.
- return diagnoseArityMismatch(S, TD, Loc, Args);
+ TemplateParameterList *Params = TD->getTemplateParameters();
+
+ S.Diag(Loc, diag::err_template_arg_list_different_arity)
+ << /*not enough args*/0
+ << (int)S.getTemplateNameKindForDiagnostics(TemplateName(TD))
+ << TD;
+ S.Diag(TD->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
}
-/// \brief Check that the given template argument list is well-formed
+/// Check that the given template argument list is well-formed
/// for specializing the given template.
bool Sema::CheckTemplateArgumentList(
TemplateDecl *Template, SourceLocation TemplateLoc,
@@ -4896,7 +5027,7 @@ bool Sema::CheckTemplateArgumentList(
} else if (ArgIdx == NumArgs && !PartialTemplateArgs) {
// Not enough arguments for this parameter pack.
Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
- << false
+ << /*not enough args*/0
<< (int)getTemplateNameKindForDiagnostics(TemplateName(Template))
<< Template;
Diag(Template->getLocation(), diag::note_template_decl_here)
@@ -5091,8 +5222,16 @@ bool Sema::CheckTemplateArgumentList(
// If we have any leftover arguments, then there were too many arguments.
// Complain and fail.
- if (ArgIdx < NumArgs)
- return diagnoseArityMismatch(*this, Template, TemplateLoc, NewArgs);
+ if (ArgIdx < NumArgs) {
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << /*too many args*/1
+ << (int)getTemplateNameKindForDiagnostics(TemplateName(Template))
+ << Template
+ << SourceRange(NewArgs[ArgIdx].getLocation(), NewArgs.getRAngleLoc());
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+ }
// No problems found with the new argument list, propagate changes back
// to caller.
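
// Illustration (hypothetical name PairOf): both arity branches above
// correspond to code like this hedged sketch.
//
//   template<typename T, typename U> struct PairOf {};
//
//   PairOf<int> p1;            // error: too few template arguments; the
//                              // note points at PairOf's parameter list
//   PairOf<int, int, int> p2;  // error: too many template arguments; the
//                              // source range covers the extra arguments
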
@@ -5197,6 +5336,11 @@ bool UnnamedLocalNoLinkageFinder::VisitVectorType(const VectorType* T) {
return Visit(T->getElementType());
}
+bool UnnamedLocalNoLinkageFinder::VisitDependentVectorType(
+ const DependentVectorType *T) {
+ return Visit(T->getElementType());
+}
+
bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
return Visit(T->getElementType());
}
@@ -5354,7 +5498,7 @@ bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
-/// \brief Check a template argument against its corresponding
+/// Check a template argument against its corresponding
/// template type parameter.
///
/// This routine implements the semantics of C++ [temp.arg.type]. It
@@ -5392,7 +5536,7 @@ enum NullPointerValueKind {
NPV_Error
};
-/// \brief Determine whether the given template argument is a null pointer
+/// Determine whether the given template argument is a null pointer
/// value of the appropriate type.
static NullPointerValueKind
isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
@@ -5488,7 +5632,7 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
return NPV_NotNullPointer;
}
-/// \brief Checks whether the given template argument is compatible with its
+/// Checks whether the given template argument is compatible with its
/// template parameter.
static bool CheckTemplateArgumentIsCompatibleWithParameter(
Sema &S, NonTypeTemplateParmDecl *Param, QualType ParamType, Expr *ArgIn,
@@ -5545,7 +5689,7 @@ static bool CheckTemplateArgumentIsCompatibleWithParameter(
return false;
}
-/// \brief Checks whether the given template argument is the address
+/// Checks whether the given template argument is the address
/// of an object or function according to C++ [temp.arg.nontype]p1.
static bool
CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
@@ -5833,7 +5977,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return false;
}
-/// \brief Checks whether the given template argument is a pointer to
+/// Checks whether the given template argument is a pointer to
/// member constant according to C++ [temp.arg.nontype]p1.
static bool CheckTemplateArgumentPointerToMember(Sema &S,
NonTypeTemplateParmDecl *Param,
@@ -5883,17 +6027,16 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
}
// A constant of pointer-to-member type.
else if ((DRE = dyn_cast<DeclRefExpr>(Arg))) {
- if (ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl())) {
- if (VD->getType()->isMemberPointerType()) {
- if (isa<NonTypeTemplateParmDecl>(VD)) {
- if (Arg->isTypeDependent() || Arg->isValueDependent()) {
- Converted = TemplateArgument(Arg);
- } else {
- VD = cast<ValueDecl>(VD->getCanonicalDecl());
- Converted = TemplateArgument(VD, ParamType);
- }
- return Invalid;
+ ValueDecl *VD = DRE->getDecl();
+ if (VD->getType()->isMemberPointerType()) {
+ if (isa<NonTypeTemplateParmDecl>(VD)) {
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ } else {
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ Converted = TemplateArgument(VD, ParamType);
}
+ return Invalid;
}
}
@@ -5963,7 +6106,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
return true;
}
-/// \brief Check a template argument against its corresponding
+/// Check a template argument against its corresponding
/// non-type template parameter.
///
/// This routine implements the semantics of C++ [temp.arg.nontype].
@@ -6118,7 +6261,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- a predefined __func__ variable
if (auto *E = Value.getLValueBase().dyn_cast<const Expr*>()) {
if (isa<CXXUuidofExpr>(E)) {
- Converted = TemplateArgument(const_cast<Expr*>(E));
+ Converted = TemplateArgument(ArgResult.get());
break;
}
Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
@@ -6481,14 +6624,13 @@ static void DiagnoseTemplateParameterListArityMismatch(
Sema &S, TemplateParameterList *New, TemplateParameterList *Old,
Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc);
-/// \brief Check a template argument against its corresponding
+/// Check a template argument against its corresponding
/// template template parameter.
///
/// This routine implements the semantics of C++ [temp.arg.template].
/// It returns true if an error occurred, and false otherwise.
-bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
- TemplateArgumentLoc &Arg,
- unsigned ArgumentPackIndex) {
+bool Sema::CheckTemplateTemplateArgument(TemplateParameterList *Params,
+ TemplateArgumentLoc &Arg) {
TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern();
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template) {
@@ -6523,10 +6665,6 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
<< Template;
}
- TemplateParameterList *Params = Param->getTemplateParameters();
- if (Param->isExpandedParameterPack())
- Params = Param->getExpansionTemplateParameters(ArgumentPackIndex);
-
// C++1z [temp.arg.template]p3: (DR 150)
// A template-argument matches a template template-parameter P when P
// is at least as specialized as the template-argument A.
@@ -6553,7 +6691,7 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
Arg.getLocation());
}
-/// \brief Given a non-type template argument that refers to a
+/// Given a non-type template argument that refers to a
/// declaration and the type of its corresponding non-type template
/// parameter, produce an expression that properly refers to that
/// declaration.
@@ -6584,7 +6722,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
assert(Arg.getKind() == TemplateArgument::Declaration &&
"Only declaration template arguments permitted here");
- ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
+ ValueDecl *VD = Arg.getAsDecl();
if (VD->getDeclContext()->isRecord() &&
(isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
@@ -6676,7 +6814,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
return BuildDeclRefExpr(VD, T, VK, Loc);
}
-/// \brief Construct a new expression that refers to the given
+/// Construct a new expression that refers to the given
/// integral template argument with the given source-location
/// information.
///
@@ -6701,11 +6839,11 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
Expr *E;
if (T->isAnyCharacterType()) {
- // This does not need to handle u8 character literals because those are
- // of type char, and so can also be covered by an ASCII character literal.
CharacterLiteral::CharacterKind Kind;
if (T->isWideCharType())
Kind = CharacterLiteral::Wide;
+ else if (T->isChar8Type() && getLangOpts().Char8)
+ Kind = CharacterLiteral::UTF8;
else if (T->isChar16Type())
Kind = CharacterLiteral::UTF16;
else if (T->isChar32Type())
@@ -6736,7 +6874,7 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
return E;
}
-/// \brief Match two template parameters within template parameter lists.
+/// Match two template parameters within template parameter lists.
static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
bool Complain,
Sema::TemplateParameterListEqualKind Kind,
@@ -6839,7 +6977,7 @@ static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
return true;
}
-/// \brief Diagnose a known arity mismatch when comparing template argument
+/// Diagnose a known arity mismatch when comparing template argument
/// lists.
static
void DiagnoseTemplateParameterListArityMismatch(Sema &S,
@@ -6861,7 +6999,7 @@ void DiagnoseTemplateParameterListArityMismatch(Sema &S,
<< SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
}
-/// \brief Determine whether the given template parameter lists are
+/// Determine whether the given template parameter lists are
/// equivalent.
///
/// \param New The new template parameter list, typically written in the
@@ -6953,7 +7091,7 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
return true;
}
-/// \brief Check whether a template can be declared within this scope.
+/// Check whether a template can be declared within this scope.
///
/// If the template declaration is valid in this scope, returns
/// false. Otherwise, issues a diagnostic and returns true.
@@ -7002,7 +7140,7 @@ Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
<< TemplateParams->getSourceRange();
}
-/// \brief Determine what kind of template specialization the given declaration
+/// Determine what kind of template specialization the given declaration
/// is.
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
if (!D)
@@ -7018,7 +7156,7 @@ static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
return TSK_Undeclared;
}
-/// \brief Check whether a specialization is well-formed in the current
+/// Check whether a specialization is well-formed in the current
/// context.
///
/// This routine determines whether a template specialization can be declared
@@ -7072,120 +7210,43 @@ static bool CheckTemplateSpecializationScope(Sema &S,
}
// C++ [temp.expl.spec]p2:
- // An explicit specialization shall be declared in the namespace
- // of which the template is a member, or, for member templates, in
- // the namespace of which the enclosing class or enclosing class
- // template is a member. An explicit specialization of a member
- // function, member class or static data member of a class
- // template shall be declared in the namespace of which the class
- // template is a member. Such a declaration may also be a
- // definition. If the declaration is not a definition, the
- // specialization may be defined later in the name- space in which
- // the explicit specialization was declared, or in a namespace
- // that encloses the one in which the explicit specialization was
- // declared.
+ // An explicit specialization may be declared in any scope in which
+ // the corresponding primary template may be defined.
if (S.CurContext->getRedeclContext()->isFunctionOrMethod()) {
S.Diag(Loc, diag::err_template_spec_decl_function_scope)
<< Specialized;
return true;
}
- if (S.CurContext->isRecord() && !IsPartialSpecialization) {
- if (S.getLangOpts().MicrosoftExt) {
- // Do not warn for class scope explicit specialization during
- // instantiation, warning was already emitted during pattern
- // semantic analysis.
- if (!S.inTemplateInstantiation())
- S.Diag(Loc, diag::ext_function_specialization_in_class)
- << Specialized;
- } else {
- S.Diag(Loc, diag::err_template_spec_decl_class_scope)
- << Specialized;
- return true;
- }
- }
-
- if (S.CurContext->isRecord() &&
- !S.CurContext->Equals(Specialized->getDeclContext())) {
- // Make sure that we're specializing in the right record context.
- // Otherwise, things can go horribly wrong.
- S.Diag(Loc, diag::err_template_spec_decl_class_scope)
- << Specialized;
- return true;
- }
-
// C++ [temp.class.spec]p6:
- // A class template partial specialization may be declared or redeclared
- // in any namespace scope in which its definition may be defined (14.5.1
- // and 14.5.2).
- DeclContext *SpecializedContext
- = Specialized->getDeclContext()->getEnclosingNamespaceContext();
- DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();
-
- // Make sure that this redeclaration (or definition) occurs in an enclosing
- // namespace.
- // Note that HandleDeclarator() performs this check for explicit
- // specializations of function templates, static data members, and member
- // functions, so we skip the check here for those kinds of entities.
- // FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
- // Should we refactor that check, so that it occurs later?
- if (!DC->Encloses(SpecializedContext) &&
- !(isa<FunctionTemplateDecl>(Specialized) ||
- isa<FunctionDecl>(Specialized) ||
- isa<VarTemplateDecl>(Specialized) ||
- isa<VarDecl>(Specialized))) {
+ // A class template partial specialization may be declared in any
+ // scope in which the primary template may be defined.
+ DeclContext *SpecializedContext =
+ Specialized->getDeclContext()->getRedeclContext();
+ DeclContext *DC = S.CurContext->getRedeclContext();
+
+ // Make sure that this redeclaration (or definition) occurs in the same
+ // scope or an enclosing namespace.
+ if (!(DC->isFileContext() ? DC->Encloses(SpecializedContext)
+ : DC->Equals(SpecializedContext))) {
if (isa<TranslationUnitDecl>(SpecializedContext))
S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
<< EntityKind << Specialized;
- else if (isa<NamespaceDecl>(SpecializedContext)) {
+ else {
+ auto *ND = cast<NamedDecl>(SpecializedContext);
int Diag = diag::err_template_spec_redecl_out_of_scope;
- if (S.getLangOpts().MicrosoftExt)
+ if (S.getLangOpts().MicrosoftExt && !DC->isRecord())
Diag = diag::ext_ms_template_spec_redecl_out_of_scope;
S.Diag(Loc, Diag) << EntityKind << Specialized
- << cast<NamedDecl>(SpecializedContext);
- } else
- llvm_unreachable("unexpected namespace context for specialization");
+ << ND << isa<CXXRecordDecl>(ND);
+ }
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
- } else if ((!PrevDecl ||
- getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
- getTemplateSpecializationKind(PrevDecl) ==
- TSK_ImplicitInstantiation)) {
- // C++ [temp.exp.spec]p2:
- // An explicit specialization shall be declared in the namespace of which
- // the template is a member, or, for member templates, in the namespace
- // of which the enclosing class or enclosing class template is a member.
- // An explicit specialization of a member function, member class or
- // static data member of a class template shall be declared in the
- // namespace of which the class template is a member.
- //
- // C++11 [temp.expl.spec]p2:
- // An explicit specialization shall be declared in a namespace enclosing
- // the specialized template.
- // C++11 [temp.explicit]p3:
- // An explicit instantiation shall appear in an enclosing namespace of its
- // template.
- if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
- bool IsCPlusPlus11Extension = DC->Encloses(SpecializedContext);
- if (isa<TranslationUnitDecl>(SpecializedContext)) {
- assert(!IsCPlusPlus11Extension &&
- "DC encloses TU but isn't in enclosing namespace set");
- S.Diag(Loc, diag::err_template_spec_decl_out_of_scope_global)
- << EntityKind << Specialized;
- } else if (isa<NamespaceDecl>(SpecializedContext)) {
- int Diag;
- if (!IsCPlusPlus11Extension)
- Diag = diag::err_template_spec_decl_out_of_scope;
- else if (!S.getLangOpts().CPlusPlus11)
- Diag = diag::ext_template_spec_decl_out_of_scope;
- else
- Diag = diag::warn_cxx98_compat_template_spec_decl_out_of_scope;
- S.Diag(Loc, Diag)
- << EntityKind << Specialized << cast<NamedDecl>(SpecializedContext);
- }
- S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
- }
+ // Don't allow specializing in the wrong class during error recovery.
+ // Otherwise, things can go horribly wrong.
+ if (DC->isRecord())
+ return true;
}
return false;
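
// Illustration (hypothetical names): the relaxed scope rule implemented
// above (CWG DR727) admits, for example:
//
//   namespace N {
//     template<typename T> struct A {};
//   }
//
//   // Any scope in which the primary template may be defined is acceptable:
//   template<> struct N::A<int> {};   // enclosing scope, qualified name
//
//   struct C {
//     template<typename T> struct D {};
//     template<> struct D<int> {};    // class-scope explicit specialization
//   };
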
@@ -7211,7 +7272,7 @@ static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) {
return Checker.MatchLoc;
}
-/// \brief Subroutine of Sema::CheckTemplatePartialSpecializationArgs
+/// Subroutine of Sema::CheckTemplatePartialSpecializationArgs
/// that checks non-type template partial specialization arguments.
static bool CheckNonTypeTemplatePartialSpecializationArgs(
Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param,
@@ -7299,7 +7360,7 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
return false;
}
-/// \brief Check the non-type template arguments of a class template
+/// Check the non-type template arguments of a class template
/// partial specialization according to C++ [temp.class.spec]p9.
///
/// \param TemplateNameLoc the location of the template name.
@@ -7335,16 +7396,11 @@ bool Sema::CheckTemplatePartialSpecializationArgs(
return false;
}
-DeclResult
-Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
- TagUseKind TUK,
- SourceLocation KWLoc,
- SourceLocation ModulePrivateLoc,
- TemplateIdAnnotation &TemplateId,
- AttributeList *Attr,
- MultiTemplateParamsArg
- TemplateParameterLists,
- SkipBodyInfo *SkipBody) {
+DeclResult Sema::ActOnClassTemplateSpecialization(
+ Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
+ SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
+ const ParsedAttributesView &Attr,
+ MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody) {
assert(TUK != TUK_Reference && "References are not specializations");
CXXScopeSpec &SS = TemplateId.SS;
@@ -7586,10 +7642,6 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
ClassTemplate->AddSpecialization(Specialization, InsertPos);
if (CurContext->isDependentContext()) {
- // -fms-extensions permits specialization of nested classes without
- // fully specializing the outer class(es).
- assert(getLangOpts().MicrosoftExt &&
- "Only possible with -fms-extensions!");
TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(
CanonTemplate, Converted);
@@ -7649,8 +7701,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
}
}
- if (Attr)
- ProcessDeclAttributeList(S, Specialization, Attr);
+ ProcessDeclAttributeList(S, Specialization, Attr);
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
@@ -7717,7 +7768,7 @@ Decl *Sema::ActOnTemplateDeclarator(Scope *S,
return NewDecl;
}
-/// \brief Strips various properties off an implicit instantiation
+/// Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
D->dropAttr<DLLImportAttr>();
@@ -7727,7 +7778,7 @@ static void StripImplicitInstantiation(NamedDecl *D) {
FD->setInlineSpecified(false);
}
-/// \brief Compute the diagnostic location for an explicit instantiation
+/// Compute the diagnostic location for an explicit instantiation
 /// declaration or definition.
static SourceLocation DiagLocForExplicitInstantiation(
NamedDecl* D, SourceLocation PointOfInstantiation) {
@@ -7744,7 +7795,7 @@ static SourceLocation DiagLocForExplicitInstantiation(
return PrevDiagLoc;
}
-/// \brief Diagnose cases where we have an explicit template specialization
+/// Diagnose cases where we have an explicit template specialization
/// before/after an explicit template instantiation, producing diagnostics
/// for those cases where they are required and determining whether the
/// new specialization/instantiation will have any effect.
@@ -7890,7 +7941,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
return false;
case TSK_ExplicitInstantiationDeclaration:
- // We're explicity instantiating a definition for something for which we
+ // We're explicitly instantiating a definition for something for which we
// were previously asked to suppress instantiations. That's fine.
// C++0x [temp.explicit]p4:
@@ -7929,7 +7980,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
llvm_unreachable("Missing specialization/instantiation case?");
}
-/// \brief Perform semantic analysis for the given dependent function
+/// Perform semantic analysis for the given dependent function
/// template specialization.
///
/// The only possible way to get a dependent function template specialization
@@ -7952,24 +8003,41 @@ Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
// the correct context.
DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
LookupResult::Filter F = Previous.makeFilter();
+ enum DiscardReason { NotAFunctionTemplate, NotAMemberOfEnclosing };
+ SmallVector<std::pair<DiscardReason, Decl *>, 8> DiscardedCandidates;
while (F.hasNext()) {
NamedDecl *D = F.next()->getUnderlyingDecl();
- if (!isa<FunctionTemplateDecl>(D) ||
- !FDLookupContext->InEnclosingNamespaceSetOf(
- D->getDeclContext()->getRedeclContext()))
+ if (!isa<FunctionTemplateDecl>(D)) {
+ F.erase();
+ DiscardedCandidates.push_back(std::make_pair(NotAFunctionTemplate, D));
+ continue;
+ }
+
+ if (!FDLookupContext->InEnclosingNamespaceSetOf(
+ D->getDeclContext()->getRedeclContext())) {
F.erase();
+ DiscardedCandidates.push_back(std::make_pair(NotAMemberOfEnclosing, D));
+ continue;
+ }
}
F.done();
- // Should this be diagnosed here?
- if (Previous.empty()) return true;
+ if (Previous.empty()) {
+ Diag(FD->getLocation(),
+ diag::err_dependent_function_template_spec_no_match);
+ for (auto &P : DiscardedCandidates)
+ Diag(P.second->getLocation(),
+ diag::note_dependent_function_template_spec_discard_reason)
+ << P.first;
+ return true;
+ }
FD->setDependentTemplateSpecialization(Context, Previous.asUnresolvedSet(),
ExplicitTemplateArgs);
return false;
}
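
// Illustration (hypothetical names; the note text is defined by the new
// diagnostic IDs): a hedged sketch of code that could reach the diagnostic.
//
//   void g(int);                  // a plain function, not a template
//
//   template<typename T> struct S {
//     // A dependent function template specialization with no viable
//     // candidate: every lookup result was discarded, and each discard
//     // reason is now listed in a note.
//     friend void g<>(T);         // error: no candidate function template
//   };
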
-/// \brief Perform semantic analysis for the given function template
+/// Perform semantic analysis for the given function template
/// specialization.
///
/// This routine performs all of the semantic analysis required for an
@@ -8192,7 +8260,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
return false;
}
-/// \brief Perform semantic analysis for the given non-template member
+/// Perform semantic analysis for the given non-template member
/// specialization.
///
/// This routine performs all of the semantic analysis required for an
@@ -8402,7 +8470,7 @@ void Sema::CompleteMemberSpecialization(NamedDecl *Member,
llvm_unreachable("unknown member specialization kind");
}
-/// \brief Check the scope of an explicit instantiation.
+/// Check the scope of an explicit instantiation.
///
/// \returns true if a serious error occurs, false otherwise.
static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
@@ -8456,7 +8524,7 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
return false;
}
-/// \brief Determine whether the given scope specifier has a template-id in it.
+/// Determine whether the given scope specifier has a template-id in it.
static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
if (!SS.isSet())
return false;
@@ -8502,19 +8570,12 @@ static void dllExportImportClassTemplateSpecialization(
}
// Explicit instantiation of a class template specialization
-DeclResult
-Sema::ActOnExplicitInstantiation(Scope *S,
- SourceLocation ExternLoc,
- SourceLocation TemplateLoc,
- unsigned TagSpec,
- SourceLocation KWLoc,
- const CXXScopeSpec &SS,
- TemplateTy TemplateD,
- SourceLocation TemplateNameLoc,
- SourceLocation LAngleLoc,
- ASTTemplateArgsPtr TemplateArgsIn,
- SourceLocation RAngleLoc,
- AttributeList *Attr) {
+DeclResult Sema::ActOnExplicitInstantiation(
+ Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
+ unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
+ TemplateTy TemplateD, SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc, const ParsedAttributesView &Attr) {
// Find the class template we're specializing
TemplateName Name = TemplateD.get();
TemplateDecl *TD = Name.getAsTemplateDecl();
@@ -8555,11 +8616,11 @@ Sema::ActOnExplicitInstantiation(Scope *S,
if (TSK == TSK_ExplicitInstantiationDeclaration) {
// Check for dllexport class template instantiation declarations.
- for (AttributeList *A = Attr; A; A = A->getNext()) {
- if (A->getKind() == AttributeList::AT_DLLExport) {
+ for (const ParsedAttr &AL : Attr) {
+ if (AL.getKind() == ParsedAttr::AT_DLLExport) {
Diag(ExternLoc,
diag::warn_attribute_dllexport_explicit_instantiation_decl);
- Diag(A->getLoc(), diag::note_attribute);
+ Diag(AL.getLoc(), diag::note_attribute);
break;
}
}
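
// Illustration (MSVC-style attributes assumed available): the warning above
// fires for code like this hedged example.
//
//   template<typename T> struct Exported {};
//
//   // warning: explicit instantiation declaration should not be dllexport;
//   // the note points at the attribute itself.
//   extern template struct __declspec(dllexport) Exported<int>;
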
@@ -8579,10 +8640,10 @@ Sema::ActOnExplicitInstantiation(Scope *S,
// Check for dllimport class template instantiation definitions.
bool DLLImport =
ClassTemplate->getTemplatedDecl()->getAttr<DLLImportAttr>();
- for (AttributeList *A = Attr; A; A = A->getNext()) {
- if (A->getKind() == AttributeList::AT_DLLImport)
+ for (const ParsedAttr &AL : Attr) {
+ if (AL.getKind() == ParsedAttr::AT_DLLImport)
DLLImport = true;
- if (A->getKind() == AttributeList::AT_DLLExport) {
+ if (AL.getKind() == ParsedAttr::AT_DLLExport) {
// dllexport trumps dllimport here.
DLLImport = false;
break;
@@ -8692,8 +8753,7 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Specialization->setBraceRange(SourceRange());
bool PreviouslyDLLExported = Specialization->hasAttr<DLLExportAttr>();
- if (Attr)
- ProcessDeclAttributeList(S, Specialization, Attr);
+ ProcessDeclAttributeList(S, Specialization, Attr);
// Add the explicit instantiation into its lexical context. However,
// since explicit instantiations are never found by name lookup, we
@@ -8791,15 +8851,11 @@ Sema::ActOnExplicitInstantiation(Scope *S,
// Explicit instantiation of a member class of a class template.
DeclResult
-Sema::ActOnExplicitInstantiation(Scope *S,
- SourceLocation ExternLoc,
- SourceLocation TemplateLoc,
- unsigned TagSpec,
- SourceLocation KWLoc,
- CXXScopeSpec &SS,
- IdentifierInfo *Name,
- SourceLocation NameLoc,
- AttributeList *Attr) {
+Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
+ SourceLocation TemplateLoc, unsigned TagSpec,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ const ParsedAttributesView &Attr) {
bool Owned = false;
bool IsDependent = false;
@@ -9044,7 +9100,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return true;
}
- if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
// C++1y [temp.explicit]p3:
// If the explicit instantiation is for a variable, the unqualified-id
// in the declaration shall be a template-id.
@@ -9101,8 +9157,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
if (PrevTemplate) {
// Merge attributes.
- if (AttributeList *Attr = D.getDeclSpec().getAttributes().getList())
- ProcessDeclAttributeList(S, Prev, Attr);
+ ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
}
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateVariableDefinition(D.getIdentifierLoc(), Prev);
@@ -9126,7 +9181,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// argument list into our AST format.
bool HasExplicitTemplateArgs = false;
TemplateArgumentListInfo TemplateArgs;
- if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
TemplateArgs = makeTemplateArgumentListInfo(*this, *D.getName().TemplateId);
HasExplicitTemplateArgs = true;
}
@@ -9138,7 +9193,6 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// template.
UnresolvedSet<8> TemplateMatches;
FunctionDecl *NonTemplateMatch = nullptr;
- AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
TemplateSpecCandidateSet FailedCandidates(D.getIdentifierLoc());
for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
P != PEnd; ++P) {
@@ -9186,7 +9240,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (LangOpts.CUDA &&
IdentifyCUDATarget(Specialization,
/* IgnoreImplicitHDAttributes = */ true) !=
- IdentifyCUDATarget(Attr)) {
+ IdentifyCUDATarget(D.getDeclSpec().getAttributes())) {
FailedCandidates.addCandidate().set(
P.getPair(), FunTmpl->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
@@ -9265,8 +9319,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return (Decl*) nullptr;
}
- if (Attr)
- ProcessDeclAttributeList(S, Specialization, Attr);
+ ProcessDeclAttributeList(S, Specialization, D.getDeclSpec().getAttributes());
// In MSVC mode, dllimported explicit instantiation definitions are treated as
// instantiation declarations.
@@ -9292,7 +9345,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
//
// C++98 has the same restriction, just worded differently.
FunctionTemplateDecl *FunTmpl = Specialization->getPrimaryTemplate();
- if (D.getName().getKind() != UnqualifiedId::IK_TemplateId && !FunTmpl &&
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId && !FunTmpl &&
D.getCXXScopeSpec().isSet() &&
!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
Diag(D.getIdentifierLoc(),
@@ -9478,8 +9531,7 @@ static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
EnableIfTy.getAs<TemplateSpecializationTypeLoc>();
if (!EnableIfTSTLoc || EnableIfTSTLoc.getNumArgs() == 0)
return false;
- const TemplateSpecializationType *EnableIfTST =
- cast<TemplateSpecializationType>(EnableIfTSTLoc.getTypePtr());
+ const TemplateSpecializationType *EnableIfTST = EnableIfTSTLoc.getTypePtr();
// ... which names a complete class template declaration...
const TemplateDecl *EnableIfDecl =
@@ -9511,7 +9563,7 @@ static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
return true;
}
-/// \brief Build the type that describes a C++ typename specifier,
+/// Build the type that describes a C++ typename specifier,
/// e.g., "typename T::type".
QualType
Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
@@ -9686,7 +9738,7 @@ namespace {
: TreeTransform<CurrentInstantiationRebuilder>(SemaRef),
Loc(Loc), Entity(Entity) { }
- /// \brief Determine whether the given type \p T has already been
+ /// Determine whether the given type \p T has already been
/// transformed.
///
/// For the purposes of type reconstruction, a type has already been
@@ -9695,14 +9747,14 @@ namespace {
return T.isNull() || !T->isDependentType();
}
- /// \brief Returns the location of the entity whose type is being
+ /// Returns the location of the entity whose type is being
/// rebuilt.
SourceLocation getBaseLocation() { return Loc; }
- /// \brief Returns the name of the entity whose type is being rebuilt.
+ /// Returns the name of the entity whose type is being rebuilt.
DeclarationName getBaseEntity() { return Entity; }
- /// \brief Sets the "base" location and entity when that
+ /// Sets the "base" location and entity when that
/// information is known based on another transformation.
void setBase(SourceLocation Loc, DeclarationName Entity) {
this->Loc = Loc;
@@ -9716,7 +9768,7 @@ namespace {
};
} // end anonymous namespace
-/// \brief Rebuilds a type within the context of the current instantiation.
+/// Rebuilds a type within the context of the current instantiation.
///
/// The type \p T is part of the type of an out-of-line member definition of
/// a class template (or class template partial specialization) that was parsed
@@ -9774,7 +9826,7 @@ bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
return false;
}
-/// \brief Rebuild the template parameters now that we know we're in a current
+/// Rebuild the template parameters now that we know we're in a current
/// instantiation.
bool Sema::RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params) {
@@ -9813,7 +9865,7 @@ bool Sema::RebuildTemplateParamsInCurrentInstantiation(
return false;
}
-/// \brief Produces a formatted string that describes the binding of
+/// Produces a formatted string that describes the binding of
/// template parameters to template arguments.
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
@@ -9891,7 +9943,7 @@ bool Sema::IsInsideALocalClassWithinATemplateFunction() {
}
namespace {
-/// \brief Walk the path from which a declaration was instantiated, and check
+/// Walk the path from which a declaration was instantiated, and check
/// that every explicit specialization along that path is visible. This enforces
/// C++ [temp.expl.spec]/6:
///
@@ -10027,7 +10079,7 @@ void Sema::checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec) {
ExplicitSpecializationVisibilityChecker(*this, Loc).check(Spec);
}
-/// \brief Check whether a template partial specialization that we've discovered
+/// Check whether a template partial specialization that we've discovered
/// is hidden, and produce suitable diagnostics if so.
void Sema::checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec) {
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index f8ee60251698..633b2837e1fe 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -1,60 +1,97 @@
-//===------- SemaTemplateDeduction.cpp - Template Argument Deduction ------===/
+//===- SemaTemplateDeduction.cpp - Template Argument Deduction ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//===----------------------------------------------------------------------===/
//
-// This file implements C++ template argument deduction.
+//===----------------------------------------------------------------------===//
//
-//===----------------------------------------------------------------------===/
+// This file implements C++ template argument deduction.
+//
+//===----------------------------------------------------------------------===//
#include "clang/Sema/TemplateDeduction.h"
#include "TreeTransform.h"
+#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
-#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclAccessPair.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/StmtVisitor.h"
-#include "clang/AST/TypeOrdering.h"
-#include "clang/Sema/DeclSpec.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Template.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
+#include <cassert>
+#include <tuple>
+#include <utility>
namespace clang {
- using namespace sema;
- /// \brief Various flags that control template argument deduction.
+
+ /// Various flags that control template argument deduction.
///
/// These flags can be bitwise-OR'd together.
enum TemplateDeductionFlags {
- /// \brief No template argument deduction flags, which indicates the
+ /// No template argument deduction flags, which indicates the
/// strictest results for template argument deduction (as used for, e.g.,
/// matching class template partial specializations).
TDF_None = 0,
- /// \brief Within template argument deduction from a function call, we are
+
+ /// Within template argument deduction from a function call, we are
/// matching with a parameter type for which the original parameter was
/// a reference.
TDF_ParamWithReferenceType = 0x1,
- /// \brief Within template argument deduction from a function call, we
+
+ /// Within template argument deduction from a function call, we
/// are matching in a case where we ignore cv-qualifiers.
TDF_IgnoreQualifiers = 0x02,
- /// \brief Within template argument deduction from a function call,
+
+ /// Within template argument deduction from a function call,
/// we are matching in a case where we can perform template argument
/// deduction from a template-id of a derived class of the argument type.
TDF_DerivedClass = 0x04,
- /// \brief Allow non-dependent types to differ, e.g., when performing
+
+ /// Allow non-dependent types to differ, e.g., when performing
/// template argument deduction from a function call where conversions
/// may apply.
TDF_SkipNonDependent = 0x08,
- /// \brief Whether we are performing template argument deduction for
+
+ /// Whether we are performing template argument deduction for
    /// parameters and arguments in a top-level template argument list.
TDF_TopLevelParameterTypeList = 0x10,
- /// \brief Within template argument deduction from overload resolution per
+
+ /// Within template argument deduction from overload resolution per
/// C++ [over.over] allow matching function types that are compatible in
/// terms of noreturn and default calling convention adjustments, or
/// similarly matching a declared template specialization against a
@@ -62,12 +99,18 @@ namespace clang {
/// deduction where the parameter is a function type that can be converted
/// to the argument type.
TDF_AllowCompatibleFunctionType = 0x20,
+
+ /// Within template argument deduction for a conversion function, we are
+ /// matching with an argument type for which the original argument was
+ /// a reference.
+ TDF_ArgWithReferenceType = 0x40,
};
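// For instance (illustrative), TDF_ArgWithReferenceType applies when
// deducing for a conversion function in
//   struct S { template<typename U> operator U() const; };
//   const int &r = S();
// where the original A ('const int &') is a reference type, so A may be
// more cv-qualified than the deduced A.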
}
using namespace clang;
+using namespace sema;
-/// \brief Compare two APSInts, extending and switching the sign as
+/// Compare two APSInts, extending and switching the sign as
/// necessary to compare their values regardless of underlying type.
static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
if (Y.getBitWidth() > X.getBitWidth())
@@ -125,14 +168,14 @@ static void MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
bool OnlyDeduced, unsigned Level,
llvm::SmallBitVector &Deduced);
-/// \brief If the given expression is of a form that permits the deduction
+/// If the given expression is of a form that permits the deduction
/// of a non-type template parameter, return the declaration of that
/// non-type template parameter.
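///
/// For instance, inside an alias template such as
/// 'template<int N> using Id = X<N>;', the expression for N may be
/// wrapped in SubstNonTypeTemplateParmExpr nodes, which the loop below
/// strips before looking for the parameter.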
static NonTypeTemplateParmDecl *
getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
// If we are within an alias template, the expression may have undergone
// any number of parameter substitutions already.
- while (1) {
+ while (true) {
if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
E = IC->getSubExpr();
else if (SubstNonTypeTemplateParmExpr *Subst =
@@ -150,7 +193,7 @@ getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
return nullptr;
}
-/// \brief Determine whether two declaration pointers refer to the same
+/// Determine whether two declaration pointers refer to the same
/// declaration.
static bool isSameDeclaration(Decl *X, Decl *Y) {
if (NamedDecl *NX = dyn_cast<NamedDecl>(X))
@@ -161,7 +204,7 @@ static bool isSameDeclaration(Decl *X, Decl *Y) {
return X->getCanonicalDecl() == Y->getCanonicalDecl();
}
-/// \brief Verify that the given, deduced template arguments are compatible.
+/// Verify that the given, deduced template arguments are compatible.
///
/// \returns The deduced template argument, or a NULL template argument if
/// the deduced template arguments were incompatible.
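///
/// For example, a pack deduced as <int, float> at one use and as
/// <int, double> at another is incompatible; <int, float> at both uses
/// is compatible.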
@@ -270,7 +313,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
return Y;
}
- // If we deduced two declarations, make sure they they refer to the
+ // If we deduced two declarations, make sure that they refer to the
// same declaration.
if (Y.getKind() == TemplateArgument::Declaration &&
isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
@@ -297,7 +340,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
// All other combinations are incompatible.
return DeducedTemplateArgument();
- case TemplateArgument::Pack:
+ case TemplateArgument::Pack: {
if (Y.getKind() != TemplateArgument::Pack ||
X.pack_size() != Y.pack_size())
return DeducedTemplateArgument();
@@ -319,11 +362,12 @@ checkDeducedTemplateArguments(ASTContext &Context,
TemplateArgument::CreatePackCopy(Context, NewPack),
X.wasDeducedFromArrayBound() && Y.wasDeducedFromArrayBound());
}
+ }
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-/// \brief Deduce the value of the given non-type template parameter
+/// Deduce the value of the given non-type template parameter
/// as the given deduced template argument. All non-type template parameter
/// deduction is funneled through here.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
@@ -371,7 +415,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
/*ArrayBound=*/NewDeduced.wasDeducedFromArrayBound());
}
-/// \brief Deduce the value of the given non-type template parameter
+/// Deduce the value of the given non-type template parameter
/// from the given integral constant.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
@@ -385,7 +429,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
ValueType, Info, Deduced);
}
-/// \brief Deduce the value of the given non-type template parameter
+/// Deduce the value of the given non-type template parameter
/// from the given null pointer template argument type.
static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
Sema &S, TemplateParameterList *TemplateParams,
@@ -402,7 +446,7 @@ static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
Value->getType(), Info, Deduced);
}
-/// \brief Deduce the value of the given non-type template parameter
+/// Deduce the value of the given non-type template parameter
/// from the given type- or value-dependent expression.
///
/// \returns true if deduction succeeded, false otherwise.
@@ -415,7 +459,7 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
Value->getType(), Info, Deduced);
}
-/// \brief Deduce the value of the given non-type template parameter
+/// Deduce the value of the given non-type template parameter
/// from the given declaration.
///
/// \returns true if deduction succeeded, false otherwise.
@@ -475,7 +519,7 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_NonDeducedMismatch;
}
-/// \brief Deduce the template arguments by comparing the template parameter
+/// Deduce the template arguments by comparing the template parameter
/// type (which is a template-id) with the template argument type.
///
/// \param S the Sema
@@ -502,6 +546,10 @@ DeduceTemplateArguments(Sema &S,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
assert(Arg.isCanonical() && "Argument type must be canonical");
+ // Treat an injected-class-name as its underlying template-id.
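+  // For example, inside the definition of A<int>, the injected-class-name
+  // 'A' denotes A<int>, so a parameter written 'A<T>' is deduced against
+  // the underlying template-id A<int>, giving T = int.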
+ if (auto *Injected = dyn_cast<InjectedClassNameType>(Arg))
+ Arg = Injected->getInjectedSpecializationType();
+
// Check whether the template argument is a dependent template-id.
if (const TemplateSpecializationType *SpecArg
= dyn_cast<TemplateSpecializationType>(Arg)) {
@@ -556,7 +604,7 @@ DeduceTemplateArguments(Sema &S,
Deduced, /*NumberOfArgumentsMustMatch=*/true);
}
-/// \brief Determines whether the given type is an opaque type that
+/// Determines whether the given type is an opaque type that
/// might be more qualified when instantiated.
static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
switch (T->getTypeClass()) {
@@ -580,30 +628,7 @@ static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
}
}
-/// \brief Retrieve the depth and index of a template parameter.
-static std::pair<unsigned, unsigned>
-getDepthAndIndex(NamedDecl *ND) {
- if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
- return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
-
- TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-}
-
-/// \brief Retrieve the depth and index of an unexpanded parameter pack.
-static std::pair<unsigned, unsigned>
-getDepthAndIndex(UnexpandedParameterPack UPP) {
- if (const TemplateTypeParmType *TTP
- = UPP.first.dyn_cast<const TemplateTypeParmType *>())
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-
- return getDepthAndIndex(UPP.first.get<NamedDecl *>());
-}
-
-/// \brief Helper function to build a TemplateParameter when we don't
+/// Helper function to build a TemplateParameter when we don't
/// know its type statically.
static TemplateParameter makeTemplateParameter(Decl *D) {
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(D))
@@ -614,10 +639,21 @@ static TemplateParameter makeTemplateParameter(Decl *D) {
return TemplateParameter(cast<TemplateTemplateParmDecl>(D));
}
+/// If \p Param is an expanded parameter pack, get the number of expansions.
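+/// For example, within X<int, long> instantiated from
+/// 'template<typename ...T> struct X { template<T ...N> void f(); };',
+/// the pack N has a fixed expanded size of 2.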
+static Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param))
+ if (NTTP->isExpandedParameterPack())
+ return NTTP->getNumExpansionTypes();
+
+ if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Param))
+ if (TTP->isExpandedParameterPack())
+ return TTP->getNumExpansionTemplateParameters();
+
+ return None;
+}
+
/// A pack that we're currently deducing.
struct clang::DeducedPack {
- DeducedPack(unsigned Index) : Index(Index), Outer(nullptr) {}
-
// The index of the pack.
unsigned Index;
@@ -632,17 +668,93 @@ struct clang::DeducedPack {
SmallVector<DeducedTemplateArgument, 4> New;
// The outer deduction for this pack, if any.
- DeducedPack *Outer;
+ DeducedPack *Outer = nullptr;
+
+ DeducedPack(unsigned Index) : Index(Index) {}
};
namespace {
+
/// A scope in which we're performing pack deduction.
class PackDeductionScope {
public:
+ /// Prepare to deduce the packs named within Pattern.
PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
TemplateDeductionInfo &Info, TemplateArgument Pattern)
: S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) {
+ unsigned NumNamedPacks = addPacks(Pattern);
+ finishConstruction(NumNamedPacks);
+ }
+
+  /// Prepare to directly deduce arguments of the parameter with index
+  /// \p Index.
+ PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info, unsigned Index)
+ : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) {
+ addPack(Index);
+ finishConstruction(1);
+ }
+
+private:
+ void addPack(unsigned Index) {
+ // Save the deduced template argument for the parameter pack expanded
+ // by this pack expansion, then clear out the deduction.
+ DeducedPack Pack(Index);
+ Pack.Saved = Deduced[Index];
+ Deduced[Index] = TemplateArgument();
+
+ // FIXME: What if we encounter multiple packs with different numbers of
+ // pre-expanded expansions? (This should already have been diagnosed
+ // during substitution.)
+ if (Optional<unsigned> ExpandedPackExpansions =
+ getExpandedPackSize(TemplateParams->getParam(Index)))
+ FixedNumExpansions = ExpandedPackExpansions;
+
+ Packs.push_back(Pack);
+ }
+
+ unsigned addPacks(TemplateArgument Pattern) {
+ // Compute the set of template parameter indices that correspond to
+ // parameter packs expanded by the pack expansion.
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
+
+ auto AddPack = [&](unsigned Index) {
+ if (SawIndices[Index])
+ return;
+ SawIndices[Index] = true;
+ addPack(Index);
+ };
+
+ // First look for unexpanded packs in the pattern.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == Info.getDeducedDepth())
+ AddPack(Index);
+ }
+ assert(!Packs.empty() && "Pack expansion without unexpanded packs?");
+
+ unsigned NumNamedPacks = Packs.size();
+
+ // We can also have deduced template parameters that do not actually
+ // appear in the pattern, but can be deduced by it (the type of a non-type
+ // template parameter pack, in particular). These won't have prevented us
+ // from partially expanding the pack.
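+    // For instance, with parameters 'template<typename ...T, T ...V>' and
+    // a pattern naming only V, deducing V also deduces T (the type of V),
+    // so T is registered as a deduced pack here as well.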
+ llvm::SmallBitVector Used(TemplateParams->size());
+ MarkUsedTemplateParameters(S.Context, Pattern, /*OnlyDeduced*/true,
+ Info.getDeducedDepth(), Used);
+ for (int Index = Used.find_first(); Index != -1;
+ Index = Used.find_next(Index))
+ if (TemplateParams->getParam(Index)->isParameterPack())
+ AddPack(Index);
+
+ return NumNamedPacks;
+ }
+
+ void finishConstruction(unsigned NumNamedPacks) {
// Dig out the partially-substituted pack, if there is one.
const TemplateArgument *PartialPackArgs = nullptr;
unsigned NumPartialPackArgs = 0;
@@ -652,60 +764,29 @@ public:
&PartialPackArgs, &NumPartialPackArgs))
PartialPackDepthIndex = getDepthAndIndex(Partial);
- // Compute the set of template parameter indices that correspond to
- // parameter packs expanded by the pack expansion.
- {
- llvm::SmallBitVector SawIndices(TemplateParams->size());
-
- auto AddPack = [&](unsigned Index) {
- if (SawIndices[Index])
- return;
- SawIndices[Index] = true;
-
- // Save the deduced template argument for the parameter pack expanded
- // by this pack expansion, then clear out the deduction.
- DeducedPack Pack(Index);
- Pack.Saved = Deduced[Index];
- Deduced[Index] = TemplateArgument();
-
- Packs.push_back(Pack);
- };
-
- // First look for unexpanded packs in the pattern.
- SmallVector<UnexpandedParameterPack, 2> Unexpanded;
- S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
- for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
- unsigned Depth, Index;
- std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
- if (Depth == Info.getDeducedDepth())
- AddPack(Index);
+ // This pack expansion will have been partially or fully expanded if
+ // it only names explicitly-specified parameter packs (including the
+ // partially-substituted one, if any).
+ bool IsExpanded = true;
+ for (unsigned I = 0; I != NumNamedPacks; ++I) {
+ if (Packs[I].Index >= Info.getNumExplicitArgs()) {
+ IsExpanded = false;
+ IsPartiallyExpanded = false;
+ break;
+ }
+ if (PartialPackDepthIndex ==
+ std::make_pair(Info.getDeducedDepth(), Packs[I].Index)) {
+ IsPartiallyExpanded = true;
}
- assert(!Packs.empty() && "Pack expansion without unexpanded packs?");
-
- // This pack expansion will have been partially expanded iff the only
- // unexpanded parameter pack within it is the partially-substituted pack.
- IsPartiallyExpanded =
- Packs.size() == 1 &&
- PartialPackDepthIndex ==
- std::make_pair(Info.getDeducedDepth(), Packs.front().Index);
-
- // Skip over the pack elements that were expanded into separate arguments.
- if (IsPartiallyExpanded)
- PackElements += NumPartialPackArgs;
-
- // We can also have deduced template parameters that do not actually
- // appear in the pattern, but can be deduced by it (the type of a non-type
- // template parameter pack, in particular). These won't have prevented us
- // from partially expanding the pack.
- llvm::SmallBitVector Used(TemplateParams->size());
- MarkUsedTemplateParameters(S.Context, Pattern, /*OnlyDeduced*/true,
- Info.getDeducedDepth(), Used);
- for (int Index = Used.find_first(); Index != -1;
- Index = Used.find_next(Index))
- if (TemplateParams->getParam(Index)->isParameterPack())
- AddPack(Index);
}
+ // Skip over the pack elements that were expanded into separate arguments.
+ // If we partially expanded, this is the number of partial arguments.
+ if (IsPartiallyExpanded)
+ PackElements += NumPartialPackArgs;
+ else if (IsExpanded)
+ PackElements += *FixedNumExpansions;
+
for (auto &Pack : Packs) {
if (Info.PendingDeducedPacks.size() > Pack.Index)
Pack.Outer = Info.PendingDeducedPacks[Pack.Index];
@@ -724,12 +805,13 @@ public:
// FIXME: If we could represent a "depth i, index j, pack elem k"
// parameter, we could substitute the partially-substituted pack
// everywhere and avoid this.
- if (Pack.New.size() > PackElements)
+ if (!IsPartiallyExpanded)
Deduced[Pack.Index] = Pack.New[PackElements];
}
}
}
+public:
~PackDeductionScope() {
for (auto &Pack : Packs)
Info.PendingDeducedPacks[Pack.Index] = Pack.Outer;
@@ -739,6 +821,18 @@ public:
/// sequence of (prior) function parameters / template arguments.
bool isPartiallyExpanded() { return IsPartiallyExpanded; }
+ /// Determine whether this pack expansion scope has a known, fixed arity.
+ /// This happens if it involves a pack from an outer template that has
+ /// (notionally) already been expanded.
+ bool hasFixedArity() { return FixedNumExpansions.hasValue(); }
+
+ /// Determine whether the next element of the argument is still part of this
+ /// pack. This is the case unless the pack is already expanded to a fixed
+ /// length.
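+  /// For instance, with a fixed arity of 2, only the first two arguments
+  /// are matched against this pack.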
+ bool hasNextElement() {
+ return !FixedNumExpansions || *FixedNumExpansions > PackElements;
+ }
+
/// Move to deducing the next element in each pack that is being deduced.
void nextPackElement() {
// Capture the deduced template arguments for each parameter pack expanded
@@ -761,16 +855,24 @@ public:
++PackElements;
}
- /// \brief Finish template argument deduction for a set of argument packs,
+ /// Finish template argument deduction for a set of argument packs,
/// producing the argument packs and checking for consistency with prior
/// deductions.
- Sema::TemplateDeductionResult finish() {
+ Sema::TemplateDeductionResult
+ finish(bool TreatNoDeductionsAsNonDeduced = true) {
// Build argument packs for each of the parameter packs expanded by this
// pack expansion.
for (auto &Pack : Packs) {
// Put back the old value for this pack.
Deduced[Pack.Index] = Pack.Saved;
+      // If we are deducing the size of this pack even when we deduced no
+      // values for it, make sure we build a pack of the right size.
+ // FIXME: Should we always deduce the size, even if the pack appears in
+ // a non-deduced context?
+ if (!TreatNoDeductionsAsNonDeduced)
+ Pack.New.resize(PackElements);
+
// Build or find a new value for this pack.
DeducedTemplateArgument NewPack;
if (PackElements && Pack.New.empty()) {
@@ -826,14 +928,24 @@ public:
Result = checkDeducedTemplateArguments(S.Context, OldPack, NewPack);
}
+ NamedDecl *Param = TemplateParams->getParam(Pack.Index);
if (Result.isNull()) {
- Info.Param =
- makeTemplateParameter(TemplateParams->getParam(Pack.Index));
+ Info.Param = makeTemplateParameter(Param);
Info.FirstArg = OldPack;
Info.SecondArg = NewPack;
return Sema::TDK_Inconsistent;
}
+ // If we have a pre-expanded pack and we didn't deduce enough elements
+ // for it, fail deduction.
+ if (Optional<unsigned> Expansions = getExpandedPackSize(Param)) {
+ if (*Expansions != PackElements) {
+ Info.Param = makeTemplateParameter(Param);
+ Info.FirstArg = Result;
+ return Sema::TDK_IncompletePack;
+ }
+ }
+
*Loc = Result;
}
@@ -847,12 +959,15 @@ private:
TemplateDeductionInfo &Info;
unsigned PackElements = 0;
bool IsPartiallyExpanded = false;
+  /// The number of expansions, if we have a fully-expanded pack in this
+  /// scope.
+ Optional<unsigned> FixedNumExpansions;
SmallVector<DeducedPack, 2> Packs;
};
+
} // namespace
-/// \brief Deduce the template arguments by comparing the list of parameter
+/// Deduce the template arguments by comparing the list of parameter
/// types to the list of argument types, as in the parameter-type-lists of
/// function types (C++ [temp.deduct.type]p10).
///
@@ -891,12 +1006,6 @@ DeduceTemplateArguments(Sema &S,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned TDF,
bool PartialOrdering = false) {
- // Fast-path check to see if we have too many/too few arguments.
- if (NumParams != NumArgs &&
- !(NumParams && isa<PackExpansionType>(Params[NumParams - 1])) &&
- !(NumArgs && isa<PackExpansionType>(Args[NumArgs - 1])))
- return Sema::TDK_MiscellaneousDeductionFailure;
-
// C++0x [temp.deduct.type]p10:
// Similarly, if P has a form that contains (T), then each parameter type
// Pi of the respective parameter-type- list of P is compared with the
@@ -933,13 +1042,6 @@ DeduceTemplateArguments(Sema &S,
continue;
}
- // C++0x [temp.deduct.type]p5:
- // The non-deduced contexts are:
- // - A function parameter pack that does not occur at the end of the
- // parameter-declaration-clause.
- if (ParamIdx + 1 < NumParams)
- return Sema::TDK_Success;
-
// C++0x [temp.deduct.type]p10:
// If the parameter-declaration corresponding to Pi is a function
// parameter pack, then the type of its declarator- id is compared with
@@ -950,15 +1052,43 @@ DeduceTemplateArguments(Sema &S,
QualType Pattern = Expansion->getPattern();
PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern);
- for (; ArgIdx < NumArgs; ++ArgIdx) {
- // Deduce template arguments from the pattern.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
- Args[ArgIdx], Info, Deduced,
- TDF, PartialOrdering))
- return Result;
+ // A pack scope with fixed arity is not really a pack any more, so is not
+ // a non-deduced context.
+ if (ParamIdx + 1 == NumParams || PackScope.hasFixedArity()) {
+ for (; ArgIdx < NumArgs && PackScope.hasNextElement(); ++ArgIdx) {
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
+ Args[ArgIdx], Info, Deduced,
+ TDF, PartialOrdering))
+ return Result;
- PackScope.nextPackElement();
+ PackScope.nextPackElement();
+ }
+ } else {
+ // C++0x [temp.deduct.type]p5:
+ // The non-deduced contexts are:
+ // - A function parameter pack that does not occur at the end of the
+ // parameter-declaration-clause.
+ //
+ // FIXME: There is no wording to say what we should do in this case. We
+ // choose to resolve this by applying the same rule that is applied for a
+ // function call: that is, deduce all contained packs to their
+ // explicitly-specified values (or to <> if there is no such value).
+ //
+    // This differs, seemingly arbitrarily, from the case of a template-id
+ // with a non-trailing pack-expansion in its arguments, which renders the
+ // entire template-argument-list a non-deduced context.
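+    //
+    // For instance, matching 'void (T ..., int)' against
+    // 'void (int, int, int)' deduces the non-trailing pack T to its
+    // explicitly-specified value if one was given, and to <> otherwise.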
+
+ // If the parameter type contains an explicitly-specified pack that we
+ // could not expand, skip the number of parameters notionally created
+ // by the expansion.
+ Optional<unsigned> NumExpansions = Expansion->getNumExpansions();
+ if (NumExpansions && !PackScope.isPartiallyExpanded()) {
+ for (unsigned I = 0; I != *NumExpansions && ArgIdx < NumArgs;
+ ++I, ++ArgIdx)
+ PackScope.nextPackElement();
+ }
}
// Build argument packs for each of the parameter packs expanded by this
@@ -974,8 +1104,10 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_Success;
}
-/// \brief Determine whether the parameter has qualifiers that are either
-/// inconsistent with or a superset of the argument's qualifiers.
+/// Determine whether the parameter has qualifiers that the argument
+/// lacks. Put another way, determine whether there is no way to add
+/// a deduced set of qualifiers to the ParamType that would result in
+/// its qualifiers matching those of the ArgType.
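+///
+/// For example, Param = 'const volatile T' against Arg = 'const int'
+/// leaves 'volatile' unmatched and returns true, while Param = 'const T'
+/// against Arg = 'const volatile int' returns false.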
static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
QualType ArgType) {
Qualifiers ParamQs = ParamType.getQualifiers();
@@ -999,13 +1131,11 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
ParamQs.hasObjCLifetime())
return true;
- // CVR qualifier superset.
- return (ParamQs.getCVRQualifiers() != ArgQs.getCVRQualifiers()) &&
- ((ParamQs.getCVRQualifiers() | ArgQs.getCVRQualifiers())
- == ParamQs.getCVRQualifiers());
+ // CVR qualifiers inconsistent or a superset.
+ return (ParamQs.getCVRQualifiers() & ~ArgQs.getCVRQualifiers()) != 0;
}
-/// \brief Compare types for equality with respect to possibly compatible
+/// Compare types for equality with respect to possibly compatible
/// function types (noreturn adjustment, implicit calling conventions). If any
/// of parameter and argument is not a function, just perform type comparison.
///
@@ -1057,7 +1187,7 @@ static bool isForwardingReference(QualType Param, unsigned FirstInnerIndex) {
return false;
}
-/// \brief Deduce the template arguments by comparing the parameter type and
+/// Deduce the template arguments by comparing the parameter type and
/// the argument type (C++ [temp.deduct.type]).
///
/// \param S the semantic analysis object within which we are deducing
@@ -1228,6 +1358,12 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_Underqualified;
}
+ // Do not match a function type with a cv-qualified type.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1584
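+    //
+    // For example (the case from that defect report):
+    //   template<typename T> void f(const T*);
+    //   void g();
+    //   f(&g); // rejected: deducing T = void() would apply 'const'
+    //          // to a function type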
+ if (Arg->isFunctionType() && Param.hasQualifiers()) {
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
assert(TemplateTypeParm->getDepth() == Info.getDeducedDepth() &&
"saw template type parameter with wrong depth");
assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function");
@@ -1303,6 +1439,18 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (TDF & TDF_ParamWithReferenceType) {
if (hasInconsistentOrSupersetQualifiersOf(Param, Arg))
return Sema::TDK_NonDeducedMismatch;
+ } else if (TDF & TDF_ArgWithReferenceType) {
+ // C++ [temp.deduct.conv]p4:
+ // If the original A is a reference type, A can be more cv-qualified
+ // than the deduced A
+ if (!Arg.getQualifiers().compatiblyIncludes(Param.getQualifiers()))
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Strip out all extra qualifiers from the argument to figure out the
+ // type we're converting to, prior to the qualification conversion.
+ Qualifiers Quals;
+ Arg = S.Context.getUnqualifiedArrayType(Arg, Quals);
+ Arg = S.Context.getQualifiedType(Arg, Param.getQualifiers());
} else if (!IsPossiblyOpaquelyQualifiedType(Param)) {
if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
return Sema::TDK_NonDeducedMismatch;
@@ -1353,7 +1501,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::Enum:
case Type::ObjCObject:
case Type::ObjCInterface:
- case Type::ObjCObjectPointer: {
+ case Type::ObjCObjectPointer:
if (TDF & TDF_SkipNonDependent)
return Sema::TDK_Success;
@@ -1363,7 +1511,6 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
}
return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch;
- }
// _Complex T [placeholder extension]
case Type::Complex:
@@ -1557,7 +1704,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
"saw non-type template parameter with wrong depth");
llvm::APSInt Noexcept(1);
- switch (FunctionProtoArg->canThrow(S.Context)) {
+ switch (FunctionProtoArg->canThrow()) {
case CT_Cannot:
Noexcept = 1;
LLVM_FALLTHROUGH;
@@ -1578,11 +1725,14 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
}
}
// FIXME: Detect non-deduced exception specification mismatches?
+ //
+  // Be careful about [temp.deduct.call] and [temp.deduct.conv], which allow
+ // top-level differences in noexcept-specifications.
return Sema::TDK_Success;
}
- case Type::InjectedClassName: {
+ case Type::InjectedClassName:
// Treat a template's injected-class-name as if the template
// specialization type had been used.
Param = cast<InjectedClassNameType>(Param)
@@ -1590,7 +1740,6 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
assert(isa<TemplateSpecializationType>(Param) &&
"injected class name is not a template specialization type");
LLVM_FALLTHROUGH;
- }
// template-name<T> (where template-name refers to a class template)
// template-name<i>
@@ -1788,6 +1937,54 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_NonDeducedMismatch;
}
+ case Type::DependentVector: {
+ const auto *VectorParam = cast<DependentVectorType>(Param);
+
+ if (const auto *VectorArg = dyn_cast<VectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VectorParam->getElementType(),
+ VectorArg->getElementType(), Info, Deduced, TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
+ ArgSize = VectorArg->getNumElements();
+ // Note that we use the "array bound" rules here; just like in that
+ // case, we don't have any particular type for the vector size, but
+ // we can provide one if necessary.
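+      // For example, deducing against a concrete 4-element vector
+      // deduces N = 4, with 'unsigned int' supplied as its type if
+      // one is needed.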
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize,
+ S.Context.UnsignedIntTy, true,
+ Info, Deduced);
+ }
+
+ if (const auto *VectorArg = dyn_cast<DependentVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VectorParam->getElementType(),
+ VectorArg->getElementType(), Info, Deduced, TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
+ Info, VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, VectorArg->getSizeExpr(), Info, Deduced);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
// (clang extension)
//
// T __attribute__(((ext_vector_type(N))))
@@ -1992,7 +2189,7 @@ DeduceTemplateArguments(Sema &S,
Info.SecondArg = Arg;
return Sema::TDK_NonDeducedMismatch;
- case TemplateArgument::Expression: {
+ case TemplateArgument::Expression:
if (NonTypeTemplateParmDecl *NTTP
= getDeducedParameterFromExpr(Info, Param.getAsExpr())) {
if (Arg.getKind() == TemplateArgument::Integral)
@@ -2021,7 +2218,7 @@ DeduceTemplateArguments(Sema &S,
// Can't deduce anything, but that's okay.
return Sema::TDK_Success;
- }
+
case TemplateArgument::Pack:
llvm_unreachable("Argument packs should be expanded by the caller!");
}
@@ -2029,7 +2226,7 @@ DeduceTemplateArguments(Sema &S,
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-/// \brief Determine whether there is a template argument to be used for
+/// Determine whether there is a template argument to be used for
/// deduction.
///
/// This routine "expands" argument packs in-place, overriding its input
@@ -2052,7 +2249,7 @@ static bool hasTemplateArgumentForDeduction(ArrayRef<TemplateArgument> &Args,
return ArgIdx < Args.size();
}
-/// \brief Determine whether the given set of template arguments has a pack
+/// Determine whether the given set of template arguments has a pack
/// expansion that is not the last template argument.
static bool hasPackExpansionBeforeEnd(ArrayRef<TemplateArgument> Args) {
bool FoundPackExpansion = false;
@@ -2063,6 +2260,8 @@ static bool hasPackExpansionBeforeEnd(ArrayRef<TemplateArgument> Args) {
if (A.getKind() == TemplateArgument::Pack)
return hasPackExpansionBeforeEnd(A.pack_elements());
+ // FIXME: If this is a fixed-arity pack expansion from an outer level of
+ // templates, it should not be treated as a pack expansion.
if (A.isPackExpansion())
FoundPackExpansion = true;
}
@@ -2126,17 +2325,15 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// template parameter packs expanded by Pi.
TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern();
- // FIXME: If there are no remaining arguments, we can bail out early
- // and set any deduced parameter packs to an empty argument pack.
- // The latter part of this is a (minor) correctness issue.
-
// Prepare to deduce the packs within the pattern.
PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern);
// Keep track of the deduced template arguments for each parameter pack
// expanded by this pack expansion (the outer index) and for each
// template argument (the inner SmallVectors).
- for (; hasTemplateArgumentForDeduction(Args, ArgIdx); ++ArgIdx) {
+ for (; hasTemplateArgumentForDeduction(Args, ArgIdx) &&
+ PackScope.hasNextElement();
+ ++ArgIdx) {
// Deduce template arguments from the pattern.
if (Sema::TemplateDeductionResult Result
= DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
@@ -2167,7 +2364,7 @@ DeduceTemplateArguments(Sema &S,
/*NumberOfArgumentsMustMatch*/false);
}
-/// \brief Determine whether two template arguments are the same.
+/// Determine whether two template arguments are the same.
static bool isSameTemplateArg(ASTContext &Context,
TemplateArgument X,
const TemplateArgument &Y,
@@ -2228,7 +2425,7 @@ static bool isSameTemplateArg(ASTContext &Context,
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-/// \brief Allocate a TemplateArgumentLoc where all locations have
+/// Allocate a TemplateArgumentLoc where all locations have
/// been initialized to the given location.
///
/// \param Arg The template argument we are producing template argument
@@ -2303,8 +2500,7 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-
-/// \brief Convert the given deduced template argument and add it to the set of
+/// Convert the given deduced template argument and add it to the set of
/// fully-converted template arguments.
static bool
ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
@@ -2409,6 +2605,16 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
NamedDecl *Param = TemplateParams->getParam(I);
+ // C++0x [temp.arg.explicit]p3:
+ // A trailing template parameter pack (14.5.3) not otherwise deduced will
+ // be deduced to an empty sequence of template arguments.
+ // FIXME: Where did the word "trailing" come from?
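+    // For instance, given 'template<typename T, typename ...U> void f(T);',
+    // the call 'f(0)' deduces T = int while U appears in no deduced
+    // context, so U is deduced here to the empty pack <>.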
+ if (Deduced[I].isNull() && Param->isTemplateParameterPack()) {
+ if (auto Result = PackDeductionScope(S, TemplateParams, Deduced, Info, I)
+ .finish(/*TreatNoDeductionsAsNonDeduced*/false))
+ return Result;
+ }
+
if (!Deduced[I].isNull()) {
if (I < NumAlreadyConverted) {
// We may have had explicitly-specified template arguments for a
@@ -2443,40 +2649,6 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
continue;
}
- // C++0x [temp.arg.explicit]p3:
- // A trailing template parameter pack (14.5.3) not otherwise deduced will
- // be deduced to an empty sequence of template arguments.
- // FIXME: Where did the word "trailing" come from?
- if (Param->isTemplateParameterPack()) {
- // We may have had explicitly-specified template arguments for this
- // template parameter pack. If so, our empty deduction extends the
- // explicitly-specified set (C++0x [temp.arg.explicit]p9).
- const TemplateArgument *ExplicitArgs;
- unsigned NumExplicitArgs;
- if (CurrentInstantiationScope &&
- CurrentInstantiationScope->getPartiallySubstitutedPack(
- &ExplicitArgs, &NumExplicitArgs) == Param) {
- Builder.push_back(TemplateArgument(
- llvm::makeArrayRef(ExplicitArgs, NumExplicitArgs)));
-
- // Forget the partially-substituted pack; its substitution is now
- // complete.
- CurrentInstantiationScope->ResetPartiallySubstitutedPack();
- } else {
- // Go through the motions of checking the empty argument pack against
- // the parameter pack.
- DeducedTemplateArgument DeducedPack(TemplateArgument::getEmptyPack());
- if (ConvertDeducedTemplateArgument(S, Param, DeducedPack, Template,
- Info, IsDeduced, Builder)) {
- Info.Param = makeTemplateParameter(Param);
- // FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
- return Sema::TDK_SubstitutionFailure;
- }
- }
- continue;
- }
-
// Substitute into the default template argument, if available.
bool HasDefaultArg = false;
TemplateDecl *TD = dyn_cast<TemplateDecl>(Template);
@@ -2659,7 +2831,7 @@ static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
}
-/// \brief Perform template argument deduction to determine whether
+/// Perform template argument deduction to determine whether
/// the given template arguments match the given class template
/// partial specialization per C++ [temp.class.spec.match].
Sema::TemplateDeductionResult
@@ -2702,7 +2874,7 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
*this, Partial, /*PartialOrdering=*/false, TemplateArgs, Deduced, Info);
}
-/// \brief Perform template argument deduction to determine whether
+/// Perform template argument deduction to determine whether
/// the given template arguments match the given variable template
/// partial specialization per C++ [temp.class.spec.match].
Sema::TemplateDeductionResult
@@ -2743,7 +2915,7 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
*this, Partial, /*PartialOrdering=*/false, TemplateArgs, Deduced, Info);
}
-/// \brief Determine whether the given type T is a simple-template-id type.
+/// Determine whether the given type T is a simple-template-id type.
static bool isSimpleTemplateIdType(QualType T) {
if (const TemplateSpecializationType *Spec
= T->getAs<TemplateSpecializationType>())
@@ -2763,7 +2935,7 @@ static bool isSimpleTemplateIdType(QualType T) {
return false;
}
-/// \brief Substitute the explicitly-provided template arguments into the
+/// Substitute the explicitly-provided template arguments into the
/// given function template according to C++ [temp.arg.explicit].
///
/// \param FunctionTemplate the function template into which the explicit
@@ -2837,7 +3009,7 @@ Sema::SubstituteExplicitTemplateArguments(
Trap.hasErrorOccurred()) {
unsigned Index = Builder.size();
if (Index >= TemplateParams->size())
- Index = TemplateParams->size() - 1;
+ return TDK_SubstitutionFailure;
Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
return TDK_InvalidExplicitArguments;
}
@@ -2846,7 +3018,7 @@ Sema::SubstituteExplicitTemplateArguments(
// template arguments.
TemplateArgumentList *ExplicitArgumentList
= TemplateArgumentList::CreateCopy(Context, Builder);
- Info.reset(ExplicitArgumentList);
+ Info.setExplicitArgs(ExplicitArgumentList);
// Template argument deduction and the final substitution should be
// done in the context of the templated declaration. Explicit
@@ -2858,14 +3030,19 @@ Sema::SubstituteExplicitTemplateArguments(
// note that the template argument pack is partially substituted and record
// the explicit template arguments. They'll be used as part of deduction
// for this template parameter pack.
- for (unsigned I = 0, N = Builder.size(); I != N; ++I) {
- const TemplateArgument &Arg = Builder[I];
+ unsigned PartiallySubstitutedPackIndex = -1u;
+ if (!Builder.empty()) {
+ const TemplateArgument &Arg = Builder.back();
if (Arg.getKind() == TemplateArgument::Pack) {
- CurrentInstantiationScope->SetPartiallySubstitutedPack(
- TemplateParams->getParam(I),
- Arg.pack_begin(),
- Arg.pack_size());
- break;
+ auto *Param = TemplateParams->getParam(Builder.size() - 1);
+ // If this is a fully-saturated fixed-size pack, it should be
+ // fully-substituted, not partially-substituted.
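+      // For instance, A<int, int>::f<1, 2>() for
+      //   template<typename ...T> struct A {
+      //     template<T ...V> static void f();
+      //   };
+      // saturates V (fixed expanded size 2), so V is not treated as a
+      // partially-substituted pack.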
+ Optional<unsigned> Expansions = getExpandedPackSize(Param);
+ if (!Expansions || Arg.pack_size() < *Expansions) {
+ PartiallySubstitutedPackIndex = Builder.size() - 1;
+ CurrentInstantiationScope->SetPartiallySubstitutedPack(
+ Param, Arg.pack_begin(), Arg.pack_size());
+ }
}
}
@@ -2955,13 +3132,13 @@ Sema::SubstituteExplicitTemplateArguments(
// case, the empty template argument list <> itself may also be omitted.
//
// Take all of the explicitly-specified arguments and put them into
- // the set of deduced template arguments. Explicitly-specified
- // parameter packs, however, will be set to NULL since the deduction
- // mechanisms handle explicitly-specified argument packs directly.
+ // the set of deduced template arguments. The partially-substituted
+ // parameter pack, however, will be set to NULL since the deduction
+ // mechanism handles the partially-substituted argument pack directly.
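+  //
+  // For instance, given 'template<typename ...T> void h(T ...)' and a call
+  // 'h<int>(1, 2.0)', the explicitly-specified part <int> of the pack T is
+  // handled via the partially-substituted pack, so a null deduced argument
+  // is recorded for T here.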
Deduced.reserve(TemplateParams->size());
for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
const TemplateArgument &Arg = ExplicitArgumentList->get(I);
- if (Arg.getKind() == TemplateArgument::Pack)
+ if (I == PartiallySubstitutedPackIndex)
Deduced.push_back(DeducedTemplateArgument());
else
Deduced.push_back(Arg);
@@ -2970,7 +3147,7 @@ Sema::SubstituteExplicitTemplateArguments(
return TDK_Success;
}
-/// \brief Check whether the deduced argument type for a call to a function
+/// Check whether the deduced argument type for a call to a function
/// template matches the actual argument type per C++ [temp.deduct.call]p4.
static Sema::TemplateDeductionResult
CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
@@ -3080,7 +3257,7 @@ CheckOriginalCallArgDeduction(Sema &S, TemplateDeductionInfo &Info,
return Sema::TDK_Success;
if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
- S.IsDerivedFrom(SourceLocation(), A, DeducedA))
+ S.IsDerivedFrom(Info.getLocation(), A, DeducedA))
return Sema::TDK_Success;
return Failed();
@@ -3116,7 +3293,7 @@ static unsigned getPackIndexForParam(Sema &S,
llvm_unreachable("parameter index would not be produced from template");
}
-/// \brief Finish template argument deduction for a function template,
+/// Finish template argument deduction for a function template,
/// checking the deduced template arguments for completeness and forming
/// the function template specialization.
///
@@ -3267,13 +3444,14 @@ static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
// We may need to deduce the return type of the function now.
if (S.getLangOpts().CPlusPlus14 && Fn->getReturnType()->isUndeducedType() &&
S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/ false))
- return QualType();
+ return {};
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
if (Method->isInstance()) {
// An instance method that's referenced in a form that doesn't
// look like a member pointer is just invalid.
- if (!R.HasFormOfMemberPointer) return QualType();
+ if (!R.HasFormOfMemberPointer)
+ return {};
return S.Context.getMemberPointerType(Fn->getType(),
S.Context.getTypeDeclType(Method->getParent()).getTypePtr());
@@ -3322,7 +3500,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
S.resolveAddressOfOnlyViableOverloadCandidate(Arg, DAP))
return GetTypeOfFunction(S, R, Viable);
- return QualType();
+ return {};
}
// Gather the explicit template arguments, if any.
@@ -3339,7 +3517,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
// function templates, the parameter is treated as a
// non-deduced context.
if (!Ovl->hasExplicitTemplateArgs())
- return QualType();
+ return {};
// Otherwise, see if we can resolve a function type
FunctionDecl *Specialization = nullptr;
@@ -3379,14 +3557,15 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
= DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
ArgType, Info, Deduced, TDF);
if (Result) continue;
- if (!Match.isNull()) return QualType();
+ if (!Match.isNull())
+ return {};
Match = ArgType;
}
return Match;
}
-/// \brief Perform the adjustments to the parameter and argument types
+/// Perform the adjustments to the parameter and argument types
/// described in C++ [temp.deduct.call].
///
/// \returns true if the caller should not attempt to perform any template
@@ -3493,7 +3672,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
SmallVectorImpl<Sema::OriginalCallArg> &OriginalCallArgs,
bool DecomposedParam, unsigned ArgIdx, unsigned TDF);
-/// \brief Attempt template argument deduction from an initializer list
+/// Attempt template argument deduction from an initializer list
/// deemed to be an argument in a function call.
static Sema::TemplateDeductionResult DeduceFromInitializerList(
Sema &S, TemplateParameterList *TemplateParams, QualType AdjustedParamType,
@@ -3554,7 +3733,7 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
return Sema::TDK_Success;
}
-/// \brief Perform template argument deduction per [temp.deduct.call] for a
+/// Perform template argument deduction per [temp.deduct.call] for a
/// single parameter / argument pair.
static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
@@ -3587,7 +3766,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
ArgType, Info, Deduced, TDF);
}
-/// \brief Perform template argument deduction from a function call
+/// Perform template argument deduction from a function call
/// (C++ [temp.deduct.call]).
///
/// \param FunctionTemplate the function template for which we are performing
@@ -3725,8 +3904,9 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// the length of the explicitly-specified pack if it's expanded by the
// parameter pack and 0 otherwise, and we treat each deduction as a
// non-deduced context.
- if (ParamIdx + 1 == NumParamTypes) {
- for (; ArgIdx < Args.size(); PackScope.nextPackElement(), ++ArgIdx) {
+ if (ParamIdx + 1 == NumParamTypes || PackScope.hasFixedArity()) {
+ for (; ArgIdx < Args.size() && PackScope.hasNextElement();
+ PackScope.nextPackElement(), ++ArgIdx) {
ParamTypesForArgChecking.push_back(ParamPattern);
if (auto Result = DeduceCallArgument(ParamPattern, ArgIdx))
return Result;
@@ -3753,10 +3933,16 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
return Result;
}
+ // Capture the context in which the function call is made. This is the context
+ // that is needed when the accessibility of template arguments is checked.
+ DeclContext *CallingCtx = CurContext;
+
return FinishTemplateArgumentDeduction(
FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info,
- &OriginalCallArgs, PartialOverloading,
- [&]() { return CheckNonDependent(ParamTypesForArgChecking); });
+ &OriginalCallArgs, PartialOverloading, [&, CallingCtx]() {
+ ContextRAII SavedContext(*this, CallingCtx);
+ return CheckNonDependent(ParamTypesForArgChecking);
+ });
}
QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
@@ -3798,7 +3984,7 @@ QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
ArgFunctionTypeP->getParamTypes(), EPI);
}
-/// \brief Deduce template arguments when taking the address of a function
+/// Deduce template arguments when taking the address of a function
/// template (C++ [temp.deduct.funcaddr]) or matching a specialization to
/// a template.
///
@@ -3940,118 +4126,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
return TDK_Success;
}
-/// \brief Given a function declaration (e.g. a generic lambda conversion
-/// function) that contains an 'auto' in its result type, substitute it
-/// with TypeToReplaceAutoWith. Be careful to pass in the type you want
-/// to replace 'auto' with and not the actual result type you want
-/// to set the function to.
-static inline void
-SubstAutoWithinFunctionReturnType(FunctionDecl *F,
- QualType TypeToReplaceAutoWith, Sema &S) {
- assert(!TypeToReplaceAutoWith->getContainedAutoType());
- QualType AutoResultType = F->getReturnType();
- assert(AutoResultType->getContainedAutoType());
- QualType DeducedResultType = S.SubstAutoType(AutoResultType,
- TypeToReplaceAutoWith);
- S.Context.adjustDeducedFunctionResultType(F, DeducedResultType);
-}
-
-/// \brief Given a specialized conversion operator of a generic lambda
-/// create the corresponding specializations of the call operator and
-/// the static-invoker. If the return type of the call operator is auto,
-/// deduce its return type and check if that matches the
-/// return type of the destination function ptr.
-
-static inline Sema::TemplateDeductionResult
-SpecializeCorrespondingLambdaCallOperatorAndInvoker(
- CXXConversionDecl *ConversionSpecialized,
- SmallVectorImpl<DeducedTemplateArgument> &DeducedArguments,
- QualType ReturnTypeOfDestFunctionPtr,
- TemplateDeductionInfo &TDInfo,
- Sema &S) {
-
- CXXRecordDecl *LambdaClass = ConversionSpecialized->getParent();
- assert(LambdaClass && LambdaClass->isGenericLambda());
-
- CXXMethodDecl *CallOpGeneric = LambdaClass->getLambdaCallOperator();
- QualType CallOpResultType = CallOpGeneric->getReturnType();
- const bool GenericLambdaCallOperatorHasDeducedReturnType =
- CallOpResultType->getContainedAutoType();
-
- FunctionTemplateDecl *CallOpTemplate =
- CallOpGeneric->getDescribedFunctionTemplate();
-
- FunctionDecl *CallOpSpecialized = nullptr;
- // Use the deduced arguments of the conversion function, to specialize our
- // generic lambda's call operator.
- if (Sema::TemplateDeductionResult Result
- = S.FinishTemplateArgumentDeduction(CallOpTemplate,
- DeducedArguments,
- 0, CallOpSpecialized, TDInfo))
- return Result;
-
- // If we need to deduce the return type, do so (instantiates the callop).
- if (GenericLambdaCallOperatorHasDeducedReturnType &&
- CallOpSpecialized->getReturnType()->isUndeducedType())
- S.DeduceReturnType(CallOpSpecialized,
- CallOpSpecialized->getPointOfInstantiation(),
- /*Diagnose*/ true);
-
- // Check to see if the return type of the destination ptr-to-function
- // matches the return type of the call operator.
- if (!S.Context.hasSameType(CallOpSpecialized->getReturnType(),
- ReturnTypeOfDestFunctionPtr))
- return Sema::TDK_NonDeducedMismatch;
- // Since we have succeeded in matching the source and destination
- // ptr-to-functions (now including return type), and have successfully
- // specialized our corresponding call operator, we are ready to
- // specialize the static invoker with the deduced arguments of our
- // ptr-to-function.
- FunctionDecl *InvokerSpecialized = nullptr;
- FunctionTemplateDecl *InvokerTemplate = LambdaClass->
- getLambdaStaticInvoker()->getDescribedFunctionTemplate();
-
-#ifndef NDEBUG
- Sema::TemplateDeductionResult LLVM_ATTRIBUTE_UNUSED Result =
-#endif
- S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0,
- InvokerSpecialized, TDInfo);
- assert(Result == Sema::TDK_Success &&
- "If the call operator succeeded so should the invoker!");
- // Set the result type to match the corresponding call operator
- // specialization's result type.
- if (GenericLambdaCallOperatorHasDeducedReturnType &&
- InvokerSpecialized->getReturnType()->isUndeducedType()) {
- // Be sure to get the type to replace 'auto' with and not
- // the full result type of the call op specialization
- // to substitute into the 'auto' of the invoker and conversion
- // function.
- // For e.g.
- // int* (*fp)(int*) = [](auto* a) -> auto* { return a; };
- // We don't want to subst 'int*' into 'auto' to get int**.
-
- QualType TypeToReplaceAutoWith = CallOpSpecialized->getReturnType()
- ->getContainedAutoType()
- ->getDeducedType();
- SubstAutoWithinFunctionReturnType(InvokerSpecialized,
- TypeToReplaceAutoWith, S);
- SubstAutoWithinFunctionReturnType(ConversionSpecialized,
- TypeToReplaceAutoWith, S);
- }
-
- // Ensure that static invoker doesn't have a const qualifier.
- // FIXME: When creating the InvokerTemplate in SemaLambda.cpp
- // do not use the CallOperator's TypeSourceInfo which allows
- // the const qualifier to leak through.
- const FunctionProtoType *InvokerFPT = InvokerSpecialized->
- getType().getTypePtr()->castAs<FunctionProtoType>();
- FunctionProtoType::ExtProtoInfo EPI = InvokerFPT->getExtProtoInfo();
- EPI.TypeQuals = 0;
- InvokerSpecialized->setType(S.Context.getFunctionType(
- InvokerFPT->getReturnType(), InvokerFPT->getParamTypes(), EPI));
- return Sema::TDK_Success;
-}
-/// \brief Deduce template arguments for a templated conversion
+/// Deduce template arguments for a templated conversion
/// function (C++ [temp.deduct.conv]) and, if successful, produce a
/// conversion function template specialization.
Sema::TemplateDeductionResult
@@ -4080,12 +4155,20 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
// C++0x [temp.deduct.conv]p4:
// [...] If A is a reference type, the type referred to by A is used
// for type deduction.
- if (const ReferenceType *ARef = A->getAs<ReferenceType>())
- A = ARef->getPointeeType().getUnqualifiedType();
+ if (const ReferenceType *ARef = A->getAs<ReferenceType>()) {
+ A = ARef->getPointeeType();
+ // We work around a defect in the standard here: cv-qualifiers are also
+ // removed from P and A in this case, unless P was a reference type. This
+ // seems to mostly match what other compilers are doing.
+ if (!FromType->getAs<ReferenceType>()) {
+ A = A.getUnqualifiedType();
+ P = P.getUnqualifiedType();
+ }
+
// C++ [temp.deduct.conv]p3:
//
// If A is not a reference type:
- else {
+ } else {
assert(!A->isReferenceType() && "Reference types were handled above");
// - If P is an array type, the pointer type produced by the
@@ -4134,7 +4217,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
// cv-qualified than the deduced A (i.e., the type referred to
// by the reference)
if (ToType->isReferenceType())
- TDF |= TDF_ParamWithReferenceType;
+ TDF |= TDF_ArgWithReferenceType;
// - The deduced A can be another pointer or pointer to member
// type that can be converted to A via a qualification
// conversion.
@@ -4158,39 +4241,10 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
= FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
ConversionSpecialized, Info);
Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
-
- // If the conversion operator is being invoked on a lambda closure to convert
- // to a ptr-to-function, use the deduced arguments from the conversion
- // function to specialize the corresponding call operator.
- // e.g., int (*fp)(int) = [](auto a) { return a; };
- if (Result == TDK_Success && isLambdaConversionOperator(ConversionGeneric)) {
-
- // Get the return type of the destination ptr-to-function we are converting
- // to. This is necessary for matching the lambda call operator's return
- // type to that of the destination ptr-to-function's return type.
- assert(A->isPointerType() &&
- "Can only convert from lambda to ptr-to-function");
- const FunctionType *ToFunType =
- A->getPointeeType().getTypePtr()->getAs<FunctionType>();
- const QualType DestFunctionPtrReturnType = ToFunType->getReturnType();
-
- // Create the corresponding specializations of the call operator and
- // the static-invoker; and if the return type is auto,
- // deduce the return type and check if it matches the
- // DestFunctionPtrReturnType.
- // For instance:
- // auto L = [](auto a) { return f(a); };
- // int (*fp)(int) = L;
- // char (*fp2)(int) = L; <-- Not OK.
-
- Result = SpecializeCorrespondingLambdaCallOperatorAndInvoker(
- Specialization, Deduced, DestFunctionPtrReturnType,
- Info, *this);
- }
return Result;
}
-/// \brief Deduce template arguments for a function template when there is
+/// Deduce template arguments for a function template when there is
/// nothing to deduce against (C++0x [temp.arg.explicit]p3).
///
/// \param FunctionTemplate the function template for which we are performing
@@ -4225,12 +4279,14 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
}
namespace {
+
/// Substitute the 'auto' specifier or deduced template specialization type
/// specifier within a type for a given replacement type.
class SubstituteDeducedTypeTransform :
public TreeTransform<SubstituteDeducedTypeTransform> {
QualType Replacement;
bool UseTypeSugar;
+
public:
SubstituteDeducedTypeTransform(Sema &SemaRef, QualType Replacement,
bool UseTypeSugar = true)
@@ -4292,7 +4348,8 @@ namespace {
return TransformType(TLB, TL);
}
};
-}
+
+} // namespace
Sema::DeduceAutoResult
Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result,
@@ -4326,7 +4383,7 @@ static bool diagnoseAutoDeductionFailure(Sema &S,
}
}
-/// \brief Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
+/// Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
///
/// Note that this is done even if the initializer is dependent. (This is
/// necessary to support partial ordering of templates using 'auto'.)
@@ -4536,6 +4593,43 @@ bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose) {
assert(FD->getReturnType()->isUndeducedType());
+ // For a lambda's conversion operator, deduce any 'auto' or 'decltype(auto)'
+ // within the return type from the call operator's type.
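+  //
+  // For example:
+  //   auto L = [](auto *p) -> auto* { return p; };
+  //   int *(*fp)(int *) = L;
+  // deduces the conversion function's return type from the return type
+  // of the specialized call operator, operator()(int *).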
+ if (isLambdaConversionOperator(FD)) {
+ CXXRecordDecl *Lambda = cast<CXXMethodDecl>(FD)->getParent();
+ FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
+
+ // For a generic lambda, instantiate the call operator if needed.
+ if (auto *Args = FD->getTemplateSpecializationArgs()) {
+ CallOp = InstantiateFunctionDeclaration(
+ CallOp->getDescribedFunctionTemplate(), Args, Loc);
+ if (!CallOp || CallOp->isInvalidDecl())
+ return true;
+
+ // We might need to deduce the return type by instantiating the definition
+ // of the operator() function.
+ if (CallOp->getReturnType()->isUndeducedType())
+ InstantiateFunctionDefinition(Loc, CallOp);
+ }
+
+ if (CallOp->isInvalidDecl())
+ return true;
+ assert(!CallOp->getReturnType()->isUndeducedType() &&
+ "failed to deduce lambda return type");
+
+ // Build the new return type from scratch.
+ QualType RetType = getLambdaConversionFunctionResultType(
+ CallOp->getType()->castAs<FunctionProtoType>());
+ if (FD->getReturnType()->getAs<PointerType>())
+ RetType = Context.getPointerType(RetType);
+ else {
+ assert(FD->getReturnType()->getAs<BlockPointerType>());
+ RetType = Context.getBlockPointerType(RetType);
+ }
+ Context.adjustDeducedFunctionResultType(FD, RetType);
+ return false;
+ }
+
if (FD->getTemplateInstantiationPattern())
InstantiateFunctionDefinition(Loc, FD);
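The block added above handles conversions like the following, where the
conversion operator's own 'auto' return type must be recovered from a
(possibly freshly instantiated) operator(); the names below are illustrative:

    auto L = [](auto a) { return a + 1; };
    int (*fp)(int) = L;        // operator()<int> instantiated; returns int
    double (*fp2)(double) = L; // a second specialization; returns double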
@@ -4548,7 +4642,7 @@ bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
return StillUndeduced;
}
-/// \brief If this is a non-static member function,
+/// If this is a non-static member function,
static void
AddImplicitObjectParameterType(ASTContext &Context,
CXXMethodDecl *Method,
@@ -4570,7 +4664,7 @@ AddImplicitObjectParameterType(ASTContext &Context,
ArgTypes.push_back(ArgTy);
}
-/// \brief Determine whether the function template \p FT1 is at least as
+/// Determine whether the function template \p FT1 is at least as
/// specialized as \p FT2.
static bool isAtLeastAsSpecializedAs(Sema &S,
SourceLocation Loc,
@@ -4721,7 +4815,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
return true;
}
-/// \brief Determine whether this a function template whose parameter-type-list
+/// Determine whether this is a function template whose parameter-type-list
/// ends with a function parameter pack.
static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
@@ -4742,7 +4836,7 @@ static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
return true;
}
-/// \brief Returns the more specialized function template according
+/// Returns the more specialized function template according
/// to the rules of function template partial ordering (C++ [temp.func.order]).
///
/// \param FT1 the first function template
@@ -4789,7 +4883,7 @@ Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
return nullptr;
}
-/// \brief Determine if the two templates are equivalent.
+/// Determine if the two templates are equivalent.
static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
if (T1 == T2)
return true;
@@ -4800,7 +4894,7 @@ static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
return T1->getCanonicalDecl() == T2->getCanonicalDecl();
}
-/// \brief Retrieve the most specialized of the given function template
+/// Retrieve the most specialized of the given function template
/// specializations.
///
/// \param SpecBegin the start iterator of the function template
@@ -4959,7 +5053,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
return true;
}
-/// \brief Returns the more specialized class template partial specialization
+/// Returns the more specialized class template partial specialization
/// according to the rules of partial ordering of class template partial
/// specializations (C++ [temp.class.order]).
///
@@ -5114,7 +5208,7 @@ bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
return isAtLeastAsSpecializedAs(*this, PType, AType, AArg, Info);
}
-/// \brief Mark the template parameters that are used by the given
+/// Mark the template parameters that are used by the given
/// expression.
static void
MarkUsedTemplateParameters(ASTContext &Ctx,
@@ -5128,7 +5222,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
// Skip through any implicit casts we added while type-checking, and any
// substitutions performed by template alias expansion.
- while (1) {
+ while (true) {
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExpr();
else if (const SubstNonTypeTemplateParmExpr *Subst =
@@ -5158,7 +5252,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
MarkUsedTemplateParameters(Ctx, NTTP->getType(), OnlyDeduced, Depth, Used);
}
-/// \brief Mark the template parameters that are used by the given
+/// Mark the template parameters that are used by the given
/// nested name specifier.
static void
MarkUsedTemplateParameters(ASTContext &Ctx,
@@ -5175,7 +5269,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
OnlyDeduced, Depth, Used);
}
-/// \brief Mark the template parameters that are used by the given
+/// Mark the template parameters that are used by the given
/// template name.
static void
MarkUsedTemplateParameters(ASTContext &Ctx,
@@ -5200,7 +5294,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
Depth, Used);
}
-/// \brief Mark the template parameters that are used by the given
+/// Mark the template parameters that are used by the given
/// type.
static void
MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
@@ -5271,6 +5365,14 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
OnlyDeduced, Depth, Used);
break;
+ case Type::DependentVector: {
+ const auto *VecType = cast<DependentVectorType>(T);
+ MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced, Depth,
+ Used);
+ break;
+ }
case Type::DependentSizedExtVector: {
const DependentSizedExtVectorType *VecType
= cast<DependentSizedExtVectorType>(T);
@@ -5296,9 +5398,24 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, Depth,
Used);
- for (unsigned I = 0, N = Proto->getNumParams(); I != N; ++I)
- MarkUsedTemplateParameters(Ctx, Proto->getParamType(I), OnlyDeduced,
- Depth, Used);
+ for (unsigned I = 0, N = Proto->getNumParams(); I != N; ++I) {
+ // C++17 [temp.deduct.type]p5:
+ // The non-deduced contexts are: [...]
+ // -- A function parameter pack that does not occur at the end of the
+ // parameter-declaration-list.
+ if (!OnlyDeduced || I + 1 == N ||
+ !Proto->getParamType(I)->getAs<PackExpansionType>()) {
+ MarkUsedTemplateParameters(Ctx, Proto->getParamType(I), OnlyDeduced,
+ Depth, Used);
+ } else {
+ // FIXME: C++17 [temp.deduct.call]p1:
+ // When a function parameter pack appears in a non-deduced context,
+ // the type of that pack is never deduced.
+ //
+ // We should also track a set of "never deduced" parameters, and
+ // subtract that from the list of deduced parameters after marking.
+ }
+ }
if (auto *E = Proto->getNoexceptExpr())
MarkUsedTemplateParameters(Ctx, E, OnlyDeduced, Depth, Used);
break;
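The skipped case corresponds to code such as the following, where the pack
sits in a non-deduced context and can only be supplied explicitly:

    template <typename ...Ts> void g(Ts ...ts, int last);
    g(1, 2, 3);           // error: Ts precedes the end of the parameter
                          //        list, so it cannot be deduced
    g<int, int>(1, 2, 3); // OK: Ts explicitly given as {int, int}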
@@ -5453,7 +5570,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
}
}
-/// \brief Mark the template parameters that are used by this
+/// Mark the template parameters that are used by this
/// template argument.
static void
MarkUsedTemplateParameters(ASTContext &Ctx,
@@ -5496,7 +5613,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
}
}
-/// \brief Mark which template parameters can be deduced from a given
+/// Mark which template parameters can be deduced from a given
/// template argument list.
///
/// \param TemplateArgs the template argument list from which template
@@ -5522,7 +5639,7 @@ Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
Depth, Used);
}
-/// \brief Marks all of the template parameters that will be deduced by a
+/// Marks all of the template parameters that will be deduced by a
/// call to the given function template.
void Sema::MarkDeducedTemplateParameters(
ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index a48e2466a84d..bc2ee42400b7 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -18,13 +18,14 @@
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
+#include "clang/Sema/TemplateInstCallback.h"
using namespace clang;
using namespace sema;
@@ -33,7 +34,7 @@ using namespace sema;
// Template Instantiation Support
//===----------------------------------------------------------------------===/
-/// \brief Retrieve the template argument list(s) that should be used to
+/// Retrieve the template argument list(s) that should be used to
/// instantiate the definition of the given declaration.
///
/// \param D the declaration for which we are computing template instantiation
@@ -199,6 +200,10 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case DeclaringSpecialMember:
case DefiningSynthesizedFunction:
return false;
+
+ // This function should never be called when Kind's value is Memoization.
+ case Memoization:
+ break;
}
llvm_unreachable("Invalid SynthesisKind!");
@@ -235,6 +240,7 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
!SemaRef.InstantiatingSpecializations
.insert(std::make_pair(Inst.Entity->getCanonicalDecl(), Inst.Kind))
.second;
+ atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst);
}
}
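The atTemplateBegin/atTemplateEnd calls notify any registered template
instantiation observers. A sketch of one consumer, assuming the
TemplateInstantiationCallback interface declared in the newly added
clang/Sema/TemplateInstCallback.h:

    class TracingCallback : public TemplateInstantiationCallback {
      void initialize(const Sema &) override {}
      void finalize(const Sema &) override {}
      void atTemplateBegin(const Sema &S,
                           const Sema::CodeSynthesisContext &Inst) override {
        llvm::errs() << "instantiation begins\n";
      }
      void atTemplateEnd(const Sema &S,
                         const Sema::CodeSynthesisContext &Inst) override {
        llvm::errs() << "instantiation ends\n";
      }
    };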
@@ -394,8 +400,10 @@ void Sema::InstantiatingTemplate::Clear() {
std::make_pair(Active.Entity, Active.Kind));
}
- SemaRef.popCodeSynthesisContext();
+ atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef,
+ SemaRef.CodeSynthesisContexts.back());
+ SemaRef.popCodeSynthesisContext();
Invalid = true;
}
}
@@ -419,7 +427,7 @@ bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
return true;
}
-/// \brief Prints the current instantiation stack through a series of
+/// Prints the current instantiation stack through a series of
/// notes.
void Sema::PrintInstantiationStack() {
// Determine which template instantiations to skip, if any.
@@ -626,7 +634,7 @@ void Sema::PrintInstantiationStack() {
<< cast<CXXRecordDecl>(Active->Entity) << Active->SpecialMember;
break;
- case CodeSynthesisContext::DefiningSynthesizedFunction:
+ case CodeSynthesisContext::DefiningSynthesizedFunction: {
// FIXME: For synthesized members other than special members, produce a note.
auto *MD = dyn_cast<CXXMethodDecl>(Active->Entity);
auto CSM = MD ? getSpecialMember(MD) : CXXInvalid;
@@ -637,6 +645,10 @@ void Sema::PrintInstantiationStack() {
}
break;
}
+
+ case CodeSynthesisContext::Memoization:
+ break;
+ }
}
}
@@ -682,6 +694,9 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// This happens in a context unrelated to template instantiation, so
// there is no SFINAE.
return None;
+
+ case CodeSynthesisContext::Memoization:
+ break;
}
// The inner context was transparent for SFINAE. If it occurred within a
@@ -693,19 +708,6 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
return None;
}
-/// \brief Retrieve the depth and index of a parameter pack.
-static std::pair<unsigned, unsigned>
-getDepthAndIndex(NamedDecl *ND) {
- if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
- return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
-
- TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-}
-
//===----------------------------------------------------------------------===/
// Template Instantiation for Types
//===----------------------------------------------------------------------===/
@@ -725,20 +727,20 @@ namespace {
: inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
Entity(Entity) { }
- /// \brief Determine whether the given type \p T has already been
+ /// Determine whether the given type \p T has already been
/// transformed.
///
/// For the purposes of template instantiation, a type has already been
/// transformed if it is NULL or if it is not dependent.
bool AlreadyTransformed(QualType T);
- /// \brief Returns the location of the entity being instantiated, if known.
+ /// Returns the location of the entity being instantiated, if known.
SourceLocation getBaseLocation() { return Loc; }
- /// \brief Returns the name of the entity being instantiated, if any.
+ /// Returns the name of the entity being instantiated, if any.
DeclarationName getBaseEntity() { return Entity; }
- /// \brief Sets the "base" location and entity when that
+ /// Sets the "base" location and entity when that
/// information is known based on another transformation.
void setBase(SourceLocation Loc, DeclarationName Entity) {
this->Loc = Loc;
@@ -793,7 +795,7 @@ namespace {
}
}
- /// \brief Transform the given declaration by instantiating a reference to
+ /// Transform the given declaration by instantiating a reference to
/// this declaration.
Decl *TransformDecl(SourceLocation Loc, Decl *D);
@@ -824,15 +826,15 @@ namespace {
SemaRef.PerformDependentDiagnostics(DC, TemplateArgs);
}
- /// \brief Transform the definition of the given declaration by
+ /// Transform the definition of the given declaration by
/// instantiating it.
Decl *TransformDefinition(SourceLocation Loc, Decl *D);
- /// \brief Transform the first qualifier within a scope by instantiating the
+ /// Transform the first qualifier within a scope by instantiating the
/// declaration.
NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc);
- /// \brief Rebuild the exception declaration and register the declaration
+ /// Rebuild the exception declaration and register the declaration
/// as an instantiated local.
VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *Declarator,
@@ -840,12 +842,12 @@ namespace {
SourceLocation NameLoc,
IdentifierInfo *Name);
- /// \brief Rebuild the Objective-C exception declaration and register the
+ /// Rebuild the Objective-C exception declaration and register the
/// declaration as an instantiated local.
VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
TypeSourceInfo *TSInfo, QualType T);
- /// \brief Check for tag mismatches when instantiating an
+ /// Check for tag mismatches when instantiating an
/// elaborated type.
QualType RebuildElaboratedType(SourceLocation KeywordLoc,
ElaboratedTypeKeyword Keyword,
@@ -870,14 +872,14 @@ namespace {
ExprResult TransformSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E);
- /// \brief Rebuild a DeclRefExpr for a ParmVarDecl reference.
+ /// Rebuild a DeclRefExpr for a ParmVarDecl reference.
ExprResult RebuildParmVarDeclRefExpr(ParmVarDecl *PD, SourceLocation Loc);
- /// \brief Transform a reference to a function parameter pack.
+ /// Transform a reference to a function parameter pack.
ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E,
ParmVarDecl *PD);
- /// \brief Transform a FunctionParmPackExpr which was built when we couldn't
+ /// Transform a FunctionParmPackExpr which was built when we couldn't
/// expand a function parameter pack reference which refers to an expanded
/// pack.
ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E);
@@ -900,12 +902,12 @@ namespace {
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
- /// \brief Transforms a template type parameter type by performing
+ /// Transforms a template type parameter type by performing
/// substitution of the corresponding template type argument.
QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
TemplateTypeParmTypeLoc TL);
- /// \brief Transforms an already-substituted template type parameter pack
+ /// Transforms an already-substituted template type parameter pack
/// into either itself (if we aren't substituting into its pack expansion)
/// or the appropriate substituted argument.
QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
@@ -1197,11 +1199,11 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
NTTP->getDeclName());
if (TargetType.isNull())
return ExprError();
-
- return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(TargetType,
- NTTP,
- E->getLocation(),
- Arg);
+
+ return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(
+ TargetType.getNonLValueExprType(SemaRef.Context),
+ TargetType->isReferenceType() ? VK_LValue : VK_RValue, NTTP,
+ E->getLocation(), Arg);
}
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
@@ -1246,7 +1248,7 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
arg.getKind() == TemplateArgument::NullPtr) {
ValueDecl *VD;
if (arg.getKind() == TemplateArgument::Declaration) {
- VD = cast<ValueDecl>(arg.getAsDecl());
+ VD = arg.getAsDecl();
// Find the instantiation of the template argument. This is
// required for nested templates.
@@ -1525,7 +1527,7 @@ TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
return Result;
}
-/// \brief Perform substitution on the type T with a given set of template
+/// Perform substitution on the type T with a given set of template
/// arguments.
///
/// This routine substitutes the given template arguments into the
@@ -1820,7 +1822,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
return NewParm;
}
-/// \brief Substitute the given template arguments into the given set of
+/// Substitute the given template arguments into the given set of
/// parameters, producing the set of parameter types that would be generated
/// from such a substitution.
bool Sema::SubstParmTypes(
@@ -1840,7 +1842,7 @@ bool Sema::SubstParmTypes(
Loc, Params, nullptr, ExtParamInfos, ParamTypes, OutParams, ParamInfos);
}
-/// \brief Perform substitution on the base class specifiers of the
+/// Perform substitution on the base class specifiers of the
/// given class template specialization.
///
/// Produces a diagnostic and returns true on error, returns false and
@@ -1960,7 +1962,7 @@ namespace clang {
}
}
-/// \brief Instantiate the definition of a class from a given pattern.
+/// Instantiate the definition of a class from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
/// source code.
@@ -1996,7 +1998,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
return true;
Pattern = PatternDef;
- // \brief Record the point of instantiation.
+ // Record the point of instantiation.
if (MemberSpecializationInfo *MSInfo
= Instantiation->getMemberSpecializationInfo()) {
MSInfo->setTemplateSpecializationKind(TSK);
@@ -2011,7 +2013,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (Inst.isInvalid())
return true;
assert(!Inst.isAlreadyInstantiating() && "should have been caught by caller");
- PrettyDeclStackTraceEntry CrashInfo(*this, Instantiation, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating class definition");
// Enter the scope of this instantiation. We don't use
@@ -2068,6 +2070,11 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (Member->getDeclContext() != Pattern)
continue;
+ // BlockDecls can appear in a default-member-initializer. They must be the
+ // children of a BlockExpr, so we only know how to instantiate them from there.
+ if (isa<BlockDecl>(Member))
+ continue;
+
if (Member->isInvalidDecl()) {
Instantiation->setInvalidDecl();
continue;
@@ -2110,7 +2117,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
// Finish checking fields.
ActOnFields(nullptr, Instantiation->getLocation(), Instantiation, Fields,
- SourceLocation(), SourceLocation(), nullptr);
+ SourceLocation(), SourceLocation(), ParsedAttributesView());
CheckCompletedCXXClass(Instantiation);
// Default arguments are parsed, if not instantiated. We can go instantiate
@@ -2196,7 +2203,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
return Instantiation->isInvalidDecl();
}
-/// \brief Instantiate the definition of an enum from a given pattern.
+/// Instantiate the definition of an enum from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
/// source code.
@@ -2234,7 +2241,7 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
return true;
if (Inst.isAlreadyInstantiating())
return false;
- PrettyDeclStackTraceEntry CrashInfo(*this, Instantiation, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating enum definition");
// The instantiation is visible here, even if it was first declared in an
@@ -2262,7 +2269,7 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
}
-/// \brief Instantiate the definition of a field from the given pattern.
+/// Instantiate the definition of a field from the given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
/// source code.
@@ -2310,7 +2317,7 @@ bool Sema::InstantiateInClassInitializer(
<< Instantiation;
return true;
}
- PrettyDeclStackTraceEntry CrashInfo(*this, Instantiation, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating default member init");
// Enter the scope of this instantiation. We don't use PushDeclContext because
@@ -2340,7 +2347,7 @@ bool Sema::InstantiateInClassInitializer(
}
namespace {
- /// \brief A partial specialization whose template arguments have matched
+ /// A partial specialization whose template arguments have matched
/// a given template-id.
struct PartialSpecMatchResult {
ClassTemplatePartialSpecializationDecl *Partial;
@@ -2379,127 +2386,137 @@ getPatternForClassTemplateSpecialization(
if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
return nullptr;
- ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
- CXXRecordDecl *Pattern = nullptr;
-
- // C++ [temp.class.spec.match]p1:
- // When a class template is used in a context that requires an
- // instantiation of the class, it is necessary to determine
- // whether the instantiation is to be generated using the primary
- // template or one of the partial specializations. This is done by
- // matching the template arguments of the class template
- // specialization with the template argument lists of the partial
- // specializations.
- typedef PartialSpecMatchResult MatchResult;
- SmallVector<MatchResult, 4> Matched;
- SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
- Template->getPartialSpecializations(PartialSpecs);
- TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation);
- for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
- ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
- TemplateDeductionInfo Info(FailedCandidates.getLocation());
- if (Sema::TemplateDeductionResult Result = S.DeduceTemplateArguments(
- Partial, ClassTemplateSpec->getTemplateArgs(), Info)) {
- // Store the failed-deduction information for use in diagnostics, later.
- // TODO: Actually use the failed-deduction info?
- FailedCandidates.addCandidate().set(
- DeclAccessPair::make(Template, AS_public), Partial,
- MakeDeductionFailureInfo(S.Context, Result, Info));
- (void)Result;
- } else {
- Matched.push_back(PartialSpecMatchResult());
- Matched.back().Partial = Partial;
- Matched.back().Args = Info.take();
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ Specialized = ClassTemplateSpec->getSpecializedTemplateOrPartial();
+ if (!Specialized.is<ClassTemplatePartialSpecializationDecl *>()) {
+ // Find best matching specialization.
+ ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
+
+ // C++ [temp.class.spec.match]p1:
+ // When a class template is used in a context that requires an
+ // instantiation of the class, it is necessary to determine
+ // whether the instantiation is to be generated using the primary
+ // template or one of the partial specializations. This is done by
+ // matching the template arguments of the class template
+ // specialization with the template argument lists of the partial
+ // specializations.
+ typedef PartialSpecMatchResult MatchResult;
+ SmallVector<MatchResult, 4> Matched;
+ SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ Template->getPartialSpecializations(PartialSpecs);
+ TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
+ ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
+ TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ if (Sema::TemplateDeductionResult Result = S.DeduceTemplateArguments(
+ Partial, ClassTemplateSpec->getTemplateArgs(), Info)) {
+ // Store the failed-deduction information for use in diagnostics, later.
+ // TODO: Actually use the failed-deduction info?
+ FailedCandidates.addCandidate().set(
+ DeclAccessPair::make(Template, AS_public), Partial,
+ MakeDeductionFailureInfo(S.Context, Result, Info));
+ (void)Result;
+ } else {
+ Matched.push_back(PartialSpecMatchResult());
+ Matched.back().Partial = Partial;
+ Matched.back().Args = Info.take();
+ }
}
- }
- // If we're dealing with a member template where the template parameters
- // have been instantiated, this provides the original template parameters
- // from which the member template's parameters were instantiated.
+ // If we're dealing with a member template where the template parameters
+ // have been instantiated, this provides the original template parameters
+ // from which the member template's parameters were instantiated.
- if (Matched.size() >= 1) {
- SmallVectorImpl<MatchResult>::iterator Best = Matched.begin();
- if (Matched.size() == 1) {
- // -- If exactly one matching specialization is found, the
- // instantiation is generated from that specialization.
- // We don't need to do anything for this.
- } else {
- // -- If more than one matching specialization is found, the
- // partial order rules (14.5.4.2) are used to determine
- // whether one of the specializations is more specialized
- // than the others. If none of the specializations is more
- // specialized than all of the other matching
- // specializations, then the use of the class template is
- // ambiguous and the program is ill-formed.
- for (SmallVectorImpl<MatchResult>::iterator P = Best + 1,
- PEnd = Matched.end();
- P != PEnd; ++P) {
- if (S.getMoreSpecializedPartialSpecialization(
- P->Partial, Best->Partial, PointOfInstantiation) == P->Partial)
- Best = P;
- }
-
- // Determine if the best partial specialization is more specialized than
- // the others.
- bool Ambiguous = false;
- for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
- PEnd = Matched.end();
- P != PEnd; ++P) {
- if (P != Best &&
- S.getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
- PointOfInstantiation) !=
- Best->Partial) {
- Ambiguous = true;
- break;
+ if (Matched.size() >= 1) {
+ SmallVectorImpl<MatchResult>::iterator Best = Matched.begin();
+ if (Matched.size() == 1) {
+ // -- If exactly one matching specialization is found, the
+ // instantiation is generated from that specialization.
+ // We don't need to do anything for this.
+ } else {
+ // -- If more than one matching specialization is found, the
+ // partial order rules (14.5.4.2) are used to determine
+ // whether one of the specializations is more specialized
+ // than the others. If none of the specializations is more
+ // specialized than all of the other matching
+ // specializations, then the use of the class template is
+ // ambiguous and the program is ill-formed.
+ for (SmallVectorImpl<MatchResult>::iterator P = Best + 1,
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (S.getMoreSpecializedPartialSpecialization(
+ P->Partial, Best->Partial, PointOfInstantiation) ==
+ P->Partial)
+ Best = P;
}
- }
-
- if (Ambiguous) {
- // Partial ordering did not produce a clear winner. Complain.
- Inst.Clear();
- ClassTemplateSpec->setInvalidDecl();
- S.Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous)
- << ClassTemplateSpec;
-
- // Print the matching partial specializations.
+
+ // Determine if the best partial specialization is more specialized than
+ // the others.
+ bool Ambiguous = false;
for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
PEnd = Matched.end();
- P != PEnd; ++P)
- S.Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
- << S.getTemplateArgumentBindingsText(
- P->Partial->getTemplateParameters(), *P->Args);
+ P != PEnd; ++P) {
+ if (P != Best && S.getMoreSpecializedPartialSpecialization(
+ P->Partial, Best->Partial,
+ PointOfInstantiation) != Best->Partial) {
+ Ambiguous = true;
+ break;
+ }
+ }
+
+ if (Ambiguous) {
+ // Partial ordering did not produce a clear winner. Complain.
+ Inst.Clear();
+ ClassTemplateSpec->setInvalidDecl();
+ S.Diag(PointOfInstantiation,
+ diag::err_partial_spec_ordering_ambiguous)
+ << ClassTemplateSpec;
+
+ // Print the matching partial specializations.
+ for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P)
+ S.Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
+ << S.getTemplateArgumentBindingsText(
+ P->Partial->getTemplateParameters(), *P->Args);
- return nullptr;
+ return nullptr;
+ }
}
+
+ ClassTemplateSpec->setInstantiationOf(Best->Partial, Best->Args);
+ } else {
+ // -- If no matches are found, the instantiation is generated
+ // from the primary template.
}
-
+ }
+
+ CXXRecordDecl *Pattern = nullptr;
+ Specialized = ClassTemplateSpec->getSpecializedTemplateOrPartial();
+ if (auto *PartialSpec =
+ Specialized.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
// Instantiate using the best class template partial specialization.
- ClassTemplatePartialSpecializationDecl *OrigPartialSpec = Best->Partial;
- while (OrigPartialSpec->getInstantiatedFromMember()) {
+ while (PartialSpec->getInstantiatedFromMember()) {
// If we've found an explicit specialization of this class template,
// stop here and use that as the pattern.
- if (OrigPartialSpec->isMemberSpecialization())
+ if (PartialSpec->isMemberSpecialization())
break;
-
- OrigPartialSpec = OrigPartialSpec->getInstantiatedFromMember();
+
+ PartialSpec = PartialSpec->getInstantiatedFromMember();
}
-
- Pattern = OrigPartialSpec;
- ClassTemplateSpec->setInstantiationOf(Best->Partial, Best->Args);
+ Pattern = PartialSpec;
} else {
- // -- If no matches are found, the instantiation is generated
- // from the primary template.
- ClassTemplateDecl *OrigTemplate = Template;
- while (OrigTemplate->getInstantiatedFromMemberTemplate()) {
+ ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
+ while (Template->getInstantiatedFromMemberTemplate()) {
// If we've found an explicit specialization of this class template,
// stop here and use that as the pattern.
- if (OrigTemplate->isMemberSpecialization())
+ if (Template->isMemberSpecialization())
break;
-
- OrigTemplate = OrigTemplate->getInstantiatedFromMemberTemplate();
+
+ Template = Template->getInstantiatedFromMemberTemplate();
}
-
- Pattern = OrigTemplate->getTemplatedDecl();
+ Pattern = Template->getTemplatedDecl();
}
return Pattern;
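For reference, the matching and ordering logic above implements the behavior
of cases such as:

    template <typename T, typename U> struct S {};  // primary template
    template <typename T> struct S<T, int> {};      // partial spec #1
    template <typename T> struct S<int, T> {};      // partial spec #2

    S<char, int> a; // only #1 matches
    S<int, char> b; // only #2 matches
    S<int, int>  c; // both match, neither is more specialized:
                    // err_partial_spec_ordering_ambiguous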
@@ -2525,7 +2542,7 @@ bool Sema::InstantiateClassTemplateSpecialization(
Complain);
}
-/// \brief Instantiates the definitions of all of the member
+/// Instantiates the definitions of all of the members
/// of the given class, which is an instantiation of a class template
/// or a member class of a template.
void
@@ -2732,7 +2749,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
}
}
-/// \brief Instantiate the definitions of all of the members of the
+/// Instantiate the definitions of all of the members of the
/// given class template specialization, which was named as part of an
/// explicit instantiation.
void
@@ -2808,7 +2825,7 @@ Sema::SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
return Instantiator.TransformNestedNameSpecifierLoc(NNS);
}
-/// \brief Do template substitution on declaration name info.
+/// Do template substitution on declaration name info.
DeclarationNameInfo
Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs) {
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index d8af8f34530b..5109dc8290f9 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -18,11 +18,12 @@
#include "clang/AST/DependentDiagnostic.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
-#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateInstCallback.h"
using namespace clang;
@@ -175,7 +176,8 @@ static void instantiateDependentAllocAlignAttr(
Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
const AllocAlignAttr *Align, Decl *New) {
Expr *Param = IntegerLiteral::Create(
- S.getASTContext(), llvm::APInt(64, Align->getParamIndex()),
+ S.getASTContext(),
+ llvm::APInt(64, Align->getParamIndex().getSourceIndex()),
S.getASTContext().UnsignedLongLongTy, Align->getLocation());
S.AddAllocAlignAttr(Align->getLocation(), New, Param,
Align->getSpellingListIndex());
@@ -343,14 +345,6 @@ static void instantiateOMPDeclareSimdDeclAttr(
Attr.getRange());
}
-static bool DeclContainsAttr(const Decl *D, const Attr *NewAttr) {
- if (!D->hasAttrs() || NewAttr->duplicatesAllowed())
- return false;
- return llvm::find_if(D->getAttrs(), [NewAttr](const Attr *Attr) {
- return Attr->getKind() == NewAttr->getKind();
- }) != D->getAttrs().end();
-}
-
void Sema::InstantiateAttrsForDecl(
const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Tmpl,
Decl *New, LateInstantiatedAttrVec *LateAttrs,
@@ -365,7 +359,7 @@ void Sema::InstantiateAttrsForDecl(
Attr *NewAttr = sema::instantiateTemplateAttributeForDecl(
TmplAttr, Context, *this, TemplateArgs);
- if (NewAttr && !DeclContainsAttr(New, NewAttr))
+ if (NewAttr)
New->addAttr(NewAttr);
}
}
@@ -470,8 +464,7 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
*this, TemplateArgs);
-
- if (NewAttr && !DeclContainsAttr(New, NewAttr))
+ if (NewAttr)
New->addAttr(NewAttr);
}
}
@@ -749,7 +742,7 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
if (D->isNRVOVariable()) {
QualType ReturnType = cast<FunctionDecl>(DC)->getReturnType();
- if (SemaRef.isCopyElisionCandidate(ReturnType, Var, false))
+ if (SemaRef.isCopyElisionCandidate(ReturnType, Var, Sema::CES_Strict))
Var->setNRVOVariable(true);
}
@@ -1049,8 +1042,7 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
SemaRef.SubstType(TI->getType(), TemplateArgs,
UnderlyingLoc, DeclarationName());
SemaRef.CheckEnumRedeclaration(Def->getLocation(), Def->isScoped(),
- DefnUnderlying,
- /*EnumUnderlyingIsImplicit=*/false, Enum);
+ DefnUnderlying, /*IsFixed=*/true, Enum);
}
}
@@ -1126,8 +1118,7 @@ void TemplateDeclInstantiator::InstantiateEnumDefinition(
}
SemaRef.ActOnEnumBody(Enum->getLocation(), Enum->getBraceRange(), Enum,
- Enumerators,
- nullptr, nullptr);
+ Enumerators, nullptr, ParsedAttributesView());
}
Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) {
@@ -1564,7 +1555,7 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
return Record;
}
-/// \brief Adjust the given function type for an instantiation of the
+/// Adjust the given function type for an instantiation of the
/// given declaration, to cope with modifications to the function's type that
/// aren't reflected in the type-source information.
///
@@ -1661,6 +1652,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
NameInfo, T, TInfo, D->getSourceRange().getEnd());
if (DGuide->isCopyDeductionCandidate())
cast<CXXDeductionGuideDecl>(Function)->setIsCopyDeductionCandidate();
+ Function->setAccess(D->getAccess());
} else {
Function = FunctionDecl::Create(
SemaRef.Context, DC, D->getInnerLocStart(), NameInfo, T, TInfo,
@@ -1815,45 +1807,24 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
// apply to non-template function declarations and definitions also apply
// to these implicit definitions.
if (D->isThisDeclarationADefinition()) {
- // Check for a function body.
- const FunctionDecl *Definition = nullptr;
- if (Function->isDefined(Definition) &&
- Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
- SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
- << Function->getDeclName();
- SemaRef.Diag(Definition->getLocation(), diag::note_previous_definition);
- }
- // Check for redefinitions due to other instantiations of this or
- // a similar friend function.
- else for (auto R : Function->redecls()) {
- if (R == Function)
- continue;
-
- // If some prior declaration of this function has been used, we need
- // to instantiate its definition.
- if (!QueuedInstantiation && R->isUsed(false)) {
- if (MemberSpecializationInfo *MSInfo =
- Function->getMemberSpecializationInfo()) {
- if (MSInfo->getPointOfInstantiation().isInvalid()) {
- SourceLocation Loc = R->getLocation(); // FIXME
- MSInfo->setPointOfInstantiation(Loc);
- SemaRef.PendingLocalImplicitInstantiations.push_back(
- std::make_pair(Function, Loc));
- QueuedInstantiation = true;
- }
- }
- }
-
- // If some prior declaration of this function was a friend with an
- // uninstantiated definition, reject it.
- if (R->getFriendObjectKind()) {
- if (const FunctionDecl *RPattern =
- R->getTemplateInstantiationPattern()) {
- if (RPattern->isDefined(RPattern)) {
- SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
- << Function->getDeclName();
- SemaRef.Diag(R->getLocation(), diag::note_previous_definition);
- break;
+ SemaRef.CheckForFunctionRedefinition(Function);
+ if (!Function->isInvalidDecl()) {
+ for (auto R : Function->redecls()) {
+ if (R == Function)
+ continue;
+
+ // If some prior declaration of this function has been used, we need
+ // to instantiate its definition.
+ if (!QueuedInstantiation && R->isUsed(false)) {
+ if (MemberSpecializationInfo *MSInfo =
+ Function->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ SourceLocation Loc = R->getLocation(); // FIXME
+ MSInfo->setPointOfInstantiation(Loc);
+ SemaRef.PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Function, Loc));
+ QueuedInstantiation = true;
+ }
}
}
}
@@ -2676,7 +2647,8 @@ Decl *TemplateDeclInstantiator::instantiateUnresolvedUsingDecl(
NamedDecl *UD = SemaRef.BuildUsingDeclaration(
/*Scope*/ nullptr, D->getAccess(), D->getUsingLoc(),
- /*HasTypename*/ TD, TypenameLoc, SS, NameInfo, EllipsisLoc, nullptr,
+ /*HasTypename*/ TD, TypenameLoc, SS, NameInfo, EllipsisLoc,
+ ParsedAttributesView(),
/*IsInstantiation*/ true);
if (UD)
SemaRef.Context.setInstantiatedFromUsingDecl(UD, D);
@@ -2697,9 +2669,9 @@ Decl *TemplateDeclInstantiator::VisitUnresolvedUsingValueDecl(
Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
SmallVector<NamedDecl*, 8> Expansions;
for (auto *UD : D->expansions()) {
- if (auto *NewUD =
+ if (NamedDecl *NewUD =
SemaRef.FindInstantiatedDecl(D->getLocation(), UD, TemplateArgs))
- Expansions.push_back(cast<NamedDecl>(NewUD));
+ Expansions.push_back(NewUD);
else
return nullptr;
}
@@ -2740,6 +2712,8 @@ Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
assert(Specialization && "Class scope Specialization is null");
SemaRef.Context.setClassScopeSpecializationPattern(Specialization, OldFD);
+ // FIXME: If this is a definition, check for redefinition errors!
+
return NewFD;
}
@@ -2859,7 +2833,10 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D) {
Decl *
TemplateDeclInstantiator::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
- return VisitFunctionDecl(D, nullptr);
+ Decl *Inst = VisitFunctionDecl(D, nullptr);
+ if (Inst && !D->getDescribedFunctionTemplate())
+ Owner->addDecl(Inst);
+ return Inst;
}
Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D) {
@@ -3112,7 +3089,7 @@ Decl *Sema::SubstDecl(Decl *D, DeclContext *Owner,
return Instantiator.Visit(D);
}
-/// \brief Instantiates a nested template parameter list in the current
+/// Instantiates a nested template parameter list in the current
/// instantiation context.
///
/// \param L The parameter list to instantiate
@@ -3148,7 +3125,14 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
return InstL;
}
-/// \brief Instantiate the declaration of a class template partial
+TemplateParameterList *
+Sema::SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateDeclInstantiator Instantiator(*this, Owner, TemplateArgs);
+ return Instantiator.SubstTemplateParams(Params);
+}
+
+/// Instantiate the declaration of a class template partial
/// specialization.
///
/// \param ClassTemplate the (instantiated) class template that is partially
@@ -3282,7 +3266,7 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
return InstPartialSpec;
}
-/// \brief Instantiate the declaration of a variable template partial
+/// Instantiate the declaration of a variable template partial
/// specialization.
///
/// \param VarTemplate the (instantiated) variable template that is partially
@@ -3624,7 +3608,7 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
TemplateArgs);
}
-/// \brief Initializes the common fields of an instantiation function
+/// Initializes the common fields of an instantiation function
/// declaration (New) from the corresponding fields of its template (Tmpl).
///
/// \returns true if there was an error
@@ -3657,8 +3641,10 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
assert(FunTmpl->getTemplatedDecl() == Tmpl &&
"Deduction from the wrong function template?");
(void) FunTmpl;
+ atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
ActiveInst.Entity = New;
+ atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
}
}
@@ -3710,7 +3696,7 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
return false;
}
-/// \brief Initializes common fields of an instantiated method
+/// Initializes common fields of an instantiated method
/// declaration (New) from the corresponding fields of its template
/// (Tmpl).
///
@@ -3729,6 +3715,30 @@ TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
return false;
}
+/// Instantiate (or find existing instantiation of) a function template with a
+/// given set of template arguments.
+///
+/// Usually this should not be used, and template argument deduction should be
+/// used in its place.
+FunctionDecl *
+Sema::InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
+ const TemplateArgumentList *Args,
+ SourceLocation Loc) {
+ FunctionDecl *FD = FTD->getTemplatedDecl();
+
+ sema::TemplateDeductionInfo Info(Loc);
+ InstantiatingTemplate Inst(
+ *this, Loc, FTD, Args->asArray(),
+ CodeSynthesisContext::ExplicitTemplateArgumentSubstitution, Info);
+ if (Inst.isInvalid())
+ return nullptr;
+
+ ContextRAII SavedContext(*this, FD);
+ MultiLevelTemplateArgumentList MArgs(*Args);
+
+ return cast_or_null<FunctionDecl>(SubstDecl(FD, FD->getParent(), MArgs));
+}
+
/// In the MS ABI, we need to instantiate default arguments of dllexported
/// default constructors along with the constructor definition. This allows IR
/// gen to emit a constructor closure which calls the default constructor with
@@ -3750,7 +3760,7 @@ static void InstantiateDefaultCtorDefaultArgs(Sema &S,
}
}
-/// \brief Instantiate the definition of the given function from its
+/// Instantiate the definition of the given function from its
/// template.
///
/// \param PointOfInstantiation the point at which the instantiation was
@@ -3812,7 +3822,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
PendingInstantiations.push_back(
std::make_pair(Function, PointOfInstantiation));
} else if (TSK == TSK_ImplicitInstantiation) {
- if (AtEndOfTU && !getDiagnostics().hasErrorOccurred()) {
+ if (AtEndOfTU && !getDiagnostics().hasErrorOccurred() &&
+ !getSourceManager().isInSystemHeader(PatternDecl->getLocStart())) {
Diag(PointOfInstantiation, diag::warn_func_template_missing)
<< Function;
Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
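The warn_func_template_missing path (now suppressed when the pattern lives in
a system header) fires on code like:

    template <typename T> void f(); // declared, but never defined
    void use() { f<int>(); }        // at end of TU: instantiation of f<int>
                                    // required, but no definition is available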
@@ -3829,8 +3840,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (PatternDecl->isLateTemplateParsed() &&
!LateTemplateParser) {
Function->setInstantiationIsPending(true);
- PendingInstantiations.push_back(
- std::make_pair(Function, PointOfInstantiation));
+ LateParsedInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
return;
}
@@ -3887,7 +3898,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
return;
- PrettyDeclStackTraceEntry CrashInfo(*this, Function, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Function, SourceLocation(),
"instantiating function definition");
// The instantiation is visible here, even if it was first declared in an
@@ -3932,8 +3943,10 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
TemplateArgs))
return;
+ StmtResult Body;
if (PatternDecl->hasSkippedBody()) {
ActOnSkippedFunctionBody(Function);
+ Body = nullptr;
} else {
if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Function)) {
// If this is a constructor, instantiate the member initializers.
@@ -3949,16 +3962,14 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
// Instantiate the function body.
- StmtResult Body = SubstStmt(Pattern, TemplateArgs);
+ Body = SubstStmt(Pattern, TemplateArgs);
if (Body.isInvalid())
Function->setInvalidDecl();
-
- // FIXME: finishing the function body while in an expression evaluation
- // context seems wrong. Investigate more.
- ActOnFinishFunctionBody(Function, Body.get(),
- /*IsInstantiation=*/true);
}
+ // FIXME: finishing the function body while in an expression evaluation
+ // context seems wrong. Investigate more.
+ ActOnFinishFunctionBody(Function, Body.get(), /*IsInstantiation=*/true);
PerformDependentDiagnostics(PatternDecl, TemplateArgs);
@@ -4025,7 +4036,7 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
VarTemplate, FromVar, InsertPos, TemplateArgsInfo, Converted));
}
-/// \brief Instantiates a variable template specialization by completing it
+/// Instantiates a variable template specialization by completing it
/// with appropriate type information and initializer.
VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
@@ -4075,6 +4086,7 @@ void Sema::BuildVariableInstantiation(
NewVar->setTSCSpec(OldVar->getTSCSpec());
NewVar->setInitStyle(OldVar->getInitStyle());
NewVar->setCXXForRangeDecl(OldVar->isCXXForRangeDecl());
+ NewVar->setObjCForDecl(OldVar->isObjCForDecl());
NewVar->setConstexpr(OldVar->isConstexpr());
NewVar->setInitCapture(OldVar->isInitCapture());
NewVar->setPreviousDeclInSameBlockScope(
@@ -4135,7 +4147,8 @@ void Sema::BuildVariableInstantiation(
// it right away if the type contains 'auto'.
if ((!isa<VarTemplateSpecializationDecl>(NewVar) &&
!InstantiatingVarTemplate &&
- !(OldVar->isInline() && OldVar->isThisDeclarationADefinition())) ||
+ !(OldVar->isInline() && OldVar->isThisDeclarationADefinition() &&
+ !NewVar->isThisDeclarationADefinition())) ||
NewVar->getType()->isUndeducedType())
InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs);
@@ -4147,7 +4160,7 @@ void Sema::BuildVariableInstantiation(
DiagnoseUnusedDecl(NewVar);
}
-/// \brief Instantiate the initializer of a variable.
+/// Instantiate the initializer of a variable.
void Sema::InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs) {
@@ -4193,7 +4206,9 @@ void Sema::InstantiateVariableInitializer(
Var->setInvalidDecl();
}
} else {
- if (Var->isStaticDataMember()) {
+ // `inline` variables are a definition and declaration all in one; we won't
+ // pick up an initializer from anywhere else.
+ if (Var->isStaticDataMember() && !Var->isInline()) {
if (!Var->isOutOfLine())
return;
@@ -4204,14 +4219,17 @@ void Sema::InstantiateVariableInitializer(
}
// We'll add an initializer to a for-range declaration later.
- if (Var->isCXXForRangeDecl())
+ if (Var->isCXXForRangeDecl() || Var->isObjCForDecl())
return;
ActOnUninitializedDecl(Var);
}
+
+ if (getLangOpts().CUDA)
+ checkAllowedCUDAInitializer(Var);
}
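The distinction drawn above matters for C++17 inline static data members,
which are definitions in their own right; a minimal illustration:

    template <typename T> struct X {
      static int a;            // initializer comes from the out-of-line
                               // definition, picked up separately
      static inline int b = 1; // inline: this is the definition, so the
                               // initializer is instantiated right here
    };
    template <typename T> int X<T>::a = 0;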
-/// \brief Instantiate the definition of the given variable from its
+/// Instantiate the definition of the given variable from its
/// template.
///
/// \param PointOfInstantiation the point at which the instantiation was
@@ -4295,7 +4313,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
return;
- PrettyDeclStackTraceEntry CrashInfo(*this, Var, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable initializer");
// The instantiation is visible here, even if it was first declared in an
@@ -4347,7 +4365,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
std::make_pair(Var, PointOfInstantiation));
} else if (TSK == TSK_ImplicitInstantiation) {
// Warn about missing definition at the end of translation unit.
- if (AtEndOfTU && !getDiagnostics().hasErrorOccurred()) {
+ if (AtEndOfTU && !getDiagnostics().hasErrorOccurred() &&
+ !getSourceManager().isInSystemHeader(PatternDecl->getLocStart())) {
Diag(PointOfInstantiation, diag::warn_var_template_missing)
<< Var;
Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
@@ -4407,7 +4426,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
return;
- PrettyDeclStackTraceEntry CrashInfo(*this, Var, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable definition");
// If we're performing recursive template instantiation, create our own
@@ -4829,7 +4848,7 @@ static NamedDecl *findInstantiationOf(ASTContext &Ctx,
return nullptr;
}
-/// \brief Finds the instantiation of the given declaration context
+/// Finds the instantiation of the given declaration context
/// within the current instantiation.
///
/// \returns NULL if there was an error
@@ -4841,7 +4860,7 @@ DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
} else return DC;
}
-/// \brief Find the instantiation of the given declaration within the
+/// Find the instantiation of the given declaration within the
/// current instantiation.
///
/// This routine is intended to be used when \p D is a declaration
@@ -5154,7 +5173,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
return D;
}
-/// \brief Performs template instantiation for all implicit template
+/// Performs template instantiation for all implicit template
/// instantiations we have seen until this point.
void Sema::PerformPendingInstantiations(bool LocalOnly) {
while (!PendingLocalImplicitInstantiations.empty() ||
@@ -5211,7 +5230,7 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
break;
}
- PrettyDeclStackTraceEntry CrashInfo(*this, Var, SourceLocation(),
+ PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable definition");
bool DefinitionRequired = Var->getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition;
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index d81837dad508..fc1641334273 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -26,21 +26,8 @@ using namespace clang;
// Visitor that collects unexpanded parameter packs
//----------------------------------------------------------------------------
-/// \brief Retrieve the depth and index of a parameter pack.
-static std::pair<unsigned, unsigned>
-getDepthAndIndex(NamedDecl *ND) {
- if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
- return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
-
- TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
- return std::make_pair(TTP->getDepth(), TTP->getIndex());
-}
-
namespace {
- /// \brief A class that collects unexpanded parameter packs.
+ /// A class that collects unexpanded parameter packs.
class CollectUnexpandedParameterPacksVisitor :
public RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
{
@@ -83,14 +70,14 @@ namespace {
// Recording occurrences of (unexpanded) parameter packs.
//------------------------------------------------------------------------
- /// \brief Record occurrences of template type parameter packs.
+ /// Record occurrences of template type parameter packs.
bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
if (TL.getTypePtr()->isParameterPack())
addUnexpanded(TL.getTypePtr(), TL.getNameLoc());
return true;
}
- /// \brief Record occurrences of template type parameter packs
+ /// Record occurrences of template type parameter packs
/// when we don't have proper source-location information for
/// them.
///
@@ -102,7 +89,7 @@ namespace {
return true;
}
- /// \brief Record occurrences of function and non-type template
+ /// Record occurrences of function and non-type template
/// parameter packs in an expression.
bool VisitDeclRefExpr(DeclRefExpr *E) {
if (E->getDecl()->isParameterPack())
@@ -111,7 +98,7 @@ namespace {
return true;
}
- /// \brief Record occurrences of template template parameter packs.
+ /// Record occurrences of template template parameter packs.
bool TraverseTemplateName(TemplateName Template) {
if (auto *TTP = dyn_cast_or_null<TemplateTemplateParmDecl>(
Template.getAsTemplateDecl())) {
@@ -122,7 +109,7 @@ namespace {
return inherited::TraverseTemplateName(Template);
}
- /// \brief Suppress traversal into Objective-C container literal
+ /// Suppress traversal into Objective-C container literal
/// elements that are pack expansions.
bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
if (!E->containsUnexpandedParameterPack())
@@ -142,7 +129,7 @@ namespace {
// Pruning the search for unexpanded parameter packs.
//------------------------------------------------------------------------
- /// \brief Suppress traversal into statements and expressions that
+ /// Suppress traversal into statements and expressions that
/// do not contain unexpanded parameter packs.
bool TraverseStmt(Stmt *S) {
Expr *E = dyn_cast_or_null<Expr>(S);
@@ -152,7 +139,7 @@ namespace {
return true;
}
- /// \brief Suppress traversal into types that do not contain
+ /// Suppress traversal into types that do not contain
/// unexpanded parameter packs.
bool TraverseType(QualType T) {
if ((!T.isNull() && T->containsUnexpandedParameterPack()) || InLambda)
@@ -161,7 +148,7 @@ namespace {
return true;
}
- /// \brief Suppress traversal into types with location information
+ /// Suppress traversal into types with location information
/// that do not contain unexpanded parameter packs.
bool TraverseTypeLoc(TypeLoc TL) {
if ((!TL.getType().isNull() &&
@@ -172,7 +159,7 @@ namespace {
return true;
}
- /// \brief Suppress traversal of parameter packs.
+ /// Suppress traversal of parameter packs.
bool TraverseDecl(Decl *D) {
// A function parameter pack is a pack expansion, so cannot contain
// an unexpanded parameter pack. Likewise for a template parameter
@@ -183,7 +170,7 @@ namespace {
return inherited::TraverseDecl(D);
}
- /// \brief Suppress traversal of pack-expanded attributes.
+ /// Suppress traversal of pack-expanded attributes.
bool TraverseAttr(Attr *A) {
if (A->isPackExpansion())
return true;
@@ -191,7 +178,7 @@ namespace {
return inherited::TraverseAttr(A);
}
- /// \brief Suppress traversal of pack expansion expressions and types.
+ /// Suppress traversal of pack expansion expressions and types.
///@{
bool TraversePackExpansionType(PackExpansionType *T) { return true; }
bool TraversePackExpansionTypeLoc(PackExpansionTypeLoc TL) { return true; }
@@ -200,7 +187,7 @@ namespace {
///@}
- /// \brief Suppress traversal of using-declaration pack expansion.
+ /// Suppress traversal of using-declaration pack expansion.
bool TraverseUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
if (D->isPackExpansion())
return true;
@@ -208,7 +195,7 @@ namespace {
return inherited::TraverseUnresolvedUsingValueDecl(D);
}
- /// \brief Suppress traversal of using-declaration pack expansion.
+ /// Suppress traversal of using-declaration pack expansion.
bool TraverseUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
if (D->isPackExpansion())
return true;
@@ -216,7 +203,7 @@ namespace {
return inherited::TraverseUnresolvedUsingTypenameDecl(D);
}
- /// \brief Suppress traversal of template argument pack expansions.
+ /// Suppress traversal of template argument pack expansions.
bool TraverseTemplateArgument(const TemplateArgument &Arg) {
if (Arg.isPackExpansion())
return true;
@@ -224,7 +211,7 @@ namespace {
return inherited::TraverseTemplateArgument(Arg);
}
- /// \brief Suppress traversal of template argument pack expansions.
+ /// Suppress traversal of template argument pack expansions.
bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) {
if (ArgLoc.getArgument().isPackExpansion())
return true;
@@ -232,7 +219,7 @@ namespace {
return inherited::TraverseTemplateArgumentLoc(ArgLoc);
}
- /// \brief Suppress traversal of base specifier pack expansions.
+ /// Suppress traversal of base specifier pack expansions.
bool TraverseCXXBaseSpecifier(const CXXBaseSpecifier &Base) {
if (Base.isPackExpansion())
return true;
@@ -240,7 +227,7 @@ namespace {
return inherited::TraverseCXXBaseSpecifier(Base);
}
- /// \brief Suppress traversal of mem-initializer pack expansions.
+ /// Suppress traversal of mem-initializer pack expansions.
bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
if (Init->isPackExpansion())
return true;
@@ -248,7 +235,7 @@ namespace {
return inherited::TraverseConstructorInitializer(Init);
}
- /// \brief Note whether we're traversing a lambda containing an unexpanded
+ /// Note whether we're traversing a lambda containing an unexpanded
/// parameter pack. In this case, the unexpanded pack can occur anywhere,
/// including all the places where we normally wouldn't look. Within a
/// lambda, we don't propagate the 'contains unexpanded parameter pack' bit
@@ -284,7 +271,7 @@ namespace {
};
}
-/// \brief Determine whether it's possible for an unexpanded parameter pack to
+/// Determine whether it's possible for an unexpanded parameter pack to
/// be valid in this location. This only happens when we're in a declaration
/// that is nested within an expression that could be expanded, such as a
/// lambda-expression within a function call.
@@ -298,7 +285,7 @@ bool Sema::isUnexpandedParameterPackPermitted() {
return false;
}
-/// \brief Diagnose all of the unexpanded parameter packs in the given
+/// Diagnose all of the unexpanded parameter packs in the given
/// vector.
bool
Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
@@ -314,8 +301,18 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
// later.
SmallVector<UnexpandedParameterPack, 4> LambdaParamPackReferences;
for (unsigned N = FunctionScopes.size(); N; --N) {
- if (sema::LambdaScopeInfo *LSI =
- dyn_cast<sema::LambdaScopeInfo>(FunctionScopes[N-1])) {
+ sema::FunctionScopeInfo *Func = FunctionScopes[N-1];
+ // We do not permit pack expansion that would duplicate a statement
+ // expression, not even within a lambda.
+ // FIXME: We could probably support this for statement expressions that do
+ // not contain labels, and for pack expansions that expand both the stmt
+ // expr and the enclosing lambda.
+ if (std::any_of(
+ Func->CompoundScopes.begin(), Func->CompoundScopes.end(),
+ [](sema::CompoundScopeInfo &CSI) { return CSI.IsStmtExpr; }))
+ break;
+
+ if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Func)) {
if (N == FunctionScopes.size()) {
for (auto &Param : Unexpanded) {
auto *PD = dyn_cast_or_null<ParmVarDecl>(
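The CompoundScopes check added above stops the lambda scan as soon as any enclosing scope is a GNU statement expression, so a pack in that position is diagnosed immediately rather than deferred to an enclosing lambda. A minimal sketch of the affected shape, assuming GNU statement expressions are enabled (e.g. -std=gnu++14; the names are illustrative, not from the patch):

    template <typename ...T> void f(T ...t) {
      [&] {
        ({ t; });  // unexpanded pack inside a statement expression: the
      };           // enclosing lambda may no longer defer the diagnostic
    }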
@@ -636,7 +633,9 @@ bool Sema::CheckParameterPacksForExpansion(
RetainExpansion = false;
std::pair<IdentifierInfo *, SourceLocation> FirstPack;
bool HaveFirstPack = false;
-
+ Optional<unsigned> NumPartialExpansions;
+ SourceLocation PartiallySubstitutedPackLoc;
+
for (ArrayRef<UnexpandedParameterPack>::iterator i = Unexpanded.begin(),
end = Unexpanded.end();
i != end; ++i) {
@@ -701,8 +700,13 @@ bool Sema::CheckParameterPacksForExpansion(
= CurrentInstantiationScope->getPartiallySubstitutedPack()){
unsigned PartialDepth, PartialIndex;
std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
- if (PartialDepth == Depth && PartialIndex == Index)
+ if (PartialDepth == Depth && PartialIndex == Index) {
RetainExpansion = true;
+ // We don't actually know the new pack size yet.
+ NumPartialExpansions = NewPackSize;
+ PartiallySubstitutedPackLoc = i->second;
+ continue;
+ }
}
}
@@ -732,6 +736,28 @@ bool Sema::CheckParameterPacksForExpansion(
}
}
+ // If we're performing a partial expansion but we also have a full expansion,
+ // expand to the number of common arguments. For example, given:
+ //
+ // template<typename ...T> struct A {
+ // template<typename ...U> void f(pair<T, U>...);
+ // };
+ //
+ // ... a call to 'A<int, int>().f<int>' should expand the pack once and
+ // retain an expansion.
+ if (NumPartialExpansions) {
+ if (NumExpansions && *NumExpansions < *NumPartialExpansions) {
+ NamedDecl *PartialPack =
+ CurrentInstantiationScope->getPartiallySubstitutedPack();
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_partial)
+ << PartialPack << *NumPartialExpansions << *NumExpansions
+ << SourceRange(PartiallySubstitutedPackLoc);
+ return true;
+ }
+
+ NumExpansions = NumPartialExpansions;
+ }
+
return false;
}
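Expanding the comment's example into a hedged, self-contained sketch (std::pair stands in for 'pair'; the expansion count follows the rule just added):

    #include <utility>
    template <typename ...T> struct A {
      template <typename ...U> void f(std::pair<T, U>...);
    };
    void use() {
      // T = <int, int> is fully known; U = <int> is only partially specified,
      // so the pack expands against the known size (2) and retains an
      // expansion for the remaining U to be deduced. Were the explicit U list
      // longer than T, err_pack_expansion_length_conflict_partial would fire.
      A<int, int>().f<int>(std::make_pair(1, 2), std::make_pair(3, 4));
    }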
@@ -812,6 +838,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_void:
case TST_char:
case TST_wchar:
+ case TST_char8:
case TST_char16:
case TST_char32:
case TST_int:
@@ -819,6 +846,8 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_half:
case TST_float:
case TST_double:
+ case TST_Accum:
+ case TST_Fract:
case TST_Float16:
case TST_float128:
case TST_bool:
@@ -871,14 +900,14 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
->containsUnexpandedParameterPack())
return true;
}
- } else if (Chunk.Fun.getExceptionSpecType() == EST_ComputedNoexcept &&
+ } else if (isComputedNoexcept(Chunk.Fun.getExceptionSpecType()) &&
Chunk.Fun.NoexceptExpr->containsUnexpandedParameterPack())
return true;
if (Chunk.Fun.hasTrailingReturnType()) {
QualType T = Chunk.Fun.getTrailingReturnType().get();
- if (!T.isNull() && T->containsUnexpandedParameterPack())
- return true;
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return true;
}
break;
@@ -889,7 +918,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
break;
}
}
-
+
return false;
}
@@ -906,7 +935,7 @@ class ParameterPackValidatorCCC : public CorrectionCandidateCallback {
}
-/// \brief Called when an expression computing the size of a parameter pack
+/// Called when an expression computing the size of a parameter pack
/// is parsed.
///
/// \code
@@ -951,12 +980,12 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
break;
-
+
case LookupResult::Ambiguous:
DiagnoseAmbiguousLookup(R);
return ExprError();
}
-
+
if (!ParameterPack || !ParameterPack->isParameterPack()) {
Diag(NameLoc, diag::err_sizeof_pack_no_pack_name)
<< &Name;
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 2530b766f5f7..ac04cecaf774 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -31,6 +31,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
@@ -47,7 +48,7 @@ enum TypeDiagSelector {
/// isOmittedBlockReturnType - Return true if this declarator is missing a
/// return type because this is an omitted return type on a block literal.
static bool isOmittedBlockReturnType(const Declarator &D) {
- if (D.getContext() != Declarator::BlockLiteralContext ||
+ if (D.getContext() != DeclaratorContext::BlockLiteralContext ||
D.getDeclSpec().hasTypeSpecifier())
return false;
@@ -63,13 +64,17 @@ static bool isOmittedBlockReturnType(const Declarator &D) {
/// diagnoseBadTypeAttribute - Diagnoses a type attribute which
/// doesn't apply to the given type.
-static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
+static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
QualType type) {
TypeDiagSelector WhichType;
bool useExpansionLoc = true;
switch (attr.getKind()) {
- case AttributeList::AT_ObjCGC: WhichType = TDS_Pointer; break;
- case AttributeList::AT_ObjCOwnership: WhichType = TDS_ObjCObjOrBlock; break;
+ case ParsedAttr::AT_ObjCGC:
+ WhichType = TDS_Pointer;
+ break;
+ case ParsedAttr::AT_ObjCOwnership:
+ WhichType = TDS_ObjCObjOrBlock;
+ break;
default:
// Assume everything else was a function attribute.
WhichType = TDS_Function;
@@ -97,47 +102,48 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
// objc_gc applies to Objective-C pointers or, otherwise, to the
// smallest available pointer type (i.e. 'void*' in 'void**').
-#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_ObjCGC: \
- case AttributeList::AT_ObjCOwnership
+#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
+ case ParsedAttr::AT_ObjCGC: \
+ case ParsedAttr::AT_ObjCOwnership
// Calling convention attributes.
-#define CALLING_CONV_ATTRS_CASELIST \
- case AttributeList::AT_CDecl: \
- case AttributeList::AT_FastCall: \
- case AttributeList::AT_StdCall: \
- case AttributeList::AT_ThisCall: \
- case AttributeList::AT_RegCall: \
- case AttributeList::AT_Pascal: \
- case AttributeList::AT_SwiftCall: \
- case AttributeList::AT_VectorCall: \
- case AttributeList::AT_MSABI: \
- case AttributeList::AT_SysVABI: \
- case AttributeList::AT_Pcs: \
- case AttributeList::AT_IntelOclBicc: \
- case AttributeList::AT_PreserveMost: \
- case AttributeList::AT_PreserveAll
+#define CALLING_CONV_ATTRS_CASELIST \
+ case ParsedAttr::AT_CDecl: \
+ case ParsedAttr::AT_FastCall: \
+ case ParsedAttr::AT_StdCall: \
+ case ParsedAttr::AT_ThisCall: \
+ case ParsedAttr::AT_RegCall: \
+ case ParsedAttr::AT_Pascal: \
+ case ParsedAttr::AT_SwiftCall: \
+ case ParsedAttr::AT_VectorCall: \
+ case ParsedAttr::AT_MSABI: \
+ case ParsedAttr::AT_SysVABI: \
+ case ParsedAttr::AT_Pcs: \
+ case ParsedAttr::AT_IntelOclBicc: \
+ case ParsedAttr::AT_PreserveMost: \
+ case ParsedAttr::AT_PreserveAll
// Function type attributes.
-#define FUNCTION_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_NSReturnsRetained: \
- case AttributeList::AT_NoReturn: \
- case AttributeList::AT_Regparm: \
- case AttributeList::AT_AnyX86NoCallerSavedRegisters: \
+#define FUNCTION_TYPE_ATTRS_CASELIST \
+ case ParsedAttr::AT_NSReturnsRetained: \
+ case ParsedAttr::AT_NoReturn: \
+ case ParsedAttr::AT_Regparm: \
+ case ParsedAttr::AT_AnyX86NoCallerSavedRegisters: \
+ case ParsedAttr::AT_AnyX86NoCfCheck: \
CALLING_CONV_ATTRS_CASELIST
// Microsoft-specific type qualifiers.
-#define MS_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_Ptr32: \
- case AttributeList::AT_Ptr64: \
- case AttributeList::AT_SPtr: \
- case AttributeList::AT_UPtr
+#define MS_TYPE_ATTRS_CASELIST \
+ case ParsedAttr::AT_Ptr32: \
+ case ParsedAttr::AT_Ptr64: \
+ case ParsedAttr::AT_SPtr: \
+ case ParsedAttr::AT_UPtr
// Nullability qualifiers.
-#define NULLABILITY_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_TypeNonNull: \
- case AttributeList::AT_TypeNullable: \
- case AttributeList::AT_TypeNullUnspecified
+#define NULLABILITY_TYPE_ATTRS_CASELIST \
+ case ParsedAttr::AT_TypeNonNull: \
+ case ParsedAttr::AT_TypeNullable: \
+ case ParsedAttr::AT_TypeNullUnspecified
namespace {
/// An object which stores processing state for the entire
@@ -160,11 +166,11 @@ namespace {
bool hasSavedAttrs;
/// The original set of attributes on the DeclSpec.
- SmallVector<AttributeList*, 2> savedAttrs;
+ SmallVector<ParsedAttr *, 2> savedAttrs;
/// A list of attributes to diagnose the uselessness of when the
/// processing is complete.
- SmallVector<AttributeList*, 2> ignoredTypeAttrs;
+ SmallVector<ParsedAttr *, 2> ignoredTypeAttrs;
public:
TypeProcessingState(Sema &sema, Declarator &declarator)
@@ -193,10 +199,10 @@ namespace {
chunkIndex = idx;
}
- AttributeList *&getCurrentAttrListRef() const {
+ ParsedAttributesView &getCurrentAttributes() const {
if (isProcessingDeclSpec())
- return getMutableDeclSpec().getAttributes().getListRef();
- return declarator.getTypeObject(chunkIndex).getAttrListRef();
+ return getMutableDeclSpec().getAttributes();
+ return declarator.getTypeObject(chunkIndex).getAttrs();
}
/// Save the current set of attributes on the DeclSpec.
@@ -205,16 +211,15 @@ namespace {
if (hasSavedAttrs) return;
DeclSpec &spec = getMutableDeclSpec();
- for (AttributeList *attr = spec.getAttributes().getList(); attr;
- attr = attr->getNext())
- savedAttrs.push_back(attr);
+ for (ParsedAttr &AL : spec.getAttributes())
+ savedAttrs.push_back(&AL);
trivial &= savedAttrs.empty();
hasSavedAttrs = true;
}
/// Record that we had nowhere to put the given type attribute.
/// We will diagnose such attributes later.
- void addIgnoredTypeAttr(AttributeList &attr) {
+ void addIgnoredTypeAttr(ParsedAttr &attr) {
ignoredTypeAttrs.push_back(&attr);
}
@@ -239,46 +244,18 @@ namespace {
void restoreDeclSpecAttrs() {
assert(hasSavedAttrs);
- if (savedAttrs.empty()) {
- getMutableDeclSpec().getAttributes().set(nullptr);
- return;
- }
-
- getMutableDeclSpec().getAttributes().set(savedAttrs[0]);
- for (unsigned i = 0, e = savedAttrs.size() - 1; i != e; ++i)
- savedAttrs[i]->setNext(savedAttrs[i+1]);
- savedAttrs.back()->setNext(nullptr);
+ getMutableDeclSpec().getAttributes().clearListOnly();
+ for (ParsedAttr *AL : savedAttrs)
+ getMutableDeclSpec().getAttributes().addAtStart(AL);
}
};
} // end anonymous namespace
-static void spliceAttrIntoList(AttributeList &attr, AttributeList *&head) {
- attr.setNext(head);
- head = &attr;
-}
-
-static void spliceAttrOutOfList(AttributeList &attr, AttributeList *&head) {
- if (head == &attr) {
- head = attr.getNext();
- return;
- }
-
- AttributeList *cur = head;
- while (true) {
- assert(cur && cur->getNext() && "ran out of attrs?");
- if (cur->getNext() == &attr) {
- cur->setNext(attr.getNext());
- return;
- }
- cur = cur->getNext();
- }
-}
-
-static void moveAttrFromListToList(AttributeList &attr,
- AttributeList *&fromList,
- AttributeList *&toList) {
- spliceAttrOutOfList(attr, fromList);
- spliceAttrIntoList(attr, toList);
+static void moveAttrFromListToList(ParsedAttr &attr,
+ ParsedAttributesView &fromList,
+ ParsedAttributesView &toList) {
+ fromList.remove(&attr);
+ toList.addAtStart(&attr);
}
/// The location of a type attribute.
@@ -291,29 +268,26 @@ enum TypeAttrLocation {
TAL_DeclName
};
-static void processTypeAttrs(TypeProcessingState &state,
- QualType &type, TypeAttrLocation TAL,
- AttributeList *attrs);
+static void processTypeAttrs(TypeProcessingState &state, QualType &type,
+ TypeAttrLocation TAL, ParsedAttributesView &attrs);
-static bool handleFunctionTypeAttr(TypeProcessingState &state,
- AttributeList &attr,
+static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type);
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state,
- AttributeList &attr,
- QualType &type);
+ ParsedAttr &attr, QualType &type);
-static bool handleObjCGCTypeAttr(TypeProcessingState &state,
- AttributeList &attr, QualType &type);
+static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
+ QualType &type);
static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
- AttributeList &attr, QualType &type);
+ ParsedAttr &attr, QualType &type);
static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
- AttributeList &attr, QualType &type) {
- if (attr.getKind() == AttributeList::AT_ObjCGC)
+ ParsedAttr &attr, QualType &type) {
+ if (attr.getKind() == ParsedAttr::AT_ObjCGC)
return handleObjCGCTypeAttr(state, attr, type);
- assert(attr.getKind() == AttributeList::AT_ObjCOwnership);
+ assert(attr.getKind() == ParsedAttr::AT_ObjCOwnership);
return handleObjCOwnershipTypeAttr(state, attr, type);
}
@@ -395,8 +369,7 @@ static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator,
/// didn't apply in whatever position it was written in, try to move
/// it to a more appropriate position.
static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
- AttributeList &attr,
- QualType type) {
+ ParsedAttr &attr, QualType type) {
Declarator &declarator = state.getDeclarator();
// Move it to the outermost normal or block pointer declarator.
@@ -409,13 +382,13 @@ static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
// of a block.
DeclaratorChunk *destChunk = nullptr;
if (state.isProcessingDeclSpec() &&
- attr.getKind() == AttributeList::AT_ObjCOwnership)
+ attr.getKind() == ParsedAttr::AT_ObjCOwnership)
destChunk = maybeMovePastReturnType(declarator, i - 1,
/*onlyBlockPointers=*/true);
if (!destChunk) destChunk = &chunk;
- moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
- destChunk->getAttrListRef());
+ moveAttrFromListToList(attr, state.getCurrentAttributes(),
+ destChunk->getAttrs());
return;
}
@@ -426,12 +399,12 @@ static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
// We may be starting at the return type of a block.
case DeclaratorChunk::Function:
if (state.isProcessingDeclSpec() &&
- attr.getKind() == AttributeList::AT_ObjCOwnership) {
+ attr.getKind() == ParsedAttr::AT_ObjCOwnership) {
if (DeclaratorChunk *dest = maybeMovePastReturnType(
declarator, i,
/*onlyBlockPointers=*/true)) {
- moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
- dest->getAttrListRef());
+ moveAttrFromListToList(attr, state.getCurrentAttributes(),
+ dest->getAttrs());
return;
}
}
@@ -451,10 +424,8 @@ static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
/// Distribute an objc_gc type attribute that was written on the
/// declarator.
-static void
-distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
- AttributeList &attr,
- QualType &declSpecType) {
+static void distributeObjCPointerTypeAttrFromDeclarator(
+ TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType) {
Declarator &declarator = state.getDeclarator();
// objc_gc goes on the innermost pointer to something that's not a
@@ -491,8 +462,8 @@ distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
// attribute from being applied multiple times and gives
// the source-location-filler something to work with.
state.saveDeclSpecAttrs();
- moveAttrFromListToList(attr, declarator.getAttrListRef(),
- declarator.getMutableDeclSpec().getAttributes().getListRef());
+ moveAttrFromListToList(attr, declarator.getAttributes(),
+ declarator.getMutableDeclSpec().getAttributes());
return;
}
}
@@ -500,13 +471,13 @@ distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
// Otherwise, if we found an appropriate chunk, splice the attribute
// into it.
if (innermost != -1U) {
- moveAttrFromListToList(attr, declarator.getAttrListRef(),
- declarator.getTypeObject(innermost).getAttrListRef());
+ moveAttrFromListToList(attr, declarator.getAttributes(),
+ declarator.getTypeObject(innermost).getAttrs());
return;
}
// Otherwise, diagnose when we're done building the type.
- spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ declarator.getAttributes().remove(&attr);
state.addIgnoredTypeAttr(attr);
}
@@ -515,8 +486,7 @@ distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
/// that it didn't apply in whatever position it was written in, try
/// to move it to a more appropriate position.
static void distributeFunctionTypeAttr(TypeProcessingState &state,
- AttributeList &attr,
- QualType type) {
+ ParsedAttr &attr, QualType type) {
Declarator &declarator = state.getDeclarator();
// Try to push the attribute from the return type of a function to
@@ -525,8 +495,8 @@ static void distributeFunctionTypeAttr(TypeProcessingState &state,
DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
switch (chunk.Kind) {
case DeclaratorChunk::Function:
- moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
- chunk.getAttrListRef());
+ moveAttrFromListToList(attr, state.getCurrentAttributes(),
+ chunk.getAttrs());
return;
case DeclaratorChunk::Paren:
@@ -546,11 +516,9 @@ static void distributeFunctionTypeAttr(TypeProcessingState &state,
/// Try to distribute a function type attribute to the innermost
/// function chunk or type. Returns true if the attribute was
/// distributed, false if no location was found.
-static bool
-distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
- AttributeList &attr,
- AttributeList *&attrList,
- QualType &declSpecType) {
+static bool distributeFunctionTypeAttrToInnermost(
+ TypeProcessingState &state, ParsedAttr &attr,
+ ParsedAttributesView &attrList, QualType &declSpecType) {
Declarator &declarator = state.getDeclarator();
// Put it on the innermost function chunk, if there is one.
@@ -558,7 +526,7 @@ distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
DeclaratorChunk &chunk = declarator.getTypeObject(i);
if (chunk.Kind != DeclaratorChunk::Function) continue;
- moveAttrFromListToList(attr, attrList, chunk.getAttrListRef());
+ moveAttrFromListToList(attr, attrList, chunk.getAttrs());
return true;
}
@@ -567,25 +535,23 @@ distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
/// A function type attribute was written in the decl spec. Try to
/// apply it somewhere.
-static void
-distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
- AttributeList &attr,
- QualType &declSpecType) {
+static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
+ ParsedAttr &attr,
+ QualType &declSpecType) {
state.saveDeclSpecAttrs();
// C++11 attributes before the decl specifiers actually appertain to
// the declarators. Move them straight there. We don't support the
// 'put them wherever you like' semantics we allow for GNU attributes.
if (attr.isCXX11Attribute()) {
- moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
- state.getDeclarator().getAttrListRef());
+ moveAttrFromListToList(attr, state.getCurrentAttributes(),
+ state.getDeclarator().getAttributes());
return;
}
// Try to distribute to the innermost.
- if (distributeFunctionTypeAttrToInnermost(state, attr,
- state.getCurrentAttrListRef(),
- declSpecType))
+ if (distributeFunctionTypeAttrToInnermost(
+ state, attr, state.getCurrentAttributes(), declSpecType))
return;
// If that failed, diagnose the bad attribute when the declarator is
@@ -595,25 +561,23 @@ distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
/// A function type attribute was written on the declarator. Try to
/// apply it somewhere.
-static void
-distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
- AttributeList &attr,
- QualType &declSpecType) {
+static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
+ ParsedAttr &attr,
+ QualType &declSpecType) {
Declarator &declarator = state.getDeclarator();
// Try to distribute to the innermost.
- if (distributeFunctionTypeAttrToInnermost(state, attr,
- declarator.getAttrListRef(),
- declSpecType))
+ if (distributeFunctionTypeAttrToInnermost(
+ state, attr, declarator.getAttributes(), declSpecType))
return;
// If that failed, diagnose the bad attribute when the declarator is
// fully built.
- spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ declarator.getAttributes().remove(&attr);
state.addIgnoredTypeAttr(attr);
}
-/// \brief Given that there are attributes written on the declarator
+/// Given that there are attributes written on the declarator
/// itself, try to distribute any type attributes to the appropriate
/// declarator chunk.
///
@@ -625,24 +589,25 @@ distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
QualType &declSpecType) {
// Collect all the type attributes from the declarator itself.
- assert(state.getDeclarator().getAttributes() && "declarator has no attrs!");
- AttributeList *attr = state.getDeclarator().getAttributes();
- AttributeList *next;
- do {
- next = attr->getNext();
-
+ assert(!state.getDeclarator().getAttributes().empty() &&
+ "declarator has no attrs!");
+ // The called functions in this loop actually remove things from the current
+ // list, so iterating over the existing list isn't possible. Instead, make a
+ // non-owning copy and iterate over that.
+ ParsedAttributesView AttrsCopy{state.getDeclarator().getAttributes()};
+ for (ParsedAttr &attr : AttrsCopy) {
// Do not distribute C++11 attributes. They have strict rules for what
// they appertain to.
- if (attr->isCXX11Attribute())
+ if (attr.isCXX11Attribute())
continue;
- switch (attr->getKind()) {
+ switch (attr.getKind()) {
OBJC_POINTER_TYPE_ATTRS_CASELIST:
- distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType);
+ distributeObjCPointerTypeAttrFromDeclarator(state, attr, declSpecType);
break;
FUNCTION_TYPE_ATTRS_CASELIST:
- distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType);
+ distributeFunctionTypeAttrFromDeclarator(state, attr, declSpecType);
break;
MS_TYPE_ATTRS_CASELIST:
@@ -653,13 +618,13 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
// Nullability specifiers cannot go after the declarator-id.
// Objective-C __kindof does not get distributed.
- case AttributeList::AT_ObjCKindOf:
+ case ParsedAttr::AT_ObjCKindOf:
continue;
default:
break;
}
- } while ((attr = next));
+ }
}
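Since ParsedAttributesView is non-owning, the loop above can iterate a cheap copy while the distribute callees mutate the original list. The generic form of the idiom, with a hypothetical callee name:

    ParsedAttributesView Copy{Original};
    for (ParsedAttr &AL : Copy)
      maybeDistributeOrRemove(AL);  // may call Original.remove(&AL); the copy
                                    // keeps this loop valid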
/// Add a synthetic '()' to a block-literal declarator if it is
@@ -759,28 +724,18 @@ static bool checkOmittedBlockReturnType(Sema &S, Declarator &declarator,
return false;
// Warn if we see type attributes for omitted return type on a block literal.
- AttributeList *&attrs =
- declarator.getMutableDeclSpec().getAttributes().getListRef();
- AttributeList *prev = nullptr;
- for (AttributeList *cur = attrs; cur; cur = cur->getNext()) {
- AttributeList &attr = *cur;
- // Skip attributes that were marked to be invalid or non-type
- // attributes.
- if (attr.isInvalid() || !attr.isTypeAttr()) {
- prev = cur;
+ SmallVector<ParsedAttr *, 2> ToBeRemoved;
+ for (ParsedAttr &AL : declarator.getMutableDeclSpec().getAttributes()) {
+ if (AL.isInvalid() || !AL.isTypeAttr())
continue;
- }
- S.Diag(attr.getLoc(),
+ S.Diag(AL.getLoc(),
diag::warn_block_literal_attributes_on_omitted_return_type)
- << attr.getName();
- // Remove cur from the list.
- if (prev) {
- prev->setNext(cur->getNext());
- prev = cur;
- } else {
- attrs = cur->getNext();
- }
+ << AL.getName();
+ ToBeRemoved.push_back(&AL);
}
+ // Remove bad attributes from the list.
+ for (ParsedAttr *AL : ToBeRemoved)
+ declarator.getMutableDeclSpec().getAttributes().remove(AL);
// Warn if we see type qualifiers for omitted return type on a block literal.
const DeclSpec &DS = declarator.getDeclSpec();
@@ -1208,22 +1163,15 @@ TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
return CreateParsedType(Result, ResultTInfo);
}
-static OpenCLAccessAttr::Spelling getImageAccess(const AttributeList *Attrs) {
- if (Attrs) {
- const AttributeList *Next = Attrs;
- do {
- const AttributeList &Attr = *Next;
- Next = Attr.getNext();
- if (Attr.getKind() == AttributeList::AT_OpenCLAccess) {
- return static_cast<OpenCLAccessAttr::Spelling>(
- Attr.getSemanticSpelling());
- }
- } while (Next);
- }
+static OpenCLAccessAttr::Spelling
+getImageAccess(const ParsedAttributesView &Attrs) {
+ for (const ParsedAttr &AL : Attrs)
+ if (AL.getKind() == ParsedAttr::AT_OpenCLAccess)
+ return static_cast<OpenCLAccessAttr::Spelling>(AL.getSemanticSpelling());
return OpenCLAccessAttr::Keyword_read_only;
}
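At the source level, the attribute getImageAccess scans for comes from OpenCL access qualifiers. A hedged OpenCL C sketch, where an unqualified image defaults to read_only exactly as the fallback return above does:

    kernel void copy(write_only image2d_t dst,  // AT_OpenCLAccess: write_only
                     image2d_t src) {           // unqualified: read_only
    }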
-/// \brief Convert the specified declspec to the appropriate type
+/// Convert the specified declspec to the appropriate type
/// object.
/// \param state Specifies the declarator containing the declaration specifier
/// to be converted, along with other associated processing state.
@@ -1235,7 +1183,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Sema &S = state.getSema();
Declarator &declarator = state.getDeclarator();
- const DeclSpec &DS = declarator.getDeclSpec();
+ DeclSpec &DS = declarator.getMutableDeclSpec();
SourceLocation DeclLoc = declarator.getIdentifierLoc();
if (DeclLoc.isInvalid())
DeclLoc = DS.getLocStart();
@@ -1275,6 +1223,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.getUnsignedWCharType();
}
break;
+ case DeclSpec::TST_char8:
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char8Ty;
+ break;
case DeclSpec::TST_char16:
assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
"Unknown TSS value");
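The new TST_char8 case maps straight to Context.Char8Ty. A sketch, assuming a language mode where char8_t is available (e.g. -std=c++2a):

    char8_t c = u8'x';           // DeclSpec::TST_char8 -> Context.Char8Ty
    const char8_t *s = u8"str";  // u8 string literals become arrays of char8_t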
@@ -1291,11 +1244,12 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// The declspec is always missing in a lambda expr context; it is either
// specified with a trailing return type or inferred.
if (S.getLangOpts().CPlusPlus14 &&
- declarator.getContext() == Declarator::LambdaExprContext) {
+ declarator.getContext() == DeclaratorContext::LambdaExprContext) {
// In C++1y, a lambda's implicit return type is 'auto'.
Result = Context.getAutoDeductType();
break;
- } else if (declarator.getContext() == Declarator::LambdaExprContext ||
+ } else if (declarator.getContext() ==
+ DeclaratorContext::LambdaExprContext ||
checkOmittedBlockReturnType(S, declarator,
Context.DependentTy)) {
Result = Context.DependentTy;
@@ -1383,6 +1337,52 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
}
+ case DeclSpec::TST_accum: {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_short:
+ Result = Context.ShortAccumTy;
+ break;
+ case DeclSpec::TSW_unspecified:
+ Result = Context.AccumTy;
+ break;
+ case DeclSpec::TSW_long:
+ Result = Context.LongAccumTy;
+ break;
+ case DeclSpec::TSW_longlong:
+ llvm_unreachable("Unable to specify long long as _Accum width");
+ }
+
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ Result = Context.getCorrespondingUnsignedType(Result);
+
+ if (DS.isTypeSpecSat())
+ Result = Context.getCorrespondingSaturatedType(Result);
+
+ break;
+ }
+ case DeclSpec::TST_fract: {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_short:
+ Result = Context.ShortFractTy;
+ break;
+ case DeclSpec::TSW_unspecified:
+ Result = Context.FractTy;
+ break;
+ case DeclSpec::TSW_long:
+ Result = Context.LongFractTy;
+ break;
+ case DeclSpec::TSW_longlong:
+ llvm_unreachable("Unable to specify long long as _Fract width");
+ }
+
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ Result = Context.getCorrespondingUnsignedType(Result);
+
+ if (DS.isTypeSpecSat())
+ Result = Context.getCorrespondingSaturatedType(Result);
+
+ break;
+ }
case DeclSpec::TST_int128:
if (!S.Context.getTargetInfo().hasInt128Type())
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
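The width, sign, and saturation handling in the two cases above follows the Embedded C fixed-point scheme. A sketch of the declarations it maps, assuming a Clang with fixed-point support enabled (these are C, not C++, types):

    short _Accum sa;           // TSW_short               -> ShortAccumTy
    unsigned long _Fract ulf;  // TSS_unsigned + TSW_long -> unsigned LongFractTy
    _Sat _Accum sat;           // isTypeSpecSat()         -> saturated AccumTy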
@@ -1421,7 +1421,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TST_union:
case DeclSpec::TST_struct:
case DeclSpec::TST_interface: {
- TypeDecl *D = dyn_cast_or_null<TypeDecl>(DS.getRepAsDecl());
+ TagDecl *D = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl());
if (!D) {
// This can happen in C++ with ambiguous lookups.
Result = Context.IntTy;
@@ -1441,7 +1441,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// In both C and C++, make an ElaboratedType.
ElaboratedTypeKeyword Keyword
= ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
- Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result);
+ Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result,
+ DS.isTypeSpecOwned() ? D : nullptr);
break;
}
case DeclSpec::TST_typename: {
@@ -1527,16 +1528,19 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
-#define GENERIC_IMAGE_TYPE(ImgType, Id) \
- case DeclSpec::TST_##ImgType##_t: \
- switch (getImageAccess(DS.getAttributes().getList())) { \
- case OpenCLAccessAttr::Keyword_write_only: \
- Result = Context.Id##WOTy; break; \
- case OpenCLAccessAttr::Keyword_read_write: \
- Result = Context.Id##RWTy; break; \
- case OpenCLAccessAttr::Keyword_read_only: \
- Result = Context.Id##ROTy; break; \
- } \
+#define GENERIC_IMAGE_TYPE(ImgType, Id) \
+ case DeclSpec::TST_##ImgType##_t: \
+ switch (getImageAccess(DS.getAttributes())) { \
+ case OpenCLAccessAttr::Keyword_write_only: \
+ Result = Context.Id##WOTy; \
+ break; \
+ case OpenCLAccessAttr::Keyword_read_write: \
+ Result = Context.Id##RWTy; \
+ break; \
+ case OpenCLAccessAttr::Keyword_read_only: \
+ Result = Context.Id##ROTy; \
+ break; \
+ } \
break;
#include "clang/Basic/OpenCLImageTypes.def"
@@ -1550,6 +1554,15 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
S.checkOpenCLDisabledTypeDeclSpec(DS, Result))
declarator.setInvalidType(true);
+ bool IsFixedPointType = DS.getTypeSpecType() == DeclSpec::TST_accum ||
+ DS.getTypeSpecType() == DeclSpec::TST_fract;
+
+  // Only fixed-point types can be saturated.
+ if (DS.isTypeSpecSat() && !IsFixedPointType)
+ S.Diag(DS.getTypeSpecSatLoc(), diag::err_invalid_saturation_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType(),
+ Context.getPrintingPolicy());
+
// Handle complex types.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
if (S.getLangOpts().Freestanding)
@@ -1572,7 +1585,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// Before we process any type attributes, synthesize a block literal
// function declarator if necessary.
- if (declarator.getContext() == Declarator::BlockLiteralContext)
+ if (declarator.getContext() == DeclaratorContext::BlockLiteralContext)
maybeSynthesizeBlockSignature(state, Result);
// Apply any type attributes from the decl spec. This may cause the
@@ -1580,7 +1593,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// attributes are pushed around.
  // Pipe attributes will be handled later (at GetFullTypeForDeclarator).
if (!DS.isTypeSpecPipe())
- processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes().getList());
+ processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes());
// Apply const/volatile/restrict qualifiers to T.
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
@@ -1747,7 +1760,7 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
return BuildQualifiedType(T, Loc, Q, DS);
}
-/// \brief Build a paren type including \p T.
+/// Build a paren type including \p T.
QualType Sema::BuildParenType(QualType T) {
return Context.getParenType(T);
}
@@ -1857,7 +1870,7 @@ static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
return true;
}
-/// \brief Build a pointer type.
+/// Build a pointer type.
///
/// \param T The type to which we'll be building a pointer.
///
@@ -1897,7 +1910,7 @@ QualType Sema::BuildPointerType(QualType T,
return Context.getPointerType(T);
}
-/// \brief Build a reference type.
+/// Build a reference type.
///
/// \param T The type to which we'll be building a reference.
///
@@ -1959,7 +1972,7 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return Context.getRValueReferenceType(T);
}
-/// \brief Build a Read-only Pipe type.
+/// Build a Read-only Pipe type.
///
/// \param T The type to which we'll be building a Pipe.
///
@@ -1971,7 +1984,7 @@ QualType Sema::BuildReadPipeType(QualType T, SourceLocation Loc) {
return Context.getReadPipeType(T);
}
-/// \brief Build a Write-only Pipe type.
+/// Build a Write-only Pipe type.
///
/// \param T The type to which we'll be building a Pipe.
///
@@ -2005,7 +2018,7 @@ static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
S.LangOpts.OpenCL).isInvalid();
}
-/// \brief Build an array type.
+/// Build an array type.
///
/// \param T The type of each element in the array.
///
@@ -2230,7 +2243,59 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return T;
}
-/// \brief Build an ext-vector type.
+QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
+ SourceLocation AttrLoc) {
+ // The base type must be integer (not Boolean or enumeration) or float, and
+ // can't already be a vector.
+ if (!CurType->isDependentType() &&
+ (!CurType->isBuiltinType() || CurType->isBooleanType() ||
+ (!CurType->isIntegerType() && !CurType->isRealFloatingType()))) {
+ Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << CurType;
+ return QualType();
+ }
+
+ if (SizeExpr->isTypeDependent() || SizeExpr->isValueDependent())
+ return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
+ VectorType::GenericVector);
+
+ llvm::APSInt VecSize(32);
+ if (!SizeExpr->isIntegerConstantExpr(VecSize, Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_type)
+ << "vector_size" << AANT_ArgumentIntegerConstant
+ << SizeExpr->getSourceRange();
+ return QualType();
+ }
+
+ if (CurType->isDependentType())
+ return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
+ VectorType::GenericVector);
+
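+  // vector_size is given in bytes; convert to bits for the checks below.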
+ unsigned VectorSize = static_cast<unsigned>(VecSize.getZExtValue() * 8);
+ unsigned TypeSize = static_cast<unsigned>(Context.getTypeSize(CurType));
+
+ if (VectorSize == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size) << SizeExpr->getSourceRange();
+ return QualType();
+ }
+
+  // The vector size must be a whole multiple of the element type's size.
+ if (VectorSize % TypeSize) {
+ Diag(AttrLoc, diag::err_attribute_invalid_size)
+ << SizeExpr->getSourceRange();
+ return QualType();
+ }
+
+ if (VectorType::isVectorSizeTooLarge(VectorSize / TypeSize)) {
+ Diag(AttrLoc, diag::err_attribute_size_too_large)
+ << SizeExpr->getSourceRange();
+ return QualType();
+ }
+
+ return Context.getVectorType(CurType, VectorSize / TypeSize,
+ VectorType::GenericVector);
+}
+
+/// Build an ext-vector type.
///
/// Run the required checks for the extended vector type.
QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
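BuildVectorType enforces the GNU vector_size rules: the byte count must be a non-zero whole multiple of the element size and not too large, and a dependent size now produces the new DependentVectorType. A hedged source-level sketch:

    typedef int v8si __attribute__((vector_size(32)));   // 32 bytes / 4 = 8 lanes
    // typedef float e0 __attribute__((vector_size(0))); // err_attribute_zero_size
    // typedef float e6 __attribute__((vector_size(6))); // err_attribute_invalid_size
    template <int N>
    using vec = int __attribute__((vector_size(N)));     // dependent: DependentVectorType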
@@ -2401,7 +2466,7 @@ QualType Sema::BuildFunctionType(QualType T,
return Context.getFunctionType(T, ParamTypes, EPI);
}
-/// \brief Build a member pointer type \c T Class::*.
+/// Build a member pointer type \c T Class::*.
///
/// \param T the type to which the member pointer refers.
/// \param Class the class type into which the member pointer points.
@@ -2450,7 +2515,7 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
return Context.getMemberPointerType(T, Class.getTypePtr());
}
-/// \brief Build a block pointer type.
+/// Build a block pointer type.
///
/// \param T The type to which we'll be building a block pointer.
///
@@ -2582,9 +2647,8 @@ static void inferARCWriteback(TypeProcessingState &state,
if (chunk.Kind != DeclaratorChunk::Pointer &&
chunk.Kind != DeclaratorChunk::BlockPointer)
return;
- for (const AttributeList *attr = chunk.getAttrs(); attr;
- attr = attr->getNext())
- if (attr->getKind() == AttributeList::AT_ObjCOwnership)
+ for (const ParsedAttr &AL : chunk.getAttrs())
+ if (AL.getKind() == ParsedAttr::AT_ObjCOwnership)
return;
transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
@@ -2702,7 +2766,7 @@ static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
// If the qualifiers come from a conversion function type, don't diagnose
// them -- they're not necessarily redundant, since such a conversion
// operator can be explicitly called as "x.operator const int()".
- if (D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId)
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId)
return;
// Just parens all the way out to the decl specifiers. Diagnose any qualifiers
@@ -2728,11 +2792,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
TagDecl *OwnedTagDecl = nullptr;
switch (D.getName().getKind()) {
- case UnqualifiedId::IK_ImplicitSelfParam:
- case UnqualifiedId::IK_OperatorFunctionId:
- case UnqualifiedId::IK_Identifier:
- case UnqualifiedId::IK_LiteralOperatorId:
- case UnqualifiedId::IK_TemplateId:
+ case UnqualifiedIdKind::IK_ImplicitSelfParam:
+ case UnqualifiedIdKind::IK_OperatorFunctionId:
+ case UnqualifiedIdKind::IK_Identifier:
+ case UnqualifiedIdKind::IK_LiteralOperatorId:
+ case UnqualifiedIdKind::IK_TemplateId:
T = ConvertDeclSpecToType(state);
if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
@@ -2742,23 +2806,23 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
}
break;
- case UnqualifiedId::IK_ConstructorName:
- case UnqualifiedId::IK_ConstructorTemplateId:
- case UnqualifiedId::IK_DestructorName:
+ case UnqualifiedIdKind::IK_ConstructorName:
+ case UnqualifiedIdKind::IK_ConstructorTemplateId:
+ case UnqualifiedIdKind::IK_DestructorName:
// Constructors and destructors don't have return types. Use
// "void" instead.
T = SemaRef.Context.VoidTy;
processTypeAttrs(state, T, TAL_DeclSpec,
- D.getDeclSpec().getAttributes().getList());
+ D.getMutableDeclSpec().getAttributes());
break;
- case UnqualifiedId::IK_DeductionGuideName:
+ case UnqualifiedIdKind::IK_DeductionGuideName:
// Deduction guides have a trailing return type and no type in their
// decl-specifier sequence. Use a placeholder return type for now.
T = SemaRef.Context.DependentTy;
break;
- case UnqualifiedId::IK_ConversionFunctionId:
+ case UnqualifiedIdKind::IK_ConversionFunctionId:
// The result type of a conversion function is the type that it
// converts to.
T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
@@ -2766,7 +2830,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
break;
}
- if (D.getAttributes())
+ if (!D.getAttributes().empty())
distributeTypeAttrsFromDeclarator(state, T);
// C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
@@ -2780,16 +2844,16 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
(Auto && Auto->getKeyword() != AutoTypeKeyword::GNUAutoType);
switch (D.getContext()) {
- case Declarator::LambdaExprContext:
+ case DeclaratorContext::LambdaExprContext:
// Declared return type of a lambda-declarator is implicit and is always
// 'auto'.
break;
- case Declarator::ObjCParameterContext:
- case Declarator::ObjCResultContext:
- case Declarator::PrototypeContext:
+ case DeclaratorContext::ObjCParameterContext:
+ case DeclaratorContext::ObjCResultContext:
+ case DeclaratorContext::PrototypeContext:
Error = 0;
break;
- case Declarator::LambdaExprParameterContext:
+ case DeclaratorContext::LambdaExprParameterContext:
// In C++14, generic lambdas allow 'auto' in their parameters.
if (!SemaRef.getLangOpts().CPlusPlus14 ||
!Auto || Auto->getKeyword() != AutoTypeKeyword::Auto)
@@ -2821,7 +2885,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
T, QualType(CorrespondingTemplateParam->getTypeForDecl(), 0));
}
break;
- case Declarator::MemberContext: {
+ case DeclaratorContext::MemberContext: {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
D.isFunctionDeclarator())
break;
@@ -2837,57 +2901,66 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 20; // Friend type
break;
}
- case Declarator::CXXCatchContext:
- case Declarator::ObjCCatchContext:
+ case DeclaratorContext::CXXCatchContext:
+ case DeclaratorContext::ObjCCatchContext:
Error = 7; // Exception declaration
break;
- case Declarator::TemplateParamContext:
+ case DeclaratorContext::TemplateParamContext:
if (isa<DeducedTemplateSpecializationType>(Deduced))
Error = 19; // Template parameter
else if (!SemaRef.getLangOpts().CPlusPlus17)
Error = 8; // Template parameter (until C++17)
break;
- case Declarator::BlockLiteralContext:
+ case DeclaratorContext::BlockLiteralContext:
Error = 9; // Block literal
break;
- case Declarator::TemplateTypeArgContext:
+ case DeclaratorContext::TemplateArgContext:
+ // Within a template argument list, a deduced template specialization
+ // type will be reinterpreted as a template template argument.
+ if (isa<DeducedTemplateSpecializationType>(Deduced) &&
+ !D.getNumTypeObjects() &&
+ D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier)
+ break;
+ LLVM_FALLTHROUGH;
+ case DeclaratorContext::TemplateTypeArgContext:
Error = 10; // Template type argument
break;
- case Declarator::AliasDeclContext:
- case Declarator::AliasTemplateContext:
+ case DeclaratorContext::AliasDeclContext:
+ case DeclaratorContext::AliasTemplateContext:
Error = 12; // Type alias
break;
- case Declarator::TrailingReturnContext:
+ case DeclaratorContext::TrailingReturnContext:
+ case DeclaratorContext::TrailingReturnVarContext:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 13; // Function return type
break;
- case Declarator::ConversionIdContext:
+ case DeclaratorContext::ConversionIdContext:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 14; // conversion-type-id
break;
- case Declarator::FunctionalCastContext:
+ case DeclaratorContext::FunctionalCastContext:
if (isa<DeducedTemplateSpecializationType>(Deduced))
break;
LLVM_FALLTHROUGH;
- case Declarator::TypeNameContext:
+ case DeclaratorContext::TypeNameContext:
Error = 15; // Generic
break;
- case Declarator::FileContext:
- case Declarator::BlockContext:
- case Declarator::ForContext:
- case Declarator::InitStmtContext:
- case Declarator::ConditionContext:
+ case DeclaratorContext::FileContext:
+ case DeclaratorContext::BlockContext:
+ case DeclaratorContext::ForContext:
+ case DeclaratorContext::InitStmtContext:
+ case DeclaratorContext::ConditionContext:
// FIXME: P0091R3 (erroneously) does not permit class template argument
// deduction in conditions, for-init-statements, and other declarations
// that are not simple-declarations.
break;
- case Declarator::CXXNewContext:
+ case DeclaratorContext::CXXNewContext:
// FIXME: P0091R3 does not permit class template argument deduction here,
// but we follow GCC and allow it anyway.
if (!IsCXXAutoType && !isa<DeducedTemplateSpecializationType>(Deduced))
Error = 17; // 'new' type
break;
- case Declarator::KNRTypeListContext:
+ case DeclaratorContext::KNRTypeListContext:
Error = 18; // K&R function parameter
break;
}
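The TemplateArgContext carve-out above lets the bare name of a class template serve as a template template argument instead of being rejected as a deduced type. An illustrative sketch:

    template <typename T> struct A { A(T); };
    template <template <typename> typename TT> struct B {};
    B<A> b;       // OK: 'A' is reinterpreted as a template template argument
    A a = A(42);  // outside a template-argument list, deduction still applies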
@@ -2916,7 +2989,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
}
SourceRange AutoRange = D.getDeclSpec().getTypeSpecTypeLoc();
- if (D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId)
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_ConversionFunctionId)
AutoRange = D.getName().getSourceRange();
if (Error != -1) {
@@ -2944,9 +3017,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
T = SemaRef.Context.IntTy;
D.setInvalidType(true);
- } else if (!HaveTrailing) {
+ } else if (!HaveTrailing &&
+ D.getContext() != DeclaratorContext::LambdaExprContext) {
// If there was a trailing return type, we already got
// warn_cxx98_compat_trailing_return_type in the parser.
+ // If this was a lambda, we already warned on that too.
SemaRef.Diag(AutoRange.getBegin(),
diag::warn_cxx98_compat_auto_type_specifier)
<< AutoRange;
@@ -2959,47 +3034,49 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// or enumeration in a type-specifier-seq.
unsigned DiagID = 0;
switch (D.getContext()) {
- case Declarator::TrailingReturnContext:
+ case DeclaratorContext::TrailingReturnContext:
+ case DeclaratorContext::TrailingReturnVarContext:
// Class and enumeration definitions are syntactically not allowed in
// trailing return types.
llvm_unreachable("parser should not have allowed this");
break;
- case Declarator::FileContext:
- case Declarator::MemberContext:
- case Declarator::BlockContext:
- case Declarator::ForContext:
- case Declarator::InitStmtContext:
- case Declarator::BlockLiteralContext:
- case Declarator::LambdaExprContext:
+ case DeclaratorContext::FileContext:
+ case DeclaratorContext::MemberContext:
+ case DeclaratorContext::BlockContext:
+ case DeclaratorContext::ForContext:
+ case DeclaratorContext::InitStmtContext:
+ case DeclaratorContext::BlockLiteralContext:
+ case DeclaratorContext::LambdaExprContext:
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration unless
// it appears in the type-id of an alias-declaration (7.1.3) that is not
// the declaration of a template-declaration.
- case Declarator::AliasDeclContext:
+ case DeclaratorContext::AliasDeclContext:
break;
- case Declarator::AliasTemplateContext:
+ case DeclaratorContext::AliasTemplateContext:
DiagID = diag::err_type_defined_in_alias_template;
break;
- case Declarator::TypeNameContext:
- case Declarator::FunctionalCastContext:
- case Declarator::ConversionIdContext:
- case Declarator::TemplateParamContext:
- case Declarator::CXXNewContext:
- case Declarator::CXXCatchContext:
- case Declarator::ObjCCatchContext:
- case Declarator::TemplateTypeArgContext:
+ case DeclaratorContext::TypeNameContext:
+ case DeclaratorContext::FunctionalCastContext:
+ case DeclaratorContext::ConversionIdContext:
+ case DeclaratorContext::TemplateParamContext:
+ case DeclaratorContext::CXXNewContext:
+ case DeclaratorContext::CXXCatchContext:
+ case DeclaratorContext::ObjCCatchContext:
+ case DeclaratorContext::TemplateArgContext:
+ case DeclaratorContext::TemplateTypeArgContext:
DiagID = diag::err_type_defined_in_type_specifier;
break;
- case Declarator::PrototypeContext:
- case Declarator::LambdaExprParameterContext:
- case Declarator::ObjCParameterContext:
- case Declarator::ObjCResultContext:
- case Declarator::KNRTypeListContext:
+ case DeclaratorContext::PrototypeContext:
+ case DeclaratorContext::LambdaExprParameterContext:
+ case DeclaratorContext::ObjCParameterContext:
+ case DeclaratorContext::ObjCResultContext:
+ case DeclaratorContext::KNRTypeListContext:
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
DiagID = diag::err_type_defined_in_param_type;
break;
- case Declarator::ConditionContext:
+ case DeclaratorContext::ConditionContext:
// C++ 6.4p2:
// The type-specifier-seq shall not contain typedef and shall not declare
// a new class or enumeration.
@@ -3048,7 +3125,7 @@ static void warnAboutAmbiguousFunction(Sema &S, Declarator &D,
// Inside a condition, a direct initializer is not permitted. We allow one to
// be parsed in order to give better diagnostics in condition parsing.
- if (D.getContext() == Declarator::ConditionContext)
+ if (D.getContext() == DeclaratorContext::ConditionContext)
return;
SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc);
@@ -3164,7 +3241,7 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
case DeclaratorChunk::Function:
// In a new-type-id, function chunks require parentheses.
- if (D.getContext() == Declarator::CXXNewContext)
+ if (D.getContext() == DeclaratorContext::CXXNewContext)
return;
// FIXME: "A(f())" deserves a vexing-parse warning, not just a
// redundant-parens warning, but we don't know whether the function
@@ -3237,21 +3314,20 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
/// this is the outermost chunk, then we can determine the CC from the
/// declarator context. If not, then this could be either a member function
/// type or normal function type.
-static CallingConv
-getCCForDeclaratorChunk(Sema &S, Declarator &D,
- const DeclaratorChunk::FunctionTypeInfo &FTI,
- unsigned ChunkIndex) {
+static CallingConv getCCForDeclaratorChunk(
+ Sema &S, Declarator &D, const ParsedAttributesView &AttrList,
+ const DeclaratorChunk::FunctionTypeInfo &FTI, unsigned ChunkIndex) {
assert(D.getTypeObject(ChunkIndex).Kind == DeclaratorChunk::Function);
// Check for an explicit CC attribute.
- for (auto Attr = FTI.AttrList; Attr; Attr = Attr->getNext()) {
- switch (Attr->getKind()) {
- CALLING_CONV_ATTRS_CASELIST: {
+ for (const ParsedAttr &AL : AttrList) {
+ switch (AL.getKind()) {
    CALLING_CONV_ATTRS_CASELIST: {
// Ignore attributes that don't validate or can't apply to the
// function type. We'll diagnose the failure to apply them in
// handleFunctionTypeAttr.
CallingConv CC;
- if (!S.CheckCallingConvAttr(*Attr, CC) &&
+ if (!S.CheckCallingConvAttr(AL, CC) &&
(!FTI.isVariadic || supportsVariadicCall(CC))) {
return CC;
}
@@ -3282,7 +3358,7 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D,
// in a member pointer.
IsCXXInstanceMethod =
D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer;
- } else if (D.getContext() == Declarator::LambdaExprContext) {
+ } else if (D.getContext() == DeclaratorContext::LambdaExprContext) {
// This can only be a call operator for a lambda, which is an instance
// method.
IsCXXInstanceMethod = true;
@@ -3307,9 +3383,8 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D,
// convention attribute. This is the simplest place to infer
// calling convention for OpenCL kernels.
if (S.getLangOpts().OpenCL) {
- for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
- Attr; Attr = Attr->getNext()) {
- if (Attr->getKind() == AttributeList::AT_OpenCLKernel) {
+ for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) {
+ if (AL.getKind() == ParsedAttr::AT_OpenCLKernel) {
CC = CC_OpenCLKernel;
break;
}
@@ -3360,12 +3435,11 @@ IdentifierInfo *Sema::getNSErrorIdent() {
/// Check whether there is a nullability attribute of any kind in the given
/// attribute list.
-static bool hasNullabilityAttr(const AttributeList *attrs) {
- for (const AttributeList *attr = attrs; attr;
- attr = attr->getNext()) {
- if (attr->getKind() == AttributeList::AT_TypeNonNull ||
- attr->getKind() == AttributeList::AT_TypeNullable ||
- attr->getKind() == AttributeList::AT_TypeNullUnspecified)
+static bool hasNullabilityAttr(const ParsedAttributesView &attrs) {
+ for (const ParsedAttr &AL : attrs) {
+ if (AL.getKind() == ParsedAttr::AT_TypeNonNull ||
+ AL.getKind() == ParsedAttr::AT_TypeNullable ||
+ AL.getKind() == ParsedAttr::AT_TypeNullUnspecified)
return true;
}
@@ -3779,8 +3853,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Does this declaration declare a typedef-name?
bool IsTypedefName =
D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef ||
- D.getContext() == Declarator::AliasDeclContext ||
- D.getContext() == Declarator::AliasTemplateContext;
+ D.getContext() == DeclaratorContext::AliasDeclContext ||
+ D.getContext() == DeclaratorContext::AliasTemplateContext;
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
bool IsQualifiedFunction = T->isFunctionProtoType() &&
@@ -3909,14 +3983,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
} else {
bool isFunctionOrMethod = false;
switch (auto context = state.getDeclarator().getContext()) {
- case Declarator::ObjCParameterContext:
- case Declarator::ObjCResultContext:
- case Declarator::PrototypeContext:
- case Declarator::TrailingReturnContext:
+ case DeclaratorContext::ObjCParameterContext:
+ case DeclaratorContext::ObjCResultContext:
+ case DeclaratorContext::PrototypeContext:
+ case DeclaratorContext::TrailingReturnContext:
+ case DeclaratorContext::TrailingReturnVarContext:
isFunctionOrMethod = true;
LLVM_FALLTHROUGH;
- case Declarator::MemberContext:
+ case DeclaratorContext::MemberContext:
if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
complainAboutMissingNullability = CAMN_No;
break;
@@ -3930,8 +4005,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
LLVM_FALLTHROUGH;
- case Declarator::FileContext:
- case Declarator::KNRTypeListContext: {
+ case DeclaratorContext::FileContext:
+ case DeclaratorContext::KNRTypeListContext: {
complainAboutMissingNullability = CAMN_Yes;
// Nullability inference depends on the type and declarator.
@@ -3947,8 +4022,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (inAssumeNonNullRegion) {
complainAboutInferringWithinChunk = wrappingKind;
inferNullability = NullabilityKind::NonNull;
- inferNullabilityCS = (context == Declarator::ObjCParameterContext ||
- context == Declarator::ObjCResultContext);
+ inferNullabilityCS =
+ (context == DeclaratorContext::ObjCParameterContext ||
+ context == DeclaratorContext::ObjCResultContext);
}
break;
@@ -3965,19 +4041,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// On pointer-to-pointer parameters marked cf_returns_retained or
// cf_returns_not_retained, if the outer pointer is explicit then
// infer the inner pointer as _Nullable.
- auto hasCFReturnsAttr = [](const AttributeList *NextAttr) -> bool {
- while (NextAttr) {
- if (NextAttr->getKind() == AttributeList::AT_CFReturnsRetained ||
- NextAttr->getKind() == AttributeList::AT_CFReturnsNotRetained)
- return true;
- NextAttr = NextAttr->getNext();
- }
- return false;
+ auto hasCFReturnsAttr =
+ [](const ParsedAttributesView &AttrList) -> bool {
+ return AttrList.hasAttribute(ParsedAttr::AT_CFReturnsRetained) ||
+ AttrList.hasAttribute(ParsedAttr::AT_CFReturnsNotRetained);
};
if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) {
if (hasCFReturnsAttr(D.getAttributes()) ||
hasCFReturnsAttr(InnermostChunk->getAttrs()) ||
- hasCFReturnsAttr(D.getDeclSpec().getAttributes().getList())) {
+ hasCFReturnsAttr(D.getDeclSpec().getAttributes())) {
inferNullability = NullabilityKind::Nullable;
inferNullabilityInnerOnly = true;
}
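The hasCFReturnsAttr lambda above now delegates the walk to ParsedAttributesView::hasAttribute instead of chasing getNext() by hand. The inference it guards is the one in the comment; a hypothetical sketch (CopyName and the CFStringRef typedef are stand-ins, not from this patch):

    typedef const struct __CFString *CFStringRef;
    void CopyName(CFStringRef *_Nonnull outName
                      __attribute__((cf_returns_retained)));
    // inner pointer inferred: CFStringRef _Nullable *_Nonnull outName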
@@ -3988,26 +4060,27 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
}
- case Declarator::ConversionIdContext:
+ case DeclaratorContext::ConversionIdContext:
complainAboutMissingNullability = CAMN_Yes;
break;
- case Declarator::AliasDeclContext:
- case Declarator::AliasTemplateContext:
- case Declarator::BlockContext:
- case Declarator::BlockLiteralContext:
- case Declarator::ConditionContext:
- case Declarator::CXXCatchContext:
- case Declarator::CXXNewContext:
- case Declarator::ForContext:
- case Declarator::InitStmtContext:
- case Declarator::LambdaExprContext:
- case Declarator::LambdaExprParameterContext:
- case Declarator::ObjCCatchContext:
- case Declarator::TemplateParamContext:
- case Declarator::TemplateTypeArgContext:
- case Declarator::TypeNameContext:
- case Declarator::FunctionalCastContext:
+ case DeclaratorContext::AliasDeclContext:
+ case DeclaratorContext::AliasTemplateContext:
+ case DeclaratorContext::BlockContext:
+ case DeclaratorContext::BlockLiteralContext:
+ case DeclaratorContext::ConditionContext:
+ case DeclaratorContext::CXXCatchContext:
+ case DeclaratorContext::CXXNewContext:
+ case DeclaratorContext::ForContext:
+ case DeclaratorContext::InitStmtContext:
+ case DeclaratorContext::LambdaExprContext:
+ case DeclaratorContext::LambdaExprParameterContext:
+ case DeclaratorContext::ObjCCatchContext:
+ case DeclaratorContext::TemplateParamContext:
+ case DeclaratorContext::TemplateArgContext:
+ case DeclaratorContext::TemplateTypeArgContext:
+ case DeclaratorContext::TypeNameContext:
+ case DeclaratorContext::FunctionalCastContext:
// Don't infer in these contexts.
break;
}
@@ -4032,10 +4105,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Local function that checks the nullability for a given pointer declarator.
// Returns true if _Nonnull was inferred.
- auto inferPointerNullability = [&](SimplePointerKind pointerKind,
- SourceLocation pointerLoc,
- SourceLocation pointerEndLoc,
- AttributeList *&attrs) -> AttributeList * {
+ auto inferPointerNullability =
+ [&](SimplePointerKind pointerKind, SourceLocation pointerLoc,
+ SourceLocation pointerEndLoc,
+ ParsedAttributesView &attrs) -> ParsedAttr * {
// We've seen a pointer.
if (NumPointersRemaining > 0)
--NumPointersRemaining;
@@ -4046,18 +4119,16 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If we're supposed to infer nullability, do so now.
if (inferNullability && !inferNullabilityInnerOnlyComplete) {
- AttributeList::Syntax syntax
- = inferNullabilityCS ? AttributeList::AS_ContextSensitiveKeyword
- : AttributeList::AS_Keyword;
- AttributeList *nullabilityAttr = state.getDeclarator().getAttributePool()
- .create(
- S.getNullabilityKeyword(
- *inferNullability),
- SourceRange(pointerLoc),
- nullptr, SourceLocation(),
- nullptr, 0, syntax);
-
- spliceAttrIntoList(*nullabilityAttr, attrs);
+ ParsedAttr::Syntax syntax = inferNullabilityCS
+ ? ParsedAttr::AS_ContextSensitiveKeyword
+ : ParsedAttr::AS_Keyword;
+ ParsedAttr *nullabilityAttr =
+ state.getDeclarator().getAttributePool().create(
+ S.getNullabilityKeyword(*inferNullability),
+ SourceRange(pointerLoc), nullptr, SourceLocation(), nullptr, 0,
+ syntax);
+
+ attrs.addAtStart(nullabilityAttr);
if (inferNullabilityCS) {
state.getDeclarator().getMutableDeclSpec().getObjCQualifiers()
@@ -4112,9 +4183,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
pointerKind = SimplePointerKind::MemberPointer;
if (auto *attr = inferPointerNullability(
- pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
- D.getDeclSpec().getLocEnd(),
- D.getMutableDeclSpec().getAttributes().getListRef())) {
+ pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
+ D.getDeclSpec().getLocEnd(),
+ D.getMutableDeclSpec().getAttributes())) {
T = Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*inferNullability),T,T);
attr->setUsedAsTypeAttr();
@@ -4152,7 +4223,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Handle pointer nullability.
inferPointerNullability(SimplePointerKind::BlockPointer, DeclType.Loc,
- DeclType.EndLoc, DeclType.getAttrListRef());
+ DeclType.EndLoc, DeclType.getAttrs());
T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
if (DeclType.Cls.TypeQuals || LangOpts.OpenCL) {
@@ -4174,7 +4245,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Handle pointer nullability
inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc,
- DeclType.EndLoc, DeclType.getAttrListRef());
+ DeclType.EndLoc, DeclType.getAttrs());
if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
@@ -4243,7 +4314,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// array type, ...
if (ASM == ArrayType::Static || ATI.TypeQuals) {
if (!(D.isPrototypeContext() ||
- D.getContext() == Declarator::KNRTypeListContext)) {
+ D.getContext() == DeclaratorContext::KNRTypeListContext)) {
S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) <<
(ASM == ArrayType::Static ? "'static'" : "type qualifier");
// Remove the 'static' and the type qualifiers.
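For reference, a sketch of the C99 rule enforced here (C mode; recovery simply drops the offending keyword, as the comment above says):

    void f(int a[static 4]);   // OK: array declarator in a prototype
    int b[static 4];           // error: 'static' outside function prototype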
@@ -4267,7 +4338,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
const AutoType *AT = T->getContainedAutoType();
// Allow arrays of auto if we are a generic lambda parameter.
// i.e. [](auto (&array)[5]) { return array[0]; }; OK
- if (AT && D.getContext() != Declarator::LambdaExprParameterContext) {
+ if (AT &&
+ D.getContext() != DeclaratorContext::LambdaExprParameterContext) {
// We've already diagnosed this for decltype(auto).
if (!AT->isDecltypeAuto())
S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto)
@@ -4319,14 +4391,14 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< T << D.getSourceRange();
D.setInvalidType(true);
} else if (D.getName().getKind() ==
- UnqualifiedId::IK_DeductionGuideName) {
+ UnqualifiedIdKind::IK_DeductionGuideName) {
if (T != Context.DependentTy) {
S.Diag(D.getDeclSpec().getLocStart(),
diag::err_deduction_guide_with_complex_decl)
<< D.getSourceRange();
D.setInvalidType(true);
}
- } else if (D.getContext() != Declarator::LambdaExprContext &&
+ } else if (D.getContext() != DeclaratorContext::LambdaExprContext &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
cast<AutoType>(T)->getKeyword() !=
AutoTypeKeyword::Auto)) {
@@ -4347,12 +4419,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C99 6.7.5.3p1: The return type may not be a function or array type.
// For conversion functions, we'll diagnose this particular error later.
if (!D.isInvalidType() && (T->isArrayType() || T->isFunctionType()) &&
- (D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId)) {
+ (D.getName().getKind() !=
+ UnqualifiedIdKind::IK_ConversionFunctionId)) {
unsigned diagID = diag::err_func_returning_array_function;
// Last processing chunk in block context means this function chunk
// represents the block.
if (chunkIndex == 0 &&
- D.getContext() == Declarator::BlockLiteralContext)
+ D.getContext() == DeclaratorContext::BlockLiteralContext)
diagID = diag::err_block_returning_array_function;
S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
@@ -4446,20 +4519,17 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
SourceLocation AttrLoc;
if (chunkIndex + 1 < D.getNumTypeObjects()) {
DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1);
- for (const AttributeList *Attr = ReturnTypeChunk.getAttrs();
- Attr; Attr = Attr->getNext()) {
- if (Attr->getKind() == AttributeList::AT_ObjCOwnership) {
- AttrLoc = Attr->getLoc();
+ for (const ParsedAttr &AL : ReturnTypeChunk.getAttrs()) {
+ if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) {
+ AttrLoc = AL.getLoc();
break;
}
}
}
if (AttrLoc.isInvalid()) {
- for (const AttributeList *Attr
- = D.getDeclSpec().getAttributes().getList();
- Attr; Attr = Attr->getNext()) {
- if (Attr->getKind() == AttributeList::AT_ObjCOwnership) {
- AttrLoc = Attr->getLoc();
+ for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) {
+ if (AL.getKind() == ParsedAttr::AT_ObjCOwnership) {
+ AttrLoc = AL.getLoc();
break;
}
}
@@ -4470,7 +4540,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// the predefined
// __strong/__weak/__autoreleasing/__unsafe_unretained.
if (AttrLoc.isMacroID())
- AttrLoc = S.SourceMgr.getImmediateExpansionRange(AttrLoc).first;
+ AttrLoc =
+ S.SourceMgr.getImmediateExpansionRange(AttrLoc).getBegin();
S.Diag(AttrLoc, diag::warn_arc_lifetime_result_type)
<< T.getQualifiers().getObjCLifetime();
@@ -4490,15 +4561,16 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (IsTypedefName && FTI.getExceptionSpecType() && !LangOpts.CPlusPlus17)
S.Diag(FTI.getExceptionSpecLocBeg(),
diag::err_exception_spec_in_typedef)
- << (D.getContext() == Declarator::AliasDeclContext ||
- D.getContext() == Declarator::AliasTemplateContext);
+ << (D.getContext() == DeclaratorContext::AliasDeclContext ||
+ D.getContext() == DeclaratorContext::AliasTemplateContext);
// If we see "T var();" or "T var(T());" at block scope, it is probably
// an attempt to initialize a variable, not a function declaration.
if (FTI.isAmbiguous)
warnAboutAmbiguousFunction(S, D, DeclType, T);
- FunctionType::ExtInfo EI(getCCForDeclaratorChunk(S, D, FTI, chunkIndex));
+ FunctionType::ExtInfo EI(
+ getCCForDeclaratorChunk(S, D, DeclType.getAttrs(), FTI, chunkIndex));
if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus
&& !LangOpts.OpenCL) {
@@ -4508,19 +4580,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// We allow a zero-parameter variadic function in C if the
// function is marked with the "overloadable" attribute. Scan
// for this attribute now.
- if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus) {
- bool Overloadable = false;
- for (const AttributeList *Attrs = D.getAttributes();
- Attrs; Attrs = Attrs->getNext()) {
- if (Attrs->getKind() == AttributeList::AT_Overloadable) {
- Overloadable = true;
- break;
- }
- }
-
- if (!Overloadable)
+ if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus)
+ if (!D.getAttributes().hasAttribute(ParsedAttr::AT_Overloadable))
S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param);
- }
if (FTI.NumParams && FTI.Params[0].Param == nullptr) {
// C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
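The restructured check keeps the existing extension: in C, an ellipsis normally requires a named parameter before it, unless the function is marked overloadable. Sketch (C mode; names invented):

    void f(...);                                  // error: ISO C requires a named
                                                  // parameter before '...'
    void g(...) __attribute__((overloadable));    // OK: zero-parameter variadic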
@@ -4652,7 +4714,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
DynamicExceptions.push_back(FTI.Exceptions[I].Ty);
DynamicExceptionRanges.push_back(FTI.Exceptions[I].Range);
}
- } else if (FTI.getExceptionSpecType() == EST_ComputedNoexcept) {
+ } else if (isComputedNoexcept(FTI.getExceptionSpecType())) {
NoexceptExpr = FTI.NoexceptExpr;
}
@@ -4675,7 +4737,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Handle pointer nullability.
inferPointerNullability(SimplePointerKind::MemberPointer, DeclType.Loc,
- DeclType.EndLoc, DeclType.getAttrListRef());
+ DeclType.EndLoc, DeclType.getAttrs());
if (SS.isInvalid()) {
// Avoid emitting extra errors if we already errored on the scope.
@@ -4731,7 +4793,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorChunk::Pipe: {
T = S.BuildReadPipeType(T, DeclType.Loc);
processTypeAttrs(state, T, TAL_DeclSpec,
- D.getDeclSpec().getAttributes().getList());
+ D.getMutableDeclSpec().getAttributes());
break;
}
}
@@ -4742,8 +4804,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
// See if there are any attributes on this declarator chunk.
- processTypeAttrs(state, T, TAL_DeclChunk,
- const_cast<AttributeList *>(DeclType.getAttrs()));
+ processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs());
}
// GNU warning -Wstrict-prototypes
@@ -4763,7 +4824,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
case DeclaratorChunk::Function: {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
- if (FTI.NumParams == 0)
+ if (FTI.NumParams == 0 && !FTI.isVariadic)
S.Diag(DeclType.Loc, diag::warn_strict_prototypes)
<< IsBlock
<< FixItHint::CreateInsertion(FTI.getRParenLoc(), "void");
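The added !FTI.isVariadic guard keeps -Wstrict-prototypes, and its "insert 'void'" fix-it, from firing on a zero-parameter variadic declarator, where inserting 'void' before '...' would be nonsense. Sketch (C mode; names invented):

    void f();                                     // warning: not a prototype;
                                                  // fix-it suggests '(void)'
    void g(void);                                 // OK
    void h(...) __attribute__((overloadable));    // no longer warned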
@@ -4791,11 +4852,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Core issue 547 also allows cv-qualifiers on function types that are
// top-level template type arguments.
enum { NonMember, Member, DeductionGuide } Kind = NonMember;
- if (D.getName().getKind() == UnqualifiedId::IK_DeductionGuideName)
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName)
Kind = DeductionGuide;
else if (!D.getCXXScopeSpec().isSet()) {
- if ((D.getContext() == Declarator::MemberContext ||
- D.getContext() == Declarator::LambdaExprContext) &&
+ if ((D.getContext() == DeclaratorContext::MemberContext ||
+ D.getContext() == DeclaratorContext::LambdaExprContext) &&
!D.getDeclSpec().isFriendSpecified())
Kind = Member;
} else {
@@ -4824,7 +4885,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
!(Kind == Member &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
!IsTypedefName &&
- D.getContext() != Declarator::TemplateTypeArgContext) {
+ D.getContext() != DeclaratorContext::TemplateArgContext &&
+ D.getContext() != DeclaratorContext::TemplateTypeArgContext) {
SourceLocation Loc = D.getLocStart();
SourceRange RemovalRange;
unsigned I;
@@ -4841,8 +4903,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
if (!RemovalLocs.empty()) {
- std::sort(RemovalLocs.begin(), RemovalLocs.end(),
- BeforeThanCompare<SourceLocation>(S.getSourceManager()));
+ llvm::sort(RemovalLocs.begin(), RemovalLocs.end(),
+ BeforeThanCompare<SourceLocation>(S.getSourceManager()));
RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
Loc = RemovalLocs.front();
}
@@ -4890,8 +4952,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// only be used in a parameter-declaration. Such a parameter-declaration
// is a parameter pack (14.5.3). [...]
switch (D.getContext()) {
- case Declarator::PrototypeContext:
- case Declarator::LambdaExprParameterContext:
+ case DeclaratorContext::PrototypeContext:
+ case DeclaratorContext::LambdaExprParameterContext:
// C++0x [dcl.fct]p13:
// [...] When it is part of a parameter-declaration-clause, the
// parameter pack is a function parameter pack (14.5.3). The type T
@@ -4910,7 +4972,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
T = Context.getPackExpansionType(T, None);
}
break;
- case Declarator::TemplateParamContext:
+ case DeclaratorContext::TemplateParamContext:
// C++0x [temp.param]p15:
// If a template-parameter is a [...] is a parameter-declaration that
// declares a parameter pack (8.3.5), then the template-parameter is a
@@ -4928,27 +4990,31 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
: diag::ext_variadic_templates);
break;
- case Declarator::FileContext:
- case Declarator::KNRTypeListContext:
- case Declarator::ObjCParameterContext: // FIXME: special diagnostic here?
- case Declarator::ObjCResultContext: // FIXME: special diagnostic here?
- case Declarator::TypeNameContext:
- case Declarator::FunctionalCastContext:
- case Declarator::CXXNewContext:
- case Declarator::AliasDeclContext:
- case Declarator::AliasTemplateContext:
- case Declarator::MemberContext:
- case Declarator::BlockContext:
- case Declarator::ForContext:
- case Declarator::InitStmtContext:
- case Declarator::ConditionContext:
- case Declarator::CXXCatchContext:
- case Declarator::ObjCCatchContext:
- case Declarator::BlockLiteralContext:
- case Declarator::LambdaExprContext:
- case Declarator::ConversionIdContext:
- case Declarator::TrailingReturnContext:
- case Declarator::TemplateTypeArgContext:
+ case DeclaratorContext::FileContext:
+ case DeclaratorContext::KNRTypeListContext:
+ case DeclaratorContext::ObjCParameterContext: // FIXME: special diagnostic
+ // here?
+ case DeclaratorContext::ObjCResultContext: // FIXME: special diagnostic
+ // here?
+ case DeclaratorContext::TypeNameContext:
+ case DeclaratorContext::FunctionalCastContext:
+ case DeclaratorContext::CXXNewContext:
+ case DeclaratorContext::AliasDeclContext:
+ case DeclaratorContext::AliasTemplateContext:
+ case DeclaratorContext::MemberContext:
+ case DeclaratorContext::BlockContext:
+ case DeclaratorContext::ForContext:
+ case DeclaratorContext::InitStmtContext:
+ case DeclaratorContext::ConditionContext:
+ case DeclaratorContext::CXXCatchContext:
+ case DeclaratorContext::ObjCCatchContext:
+ case DeclaratorContext::BlockLiteralContext:
+ case DeclaratorContext::LambdaExprContext:
+ case DeclaratorContext::ConversionIdContext:
+ case DeclaratorContext::TrailingReturnContext:
+ case DeclaratorContext::TrailingReturnVarContext:
+ case DeclaratorContext::TemplateArgContext:
+ case DeclaratorContext::TemplateTypeArgContext:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
S.Diag(D.getEllipsisLoc(),
@@ -5003,10 +5069,8 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
// Look for an explicit lifetime attribute.
DeclaratorChunk &chunk = D.getTypeObject(chunkIndex);
- for (const AttributeList *attr = chunk.getAttrs(); attr;
- attr = attr->getNext())
- if (attr->getKind() == AttributeList::AT_ObjCOwnership)
- return;
+ if (chunk.getAttrs().hasAttribute(ParsedAttr::AT_ObjCOwnership))
+ return;
const char *attrStr = nullptr;
switch (ownership) {
@@ -5025,16 +5089,15 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
// If there wasn't one, add one (with an invalid source location
// so that we don't make an AttributedType for it).
- AttributeList *attr = D.getAttributePool()
- .create(&S.Context.Idents.get("objc_ownership"), SourceLocation(),
- /*scope*/ nullptr, SourceLocation(),
- /*args*/ &Args, 1, AttributeList::AS_GNU);
- spliceAttrIntoList(*attr, chunk.getAttrListRef());
-
+ ParsedAttr *attr = D.getAttributePool().create(
+ &S.Context.Idents.get("objc_ownership"), SourceLocation(),
+ /*scope*/ nullptr, SourceLocation(),
+ /*args*/ &Args, 1, ParsedAttr::AS_GNU);
+ chunk.getAttrs().addAtStart(attr);
// TODO: mark whether we did this inference?
}
-/// \brief Used for transferring ownership in casts resulting in l-values.
+/// Used for transferring ownership in casts resulting in l-values.
static void transferARCOwnership(TypeProcessingState &state,
QualType &declSpecTy,
Qualifiers::ObjCLifetime ownership) {
@@ -5101,110 +5164,91 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo);
}
-/// Map an AttributedType::Kind to an AttributeList::Kind.
-static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
+/// Map an AttributedType::Kind to a ParsedAttr::Kind.
+static ParsedAttr::Kind getAttrListKind(AttributedType::Kind kind) {
switch (kind) {
case AttributedType::attr_address_space:
- return AttributeList::AT_AddressSpace;
+ return ParsedAttr::AT_AddressSpace;
case AttributedType::attr_regparm:
- return AttributeList::AT_Regparm;
+ return ParsedAttr::AT_Regparm;
case AttributedType::attr_vector_size:
- return AttributeList::AT_VectorSize;
+ return ParsedAttr::AT_VectorSize;
case AttributedType::attr_neon_vector_type:
- return AttributeList::AT_NeonVectorType;
+ return ParsedAttr::AT_NeonVectorType;
case AttributedType::attr_neon_polyvector_type:
- return AttributeList::AT_NeonPolyVectorType;
+ return ParsedAttr::AT_NeonPolyVectorType;
case AttributedType::attr_objc_gc:
- return AttributeList::AT_ObjCGC;
+ return ParsedAttr::AT_ObjCGC;
case AttributedType::attr_objc_ownership:
case AttributedType::attr_objc_inert_unsafe_unretained:
- return AttributeList::AT_ObjCOwnership;
+ return ParsedAttr::AT_ObjCOwnership;
case AttributedType::attr_noreturn:
- return AttributeList::AT_NoReturn;
+ return ParsedAttr::AT_NoReturn;
+ case AttributedType::attr_nocf_check:
+ return ParsedAttr::AT_AnyX86NoCfCheck;
case AttributedType::attr_cdecl:
- return AttributeList::AT_CDecl;
+ return ParsedAttr::AT_CDecl;
case AttributedType::attr_fastcall:
- return AttributeList::AT_FastCall;
+ return ParsedAttr::AT_FastCall;
case AttributedType::attr_stdcall:
- return AttributeList::AT_StdCall;
+ return ParsedAttr::AT_StdCall;
case AttributedType::attr_thiscall:
- return AttributeList::AT_ThisCall;
+ return ParsedAttr::AT_ThisCall;
case AttributedType::attr_regcall:
- return AttributeList::AT_RegCall;
+ return ParsedAttr::AT_RegCall;
case AttributedType::attr_pascal:
- return AttributeList::AT_Pascal;
+ return ParsedAttr::AT_Pascal;
case AttributedType::attr_swiftcall:
- return AttributeList::AT_SwiftCall;
+ return ParsedAttr::AT_SwiftCall;
case AttributedType::attr_vectorcall:
- return AttributeList::AT_VectorCall;
+ return ParsedAttr::AT_VectorCall;
case AttributedType::attr_pcs:
case AttributedType::attr_pcs_vfp:
- return AttributeList::AT_Pcs;
+ return ParsedAttr::AT_Pcs;
case AttributedType::attr_inteloclbicc:
- return AttributeList::AT_IntelOclBicc;
+ return ParsedAttr::AT_IntelOclBicc;
case AttributedType::attr_ms_abi:
- return AttributeList::AT_MSABI;
+ return ParsedAttr::AT_MSABI;
case AttributedType::attr_sysv_abi:
- return AttributeList::AT_SysVABI;
+ return ParsedAttr::AT_SysVABI;
case AttributedType::attr_preserve_most:
- return AttributeList::AT_PreserveMost;
+ return ParsedAttr::AT_PreserveMost;
case AttributedType::attr_preserve_all:
- return AttributeList::AT_PreserveAll;
+ return ParsedAttr::AT_PreserveAll;
case AttributedType::attr_ptr32:
- return AttributeList::AT_Ptr32;
+ return ParsedAttr::AT_Ptr32;
case AttributedType::attr_ptr64:
- return AttributeList::AT_Ptr64;
+ return ParsedAttr::AT_Ptr64;
case AttributedType::attr_sptr:
- return AttributeList::AT_SPtr;
+ return ParsedAttr::AT_SPtr;
case AttributedType::attr_uptr:
- return AttributeList::AT_UPtr;
+ return ParsedAttr::AT_UPtr;
case AttributedType::attr_nonnull:
- return AttributeList::AT_TypeNonNull;
+ return ParsedAttr::AT_TypeNonNull;
case AttributedType::attr_nullable:
- return AttributeList::AT_TypeNullable;
+ return ParsedAttr::AT_TypeNullable;
case AttributedType::attr_null_unspecified:
- return AttributeList::AT_TypeNullUnspecified;
+ return ParsedAttr::AT_TypeNullUnspecified;
case AttributedType::attr_objc_kindof:
- return AttributeList::AT_ObjCKindOf;
+ return ParsedAttr::AT_ObjCKindOf;
case AttributedType::attr_ns_returns_retained:
- return AttributeList::AT_NSReturnsRetained;
+ return ParsedAttr::AT_NSReturnsRetained;
}
llvm_unreachable("unexpected attribute kind!");
}
-static void fillAttributedTypeLoc(AttributedTypeLoc TL,
- const AttributeList *attrs,
- const AttributeList *DeclAttrs = nullptr) {
- // DeclAttrs and attrs cannot be both empty.
- assert((attrs || DeclAttrs) &&
- "no type attributes in the expected location!");
-
- AttributeList::Kind parsedKind = getAttrListKind(TL.getAttrKind());
- // Try to search for an attribute of matching kind in attrs list.
- while (attrs && attrs->getKind() != parsedKind)
- attrs = attrs->getNext();
- if (!attrs) {
- // No matching type attribute in attrs list found.
- // Try searching through C++11 attributes in the declarator attribute list.
- while (DeclAttrs && (!DeclAttrs->isCXX11Attribute() ||
- DeclAttrs->getKind() != parsedKind))
- DeclAttrs = DeclAttrs->getNext();
- attrs = DeclAttrs;
- }
-
- assert(attrs && "no matching type attribute in expected location!");
-
- TL.setAttrNameLoc(attrs->getLoc());
+static void setAttributedTypeLoc(AttributedTypeLoc TL, const ParsedAttr &attr) {
+ TL.setAttrNameLoc(attr.getLoc());
if (TL.hasAttrExprOperand()) {
- assert(attrs->isArgExpr(0) && "mismatched attribute operand kind");
- TL.setAttrExprOperand(attrs->getArgAsExpr(0));
+ assert(attr.isArgExpr(0) && "mismatched attribute operand kind");
+ TL.setAttrExprOperand(attr.getArgAsExpr(0));
} else if (TL.hasAttrEnumOperand()) {
- assert((attrs->isArgIdent(0) || attrs->isArgExpr(0)) &&
+ assert((attr.isArgIdent(0) || attr.isArgExpr(0)) &&
"unexpected attribute operand kind");
- if (attrs->isArgIdent(0))
- TL.setAttrEnumOperandLoc(attrs->getArgAsIdent(0)->Loc);
+ if (attr.isArgIdent(0))
+ TL.setAttrEnumOperandLoc(attr.getArgAsIdent(0)->Loc);
else
- TL.setAttrEnumOperandLoc(attrs->getArgAsExpr(0)->getExprLoc());
+ TL.setAttrEnumOperandLoc(attr.getArgAsExpr(0)->getExprLoc());
}
// FIXME: preserve this information to here.
@@ -5212,6 +5256,25 @@ static void fillAttributedTypeLoc(AttributedTypeLoc TL,
TL.setAttrOperandParensRange(SourceRange());
}
+static void fillAttributedTypeLoc(AttributedTypeLoc TL,
+ const ParsedAttributesView &Attrs,
+ const ParsedAttributesView &DeclAttrs) {
+ // DeclAttrs and Attrs cannot be both empty.
+ assert((!Attrs.empty() || !DeclAttrs.empty()) &&
+ "no type attributes in the expected location!");
+
+ ParsedAttr::Kind parsedKind = getAttrListKind(TL.getAttrKind());
+ // Try to search for an attribute of matching kind in Attrs list.
+ for (const ParsedAttr &AL : Attrs)
+ if (AL.getKind() == parsedKind)
+ return setAttributedTypeLoc(TL, AL);
+
+ for (const ParsedAttr &AL : DeclAttrs)
+    if (AL.isCXX11Attribute() && AL.getKind() == parsedKind)
+ return setAttributedTypeLoc(TL, AL);
+ llvm_unreachable("no matching type attribute in expected location!");
+}
+
namespace {
class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
ASTContext &Context;
@@ -5222,7 +5285,7 @@ namespace {
: Context(Context), DS(DS) {}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- fillAttributedTypeLoc(TL, DS.getAttributes().getList());
+ fillAttributedTypeLoc(TL, DS.getAttributes(), ParsedAttributesView{});
Visit(TL.getModifiedLoc());
}
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
@@ -5394,7 +5457,7 @@ namespace {
}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- fillAttributedTypeLoc(TL, Chunk.getAttrs());
+ fillAttributedTypeLoc(TL, Chunk.getAttrs(), ParsedAttributesView{});
}
void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing
@@ -5527,19 +5590,23 @@ static void fillAtomicQualLoc(AtomicTypeLoc ATL, const DeclaratorChunk &Chunk) {
ATL.setParensRange(SourceRange());
}
-static void fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
- const AttributeList *Attrs) {
- while (Attrs && Attrs->getKind() != AttributeList::AT_AddressSpace)
- Attrs = Attrs->getNext();
+static void
+fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
+ const ParsedAttributesView &Attrs) {
+ for (const ParsedAttr &AL : Attrs) {
+ if (AL.getKind() == ParsedAttr::AT_AddressSpace) {
+ DASTL.setAttrNameLoc(AL.getLoc());
+ DASTL.setAttrExprOperand(AL.getArgAsExpr(0));
+ DASTL.setAttrOperandParensRange(SourceRange());
+ return;
+ }
+ }
- assert(Attrs && "no address_space attribute found at the expected location!");
-
- DASTL.setAttrNameLoc(Attrs->getLoc());
- DASTL.setAttrExprOperand(Attrs->getArgAsExpr(0));
- DASTL.setAttrOperandParensRange(SourceRange());
+ llvm_unreachable(
+ "no address_space attribute found at the expected location!");
}
-/// \brief Create and instantiate a TypeSourceInfo with type source information.
+/// Create and instantiate a TypeSourceInfo with type source information.
///
/// \param T QualType referring to the type as written in source code.
///
@@ -5552,7 +5619,6 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo) {
TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T);
UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
- const AttributeList *DeclAttrs = D.getAttributes();
// Handle parameter packs whose type is a pack expansion.
if (isa<PackExpansionType>(T)) {
@@ -5576,7 +5642,8 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
}
while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) {
- fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs(), DeclAttrs);
+ fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs(),
+ D.getAttributes());
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
}
@@ -5601,7 +5668,7 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
return TInfo;
}
-/// \brief Create a LocInfoType to hold the given QualType and TypeSourceInfo.
+/// Create a LocInfoType to hold the given QualType and TypeSourceInfo.
ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
// FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
// and Sema during declaration parsing. Try deallocating/caching them when
@@ -5637,9 +5704,9 @@ TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
// to apply them to the actual parameter declaration.
// Likewise, we don't want to do this for alias declarations, because
// we are actually going to build a declaration from this eventually.
- if (D.getContext() != Declarator::ObjCParameterContext &&
- D.getContext() != Declarator::AliasDeclContext &&
- D.getContext() != Declarator::AliasTemplateContext)
+ if (D.getContext() != DeclaratorContext::ObjCParameterContext &&
+ D.getContext() != DeclaratorContext::AliasDeclContext &&
+ D.getContext() != DeclaratorContext::AliasTemplateContext)
checkUnusedDeclAttributes(D);
if (getLangOpts().CPlusPlus) {
@@ -5668,14 +5735,6 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
- // If this type is already address space qualified, reject it.
- // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified
- // by qualifiers for two or more different address spaces."
- if (T.getAddressSpace() != LangAS::Default) {
- Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers);
- return QualType();
- }
-
llvm::APSInt addrSpace(32);
if (!AddrSpace->isIntegerConstantExpr(addrSpace, Context)) {
Diag(AttrLoc, diag::err_attribute_argument_type)
@@ -5706,6 +5765,20 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
LangAS ASIdx =
getLangASFromTargetAS(static_cast<unsigned>(addrSpace.getZExtValue()));
+ // If this type is already address space qualified with a different
+ // address space, reject it.
+ // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified
+ // by qualifiers for two or more different address spaces."
+ if (T.getAddressSpace() != LangAS::Default) {
+ if (T.getAddressSpace() != ASIdx) {
+ Diag(AttrLoc, diag::err_attribute_address_multiple_qualifiers);
+ return QualType();
+ } else
+ // Emit a warning if they are identical; it's likely unintended.
+ Diag(AttrLoc,
+ diag::warn_attribute_address_multiple_identical_qualifiers);
+ }
+
return Context.getAddrSpaceQualType(T, ASIdx);
}
@@ -5726,16 +5799,7 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
/// specified type. The attribute contains 1 argument, the id of the address
/// space for the type.
static void HandleAddressSpaceTypeAttribute(QualType &Type,
- const AttributeList &Attr, Sema &S){
- // If this type is already address space qualified, reject it.
- // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified by
- // qualifiers for two or more different address spaces."
- if (Type.getAddressSpace() != LangAS::Default) {
- S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers);
- Attr.setInvalid();
- return;
- }
-
+ const ParsedAttr &Attr, Sema &S) {
// ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be
// qualified by an address-space qualifier."
if (Type->isFunctionType()) {
@@ -5745,7 +5809,7 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
}
LangAS ASIdx;
- if (Attr.getKind() == AttributeList::AT_AddressSpace) {
+ if (Attr.getKind() == ParsedAttr::AT_AddressSpace) {
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
@@ -5784,20 +5848,35 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
} else {
// The keyword-based type attributes imply which address space to use.
switch (Attr.getKind()) {
- case AttributeList::AT_OpenCLGlobalAddressSpace:
+ case ParsedAttr::AT_OpenCLGlobalAddressSpace:
ASIdx = LangAS::opencl_global; break;
- case AttributeList::AT_OpenCLLocalAddressSpace:
+ case ParsedAttr::AT_OpenCLLocalAddressSpace:
ASIdx = LangAS::opencl_local; break;
- case AttributeList::AT_OpenCLConstantAddressSpace:
+ case ParsedAttr::AT_OpenCLConstantAddressSpace:
ASIdx = LangAS::opencl_constant; break;
- case AttributeList::AT_OpenCLGenericAddressSpace:
+ case ParsedAttr::AT_OpenCLGenericAddressSpace:
ASIdx = LangAS::opencl_generic; break;
- case AttributeList::AT_OpenCLPrivateAddressSpace:
+ case ParsedAttr::AT_OpenCLPrivateAddressSpace:
ASIdx = LangAS::opencl_private; break;
default:
llvm_unreachable("Invalid address space");
}
+ // If this type is already address space qualified with a different
+ // address space, reject it.
+ // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified by
+ // qualifiers for two or more different address spaces."
+ if (Type.getAddressSpace() != LangAS::Default) {
+ if (Type.getAddressSpace() != ASIdx) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers);
+ Attr.setInvalid();
+ return;
+ } else
+ // Emit a warning if they are identical; it's likely unintended.
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_address_multiple_identical_qualifiers);
+ }
+
Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
}
}
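Both copies of this check, in BuildAddressSpaceAttr above and in the keyword-attribute path here, now distinguish conflicting qualifiers (still an error) from a repeated identical one (downgraded to a warning). A sketch with the GNU spelling (typedef names invented):

    typedef int __attribute__((address_space(1), address_space(2))) bad;
    // error: multiple different address spaces specified
    typedef int __attribute__((address_space(1), address_space(1))) dup;
    // warning: identical address space qualifiers repeated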
@@ -5837,8 +5916,7 @@ static bool hasDirectOwnershipQualifier(QualType type) {
///
/// Returns 'true' if the attribute was handled.
static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
- AttributeList &attr,
- QualType &type) {
+ ParsedAttr &attr, QualType &type) {
bool NonObjCPointer = false;
if (!type->isDependentType() && !type->isUndeducedType()) {
@@ -5867,7 +5945,8 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
Sema &S = state.getSema();
SourceLocation AttrLoc = attr.getLoc();
if (AttrLoc.isMacroID())
- AttrLoc = S.getSourceManager().getImmediateExpansionRange(AttrLoc).first;
+ AttrLoc =
+ S.getSourceManager().getImmediateExpansionRange(AttrLoc).getBegin();
if (!attr.isArgIdent(0)) {
S.Diag(AttrLoc, diag::err_attribute_argument_type)
@@ -6022,8 +6101,7 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
/// attribute on the specified type. Returns true to indicate that
/// the attribute was handled, false to indicate that the type does
/// not permit the attribute.
-static bool handleObjCGCTypeAttr(TypeProcessingState &state,
- AttributeList &attr,
+static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type) {
Sema &S = state.getSema();
@@ -6215,11 +6293,10 @@ namespace {
} // end anonymous namespace
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
- AttributeList &Attr,
- QualType &Type) {
+ ParsedAttr &Attr, QualType &Type) {
Sema &S = State.getSema();
- AttributeList::Kind Kind = Attr.getKind();
+ ParsedAttr::Kind Kind = Attr.getKind();
QualType Desugared = Type;
const AttributedType *AT = dyn_cast<AttributedType>(Type);
while (AT) {
@@ -6236,16 +6313,16 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
// You cannot have both __sptr and __uptr on the same type, nor can you
// have __ptr32 and __ptr64.
if ((CurAttrKind == AttributedType::attr_ptr32 &&
- Kind == AttributeList::AT_Ptr64) ||
+ Kind == ParsedAttr::AT_Ptr64) ||
(CurAttrKind == AttributedType::attr_ptr64 &&
- Kind == AttributeList::AT_Ptr32)) {
+ Kind == ParsedAttr::AT_Ptr32)) {
S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__ptr32'" << "'__ptr64'";
return true;
} else if ((CurAttrKind == AttributedType::attr_sptr &&
- Kind == AttributeList::AT_UPtr) ||
+ Kind == ParsedAttr::AT_UPtr) ||
(CurAttrKind == AttributedType::attr_uptr &&
- Kind == AttributeList::AT_SPtr)) {
+ Kind == ParsedAttr::AT_SPtr)) {
S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__sptr'" << "'__uptr'";
return true;
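The pairwise exclusions above, sketched with the Microsoft pointer-size qualifiers (requires -fms-extensions; names invented):

    int *__ptr32 p;            // OK
    int *__ptr32 __ptr64 q;    // error: '__ptr32' and '__ptr64' attributes
                               // are not compatible
    int *__sptr __uptr r;      // error: '__sptr' and '__uptr' attributes
                               // are not compatible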
@@ -6270,10 +6347,18 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
AttributedType::Kind TAK;
switch (Kind) {
default: llvm_unreachable("Unknown attribute kind");
- case AttributeList::AT_Ptr32: TAK = AttributedType::attr_ptr32; break;
- case AttributeList::AT_Ptr64: TAK = AttributedType::attr_ptr64; break;
- case AttributeList::AT_SPtr: TAK = AttributedType::attr_sptr; break;
- case AttributeList::AT_UPtr: TAK = AttributedType::attr_uptr; break;
+ case ParsedAttr::AT_Ptr32:
+ TAK = AttributedType::attr_ptr32;
+ break;
+ case ParsedAttr::AT_Ptr64:
+ TAK = AttributedType::attr_ptr64;
+ break;
+ case ParsedAttr::AT_SPtr:
+ TAK = AttributedType::attr_sptr;
+ break;
+ case ParsedAttr::AT_UPtr:
+ TAK = AttributedType::attr_uptr;
+ break;
}
Type = S.Context.getAttributedType(TAK, Type, Type);
@@ -6424,15 +6509,15 @@ bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) {
}
/// Map a nullability attribute kind to a nullability kind.
-static NullabilityKind mapNullabilityAttrKind(AttributeList::Kind kind) {
+static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
switch (kind) {
- case AttributeList::AT_TypeNonNull:
+ case ParsedAttr::AT_TypeNonNull:
return NullabilityKind::NonNull;
- case AttributeList::AT_TypeNullable:
+ case ParsedAttr::AT_TypeNullable:
return NullabilityKind::Nullable;
- case AttributeList::AT_TypeNullUnspecified:
+ case ParsedAttr::AT_TypeNullUnspecified:
return NullabilityKind::Unspecified;
default:
@@ -6447,15 +6532,14 @@ static NullabilityKind mapNullabilityAttrKind(AttributeList::Kind kind) {
/// \returns true if the nullability annotation was distributed, false
/// otherwise.
static bool distributeNullabilityTypeAttr(TypeProcessingState &state,
- QualType type,
- AttributeList &attr) {
+ QualType type, ParsedAttr &attr) {
Declarator &declarator = state.getDeclarator();
/// Attempt to move the attribute to the specified chunk.
auto moveToChunk = [&](DeclaratorChunk &chunk, bool inFunction) -> bool {
// If there is already a nullability attribute there, don't add
// one.
- if (hasNullabilityAttr(chunk.getAttrListRef()))
+ if (hasNullabilityAttr(chunk.getAttrs()))
return false;
// Complain about the nullability qualifier being in the wrong
@@ -6488,8 +6572,8 @@ static bool distributeNullabilityTypeAttr(TypeProcessingState &state,
" " + attr.getName()->getName().str() + " ");
}
- moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
- chunk.getAttrListRef());
+ moveAttrFromListToList(attr, state.getCurrentAttributes(),
+ chunk.getAttrs());
return true;
};
@@ -6528,28 +6612,28 @@ static bool distributeNullabilityTypeAttr(TypeProcessingState &state,
return false;
}
-static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) {
+static AttributedType::Kind getCCTypeAttrKind(ParsedAttr &Attr) {
assert(!Attr.isInvalid());
switch (Attr.getKind()) {
default:
llvm_unreachable("not a calling convention attribute");
- case AttributeList::AT_CDecl:
+ case ParsedAttr::AT_CDecl:
return AttributedType::attr_cdecl;
- case AttributeList::AT_FastCall:
+ case ParsedAttr::AT_FastCall:
return AttributedType::attr_fastcall;
- case AttributeList::AT_StdCall:
+ case ParsedAttr::AT_StdCall:
return AttributedType::attr_stdcall;
- case AttributeList::AT_ThisCall:
+ case ParsedAttr::AT_ThisCall:
return AttributedType::attr_thiscall;
- case AttributeList::AT_RegCall:
+ case ParsedAttr::AT_RegCall:
return AttributedType::attr_regcall;
- case AttributeList::AT_Pascal:
+ case ParsedAttr::AT_Pascal:
return AttributedType::attr_pascal;
- case AttributeList::AT_SwiftCall:
+ case ParsedAttr::AT_SwiftCall:
return AttributedType::attr_swiftcall;
- case AttributeList::AT_VectorCall:
+ case ParsedAttr::AT_VectorCall:
return AttributedType::attr_vectorcall;
- case AttributeList::AT_Pcs: {
+ case ParsedAttr::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
// but the form may not be.
@@ -6562,15 +6646,15 @@ static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) {
.Case("aapcs", AttributedType::attr_pcs)
.Case("aapcs-vfp", AttributedType::attr_pcs_vfp);
}
- case AttributeList::AT_IntelOclBicc:
+ case ParsedAttr::AT_IntelOclBicc:
return AttributedType::attr_inteloclbicc;
- case AttributeList::AT_MSABI:
+ case ParsedAttr::AT_MSABI:
return AttributedType::attr_ms_abi;
- case AttributeList::AT_SysVABI:
+ case ParsedAttr::AT_SysVABI:
return AttributedType::attr_sysv_abi;
- case AttributeList::AT_PreserveMost:
+ case ParsedAttr::AT_PreserveMost:
return AttributedType::attr_preserve_most;
- case AttributeList::AT_PreserveAll:
+ case ParsedAttr::AT_PreserveAll:
return AttributedType::attr_preserve_all;
}
llvm_unreachable("unexpected attribute kind!");
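The AT_Pcs case is the only one that inspects its argument: the string, possibly produced by the identifier fix-it the comment mentions, picks between the two AAPCS variants. Sketch (ARM; names invented):

    void callee(float) __attribute__((pcs("aapcs-vfp")));  // attr_pcs_vfp
    void caller(float) __attribute__((pcs("aapcs")));      // attr_pcs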
@@ -6578,15 +6662,14 @@ static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) {
/// Process an individual function attribute. Returns true to
/// indicate that the attribute was handled, false if it wasn't.
-static bool handleFunctionTypeAttr(TypeProcessingState &state,
- AttributeList &attr,
+static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type) {
Sema &S = state.getSema();
FunctionTypeUnwrapper unwrapped(S, type);
- if (attr.getKind() == AttributeList::AT_NoReturn) {
- if (S.CheckNoReturnAttr(attr))
+ if (attr.getKind() == ParsedAttr::AT_NoReturn) {
+ if (S.CheckAttrNoArgs(attr))
return true;
// Delay if this is not a function type.
@@ -6601,7 +6684,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
// ns_returns_retained is not always a type attribute, but if we got
// here, we're treating it as one right now.
- if (attr.getKind() == AttributeList::AT_NSReturnsRetained) {
+ if (attr.getKind() == ParsedAttr::AT_NSReturnsRetained) {
if (attr.getNumArgs()) return true;
// Delay if this is not a function type.
@@ -6625,8 +6708,8 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
return true;
}
- if (attr.getKind() == AttributeList::AT_AnyX86NoCallerSavedRegisters) {
- if (S.CheckNoCallerSavedRegsAttr(attr))
+ if (attr.getKind() == ParsedAttr::AT_AnyX86NoCallerSavedRegisters) {
+ if (S.CheckAttrTarget(attr) || S.CheckAttrNoArgs(attr))
return true;
// Delay if this is not a function type.
@@ -6639,7 +6722,28 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
return true;
}
- if (attr.getKind() == AttributeList::AT_Regparm) {
+ if (attr.getKind() == ParsedAttr::AT_AnyX86NoCfCheck) {
+ if (!S.getLangOpts().CFProtectionBranch) {
+ S.Diag(attr.getLoc(), diag::warn_nocf_check_attribute_ignored);
+ attr.setInvalid();
+ return true;
+ }
+
+ if (S.CheckAttrTarget(attr) || S.CheckAttrNoArgs(attr))
+ return true;
+
+  // If this is not a function type, the attribute's subject check will
+  // diagnose it instead.
+ if (!unwrapped.isFunctionType())
+ return true;
+
+ FunctionType::ExtInfo EI =
+ unwrapped.get()->getExtInfo().withNoCfCheck(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ if (attr.getKind() == ParsedAttr::AT_Regparm) {
unsigned value;
if (S.CheckRegparmAttr(attr, value))
return true;
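On the AT_AnyX86NoCfCheck block added above: the attribute feeds FunctionType::ExtInfo, so it becomes part of the function type, and it is only honored when CET branch protection is enabled. Sketch (x86; the declaration is invented):

    // requires -fcf-protection=branch; otherwise: warning, attribute ignored
    void (*fp)(void) __attribute__((nocf_check));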
@@ -6796,7 +6900,7 @@ void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
/// The raw attribute should contain precisely 1 argument, the vector size for
/// the variable, measured in bytes. If curType and rawAttr are well formed,
/// this routine will return a new vector type.
-static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
+static void HandleVectorSizeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
@@ -6805,58 +6909,35 @@ static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
Attr.setInvalid();
return;
}
- Expr *sizeExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
- llvm::APSInt vecSize(32);
- if (sizeExpr->isTypeDependent() || sizeExpr->isValueDependent() ||
- !sizeExpr->isIntegerConstantExpr(vecSize, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentIntegerConstant
- << sizeExpr->getSourceRange();
- Attr.setInvalid();
- return;
- }
- // The base type must be integer (not Boolean or enumeration) or float, and
- // can't already be a vector.
- if (!CurType->isBuiltinType() || CurType->isBooleanType() ||
- (!CurType->isIntegerType() && !CurType->isRealFloatingType())) {
- S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
- Attr.setInvalid();
- return;
- }
- unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
- // vecSize is specified in bytes - convert to bits.
- unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue() * 8);
- // the vector size needs to be an integral multiple of the type size.
- if (vectorSize % typeSize) {
- S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size)
- << sizeExpr->getSourceRange();
- Attr.setInvalid();
- return;
- }
- if (VectorType::isVectorSizeTooLarge(vectorSize / typeSize)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_size_too_large)
- << sizeExpr->getSourceRange();
- Attr.setInvalid();
- return;
- }
- if (vectorSize == 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_zero_size)
- << sizeExpr->getSourceRange();
- Attr.setInvalid();
- return;
+ Expr *SizeExpr;
+ // Special case where the argument is a template id.
+ if (Attr.isArgIdent(0)) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Id;
+ Id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc());
+
+ ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
+ Id, false, false);
+
+ if (Size.isInvalid())
+ return;
+ SizeExpr = Size.get();
+ } else {
+ SizeExpr = Attr.getArgAsExpr(0);
}
- // Success! Instantiate the vector type, the number of elements is > 0, and
- // not required to be a power of 2, unlike GCC.
- CurType = S.Context.getVectorType(CurType, vectorSize/typeSize,
- VectorType::GenericVector);
+ QualType T = S.BuildVectorType(CurType, SizeExpr, Attr.getLoc());
+ if (!T.isNull())
+ CurType = T;
+ else
+ Attr.setInvalid();
}
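The rewrite funnels both argument forms through Sema::BuildVectorType, so the size and element-type checks formerly inlined here live in one place; the isArgIdent special case covers an argument parsed as a bare identifier, such as a template parameter. Hypothetical sketch, assuming the dependent form is accepted end to end (names invented):

    template <int N>
    struct V {
      typedef int type __attribute__((vector_size(N)));
    };
    V<16>::type v;   // 16 bytes, i.e. a vector of four ints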
-/// \brief Process the OpenCL-like ext_vector_type attribute when it occurs on
+/// Process the OpenCL-like ext_vector_type attribute when it occurs on
/// a type.
-static void HandleExtVectorTypeAttr(QualType &CurType,
- const AttributeList &Attr,
+static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() != 1) {
@@ -6945,9 +7026,8 @@ static bool isPermittedNeonBaseType(QualType &Ty,
/// the argument to these Neon attributes is the number of vector elements,
/// not the vector size in bytes. The vector width and element type must
/// match one of the standard Neon vector types.
-static void HandleNeonVectorTypeAttr(QualType& CurType,
- const AttributeList &Attr, Sema &S,
- VectorType::VectorKind VecKind) {
+static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
+ Sema &S, VectorType::VectorKind VecKind) {
// Target must have NEON
if (!S.Context.getTargetInfo().hasFeature("neon")) {
S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr.getName();
@@ -6993,7 +7073,7 @@ static void HandleNeonVectorTypeAttr(QualType& CurType,
}
/// Handle OpenCL Access Qualifier Attribute.
-static void HandleOpenCLAccessAttr(QualType &CurType, const AttributeList &Attr,
+static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// OpenCL v2.0 s6.6 - Access qualifier can be used only for image and pipe type.
if (!(CurType->isImageType() || CurType->isPipeType())) {
@@ -7033,12 +7113,12 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
// Handle the cases where address space should not be deduced.
//
- // The pointee type of a pointer type is alwasy deduced since a pointer always
+ // The pointee type of a pointer type is always deduced since a pointer always
  // points to some memory location which should have an address space.
//
// There are situations that at the point of certain declarations, the address
// space may be unknown and better to be left as default. For example, when
- // definining a typedef or struct type, they are not associated with any
+ // defining a typedef or struct type, they are not associated with any
// specific address space. Later on, they may be used with any address space
// to declare a variable.
//
@@ -7066,7 +7146,7 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
IsFuncReturnType || IsFuncType ||
// Do not deduce addr space for member types of struct, except the pointee
// type of a pointer member type.
- (D.getContext() == Declarator::MemberContext && !IsPointee) ||
+ (D.getContext() == DeclaratorContext::MemberContext && !IsPointee) ||
// Do not deduce addr space for types used to define a typedef and the
// typedef itself, except the pointee type of a pointer type which is used
// to define the typedef.
@@ -7083,8 +7163,9 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
// The default address space name for arguments to a function in a
// program, or local variables of a function is __private. All function
// arguments shall be in the __private address space.
- if (State.getSema().getLangOpts().OpenCLVersion <= 120) {
- ImpAddr = LangAS::opencl_private;
+ if (State.getSema().getLangOpts().OpenCLVersion <= 120 &&
+ !State.getSema().getLangOpts().OpenCLCPlusPlus) {
+ ImpAddr = LangAS::opencl_private;
} else {
// If address space is not set, OpenCL 2.0 defines non private default
// address spaces for some cases:
@@ -7098,7 +7179,7 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
if (IsPointee) {
ImpAddr = LangAS::opencl_generic;
} else {
- if (D.getContext() == Declarator::FileContext) {
+ if (D.getContext() == DeclaratorContext::FileContext) {
ImpAddr = LangAS::opencl_global;
} else {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
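(The !OpenCLCPlusPlus test added above routes OpenCL C++ to this OpenCL 2.0-style deduction instead of the pre-2.0 __private default.) A sketch of the deduced address spaces (OpenCL mode; declarations invented):

    // -cl-std=CL2.0:
    int g;             // file scope: deduced __global
    void f(int *p);    // pointee: deduced __generic
    // -cl-std=CL1.2: unqualified locals and parameters default to __private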
@@ -7114,16 +7195,18 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
}
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
- TypeAttrLocation TAL, AttributeList *attrs) {
+ TypeAttrLocation TAL,
+ ParsedAttributesView &attrs) {
// Scan through and apply attributes to this type where it makes sense. Some
// attributes (such as __address_space__, __vector_size__, etc) apply to the
// type, but others can be present in the type specifiers even though they
// apply to the decl. Here we apply type attributes and ignore the rest.
- while (attrs) {
- AttributeList &attr = *attrs;
- attrs = attr.getNext(); // reset to the next here due to early loop continue
- // stmts
+ // This loop modifies the list pretty frequently, but we still need to make
+ // sure we visit every element once. Copy the attributes list, and iterate
+ // over that.
+ ParsedAttributesView AttrsCopy{attrs};
+ for (ParsedAttr &attr : AttrsCopy) {
// Skip attributes that were marked to be invalid.
if (attr.isInvalid())
@@ -7131,14 +7214,19 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (attr.isCXX11Attribute()) {
// [[gnu::...]] attributes are treated as declaration attributes, so may
- // not appertain to a DeclaratorChunk, even if we handle them as type
- // attributes.
+ // not appertain to a DeclaratorChunk. If we handle them as type
+ // attributes, accept them in that position and diagnose the GCC
+ // incompatibility.
if (attr.getScopeName() && attr.getScopeName()->isStr("gnu")) {
+ bool IsTypeAttr = attr.isTypeAttr();
if (TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(),
- diag::warn_cxx11_gnu_attribute_on_type)
+ IsTypeAttr
+ ? diag::warn_gcc_ignores_type_attr
+ : diag::warn_cxx11_gnu_attribute_on_type)
<< attr.getName();
- continue;
+ if (!IsTypeAttr)
+ continue;
}
} else if (TAL != TAL_DeclChunk) {
// Otherwise, only consider type processing for a C++11 attribute if
@@ -7159,27 +7247,27 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
}
break;
- case AttributeList::UnknownAttribute:
+ case ParsedAttr::UnknownAttribute:
if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk)
state.getSema().Diag(attr.getLoc(),
diag::warn_unknown_attribute_ignored)
<< attr.getName();
break;
- case AttributeList::IgnoredAttribute:
+ case ParsedAttr::IgnoredAttribute:
break;
- case AttributeList::AT_MayAlias:
+ case ParsedAttr::AT_MayAlias:
// FIXME: This attribute needs to actually be handled, but if we ignore
// it it breaks large amounts of Linux software.
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_OpenCLPrivateAddressSpace:
- case AttributeList::AT_OpenCLGlobalAddressSpace:
- case AttributeList::AT_OpenCLLocalAddressSpace:
- case AttributeList::AT_OpenCLConstantAddressSpace:
- case AttributeList::AT_OpenCLGenericAddressSpace:
- case AttributeList::AT_AddressSpace:
+ case ParsedAttr::AT_OpenCLPrivateAddressSpace:
+ case ParsedAttr::AT_OpenCLGlobalAddressSpace:
+ case ParsedAttr::AT_OpenCLLocalAddressSpace:
+ case ParsedAttr::AT_OpenCLConstantAddressSpace:
+ case ParsedAttr::AT_OpenCLGenericAddressSpace:
+ case ParsedAttr::AT_AddressSpace:
HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
@@ -7188,25 +7276,25 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
distributeObjCPointerTypeAttr(state, attr, type);
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_VectorSize:
+ case ParsedAttr::AT_VectorSize:
HandleVectorSizeAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_ExtVectorType:
+ case ParsedAttr::AT_ExtVectorType:
HandleExtVectorTypeAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_NeonVectorType:
+ case ParsedAttr::AT_NeonVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonVector);
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_NeonPolyVectorType:
+ case ParsedAttr::AT_NeonPolyVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonPolyVector);
attr.setUsedAsTypeAttr();
break;
- case AttributeList::AT_OpenCLAccess:
+ case ParsedAttr::AT_OpenCLAccess:
HandleOpenCLAccessAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
break;
@@ -7245,7 +7333,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
}
break;
- case AttributeList::AT_ObjCKindOf:
+ case ParsedAttr::AT_ObjCKindOf:
// '__kindof' must be part of the decl-specifiers.
switch (TAL) {
case TAL_DeclSpec:
@@ -7331,7 +7419,7 @@ void Sema::completeExprArrayBound(Expr *E) {
}
}
-/// \brief Ensure that the type of the given expression is complete.
+/// Ensure that the type of the given expression is complete.
///
/// This routine checks whether the expression \p E has a complete type. If the
/// expression refers to an instantiable construct, that instantiation is
@@ -7368,7 +7456,7 @@ bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) {
return RequireCompleteExprType(E, Diagnoser);
}
-/// @brief Ensure that the type T is a complete type.
+/// Ensure that the type T is a complete type.
///
/// This routine checks whether the type @p T is complete in any
/// context where a complete type is required. If @p T is a complete
@@ -7407,12 +7495,13 @@ bool Sema::hasStructuralCompatLayout(Decl *D, Decl *Suggested) {
// and isolate from other C++ specific checks.
StructuralEquivalenceContext Ctx(
D->getASTContext(), Suggested->getASTContext(), NonEquivalentDecls,
+ StructuralEquivalenceKind::Default,
false /*StrictTypeSpelling*/, true /*Complain*/,
true /*ErrorOnTagTypeMismatch*/);
- return Ctx.IsStructurallyEquivalent(D, Suggested);
+ return Ctx.IsEquivalent(D, Suggested);
}
-/// \brief Determine whether there is any declaration of \p D that was ever a
+/// Determine whether there is any declaration of \p D that was ever a
/// definition (perhaps before module merging) and is currently visible.
/// \param D The definition of the entity.
/// \param Suggested Filled in with the declaration that should be made visible
@@ -7482,7 +7571,7 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
/// Locks in the inheritance model for the given class and all of its bases.
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
- RD = RD->getMostRecentDecl();
+ RD = RD->getMostRecentNonInjectedDecl();
if (!RD->hasAttr<MSInheritanceAttr>()) {
MSInheritanceAttr::Spelling IM;
@@ -7512,7 +7601,7 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
}
}
-/// \brief The implementation of RequireCompleteType
+/// The implementation of RequireCompleteType
bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser) {
// FIXME: Add this assertion to make sure we always get instantiation points.
@@ -7523,11 +7612,17 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// assert(!T->isDependentType() &&
// "Can't ask whether a dependent type is complete");
- // We lock in the inheritance model once somebody has asked us to ensure
- // that a pointer-to-member type is complete.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
- if (!MPTy->getClass()->isDependentType()) {
+ if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) {
+ if (!MPTy->getClass()->isDependentType()) {
+ if (getLangOpts().CompleteMemberPointers &&
+ !MPTy->getClass()->getAsCXXRecordDecl()->isBeingDefined() &&
+ RequireCompleteType(Loc, QualType(MPTy->getClass(), 0),
+ diag::err_memptr_incomplete))
+ return true;
+
+ // We lock in the inheritance model once somebody has asked us to ensure
+ // that a pointer-to-member type is complete.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
(void)isCompleteType(Loc, QualType(MPTy->getClass(), 0));
assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl());
}
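The new CompleteMemberPointers check implements -fcomplete-member-pointers; a sketch of what it now rejects:

    struct S;              // declared but not defined
    // With -fcomplete-member-pointers this is diagnosed
    // (diag::err_memptr_incomplete): the class must be complete so ABIs
    // whose member-pointer width depends on the inheritance model
    // (notably the Microsoft ABI) can choose a representation.
    int S::*p = nullptr;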
@@ -7551,10 +7646,18 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = Diagnoser && !isSFINAEContext();
- if (Diagnoser)
+ if (Diagnoser && SuggestedDef)
diagnoseMissingImport(Loc, SuggestedDef, MissingImportKind::Definition,
/*Recover*/TreatAsComplete);
return !TreatAsComplete;
+ } else if (Def && !TemplateInstCallbacks.empty()) {
+ CodeSynthesisContext TempInst;
+ TempInst.Kind = CodeSynthesisContext::Memoization;
+ TempInst.Template = Def;
+ TempInst.Entity = Def;
+ TempInst.PointOfInstantiation = Loc;
+ atTemplateBegin(TemplateInstCallbacks, *this, TempInst);
+ atTemplateEnd(TemplateInstCallbacks, *this, TempInst);
}
return false;
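The new else-if branch emits a Memoization event when an already-complete type is (re)used, so tracing tools see skipped instantiations too. A minimal observer sketch, assuming the TemplateInstantiationCallback interface from clang/Sema/TemplateInstCallback.h:

    class MemoLogger : public TemplateInstantiationCallback {
      void initialize(const Sema &) override {}
      void finalize(const Sema &) override {}
      void atTemplateBegin(const Sema &,
                           const Sema::CodeSynthesisContext &I) override {
        if (I.Kind == Sema::CodeSynthesisContext::Memoization)
          llvm::errs() << "memoized completeness check\n";
      }
      void atTemplateEnd(const Sema &,
                         const Sema::CodeSynthesisContext &) override {}
    };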
@@ -7683,7 +7786,7 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
return RequireCompleteType(Loc, T, Diagnoser);
}
-/// \brief Get diagnostic %select index for tag kind for
+/// Get diagnostic %select index for tag kind for
/// literal type diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
@@ -7697,7 +7800,7 @@ static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
}
}
-/// @brief Ensure that the type T is a literal type.
+/// Ensure that the type T is a literal type.
///
/// This routine checks whether the type @p T is a literal type. If @p T is an
/// incomplete type, an attempt is made to complete it. If @p T is a literal
@@ -7741,6 +7844,13 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T))
return true;
+ // [expr.prim.lambda]p3:
+ // This class type is [not] a literal type.
+ if (RD->isLambda() && !getLangOpts().CPlusPlus17) {
+ Diag(RD->getLocation(), diag::note_non_literal_lambda);
+ return true;
+ }
+
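What the new note diagnoses, sketched (closure types only become literal types with C++17, where lambdas are implicitly constexpr when possible):

    auto f = [] { return 1; };
    constexpr int n = f();  // C++14: error, closure type is not literal
                            // C++17: OK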
// If the class has virtual base classes, then it's not an aggregate, and
// cannot have any constexpr constructors or a trivial default constructor,
// so is non-literal. This is better to diagnose than the resulting absence
@@ -7784,7 +7894,8 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
diag::note_non_literal_user_provided_dtor :
diag::note_non_literal_nontrivial_dtor) << RD;
if (!Dtor->isUserProvided())
- SpecialMemberIsTrivial(Dtor, CXXDestructor, /*Diagnose*/true);
+ SpecialMemberIsTrivial(Dtor, CXXDestructor, TAH_IgnoreTrivialABI,
+ /*Diagnose*/true);
}
return true;
@@ -7795,10 +7906,12 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireLiteralType(Loc, T, Diagnoser);
}
-/// \brief Retrieve a version of the type 'T' that is elaborated by Keyword
-/// and qualified by the nested-name-specifier contained in SS.
+/// Retrieve a version of the type 'T' that is elaborated by Keyword, qualified
+/// by the nested-name-specifier contained in SS, and that is (re)declared by
+/// OwnedTagDecl, which is nullptr if this is not a (re)declaration.
QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
- const CXXScopeSpec &SS, QualType T) {
+ const CXXScopeSpec &SS, QualType T,
+ TagDecl *OwnedTagDecl) {
if (T.isNull())
return T;
NestedNameSpecifier *NNS;
@@ -7809,7 +7922,7 @@ QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
return T;
NNS = nullptr;
}
- return Context.getElaboratedType(Keyword, NNS, T);
+ return Context.getElaboratedType(Keyword, NNS, T, OwnedTagDecl);
}
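Roughly, the new OwnedTagDecl parameter distinguishes these two forms:

    struct S { int x; } s;  // elaborated type that also (re)declares S:
                            // OwnedTagDecl is the definition of S
    struct S *p;            // plain elaborated reference: OwnedTagDecl == nullptr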
QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
@@ -7845,11 +7958,12 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
//
// We apply the same rules for Objective-C ivar and property references.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
- return VD->getType();
+ const ValueDecl *VD = DRE->getDecl();
+ return VD->getType();
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
- return FD->getType();
+ if (const ValueDecl *VD = ME->getMemberDecl())
+ if (isa<FieldDecl>(VD) || isa<VarDecl>(VD))
+ return VD->getType();
} else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(E)) {
return IR->getDecl()->getType();
} else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) {
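These cases implement the [dcl.type.simple] rule that decltype of an unparenthesized id-expression or class member access yields the declared type; the member case now accepts any field or variable member rather than fields only. For example:

    #include <type_traits>
    int x = 0;
    struct A { int m; };
    A a;
    static_assert(std::is_same<decltype(x), int>::value, "");    // declared type
    static_assert(std::is_same<decltype(a.m), int>::value, "");  // member: int
    static_assert(std::is_same<decltype((x)), int&>::value, ""); // parenthesized:
                                                                 // lvalue -> int&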
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index 96969ea87a13..3b3953991000 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -41,7 +41,7 @@
namespace clang {
using namespace sema;
-/// \brief A semantic tree transformation that allows one to transform one
+/// A semantic tree transformation that allows one to transform one
/// abstract syntax tree into another.
///
/// A new tree transformation is defined by creating a new subclass \c X of
@@ -57,7 +57,7 @@ using namespace sema;
/// subclasses to customize any of its operations. Thus, a subclass can
/// override any of the transformation or rebuild operators by providing an
/// operation with the same signature as the default implementation. The
-/// overridding function should not be virtual.
+/// overriding function should not be virtual.
///
/// Semantic tree transformations are split into two stages, either of which
/// can be replaced by a subclass. The "transform" step transforms an AST node
@@ -94,7 +94,7 @@ using namespace sema;
/// (\c getBaseLocation(), \c getBaseEntity()).
template<typename Derived>
class TreeTransform {
- /// \brief Private RAII object that helps us forget and then re-remember
+ /// Private RAII object that helps us forget and then re-remember
/// the template argument corresponding to a partially-substituted parameter
/// pack.
class ForgetPartiallySubstitutedPackRAII {
@@ -114,19 +114,19 @@ class TreeTransform {
protected:
Sema &SemaRef;
- /// \brief The set of local declarations that have been transformed, for
+ /// The set of local declarations that have been transformed, for
/// cases where we are forced to build new declarations within the transformer
/// rather than in the subclass (e.g., lambda closure types).
llvm::DenseMap<Decl *, Decl *> TransformedLocalDecls;
public:
- /// \brief Initializes a new tree transformer.
+ /// Initializes a new tree transformer.
TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { }
- /// \brief Retrieves a reference to the derived class.
+ /// Retrieves a reference to the derived class.
Derived &getDerived() { return static_cast<Derived&>(*this); }
- /// \brief Retrieves a reference to the derived class.
+ /// Retrieves a reference to the derived class.
const Derived &getDerived() const {
return static_cast<const Derived&>(*this);
}
@@ -134,11 +134,11 @@ public:
static inline ExprResult Owned(Expr *E) { return E; }
static inline StmtResult Owned(Stmt *S) { return S; }
- /// \brief Retrieves a reference to the semantic analysis object used for
+ /// Retrieves a reference to the semantic analysis object used for
/// this tree transform.
Sema &getSema() const { return SemaRef; }
- /// \brief Whether the transformation should always rebuild AST nodes, even
+ /// Whether the transformation should always rebuild AST nodes, even
/// if none of the children have changed.
///
/// Subclasses may override this function to specify when the transformation
@@ -149,7 +149,7 @@ public:
/// statement node appears at most once in its containing declaration.
bool AlwaysRebuild() { return SemaRef.ArgumentPackSubstitutionIndex != -1; }
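A minimal sketch (hypothetical subclass, not part of this patch) of the CRTP pattern the class describes: override a hook with the same non-virtual signature and the base class reaches it through getDerived():

    class MyTransform : public TreeTransform<MyTransform> {
    public:
      MyTransform(Sema &S) : TreeTransform<MyTransform>(S) {}
      // Shadows the default above: rebuild every node, even unchanged ones.
      bool AlwaysRebuild() { return true; }
    };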
- /// \brief Returns the location of the entity being transformed, if that
+ /// Returns the location of the entity being transformed, if that
/// information was not available elsewhere in the AST.
///
/// By default, returns no source-location information. Subclasses can
@@ -157,21 +157,21 @@ public:
/// information.
SourceLocation getBaseLocation() { return SourceLocation(); }
- /// \brief Returns the name of the entity being transformed, if that
+ /// Returns the name of the entity being transformed, if that
/// information was not available elsewhere in the AST.
///
/// By default, returns an empty name. Subclasses can provide an alternative
/// implementation with a more precise name.
DeclarationName getBaseEntity() { return DeclarationName(); }
- /// \brief Sets the "base" location and entity when that
+ /// Sets the "base" location and entity when that
/// information is known based on another transformation.
///
/// By default, the source location and entity are ignored. Subclasses can
/// override this function to provide a customized implementation.
void setBase(SourceLocation Loc, DeclarationName Entity) { }
- /// \brief RAII object that temporarily sets the base location and entity
+ /// RAII object that temporarily sets the base location and entity
/// used for reporting diagnostics in types.
class TemporaryBase {
TreeTransform &Self;
@@ -193,7 +193,7 @@ public:
}
};
- /// \brief Determine whether the given type \p T has already been
+ /// Determine whether the given type \p T has already been
/// transformed.
///
/// Subclasses can provide an alternative implementation of this routine
@@ -204,7 +204,7 @@ public:
return T.isNull();
}
- /// \brief Determine whether the given call argument should be dropped, e.g.,
+ /// Determine whether the given call argument should be dropped, e.g.,
/// because it is a default argument.
///
/// Subclasses can provide an alternative implementation of this routine to
@@ -214,7 +214,7 @@ public:
return E->isDefaultArgument();
}
- /// \brief Determine whether we should expand a pack expansion with the
+ /// Determine whether we should expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
@@ -261,7 +261,7 @@ public:
return false;
}
- /// \brief "Forget" about the partially-substituted pack template argument,
+ /// "Forget" about the partially-substituted pack template argument,
/// when performing an instantiation that must preserve the parameter pack
/// use.
///
@@ -270,18 +270,18 @@ public:
return TemplateArgument();
}
- /// \brief "Remember" the partially-substituted pack template argument
+ /// "Remember" the partially-substituted pack template argument
/// after performing an instantiation that must preserve the parameter pack
/// use.
///
/// This routine is meant to be overridden by the template instantiator.
void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }
- /// \brief Note to the derived class when a function parameter pack is
+ /// Note to the derived class when a function parameter pack is
/// being expanded.
void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }
- /// \brief Transforms the given type into another type.
+ /// Transforms the given type into another type.
///
/// By default, this routine transforms a type by creating a
/// TypeSourceInfo for it and delegating to the appropriate
@@ -292,7 +292,7 @@ public:
/// \returns the transformed type.
QualType TransformType(QualType T);
- /// \brief Transforms the given type-with-location into a new
+ /// Transforms the given type-with-location into a new
/// type-with-location.
///
/// By default, this routine transforms a type by delegating to the
@@ -302,13 +302,13 @@ public:
/// to alter the transformation.
TypeSourceInfo *TransformType(TypeSourceInfo *DI);
- /// \brief Transform the given type-with-location into a new
+ /// Transform the given type-with-location into a new
/// type, collecting location information in the given builder
/// as necessary.
///
QualType TransformType(TypeLocBuilder &TLB, TypeLoc TL);
- /// \brief Transform a type that is permitted to produce a
+ /// Transform a type that is permitted to produce a
/// DeducedTemplateSpecializationType.
///
/// This is used in the (relatively rare) contexts where it is acceptable
@@ -319,7 +319,7 @@ public:
TypeSourceInfo *TransformTypeWithDeducedTST(TypeSourceInfo *DI);
/// @}
- /// \brief Transform the given statement.
+ /// Transform the given statement.
///
/// By default, this routine transforms a statement by delegating to the
/// appropriate TransformXXXStmt function to transform a specific kind of
@@ -330,7 +330,7 @@ public:
/// \returns the transformed statement.
StmtResult TransformStmt(Stmt *S);
- /// \brief Transform the given statement.
+ /// Transform the given statement.
///
/// By default, this routine transforms a statement by delegating to the
/// appropriate TransformOMPXXXClause function to transform a specific kind
@@ -340,7 +340,7 @@ public:
/// \returns the transformed OpenMP clause.
OMPClause *TransformOMPClause(OMPClause *S);
- /// \brief Transform the given attribute.
+ /// Transform the given attribute.
///
/// By default, this routine transforms a statement by delegating to the
/// appropriate TransformXXXAttr function to transform a specific kind
@@ -350,7 +350,7 @@ public:
/// \returns the transformed attribute
const Attr *TransformAttr(const Attr *S);
-/// \brief Transform the specified attribute.
+/// Transform the specified attribute.
///
/// Subclasses should override the transformation of attributes with a pragma
/// spelling to transform expressions stored within the attribute.
@@ -361,7 +361,7 @@ public:
const X##Attr *Transform##X##Attr(const X##Attr *R) { return R; }
#include "clang/Basic/AttrList.inc"
- /// \brief Transform the given expression.
+ /// Transform the given expression.
///
/// By default, this routine transforms an expression by delegating to the
/// appropriate TransformXXXExpr function to build a new expression.
@@ -371,7 +371,7 @@ public:
/// \returns the transformed expression.
ExprResult TransformExpr(Expr *E);
- /// \brief Transform the given initializer.
+ /// Transform the given initializer.
///
/// By default, this routine transforms an initializer by stripping off the
/// semantic nodes added by initialization, then passing the result to
@@ -380,7 +380,7 @@ public:
/// \returns the transformed initializer.
ExprResult TransformInitializer(Expr *Init, bool NotCopyInit);
- /// \brief Transform the given list of expressions.
+ /// Transform the given list of expressions.
///
/// This routine transforms a list of expressions by invoking
/// \c TransformExpr() for each subexpression. However, it also provides
@@ -407,7 +407,7 @@ public:
SmallVectorImpl<Expr *> &Outputs,
bool *ArgChanged = nullptr);
- /// \brief Transform the given declaration, which is referenced from a type
+ /// Transform the given declaration, which is referenced from a type
/// or expression.
///
/// By default, acts as the identity function on declarations, unless the
@@ -422,7 +422,7 @@ public:
return D;
}
- /// \brief Transform the specified condition.
+ /// Transform the specified condition.
///
/// By default, this transforms the variable and expression and rebuilds
/// the condition.
@@ -430,14 +430,14 @@ public:
Expr *Expr,
Sema::ConditionKind Kind);
- /// \brief Transform the attributes associated with the given declaration and
+ /// Transform the attributes associated with the given declaration and
/// place them on the new declaration.
///
/// By default, this operation does nothing. Subclasses may override this
/// behavior to transform attributes.
void transformAttrs(Decl *Old, Decl *New) { }
- /// \brief Note that a local declaration has been transformed by this
+ /// Note that a local declaration has been transformed by this
/// transformer.
///
/// Local declarations are typically transformed via a call to
@@ -448,7 +448,7 @@ public:
TransformedLocalDecls[Old] = New;
}
- /// \brief Transform the definition of the given declaration.
+ /// Transform the definition of the given declaration.
///
/// By default, invokes TransformDecl() to transform the declaration.
/// Subclasses may override this function to provide alternate behavior.
@@ -456,7 +456,7 @@ public:
return getDerived().TransformDecl(Loc, D);
}
- /// \brief Transform the given declaration, which was the first part of a
+ /// Transform the given declaration, which was the first part of a
/// nested-name-specifier in a member access expression.
///
/// This specific declaration transformation only applies to the first
@@ -473,7 +473,7 @@ public:
bool TransformOverloadExprDecls(OverloadExpr *Old, bool RequiresADL,
LookupResult &R);
- /// \brief Transform the given nested-name-specifier with source-location
+ /// Transform the given nested-name-specifier with source-location
/// information.
///
/// By default, transforms all of the types and declarations within the
@@ -484,7 +484,7 @@ public:
QualType ObjectType = QualType(),
NamedDecl *FirstQualifierInScope = nullptr);
- /// \brief Transform the given declaration name.
+ /// Transform the given declaration name.
///
/// By default, transforms the types of conversion function, constructor,
/// and destructor names and then (if needed) rebuilds the declaration name.
@@ -493,7 +493,7 @@ public:
DeclarationNameInfo
TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);
- /// \brief Transform the given template name.
+ /// Transform the given template name.
///
/// \param SS The nested-name-specifier that qualifies the template
/// name. This nested-name-specifier must already have been transformed.
@@ -520,7 +520,7 @@ public:
NamedDecl *FirstQualifierInScope = nullptr,
bool AllowInjectedClassName = false);
- /// \brief Transform the given template argument.
+ /// Transform the given template argument.
///
/// By default, this operation transforms the type, expression, or
/// declaration stored within the template argument and constructs a
@@ -532,7 +532,7 @@ public:
TemplateArgumentLoc &Output,
bool Uneval = false);
- /// \brief Transform the given set of template arguments.
+ /// Transform the given set of template arguments.
///
/// By default, this operation transforms all of the template arguments
/// in the input set using \c TransformTemplateArgument(), and appends
@@ -558,7 +558,7 @@ public:
Uneval);
}
- /// \brief Transform the given set of template arguments.
+ /// Transform the given set of template arguments.
///
/// By default, this operation transforms all of the template arguments
/// in the input set using \c TransformTemplateArgument(), and appends
@@ -578,11 +578,11 @@ public:
TemplateArgumentListInfo &Outputs,
bool Uneval = false);
- /// \brief Fakes up a TemplateArgumentLoc for a given TemplateArgument.
+ /// Fakes up a TemplateArgumentLoc for a given TemplateArgument.
void InventTemplateArgumentLoc(const TemplateArgument &Arg,
TemplateArgumentLoc &ArgLoc);
- /// \brief Fakes up a TypeSourceInfo for a type.
+ /// Fakes up a TypeSourceInfo for a type.
TypeSourceInfo *InventTypeSourceInfo(QualType T) {
return SemaRef.Context.getTrivialTypeSourceInfo(T,
getDerived().getBaseLocation());
@@ -622,7 +622,7 @@ public:
TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
NestedNameSpecifierLoc QualifierLoc);
- /// \brief Transforms the parameters of a function type into the
+ /// Transforms the parameters of a function type into the
/// given vectors.
///
/// The result vectors should be kept in sync; null entries in the
@@ -636,7 +636,7 @@ public:
SmallVectorImpl<QualType> &PTypes, SmallVectorImpl<ParmVarDecl *> *PVars,
Sema::ExtParameterInfoBuilder &PInfos);
- /// \brief Transforms a single function-type parameter. Return null
+ /// Transforms a single function-type parameter. Return null
/// on error.
///
/// \param indexAdjustment - A number to add to the parameter's
@@ -684,7 +684,7 @@ public:
OMPClause *Transform ## Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
- /// \brief Build a new qualified type given its unqualified type and type
+ /// Build a new qualified type given its unqualified type and type
/// qualifiers.
///
/// By default, this routine adds type qualifiers only to types that can
@@ -694,19 +694,19 @@ public:
QualType RebuildQualifiedType(QualType T, SourceLocation Loc,
Qualifiers Quals);
- /// \brief Build a new pointer type given its pointee type.
+ /// Build a new pointer type given its pointee type.
///
/// By default, performs semantic analysis when building the pointer type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildPointerType(QualType PointeeType, SourceLocation Sigil);
- /// \brief Build a new block pointer type given its pointee type.
+ /// Build a new block pointer type given its pointee type.
///
/// By default, performs semantic analysis when building the block pointer
/// type. Subclasses may override this routine to provide different behavior.
QualType RebuildBlockPointerType(QualType PointeeType, SourceLocation Sigil);
- /// \brief Build a new reference type given the type it references.
+ /// Build a new reference type given the type it references.
///
/// By default, performs semantic analysis when building the
/// reference type. Subclasses may override this routine to provide
@@ -718,7 +718,7 @@ public:
bool LValue,
SourceLocation Sigil);
- /// \brief Build a new member pointer type given the pointee type and the
+ /// Build a new member pointer type given the pointee type and the
/// class type it refers into.
///
/// By default, performs semantic analysis when building the member pointer
@@ -732,7 +732,7 @@ public:
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
- /// \brief Build an Objective-C object type.
+ /// Build an Objective-C object type.
///
/// By default, performs semantic analysis when building the object type.
/// Subclasses may override this routine to provide different behavior.
@@ -746,14 +746,14 @@ public:
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
- /// \brief Build a new Objective-C object pointer type given the pointee type.
+ /// Build a new Objective-C object pointer type given the pointee type.
///
/// By default, directly builds the pointer type, with no additional semantic
/// analysis.
QualType RebuildObjCObjectPointerType(QualType PointeeType,
SourceLocation Star);
- /// \brief Build a new array type given the element type, size
+ /// Build a new array type given the element type, size
/// modifier, size of the array (if known), size expression, and index type
/// qualifiers.
///
@@ -767,7 +767,7 @@ public:
unsigned IndexTypeQuals,
SourceRange BracketsRange);
- /// \brief Build a new constant array type given the element type, size
+ /// Build a new constant array type given the element type, size
/// modifier, (known) size of the array, and index type qualifiers.
///
/// By default, performs semantic analysis when building the array type.
@@ -778,7 +778,7 @@ public:
unsigned IndexTypeQuals,
SourceRange BracketsRange);
- /// \brief Build a new incomplete array type given the element type, size
+ /// Build a new incomplete array type given the element type, size
/// modifier, and index type qualifiers.
///
/// By default, performs semantic analysis when building the array type.
@@ -788,7 +788,7 @@ public:
unsigned IndexTypeQuals,
SourceRange BracketsRange);
- /// \brief Build a new variable-length array type given the element type,
+ /// Build a new variable-length array type given the element type,
/// size modifier, size expression, and index type qualifiers.
///
/// By default, performs semantic analysis when building the array type.
@@ -799,7 +799,7 @@ public:
unsigned IndexTypeQuals,
SourceRange BracketsRange);
- /// \brief Build a new dependent-sized array type given the element type,
+ /// Build a new dependent-sized array type given the element type,
/// size modifier, size expression, and index type qualifiers.
///
/// By default, performs semantic analysis when building the array type.
@@ -810,7 +810,7 @@ public:
unsigned IndexTypeQuals,
SourceRange BracketsRange);
- /// \brief Build a new vector type given the element type and
+ /// Build a new vector type given the element type and
/// number of elements.
///
/// By default, performs semantic analysis when building the vector type.
@@ -818,7 +818,16 @@ public:
QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
VectorType::VectorKind VecKind);
- /// \brief Build a new extended vector type given the element type and
+ /// Build a new potentially dependently-sized extended vector type
+ /// given the element type and number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDependentVectorType(QualType ElementType, Expr *SizeExpr,
+ SourceLocation AttributeLoc,
+ VectorType::VectorKind);
+
+ /// Build a new extended vector type given the element type and
/// number of elements.
///
/// By default, performs semantic analysis when building the vector type.
@@ -826,7 +835,7 @@ public:
QualType RebuildExtVectorType(QualType ElementType, unsigned NumElements,
SourceLocation AttributeLoc);
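RebuildDependentVectorType is the new hook for GNU vector_size vectors whose size (or element type) is dependent, mirroring the existing dependent ext_vector path. The kind of code that flows through it, sketched:

    template <typename T, int N>
    struct V {
      // Dependent size: Sema models this as a DependentVectorType until
      // instantiation, when TreeTransform rebuilds it via the hook above.
      typedef T vec __attribute__((vector_size(N * sizeof(T))));
    };
    V<int, 4>::vec v;  // 16-byte vector of int after substitution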
- /// \brief Build a new potentially dependently-sized extended vector type
+ /// Build a new potentially dependently-sized extended vector type
/// given the element type and number of elements.
///
/// By default, performs semantic analysis when building the vector type.
@@ -835,7 +844,7 @@ public:
Expr *SizeExpr,
SourceLocation AttributeLoc);
- /// \brief Build a new DependentAddressSpaceType or return the pointee
+ /// Build a new DependentAddressSpaceType or return the pointee
/// type variable with the correct address space (retrieved from
/// AddrSpaceExpr) applied to it. The former will be returned in cases
/// where the address space remains dependent.
@@ -847,7 +856,7 @@ public:
Expr *AddrSpaceExpr,
SourceLocation AttributeLoc);
- /// \brief Build a new function type.
+ /// Build a new function type.
///
/// By default, performs semantic analysis when building the function type.
/// Subclasses may override this routine to provide different behavior.
@@ -855,51 +864,51 @@ public:
MutableArrayRef<QualType> ParamTypes,
const FunctionProtoType::ExtProtoInfo &EPI);
- /// \brief Build a new unprototyped function type.
+ /// Build a new unprototyped function type.
QualType RebuildFunctionNoProtoType(QualType ResultType);
- /// \brief Rebuild an unresolved typename type, given the decl that
+ /// Rebuild an unresolved typename type, given the decl that
/// the UnresolvedUsingTypenameDecl was transformed to.
QualType RebuildUnresolvedUsingType(SourceLocation NameLoc, Decl *D);
- /// \brief Build a new typedef type.
+ /// Build a new typedef type.
QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
return SemaRef.Context.getTypeDeclType(Typedef);
}
- /// \brief Build a new class/struct/union type.
+ /// Build a new class/struct/union type.
QualType RebuildRecordType(RecordDecl *Record) {
return SemaRef.Context.getTypeDeclType(Record);
}
- /// \brief Build a new Enum type.
+ /// Build a new Enum type.
QualType RebuildEnumType(EnumDecl *Enum) {
return SemaRef.Context.getTypeDeclType(Enum);
}
- /// \brief Build a new typeof(expr) type.
+ /// Build a new typeof(expr) type.
///
/// By default, performs semantic analysis when building the typeof type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc);
- /// \brief Build a new typeof(type) type.
+ /// Build a new typeof(type) type.
///
/// By default, builds a new TypeOfType with the given underlying type.
QualType RebuildTypeOfType(QualType Underlying);
- /// \brief Build a new unary transform type.
+ /// Build a new unary transform type.
QualType RebuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
- /// \brief Build a new C++11 decltype type.
+ /// Build a new C++11 decltype type.
///
/// By default, performs semantic analysis when building the decltype type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildDecltypeType(Expr *Underlying, SourceLocation Loc);
- /// \brief Build a new C++11 auto type.
+ /// Build a new C++11 auto type.
///
/// By default, builds a new AutoType with the given deduced type.
QualType RebuildAutoType(QualType Deduced, AutoTypeKeyword Keyword) {
@@ -918,7 +927,7 @@ public:
Template, Deduced, /*IsDependent*/ false);
}
- /// \brief Build a new template specialization type.
+ /// Build a new template specialization type.
///
/// By default, performs semantic analysis when building the template
/// specialization type. Subclasses may override this routine to provide
@@ -927,7 +936,7 @@ public:
SourceLocation TemplateLoc,
TemplateArgumentListInfo &Args);
- /// \brief Build a new parenthesized type.
+ /// Build a new parenthesized type.
///
/// By default, builds a new ParenType type from the inner type.
/// Subclasses may override this routine to provide different behavior.
@@ -935,7 +944,7 @@ public:
return SemaRef.BuildParenType(InnerType);
}
- /// \brief Build a new qualified name type.
+ /// Build a new qualified name type.
///
/// By default, builds a new ElaboratedType type from the keyword,
/// the nested-name-specifier and the named type.
@@ -949,7 +958,7 @@ public:
Named);
}
- /// \brief Build a new typename type that refers to a template-id.
+ /// Build a new typename type that refers to a template-id.
///
/// By default, builds a new DependentNameType type from the
/// nested-name-specifier and the given type. Subclasses may override
@@ -957,6 +966,7 @@ public:
QualType RebuildDependentTemplateSpecializationType(
ElaboratedTypeKeyword Keyword,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const IdentifierInfo *Name,
SourceLocation NameLoc,
TemplateArgumentListInfo &Args,
@@ -965,9 +975,9 @@ public:
// TODO: avoid TemplateName abstraction
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- TemplateName InstName
- = getDerived().RebuildTemplateName(SS, *Name, NameLoc, QualType(),
- nullptr, AllowInjectedClassName);
+ TemplateName InstName = getDerived().RebuildTemplateName(
+ SS, TemplateKWLoc, *Name, NameLoc, QualType(), nullptr,
+ AllowInjectedClassName);
if (InstName.isNull())
return QualType();
@@ -993,7 +1003,7 @@ public:
T);
}
- /// \brief Build a new typename type that refers to an identifier.
+ /// Build a new typename type that refers to an identifier.
///
/// By default, performs semantic analysis when building the typename type
/// (or elaborated type). Subclasses may override this routine to provide
@@ -1106,7 +1116,7 @@ public:
T);
}
- /// \brief Build a new pack expansion type.
+ /// Build a new pack expansion type.
///
/// By default, builds a new PackExpansionType type from the given pattern.
/// Subclasses may override this routine to provide different behavior.
@@ -1118,17 +1128,17 @@ public:
NumExpansions);
}
- /// \brief Build a new atomic type given its value type.
+ /// Build a new atomic type given its value type.
///
/// By default, performs semantic analysis when building the atomic type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildAtomicType(QualType ValueType, SourceLocation KWLoc);
- /// \brief Build a new pipe type given its value type.
+ /// Build a new pipe type given its value type.
QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc,
bool isReadPipe);
- /// \brief Build a new template name given a nested name specifier, a flag
+ /// Build a new template name given a nested name specifier, a flag
/// indicating whether the "template" keyword was provided, and the template
/// that the template name refers to.
///
@@ -1138,7 +1148,7 @@ public:
bool TemplateKW,
TemplateDecl *Template);
- /// \brief Build a new template name given a nested name specifier and the
+ /// Build a new template name given a nested name specifier and the
/// name that is referred to as a template.
///
/// By default, performs semantic analysis to determine whether the name can
@@ -1146,13 +1156,13 @@ public:
/// template name. Subclasses may override this routine to provide different
/// behavior.
TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const IdentifierInfo &Name,
- SourceLocation NameLoc,
- QualType ObjectType,
+ SourceLocation NameLoc, QualType ObjectType,
NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName);
- /// \brief Build a new template name given a nested name specifier and the
+ /// Build a new template name given a nested name specifier and the
/// overloaded operator name that is referred to as a template.
///
/// By default, performs semantic analysis to determine whether the name can
@@ -1160,12 +1170,12 @@ public:
/// template name. Subclasses may override this routine to provide different
/// behavior.
TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
OverloadedOperatorKind Operator,
- SourceLocation NameLoc,
- QualType ObjectType,
+ SourceLocation NameLoc, QualType ObjectType,
bool AllowInjectedClassName);
- /// \brief Build a new template name given a template template parameter pack
+ /// Build a new template name given a template template parameter pack
/// and the argument pack that it has been substituted with.
///
/// By default, performs semantic analysis to determine whether the name can
@@ -1177,7 +1187,7 @@ public:
return getSema().Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
}
- /// \brief Build a new compound statement.
+ /// Build a new compound statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1189,7 +1199,7 @@ public:
IsStmtExpr);
}
- /// \brief Build a new case statement.
+ /// Build a new case statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1202,7 +1212,7 @@ public:
ColonLoc);
}
- /// \brief Attach the body to a new case statement.
+ /// Attach the body to a new case statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1211,7 +1221,7 @@ public:
return S;
}
- /// \brief Build a new default statement.
+ /// Build a new default statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1222,7 +1232,7 @@ public:
/*CurScope=*/nullptr);
}
- /// \brief Build a new label statement.
+ /// Build a new label statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1231,7 +1241,7 @@ public:
return SemaRef.ActOnLabelStmt(IdentLoc, L, ColonLoc, SubStmt);
}
- /// \brief Build a new label statement.
+ /// Build a new label statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1241,7 +1251,7 @@ public:
return SemaRef.ActOnAttributedStmt(AttrLoc, Attrs, SubStmt);
}
- /// \brief Build a new "if" statement.
+ /// Build a new "if" statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1252,7 +1262,7 @@ public:
ElseLoc, Else);
}
- /// \brief Start building a new switch statement.
+ /// Start building a new switch statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1261,7 +1271,7 @@ public:
return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Init, Cond);
}
- /// \brief Attach the body to the switch statement.
+ /// Attach the body to the switch statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1270,7 +1280,7 @@ public:
return getSema().ActOnFinishSwitchStmt(SwitchLoc, Switch, Body);
}
- /// \brief Build a new while statement.
+ /// Build a new while statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1279,7 +1289,7 @@ public:
return getSema().ActOnWhileStmt(WhileLoc, Cond, Body);
}
- /// \brief Build a new do-while statement.
+ /// Build a new do-while statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1290,7 +1300,7 @@ public:
Cond, RParenLoc);
}
- /// \brief Build a new for statement.
+ /// Build a new for statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1302,7 +1312,7 @@ public:
Inc, RParenLoc, Body);
}
- /// \brief Build a new goto statement.
+ /// Build a new goto statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1311,7 +1321,7 @@ public:
return getSema().ActOnGotoStmt(GotoLoc, LabelLoc, Label);
}
- /// \brief Build a new indirect goto statement.
+ /// Build a new indirect goto statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1321,7 +1331,7 @@ public:
return getSema().ActOnIndirectGotoStmt(GotoLoc, StarLoc, Target);
}
- /// \brief Build a new return statement.
+ /// Build a new return statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1329,7 +1339,7 @@ public:
return getSema().BuildReturnStmt(ReturnLoc, Result);
}
- /// \brief Build a new declaration statement.
+ /// Build a new declaration statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1339,7 +1349,7 @@ public:
return getSema().ActOnDeclStmt(DG, StartLoc, EndLoc);
}
- /// \brief Build a new inline asm statement.
+ /// Build a new inline asm statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1354,7 +1364,7 @@ public:
AsmString, Clobbers, RParenLoc);
}
- /// \brief Build a new MS style inline asm statement.
+ /// Build a new MS style inline asm statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1371,7 +1381,7 @@ public:
Constraints, Clobbers, Exprs, EndLoc);
}
- /// \brief Build a new co_return statement.
+ /// Build a new co_return statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1380,7 +1390,7 @@ public:
return getSema().BuildCoreturnStmt(CoreturnLoc, Result, IsImplicit);
}
- /// \brief Build a new co_await expression.
+ /// Build a new co_await expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -1389,7 +1399,7 @@ public:
return getSema().BuildResolvedCoawaitExpr(CoawaitLoc, Result, IsImplicit);
}
- /// \brief Build a new co_await expression.
+ /// Build a new co_await expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -1399,7 +1409,7 @@ public:
return getSema().BuildUnresolvedCoawaitExpr(CoawaitLoc, Result, Lookup);
}
- /// \brief Build a new co_yield expression.
+ /// Build a new co_yield expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -1411,7 +1421,7 @@ public:
return getSema().BuildCoroutineBodyStmt(Args);
}
- /// \brief Build a new Objective-C \@try statement.
+ /// Build a new Objective-C \@try statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1423,7 +1433,7 @@ public:
Finally);
}
- /// \brief Rebuild an Objective-C exception declaration.
+ /// Rebuild an Objective-C exception declaration.
///
/// By default, performs semantic analysis to build the new declaration.
/// Subclasses may override this routine to provide different behavior.
@@ -1435,7 +1445,7 @@ public:
ExceptionDecl->getIdentifier());
}
- /// \brief Build a new Objective-C \@catch statement.
+ /// Build a new Objective-C \@catch statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1447,7 +1457,7 @@ public:
Var, Body);
}
- /// \brief Build a new Objective-C \@finally statement.
+ /// Build a new Objective-C \@finally statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1456,7 +1466,7 @@ public:
return getSema().ActOnObjCAtFinallyStmt(AtLoc, Body);
}
- /// \brief Build a new Objective-C \@throw statement.
+ /// Build a new Objective-C \@throw statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1465,7 +1475,7 @@ public:
return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
}
- /// \brief Build a new OpenMP executable directive.
+ /// Build a new OpenMP executable directive.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1479,7 +1489,7 @@ public:
Kind, DirName, CancelRegion, Clauses, AStmt, StartLoc, EndLoc);
}
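The long run of OpenMP clause hooks that follows is exercised whenever a directive sits inside a template; a sketch:

    template <typename T>
    void f(T n) {
      // Instantiating f<int> transforms the directive and each clause
      // through the corresponding Rebuild* hook below.
    #pragma omp parallel num_threads(n)
      { }
    }
    template void f<int>(int);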
- /// \brief Build a new OpenMP 'if' clause.
+ /// Build a new OpenMP 'if' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1494,7 +1504,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'final' clause.
+ /// Build a new OpenMP 'final' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1505,7 +1515,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'num_threads' clause.
+ /// Build a new OpenMP 'num_threads' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1517,7 +1527,7 @@ public:
LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'safelen' clause.
+ /// Build a new OpenMP 'safelen' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1527,7 +1537,7 @@ public:
return getSema().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'simdlen' clause.
+ /// Build a new OpenMP 'simdlen' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1537,7 +1547,7 @@ public:
return getSema().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'collapse' clause.
+ /// Build a new OpenMP 'collapse' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1548,7 +1558,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'default' clause.
+ /// Build a new OpenMP 'default' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1561,7 +1571,7 @@ public:
StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'proc_bind' clause.
+ /// Build a new OpenMP 'proc_bind' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1574,7 +1584,7 @@ public:
StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'schedule' clause.
+ /// Build a new OpenMP 'schedule' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1588,7 +1598,7 @@ public:
CommaLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'ordered' clause.
+ /// Build a new OpenMP 'ordered' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1598,7 +1608,7 @@ public:
return getSema().ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, Num);
}
- /// \brief Build a new OpenMP 'private' clause.
+ /// Build a new OpenMP 'private' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1610,7 +1620,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'firstprivate' clause.
+ /// Build a new OpenMP 'firstprivate' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1622,7 +1632,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'lastprivate' clause.
+ /// Build a new OpenMP 'lastprivate' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1634,7 +1644,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'shared' clause.
+ /// Build a new OpenMP 'shared' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1646,7 +1656,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'reduction' clause.
+ /// Build a new OpenMP 'reduction' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1694,7 +1704,7 @@ public:
ReductionId, UnresolvedReductions);
}
- /// \brief Build a new OpenMP 'linear' clause.
+ /// Build a new OpenMP 'linear' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1710,7 +1720,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'aligned' clause.
+ /// Build a new OpenMP 'aligned' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1723,7 +1733,7 @@ public:
LParenLoc, ColonLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'copyin' clause.
+ /// Build a new OpenMP 'copyin' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1735,7 +1745,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'copyprivate' clause.
+ /// Build a new OpenMP 'copyprivate' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1747,7 +1757,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'flush' pseudo clause.
+ /// Build a new OpenMP 'flush' pseudo clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1759,7 +1769,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'depend' pseudo clause.
+ /// Build a new OpenMP 'depend' pseudo clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1772,7 +1782,7 @@ public:
StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'device' clause.
+ /// Build a new OpenMP 'device' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1783,7 +1793,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'map' clause.
+ /// Build a new OpenMP 'map' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1798,7 +1808,7 @@ public:
VarList, StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'num_teams' clause.
+ /// Build a new OpenMP 'num_teams' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1809,7 +1819,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'thread_limit' clause.
+ /// Build a new OpenMP 'thread_limit' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1821,7 +1831,7 @@ public:
LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'priority' clause.
+ /// Build a new OpenMP 'priority' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1832,7 +1842,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'grainsize' clause.
+ /// Build a new OpenMP 'grainsize' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1843,7 +1853,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'num_tasks' clause.
+ /// Build a new OpenMP 'num_tasks' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1854,7 +1864,7 @@ public:
EndLoc);
}
- /// \brief Build a new OpenMP 'hint' clause.
+ /// Build a new OpenMP 'hint' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1864,7 +1874,7 @@ public:
return getSema().ActOnOpenMPHintClause(Hint, StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'dist_schedule' clause.
+ /// Build a new OpenMP 'dist_schedule' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
@@ -1877,7 +1887,7 @@ public:
Kind, ChunkSize, StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'to' clause.
+ /// Build a new OpenMP 'to' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1888,7 +1898,7 @@ public:
return getSema().ActOnOpenMPToClause(VarList, StartLoc, LParenLoc, EndLoc);
}
- /// \brief Build a new OpenMP 'from' clause.
+ /// Build a new OpenMP 'from' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1924,7 +1934,7 @@ public:
EndLoc);
}
- /// \brief Rebuild the operand to an Objective-C \@synchronized statement.
+ /// Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1933,7 +1943,7 @@ public:
return getSema().ActOnObjCAtSynchronizedOperand(atLoc, object);
}
- /// \brief Build a new Objective-C \@synchronized statement.
+ /// Build a new Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1942,7 +1952,7 @@ public:
return getSema().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
}
- /// \brief Build a new Objective-C \@autoreleasepool statement.
+ /// Build a new Objective-C \@autoreleasepool statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1951,7 +1961,7 @@ public:
return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
}
- /// \brief Build a new Objective-C fast enumeration statement.
+ /// Build a new Objective-C fast enumeration statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1970,7 +1980,7 @@ public:
return getSema().FinishObjCForCollectionStmt(ForEachStmt.get(), Body);
}
- /// \brief Build a new C++ exception declaration.
+ /// Build a new C++ exception declaration.
///
/// By default, performs semantic analysis to build the new declaration.
/// Subclasses may override this routine to provide different behavior.
@@ -1986,7 +1996,7 @@ public:
return Var;
}
- /// \brief Build a new C++ catch statement.
+ /// Build a new C++ catch statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -1997,7 +2007,7 @@ public:
Handler));
}
- /// \brief Build a new C++ try statement.
+ /// Build a new C++ try statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -2006,7 +2016,7 @@ public:
return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, Handlers);
}
- /// \brief Build a new C++0x range-based for statement.
+ /// Build a new C++0x range-based for statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -2040,7 +2050,7 @@ public:
Sema::BFRK_Rebuild);
}
- /// \brief Build a new C++0x range-based for statement.
+ /// Build a new C++0x range-based for statement.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -2053,7 +2063,7 @@ public:
QualifierLoc, NameInfo, Nested);
}
- /// \brief Attach body to a C++0x range-based for statement.
+ /// Attach body to a C++0x range-based for statement.
///
/// By default, performs semantic analysis to finish the new statement.
/// Subclasses may override this routine to provide different behavior.
@@ -2075,7 +2085,7 @@ public:
return SEHFinallyStmt::Create(getSema().getASTContext(), Loc, Block);
}
- /// \brief Build a new predefined expression.
+ /// Build a new predefined expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2084,7 +2094,7 @@ public:
return getSema().BuildPredefinedExpr(Loc, IT);
}
- /// \brief Build a new expression that references a declaration.
+ /// Build a new expression that references a declaration.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2095,7 +2105,7 @@ public:
}
- /// \brief Build a new expression that references a declaration.
+ /// Build a new expression that references a declaration.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2111,7 +2121,7 @@ public:
return getSema().BuildDeclarationNameExpr(SS, NameInfo, VD);
}
- /// \brief Build a new expression in parentheses.
+ /// Build a new expression in parentheses.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2120,7 +2130,7 @@ public:
return getSema().ActOnParenExpr(LParen, RParen, SubExpr);
}
- /// \brief Build a new pseudo-destructor expression.
+ /// Build a new pseudo-destructor expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2133,7 +2143,7 @@ public:
SourceLocation TildeLoc,
PseudoDestructorTypeStorage Destroyed);
- /// \brief Build a new unary operator expression.
+ /// Build a new unary operator expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2143,7 +2153,7 @@ public:
return getSema().BuildUnaryOp(/*Scope=*/nullptr, OpLoc, Opc, SubExpr);
}
- /// \brief Build a new builtin offsetof expression.
+ /// Build a new builtin offsetof expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2155,7 +2165,7 @@ public:
RParenLoc);
}
- /// \brief Build a new sizeof, alignof or vec_step expression with a
+ /// Build a new sizeof, alignof or vec_step expression with a
/// type argument.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2167,7 +2177,7 @@ public:
return getSema().CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, R);
}
- /// \brief Build a new sizeof, alignof or vec step expression with an
+ /// Build a new sizeof, alignof or vec_step expression with an
/// expression argument.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2183,7 +2193,7 @@ public:
return Result;
}
- /// \brief Build a new array subscript expression.
+ /// Build a new array subscript expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2196,7 +2206,7 @@ public:
RBracketLoc);
}
- /// \brief Build a new array section expression.
+ /// Build a new array section expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2208,7 +2218,7 @@ public:
ColonLoc, Length, RBracketLoc);
}
- /// \brief Build a new call expression.
+ /// Build a new call expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2220,7 +2230,7 @@ public:
Args, RParenLoc, ExecConfig);
}
- /// \brief Build a new member access expression.
+ /// Build a new member access expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2239,7 +2249,6 @@ public:
// We have a reference to an unnamed field. This is always the
// base of an anonymous struct/union member access, i.e. the
// field is always of record type.
- assert(!QualifierLoc && "Can't have an unnamed field with a qualifier!");
assert(Member->getType()->isRecordType() &&
"unnamed member not of record type?");
@@ -2250,11 +2259,11 @@ public:
if (BaseResult.isInvalid())
return ExprError();
Base = BaseResult.get();
- ExprValueKind VK = isArrow ? VK_LValue : Base->getValueKind();
- MemberExpr *ME = new (getSema().Context)
- MemberExpr(Base, isArrow, OpLoc, Member, MemberNameInfo,
- cast<FieldDecl>(Member)->getType(), VK, OK_Ordinary);
- return ME;
+
+ CXXScopeSpec EmptySS;
+ return getSema().BuildFieldReferenceExpr(
+ Base, isArrow, OpLoc, EmptySS, cast<FieldDecl>(Member),
+ DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()), MemberNameInfo);
}
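// Illustration of the unnamed-field case above: an unnamed member arises
// from an anonymous struct/union, e.g.
//   struct S { union { int i; float f; }; };
// Rebuilding an access to s.i now goes through BuildFieldReferenceExpr so
// value kind, qualifiers, and access checking match what Sema normally does.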
CXXScopeSpec SS;
@@ -2279,7 +2288,7 @@ public:
/*S*/nullptr);
}
- /// \brief Build a new binary operator expression.
+ /// Build a new binary operator expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2289,7 +2298,7 @@ public:
return getSema().BuildBinOp(/*Scope=*/nullptr, OpLoc, Opc, LHS, RHS);
}
- /// \brief Build a new conditional operator expression.
+ /// Build a new conditional operator expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2302,7 +2311,7 @@ public:
LHS, RHS);
}
- /// \brief Build a new C-style cast expression.
+ /// Build a new C-style cast expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2314,7 +2323,7 @@ public:
SubExpr);
}
- /// \brief Build a new compound literal expression.
+ /// Build a new compound literal expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2326,7 +2335,7 @@ public:
Init);
}
- /// \brief Build a new extended vector element access expression.
+ /// Build a new extended vector element access expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2346,27 +2355,17 @@ public:
/*S*/ nullptr);
}
- /// \brief Build a new initializer list expression.
+ /// Build a new initializer list expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildInitList(SourceLocation LBraceLoc,
MultiExprArg Inits,
- SourceLocation RBraceLoc,
- QualType ResultTy) {
- ExprResult Result
- = SemaRef.ActOnInitList(LBraceLoc, Inits, RBraceLoc);
- if (Result.isInvalid() || ResultTy->isDependentType())
- return Result;
-
- // Patch in the result type we were given, which may have been computed
- // when the initial InitListExpr was built.
- InitListExpr *ILE = cast<InitListExpr>((Expr *)Result.get());
- ILE->setType(ResultTy);
- return Result;
+ SourceLocation RBraceLoc) {
+ return SemaRef.ActOnInitList(LBraceLoc, Inits, RBraceLoc);
}
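// Illustration (hypothetical type, not from this patch): when rebuilding
// `Widget w = {1, 2};` during instantiation, ActOnInitList re-analyzes the
// syntactic list and recomputes the semantic form and its type, so the
// previously computed result type no longer needs to be patched back in.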
- /// \brief Build a new designated initializer expression.
+ /// Build a new designated initializer expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2384,7 +2383,7 @@ public:
return Result;
}
- /// \brief Build a new value-initialized expression.
+ /// Build a new value-initialized expression.
///
/// By default, builds the implicit value initialization without performing
/// any semantic analysis. Subclasses may override this routine to provide
@@ -2393,7 +2392,7 @@ public:
return new (SemaRef.Context) ImplicitValueInitExpr(T);
}
- /// \brief Build a new \c va_arg expression.
+ /// Build a new \c va_arg expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2405,7 +2404,7 @@ public:
RParenLoc);
}
- /// \brief Build a new expression list in parentheses.
+ /// Build a new expression list in parentheses.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2415,7 +2414,7 @@ public:
return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, SubExprs);
}
- /// \brief Build a new address-of-label expression.
+ /// Build a new address-of-label expression.
///
/// By default, performs semantic analysis, using the name of the label
/// rather than attempting to map the label statement itself.
@@ -2425,7 +2424,7 @@ public:
return getSema().ActOnAddrLabel(AmpAmpLoc, LabelLoc, Label);
}
- /// \brief Build a new GNU statement expression.
+ /// Build a new GNU statement expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2435,7 +2434,7 @@ public:
return getSema().ActOnStmtExpr(LParenLoc, SubStmt, RParenLoc);
}
- /// \brief Build a new __builtin_choose_expr expression.
+ /// Build a new __builtin_choose_expr expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2447,7 +2446,7 @@ public:
RParenLoc);
}
- /// \brief Build a new generic selection expression.
+ /// Build a new generic selection expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2461,7 +2460,7 @@ public:
ControllingExpr, Types, Exprs);
}
- /// \brief Build a new overloaded operator call expression.
+ /// Build a new overloaded operator call expression.
///
/// By default, performs semantic analysis to build the new expression.
/// The semantic analysis provides the behavior of template instantiation,
@@ -2475,7 +2474,7 @@ public:
Expr *First,
Expr *Second);
- /// \brief Build a new C++ "named" cast expression, such as static_cast or
+ /// Build a new C++ "named" cast expression, such as static_cast or
/// reinterpret_cast.
///
/// By default, this routine dispatches to one of the more-specific routines
@@ -2516,7 +2515,7 @@ public:
}
}
- /// \brief Build a new C++ static_cast expression.
+ /// Build a new C++ static_cast expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2533,7 +2532,7 @@ public:
SourceRange(LParenLoc, RParenLoc));
}
- /// \brief Build a new C++ dynamic_cast expression.
+ /// Build a new C++ dynamic_cast expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2550,7 +2549,7 @@ public:
SourceRange(LParenLoc, RParenLoc));
}
- /// \brief Build a new C++ reinterpret_cast expression.
+ /// Build a new C++ reinterpret_cast expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2567,7 +2566,7 @@ public:
SourceRange(LParenLoc, RParenLoc));
}
- /// \brief Build a new C++ const_cast expression.
+ /// Build a new C++ const_cast expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2584,20 +2583,21 @@ public:
SourceRange(LParenLoc, RParenLoc));
}
- /// \brief Build a new C++ functional-style cast expression.
+ /// Build a new C++ functional-style cast expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *Sub,
- SourceLocation RParenLoc) {
+ SourceLocation RParenLoc,
+ bool ListInitialization) {
return getSema().BuildCXXTypeConstructExpr(TInfo, LParenLoc,
- MultiExprArg(&Sub, 1),
- RParenLoc);
+ MultiExprArg(&Sub, 1), RParenLoc,
+ ListInitialization);
}
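// Illustration: a functional cast written with braces, e.g. `T{42}`, is now
// rebuilt as list-initialization rather than silently becoming `T(42)`.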
- /// \brief Build a new C++ typeid(type) expression.
+ /// Build a new C++ typeid(type) expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2610,7 +2610,7 @@ public:
}
- /// \brief Build a new C++ typeid(expr) expression.
+ /// Build a new C++ typeid(expr) expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2622,7 +2622,7 @@ public:
RParenLoc);
}
- /// \brief Build a new C++ __uuidof(type) expression.
+ /// Build a new C++ __uuidof(type) expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2634,7 +2634,7 @@ public:
RParenLoc);
}
- /// \brief Build a new C++ __uuidof(expr) expression.
+ /// Build a new C++ __uuidof(expr) expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2646,7 +2646,7 @@ public:
RParenLoc);
}
- /// \brief Build a new C++ "this" expression.
+ /// Build a new C++ "this" expression.
///
/// By default, builds a new "this" expression without performing any
/// semantic analysis. Subclasses may override this routine to provide
@@ -2658,7 +2658,7 @@ public:
return new (getSema().Context) CXXThisExpr(ThisLoc, ThisType, isImplicit);
}
- /// \brief Build a new C++ throw expression.
+ /// Build a new C++ throw expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2667,7 +2667,7 @@ public:
return getSema().BuildCXXThrow(ThrowLoc, Sub, IsThrownVariableInScope);
}
- /// \brief Build a new C++ default-argument expression.
+ /// Build a new C++ default-argument expression.
///
/// By default, builds a new default-argument expression, which does not
/// require any semantic analysis. Subclasses may override this routine to
@@ -2677,7 +2677,7 @@ public:
return CXXDefaultArgExpr::Create(getSema().Context, Loc, Param);
}
- /// \brief Build a new C++11 default-initialization expression.
+ /// Build a new C++11 default-initialization expression.
///
/// By default, builds a new default field initialization expression, which
/// does not require any semantic analysis. Subclasses may override this
@@ -2687,18 +2687,18 @@ public:
return CXXDefaultInitExpr::Create(getSema().Context, Loc, Field);
}
- /// \brief Build a new C++ zero-initialization expression.
+ /// Build a new C++ zero-initialization expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCXXScalarValueInitExpr(TypeSourceInfo *TSInfo,
SourceLocation LParenLoc,
SourceLocation RParenLoc) {
- return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc,
- None, RParenLoc);
+ return getSema().BuildCXXTypeConstructExpr(
+ TSInfo, LParenLoc, None, RParenLoc, /*ListInitialization=*/false);
}
- /// \brief Build a new C++ "new" expression.
+ /// Build a new C++ "new" expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2725,7 +2725,7 @@ public:
Initializer);
}
- /// \brief Build a new C++ "delete" expression.
+ /// Build a new C++ "delete" expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2737,7 +2737,7 @@ public:
Operand);
}
- /// \brief Build a new type trait expression.
+ /// Build a new type trait expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2748,7 +2748,7 @@ public:
return getSema().BuildTypeTrait(Trait, StartLoc, Args, RParenLoc);
}
- /// \brief Build a new array type trait expression.
+ /// Build a new array type trait expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2760,7 +2760,7 @@ public:
return getSema().BuildArrayTypeTrait(Trait, StartLoc, TSInfo, DimExpr, RParenLoc);
}
- /// \brief Build a new expression trait expression.
+ /// Build a new expression trait expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2771,7 +2771,7 @@ public:
return getSema().BuildExpressionTrait(Trait, StartLoc, Queried, RParenLoc);
}
- /// \brief Build a new (previously unresolved) declaration reference
+ /// Build a new (previously unresolved) declaration reference
/// expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2794,7 +2794,7 @@ public:
SS, NameInfo, IsAddressOfOperand, /*S*/nullptr, RecoveryTSI);
}
- /// \brief Build a new template-id expression.
+ /// Build a new template-id expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2807,7 +2807,7 @@ public:
TemplateArgs);
}
- /// \brief Build a new object-construction expression.
+ /// Build a new object-construction expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2837,7 +2837,7 @@ public:
ParenRange);
}
- /// \brief Build a new implicit construction via inherited constructor
+ /// Build a new implicit construction via inherited constructor
/// expression.
ExprResult RebuildCXXInheritedCtorInitExpr(QualType T, SourceLocation Loc,
CXXConstructorDecl *Constructor,
@@ -2847,35 +2847,33 @@ public:
Loc, T, Constructor, ConstructsVBase, InheritedFromVBase);
}
- /// \brief Build a new object-construction expression.
+ /// Build a new object-construction expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCXXTemporaryObjectExpr(TypeSourceInfo *TSInfo,
- SourceLocation LParenLoc,
+ SourceLocation LParenOrBraceLoc,
MultiExprArg Args,
- SourceLocation RParenLoc) {
- return getSema().BuildCXXTypeConstructExpr(TSInfo,
- LParenLoc,
- Args,
- RParenLoc);
+ SourceLocation RParenOrBraceLoc,
+ bool ListInitialization) {
+ return getSema().BuildCXXTypeConstructExpr(
+ TSInfo, LParenOrBraceLoc, Args, RParenOrBraceLoc, ListInitialization);
}
- /// \brief Build a new object-construction expression.
+ /// Build a new object-construction expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildCXXUnresolvedConstructExpr(TypeSourceInfo *TSInfo,
SourceLocation LParenLoc,
MultiExprArg Args,
- SourceLocation RParenLoc) {
- return getSema().BuildCXXTypeConstructExpr(TSInfo,
- LParenLoc,
- Args,
- RParenLoc);
+ SourceLocation RParenLoc,
+ bool ListInitialization) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, Args,
+ RParenLoc, ListInitialization);
}
- /// \brief Build a new member reference expression.
+ /// Build a new member reference expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2899,7 +2897,7 @@ public:
TemplateArgs, /*S*/nullptr);
}
- /// \brief Build a new member reference expression.
+ /// Build a new member reference expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2921,7 +2919,7 @@ public:
R, TemplateArgs, /*S*/nullptr);
}
- /// \brief Build a new noexcept expression.
+ /// Build a new noexcept expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2929,7 +2927,7 @@ public:
return SemaRef.BuildCXXNoexceptExpr(Range.getBegin(), Arg, Range.getEnd());
}
- /// \brief Build a new expression to compute the length of a parameter pack.
+ /// Build a new expression to compute the length of a parameter pack.
ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc,
NamedDecl *Pack,
SourceLocation PackLoc,
@@ -2940,7 +2938,7 @@ public:
RParenLoc, Length, PartialArgs);
}
- /// \brief Build a new Objective-C boxed expression.
+ /// Build a new Objective-C boxed expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2948,7 +2946,7 @@ public:
return getSema().BuildObjCBoxedExpr(SR, ValueExpr);
}
- /// \brief Build a new Objective-C array literal.
+ /// Build a new Objective-C array literal.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2966,7 +2964,7 @@ public:
getterMethod, setterMethod);
}
- /// \brief Build a new Objective-C dictionary literal.
+ /// Build a new Objective-C dictionary literal.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2975,7 +2973,7 @@ public:
return getSema().BuildObjCDictionaryLiteral(Range, Elements);
}
- /// \brief Build a new Objective-C \@encode expression.
+ /// Build a new Objective-C \@encode expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2985,7 +2983,7 @@ public:
return SemaRef.BuildObjCEncodeExpression(AtLoc, EncodeTypeInfo, RParenLoc);
}
- /// \brief Build a new Objective-C class message.
+ /// Build a new Objective-C class message.
ExprResult RebuildObjCMessageExpr(TypeSourceInfo *ReceiverTypeInfo,
Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
@@ -3000,7 +2998,7 @@ public:
RBracLoc, Args);
}
- /// \brief Build a new Objective-C instance message.
+ /// Build a new Objective-C instance message.
ExprResult RebuildObjCMessageExpr(Expr *Receiver,
Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
@@ -3015,7 +3013,7 @@ public:
RBracLoc, Args);
}
- /// \brief Build a new Objective-C instance/class message to 'super'.
+ /// Build a new Objective-C instance/class message to 'super'.
ExprResult RebuildObjCMessageExpr(SourceLocation SuperLoc,
Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
@@ -3038,7 +3036,7 @@ public:
}
- /// \brief Build a new Objective-C ivar reference expression.
+ /// Build a new Objective-C ivar reference expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3058,7 +3056,7 @@ public:
return Result;
}
- /// \brief Build a new Objective-C property reference expression.
+ /// Build a new Objective-C property reference expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3077,7 +3075,7 @@ public:
/*S=*/nullptr);
}
- /// \brief Build a new Objective-C property reference expression.
+ /// Build a new Objective-C property reference expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3093,7 +3091,7 @@ public:
PropertyLoc, Base));
}
- /// \brief Build a new Objective-C "isa" expression.
+ /// Build a new Objective-C "isa" expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3110,7 +3108,7 @@ public:
/*S=*/nullptr);
}
- /// \brief Build a new shuffle vector expression.
+ /// Build a new shuffle vector expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3142,7 +3140,7 @@ public:
return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.get()));
}
- /// \brief Build a new convert vector expression.
+ /// Build a new convert vector expression.
ExprResult RebuildConvertVectorExpr(SourceLocation BuiltinLoc,
Expr *SrcExpr, TypeSourceInfo *DstTInfo,
SourceLocation RParenLoc) {
@@ -3150,7 +3148,7 @@ public:
BuiltinLoc, RParenLoc);
}
- /// \brief Build a new template argument pack expansion.
+ /// Build a new template argument pack expansion.
///
/// By default, performs semantic analysis to build a new pack expansion
/// for a template argument. Subclasses may override this routine to provide
@@ -3198,7 +3196,7 @@ public:
return TemplateArgumentLoc();
}
- /// \brief Build a new expression pack expansion.
+ /// Build a new expression pack expansion.
///
/// By default, performs semantic analysis to build a new pack expansion
/// for an expression. Subclasses may override this routine to provide
@@ -3208,7 +3206,7 @@ public:
return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions);
}
- /// \brief Build a new C++1z fold-expression.
+ /// Build a new C++1z fold-expression.
///
/// By default, performs semantic analysis in order to build a new fold
/// expression.
@@ -3220,7 +3218,7 @@ public:
RHS, RParenLoc);
}
- /// \brief Build an empty C++1z fold-expression with the given operator.
+ /// Build an empty C++1z fold-expression with the given operator.
///
/// By default, produces the fallback value for the fold-expression, or
/// produces an error if there is no fallback value.
@@ -3229,7 +3227,7 @@ public:
return getSema().BuildEmptyCXXFoldExpr(EllipsisLoc, Operator);
}
- /// \brief Build a new atomic operation expression.
+ /// Build a new atomic operation expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -3394,11 +3392,10 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
/*IsCall*/true, NewArgs, &ArgChanged))
return ExprError();
- // If this was list initialization, revert to list form.
+ // If this was list initialization, revert to syntactic list form.
if (Construct->isListInitialization())
return getDerived().RebuildInitList(Construct->getLocStart(), NewArgs,
- Construct->getLocEnd(),
- Construct->getType());
+ Construct->getLocEnd());
// Build a ParenListExpr to represent anything else.
SourceRange Parens = Construct->getParenOrBraceRange();
@@ -3765,8 +3762,12 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
ObjectType.isNull())
return Name;
+ // FIXME: Preserve the location of the "template" keyword.
+ SourceLocation TemplateKWLoc = NameLoc;
+
if (DTN->isIdentifier()) {
return getDerived().RebuildTemplateName(SS,
+ TemplateKWLoc,
*DTN->getIdentifier(),
NameLoc,
ObjectType,
@@ -3774,7 +3775,8 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
AllowInjectedClassName);
}
- return getDerived().RebuildTemplateName(SS, DTN->getOperator(), NameLoc,
+ return getDerived().RebuildTemplateName(SS, TemplateKWLoc,
+ DTN->getOperator(), NameLoc,
ObjectType, AllowInjectedClassName);
}
@@ -3866,6 +3868,10 @@ template<typename Derived>
bool TreeTransform<Derived>::TransformTemplateArgument(
const TemplateArgumentLoc &Input,
TemplateArgumentLoc &Output, bool Uneval) {
+ EnterExpressionEvaluationContext EEEC(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ /*LambdaContextDecl=*/nullptr, /*ExprContext=*/
+ Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
const TemplateArgument &Arg = Input.getArgument();
switch (Arg.getKind()) {
case TemplateArgument::Null:
@@ -3933,7 +3939,7 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
return true;
}
-/// \brief Iterator adaptor that invents template argument location information
+/// Iterator adaptor that invents template argument location information
/// for each of the template arguments in its underlying iterator.
template<typename Derived, typename InputIterator>
class TemplateArgumentLocInventIterator {
@@ -4353,6 +4359,7 @@ TypeSourceInfo *TreeTransform<Derived>::TransformTSIInObjectScope(
TemplateName Template
= getDerived().RebuildTemplateName(SS,
+ SpecTL.getTemplateKeywordLoc(),
*SpecTL.getTypePtr()->getIdentifier(),
SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup,
@@ -4752,6 +4759,44 @@ TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentVectorType(
+ TypeLocBuilder &TLB, DependentVectorTypeLoc TL) {
+ const DependentVectorType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ EnterExpressionEvaluationContext Unevaluated(
+ SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExprResult Size = getDerived().TransformExpr(T->getSizeExpr());
+ Size = SemaRef.ActOnConstantExpression(Size);
+ if (Size.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || ElementType != T->getElementType() ||
+ Size.get() != T->getSizeExpr()) {
+ Result = getDerived().RebuildDependentVectorType(
+ ElementType, Size.get(), T->getAttributeLoc(), T->getVectorKind());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // The rebuilt type might or might not still be dependent.
+ if (isa<DependentVectorType>(Result)) {
+ DependentVectorTypeLoc NewTL =
+ TLB.push<DependentVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else {
+ VectorTypeLoc NewTL = TLB.push<VectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+
+ return Result;
+}
+
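+// Illustration (hypothetical template, not from this patch): the dependent
+// case handled above arises when a vector_size attribute mentions a template
+// parameter:
+//
+//   template <unsigned N> struct V {
+//     typedef int vec __attribute__((vector_size(N * sizeof(int))));
+//   };
+//
+// Instantiating V<4> transforms the size expression and rebuilds a concrete
+// VectorType, which is why the result may push either TypeLoc kind.
+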
template<typename Derived>
QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
TypeLocBuilder &TLB,
@@ -5277,30 +5322,23 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
assert(ESI.Type != EST_Uninstantiated && ESI.Type != EST_Unevaluated);
// Instantiate a dynamic noexcept expression, if any.
- if (ESI.Type == EST_ComputedNoexcept) {
+ if (isComputedNoexcept(ESI.Type)) {
EnterExpressionEvaluationContext Unevaluated(
getSema(), Sema::ExpressionEvaluationContext::ConstantEvaluated);
ExprResult NoexceptExpr = getDerived().TransformExpr(ESI.NoexceptExpr);
if (NoexceptExpr.isInvalid())
return true;
- // FIXME: This is bogus, a noexcept expression is not a condition.
- NoexceptExpr = getSema().CheckBooleanCondition(Loc, NoexceptExpr.get());
+ ExceptionSpecificationType EST = ESI.Type;
+ NoexceptExpr =
+ getSema().ActOnNoexceptSpec(Loc, NoexceptExpr.get(), EST);
if (NoexceptExpr.isInvalid())
return true;
- if (!NoexceptExpr.get()->isValueDependent()) {
- NoexceptExpr = getSema().VerifyIntegerConstantExpression(
- NoexceptExpr.get(), nullptr,
- diag::err_noexcept_needs_constant_expression,
- /*AllowFold*/false);
- if (NoexceptExpr.isInvalid())
- return true;
- }
-
- if (ESI.NoexceptExpr != NoexceptExpr.get())
+ if (ESI.NoexceptExpr != NoexceptExpr.get() || EST != ESI.Type)
Changed = true;
ESI.NoexceptExpr = NoexceptExpr.get();
+ ESI.Type = EST;
}
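// Illustration: a computed specification such as
//   template <class T> void f() noexcept(noexcept(T()));
// is re-checked during instantiation via ActOnNoexceptSpec, which treats the
// operand as a constant boolean expression rather than a statement condition.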
if (ESI.Type != EST_Dynamic)
@@ -5507,7 +5545,7 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
// decltype expressions are not potentially evaluated contexts
EnterExpressionEvaluationContext Unevaluated(
SemaRef, Sema::ExpressionEvaluationContext::Unevaluated, nullptr,
- /*IsDecltype=*/true);
+ Sema::ExpressionEvaluationContextRecord::EK_Decltype);
ExprResult E = getDerived().TransformExpr(T->getUnderlyingExpr());
if (E.isInvalid())
@@ -5778,7 +5816,7 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
return Result;
}
- /// \brief Simple iterator that traverses the template arguments in a
+ /// Simple iterator that traverses the template arguments in a
/// container that provides a \c getArgLoc() member function.
///
/// This iterator is intended to be used with the iterator form of
@@ -6158,8 +6196,8 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
return QualType();
QualType Result = getDerived().RebuildDependentTemplateSpecializationType(
- T->getKeyword(), QualifierLoc, T->getIdentifier(),
- TL.getTemplateNameLoc(), NewTemplateArgs,
+ T->getKeyword(), QualifierLoc, TL.getTemplateKeywordLoc(),
+ T->getIdentifier(), TL.getTemplateNameLoc(), NewTemplateArgs,
/*AllowInjectedClassName*/ false);
if (Result.isNull())
return QualType();
@@ -6487,13 +6525,13 @@ TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) {
// Transform the left-hand case value.
LHS = getDerived().TransformExpr(S->getLHS());
- LHS = SemaRef.ActOnConstantExpression(LHS);
+ LHS = SemaRef.ActOnCaseExpr(S->getCaseLoc(), LHS);
if (LHS.isInvalid())
return StmtError();
// Transform the right-hand case value (for the GNU case-range extension).
RHS = getDerived().TransformExpr(S->getRHS());
- RHS = SemaRef.ActOnConstantExpression(RHS);
+ RHS = SemaRef.ActOnCaseExpr(S->getCaseLoc(), RHS);
if (RHS.isInvalid())
return StmtError();
}
@@ -6956,6 +6994,8 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
// The new CoroutinePromise object needs to be built and put into the current
// FunctionScopeInfo before any transformations or rebuilding occurs.
+ if (!SemaRef.buildCoroutineParameterMoves(FD->getLocation()))
+ return StmtError();
auto *Promise = SemaRef.buildCoroutinePromise(FD->getLocation());
if (!Promise)
return StmtError();
@@ -7046,8 +7086,6 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
Builder.ReturnStmt = Res.get();
}
}
- if (!Builder.buildParameterMoves())
- return StmtError();
return getDerived().RebuildCoroutineBodyStmt(Builder);
}
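// Note (hedged): building the parameter moves before the promise matters
// when the promise type's constructor takes the coroutine's parameters;
// e.g. for a hypothetical coroutine `task f(std::string s)`, the copy of
// 's' must exist before the promise can be constructed from it.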
@@ -7642,11 +7680,7 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
StmtResult Body;
{
Sema::CompoundScopeRAII CompoundScope(getSema());
- int ThisCaptureLevel =
- Sema::getOpenMPCaptureLevels(D->getDirectiveKind());
- Stmt *CS = D->getAssociatedStmt();
- while (--ThisCaptureLevel >= 0)
- CS = cast<CapturedStmt>(CS)->getCapturedStmt();
+ Stmt *CS = D->getInnermostCapturedStmt()->getCapturedStmt();
Body = getDerived().TransformStmt(CS);
}
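// Illustration: for a combined directive such as
//   #pragma omp target teams distribute parallel for
// the associated statement is wrapped in one CapturedStmt per capture level;
// getInnermostCapturedStmt() unwraps all of them in a single step.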
AssociatedStmt =
@@ -8905,6 +8939,12 @@ TreeTransform<Derived>::TransformIntegerLiteral(IntegerLiteral *E) {
return E;
}
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformFixedPointLiteral(
+ FixedPointLiteral *E) {
+ return E;
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformFloatingLiteral(FloatingLiteral *E) {
@@ -8986,7 +9026,7 @@ TreeTransform<Derived>::TransformParenExpr(ParenExpr *E) {
E->getRParen());
}
-/// \brief The operand of a unary address-of operator has special rules: it's
+/// The operand of a unary address-of operator has special rules: it's
/// allowed to refer to a non-static member of a class even if there's no 'this'
/// object available.
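///
/// For example (illustrative): in `struct X { int m; };`, the expression
/// `&X::m` is valid with no object at all and yields an `int X::*`
/// pointer-to-member instead of requiring an implicit `this`.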
template<typename Derived>
@@ -9517,7 +9557,7 @@ TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
}
return getDerived().RebuildInitList(E->getLBraceLoc(), Inits,
- E->getRBraceLoc(), E->getType());
+ E->getRBraceLoc());
}
template<typename Derived>
@@ -9685,7 +9725,7 @@ TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
E->getRParenLoc());
}
-/// \brief Transform an address-of-label expression.
+/// Transform an address-of-label expression.
///
/// By default, the transformation of an address-of-label expression always
/// rebuilds the expression, so that the label identifier can be resolved to
@@ -9951,7 +9991,8 @@ TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
return getDerived().RebuildCXXFunctionalCastExpr(Type,
E->getLParenLoc(),
SubExpr.get(),
- E->getRParenLoc());
+ E->getRParenLoc(),
+ E->isListInitialization());
}
template<typename Derived>
@@ -10403,7 +10444,7 @@ bool TreeTransform<Derived>::TransformOverloadExprDecls(OverloadExpr *Old,
// the corresponding pack is empty
if (AllEmptyPacks && !RequiresADL) {
getSema().Diag(Old->getNameLoc(), diag::err_using_pack_expansion_empty)
- << isa<UnresolvedMemberExpr>(Old) << Old->getNameInfo().getName();
+ << isa<UnresolvedMemberExpr>(Old) << Old->getName();
return true;
}
@@ -10814,7 +10855,7 @@ ExprResult TreeTransform<Derived>::TransformCXXInheritedCtorInitExpr(
E->constructsVBase(), E->inheritedFromVBase());
}
-/// \brief Transform a C++ temporary-binding expression.
+/// Transform a C++ temporary-binding expression.
///
/// Since CXXBindTemporaryExpr nodes are implicitly generated, we just
/// transform the subexpression and return that.
@@ -10824,7 +10865,7 @@ TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
return getDerived().TransformExpr(E->getSubExpr());
}
-/// \brief Transform a C++ expression that contains cleanups that should
+/// Transform a C++ expression that contains cleanups that should
/// be run after the expression is evaluated.
///
/// Since ExprWithCleanups nodes are implicitly generated, we
@@ -10867,11 +10908,12 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
return SemaRef.MaybeBindToTemporary(E);
}
- // FIXME: Pass in E->isListInitialization().
- return getDerived().RebuildCXXTemporaryObjectExpr(T,
- /*FIXME:*/T->getTypeLoc().getEndLoc(),
- Args,
- E->getLocEnd());
+ // FIXME: We should just pass E->isListInitialization(), but we're not
+ // prepared to handle list-initialization without a child InitListExpr.
+ SourceLocation LParenLoc = T->getTypeLoc().getEndLoc();
+ return getDerived().RebuildCXXTemporaryObjectExpr(
+ T, LParenLoc, Args, E->getLocEnd(),
+ /*ListInitialization=*/LParenLoc.isInvalid());
}
template<typename Derived>
@@ -11157,10 +11199,8 @@ TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
return E;
// FIXME: we're faking the locations of the commas
- return getDerived().RebuildCXXUnresolvedConstructExpr(T,
- E->getLParenLoc(),
- Args,
- E->getRParenLoc());
+ return getDerived().RebuildCXXUnresolvedConstructExpr(
+ T, E->getLParenLoc(), Args, E->getRParenLoc(), E->isListInitialization());
}
template<typename Derived>
@@ -11408,8 +11448,10 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
ArgStorage = TemplateArgument(TemplateName(TTPD), None);
} else {
auto *VD = cast<ValueDecl>(Pack);
- ExprResult DRE = getSema().BuildDeclRefExpr(VD, VD->getType(),
- VK_RValue, E->getPackLoc());
+ ExprResult DRE = getSema().BuildDeclRefExpr(
+ VD, VD->getType().getNonLValueExprType(getSema().Context),
+ VD->getType()->isReferenceType() ? VK_LValue : VK_RValue,
+ E->getPackLoc());
if (DRE.isInvalid())
return ExprError();
ArgStorage = new (getSema().Context) PackExpansionExpr(
@@ -12386,6 +12428,13 @@ TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
return SemaRef.Context.getVectorType(ElementType, NumElements, VecKind);
}
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDependentVectorType(
+ QualType ElementType, Expr *SizeExpr, SourceLocation AttributeLoc,
+ VectorType::VectorKind VecKind) {
+ return SemaRef.BuildVectorType(ElementType, SizeExpr, AttributeLoc);
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::RebuildExtVectorType(QualType ElementType,
unsigned NumElements,
@@ -12532,6 +12581,7 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
template<typename Derived>
TemplateName
TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const IdentifierInfo &Name,
SourceLocation NameLoc,
QualType ObjectType,
@@ -12540,7 +12590,6 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&Name, NameLoc);
Sema::TemplateTy Template;
- SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
SS, TemplateKWLoc, TemplateName,
ParsedType::make(ObjectType),
@@ -12552,6 +12601,7 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
template<typename Derived>
TemplateName
TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
OverloadedOperatorKind Operator,
SourceLocation NameLoc,
QualType ObjectType,
@@ -12560,7 +12610,6 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
// FIXME: Bogus location information.
SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations);
- SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
Sema::TemplateTy Template;
getSema().ActOnDependentTemplateName(/*Scope=*/nullptr,
SS, TemplateKWLoc, Name,
@@ -12609,9 +12658,11 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
// -> is never a builtin operation.
return SemaRef.BuildOverloadedArrowExpr(nullptr, First, OpLoc);
} else if (Second == nullptr || isPostIncDec) {
- if (!First->getType()->isOverloadableType()) {
- // The argument is not of overloadable type, so try to create a
- // built-in unary operation.
+ if (!First->getType()->isOverloadableType() ||
+ (Op == OO_Amp && getSema().isQualifiedMemberAccess(First))) {
+ // The argument is not of overloadable type, or this is an expression
+ // of the form &Class::member, so try to create a built-in unary
+ // operation.
UnaryOperatorKind Opc
= UnaryOperator::getOverloadedOpcode(Op, isPostIncDec);
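// Illustration of the newly excluded case: given
//   struct S { void m(); };
//   auto p = &S::m;
// the operand is a qualified member access, so '&' must form a
// pointer-to-member directly and never consults an overloaded operator&.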
diff --git a/lib/Sema/TypeLocBuilder.h b/lib/Sema/TypeLocBuilder.h
index 9c77045d2e12..a088fe9de667 100644
--- a/lib/Sema/TypeLocBuilder.h
+++ b/lib/Sema/TypeLocBuilder.h
@@ -83,7 +83,7 @@ class TypeLocBuilder {
NumBytesAtAlign4 = NumBytesAtAlign8 = 0;
}
- /// \brief Tell the TypeLocBuilder that the type it is storing has been
+ /// Tell the TypeLocBuilder that the type it is storing has been
/// modified in some safe way that doesn't affect type-location information.
void TypeWasModifiedSafely(QualType T) {
#ifndef NDEBUG
@@ -112,7 +112,7 @@ class TypeLocBuilder {
return DI;
}
- /// \brief Copies the type-location information to the given AST context and
+ /// Copies the type-location information to the given AST context and
/// returns a \c TypeLoc referring into the AST context.
TypeLoc getTypeLocInContext(ASTContext &Context, QualType T) {
#ifndef NDEBUG
@@ -132,7 +132,7 @@ private:
/// Grow to the given capacity.
void grow(size_t NewCapacity);
- /// \brief Retrieve a temporary TypeLoc that refers into this \c TypeLocBuilder
+ /// Retrieve a temporary TypeLoc that refers into this \c TypeLocBuilder
/// object.
///
/// The resulting \c TypeLoc should only be used so long as the
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index 9c6f03cd0bb7..da482717f450 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -16,7 +16,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Serialization/ASTDeserializationListener.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/DJB.h"
using namespace clang;
@@ -91,6 +91,78 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::LongDouble:
ID = PREDEF_TYPE_LONGDOUBLE_ID;
break;
+ case BuiltinType::ShortAccum:
+ ID = PREDEF_TYPE_SHORT_ACCUM_ID;
+ break;
+ case BuiltinType::Accum:
+ ID = PREDEF_TYPE_ACCUM_ID;
+ break;
+ case BuiltinType::LongAccum:
+ ID = PREDEF_TYPE_LONG_ACCUM_ID;
+ break;
+ case BuiltinType::UShortAccum:
+ ID = PREDEF_TYPE_USHORT_ACCUM_ID;
+ break;
+ case BuiltinType::UAccum:
+ ID = PREDEF_TYPE_UACCUM_ID;
+ break;
+ case BuiltinType::ULongAccum:
+ ID = PREDEF_TYPE_ULONG_ACCUM_ID;
+ break;
+ case BuiltinType::ShortFract:
+ ID = PREDEF_TYPE_SHORT_FRACT_ID;
+ break;
+ case BuiltinType::Fract:
+ ID = PREDEF_TYPE_FRACT_ID;
+ break;
+ case BuiltinType::LongFract:
+ ID = PREDEF_TYPE_LONG_FRACT_ID;
+ break;
+ case BuiltinType::UShortFract:
+ ID = PREDEF_TYPE_USHORT_FRACT_ID;
+ break;
+ case BuiltinType::UFract:
+ ID = PREDEF_TYPE_UFRACT_ID;
+ break;
+ case BuiltinType::ULongFract:
+ ID = PREDEF_TYPE_ULONG_FRACT_ID;
+ break;
+ case BuiltinType::SatShortAccum:
+ ID = PREDEF_TYPE_SAT_SHORT_ACCUM_ID;
+ break;
+ case BuiltinType::SatAccum:
+ ID = PREDEF_TYPE_SAT_ACCUM_ID;
+ break;
+ case BuiltinType::SatLongAccum:
+ ID = PREDEF_TYPE_SAT_LONG_ACCUM_ID;
+ break;
+ case BuiltinType::SatUShortAccum:
+ ID = PREDEF_TYPE_SAT_USHORT_ACCUM_ID;
+ break;
+ case BuiltinType::SatUAccum:
+ ID = PREDEF_TYPE_SAT_UACCUM_ID;
+ break;
+ case BuiltinType::SatULongAccum:
+ ID = PREDEF_TYPE_SAT_ULONG_ACCUM_ID;
+ break;
+ case BuiltinType::SatShortFract:
+ ID = PREDEF_TYPE_SAT_SHORT_FRACT_ID;
+ break;
+ case BuiltinType::SatFract:
+ ID = PREDEF_TYPE_SAT_FRACT_ID;
+ break;
+ case BuiltinType::SatLongFract:
+ ID = PREDEF_TYPE_SAT_LONG_FRACT_ID;
+ break;
+ case BuiltinType::SatUShortFract:
+ ID = PREDEF_TYPE_SAT_USHORT_FRACT_ID;
+ break;
+ case BuiltinType::SatUFract:
+ ID = PREDEF_TYPE_SAT_UFRACT_ID;
+ break;
+ case BuiltinType::SatULongFract:
+ ID = PREDEF_TYPE_SAT_ULONG_FRACT_ID;
+ break;
case BuiltinType::Float16:
ID = PREDEF_TYPE_FLOAT16_ID;
break;
@@ -100,6 +172,9 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::NullPtr:
ID = PREDEF_TYPE_NULLPTR_ID;
break;
+ case BuiltinType::Char8:
+ ID = PREDEF_TYPE_CHAR8_ID;
+ break;
case BuiltinType::Char16:
ID = PREDEF_TYPE_CHAR16_ID;
break;
@@ -171,7 +246,7 @@ unsigned serialization::ComputeHash(Selector Sel) {
unsigned R = 5381;
for (unsigned I = 0; I != N; ++I)
if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
- R = llvm::HashString(II->getName(), R);
+ R = llvm::djbHash(II->getName(), R);
return R;
}
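// A minimal sketch of the hash this now shares with the rest of LLVM
// (assuming the classic djb2 recurrence; llvm::djbHash is the vetted
// implementation):
//
//   static unsigned djb2(llvm::StringRef Str, unsigned H = 5381) {
//     for (unsigned char C : Str.bytes())
//       H = H * 33 + C; // h = 33*h + c
//     return H;
//   }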
@@ -231,7 +306,7 @@ serialization::getDefinitiveDeclContext(const DeclContext *DC) {
default:
llvm_unreachable("Unhandled DeclContext in AST reader");
}
-
+
llvm_unreachable("Unhandled decl kind");
}
@@ -344,9 +419,21 @@ bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) {
return true;
}
- // Otherwise, we only care about anonymous class members.
+ // At block scope, we number everything that we need to deduplicate, since we
+ // can't just use name matching to keep things lined up.
+ // FIXME: This is only necessary for an inline function or a template or
+ // similar.
+ if (D->getLexicalDeclContext()->isFunctionOrMethod()) {
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return VD->isStaticLocal();
+ // FIXME: What about CapturedDecls (and declarations nested within them)?
+ return isa<TagDecl>(D) || isa<BlockDecl>(D);
+ }
+
+ // Otherwise, we only care about anonymous class members / block-scope decls.
+ // FIXME: We need to handle lambdas and blocks within inline / templated
+ // variables too.
if (D->getDeclName() || !isa<CXXRecordDecl>(D->getLexicalDeclContext()))
return false;
return isa<TagDecl>(D) || isa<FieldDecl>(D);
}
-
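// Illustration (hypothetical function, not from this patch): under the
// block-scope rule above, both declarations below receive anonymous
// declaration numbers so that two modules containing f() agree on identity:
//
//   inline void f() {
//     static int Counter; // static local: numbered
//     struct Local {};    // block-scope tag: numbered
//   }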
diff --git a/lib/Serialization/ASTCommon.h b/lib/Serialization/ASTCommon.h
index 6aca453bbb89..12e26c1fc2b9 100644
--- a/lib/Serialization/ASTCommon.h
+++ b/lib/Serialization/ASTCommon.h
@@ -72,7 +72,7 @@ TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
unsigned ComputeHash(Selector Sel);
-/// \brief Retrieve the "definitive" declaration that provides all of the
+/// Retrieve the "definitive" declaration that provides all of the
/// visible entries for the given declaration context, if there is one.
///
/// The "definitive" declaration is the only place where we need to look to
@@ -84,14 +84,14 @@ unsigned ComputeHash(Selector Sel);
/// multiple definitions.
const DeclContext *getDefinitiveDeclContext(const DeclContext *DC);
-/// \brief Determine whether the given declaration kind is redeclarable.
+/// Determine whether the given declaration kind is redeclarable.
bool isRedeclarableDeclKind(unsigned Kind);
-/// \brief Determine whether the given declaration needs an anonymous
+/// Determine whether the given declaration needs an anonymous
/// declaration number.
bool needsAnonymousDeclarationNumber(const NamedDecl *D);
-/// \brief Visit each declaration within \c DC that needs an anonymous
+/// Visit each declaration within \c DC that needs an anonymous
/// declaration number and call \p Visit with the declaration and its number.
template<typename Fn> void numberAnonymousDeclsWithin(const DeclContext *DC,
Fn Visit) {
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 4ed822e04f6c..9a3b9e1da39a 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -61,7 +61,6 @@
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VersionTuple.h"
#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -104,8 +103,9 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/Compression.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Compression.h"
+#include "llvm/Support/DJB.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
@@ -114,6 +114,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -255,7 +256,7 @@ void ChainedASTReaderListener::readModuleFileExtension(
ASTReaderListener::~ASTReaderListener() = default;
-/// \brief Compare the given set of language options against an existing set of
+/// Compare the given set of language options against an existing set of
/// language options.
///
/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
@@ -359,7 +360,7 @@ static bool checkLanguageOptions(const LangOptions &LangOpts,
return false;
}
-/// \brief Compare the given set of target options against an existing set of
+/// Compare the given set of target options against an existing set of
/// target options.
///
/// \param Diags If non-NULL, diagnostics will be emitted via this engine.
@@ -395,8 +396,8 @@ static bool checkTargetOptions(const TargetOptions &TargetOpts,
ExistingTargetOpts.FeaturesAsWritten.end());
SmallVector<StringRef, 4> ReadFeatures(TargetOpts.FeaturesAsWritten.begin(),
TargetOpts.FeaturesAsWritten.end());
- std::sort(ExistingFeatures.begin(), ExistingFeatures.end());
- std::sort(ReadFeatures.begin(), ReadFeatures.end());
+ llvm::sort(ExistingFeatures.begin(), ExistingFeatures.end());
+ llvm::sort(ReadFeatures.begin(), ReadFeatures.end());
// We compute the set difference in both directions explicitly so that we can
// diagnose the differences differently.
@@ -574,7 +575,7 @@ bool PCHValidator::ReadDiagnosticOptions(
Complain);
}
-/// \brief Collect the macro definitions provided by the given preprocessor
+/// Collect the macro definitions provided by the given preprocessor
/// options.
static void
collectMacroDefinitions(const PreprocessorOptions &PPOpts,
@@ -612,7 +613,7 @@ collectMacroDefinitions(const PreprocessorOptions &PPOpts,
}
}
-/// \brief Check the preprocessor options deserialized from the control block
+/// Check the preprocessor options deserialized from the control block
/// against the preprocessor options in an existing preprocessor.
///
/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
@@ -703,6 +704,17 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
// Compute the #include and #include_macros lines we need.
for (unsigned I = 0, N = ExistingPPOpts.Includes.size(); I != N; ++I) {
StringRef File = ExistingPPOpts.Includes[I];
+
+ if (!ExistingPPOpts.ImplicitPCHInclude.empty() &&
+ !ExistingPPOpts.PCHThroughHeader.empty()) {
+ // In case the through header is an include, we must add all the includes
+ // to the predefines so the start point can be determined.
+ SuggestedPredefines += "#include \"";
+ SuggestedPredefines += File;
+ SuggestedPredefines += "\"\n";
+ continue;
+ }
+
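+ // Illustration: with a through header configured, -include a.h -include b.h
+ // are replayed into SuggestedPredefines as
+ //   #include "a.h"
+ //   #include "b.h"
+ // so the lexer can locate the start point even when the through header is
+ // itself one of the includes.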
if (File == ExistingPPOpts.ImplicitPCHInclude)
continue;
@@ -870,7 +882,7 @@ ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
}
unsigned ASTIdentifierLookupTraitBase::ComputeHash(const internal_key_type& a) {
- return llvm::HashString(a);
+ return llvm::djbHash(a);
}
std::pair<unsigned, unsigned>
@@ -888,7 +900,7 @@ ASTIdentifierLookupTraitBase::ReadKey(const unsigned char* d, unsigned n) {
return StringRef((const char*) d, n-1);
}
-/// \brief Whether the given identifier is "interesting".
+/// Whether the given identifier is "interesting".
static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
bool IsModule) {
return II.hadMacroDefinition() ||
@@ -1211,7 +1223,7 @@ void ASTReader::Error(unsigned DiagID,
// Source Manager Deserialization
//===----------------------------------------------------------------------===//
-/// \brief Read the line table in the source manager block.
+/// Read the line table in the source manager block.
/// \returns true if there was an error.
bool ASTReader::ParseLineTable(ModuleFile &F,
const RecordData &Record) {
@@ -1257,7 +1269,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
return false;
}
-/// \brief Read a source manager block
+/// Read a source manager block.
bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
using namespace SrcMgr;
@@ -1313,7 +1325,7 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
}
}
-/// \brief If a header file is not found at the path that we expect it to be
+/// If a header file is not found at the path where we expect it to be
/// and the PCH file was moved from its original location, try to resolve the
/// file by assuming that header+PCH were moved together and the header is in
/// the same place relative to the PCH.
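///
/// For example (illustrative paths): a PCH built as /old/tree/pch.h.pch that
/// recorded the header /old/tree/inc/a.h will, when read from
/// /new/tree/pch.h.pch, probe /new/tree/inc/a.h.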
@@ -1481,6 +1493,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
SourceMgr.createExpansionLoc(SpellingLoc,
ReadSourceLocation(*F, Record[2]),
ReadSourceLocation(*F, Record[3]),
+ Record[5],
Record[4],
ID,
BaseOffset + Record[0]);
@@ -1510,7 +1523,7 @@ std::pair<SourceLocation, StringRef> ASTReader::getModuleImportLoc(int ID) {
return std::make_pair(M->ImportLoc, StringRef(M->ModuleName));
}
-/// \brief Find the location where the module F is imported.
+/// Find the location where the module F is imported.
SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
if (F->ImportLoc.isValid())
return F->ImportLoc;
@@ -1856,7 +1869,7 @@ void ASTReader::ReadDefinedMacros() {
namespace {
- /// \brief Visitor class used to look up identifiers in an AST file.
+ /// Visitor class used to look up identifiers in an AST file.
class IdentifierLookupVisitor {
StringRef Name;
unsigned NameHash;
@@ -1900,7 +1913,7 @@ namespace {
return true;
}
- // \brief Retrieve the identifier info found within the module
+ // Retrieve the identifier info found within the module
// files.
IdentifierInfo *getIdentifierInfo() const { return Found; }
};
@@ -2136,7 +2149,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
}
// Check if there was a request to override the contents of the file
- // that was part of the precompiled header. Overridding such a file
+ // that was part of the precompiled header. Overriding such a file
// can lead to problems when lexing using the source locations from the
// PCH.
SourceManager &SM = getSourceManager();
@@ -2208,7 +2221,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
return IF;
}
-/// \brief If we are loading a relocatable PCH or module file, and the filename
+/// If we are loading a relocatable PCH or module file, and the filename
/// is not an absolute path, add the system or module root to the beginning of
/// the file name.
void ASTReader::ResolveImportedPath(ModuleFile &M, std::string &Filename) {
@@ -2480,7 +2493,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return VersionMismatch;
}
- bool hasErrors = Record[6];
+ bool hasErrors = Record[7];
if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
Diag(diag::err_pch_with_compiler_errors);
return HadErrors;
@@ -2498,6 +2511,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
F.HasTimestamps = Record[5];
+ F.PCHHasObjectFile = Record[6];
+
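+ // Note: this record gained a slot for PCHHasObjectFile, which is why
+ // 'hasErrors' above now reads Record[7] rather than Record[6].
+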
const std::string &CurBranch = getClangFullRepositoryVersion();
StringRef ASTBranch = Blob;
if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
@@ -2611,7 +2626,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
"MODULE_DIRECTORY found before MODULE_NAME");
// If we've already loaded a module map file covering this module, we may
// have a better path for it (relative to the current build).
- Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
+ Module *M = PP.getHeaderSearchInfo().lookupModule(
+ F.ModuleName, /*AllowSearch*/ true,
+ /*AllowExtraModuleMapSearch*/ true);
if (M && M->Directory) {
// If we're implicitly loading a module, the base directory can't
// change between the build and use.
@@ -3215,6 +3232,24 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
+ case PPD_SKIPPED_RANGES: {
+ F.PreprocessedSkippedRangeOffsets = (const PPSkippedRange*)Blob.data();
+ assert(Blob.size() % sizeof(PPSkippedRange) == 0);
+ F.NumPreprocessedSkippedRanges = Blob.size() / sizeof(PPSkippedRange);
+
+ if (!PP.getPreprocessingRecord())
+ PP.createPreprocessingRecord();
+ if (!PP.getPreprocessingRecord()->getExternalSource())
+ PP.getPreprocessingRecord()->SetExternalSource(*this);
+ F.BasePreprocessedSkippedRangeID = PP.getPreprocessingRecord()
+ ->allocateSkippedRanges(F.NumPreprocessedSkippedRanges);
+
+ if (F.NumPreprocessedSkippedRanges > 0)
+ GlobalSkippedRangeMap.insert(
+ std::make_pair(F.BasePreprocessedSkippedRangeID, &F));
+ break;
+ }
+
case DECL_UPDATE_OFFSETS:
if (Record.size() % 2 != 0) {
Error("invalid DECL_UPDATE_OFFSETS block in AST file");
@@ -3648,7 +3683,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
return Success;
}
-/// \brief Move the given method to the back of the global list of methods.
+/// Move the given method to the back of the global list of methods.
static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) {
// Find the entry for this selector in the method pool.
Sema::GlobalMethodPool::iterator Known
@@ -3801,7 +3836,7 @@ static void updateModuleTimestamp(ModuleFile &MF) {
OS.clear_error(); // Avoid triggering a fatal error.
}
-/// \brief Given a cursor at the start of an AST file, scan ahead and drop the
+/// Given a cursor at the start of an AST file, scan ahead and drop the
/// cursor into the start of the given block ID, returning false on success and
/// true on failure.
static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
@@ -4068,7 +4103,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
static ASTFileSignature readASTFileSignature(StringRef PCH);
-/// \brief Whether \p Stream starts with the AST/PCH file magic number 'CPCH'.
+/// Whether \p Stream starts with the AST/PCH file magic number 'CPCH'.
static bool startsWithASTFileMagic(BitstreamCursor &Stream) {
return Stream.canSkipToPos(4) &&
Stream.Read(8) == 'C' &&
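The same check over a raw buffer, as a self-contained sketch (helper name invented):

#include <cstddef>
#include <cstring>

// An AST/PCH file begins with the four bytes 'C' 'P' 'C' 'H'.
static bool startsWithMagic(const char *Buf, std::size_t Size) {
  return Size >= 4 && std::memcmp(Buf, "CPCH", 4) == 0;
}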
@@ -4559,7 +4594,7 @@ void ASTReader::finalizeForWriting() {
// Nothing to do for now.
}
-/// \brief Reads and return the signature record from \p PCH's control block, or
+/// Reads and returns the signature record from \p PCH's control block, or
/// else returns 0.
static ASTFileSignature readASTFileSignature(StringRef PCH) {
BitstreamCursor Stream(PCH);
@@ -4585,7 +4620,7 @@ static ASTFileSignature readASTFileSignature(StringRef PCH) {
}
}
-/// \brief Retrieve the name of the original source file name
+/// Retrieve the name of the original source file
/// directly from the AST file, without actually loading the AST
/// file.
std::string ASTReader::getOriginalSourceFile(
@@ -4791,7 +4826,8 @@ bool ASTReader::readASTFileControlBlock(
unsigned NumInputFiles = Record[0];
unsigned NumUserFiles = Record[1];
- const uint64_t *InputFileOffs = (const uint64_t *)Blob.data();
+ const llvm::support::unaligned_uint64_t *InputFileOffs =
+ (const llvm::support::unaligned_uint64_t *)Blob.data();
for (unsigned I = 0; I != NumInputFiles; ++I) {
// Go find this input file.
bool isSystemFile = I >= NumUserFiles;
@@ -4961,7 +4997,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SUBMODULE_DEFINITION: {
- if (Record.size() < 8) {
+ if (Record.size() < 12) {
Error("malformed module definition");
return Failure;
}
@@ -4979,6 +5015,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
bool InferExplicitSubmodules = Record[Idx++];
bool InferExportWildcard = Record[Idx++];
bool ConfigMacrosExhaustive = Record[Idx++];
+ bool ModuleMapIsPrivate = Record[Idx++];
Module *ParentModule = nullptr;
if (Parent)
@@ -5026,6 +5063,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
CurrentModule->InferExplicitSubmodules = InferExplicitSubmodules;
CurrentModule->InferExportWildcard = InferExportWildcard;
CurrentModule->ConfigMacrosExhaustive = ConfigMacrosExhaustive;
+ CurrentModule->ModuleMapIsPrivate = ModuleMapIsPrivate;
if (DeserializationListener)
DeserializationListener->ModuleRead(GlobalID, CurrentModule);
@@ -5152,6 +5190,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SUBMODULE_LINK_LIBRARY:
+ ModMap.resolveLinkAsDependencies(CurrentModule);
CurrentModule->LinkLibraries.push_back(
Module::LinkLibrary(Blob, Record[0]));
break;
@@ -5184,12 +5223,13 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_EXPORT_AS:
CurrentModule->ExportAsModule = Blob.str();
+ ModMap.addLinkAsDependency(CurrentModule);
break;
}
}
}
-/// \brief Parse the record that corresponds to a LangOptions data
+/// Parse the record that corresponds to a LangOptions data
/// structure.
///
/// This routine parses the language options from the AST file and then gives
@@ -5387,6 +5427,20 @@ ASTReader::getModuleFileLevelDecls(ModuleFile &Mod) {
Mod.FileSortedDecls + Mod.NumFileSortedDecls));
}
+SourceRange ASTReader::ReadSkippedRange(unsigned GlobalIndex) {
+ auto I = GlobalSkippedRangeMap.find(GlobalIndex);
+ assert(I != GlobalSkippedRangeMap.end() &&
+ "Corrupted global skipped range map");
+ ModuleFile *M = I->second;
+ unsigned LocalIndex = GlobalIndex - M->BasePreprocessedSkippedRangeID;
+ assert(LocalIndex < M->NumPreprocessedSkippedRanges);
+ PPSkippedRange RawRange = M->PreprocessedSkippedRangeOffsets[LocalIndex];
+ SourceRange Range(TranslateSourceLocation(*M, RawRange.getBegin()),
+ TranslateSourceLocation(*M, RawRange.getEnd()));
+ assert(Range.isValid());
+ return Range;
+}
+
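The base-plus-offset scheme above (each module file owns a contiguous block of global IDs starting at its base) can be sketched with a plain ordered map standing in for the reader's ContinuousRangeMap; all names below are illustrative:

#include <cassert>
#include <map>

// Maps a module file's base skipped-range ID to the number of ranges it owns.
using OwnerMap = std::map<unsigned, unsigned>;

static unsigned globalToLocal(const OwnerMap &Owners, unsigned GlobalIndex) {
  auto It = Owners.upper_bound(GlobalIndex); // first base > GlobalIndex
  assert(It != Owners.begin() && "index below every base ID");
  --It;                                      // the owning block
  unsigned Local = GlobalIndex - It->first;
  assert(Local < It->second && "index past the owner's block");
  return Local;
}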
PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
PreprocessedEntityID PPID = Index+1;
std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
@@ -5474,7 +5528,7 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
llvm_unreachable("Invalid PreprocessorDetailRecordTypes");
}
-/// \brief Find the next module that contains entities and return the ID
+/// Find the next module that contains entities and return the ID
/// of the first entry.
///
/// \param SLocMapI points at a chunk of a module that contains no
@@ -5577,7 +5631,7 @@ PreprocessedEntityID ASTReader::findPreprocessedEntity(SourceLocation Loc,
return M.BasePreprocessedEntityID + (PPI - pp_begin);
}
-/// \brief Returns a pair of [Begin, End) indices of preallocated
+/// Returns a pair of [Begin, End) indices of preallocated
/// preprocessed entities that \arg Range encompasses.
std::pair<unsigned, unsigned>
ASTReader::findPreprocessedEntitiesInRange(SourceRange Range) {
@@ -5591,7 +5645,7 @@ std::pair<unsigned, unsigned>
return std::make_pair(BeginID, EndID);
}
-/// \brief Optionally returns true or false if the preallocated preprocessed
+/// Optionally returns true or false if the preallocated preprocessed
/// entity with index \arg Index came from file \arg FID.
Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
FileID FID) {
@@ -5615,7 +5669,7 @@ Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
namespace {
- /// \brief Visitor used to search for information about a header file.
+ /// Visitor used to search for information about a header file.
class HeaderFileInfoVisitor {
const FileEntry *FE;
Optional<HeaderFileInfo> HFI;
@@ -5729,6 +5783,8 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
Initial.ExtBehavior = (diag::Severity)Flags;
FirstState = ReadDiagState(Initial, SourceLocation(), true);
+ assert(F.OriginalSourceFileID.isValid());
+
// Set up the root buffer of the module to start with the initial
// diagnostic state of the module itself, to cover files that contain no
// explicit transitions (for which we did not serialize anything).
@@ -5749,6 +5805,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
"Invalid data, missing pragma diagnostic states");
SourceLocation Loc = ReadSourceLocation(F, Record[Idx++]);
auto IDAndOffset = SourceMgr.getDecomposedLoc(Loc);
+ assert(IDAndOffset.first.isValid() && "invalid FileID for transition");
assert(IDAndOffset.second == 0 && "not a start location for a FileID");
unsigned Transitions = Record[Idx++];
@@ -5792,7 +5849,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
}
}
-/// \brief Get the correct cursor and offset for loading a type.
+/// Get the correct cursor and offset for loading a type.
ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
assert(I != GlobalTypeMap.end() && "Corrupted global type map");
@@ -5800,7 +5857,7 @@ ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
}
-/// \brief Read and return the type with the given index..
+/// Read and return the type with the given index.
///
/// The index is the type ID, shifted and minus the number of predefs. This
/// routine actually reads the record corresponding to the type at the given
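A worked example of that index computation, with illustrative constants (the real widths live in Qualifiers and the serialization format, not here):

#include <cstdint>

constexpr unsigned FastQualBits = 3;      // assumed width of the fast-qualifier bits
constexpr unsigned NumPredefTypeIDs = 18; // assumed count of predefined type IDs

constexpr uint64_t indexForTypeID(uint64_t ID) {
  return (ID >> FastQualBits) - NumPredefTypeIDs;
}

// The first non-predefined type record lands at index 0.
static_assert(indexForTypeID(uint64_t(NumPredefTypeIDs) << FastQualBits) == 0,
              "ID -> index round trip");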
@@ -5969,13 +6026,14 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
}
case TYPE_FUNCTION_NO_PROTO: {
- if (Record.size() != 7) {
+ if (Record.size() != 8) {
Error("incorrect encoding of no-proto function type");
return QualType();
}
QualType ResultType = readType(*Loc.F, Record, Idx);
FunctionType::ExtInfo Info(Record[1], Record[2], Record[3],
- (CallingConv)Record[4], Record[5], Record[6]);
+ (CallingConv)Record[4], Record[5], Record[6],
+ Record[7]);
return Context.getFunctionNoProtoType(ResultType, Info);
}
@@ -5988,9 +6046,10 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
/*regparm*/ Record[3],
static_cast<CallingConv>(Record[4]),
/*produces*/ Record[5],
- /*nocallersavedregs*/ Record[6]);
+ /*nocallersavedregs*/ Record[6],
+ /*nocfcheck*/ Record[7]);
- unsigned Idx = 7;
+ unsigned Idx = 8;
EPI.Variadic = Record[Idx++];
EPI.HasTrailingReturn = Record[Idx++];
@@ -6142,7 +6201,8 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
QualType NamedType = readType(*Loc.F, Record, Idx);
- return Context.getElaboratedType(Keyword, NNS, NamedType);
+ TagDecl *OwnedTagDecl = ReadDeclAs<TagDecl>(*Loc.F, Record, Idx);
+ return Context.getElaboratedType(Keyword, NNS, NamedType, OwnedTagDecl);
}
case TYPE_OBJC_INTERFACE: {
@@ -6311,6 +6371,17 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
return Context.getPipeType(ElementType, ReadOnly);
}
+ case TYPE_DEPENDENT_SIZED_VECTOR: {
+ unsigned Idx = 0;
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ Expr *SizeExpr = ReadExpr(*Loc.F);
+ SourceLocation AttrLoc = ReadSourceLocation(*Loc.F, Record, Idx);
+ unsigned VecKind = Record[Idx];
+
+ return Context.getDependentVectorType(ElementType, SizeExpr, AttrLoc,
+ (VectorType::VectorKind)VecKind);
+ }
+
case TYPE_DEPENDENT_SIZED_EXT_VECTOR: {
unsigned Idx = 0;
@@ -6349,7 +6420,7 @@ void ASTReader::readExceptionSpec(ModuleFile &ModuleFile,
for (unsigned I = 0, N = Record[Idx++]; I != N; ++I)
Exceptions.push_back(readType(ModuleFile, Record, Idx));
ESI.Exceptions = Exceptions;
- } else if (EST == EST_ComputedNoexcept) {
+ } else if (isComputedNoexcept(EST)) {
ESI.NoexceptExpr = ReadExpr(ModuleFile);
} else if (EST == EST_Uninstantiated) {
ESI.SourceDecl = ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
@@ -6491,6 +6562,11 @@ void TypeLocReader::VisitVectorTypeLoc(VectorTypeLoc TL) {
TL.setNameLoc(ReadSourceLocation());
}
+void TypeLocReader::VisitDependentVectorTypeLoc(
+ DependentVectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation());
+}
+
void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
TL.setNameLoc(ReadSourceLocation());
}
@@ -6686,6 +6762,13 @@ void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) {
TL.setKWLoc(ReadSourceLocation());
}
+void ASTReader::ReadTypeLoc(ModuleFile &F, const ASTReader::RecordData &Record,
+ unsigned &Idx, TypeLoc TL) {
+ TypeLocReader TLR(F, *this, Record, Idx);
+ for (; !TL.isNull(); TL = TL.getNextTypeLoc())
+ TLR.Visit(TL);
+}
+
TypeSourceInfo *
ASTReader::GetTypeSourceInfo(ModuleFile &F, const ASTReader::RecordData &Record,
unsigned &Idx) {
@@ -6694,9 +6777,7 @@ ASTReader::GetTypeSourceInfo(ModuleFile &F, const ASTReader::RecordData &Record,
return nullptr;
TypeSourceInfo *TInfo = getContext().CreateTypeSourceInfo(InfoTy);
- TypeLocReader TLR(F, *this, Record, Idx);
- for (TypeLoc TL = TInfo->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc())
- TLR.Visit(TL);
+ ReadTypeLoc(F, Record, Idx, TInfo->getTypeLoc());
return TInfo;
}
@@ -6774,6 +6855,78 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_LONGDOUBLE_ID:
T = Context.LongDoubleTy;
break;
+ case PREDEF_TYPE_SHORT_ACCUM_ID:
+ T = Context.ShortAccumTy;
+ break;
+ case PREDEF_TYPE_ACCUM_ID:
+ T = Context.AccumTy;
+ break;
+ case PREDEF_TYPE_LONG_ACCUM_ID:
+ T = Context.LongAccumTy;
+ break;
+ case PREDEF_TYPE_USHORT_ACCUM_ID:
+ T = Context.UnsignedShortAccumTy;
+ break;
+ case PREDEF_TYPE_UACCUM_ID:
+ T = Context.UnsignedAccumTy;
+ break;
+ case PREDEF_TYPE_ULONG_ACCUM_ID:
+ T = Context.UnsignedLongAccumTy;
+ break;
+ case PREDEF_TYPE_SHORT_FRACT_ID:
+ T = Context.ShortFractTy;
+ break;
+ case PREDEF_TYPE_FRACT_ID:
+ T = Context.FractTy;
+ break;
+ case PREDEF_TYPE_LONG_FRACT_ID:
+ T = Context.LongFractTy;
+ break;
+ case PREDEF_TYPE_USHORT_FRACT_ID:
+ T = Context.UnsignedShortFractTy;
+ break;
+ case PREDEF_TYPE_UFRACT_ID:
+ T = Context.UnsignedFractTy;
+ break;
+ case PREDEF_TYPE_ULONG_FRACT_ID:
+ T = Context.UnsignedLongFractTy;
+ break;
+ case PREDEF_TYPE_SAT_SHORT_ACCUM_ID:
+ T = Context.SatShortAccumTy;
+ break;
+ case PREDEF_TYPE_SAT_ACCUM_ID:
+ T = Context.SatAccumTy;
+ break;
+ case PREDEF_TYPE_SAT_LONG_ACCUM_ID:
+ T = Context.SatLongAccumTy;
+ break;
+ case PREDEF_TYPE_SAT_USHORT_ACCUM_ID:
+ T = Context.SatUnsignedShortAccumTy;
+ break;
+ case PREDEF_TYPE_SAT_UACCUM_ID:
+ T = Context.SatUnsignedAccumTy;
+ break;
+ case PREDEF_TYPE_SAT_ULONG_ACCUM_ID:
+ T = Context.SatUnsignedLongAccumTy;
+ break;
+ case PREDEF_TYPE_SAT_SHORT_FRACT_ID:
+ T = Context.SatShortFractTy;
+ break;
+ case PREDEF_TYPE_SAT_FRACT_ID:
+ T = Context.SatFractTy;
+ break;
+ case PREDEF_TYPE_SAT_LONG_FRACT_ID:
+ T = Context.SatLongFractTy;
+ break;
+ case PREDEF_TYPE_SAT_USHORT_FRACT_ID:
+ T = Context.SatUnsignedShortFractTy;
+ break;
+ case PREDEF_TYPE_SAT_UFRACT_ID:
+ T = Context.SatUnsignedFractTy;
+ break;
+ case PREDEF_TYPE_SAT_ULONG_FRACT_ID:
+ T = Context.SatUnsignedLongFractTy;
+ break;
case PREDEF_TYPE_FLOAT16_ID:
T = Context.Float16Ty;
break;
@@ -6798,6 +6951,9 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_NULLPTR_ID:
T = Context.NullPtrTy;
break;
+ case PREDEF_TYPE_CHAR8_ID:
+ T = Context.Char8Ty;
+ break;
case PREDEF_TYPE_CHAR16_ID:
T = Context.Char16Ty;
break;
@@ -7239,7 +7395,7 @@ serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
return getGlobalDeclID(F, Record[Idx++]);
}
-/// \brief Resolve the offset of a statement into a statement.
+/// Resolve the offset of a statement into a statement.
///
/// This operation will read a new statement from the external
/// source each time it is called, and is meant to be used via a
@@ -7432,7 +7588,7 @@ ASTReader::getLoadedLookupTables(DeclContext *Primary) const {
return I == Lookups.end() ? nullptr : &I->second;
}
-/// \brief Under non-PCH compilation the consumer receives the objc methods
+/// Under non-PCH compilation the consumer receives the objc methods
/// before receiving the implementation, and codegen depends on this.
/// We simulate this by deserializing and passing to consumer the methods of the
/// implementation before passing the deserialized implementation decl.
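The ordering contract, modeled with stand-in types (nothing below is clang API):

#include <functional>
#include <string>
#include <vector>

struct FakeImplDecl {
  std::string Name;
  std::vector<std::string> Methods;
};

// Hand every method to the consumer before the implementation itself,
// mirroring the order codegen sees in a non-PCH compilation.
static void passToConsumer(const FakeImplDecl &Impl,
                           const std::function<void(const std::string &)> &Consume) {
  for (const std::string &M : Impl.Methods)
    Consume(M);       // methods first
  Consume(Impl.Name); // then the implementation decl
}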
@@ -7735,25 +7891,25 @@ IdentifierInfo *ASTReader::get(StringRef Name) {
namespace clang {
- /// \brief An identifier-lookup iterator that enumerates all of the
+ /// An identifier-lookup iterator that enumerates all of the
/// identifiers stored within a set of AST files.
class ASTIdentifierIterator : public IdentifierIterator {
- /// \brief The AST reader whose identifiers are being enumerated.
+ /// The AST reader whose identifiers are being enumerated.
const ASTReader &Reader;
- /// \brief The current index into the chain of AST files stored in
+ /// The current index into the chain of AST files stored in
/// the AST reader.
unsigned Index;
- /// \brief The current position within the identifier lookup table
+ /// The current position within the identifier lookup table
/// of the current AST file.
ASTIdentifierLookupTable::key_iterator Current;
- /// \brief The end position within the identifier lookup table of
+ /// The end position within the identifier lookup table of
/// the current AST file.
ASTIdentifierLookupTable::key_iterator End;
- /// \brief Whether to skip any modules in the ASTReader.
+ /// Whether to skip any modules in the ASTReader.
bool SkipModules;
public:
@@ -7889,12 +8045,12 @@ namespace serialization {
return true;
}
- /// \brief Retrieve the instance methods found by this visitor.
+ /// Retrieve the instance methods found by this visitor.
ArrayRef<ObjCMethodDecl *> getInstanceMethods() const {
return InstanceMethods;
}
- /// \brief Retrieve the instance methods found by this visitor.
+ /// Retrieve the factory methods found by this visitor.
ArrayRef<ObjCMethodDecl *> getFactoryMethods() const {
return FactoryMethods;
}
@@ -7912,7 +8068,7 @@ namespace serialization {
} // namespace serialization
} // namespace clang
-/// \brief Add the given set of methods to the method list.
+/// Add the given set of methods to the method list.
static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
ObjCMethodList &List) {
for (unsigned I = 0, N = Methods.size(); I != N; ++I) {
@@ -8151,7 +8307,7 @@ void ASTReader::SetIdentifierInfo(IdentifierID ID, IdentifierInfo *II) {
DeserializationListener->IdentifierRead(ID, II);
}
-/// \brief Set the globally-visible declarations associated with the given
+/// Set the globally-visible declarations associated with the given
/// identifier.
///
/// If the AST reader is currently in a state where the given declaration IDs
@@ -8328,6 +8484,11 @@ Module *ASTReader::getModule(unsigned ID) {
return getSubmodule(ID);
}
+bool ASTReader::DeclIsFromPCHWithObjectFile(const Decl *D) {
+ ModuleFile *MF = getOwningModuleFile(D);
+ return MF && MF->PCHHasObjectFile;
+}
+
ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &F, unsigned ID) {
if (ID & 1) {
// It's a module, look it up by submodule ID.
@@ -8682,7 +8843,7 @@ ReadTemplateArgumentList(SmallVectorImpl<TemplateArgument> &TemplArgs,
TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx, Canonicalize));
}
-/// \brief Read a UnresolvedSet structure.
+/// Read a UnresolvedSet structure.
void ASTReader::ReadUnresolvedSet(ModuleFile &F, LazyASTUnresolvedSet &Set,
const RecordData &Record, unsigned &Idx) {
unsigned NumDecls = Record[Idx++];
@@ -8903,7 +9064,7 @@ ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
return SourceRange(beg, end);
}
-/// \brief Read an integral value
+/// Read an integral value
llvm::APInt ASTReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
unsigned BitWidth = Record[Idx++];
unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
@@ -8912,20 +9073,20 @@ llvm::APInt ASTReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
return Result;
}
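The record layout consumed here, as a plain-C++ sketch: a bit width followed by enough 64-bit words to hold it (the struct is a stand-in for llvm::APInt):

#include <cstdint>
#include <vector>

struct FakeAPInt {
  unsigned BitWidth;
  std::vector<uint64_t> Words;
};

static FakeAPInt readFakeAPInt(const std::vector<uint64_t> &Record,
                               unsigned &Idx) {
  FakeAPInt R;
  R.BitWidth = static_cast<unsigned>(Record[Idx++]);
  unsigned NumWords = (R.BitWidth + 63) / 64; // what getNumWords computes
  R.Words.assign(Record.begin() + Idx, Record.begin() + Idx + NumWords);
  Idx += NumWords;
  return R; // e.g. a 128-bit value consumes 1 + 2 record entries
}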
-/// \brief Read a signed integral value
+/// Read a signed integral value
llvm::APSInt ASTReader::ReadAPSInt(const RecordData &Record, unsigned &Idx) {
bool isUnsigned = Record[Idx++];
return llvm::APSInt(ReadAPInt(Record, Idx), isUnsigned);
}
-/// \brief Read a floating-point value
+/// Read a floating-point value
llvm::APFloat ASTReader::ReadAPFloat(const RecordData &Record,
const llvm::fltSemantics &Sem,
unsigned &Idx) {
return llvm::APFloat(Sem, ReadAPInt(Record, Idx));
}
-// \brief Read a string
+// Read a string
std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
unsigned Len = Record[Idx++];
std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
@@ -8967,13 +9128,13 @@ DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) const {
return Diags.Report(Loc, DiagID);
}
-/// \brief Retrieve the identifier table associated with the
+/// Retrieve the identifier table associated with the
/// preprocessor.
IdentifierTable &ASTReader::getIdentifierTable() {
return PP.getIdentifierTable();
}
-/// \brief Record that the given ID maps to the given switch-case
+/// Record that the given ID maps to the given switch-case
/// statement.
void ASTReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
assert((*CurrSwitchCaseStmts)[ID] == nullptr &&
@@ -8981,7 +9142,7 @@ void ASTReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
(*CurrSwitchCaseStmts)[ID] = SC;
}
-/// \brief Retrieve the switch-case statement with the given ID.
+/// Retrieve the switch-case statement with the given ID.
SwitchCase *ASTReader::getSwitchCaseWithID(unsigned ID) {
assert((*CurrSwitchCaseStmts)[ID] != nullptr && "No SwitchCase with this ID");
return (*CurrSwitchCaseStmts)[ID];
@@ -9032,8 +9193,7 @@ void ASTReader::ReadComments() {
bool IsTrailingComment = Record[Idx++];
bool IsAlmostTrailingComment = Record[Idx++];
Comments.push_back(new (Context) RawComment(
- SR, Kind, IsTrailingComment, IsAlmostTrailingComment,
- Context.getLangOpts().CommentOpts.ParseAllComments));
+ SR, Kind, IsTrailingComment, IsAlmostTrailingComment));
break;
}
}
@@ -9041,8 +9201,8 @@ void ASTReader::ReadComments() {
NextCursor:
// De-serialized SourceLocations get negative FileIDs for other modules,
// potentially invalidating the original order. Sort it again.
- std::sort(Comments.begin(), Comments.end(),
- BeforeThanCompare<RawComment>(SourceMgr));
+ llvm::sort(Comments.begin(), Comments.end(),
+ BeforeThanCompare<RawComment>(SourceMgr));
Context.Comments.addDeserializedComments(Comments);
}
}
@@ -9230,6 +9390,19 @@ void ASTReader::finishPendingActions() {
PBEnd = PendingBodies.end();
PB != PBEnd; ++PB) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(PB->first)) {
+ // For a function defined inline within a class template, force the
+ // canonical definition to be the one inside the canonical definition of
+ // the template. This ensures that we instantiate from a correct view
+ // of the template.
+ //
+ // Sadly we can't do this more generally: we can't be sure that all
+ // copies of an arbitrary class definition will have the same members
+ // defined (e.g., some member functions may not be instantiated, and some
+ // special members may or may not have been implicitly defined).
+ if (auto *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalParent()))
+ if (RD->isDependentContext() && !RD->isThisDeclarationADefinition())
+ continue;
+
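For illustration, the shape of code this comment is about, assuming the same header is reachable through two modules:

// Both modules carry a serialized copy of S<T>::f's inline body; the
// reader pins instantiation to the copy inside the canonical definition.
template <typename T> struct S {
  void f() { Value = T(); } // defined inline within the class template
  T Value;
};

int main() {
  S<int> X;
  X.f(); // instantiates S<int>::f from the canonical definition's body
  return X.Value;
}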
// FIXME: Check for =delete/=default?
// FIXME: Complain about ODR violations here?
const FunctionDecl *Defn = nullptr;
@@ -9242,7 +9415,15 @@ void ASTReader::finishPendingActions() {
if (!FD->isLateTemplateParsed() &&
!NonConstDefn->isLateTemplateParsed() &&
FD->getODRHash() != NonConstDefn->getODRHash()) {
- PendingFunctionOdrMergeFailures[FD].push_back(NonConstDefn);
+ if (!isa<CXXMethodDecl>(FD)) {
+ PendingFunctionOdrMergeFailures[FD].push_back(NonConstDefn);
+ } else if (FD->getLexicalParent()->isFileContext() &&
+ NonConstDefn->getLexicalParent()->isFileContext()) {
+ // Only diagnose out-of-line method definitions. If they are
+ // in class definitions, then an error will be generated when
+ // processing the class bodies.
+ PendingFunctionOdrMergeFailures[FD].push_back(NonConstDefn);
+ }
}
}
continue;
@@ -9262,7 +9443,8 @@ void ASTReader::finishPendingActions() {
void ASTReader::diagnoseOdrViolations() {
if (PendingOdrMergeFailures.empty() && PendingOdrMergeChecks.empty() &&
- PendingFunctionOdrMergeFailures.empty())
+ PendingFunctionOdrMergeFailures.empty() &&
+ PendingEnumOdrMergeFailures.empty())
return;
// Trigger the import of the full definition of each class that had any
@@ -9298,6 +9480,16 @@ void ASTReader::diagnoseOdrViolations() {
}
}
+ // Trigger the import of enums.
+ auto EnumOdrMergeFailures = std::move(PendingEnumOdrMergeFailures);
+ PendingEnumOdrMergeFailures.clear();
+ for (auto &Merge : EnumOdrMergeFailures) {
+ Merge.first->decls_begin();
+ for (auto &Enum : Merge.second) {
+ Enum->decls_begin();
+ }
+ }
+
// For each declaration from a merged context, check that the canonical
// definition of that context also contains a declaration of the same
// entity.
@@ -9380,7 +9572,8 @@ void ASTReader::diagnoseOdrViolations() {
}
}
- if (OdrMergeFailures.empty() && FunctionOdrMergeFailures.empty())
+ if (OdrMergeFailures.empty() && FunctionOdrMergeFailures.empty() &&
+ EnumOdrMergeFailures.empty())
return;
// Ensure we don't accidentally recursively enter deserialization while
@@ -9409,6 +9602,20 @@ void ASTReader::diagnoseOdrViolations() {
return Hash.CalculateHash();
};
+ auto ComputeTemplateArgumentODRHash = [&Hash](const TemplateArgument &TA) {
+ Hash.clear();
+ Hash.AddTemplateArgument(TA);
+ return Hash.CalculateHash();
+ };
+
+ auto ComputeTemplateParameterListODRHash =
+ [&Hash](const TemplateParameterList *TPL) {
+ assert(TPL);
+ Hash.clear();
+ Hash.AddTemplateParameterList(TPL);
+ return Hash.CalculateHash();
+ };
+
// Issue any pending ODR-failure diagnostics.
for (auto &Merge : OdrMergeFailures) {
// If we've already pointed out a specific problem with this class, don't
@@ -9761,6 +9968,7 @@ void ASTReader::diagnoseOdrViolations() {
TypeDef,
Var,
Friend,
+ FunctionTemplate,
Other
} FirstDiffType = Other,
SecondDiffType = Other;
@@ -9798,6 +10006,8 @@ void ASTReader::diagnoseOdrViolations() {
return Var;
case Decl::Friend:
return Friend;
+ case Decl::FunctionTemplate:
+ return FunctionTemplate;
}
};
@@ -9884,7 +10094,7 @@ void ASTReader::diagnoseOdrViolations() {
// Used with err_module_odr_violation_mismatch_decl_diff and
// note_module_odr_violation_mismatch_decl_diff
- enum ODRDeclDifference{
+ enum ODRDeclDifference {
StaticAssertCondition,
StaticAssertMessage,
StaticAssertOnlyMessage,
@@ -9897,6 +10107,7 @@ void ASTReader::diagnoseOdrViolations() {
FieldDifferentInitializers,
MethodName,
MethodDeleted,
+ MethodDefaulted,
MethodVirtual,
MethodStatic,
MethodVolatile,
@@ -9907,6 +10118,11 @@ void ASTReader::diagnoseOdrViolations() {
MethodParameterName,
MethodParameterSingleDefaultArgument,
MethodParameterDifferentDefaultArgument,
+ MethodNoTemplateArguments,
+ MethodDifferentNumberTemplateArguments,
+ MethodDifferentTemplateArgument,
+ MethodSingleBody,
+ MethodDifferentBody,
TypedefName,
TypedefType,
VarName,
@@ -9917,6 +10133,13 @@ void ASTReader::diagnoseOdrViolations() {
FriendTypeFunction,
FriendType,
FriendFunction,
+ FunctionTemplateDifferentNumberParameters,
+ FunctionTemplateParameterDifferentKind,
+ FunctionTemplateParameterName,
+ FunctionTemplateParameterSingleDefaultArgument,
+ FunctionTemplateParameterDifferentDefaultArgument,
+ FunctionTemplateParameterDifferentType,
+ FunctionTemplatePackParameter,
};
// These lambdas have the common portions of the ODR diagnostics. This
@@ -10133,8 +10356,8 @@ void ASTReader::diagnoseOdrViolations() {
break;
}
- const bool FirstDeleted = FirstMethod->isDeleted();
- const bool SecondDeleted = SecondMethod->isDeleted();
+ const bool FirstDeleted = FirstMethod->isDeletedAsWritten();
+ const bool SecondDeleted = SecondMethod->isDeletedAsWritten();
if (FirstDeleted != SecondDeleted) {
ODRDiagError(FirstMethod->getLocation(),
FirstMethod->getSourceRange(), MethodDeleted)
@@ -10147,6 +10370,20 @@ void ASTReader::diagnoseOdrViolations() {
break;
}
+ const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted();
+ const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted();
+ if (FirstDefaulted != SecondDefaulted) {
+ ODRDiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDefaulted)
+ << FirstMethodType << FirstName << FirstDefaulted;
+
+ ODRDiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDefaulted)
+ << SecondMethodType << SecondName << SecondDefaulted;
+ Diagnosed = true;
+ break;
+ }
+
const bool FirstVirtual = FirstMethod->isVirtualAsWritten();
const bool SecondVirtual = SecondMethod->isVirtualAsWritten();
const bool FirstPure = FirstMethod->isPure();
@@ -10329,6 +10566,127 @@ void ASTReader::diagnoseOdrViolations() {
break;
}
+ const auto *FirstTemplateArgs =
+ FirstMethod->getTemplateSpecializationArgs();
+ const auto *SecondTemplateArgs =
+ SecondMethod->getTemplateSpecializationArgs();
+
+ if ((FirstTemplateArgs && !SecondTemplateArgs) ||
+ (!FirstTemplateArgs && SecondTemplateArgs)) {
+ ODRDiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodNoTemplateArguments)
+ << FirstMethodType << FirstName << (FirstTemplateArgs != nullptr);
+ ODRDiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodNoTemplateArguments)
+ << SecondMethodType << SecondName
+ << (SecondTemplateArgs != nullptr);
+
+ Diagnosed = true;
+ break;
+ }
+
+ if (FirstTemplateArgs && SecondTemplateArgs) {
+ // Remove pack expansions from argument list.
+ auto ExpandTemplateArgumentList =
+ [](const TemplateArgumentList *TAL) {
+ llvm::SmallVector<const TemplateArgument *, 8> ExpandedList;
+ for (const TemplateArgument &TA : TAL->asArray()) {
+ if (TA.getKind() != TemplateArgument::Pack) {
+ ExpandedList.push_back(&TA);
+ continue;
+ }
+ for (const TemplateArgument &PackTA : TA.getPackAsArray()) {
+ ExpandedList.push_back(&PackTA);
+ }
+ }
+ return ExpandedList;
+ };
+ llvm::SmallVector<const TemplateArgument *, 8> FirstExpandedList =
+ ExpandTemplateArgumentList(FirstTemplateArgs);
+ llvm::SmallVector<const TemplateArgument *, 8> SecondExpandedList =
+ ExpandTemplateArgumentList(SecondTemplateArgs);
+
+ if (FirstExpandedList.size() != SecondExpandedList.size()) {
+ ODRDiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodDifferentNumberTemplateArguments)
+ << FirstMethodType << FirstName
+ << (unsigned)FirstExpandedList.size();
+ ODRDiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodDifferentNumberTemplateArguments)
+ << SecondMethodType << SecondName
+ << (unsigned)SecondExpandedList.size();
+
+ Diagnosed = true;
+ break;
+ }
+
+ bool TemplateArgumentMismatch = false;
+ for (unsigned i = 0, e = FirstExpandedList.size(); i != e; ++i) {
+ const TemplateArgument &FirstTA = *FirstExpandedList[i],
+ &SecondTA = *SecondExpandedList[i];
+ if (ComputeTemplateArgumentODRHash(FirstTA) ==
+ ComputeTemplateArgumentODRHash(SecondTA)) {
+ continue;
+ }
+
+ ODRDiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(),
+ MethodDifferentTemplateArgument)
+ << FirstMethodType << FirstName << FirstTA << i + 1;
+ ODRDiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(),
+ MethodDifferentTemplateArgument)
+ << SecondMethodType << SecondName << SecondTA << i + 1;
+
+ TemplateArgumentMismatch = true;
+ break;
+ }
+
+ if (TemplateArgumentMismatch) {
+ Diagnosed = true;
+ break;
+ }
+ }
+
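A standalone illustration of the flattening ExpandTemplateArgumentList performs; the types are stand-ins, and like the original it does not recurse into nested packs:

#include <vector>

struct FakeArg {
  bool IsPack = false;
  int Value = 0;             // used when !IsPack
  std::vector<FakeArg> Pack; // used when IsPack
};

static std::vector<int> expand(const std::vector<FakeArg> &Args) {
  std::vector<int> Out;
  for (const FakeArg &A : Args) {
    if (!A.IsPack) {
      Out.push_back(A.Value);
      continue;
    }
    for (const FakeArg &P : A.Pack)
      Out.push_back(P.Value); // splice the pack's elements in place
  }
  return Out; // {1, Pack{2, 3}, 4} compares as {1, 2, 3, 4}
}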
+ // Compute the hash of the method as if it has no body.
+ auto ComputeCXXMethodODRHash = [&Hash](const CXXMethodDecl *D) {
+ Hash.clear();
+ Hash.AddFunctionDecl(D, true /*SkipBody*/);
+ return Hash.CalculateHash();
+ };
+
+ // Compare the hash generated to the hash stored. A difference means
+ // that a body was present in the original source. Due to merging,
+ // the standard way of detecting a body will not work.
+ const bool HasFirstBody =
+ ComputeCXXMethodODRHash(FirstMethod) != FirstMethod->getODRHash();
+ const bool HasSecondBody =
+ ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash();
+
+ if (HasFirstBody != HasSecondBody) {
+ ODRDiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodSingleBody)
+ << FirstMethodType << FirstName << HasFirstBody;
+ ODRDiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodSingleBody)
+ << SecondMethodType << SecondName << HasSecondBody;
+ Diagnosed = true;
+ break;
+ }
+
+ if (HasFirstBody && HasSecondBody) {
+ ODRDiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), MethodDifferentBody)
+ << FirstMethodType << FirstName;
+ ODRDiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), MethodDifferentBody)
+ << SecondMethodType << SecondName;
+ Diagnosed = true;
+ break;
+ }
+
break;
}
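The body-detection trick above in miniature: hash once as stored (body included) and once with the body skipped; any mismatch means a body was written. std::hash stands in for ODRHash:

#include <cstddef>
#include <functional>
#include <string>

struct FakeMethod {
  std::string Signature;
  std::string Body; // empty when no body was written in source
};

static bool hadBody(const FakeMethod &M) {
  std::hash<std::string> H;
  std::size_t Stored = H(M.Signature + M.Body); // hash as serialized
  std::size_t SkipBody = H(M.Signature);        // recomputed without body
  return Stored != SkipBody;
}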
case TypeAlias:
@@ -10481,6 +10839,305 @@ void ASTReader::diagnoseOdrViolations() {
Diagnosed = true;
break;
}
+ case FunctionTemplate: {
+ FunctionTemplateDecl *FirstTemplate =
+ cast<FunctionTemplateDecl>(FirstDecl);
+ FunctionTemplateDecl *SecondTemplate =
+ cast<FunctionTemplateDecl>(SecondDecl);
+
+ TemplateParameterList *FirstTPL =
+ FirstTemplate->getTemplateParameters();
+ TemplateParameterList *SecondTPL =
+ SecondTemplate->getTemplateParameters();
+
+ if (FirstTPL->size() != SecondTPL->size()) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateDifferentNumberParameters)
+ << FirstTemplate << FirstTPL->size();
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateDifferentNumberParameters)
+ << SecondTemplate << SecondTPL->size();
+
+ Diagnosed = true;
+ break;
+ }
+
+ bool ParameterMismatch = false;
+ for (unsigned i = 0, e = FirstTPL->size(); i != e; ++i) {
+ NamedDecl *FirstParam = FirstTPL->getParam(i);
+ NamedDecl *SecondParam = SecondTPL->getParam(i);
+
+ if (FirstParam->getKind() != SecondParam->getKind()) {
+ enum {
+ TemplateTypeParameter,
+ NonTypeTemplateParameter,
+ TemplateTemplateParameter,
+ };
+ auto GetParamType = [](NamedDecl *D) {
+ switch (D->getKind()) {
+ default:
+ llvm_unreachable("Unexpected template parameter type");
+ case Decl::TemplateTypeParm:
+ return TemplateTypeParameter;
+ case Decl::NonTypeTemplateParm:
+ return NonTypeTemplateParameter;
+ case Decl::TemplateTemplateParm:
+ return TemplateTemplateParameter;
+ }
+ };
+
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentKind)
+ << FirstTemplate << (i + 1) << GetParamType(FirstParam);
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentKind)
+ << SecondTemplate << (i + 1) << GetParamType(SecondParam);
+
+ ParameterMismatch = true;
+ break;
+ }
+
+ if (FirstParam->getName() != SecondParam->getName()) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterName)
+ << FirstTemplate << (i + 1) << (bool)FirstParam->getIdentifier()
+ << FirstParam;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterName)
+ << SecondTemplate << (i + 1)
+ << (bool)SecondParam->getIdentifier() << SecondParam;
+ ParameterMismatch = true;
+ break;
+ }
+
+ if (isa<TemplateTypeParmDecl>(FirstParam) &&
+ isa<TemplateTypeParmDecl>(SecondParam)) {
+ TemplateTypeParmDecl *FirstTTPD =
+ cast<TemplateTypeParmDecl>(FirstParam);
+ TemplateTypeParmDecl *SecondTTPD =
+ cast<TemplateTypeParmDecl>(SecondParam);
+ bool HasFirstDefaultArgument =
+ FirstTTPD->hasDefaultArgument() &&
+ !FirstTTPD->defaultArgumentWasInherited();
+ bool HasSecondDefaultArgument =
+ SecondTTPD->hasDefaultArgument() &&
+ !SecondTTPD->defaultArgumentWasInherited();
+ if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
+ << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
+ << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
+ ParameterMismatch = true;
+ break;
+ }
+
+ if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
+ QualType FirstType = FirstTTPD->getDefaultArgument();
+ QualType SecondType = SecondTTPD->getDefaultArgument();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
+ << FirstTemplate << (i + 1) << FirstType;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
+ << SecondTemplate << (i + 1) << SecondType;
+ ParameterMismatch = true;
+ break;
+ }
+ }
+
+ if (FirstTTPD->isParameterPack() !=
+ SecondTTPD->isParameterPack()) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
+ << FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
+ << SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
+ ParameterMismatch = true;
+ break;
+ }
+ }
+
+ if (isa<TemplateTemplateParmDecl>(FirstParam) &&
+ isa<TemplateTemplateParmDecl>(SecondParam)) {
+ TemplateTemplateParmDecl *FirstTTPD =
+ cast<TemplateTemplateParmDecl>(FirstParam);
+ TemplateTemplateParmDecl *SecondTTPD =
+ cast<TemplateTemplateParmDecl>(SecondParam);
+
+ TemplateParameterList *FirstTPL =
+ FirstTTPD->getTemplateParameters();
+ TemplateParameterList *SecondTPL =
+ SecondTTPD->getTemplateParameters();
+
+ if (ComputeTemplateParameterListODRHash(FirstTPL) !=
+ ComputeTemplateParameterListODRHash(SecondTPL)) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
+ << FirstTemplate << (i + 1);
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
+ << SecondTemplate << (i + 1);
+ ParameterMismatch = true;
+ break;
+ }
+
+ bool HasFirstDefaultArgument =
+ FirstTTPD->hasDefaultArgument() &&
+ !FirstTTPD->defaultArgumentWasInherited();
+ bool HasSecondDefaultArgument =
+ SecondTTPD->hasDefaultArgument() &&
+ !SecondTTPD->defaultArgumentWasInherited();
+ if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
+ << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
+ << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
+ ParameterMismatch = true;
+ break;
+ }
+
+ if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
+ TemplateArgument FirstTA =
+ FirstTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondTA =
+ SecondTTPD->getDefaultArgument().getArgument();
+ if (ComputeTemplateArgumentODRHash(FirstTA) !=
+ ComputeTemplateArgumentODRHash(SecondTA)) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
+ << FirstTemplate << (i + 1) << FirstTA;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
+ << SecondTemplate << (i + 1) << SecondTA;
+ ParameterMismatch = true;
+ break;
+ }
+ }
+
+ if (FirstTTPD->isParameterPack() !=
+ SecondTTPD->isParameterPack()) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
+ << FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
+ << SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
+ ParameterMismatch = true;
+ break;
+ }
+ }
+
+ if (isa<NonTypeTemplateParmDecl>(FirstParam) &&
+ isa<NonTypeTemplateParmDecl>(SecondParam)) {
+ NonTypeTemplateParmDecl *FirstNTTPD =
+ cast<NonTypeTemplateParmDecl>(FirstParam);
+ NonTypeTemplateParmDecl *SecondNTTPD =
+ cast<NonTypeTemplateParmDecl>(SecondParam);
+
+ QualType FirstType = FirstNTTPD->getType();
+ QualType SecondType = SecondNTTPD->getType();
+ if (ComputeQualTypeODRHash(FirstType) !=
+ ComputeQualTypeODRHash(SecondType)) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
+ << FirstTemplate << (i + 1);
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentType)
+ << SecondTemplate << (i + 1);
+ ParameterMismatch = true;
+ break;
+ }
+
+ bool HasFirstDefaultArgument =
+ FirstNTTPD->hasDefaultArgument() &&
+ !FirstNTTPD->defaultArgumentWasInherited();
+ bool HasSecondDefaultArgument =
+ SecondNTTPD->hasDefaultArgument() &&
+ !SecondNTTPD->defaultArgumentWasInherited();
+ if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
+ << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterSingleDefaultArgument)
+ << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
+ ParameterMismatch = true;
+ break;
+ }
+
+ if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
+ Expr *FirstDefaultArgument = FirstNTTPD->getDefaultArgument();
+ Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
+ if (ComputeODRHash(FirstDefaultArgument) !=
+ ComputeODRHash(SecondDefaultArgument)) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
+ << FirstTemplate << (i + 1) << FirstDefaultArgument;
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplateParameterDifferentDefaultArgument)
+ << SecondTemplate << (i + 1) << SecondDefaultArgument;
+ ParameterMismatch = true;
+ break;
+ }
+ }
+
+ if (FirstNTTPD->isParameterPack() !=
+ SecondNTTPD->isParameterPack()) {
+ ODRDiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
+ << FirstTemplate << (i + 1) << FirstNTTPD->isParameterPack();
+ ODRDiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(),
+ FunctionTemplatePackParameter)
+ << SecondTemplate << (i + 1)
+ << SecondNTTPD->isParameterPack();
+ ParameterMismatch = true;
+ break;
+ }
+ }
+ }
+
+ if (ParameterMismatch) {
+ Diagnosed = true;
+ break;
+ }
+
+ break;
+ }
}
if (Diagnosed)
@@ -10660,6 +11317,195 @@ void ASTReader::diagnoseOdrViolations() {
Diagnosed = true;
break;
}
+ (void)Diagnosed;
+ assert(Diagnosed && "Unable to emit ODR diagnostic.");
+ }
+
+ // Issue ODR failures diagnostics for enums.
+ for (auto &Merge : EnumOdrMergeFailures) {
+ enum ODREnumDifference {
+ SingleScopedEnum,
+ EnumTagKeywordMismatch,
+ SingleSpecifiedType,
+ DifferentSpecifiedTypes,
+ DifferentNumberEnumConstants,
+ EnumConstantName,
+ EnumConstantSingleInitilizer,
+ EnumConstantDifferentInitilizer,
+ };
+
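An example of the first kind of mismatch in this list; the two namespaces stand in for two modules that each define Color:

namespace ModuleA { enum Color { Red, Green }; }       // unscoped
namespace ModuleB { enum class Color { Red, Green }; } // scoped: SingleScopedEnum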
+ // If we've already pointed out a specific problem with this enum, don't
+ // bother issuing a general "something's different" diagnostic.
+ if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
+ continue;
+
+ EnumDecl *FirstEnum = Merge.first;
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstEnum);
+
+ using DeclHashes =
+ llvm::SmallVector<std::pair<EnumConstantDecl *, unsigned>, 4>;
+ auto PopulateHashes = [&ComputeSubDeclODRHash, FirstEnum](
+ DeclHashes &Hashes, EnumDecl *Enum) {
+ for (auto *D : Enum->decls()) {
+ // Due to decl merging, the first EnumDecl is the parent of
+ // Decls in both records.
+ if (!ODRHash::isWhitelistedDecl(D, FirstEnum))
+ continue;
+ assert(isa<EnumConstantDecl>(D) && "Unexpected Decl kind");
+ Hashes.emplace_back(cast<EnumConstantDecl>(D),
+ ComputeSubDeclODRHash(D));
+ }
+ };
+ DeclHashes FirstHashes;
+ PopulateHashes(FirstHashes, FirstEnum);
+ bool Diagnosed = false;
+ for (auto &SecondEnum : Merge.second) {
+
+ if (FirstEnum == SecondEnum)
+ continue;
+
+ std::string SecondModule =
+ getOwningModuleNameForDiagnostic(SecondEnum);
+
+ auto ODRDiagError = [FirstEnum, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODREnumDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_enum)
+ << FirstEnum << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto ODRDiagNote = [&SecondModule, this](SourceLocation Loc,
+ SourceRange Range,
+ ODREnumDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_enum)
+ << SecondModule << Range << DiffType;
+ };
+
+ if (FirstEnum->isScoped() != SecondEnum->isScoped()) {
+ ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
+ SingleScopedEnum)
+ << FirstEnum->isScoped();
+ ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
+ SingleScopedEnum)
+ << SecondEnum->isScoped();
+ Diagnosed = true;
+ continue;
+ }
+
+ if (FirstEnum->isScoped() && SecondEnum->isScoped()) {
+ if (FirstEnum->isScopedUsingClassTag() !=
+ SecondEnum->isScopedUsingClassTag()) {
+ ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
+ EnumTagKeywordMismatch)
+ << FirstEnum->isScopedUsingClassTag();
+ ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
+ EnumTagKeywordMismatch)
+ << SecondEnum->isScopedUsingClassTag();
+ Diagnosed = true;
+ continue;
+ }
+ }
+
+ QualType FirstUnderlyingType =
+ FirstEnum->getIntegerTypeSourceInfo()
+ ? FirstEnum->getIntegerTypeSourceInfo()->getType()
+ : QualType();
+ QualType SecondUnderlyingType =
+ SecondEnum->getIntegerTypeSourceInfo()
+ ? SecondEnum->getIntegerTypeSourceInfo()->getType()
+ : QualType();
+ if (FirstUnderlyingType.isNull() != SecondUnderlyingType.isNull()) {
+ ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
+ SingleSpecifiedType)
+ << !FirstUnderlyingType.isNull();
+ ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
+ SingleSpecifiedType)
+ << !SecondUnderlyingType.isNull();
+ Diagnosed = true;
+ continue;
+ }
+
+ if (!FirstUnderlyingType.isNull() && !SecondUnderlyingType.isNull()) {
+ if (ComputeQualTypeODRHash(FirstUnderlyingType) !=
+ ComputeQualTypeODRHash(SecondUnderlyingType)) {
+ ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
+ DifferentSpecifiedTypes)
+ << FirstUnderlyingType;
+ ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
+ DifferentSpecifiedTypes)
+ << SecondUnderlyingType;
+ Diagnosed = true;
+ continue;
+ }
+ }
+
+ DeclHashes SecondHashes;
+ PopulateHashes(SecondHashes, SecondEnum);
+
+ if (FirstHashes.size() != SecondHashes.size()) {
+ ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
+ DifferentNumberEnumConstants)
+ << (int)FirstHashes.size();
+ ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
+ DifferentNumberEnumConstants)
+ << (int)SecondHashes.size();
+ Diagnosed = true;
+ continue;
+ }
+
+ for (unsigned I = 0; I < FirstHashes.size(); ++I) {
+ if (FirstHashes[I].second == SecondHashes[I].second)
+ continue;
+ const EnumConstantDecl *FirstEnumConstant = FirstHashes[I].first;
+ const EnumConstantDecl *SecondEnumConstant = SecondHashes[I].first;
+
+ if (FirstEnumConstant->getDeclName() !=
+ SecondEnumConstant->getDeclName()) {
+
+ ODRDiagError(FirstEnumConstant->getLocation(),
+ FirstEnumConstant->getSourceRange(), EnumConstantName)
+ << I + 1 << FirstEnumConstant;
+ ODRDiagNote(SecondEnumConstant->getLocation(),
+ SecondEnumConstant->getSourceRange(), EnumConstantName)
+ << I + 1 << SecondEnumConstant;
+ Diagnosed = true;
+ break;
+ }
+
+ const Expr *FirstInit = FirstEnumConstant->getInitExpr();
+ const Expr *SecondInit = SecondEnumConstant->getInitExpr();
+ if (!FirstInit && !SecondInit)
+ continue;
+
+ if (!FirstInit || !SecondInit) {
+ ODRDiagError(FirstEnumConstant->getLocation(),
+ FirstEnumConstant->getSourceRange(),
+ EnumConstantSingleInitilizer)
+ << I + 1 << FirstEnumConstant << (FirstInit != nullptr);
+ ODRDiagNote(SecondEnumConstant->getLocation(),
+ SecondEnumConstant->getSourceRange(),
+ EnumConstantSingleInitilizer)
+ << I + 1 << SecondEnumConstant << (SecondInit != nullptr);
+ Diagnosed = true;
+ break;
+ }
+
+ if (ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
+ ODRDiagError(FirstEnumConstant->getLocation(),
+ FirstEnumConstant->getSourceRange(),
+ EnumConstantDifferentInitilizer)
+ << I + 1 << FirstEnumConstant;
+ ODRDiagNote(SecondEnumConstant->getLocation(),
+ SecondEnumConstant->getSourceRange(),
+ EnumConstantDifferentInitilizer)
+ << I + 1 << SecondEnumConstant;
+ Diagnosed = true;
+ break;
+ }
+ }
+ }
+
+ (void)Diagnosed;
assert(Diagnosed && "Unable to emit ODR diagnostic.");
}
}
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index efbaf92a849a..7e2c4829b14c 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -1,4 +1,4 @@
-//===--- ASTReaderDecl.cpp - Decl Deserialization ---------------*- C++ -*-===//
+//===- ASTReaderDecl.cpp - Decl Deserialization ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,42 +15,89 @@
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/AttrIterator.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/LambdaCapture.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Redeclarable.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/AttrKinds.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Linkage.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/PragmaKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ContinuousRangeMap.h"
+#include "clang/Serialization/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <utility>
using namespace clang;
-using namespace clang::serialization;
+using namespace serialization;
//===----------------------------------------------------------------------===//
// Declaration deserialization
//===----------------------------------------------------------------------===//
namespace clang {
+
class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
ASTReader &Reader;
ASTRecordReader &Record;
ASTReader::RecordLocation Loc;
const DeclID ThisDeclID;
const SourceLocation ThisDeclLoc;
- typedef ASTReader::RecordData RecordData;
- TypeID TypeIDForTypeDecl;
+
+ using RecordData = ASTReader::RecordData;
+
+ TypeID DeferredTypeID = 0;
unsigned AnonymousDeclNumber;
- GlobalDeclID NamedDeclForTagDecl;
- IdentifierInfo *TypedefNameForLinkage;
+ GlobalDeclID NamedDeclForTagDecl = 0;
+ IdentifierInfo *TypedefNameForLinkage = nullptr;
- bool HasPendingBody;
+ bool HasPendingBody = false;
- ///\brief A flag to carry the information for a decl from the entity is
+ /// A flag to carry the information that the entity a decl refers to is
/// used. We use it to delay the marking of the canonical decl as used until
/// the entire declaration is deserialized and merged.
- bool IsDeclMarkedUsed;
+ bool IsDeclMarkedUsed = false;
uint64_t GetCurrentCursorOffset();
@@ -130,6 +177,8 @@ namespace clang {
void MergeDefinitionData(ObjCProtocolDecl *D,
struct ObjCProtocolDecl::DefinitionData &&NewDD);
+ static DeclContext *getPrimaryDCForAnonymousDecl(DeclContext *LexicalDC);
+
static NamedDecl *getAnonymousDeclForMerging(ASTReader &Reader,
DeclContext *DC,
unsigned Index);
@@ -144,39 +193,34 @@ namespace clang {
public:
RedeclarableResult(Decl *MergeWith, GlobalDeclID FirstID, bool IsKeyDecl)
- : MergeWith(MergeWith), FirstID(FirstID), IsKeyDecl(IsKeyDecl) {}
+ : MergeWith(MergeWith), FirstID(FirstID), IsKeyDecl(IsKeyDecl) {}
- /// \brief Retrieve the first ID.
+ /// Retrieve the first ID.
GlobalDeclID getFirstID() const { return FirstID; }
- /// \brief Is this declaration a key declaration?
+ /// Is this declaration a key declaration?
bool isKeyDecl() const { return IsKeyDecl; }
- /// \brief Get a known declaration that this should be merged with, if
+ /// Get a known declaration that this should be merged with, if
/// any.
Decl *getKnownMergeTarget() const { return MergeWith; }
};
- /// \brief Class used to capture the result of searching for an existing
+ /// Class used to capture the result of searching for an existing
/// declaration of a specific kind and name, along with the ability
/// to update the place where this result was found (the declaration
/// chain hanging off an identifier or the DeclContext we searched in)
/// if requested.
class FindExistingResult {
ASTReader &Reader;
- NamedDecl *New;
- NamedDecl *Existing;
- bool AddResult;
-
- unsigned AnonymousDeclNumber;
- IdentifierInfo *TypedefNameForLinkage;
-
- void operator=(FindExistingResult &&) = delete;
+ NamedDecl *New = nullptr;
+ NamedDecl *Existing = nullptr;
+ bool AddResult = false;
+ unsigned AnonymousDeclNumber = 0;
+ IdentifierInfo *TypedefNameForLinkage = nullptr;
public:
- FindExistingResult(ASTReader &Reader)
- : Reader(Reader), New(nullptr), Existing(nullptr), AddResult(false),
- AnonymousDeclNumber(0), TypedefNameForLinkage(nullptr) {}
+ FindExistingResult(ASTReader &Reader) : Reader(Reader) {}
FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing,
unsigned AnonymousDeclNumber,
@@ -193,9 +237,10 @@ namespace clang {
Other.AddResult = false;
}
+ FindExistingResult &operator=(FindExistingResult &&) = delete;
~FindExistingResult();
- /// \brief Suppress the addition of this result into the known set of
+ /// Suppress the addition of this result into the known set of
/// names.
void suppress() { AddResult = false; }
@@ -213,11 +258,8 @@ namespace clang {
ASTDeclReader(ASTReader &Reader, ASTRecordReader &Record,
ASTReader::RecordLocation Loc,
DeclID thisDeclID, SourceLocation ThisDeclLoc)
- : Reader(Reader), Record(Record), Loc(Loc),
- ThisDeclID(thisDeclID), ThisDeclLoc(ThisDeclLoc),
- TypeIDForTypeDecl(0), NamedDeclForTagDecl(0),
- TypedefNameForLinkage(nullptr), HasPendingBody(false),
- IsDeclMarkedUsed(false) {}
+ : Reader(Reader), Record(Record), Loc(Loc), ThisDeclID(thisDeclID),
+ ThisDeclLoc(ThisDeclLoc) {}
template <typename T> static
void AddLazySpecializations(T *D,
@@ -232,7 +274,7 @@ namespace clang {
if (auto &Old = LazySpecializations) {
IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0]);
- std::sort(IDs.begin(), IDs.end());
+ llvm::sort(IDs.begin(), IDs.end());
IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end());
}
@@ -265,13 +307,13 @@ namespace clang {
static void markIncompleteDeclChainImpl(Redeclarable<DeclT> *D);
static void markIncompleteDeclChainImpl(...);
- /// \brief Determine whether this declaration has a pending body.
+ /// Determine whether this declaration has a pending body.
bool hasPendingBody() const { return HasPendingBody; }
void ReadFunctionDefinition(FunctionDecl *FD);
void Visit(Decl *D);
- void UpdateDecl(Decl *D, llvm::SmallVectorImpl<serialization::DeclID>&);
+ void UpdateDecl(Decl *D, SmallVectorImpl<serialization::DeclID> &);
static void setNextObjCCategory(ObjCCategoryDecl *Cat,
ObjCCategoryDecl *Next) {
@@ -300,19 +342,23 @@ namespace clang {
void VisitCXXRecordDecl(CXXRecordDecl *D) { VisitCXXRecordDeclImpl(D); }
RedeclarableResult VisitClassTemplateSpecializationDeclImpl(
ClassTemplateSpecializationDecl *D);
+
void VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
VisitClassTemplateSpecializationDeclImpl(D);
}
+
void VisitClassTemplatePartialSpecializationDecl(
ClassTemplatePartialSpecializationDecl *D);
void VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *D);
RedeclarableResult
VisitVarTemplateSpecializationDeclImpl(VarTemplateSpecializationDecl *D);
+
void VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D) {
VisitVarTemplateSpecializationDeclImpl(D);
}
+
void VisitVarTemplatePartialSpecializationDecl(
VarTemplatePartialSpecializationDecl *D);
void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
@@ -402,18 +448,22 @@ namespace clang {
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
};
-} // end namespace clang
+
+} // namespace clang
namespace {
+
/// Iterator over the redeclarations of a declaration that have already
/// been merged into the same redeclaration chain.
template<typename DeclT>
class MergedRedeclIterator {
- DeclT *Start, *Canonical, *Current;
+ DeclT *Start;
+ DeclT *Canonical = nullptr;
+ DeclT *Current = nullptr;
+
public:
- MergedRedeclIterator() : Current(nullptr) {}
- MergedRedeclIterator(DeclT *Start)
- : Start(Start), Canonical(nullptr), Current(Start) {}
+ MergedRedeclIterator() = default;
+ MergedRedeclIterator(DeclT *Start) : Start(Start), Current(Start) {}
DeclT *operator*() { return Current; }
@@ -438,7 +488,8 @@ public:
return A.Current != B.Current;
}
};
-} // end anonymous namespace
+
+} // namespace
template <typename DeclT>
static llvm::iterator_range<MergedRedeclIterator<DeclT>>
@@ -472,30 +523,27 @@ void ASTDeclReader::Visit(Decl *D) {
D->getCanonicalDecl()->Used |= IsDeclMarkedUsed;
IsDeclMarkedUsed = false;
- if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
- if (DD->DeclInfo) {
- DeclaratorDecl::ExtInfo *Info =
- DD->DeclInfo.get<DeclaratorDecl::ExtInfo *>();
- Info->TInfo = GetTypeSourceInfo();
- }
- else {
- DD->DeclInfo = GetTypeSourceInfo();
- }
+ if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (auto *TInfo = DD->getTypeSourceInfo())
+ Record.readTypeLoc(TInfo->getTypeLoc());
}
- if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ if (auto *TD = dyn_cast<TypeDecl>(D)) {
// We have a fully initialized TypeDecl. Read its type now.
- TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull());
+ TD->setTypeForDecl(Reader.GetType(DeferredTypeID).getTypePtrOrNull());
// If this is a tag declaration with a typedef name for linkage, it's safe
// to load that typedef now.
if (NamedDeclForTagDecl)
cast<TagDecl>(D)->TypedefNameDeclOrQualifier =
cast<TypedefNameDecl>(Reader.GetDecl(NamedDeclForTagDecl));
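// The construct this handles, sketched in an illustrative snippet (not part
// of this patch): the unnamed struct has no name of its own, so it borrows
// the typedef's name for linkage, and that typedef can only be attached once
// the tag itself has been fully read.
//
//   typedef struct { int x, y; } Point; // 'Point' is the
//                                       // typedef-name-for-linkage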
- } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ } else if (auto *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
// If we have a fully initialized TypeDecl, we can safely read its type now.
- ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
- } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ ID->TypeForDecl = Reader.GetType(DeferredTypeID).getTypePtrOrNull();
+ } else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (DeferredTypeID)
+ FD->setType(Reader.GetType(DeferredTypeID));
+
// FunctionDecl's body was written last after all other Stmts/Exprs.
// We only read it if FD doesn't already have a body (e.g., from another
// module).
@@ -523,8 +571,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
LexicalDCIDForTemplateParmDecl);
D->setDeclContext(Reader.getContext().getTranslationUnitDecl());
} else {
- DeclContext *SemaDC = ReadDeclAs<DeclContext>();
- DeclContext *LexicalDC = ReadDeclAs<DeclContext>();
+ auto *SemaDC = ReadDeclAs<DeclContext>();
+ auto *LexicalDC = ReadDeclAs<DeclContext>();
if (!LexicalDC)
LexicalDC = SemaDC;
DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
@@ -615,7 +663,7 @@ void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
VisitNamedDecl(TD);
TD->setLocStart(ReadSourceLocation());
// Delay type reading until after we have fully initialized the decl.
- TypeIDForTypeDecl = Record.getGlobalTypeID(Record.readInt());
+ DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
}
ASTDeclReader::RedeclarableResult
@@ -667,7 +715,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
case 0:
break;
case 1: { // ExtInfo
- TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
+ auto *Info = new (Reader.getContext()) TagDecl::ExtInfo();
ReadQualifierInfo(*Info);
TD->TypedefNameDeclOrQualifier = Info;
break;
@@ -698,6 +746,9 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
ED->IsScopedUsingClassTag = Record.readInt();
ED->IsFixed = Record.readInt();
+ ED->HasODRHash = true;
+ ED->ODRHash = Record.readInt();
+
// If this is a definition subject to the ODR, and we already have a
// definition, merge this one into it.
if (ED->IsCompleteDefinition &&
@@ -718,14 +769,15 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef));
ED->IsCompleteDefinition = false;
Reader.mergeDefinitionVisibility(OldDef, ED);
+ if (OldDef->getODRHash() != ED->getODRHash())
+ Reader.PendingEnumOdrMergeFailures[OldDef].push_back(ED);
} else {
OldDef = ED;
}
}
- if (EnumDecl *InstED = ReadDeclAs<EnumDecl>()) {
- TemplateSpecializationKind TSK =
- (TemplateSpecializationKind)Record.readInt();
+ if (auto *InstED = ReadDeclAs<EnumDecl>()) {
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
SourceLocation POI = ReadSourceLocation();
ED->setInstantiationOfMemberEnum(Reader.getContext(), InstED, TSK);
ED->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
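// The mismatch the ODR hash read above is meant to catch, as an illustrative
// snippet (not part of this patch): two modules define the same enum
// differently, the hashes disagree, and the pair is queued in
// PendingEnumOdrMergeFailures for a diagnostic.
enum Severity { Warning = 1, Error = 2 }; // as seen via module A
enum Severity { Warning = 1, Error = 3 }; // as seen via module B: hash differs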
@@ -739,12 +791,23 @@ ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) {
RD->setAnonymousStructOrUnion(Record.readInt());
RD->setHasObjectMember(Record.readInt());
RD->setHasVolatileMember(Record.readInt());
+ RD->setNonTrivialToPrimitiveDefaultInitialize(Record.readInt());
+ RD->setNonTrivialToPrimitiveCopy(Record.readInt());
+ RD->setNonTrivialToPrimitiveDestroy(Record.readInt());
+ RD->setParamDestroyedInCallee(Record.readInt());
+ RD->setArgPassingRestrictions((RecordDecl::ArgPassingKind)Record.readInt());
return Redecl;
}
void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
VisitNamedDecl(VD);
- VD->setType(Record.readType());
+ // For function declarations, defer reading the type in case the function has
+ // a deduced return type that references an entity declared within the
+ // function.
+ if (isa<FunctionDecl>(VD))
+ DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
+ else
+ VD->setType(Record.readType());
}
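// Why the deferral above is needed, as an illustrative snippet (not part of
// this patch): a deduced return type may name an entity declared inside the
// function, so the function's type only becomes representable once the
// function itself has been deserialized.
auto makeLocal() {
  struct Local { int value = 42; }; // declared within the function
  return Local{};                   // makeLocal's type mentions 'Local'
}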
void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
@@ -759,17 +822,33 @@ void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
VisitValueDecl(DD);
DD->setInnerLocStart(ReadSourceLocation());
if (Record.readInt()) { // hasExtInfo
- DeclaratorDecl::ExtInfo *Info
- = new (Reader.getContext()) DeclaratorDecl::ExtInfo();
+ auto *Info = new (Reader.getContext()) DeclaratorDecl::ExtInfo();
ReadQualifierInfo(*Info);
DD->DeclInfo = Info;
}
+ QualType TSIType = Record.readType();
+ DD->setTypeSourceInfo(
+ TSIType.isNull() ? nullptr
+ : Reader.getContext().CreateTypeSourceInfo(TSIType));
}
void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
RedeclarableResult Redecl = VisitRedeclarable(FD);
VisitDeclaratorDecl(FD);
+ // Attach a type to this function. Use the real type if possible, but fall
+ // back to the type as written if it involves a deduced return type.
+ if (FD->getTypeSourceInfo() &&
+ FD->getTypeSourceInfo()->getType()->castAs<FunctionType>()
+ ->getReturnType()->getContainedAutoType()) {
+ // We'll set up the real type in Visit, once we've finished loading the
+ // function.
+ FD->setType(FD->getTypeSourceInfo()->getType());
+ } else {
+ FD->setType(Reader.GetType(DeferredTypeID));
+ DeferredTypeID = 0;
+ }
+
ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName());
FD->IdentifierNamespace = Record.readInt();
@@ -786,12 +865,14 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->HasWrittenPrototype = Record.readInt();
FD->IsDeleted = Record.readInt();
FD->IsTrivial = Record.readInt();
+ FD->IsTrivialForCall = Record.readInt();
FD->IsDefaulted = Record.readInt();
FD->IsExplicitlyDefaulted = Record.readInt();
FD->HasImplicitReturnZero = Record.readInt();
FD->IsConstexpr = Record.readInt();
FD->UsesSEHTry = Record.readInt();
FD->HasSkippedBody = Record.readInt();
+ FD->IsMultiVersion = Record.readInt();
FD->IsLateTemplateParsed = Record.readInt();
FD->setCachedLinkage(Linkage(Record.readInt()));
FD->EndRangeLoc = ReadSourceLocation();
@@ -808,9 +889,8 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setDescribedFunctionTemplate(ReadDeclAs<FunctionTemplateDecl>());
break;
case FunctionDecl::TK_MemberSpecialization: {
- FunctionDecl *InstFD = ReadDeclAs<FunctionDecl>();
- TemplateSpecializationKind TSK =
- (TemplateSpecializationKind)Record.readInt();
+ auto *InstFD = ReadDeclAs<FunctionDecl>();
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
SourceLocation POI = ReadSourceLocation();
FD->setInstantiationOfMemberFunction(Reader.getContext(), InstFD, TSK);
FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
@@ -818,9 +898,8 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
break;
}
case FunctionDecl::TK_FunctionTemplateSpecialization: {
- FunctionTemplateDecl *Template = ReadDeclAs<FunctionTemplateDecl>();
- TemplateSpecializationKind TSK =
- (TemplateSpecializationKind)Record.readInt();
+ auto *Template = ReadDeclAs<FunctionTemplateDecl>();
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
// Template arguments.
SmallVector<TemplateArgument, 8> TemplArgs;
@@ -833,7 +912,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
if (HasTemplateArgumentsAsWritten) {
unsigned NumTemplateArgLocs = Record.readInt();
TemplArgLocs.reserve(NumTemplateArgLocs);
- for (unsigned i=0; i != NumTemplateArgLocs; ++i)
+ for (unsigned i = 0; i != NumTemplateArgLocs; ++i)
TemplArgLocs.push_back(Record.readTemplateArgumentLoc());
LAngleLoc = ReadSourceLocation();
@@ -846,7 +925,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
TemplateArgumentList *TemplArgList
= TemplateArgumentList::CreateCopy(C, TemplArgs);
TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
- for (unsigned i=0, e = TemplArgLocs.size(); i != e; ++i)
+ for (unsigned i = 0, e = TemplArgLocs.size(); i != e; ++i)
TemplArgsInfo.addArgument(TemplArgLocs[i]);
FunctionTemplateSpecializationInfo *FTInfo
= FunctionTemplateSpecializationInfo::Create(C, FD, Template, TSK,
@@ -859,7 +938,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
if (FD->isCanonicalDecl()) { // if canonical, add to the template's set.
// The template that contains the specializations set. It's not safe to
// use getCanonicalDecl on Template since it may still be initializing.
- FunctionTemplateDecl *CanonTemplate = ReadDeclAs<FunctionTemplateDecl>();
+ auto *CanonTemplate = ReadDeclAs<FunctionTemplateDecl>();
// Get the InsertPos by FindNodeOrInsertPos() instead of calling
// InsertNode(FTInfo) directly to avoid the getASTContext() call in
// FunctionTemplateSpecializationInfo's Profile().
@@ -981,7 +1060,7 @@ ObjCTypeParamList *ASTDeclReader::ReadObjCTypeParamList() {
SmallVector<ObjCTypeParamDecl *, 4> typeParams;
typeParams.reserve(numParams);
for (unsigned i = 0; i != numParams; ++i) {
- auto typeParam = ReadDeclAs<ObjCTypeParamDecl>();
+ auto *typeParam = ReadDeclAs<ObjCTypeParamDecl>();
if (!typeParam)
return nullptr;
@@ -1034,7 +1113,7 @@ void ASTDeclReader::MergeDefinitionData(ObjCInterfaceDecl *D,
void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
RedeclarableResult Redecl = VisitRedeclarable(ID);
VisitObjCContainerDecl(ID);
- TypeIDForTypeDecl = Record.getGlobalTypeID(Record.readInt());
+ DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
mergeRedeclarable(ID, Redecl);
ID->TypeParamList = ReadObjCTypeParamList();
@@ -1079,7 +1158,6 @@ void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
void ASTDeclReader::ReadObjCDefinitionData(
struct ObjCProtocolDecl::DefinitionData &Data) {
-
unsigned NumProtoRefs = Record.readInt();
SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
ProtoRefs.reserve(NumProtoRefs);
@@ -1155,6 +1233,12 @@ void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
ProtoLocs.push_back(ReadSourceLocation());
CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
Reader.getContext());
+
+ // Protocols in the class extension belong to the class.
+ if (NumProtoRefs > 0 && CD->ClassInterface && CD->IsClassExtension())
+ CD->ClassInterface->mergeClassExtensionProtocolList(
+ (ObjCProtocolDecl *const *)ProtoRefs.data(), NumProtoRefs,
+ Reader.getContext());
}
void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
@@ -1234,7 +1318,7 @@ void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
FD->setBitWidth(BW);
if (!FD->getDeclName()) {
- if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>())
+ if (auto *Tmpl = ReadDeclAs<FieldDecl>())
Reader.getContext().setInstantiatedFromUnnamedFieldDecl(FD, Tmpl);
}
mergeMergeable(FD);
@@ -1272,6 +1356,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->NonParmVarDeclBits.ExceptionVar = Record.readInt();
VD->NonParmVarDeclBits.NRVOVariable = Record.readInt();
VD->NonParmVarDeclBits.CXXForRangeDecl = Record.readInt();
+ VD->NonParmVarDeclBits.ObjCForDecl = Record.readInt();
VD->NonParmVarDeclBits.ARCPseudoStrong = Record.readInt();
VD->NonParmVarDeclBits.IsInline = Record.readInt();
VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
@@ -1280,7 +1365,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
VD->NonParmVarDeclBits.ImplicitParamKind = Record.readInt();
}
- Linkage VarLinkage = Linkage(Record.readInt());
+ auto VarLinkage = Linkage(Record.readInt());
VD->setCachedLinkage(VarLinkage);
// Reconstruct the one piece of the IdentifierNamespace that we need.
@@ -1316,9 +1401,8 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->setDescribedVarTemplate(ReadDeclAs<VarTemplateDecl>());
break;
case StaticDataMemberSpecialization: { // HasMemberSpecializationInfo.
- VarDecl *Tmpl = ReadDeclAs<VarDecl>();
- TemplateSpecializationKind TSK =
- (TemplateSpecializationKind)Record.readInt();
+ auto *Tmpl = ReadDeclAs<VarDecl>();
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
SourceLocation POI = ReadSourceLocation();
Reader.getContext().setInstantiatedFromStaticDataMember(VD, Tmpl, TSK,POI);
mergeRedeclarable(VD, Redecl);
@@ -1357,7 +1441,7 @@ void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
void ASTDeclReader::VisitDecompositionDecl(DecompositionDecl *DD) {
VisitVarDecl(DD);
- BindingDecl **BDs = DD->getTrailingObjects<BindingDecl*>();
+ auto **BDs = DD->getTrailingObjects<BindingDecl *>();
for (unsigned I = 0; I != DD->NumBindings; ++I)
BDs[I] = ReadDeclAs<BindingDecl>();
}
@@ -1393,7 +1477,7 @@ void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
SmallVector<BlockDecl::Capture, 16> captures;
captures.reserve(numCaptures);
for (unsigned i = 0; i != numCaptures; ++i) {
- VarDecl *decl = ReadDeclAs<VarDecl>();
+ auto *decl = ReadDeclAs<VarDecl>();
unsigned flags = Record.readInt();
bool byRef = (flags & 1);
bool nested = (flags & 2);
@@ -1460,7 +1544,7 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
// Each module has its own anonymous namespace, which is disjoint from
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
- NamespaceDecl *Anon = cast<NamespaceDecl>(Reader.GetDecl(AnonNamespace));
+ auto *Anon = cast<NamespaceDecl>(Reader.GetDecl(AnonNamespace));
if (!Record.isModule())
D->setAnonymousNamespace(Anon);
}
@@ -1483,7 +1567,7 @@ void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
ReadDeclarationNameLoc(D->DNLoc, D->getDeclName());
D->FirstUsingShadow.setPointer(ReadDeclAs<UsingShadowDecl>());
D->setTypename(Record.readInt());
- if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>())
+ if (auto *Pattern = ReadDeclAs<NamedDecl>())
Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern);
mergeMergeable(D);
}
@@ -1491,7 +1575,7 @@ void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
void ASTDeclReader::VisitUsingPackDecl(UsingPackDecl *D) {
VisitNamedDecl(D);
D->InstantiatedFrom = ReadDeclAs<NamedDecl>();
- NamedDecl **Expansions = D->getTrailingObjects<NamedDecl*>();
+ auto **Expansions = D->getTrailingObjects<NamedDecl *>();
for (unsigned I = 0; I != D->NumExpansions; ++I)
Expansions[I] = ReadDeclAs<NamedDecl>();
mergeMergeable(D);
@@ -1500,9 +1584,10 @@ void ASTDeclReader::VisitUsingPackDecl(UsingPackDecl *D) {
void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
- D->setTargetDecl(ReadDeclAs<NamedDecl>());
+ D->Underlying = ReadDeclAs<NamedDecl>();
+ D->IdentifierNamespace = Record.readInt();
D->UsingOrNextShadow = ReadDeclAs<NamedDecl>();
- UsingShadowDecl *Pattern = ReadDeclAs<UsingShadowDecl>();
+ auto *Pattern = ReadDeclAs<UsingShadowDecl>();
if (Pattern)
Reader.getContext().setInstantiatedFromUsingShadowDecl(D, Pattern);
mergeRedeclarable(D, Redecl);
@@ -1554,7 +1639,9 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.Polymorphic = Record.readInt();
Data.Abstract = Record.readInt();
Data.IsStandardLayout = Record.readInt();
- Data.HasNoNonEmptyBases = Record.readInt();
+ Data.IsCXX11StandardLayout = Record.readInt();
+ Data.HasBasesWithFields = Record.readInt();
+ Data.HasBasesWithNonStaticDataMembers = Record.readInt();
Data.HasPrivateFields = Record.readInt();
Data.HasProtectedFields = Record.readInt();
Data.HasPublicFields = Record.readInt();
@@ -1575,11 +1662,12 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.DefaultedMoveAssignmentIsDeleted = Record.readInt();
Data.DefaultedDestructorIsDeleted = Record.readInt();
Data.HasTrivialSpecialMembers = Record.readInt();
+ Data.HasTrivialSpecialMembersForCall = Record.readInt();
Data.DeclaredNonTrivialSpecialMembers = Record.readInt();
+ Data.DeclaredNonTrivialSpecialMembersForCall = Record.readInt();
Data.HasIrrelevantDestructor = Record.readInt();
Data.HasConstexprNonCopyMoveConstructor = Record.readInt();
Data.HasDefaultedDefaultConstructor = Record.readInt();
- Data.CanPassInRegisters = Record.readInt();
Data.DefaultedDefaultConstructorIsConstexpr = Record.readInt();
Data.HasConstexprDefaultConstructor = Record.readInt();
Data.HasNonLiteralTypeFieldsOrBases = Record.readInt();
@@ -1610,9 +1698,9 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.FirstFriend = ReadDeclID();
if (Data.IsLambda) {
- typedef LambdaCapture Capture;
- CXXRecordDecl::LambdaDefinitionData &Lambda
- = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
+ using Capture = LambdaCapture;
+
+ auto &Lambda = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
Lambda.Dependent = Record.readInt();
Lambda.IsGenericLambda = Record.readInt();
Lambda.CaptureDefault = Record.readInt();
@@ -1627,7 +1715,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
SourceLocation Loc = ReadSourceLocation();
bool IsImplicit = Record.readInt();
- LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record.readInt());
+ auto Kind = static_cast<LambdaCaptureKind>(Record.readInt());
switch (Kind) {
case LCK_StarThis:
case LCK_This:
@@ -1636,7 +1724,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
break;
case LCK_ByCopy:
case LCK_ByRef:
- VarDecl *Var = ReadDeclAs<VarDecl>();
+ auto *Var = ReadDeclAs<VarDecl>();
SourceLocation EllipsisLoc = ReadSourceLocation();
*ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
break;
@@ -1692,7 +1780,9 @@ void ASTDeclReader::MergeDefinitionData(
MATCH_FIELD(Polymorphic)
MATCH_FIELD(Abstract)
MATCH_FIELD(IsStandardLayout)
- MATCH_FIELD(HasNoNonEmptyBases)
+ MATCH_FIELD(IsCXX11StandardLayout)
+ MATCH_FIELD(HasBasesWithFields)
+ MATCH_FIELD(HasBasesWithNonStaticDataMembers)
MATCH_FIELD(HasPrivateFields)
MATCH_FIELD(HasProtectedFields)
MATCH_FIELD(HasPublicFields)
@@ -1713,11 +1803,12 @@ void ASTDeclReader::MergeDefinitionData(
MATCH_FIELD(DefaultedMoveAssignmentIsDeleted)
MATCH_FIELD(DefaultedDestructorIsDeleted)
OR_FIELD(HasTrivialSpecialMembers)
+ OR_FIELD(HasTrivialSpecialMembersForCall)
OR_FIELD(DeclaredNonTrivialSpecialMembers)
+ OR_FIELD(DeclaredNonTrivialSpecialMembersForCall)
MATCH_FIELD(HasIrrelevantDestructor)
OR_FIELD(HasConstexprNonCopyMoveConstructor)
OR_FIELD(HasDefaultedDefaultConstructor)
- MATCH_FIELD(CanPassInRegisters)
MATCH_FIELD(DefaultedDefaultConstructorIsConstexpr)
OR_FIELD(HasConstexprDefaultConstructor)
MATCH_FIELD(HasNonLiteralTypeFieldsOrBases)
@@ -1775,29 +1866,31 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
else
DD = new (C) struct CXXRecordDecl::DefinitionData(D);
+ CXXRecordDecl *Canon = D->getCanonicalDecl();
+ // Set the decl's definition data before reading it, so that during
+ // deserialization, when we read the CXXRecordDecl, it already has definition
+ // data and we don't set a fake one.
+ if (!Canon->DefinitionData)
+ Canon->DefinitionData = DD;
+ D->DefinitionData = Canon->DefinitionData;
ReadCXXDefinitionData(*DD, D);
- // We might already have a definition for this record. This can happen either
- // because we're reading an update record, or because we've already done some
- // merging. Either way, just merge into it.
- CXXRecordDecl *Canon = D->getCanonicalDecl();
- if (Canon->DefinitionData) {
+ // We might already have a different definition for this record. This can
+ // happen either because we're reading an update record, or because we've
+ // already done some merging. Either way, just merge into it.
+ if (Canon->DefinitionData != DD) {
MergeDefinitionData(Canon, std::move(*DD));
- D->DefinitionData = Canon->DefinitionData;
return;
}
// Mark this declaration as being a definition.
D->IsCompleteDefinition = true;
- D->DefinitionData = DD;
// If this is not the first declaration or is an update record, we can have
// other redeclarations already. Make a note that we need to propagate the
// DefinitionData pointer onto them.
- if (Update || Canon != D) {
- Canon->DefinitionData = D->DefinitionData;
+ if (Update || Canon != D)
Reader.PendingDefinitions.insert(D);
- }
}
ASTDeclReader::RedeclarableResult
@@ -1817,7 +1910,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
break;
case CXXRecTemplate: {
// Merged when we merge the template.
- ClassTemplateDecl *Template = ReadDeclAs<ClassTemplateDecl>();
+ auto *Template = ReadDeclAs<ClassTemplateDecl>();
D->TemplateOrInstantiation = Template;
if (!Template->getTemplatedDecl()) {
// We've not actually loaded the ClassTemplateDecl yet, because we're
@@ -1826,14 +1919,13 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
//
// Beware: we do not yet know our canonical declaration, and may still
// get merged once the surrounding class template has got off the ground.
- TypeIDForTypeDecl = 0;
+ DeferredTypeID = 0;
}
break;
}
case CXXRecMemberSpecialization: {
- CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>();
- TemplateSpecializationKind TSK =
- (TemplateSpecializationKind)Record.readInt();
+ auto *RD = ReadDeclAs<CXXRecordDecl>();
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
SourceLocation POI = ReadSourceLocation();
MemberSpecializationInfo *MSI = new (C) MemberSpecializationInfo(RD, TSK);
MSI->setPointOfInstantiation(POI);
@@ -1877,7 +1969,7 @@ void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
while (NumOverridenMethods--) {
// Avoid invariant checking of CXXMethodDecl::addOverriddenMethod,
// MD may be initializing.
- if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>())
+ if (auto *MD = ReadDeclAs<CXXMethodDecl>())
Reader.getContext().addOverriddenMethod(D, MD->getCanonicalDecl());
}
} else {
@@ -1904,7 +1996,7 @@ void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
VisitCXXMethodDecl(D);
if (auto *OperatorDelete = ReadDeclAs<FunctionDecl>()) {
- auto *Canon = cast<CXXDestructorDecl>(D->getCanonicalDecl());
+ CXXDestructorDecl *Canon = D->getCanonicalDecl();
auto *ThisArg = Record.readExpr();
// FIXME: Check consistency if we have an old and new operator delete.
if (!Canon->OperatorDelete) {
@@ -1922,7 +2014,7 @@ void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
VisitDecl(D);
D->ImportedAndComplete.setPointer(readModule());
D->ImportedAndComplete.setInt(Record.readInt());
- SourceLocation *StoredLocs = D->getTrailingObjects<SourceLocation>();
+ auto *StoredLocs = D->getTrailingObjects<SourceLocation>();
for (unsigned I = 0, N = Record.back(); I != N; ++I)
StoredLocs[I] = ReadSourceLocation();
Record.skipInts(1); // The number of stored source locations.
@@ -1965,7 +2057,7 @@ DeclID ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
VisitNamedDecl(D);
DeclID PatternID = ReadDeclID();
- NamedDecl *TemplatedDecl = cast_or_null<NamedDecl>(Reader.GetDecl(PatternID));
+ auto *TemplatedDecl = cast_or_null<NamedDecl>(Reader.GetDecl(PatternID));
TemplateParameterList *TemplateParams = Record.readTemplateParameterList();
// FIXME handle associated constraints
D->init(TemplatedDecl, TemplateParams);
@@ -1989,8 +2081,7 @@ ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
// If this is the first declaration of the template, fill in the information
// for the 'common' pointer.
if (ThisDeclID == Redecl.getFirstID()) {
- if (RedeclarableTemplateDecl *RTD
- = ReadDeclAs<RedeclarableTemplateDecl>()) {
+ if (auto *RTD = ReadDeclAs<RedeclarableTemplateDecl>()) {
assert(RTD->getKind() == D->getKind() &&
"InstantiatedFromMemberTemplate kind mismatch");
D->setInstantiatedFromMemberTemplate(RTD);
@@ -2058,15 +2149,15 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
ASTContext &C = Reader.getContext();
if (Decl *InstD = ReadDecl()) {
- if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
+ if (auto *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
D->SpecializedTemplate = CTD;
} else {
SmallVector<TemplateArgument, 8> TemplArgs;
Record.readTemplateArgumentList(TemplArgs);
TemplateArgumentList *ArgList
= TemplateArgumentList::CreateCopy(C, TemplArgs);
- ClassTemplateSpecializationDecl::SpecializedPartialSpecialization *PS
- = new (C) ClassTemplateSpecializationDecl::
+ auto *PS =
+ new (C) ClassTemplateSpecializationDecl::
SpecializedPartialSpecialization();
PS->PartialSpecialization
= cast<ClassTemplatePartialSpecializationDecl>(InstD);
@@ -2083,12 +2174,11 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
bool writtenAsCanonicalDecl = Record.readInt();
if (writtenAsCanonicalDecl) {
- ClassTemplateDecl *CanonPattern = ReadDeclAs<ClassTemplateDecl>();
+ auto *CanonPattern = ReadDeclAs<ClassTemplateDecl>();
if (D->isCanonicalDecl()) { // It's kept in the folding set.
// Set this as, or find, the canonical declaration for this specialization
ClassTemplateSpecializationDecl *CanonSpec;
- if (ClassTemplatePartialSpecializationDecl *Partial =
- dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+ if (auto *Partial = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
CanonSpec = CanonPattern->getCommonPtr()->PartialSpecializations
.GetOrInsertNode(Partial);
} else {
@@ -2114,8 +2204,8 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
// Explicit info.
if (TypeSourceInfo *TyInfo = GetTypeSourceInfo()) {
- ClassTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo
- = new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
+ auto *ExplicitInfo =
+ new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
ExplicitInfo->TypeAsWritten = TyInfo;
ExplicitInfo->ExternLoc = ReadSourceLocation();
ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation();
@@ -2169,14 +2259,14 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
ASTContext &C = Reader.getContext();
if (Decl *InstD = ReadDecl()) {
- if (VarTemplateDecl *VTD = dyn_cast<VarTemplateDecl>(InstD)) {
+ if (auto *VTD = dyn_cast<VarTemplateDecl>(InstD)) {
D->SpecializedTemplate = VTD;
} else {
SmallVector<TemplateArgument, 8> TemplArgs;
Record.readTemplateArgumentList(TemplArgs);
TemplateArgumentList *ArgList = TemplateArgumentList::CreateCopy(
C, TemplArgs);
- VarTemplateSpecializationDecl::SpecializedPartialSpecialization *PS =
+ auto *PS =
new (C)
VarTemplateSpecializationDecl::SpecializedPartialSpecialization();
PS->PartialSpecialization =
@@ -2188,7 +2278,7 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
// Explicit info.
if (TypeSourceInfo *TyInfo = GetTypeSourceInfo()) {
- VarTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo =
+ auto *ExplicitInfo =
new (C) VarTemplateSpecializationDecl::ExplicitSpecializationInfo;
ExplicitInfo->TypeAsWritten = TyInfo;
ExplicitInfo->ExternLoc = ReadSourceLocation();
@@ -2205,11 +2295,10 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
bool writtenAsCanonicalDecl = Record.readInt();
if (writtenAsCanonicalDecl) {
- VarTemplateDecl *CanonPattern = ReadDeclAs<VarTemplateDecl>();
+ auto *CanonPattern = ReadDeclAs<VarTemplateDecl>();
if (D->isCanonicalDecl()) { // It's kept in the folding set.
// FIXME: If it's already present, merge it.
- if (VarTemplatePartialSpecializationDecl *Partial =
- dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
+ if (auto *Partial = dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
CanonPattern->getCommonPtr()->PartialSpecializations
.GetOrInsertNode(Partial);
} else {
@@ -2276,8 +2365,7 @@ void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
D->setDepth(Record.readInt());
D->setPosition(Record.readInt());
if (D->isExpandedParameterPack()) {
- TemplateParameterList **Data =
- D->getTrailingObjects<TemplateParameterList *>();
+ auto **Data = D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
I != N; ++I)
Data[I] = Record.readTemplateParameterList();
@@ -2351,7 +2439,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
(void)ReadDecl();
}
- T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
+ auto *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
if (FirstDecl != D) {
// We delay loading of the redeclaration chain to avoid deeply nested calls.
// We temporarily set the first (canonical) declaration as the previous one
@@ -2361,7 +2449,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
D->First = FirstDecl->getCanonicalDecl();
}
- T *DAsT = static_cast<T*>(D);
+ auto *DAsT = static_cast<T *>(D);
// Note that we need to load local redeclarations of this decl and build a
// decl chain for them. This must happen *after* we perform the preloading
@@ -2373,7 +2461,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
return RedeclarableResult(MergeWith, FirstDeclID, IsKeyDecl);
}
-/// \brief Attempts to merge the given declaration (D) with another declaration
+/// Attempts to merge the given declaration (D) with another declaration
/// of the same entity.
template<typename T>
void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
@@ -2387,7 +2475,7 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
if (!DBase->isFirstDecl())
return;
- T *D = static_cast<T*>(DBase);
+ auto *D = static_cast<T *>(DBase);
if (auto *Existing = Redecl.getKnownMergeTarget())
// We already know of an existing declaration we should merge with.
@@ -2397,7 +2485,7 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
mergeRedeclarable(D, Existing, Redecl, TemplatePatternID);
}
-/// \brief "Cast" to type T, asserting if we don't have an implicit conversion.
+/// "Cast" to type T, asserting if we don't have an implicit conversion.
/// We use this to put code in a template that will only be valid for certain
/// instantiations.
template<typename T> static T assert_cast(T t) { return t; }
@@ -2405,7 +2493,7 @@ template<typename T> static T assert_cast(...) {
llvm_unreachable("bad assert_cast");
}
-/// \brief Merge together the pattern declarations from two template
+/// Merge together the pattern declarations from two template
/// declarations.
void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
RedeclarableTemplateDecl *Existing,
@@ -2447,13 +2535,13 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
llvm_unreachable("merged an unknown kind of redeclarable template");
}
-/// \brief Attempts to merge the given declaration (D) with another declaration
+/// Attempts to merge the given declaration (D) with another declaration
/// of the same entity.
template<typename T>
void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
RedeclarableResult &Redecl,
DeclID TemplatePatternID) {
- T *D = static_cast<T*>(DBase);
+ auto *D = static_cast<T *>(DBase);
T *ExistingCanon = Existing->getCanonicalDecl();
T *DCanon = D->getCanonicalDecl();
if (ExistingCanon != DCanon) {
@@ -2487,7 +2575,21 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
}
}
-/// \brief Attempts to merge the given declaration (D) with another declaration
+/// ODR-like semantics for C/ObjC allow us to merge tag types and a structural
+/// check in Sema guarantees the types can be merged (see C11 6.2.7/1 or C89
+/// 6.1.2.6/1). Although most merging is done in Sema, we need to guarantee
+/// that some types are mergeable during deserialization, otherwise name
+/// lookup fails. This is the case for EnumConstantDecl.
+static bool allowODRLikeMergeInC(NamedDecl *ND) {
+ if (!ND)
+ return false;
+ // TODO: implement merge for other necessary decls.
+ if (isa<EnumConstantDecl>(ND))
+ return true;
+ return false;
+}
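// The case carved out above, as an illustrative snippet (not part of this
// patch): a header shared by two C modules defines a textually identical
// enum. Under C11 6.2.7/1 the types are compatible rather than
// redeclarations, but the enumerators still need to merge during
// deserialization or a later lookup of 'Red' would fail.
enum Color { Red, Green, Blue }; // imported via module A
enum Color { Red, Green, Blue }; // imported via module B: merged, not an error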
+
+/// Attempts to merge the given declaration (D) with another declaration
/// of the same entity, for the case where the entity is not actually
/// redeclarable. This happens, for instance, when merging the fields of
/// identical class definitions from two different modules.
@@ -2497,10 +2599,12 @@ void ASTDeclReader::mergeMergeable(Mergeable<T> *D) {
if (!Reader.getContext().getLangOpts().Modules)
return;
- // ODR-based merging is only performed in C++. In C, identically-named things
- // in different translation units are not redeclarations (but may still have
- // compatible types).
- if (!Reader.getContext().getLangOpts().CPlusPlus)
+ // ODR-based merging is performed in C++ and in some cases (tag types) in C.
+ // Note that in C, identically-named things in different translation units
+ // are not redeclarations, but they may still have compatible types, in
+ // which case ODR-like semantics may apply.
+ if (!Reader.getContext().getLangOpts().CPlusPlus &&
+ !allowODRLikeMergeInC(dyn_cast<NamedDecl>(static_cast<T*>(D))))
return;
if (FindExistingResult ExistingRes = findExisting(static_cast<T*>(D)))
@@ -2538,11 +2642,11 @@ void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
// Attribute Reading
//===----------------------------------------------------------------------===//
-/// \brief Reads attributes from the current stream position.
+/// Reads attributes from the current stream position.
void ASTReader::ReadAttributes(ASTRecordReader &Record, AttrVec &Attrs) {
for (unsigned i = 0, e = Record.readInt(); i != e; ++i) {
Attr *New = nullptr;
- attr::Kind Kind = (attr::Kind)Record.readInt();
+ auto Kind = (attr::Kind)Record.readInt();
SourceRange Range = Record.readSourceRange();
ASTContext &Context = getContext();
@@ -2557,7 +2661,7 @@ void ASTReader::ReadAttributes(ASTRecordReader &Record, AttrVec &Attrs) {
// ASTReader Implementation
//===----------------------------------------------------------------------===//
-/// \brief Note that we have loaded the declaration with the given
+/// Note that we have loaded the declaration with the given
/// Index.
///
/// This routine notes that this declaration has already been loaded,
@@ -2568,8 +2672,7 @@ inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) {
DeclsLoaded[Index] = D;
}
-
-/// \brief Determine whether the consumer will be interested in seeing
+/// Determine whether the consumer will be interested in seeing
/// this declaration (via HandleTopLevelDecl).
///
/// This routine should return true for anything that might affect
@@ -2597,10 +2700,10 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
return true;
if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D))
return !D->getDeclContext()->isFunctionOrMethod();
- if (VarDecl *Var = dyn_cast<VarDecl>(D))
+ if (const auto *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
Var->isThisDeclarationADefinition() == VarDecl::Definition;
- if (FunctionDecl *Func = dyn_cast<FunctionDecl>(D))
+ if (const auto *Func = dyn_cast<FunctionDecl>(D))
return Func->doesThisDeclarationHaveABody() || HasBody;
if (auto *ES = D->getASTContext().getExternalSource())
@@ -2610,7 +2713,7 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
return false;
}
-/// \brief Get the correct cursor and offset for loading a declaration.
+/// Get the correct cursor and offset for loading a declaration.
ASTReader::RecordLocation
ASTReader::DeclCursorForID(DeclID ID, SourceLocation &Loc) {
GlobalDeclMapType::iterator I = GlobalDeclMap.find(ID);
@@ -2623,8 +2726,7 @@ ASTReader::DeclCursorForID(DeclID ID, SourceLocation &Loc) {
}
ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
- ContinuousRangeMap<uint64_t, ModuleFile*, 4>::iterator I
- = GlobalBitOffsetsMap.find(GlobalOffset);
+ auto I = GlobalBitOffsetsMap.find(GlobalOffset);
assert(I != GlobalBitOffsetsMap.end() && "Corrupted global bit offsets map");
return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
@@ -2637,26 +2739,26 @@ uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
static bool isSameTemplateParameterList(const TemplateParameterList *X,
const TemplateParameterList *Y);
-/// \brief Determine whether two template parameters are similar enough
+/// Determine whether two template parameters are similar enough
/// that they may be used in declarations of the same template.
static bool isSameTemplateParameter(const NamedDecl *X,
const NamedDecl *Y) {
if (X->getKind() != Y->getKind())
return false;
- if (const TemplateTypeParmDecl *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
- const TemplateTypeParmDecl *TY = cast<TemplateTypeParmDecl>(Y);
+ if (const auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
+ const auto *TY = cast<TemplateTypeParmDecl>(Y);
return TX->isParameterPack() == TY->isParameterPack();
}
- if (const NonTypeTemplateParmDecl *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
- const NonTypeTemplateParmDecl *TY = cast<NonTypeTemplateParmDecl>(Y);
+ if (const auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
+ const auto *TY = cast<NonTypeTemplateParmDecl>(Y);
return TX->isParameterPack() == TY->isParameterPack() &&
TX->getASTContext().hasSameType(TX->getType(), TY->getType());
}
- const TemplateTemplateParmDecl *TX = cast<TemplateTemplateParmDecl>(X);
- const TemplateTemplateParmDecl *TY = cast<TemplateTemplateParmDecl>(Y);
+ const auto *TX = cast<TemplateTemplateParmDecl>(X);
+ const auto *TY = cast<TemplateTemplateParmDecl>(Y);
return TX->isParameterPack() == TY->isParameterPack() &&
isSameTemplateParameterList(TX->getTemplateParameters(),
TY->getTemplateParameters());
@@ -2709,7 +2811,7 @@ static bool isSameQualifier(const NestedNameSpecifier *X,
return !PX && !PY;
}
-/// \brief Determine whether two template parameter lists are similar enough
+/// Determine whether two template parameter lists are similar enough
/// that they may be used in declarations of the same template.
static bool isSameTemplateParameterList(const TemplateParameterList *X,
const TemplateParameterList *Y) {
@@ -2762,7 +2864,7 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
return true;
}
-/// \brief Determine whether the two declarations refer to the same entity.
+/// Determine whether the two declarations refer to the same entity.
static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
@@ -2770,14 +2872,18 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return true;
// Must be in the same context.
- if (!X->getDeclContext()->getRedeclContext()->Equals(
- Y->getDeclContext()->getRedeclContext()))
+ //
+ // Note that we can't use DeclContext::Equals here, because the DeclContexts
+ // could be two different declarations of the same function. (We will fix the
+ // semantic DC to refer to the primary definition after merging.)
+ if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
+ cast<Decl>(Y->getDeclContext()->getRedeclContext())))
return false;
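// The situation described above, sketched in an illustrative snippet (not
// part of this patch):
//
//   // in a header belonging to two different modules:
//   inline auto get() { struct S { int v; }; return S{}; }
//
// Each module carries its own FunctionDecl for get(), so the two S's have
// distinct semantic DeclContexts that nevertheless declare the same
// function; declaresSameEntity compares canonical decls and still matches.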
// Two typedefs refer to the same entity if they have the same underlying
// type.
- if (TypedefNameDecl *TypedefX = dyn_cast<TypedefNameDecl>(X))
- if (TypedefNameDecl *TypedefY = dyn_cast<TypedefNameDecl>(Y))
+ if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
+ if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
TypedefY->getUnderlyingType());
@@ -2796,8 +2902,8 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
}
// Compatible tags match.
- if (TagDecl *TagX = dyn_cast<TagDecl>(X)) {
- TagDecl *TagY = cast<TagDecl>(Y);
+ if (const auto *TagX = dyn_cast<TagDecl>(X)) {
+ const auto *TagY = cast<TagDecl>(Y);
return (TagX->getTagKind() == TagY->getTagKind()) ||
((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class ||
TagX->getTagKind() == TTK_Interface) &&
@@ -2808,28 +2914,51 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
// Functions with the same type and linkage match.
// FIXME: This needs to cope with merging of prototyped/non-prototyped
// functions, etc.
- if (FunctionDecl *FuncX = dyn_cast<FunctionDecl>(X)) {
- FunctionDecl *FuncY = cast<FunctionDecl>(Y);
- if (CXXConstructorDecl *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
- CXXConstructorDecl *CtorY = cast<CXXConstructorDecl>(Y);
+ if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
+ const auto *FuncY = cast<FunctionDecl>(Y);
+ if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
+ const auto *CtorY = cast<CXXConstructorDecl>(Y);
if (CtorX->getInheritedConstructor() &&
!isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
CtorY->getInheritedConstructor().getConstructor()))
return false;
}
+
+ if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
+ return false;
+
+ // Multiversioned functions with different feature strings are represented
+ // as separate declarations.
+ if (FuncX->isMultiVersion()) {
+ const auto *TAX = FuncX->getAttr<TargetAttr>();
+ const auto *TAY = FuncY->getAttr<TargetAttr>();
+ assert(TAX && TAY && "Multiversion Function without target attribute");
+
+ if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
+ return false;
+ }
+
ASTContext &C = FuncX->getASTContext();
- if (!C.hasSameType(FuncX->getType(), FuncY->getType())) {
+ auto GetTypeAsWritten = [](const FunctionDecl *FD) {
+ // Map to the first declaration that we've already merged into this one.
+ // The TSI of redeclarations might not match (due to calling conventions
+ // being inherited onto the type but not the TSI), but the TSI type of
+ // the first declaration of the function should match across modules.
+ FD = FD->getCanonicalDecl();
+ return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
+ : FD->getType();
+ };
+ QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
+ if (!C.hasSameType(XT, YT)) {
// We can get functions with different types on the redecl chain in C++17
// if they have differing exception specifications and at least one of
// the exception specs is unresolved.
- // FIXME: Do we need to check for C++14 deduced return types here too?
- auto *XFPT = FuncX->getType()->getAs<FunctionProtoType>();
- auto *YFPT = FuncY->getType()->getAs<FunctionProtoType>();
+ auto *XFPT = XT->getAs<FunctionProtoType>();
+ auto *YFPT = YT->getAs<FunctionProtoType>();
if (C.getLangOpts().CPlusPlus17 && XFPT && YFPT &&
(isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
- C.hasSameFunctionTypeIgnoringExceptionSpec(FuncX->getType(),
- FuncY->getType()))
+ C.hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
return true;
return false;
}
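// What the multiversion check earlier in this branch keeps apart, as an
// illustrative snippet (not part of this patch): same name and type, but
// different TargetAttr feature strings, so the declarations must not merge.
__attribute__((target("default"))) int work() { return 0; }
__attribute__((target("avx2"))) int work() { return 1; } // distinct entity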
@@ -2838,8 +2967,8 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
}
// Variables with the same type and linkage match.
- if (VarDecl *VarX = dyn_cast<VarDecl>(X)) {
- VarDecl *VarY = cast<VarDecl>(Y);
+ if (const auto *VarX = dyn_cast<VarDecl>(X)) {
+ const auto *VarY = cast<VarDecl>(Y);
if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
ASTContext &C = VarX->getASTContext();
if (C.hasSameType(VarX->getType(), VarY->getType()))
@@ -2861,15 +2990,15 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
}
// Namespaces with the same name and inlinedness match.
- if (NamespaceDecl *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
- NamespaceDecl *NamespaceY = cast<NamespaceDecl>(Y);
+ if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+ const auto *NamespaceY = cast<NamespaceDecl>(Y);
return NamespaceX->isInline() == NamespaceY->isInline();
}
// Identical template names and kinds match if their template parameter lists
// and patterns match.
- if (TemplateDecl *TemplateX = dyn_cast<TemplateDecl>(X)) {
- TemplateDecl *TemplateY = cast<TemplateDecl>(Y);
+ if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
+ const auto *TemplateY = cast<TemplateDecl>(Y);
return isSameEntity(TemplateX->getTemplatedDecl(),
TemplateY->getTemplatedDecl()) &&
isSameTemplateParameterList(TemplateX->getTemplateParameters(),
@@ -2877,15 +3006,15 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
}
// Fields with the same name and the same type match.
- if (FieldDecl *FDX = dyn_cast<FieldDecl>(X)) {
- FieldDecl *FDY = cast<FieldDecl>(Y);
+ if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
+ const auto *FDY = cast<FieldDecl>(Y);
// FIXME: Also check the bitwidth is odr-equivalent, if any.
return X->getASTContext().hasSameType(FDX->getType(), FDY->getType());
}
// Indirect fields with the same target field match.
- if (auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
- auto *IFDY = cast<IndirectFieldDecl>(Y);
+ if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
+ const auto *IFDY = cast<IndirectFieldDecl>(Y);
return IFDX->getAnonField()->getCanonicalDecl() ==
IFDY->getAnonField()->getCanonicalDecl();
}
@@ -2896,32 +3025,32 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return true;
// Using shadow declarations with the same target match.
- if (UsingShadowDecl *USX = dyn_cast<UsingShadowDecl>(X)) {
- UsingShadowDecl *USY = cast<UsingShadowDecl>(Y);
+ if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
+ const auto *USY = cast<UsingShadowDecl>(Y);
return USX->getTargetDecl() == USY->getTargetDecl();
}
// Using declarations with the same qualifier match. (We already know that
// the name matches.)
- if (auto *UX = dyn_cast<UsingDecl>(X)) {
- auto *UY = cast<UsingDecl>(Y);
+ if (const auto *UX = dyn_cast<UsingDecl>(X)) {
+ const auto *UY = cast<UsingDecl>(Y);
return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
UX->hasTypename() == UY->hasTypename() &&
UX->isAccessDeclaration() == UY->isAccessDeclaration();
}
- if (auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
- auto *UY = cast<UnresolvedUsingValueDecl>(Y);
+ if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
+ const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
UX->isAccessDeclaration() == UY->isAccessDeclaration();
}
- if (auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X))
+ if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X))
return isSameQualifier(
UX->getQualifier(),
cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
// Namespace alias definitions with the same target match.
- if (auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
- auto *NAY = cast<NamespaceAliasDecl>(Y);
+ if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
+ const auto *NAY = cast<NamespaceAliasDecl>(Y);
return NAX->getNamespace()->Equals(NAY->getNamespace());
}
@@ -2932,10 +3061,10 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
/// looking for declarations to merge.
DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
DeclContext *DC) {
- if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+ if (auto *ND = dyn_cast<NamespaceDecl>(DC))
return ND->getOriginalNamespace();
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DC)) {
// Try to dig out the definition.
auto *DD = RD->DefinitionData;
if (!DD)
@@ -2959,7 +3088,7 @@ DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
return DD->Definition;
}
- if (EnumDecl *ED = dyn_cast<EnumDecl>(DC))
+ if (auto *ED = dyn_cast<EnumDecl>(DC))
return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition()
: nullptr;
@@ -3021,23 +3150,50 @@ static NamedDecl *getDeclForMerging(NamedDecl *Found,
return nullptr;
}
+/// Find the declaration to use to populate the anonymous declaration table
+/// for the given lexical DeclContext. We only care about finding local
+/// definitions of the context; we'll merge imported ones as we go.
+DeclContext *
+ASTDeclReader::getPrimaryDCForAnonymousDecl(DeclContext *LexicalDC) {
+ // For classes, we track the definition as we merge.
+ if (auto *RD = dyn_cast<CXXRecordDecl>(LexicalDC)) {
+ auto *DD = RD->getCanonicalDecl()->DefinitionData;
+ return DD ? DD->Definition : nullptr;
+ }
+
+ // For anything else, walk its merged redeclarations looking for a definition.
+ // Note that we can't just call getDefinition here because the redeclaration
+ // chain isn't wired up.
+ for (auto *D : merged_redecls(cast<Decl>(LexicalDC))) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isThisDeclarationADefinition())
+ return FD;
+ if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (MD->isThisDeclarationADefinition())
+ return MD;
+ }
+
+ // No merged definition yet.
+ return nullptr;
+}
+
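// What gets numbered here, as an illustrative snippet (not part of this
// patch): unnamed members have no name to merge by, so identical definitions
// from different modules are paired purely by index within the context
// located above.
struct Outer {
  union { int a; unsigned b; }; // anonymous decl #0 within Outer
  union { float c; double d; }; // anonymous decl #1 within Outer
};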
NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader,
DeclContext *DC,
unsigned Index) {
// If the lexical context has been merged, look into the now-canonical
// definition.
- if (auto *Merged = Reader.MergedDeclContexts.lookup(DC))
- DC = Merged;
+ auto *CanonDC = cast<Decl>(DC)->getCanonicalDecl();
// If we've seen this before, return the canonical declaration.
- auto &Previous = Reader.AnonymousDeclarationsForMerging[DC];
+ auto &Previous = Reader.AnonymousDeclarationsForMerging[CanonDC];
if (Index < Previous.size() && Previous[Index])
return Previous[Index];
// If this is the first time, but we have parsed a declaration of the context,
// build the anonymous declaration list from the parsed declaration.
- if (!cast<Decl>(DC)->isFromASTFile()) {
- numberAnonymousDeclsWithin(DC, [&](NamedDecl *ND, unsigned Number) {
+ auto *PrimaryDC = getPrimaryDCForAnonymousDecl(DC);
+ if (PrimaryDC && !cast<Decl>(PrimaryDC)->isFromASTFile()) {
+ numberAnonymousDeclsWithin(PrimaryDC, [&](NamedDecl *ND, unsigned Number) {
if (Previous.size() == Number)
Previous.push_back(cast<NamedDecl>(ND->getCanonicalDecl()));
else
@@ -3051,10 +3207,9 @@ NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader,
void ASTDeclReader::setAnonymousDeclForMerging(ASTReader &Reader,
DeclContext *DC, unsigned Index,
NamedDecl *D) {
- if (auto *Merged = Reader.MergedDeclContexts.lookup(DC))
- DC = Merged;
+ auto *CanonDC = cast<Decl>(DC)->getCanonicalDecl();
- auto &Previous = Reader.AnonymousDeclarationsForMerging[DC];
+ auto &Previous = Reader.AnonymousDeclarationsForMerging[CanonDC];
if (Index >= Previous.size())
Previous.resize(Index + 1);
if (!Previous[Index])
@@ -3102,12 +3257,10 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
// cause additional lookups here.
class UpToDateIdentifierRAII {
IdentifierInfo *II;
- bool WasOutToDate;
+ bool WasOutToDate = false;
public:
- explicit UpToDateIdentifierRAII(IdentifierInfo *II)
- : II(II), WasOutToDate(false)
- {
+ explicit UpToDateIdentifierRAII(IdentifierInfo *II) : II(II) {
if (II) {
WasOutToDate = II->isOutOfDate();
if (WasOutToDate)
@@ -3160,6 +3313,7 @@ template<typename DeclT>
Decl *ASTDeclReader::getMostRecentDeclImpl(Redeclarable<DeclT> *D) {
return D->RedeclLink.getLatestNotUpdated();
}
+
Decl *ASTDeclReader::getMostRecentDeclImpl(...) {
llvm_unreachable("getMostRecentDecl on non-redeclarable declaration");
}
@@ -3190,12 +3344,13 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
}
namespace clang {
+
template<>
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
Redeclarable<VarDecl> *D,
Decl *Previous, Decl *Canon) {
- VarDecl *VD = static_cast<VarDecl*>(D);
- VarDecl *PrevVD = cast<VarDecl>(Previous);
+ auto *VD = static_cast<VarDecl *>(D);
+ auto *PrevVD = cast<VarDecl>(Previous);
D->RedeclLink.setPrevious(PrevVD);
D->First = PrevVD->First;
@@ -3217,8 +3372,8 @@ template<>
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
Redeclarable<FunctionDecl> *D,
Decl *Previous, Decl *Canon) {
- FunctionDecl *FD = static_cast<FunctionDecl*>(D);
- FunctionDecl *PrevFD = cast<FunctionDecl>(Previous);
+ auto *FD = static_cast<FunctionDecl *>(D);
+ auto *PrevFD = cast<FunctionDecl>(Previous);
FD->RedeclLink.setPrevious(PrevFD);
FD->First = PrevFD->First;
@@ -3257,7 +3412,8 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
std::make_pair(Canon, IsUnresolved ? PrevFD : FD));
}
}
-} // end namespace clang
+
+} // namespace clang
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, ...) {
llvm_unreachable("attachPreviousDecl on non-redeclarable declaration");
@@ -3326,7 +3482,7 @@ void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D,
// If the declaration declares a template, it may inherit default arguments
// from the previous declaration.
- if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ if (auto *TD = dyn_cast<TemplateDecl>(D))
inheritDefaultTemplateArguments(Reader.getContext(),
cast<TemplateDecl>(Previous), TD);
}
@@ -3335,6 +3491,7 @@ template<typename DeclT>
void ASTDeclReader::attachLatestDeclImpl(Redeclarable<DeclT> *D, Decl *Latest) {
D->RedeclLink.setLatest(cast<DeclT>(Latest));
}
+
void ASTDeclReader::attachLatestDeclImpl(...) {
llvm_unreachable("attachLatestDecl on non-redeclarable declaration");
}
@@ -3356,6 +3513,7 @@ template<typename DeclT>
void ASTDeclReader::markIncompleteDeclChainImpl(Redeclarable<DeclT> *D) {
D->RedeclLink.markIncomplete();
}
+
void ASTDeclReader::markIncompleteDeclChainImpl(...) {
llvm_unreachable("markIncompleteDeclChain on non-redeclarable declaration");
}
@@ -3371,7 +3529,7 @@ void ASTReader::markIncompleteDeclChain(Decl *D) {
}
}
-/// \brief Read the declaration at the given offset from the AST file.
+/// Read the declaration at the given offset from the AST file.
Decl *ASTReader::ReadDeclRecord(DeclID ID) {
unsigned Index = ID - NUM_PREDEF_DECL_IDS;
SourceLocation DeclLoc;
@@ -3639,7 +3797,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// If this declaration is also a declaration context, get the
// offsets for its tables of lexical and visible declarations.
- if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
+ if (auto *DC = dyn_cast<DeclContext>(D)) {
std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
if (Offsets.first &&
ReadLexicalDeclContextStorage(*Loc.F, DeclsCursor, Offsets.first, DC))
@@ -3655,7 +3813,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
PendingUpdateRecord(ID, D, /*JustLoaded=*/true));
// Load the categories after recursive loading is finished.
- if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(D))
+ if (auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
// If we already have a definition when deserializing the ObjCInterfaceDecl,
// we put the Decl in PendingDefinitions so we can pull the categories here.
if (Class->isThisDeclarationADefinition() ||
@@ -3706,7 +3864,7 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
ProcessingUpdatesRAIIObj ProcessingUpdates(*this);
DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
- llvm::SmallVector<serialization::DeclID, 8> PendingLazySpecializationIDs;
+ SmallVector<serialization::DeclID, 8> PendingLazySpecializationIDs;
if (UpdI != DeclUpdateOffsets.end()) {
auto UpdateOffsets = std::move(UpdI->second);
@@ -3763,7 +3921,7 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
PendingVisibleUpdates.erase(I);
auto *DC = cast<DeclContext>(D)->getPrimaryContext();
- for (const PendingVisibleUpdate &Update : VisibleUpdates)
+ for (const auto &Update : VisibleUpdates)
Lookups[DC].Table.add(
Update.Mod, Update.Data,
reader::ASTDeclContextNameLookupTrait(*this, *Update.Mod));
@@ -3812,13 +3970,14 @@ void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
}
namespace {
- /// \brief Given an ObjC interface, goes through the modules and links to the
+
+ /// Given an ObjC interface, goes through the modules and links to the
/// interface all the categories for it.
class ObjCCategoriesVisitor {
ASTReader &Reader;
ObjCInterfaceDecl *Interface;
llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized;
- ObjCCategoryDecl *Tail;
+ ObjCCategoryDecl *Tail = nullptr;
llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
serialization::GlobalDeclID InterfaceID;
unsigned PreviousGeneration;
@@ -3868,10 +4027,8 @@ namespace {
llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
serialization::GlobalDeclID InterfaceID,
unsigned PreviousGeneration)
- : Reader(Reader), Interface(Interface), Deserialized(Deserialized),
- Tail(nullptr), InterfaceID(InterfaceID),
- PreviousGeneration(PreviousGeneration)
- {
+ : Reader(Reader), Interface(Interface), Deserialized(Deserialized),
+ InterfaceID(InterfaceID), PreviousGeneration(PreviousGeneration) {
// Populate the name -> category map with the set of known categories.
for (auto *Cat : Interface->known_categories()) {
if (Cat->getDeclName())
@@ -3920,7 +4077,8 @@ namespace {
return true;
}
};
-} // end anonymous namespace
+
+} // namespace
void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
ObjCInterfaceDecl *D,
@@ -3973,13 +4131,13 @@ void ASTDeclReader::UpdateDecl(Decl *D,
break;
case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE: {
- NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>();
+ auto *Anon = ReadDeclAs<NamespaceDecl>();
// Each module has its own anonymous namespace, which is disjoint from
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
if (!Record.isModule()) {
- if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
+ if (auto *TU = dyn_cast<TranslationUnitDecl>(D))
TU->setAnonymousNamespace(Anon);
else
cast<NamespaceDecl>(D)->setAnonymousNamespace(Anon);
@@ -3988,7 +4146,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
case UPD_CXX_ADDED_VAR_DEFINITION: {
- VarDecl *VD = cast<VarDecl>(D);
+ auto *VD = cast<VarDecl>(D);
VD->NonParmVarDeclBits.IsInline = Record.readInt();
VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
uint64_t Val = Record.readInt();
@@ -4005,8 +4163,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
case UPD_CXX_POINT_OF_INSTANTIATION: {
SourceLocation POI = Record.readSourceLocation();
- if (VarTemplateSpecializationDecl *VTSD =
- dyn_cast<VarTemplateSpecializationDecl>(D)) {
+ if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(D)) {
VTSD->setPointOfInstantiation(POI);
} else if (auto *VD = dyn_cast<VarDecl>(D)) {
VD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
@@ -4023,12 +4180,12 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
case UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT: {
- auto Param = cast<ParmVarDecl>(D);
+ auto *Param = cast<ParmVarDecl>(D);
// We have to read the default argument regardless of whether we use it
// so that hypothetical further update records aren't messed up.
// TODO: Add a function to skip over the next expr record.
- auto DefaultArg = Record.readExpr();
+ auto *DefaultArg = Record.readExpr();
// Only apply the update if the parameter still has an uninstantiated
// default argument.
@@ -4038,8 +4195,8 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
case UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER: {
- auto FD = cast<FieldDecl>(D);
- auto DefaultInit = Record.readExpr();
+ auto *FD = cast<FieldDecl>(D);
+ auto *DefaultInit = Record.readExpr();
// Only apply the update if the field still has an uninstantiated
// default member initializer.
@@ -4055,7 +4212,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
case UPD_CXX_ADDED_FUNCTION_DEFINITION: {
- FunctionDecl *FD = cast<FunctionDecl>(D);
+ auto *FD = cast<FunctionDecl>(D);
if (Reader.PendingBodies[FD]) {
// FIXME: Maybe check for ODR violations.
// It's safe to stop now because this update record is always last.
@@ -4082,6 +4239,9 @@ void ASTDeclReader::UpdateDecl(Decl *D,
bool HadRealDefinition =
OldDD && (OldDD->Definition != RD ||
!Reader.PendingFakeDefinitionData.count(OldDD));
+ RD->setParamDestroyedInCallee(Record.readInt());
+ RD->setArgPassingRestrictions(
+ (RecordDecl::ArgPassingKind)Record.readInt());
ReadCXXRecordDefinition(RD, /*Update*/true);
// Visible update is handled separately.
@@ -4098,13 +4258,12 @@ void ASTDeclReader::UpdateDecl(Decl *D,
MSInfo->setTemplateSpecializationKind(TSK);
MSInfo->setPointOfInstantiation(POI);
} else {
- ClassTemplateSpecializationDecl *Spec =
- cast<ClassTemplateSpecializationDecl>(RD);
+ auto *Spec = cast<ClassTemplateSpecializationDecl>(RD);
Spec->setTemplateSpecializationKind(TSK);
Spec->setPointOfInstantiation(POI);
if (Record.readInt()) {
- auto PartialSpec =
+ auto *PartialSpec =
ReadDeclAs<ClassTemplatePartialSpecializationDecl>();
SmallVector<TemplateArgument, 8> TemplArgs;
Record.readTemplateArgumentList(TemplArgs);
@@ -4177,18 +4336,17 @@ void ASTDeclReader::UpdateDecl(Decl *D,
QualType DeducedResultType = Record.readType();
for (auto *Redecl : merged_redecls(D)) {
// FIXME: If the return type is already deduced, check that it matches.
- FunctionDecl *FD = cast<FunctionDecl>(Redecl);
+ auto *FD = cast<FunctionDecl>(Redecl);
Reader.getContext().adjustDeducedFunctionResultType(FD,
DeducedResultType);
}
break;
}
- case UPD_DECL_MARKED_USED: {
+ case UPD_DECL_MARKED_USED:
// Maintain AST consistency: any later redeclarations are used too.
D->markUsed(Reader.getContext());
break;
- }
case UPD_MANGLING_NUMBER:
Reader.getContext().setManglingNumber(cast<NamedDecl>(D),
diff --git a/lib/Serialization/ASTReaderInternals.h b/lib/Serialization/ASTReaderInternals.h
index 2b92ae65ea84..b5a1493a4f7e 100644
--- a/lib/Serialization/ASTReaderInternals.h
+++ b/lib/Serialization/ASTReaderInternals.h
@@ -40,7 +40,7 @@ class ModuleFile;
namespace reader {
-/// \brief Class that performs name lookup into a DeclContext stored
+/// Class that performs name lookup into a DeclContext stored
/// in an AST file.
class ASTDeclContextNameLookupTrait {
ASTReader &Reader;
@@ -121,7 +121,7 @@ struct DeclContextLookupTable {
MultiOnDiskHashTable<ASTDeclContextNameLookupTrait> Table;
};
-/// \brief Base class for the trait describing the on-disk hash table for the
+/// Base class for the trait describing the on-disk hash table for the
/// identifiers in an AST file.
///
/// This class is not useful by itself; rather, it provides common
@@ -156,7 +156,7 @@ public:
static internal_key_type ReadKey(const unsigned char* d, unsigned n);
};
-/// \brief Class that performs lookup for an identifier stored in an AST file.
+/// Class that performs lookup for an identifier stored in an AST file.
class ASTIdentifierLookupTrait : public ASTIdentifierLookupTraitBase {
ASTReader &Reader;
ModuleFile &F;
@@ -182,12 +182,12 @@ public:
ASTReader &getReader() const { return Reader; }
};
-/// \brief The on-disk hash table used to contain information about
+/// The on-disk hash table used to contain information about
/// all of the identifiers in the program.
using ASTIdentifierLookupTable =
llvm::OnDiskIterableChainedHashTable<ASTIdentifierLookupTrait>;
-/// \brief Class that performs lookup for a selector's entries in the global
+/// Class that performs lookup for a selector's entries in the global
/// method pool stored in an AST file.
class ASTSelectorLookupTrait {
ASTReader &Reader;
@@ -229,11 +229,11 @@ public:
data_type ReadData(Selector, const unsigned char* d, unsigned DataLen);
};
-/// \brief The on-disk hash table used for the global method pool.
+/// The on-disk hash table used for the global method pool.
using ASTSelectorLookupTable =
llvm::OnDiskChainedHashTable<ASTSelectorLookupTrait>;
-/// \brief Trait class used to search the on-disk hash table containing all of
+/// Trait class used to search the on-disk hash table containing all of
/// the header search information.
///
/// The on-disk hash table contains a mapping from each header path to
@@ -280,7 +280,7 @@ public:
data_type ReadData(internal_key_ref,const unsigned char *d, unsigned DataLen);
};
-/// \brief The on-disk hash table used for known header files.
+/// The on-disk hash table used for known header files.
using HeaderFileInfoLookupTable =
llvm::OnDiskChainedHashTable<HeaderFileInfoTrait>;
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 6163b811c769..d9d780b25b31 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -1,4 +1,4 @@
-//===--- ASTReaderStmt.cpp - Stmt/Expr Deserialization ----------*- C++ -*-===//
+//===- ASTReaderStmt.cpp - Stmt/Expr Deserialization ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,13 +14,55 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/AttrIterator.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclAccessPair.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenMPClause.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/CapturedStmt.h"
+#include "clang/Basic/ExpressionTraits.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Token.h"
+#include "clang/Serialization/ASTBitCodes.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <string>
+
using namespace clang;
-using namespace clang::serialization;
+using namespace serialization;
namespace clang {
@@ -68,19 +110,20 @@ namespace clang {
ASTStmtReader(ASTRecordReader &Record, llvm::BitstreamCursor &Cursor)
: Record(Record), DeclsCursor(Cursor) {}
- /// \brief The number of record fields required for the Stmt class
+ /// The number of record fields required for the Stmt class
/// itself.
static const unsigned NumStmtFields = 0;
- /// \brief The number of record fields required for the Expr class
+ /// The number of record fields required for the Expr class
/// itself.
static const unsigned NumExprFields = NumStmtFields + 7;
- /// \brief Read and initialize a ExplicitTemplateArgumentList structure.
+ /// Read and initialize an ExplicitTemplateArgumentList structure.
void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
TemplateArgumentLoc *ArgsLocArray,
unsigned NumTemplateArgs);
- /// \brief Read and initialize a ExplicitTemplateArgumentList structure.
+
+ /// Read and initialize an ExplicitTemplateArgumentList structure.
void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
unsigned NumTemplateArgs);
@@ -89,7 +132,8 @@ namespace clang {
void Visit##Type(Type *);
#include "clang/AST/StmtNodes.inc"
};
-}
+
+} // namespace clang
void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
TemplateArgumentLoc *ArgsLocArray,
@@ -146,7 +190,7 @@ void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
- LabelDecl *LD = ReadDeclAs<LabelDecl>();
+ auto *LD = ReadDeclAs<LabelDecl>();
LD->setStmt(S);
S->setDecl(LD);
S->setSubStmt(Record.readSubStmt());
@@ -489,6 +533,12 @@ void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
E->setValue(Record.getContext(), Record.readAPInt());
}
+void ASTStmtReader::VisitFixedPointLiteral(FixedPointLiteral *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation());
+ E->setValue(Record.getContext(), Record.readAPInt());
+}
+
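The new reader method mirrors VisitIntegerLiteral: one source location followed by an APInt payload. A sketch of what the symmetric ASTStmtWriter side would look like, assuming the usual Record.Add* helpers and an EXPR_FIXEDPOINT_LITERAL statement code (names assumed here, not taken from this diff):

    void ASTStmtWriter::VisitFixedPointLiteral(FixedPointLiteral *E) {
      VisitExpr(E);
      Record.AddSourceLocation(E->getLocation()); // pairs with ReadSourceLocation()
      Record.AddAPInt(E->getValue());             // pairs with Record.readAPInt()
      Code = serialization::EXPR_FIXEDPOINT_LITERAL; // assumed record code
    }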
void ASTStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
VisitExpr(E);
E->setRawSemantics(static_cast<Stmt::APFloatSemantics>(Record.readInt()));
@@ -508,8 +558,7 @@ void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
assert(Record.peekInt() == E->getNumConcatenated() &&
"Wrong number of concatenated tokens!");
Record.skipInts(1);
- StringLiteral::StringKind kind =
- static_cast<StringLiteral::StringKind>(Record.readInt());
+ auto kind = static_cast<StringLiteral::StringKind>(Record.readInt());
bool isPascal = Record.readInt();
// Read string data
@@ -553,6 +602,7 @@ void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
E->setSubExpr(Record.readSubExpr());
E->setOpcode((UnaryOperator::Opcode)Record.readInt());
E->setOperatorLoc(ReadSourceLocation());
+ E->setCanOverflow(Record.readInt());
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
@@ -565,7 +615,7 @@ void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
E->setRParenLoc(ReadSourceLocation());
E->setTypeSourceInfo(GetTypeSourceInfo());
for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
- OffsetOfNode::Kind Kind = static_cast<OffsetOfNode::Kind>(Record.readInt());
+ auto Kind = static_cast<OffsetOfNode::Kind>(Record.readInt());
SourceLocation Start = ReadSourceLocation();
SourceLocation End = ReadSourceLocation();
switch (Kind) {
@@ -585,7 +635,7 @@ void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
break;
case OffsetOfNode::Base: {
- CXXBaseSpecifier *Base = new (Record.getContext()) CXXBaseSpecifier();
+ auto *Base = new (Record.getContext()) CXXBaseSpecifier();
*Base = Record.readCXXBaseSpecifier();
E->setComponent(I, OffsetOfNode(Base));
break;
@@ -675,7 +725,7 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
E->setCastKind((CastKind)Record.readInt());
CastExpr::path_iterator BaseI = E->path_begin();
while (NumBaseSpecs--) {
- CXXBaseSpecifier *BaseSpec = new (Record.getContext()) CXXBaseSpecifier;
+ auto *BaseSpec = new (Record.getContext()) CXXBaseSpecifier;
*BaseSpec = Record.readCXXBaseSpecifier();
*BaseI++ = BaseSpec;
}
@@ -719,6 +769,7 @@ ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
+ E->setIsPartOfExplicitCast(Record.readInt());
}
void ASTStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
@@ -749,7 +800,7 @@ void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
VisitExpr(E);
- if (InitListExpr *SyntForm = cast_or_null<InitListExpr>(Record.readSubStmt()))
+ if (auto *SyntForm = cast_or_null<InitListExpr>(Record.readSubStmt()))
E->setSyntacticForm(SyntForm);
E->setLBraceLoc(ReadSourceLocation());
E->setRBraceLoc(ReadSourceLocation());
@@ -775,7 +826,7 @@ void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
}
void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
- typedef DesignatedInitExpr::Designator Designator;
+ using Designator = DesignatedInitExpr::Designator;
VisitExpr(E);
unsigned NumSubExprs = Record.readInt();
@@ -789,7 +840,7 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
while (Record.getIdx() < Record.size()) {
switch ((DesignatorTypes)Record.readInt()) {
case DESIG_FIELD_DECL: {
- FieldDecl *Field = ReadDeclAs<FieldDecl>();
+ auto *Field = ReadDeclAs<FieldDecl>();
SourceLocation DotLoc = ReadSourceLocation();
SourceLocation FieldLoc = ReadSourceLocation();
Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
@@ -994,9 +1045,9 @@ void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
assert(NumElements == E->getNumElements() && "Wrong number of elements");
bool HasPackExpansions = Record.readInt();
  assert(HasPackExpansions == E->HasPackExpansions && "Pack expansion mismatch");
- ObjCDictionaryLiteral::KeyValuePair *KeyValues =
+ auto *KeyValues =
E->getTrailingObjects<ObjCDictionaryLiteral::KeyValuePair>();
- ObjCDictionaryLiteral::ExpansionData *Expansions =
+ auto *Expansions =
E->getTrailingObjects<ObjCDictionaryLiteral::ExpansionData>();
for (unsigned I = 0; I != NumElements; ++I) {
KeyValues[I].Key = Record.readSubExpr();
@@ -1047,8 +1098,8 @@ void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
unsigned MethodRefFlags = Record.readInt();
bool Implicit = Record.readInt() != 0;
if (Implicit) {
- ObjCMethodDecl *Getter = ReadDeclAs<ObjCMethodDecl>();
- ObjCMethodDecl *Setter = ReadDeclAs<ObjCMethodDecl>();
+ auto *Getter = ReadDeclAs<ObjCMethodDecl>();
+ auto *Setter = ReadDeclAs<ObjCMethodDecl>();
E->setImplicitProperty(Getter, Setter, MethodRefFlags);
} else {
E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(), MethodRefFlags);
@@ -1085,8 +1136,7 @@ void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
E->SelLocsKind = Record.readInt();
E->setDelegateInitCall(Record.readInt());
E->IsImplicit = Record.readInt();
- ObjCMessageExpr::ReceiverKind Kind
- = static_cast<ObjCMessageExpr::ReceiverKind>(Record.readInt());
+ auto Kind = static_cast<ObjCMessageExpr::ReceiverKind>(Record.readInt());
switch (Kind) {
case ObjCMessageExpr::Instance:
E->setInstanceReceiver(Record.readSubExpr());
@@ -1520,8 +1570,8 @@ void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
unsigned NumDecls = Record.readInt();
UnresolvedSet<8> Decls;
for (unsigned i = 0; i != NumDecls; ++i) {
- NamedDecl *D = ReadDeclAs<NamedDecl>();
- AccessSpecifier AS = (AccessSpecifier)Record.readInt();
+ auto *D = ReadDeclAs<NamedDecl>();
+ auto AS = (AccessSpecifier)Record.readInt();
Decls.addDecl(D, AS);
}
E->initializeResults(Record.getContext(), Decls.begin(), Decls.end());
@@ -1555,7 +1605,7 @@ void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
E->Loc = Range.getBegin();
E->RParenLoc = Range.getEnd();
- TypeSourceInfo **Args = E->getTrailingObjects<TypeSourceInfo *>();
+ auto **Args = E->getTrailingObjects<TypeSourceInfo *>();
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
Args[I] = GetTypeSourceInfo();
}
@@ -1639,7 +1689,7 @@ void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
E->NumParameters = Record.readInt();
E->ParamPack = ReadDeclAs<ParmVarDecl>();
E->NameLoc = ReadSourceLocation();
- ParmVarDecl **Parms = E->getTrailingObjects<ParmVarDecl *>();
+ auto **Parms = E->getTrailingObjects<ParmVarDecl *>();
for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
Parms[i] = ReadDeclAs<ParmVarDecl>();
}
@@ -1647,7 +1697,7 @@ void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
VisitExpr(E);
E->State = Record.readSubExpr();
- auto VD = ReadDeclAs<ValueDecl>();
+ auto *VD = ReadDeclAs<ValueDecl>();
unsigned ManglingNumber = Record.readInt();
E->setExtendingDecl(VD, ManglingNumber);
}
@@ -1666,6 +1716,7 @@ void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
E->SourceExpr = Record.readSubExpr();
E->Loc = ReadSourceLocation();
+ E->setIsUnique(Record.readInt());
}
void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
@@ -1756,19 +1807,23 @@ void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
//===----------------------------------------------------------------------===//
namespace clang {
+
class OMPClauseReader : public OMPClauseVisitor<OMPClauseReader> {
ASTStmtReader *Reader;
ASTContext &Context;
+
public:
OMPClauseReader(ASTStmtReader *R, ASTRecordReader &Record)
: Reader(R), Context(Record.getContext()) {}
+
#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *C);
#include "clang/Basic/OpenMPKinds.def"
OMPClause *readClause();
void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
};
-}
+
+} // namespace clang
OMPClause *OMPClauseReader::readClause() {
OMPClause *C;
@@ -2393,7 +2448,7 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Reader->Record.readSubExpr();
- ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2487,7 +2542,7 @@ void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Reader->Record.readSubExpr();
- ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2529,7 +2584,7 @@ void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Reader->Record.readSubExpr();
- ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2579,7 +2634,7 @@ void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Reader->Record.readSubExpr();
- ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2622,7 +2677,7 @@ void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
Expr *AssociatedExpr = Reader->Record.readSubExpr();
- ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2632,6 +2687,7 @@ void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
+
void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
E->setLocStart(ReadSourceLocation());
E->setLocEnd(ReadSourceLocation());
@@ -2917,6 +2973,7 @@ void ASTStmtReader::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
+
void ASTStmtReader::VisitOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
VisitOMPLoopDirective(D);
@@ -3025,7 +3082,6 @@ Expr *ASTReader::ReadSubExpr() {
// stack. Evaluation terminates when we see a STMT_STOP record, and
// the single remaining expression on the stack is our result.
Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
-
ReadingKindTracker ReadingKind(Read_Stmt, *this);
llvm::BitstreamCursor &Cursor = F.DeclsCursor;
@@ -3254,15 +3310,15 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
bool HadMultipleCandidates = Record.readInt();
- NamedDecl *FoundD = Record.readDeclAs<NamedDecl>();
- AccessSpecifier AS = (AccessSpecifier)Record.readInt();
+ auto *FoundD = Record.readDeclAs<NamedDecl>();
+ auto AS = (AccessSpecifier)Record.readInt();
DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
QualType T = Record.readType();
- ExprValueKind VK = static_cast<ExprValueKind>(Record.readInt());
- ExprObjectKind OK = static_cast<ExprObjectKind>(Record.readInt());
+ auto VK = static_cast<ExprValueKind>(Record.readInt());
+ auto OK = static_cast<ExprObjectKind>(Record.readInt());
Expr *Base = ReadSubExpr();
- ValueDecl *MemberD = Record.readDeclAs<ValueDecl>();
+ auto *MemberD = Record.readDeclAs<ValueDecl>();
SourceLocation MemberLoc = Record.readSourceLocation();
DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc);
bool IsArrow = Record.readInt();
@@ -3382,93 +3438,121 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_OBJC_STRING_LITERAL:
S = new (Context) ObjCStringLiteral(Empty);
break;
+
case EXPR_OBJC_BOXED_EXPRESSION:
S = new (Context) ObjCBoxedExpr(Empty);
break;
+
case EXPR_OBJC_ARRAY_LITERAL:
S = ObjCArrayLiteral::CreateEmpty(Context,
Record[ASTStmtReader::NumExprFields]);
break;
+
case EXPR_OBJC_DICTIONARY_LITERAL:
S = ObjCDictionaryLiteral::CreateEmpty(Context,
Record[ASTStmtReader::NumExprFields],
Record[ASTStmtReader::NumExprFields + 1]);
break;
+
case EXPR_OBJC_ENCODE:
S = new (Context) ObjCEncodeExpr(Empty);
break;
+
case EXPR_OBJC_SELECTOR_EXPR:
S = new (Context) ObjCSelectorExpr(Empty);
break;
+
case EXPR_OBJC_PROTOCOL_EXPR:
S = new (Context) ObjCProtocolExpr(Empty);
break;
+
case EXPR_OBJC_IVAR_REF_EXPR:
S = new (Context) ObjCIvarRefExpr(Empty);
break;
+
case EXPR_OBJC_PROPERTY_REF_EXPR:
S = new (Context) ObjCPropertyRefExpr(Empty);
break;
+
case EXPR_OBJC_SUBSCRIPT_REF_EXPR:
S = new (Context) ObjCSubscriptRefExpr(Empty);
break;
+
case EXPR_OBJC_KVC_REF_EXPR:
llvm_unreachable("mismatching AST file");
+
case EXPR_OBJC_MESSAGE_EXPR:
S = ObjCMessageExpr::CreateEmpty(Context,
Record[ASTStmtReader::NumExprFields],
Record[ASTStmtReader::NumExprFields + 1]);
break;
+
case EXPR_OBJC_ISA:
S = new (Context) ObjCIsaExpr(Empty);
break;
+
case EXPR_OBJC_INDIRECT_COPY_RESTORE:
S = new (Context) ObjCIndirectCopyRestoreExpr(Empty);
break;
+
case EXPR_OBJC_BRIDGED_CAST:
S = new (Context) ObjCBridgedCastExpr(Empty);
break;
+
case STMT_OBJC_FOR_COLLECTION:
S = new (Context) ObjCForCollectionStmt(Empty);
break;
+
case STMT_OBJC_CATCH:
S = new (Context) ObjCAtCatchStmt(Empty);
break;
+
case STMT_OBJC_FINALLY:
S = new (Context) ObjCAtFinallyStmt(Empty);
break;
+
case STMT_OBJC_AT_TRY:
S = ObjCAtTryStmt::CreateEmpty(Context,
Record[ASTStmtReader::NumStmtFields],
Record[ASTStmtReader::NumStmtFields + 1]);
break;
+
case STMT_OBJC_AT_SYNCHRONIZED:
S = new (Context) ObjCAtSynchronizedStmt(Empty);
break;
+
case STMT_OBJC_AT_THROW:
S = new (Context) ObjCAtThrowStmt(Empty);
break;
+
case STMT_OBJC_AUTORELEASE_POOL:
S = new (Context) ObjCAutoreleasePoolStmt(Empty);
break;
+
case EXPR_OBJC_BOOL_LITERAL:
S = new (Context) ObjCBoolLiteralExpr(Empty);
break;
+
case EXPR_OBJC_AVAILABILITY_CHECK:
S = new (Context) ObjCAvailabilityCheckExpr(Empty);
break;
+
case STMT_SEH_LEAVE:
S = new (Context) SEHLeaveStmt(Empty);
break;
+
case STMT_SEH_EXCEPT:
S = new (Context) SEHExceptStmt(Empty);
break;
+
case STMT_SEH_FINALLY:
S = new (Context) SEHFinallyStmt(Empty);
break;
+
case STMT_SEH_TRY:
S = new (Context) SEHTryStmt(Empty);
break;
+
case STMT_CXX_CATCH:
S = new (Context) CXXCatchStmt(Empty);
break;
@@ -3750,11 +3834,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
- case STMT_OMP_TARGET_TEAMS_DIRECTIVE: {
+ case STMT_OMP_TARGET_TEAMS_DIRECTIVE:
S = OMPTargetTeamsDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
- }
case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_DIRECTIVE: {
auto NumClauses = Record[ASTStmtReader::NumStmtFields];
@@ -3847,36 +3930,47 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CXX_NULL_PTR_LITERAL:
S = new (Context) CXXNullPtrLiteralExpr(Empty);
break;
+
case EXPR_CXX_TYPEID_EXPR:
S = new (Context) CXXTypeidExpr(Empty, true);
break;
+
case EXPR_CXX_TYPEID_TYPE:
S = new (Context) CXXTypeidExpr(Empty, false);
break;
+
case EXPR_CXX_UUIDOF_EXPR:
S = new (Context) CXXUuidofExpr(Empty, true);
break;
+
case EXPR_CXX_PROPERTY_REF_EXPR:
S = new (Context) MSPropertyRefExpr(Empty);
break;
+
case EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR:
S = new (Context) MSPropertySubscriptExpr(Empty);
break;
+
case EXPR_CXX_UUIDOF_TYPE:
S = new (Context) CXXUuidofExpr(Empty, false);
break;
+
case EXPR_CXX_THIS:
S = new (Context) CXXThisExpr(Empty);
break;
+
case EXPR_CXX_THROW:
S = new (Context) CXXThrowExpr(Empty);
break;
+
case EXPR_CXX_DEFAULT_ARG:
S = new (Context) CXXDefaultArgExpr(Empty);
break;
+
case EXPR_CXX_DEFAULT_INIT:
S = new (Context) CXXDefaultInitExpr(Empty);
break;
+
case EXPR_CXX_BIND_TEMPORARY:
S = new (Context) CXXBindTemporaryExpr(Empty);
break;
@@ -3884,12 +3978,15 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CXX_SCALAR_VALUE_INIT:
S = new (Context) CXXScalarValueInitExpr(Empty);
break;
+
case EXPR_CXX_NEW:
S = new (Context) CXXNewExpr(Empty);
break;
+
case EXPR_CXX_DELETE:
S = new (Context) CXXDeleteExpr(Empty);
break;
+
case EXPR_CXX_PSEUDO_DESTRUCTOR:
S = new (Context) CXXPseudoDestructorExpr(Empty);
break;
@@ -4033,7 +4130,6 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_DEPENDENT_COAWAIT:
S = new (Context) DependentCoawaitExpr(Empty);
break;
-
}
// We hit a STMT_STOP, so we're done with this expression.
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 1e72ced2ee36..1a8d806e9d24 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -53,7 +53,6 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VersionTuple.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/MacroInfo.h"
@@ -79,16 +78,17 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitCodes.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compression.h"
+#include "llvm/Support/DJB.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
@@ -97,6 +97,7 @@
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SHA1.h"
+#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -138,10 +139,10 @@ namespace clang {
ASTWriter &Writer;
ASTRecordWriter Record;
- /// \brief Type code that corresponds to the record generated.
+ /// Type code that corresponds to the record generated.
TypeCode Code = static_cast<TypeCode>(0);
- /// \brief Abbreviation to use for the record, if any.
+ /// Abbreviation to use for the record, if any.
unsigned AbbrevToUse = 0;
public:
@@ -276,6 +277,7 @@ void ASTTypeWriter::VisitFunctionType(const FunctionType *T) {
Record.push_back(C.getCC());
Record.push_back(C.getProducesResult());
Record.push_back(C.getNoCallerSavedRegs());
+ Record.push_back(C.getNoCfCheck());
if (C.getHasRegParm() || C.getRegParm() || C.getProducesResult())
AbbrevToUse = 0;
@@ -293,7 +295,7 @@ static void addExceptionSpec(const FunctionProtoType *T,
Record.push_back(T->getNumExceptions());
for (unsigned I = 0, N = T->getNumExceptions(); I != N; ++I)
Record.AddTypeRef(T->getExceptionType(I));
- } else if (T->getExceptionSpecType() == EST_ComputedNoexcept) {
+ } else if (isComputedNoexcept(T->getExceptionSpecType())) {
Record.AddStmt(T->getNoexceptExpr());
} else if (T->getExceptionSpecType() == EST_Uninstantiated) {
Record.AddDeclRef(T->getExceptionSpecDecl());
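EST_ComputedNoexcept is gone as a single enumerator; the isComputedNoexcept() predicate now covers the exception-spec kinds that carry a noexcept expression. A sketch equivalent in spirit to the real predicate (which lives in clang/Basic/ExceptionSpecificationType.h):

    inline bool isComputedNoexcept(ExceptionSpecificationType EST) {
      // noexcept(expr) forms: still dependent, or evaluated to true/false.
      return EST == EST_DependentNoexcept || EST == EST_NoexceptTrue ||
             EST == EST_NoexceptFalse;
    }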
@@ -453,7 +455,15 @@ ASTTypeWriter::VisitDependentSizedExtVectorType(
Code = TYPE_DEPENDENT_SIZED_EXT_VECTOR;
}
-void
+void ASTTypeWriter::VisitDependentVectorType(const DependentVectorType *T) {
+ Record.AddTypeRef(T->getElementType());
+ Record.AddStmt(const_cast<Expr*>(T->getSizeExpr()));
+ Record.AddSourceLocation(T->getAttributeLoc());
+ Record.push_back(T->getVectorKind());
+ Code = TYPE_DEPENDENT_SIZED_VECTOR;
+}
+
+void
ASTTypeWriter::VisitDependentAddressSpaceType(
const DependentAddressSpaceType *T) {
Record.AddTypeRef(T->getPointeeType());
@@ -511,6 +521,7 @@ void ASTTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
Record.push_back(T->getKeyword());
Record.AddNestedNameSpecifier(T->getQualifier());
Record.AddTypeRef(T->getNamedType());
+ Record.AddDeclRef(T->getOwnedTagDecl());
Code = TYPE_ELABORATED;
}
@@ -661,7 +672,7 @@ void TypeLocWriter::VisitDependentAddressSpaceTypeLoc(
SourceRange range = TL.getAttrOperandParensRange();
Record.AddSourceLocation(range.getBegin());
Record.AddSourceLocation(range.getEnd());
- Record.AddStmt(TL.getAttrExprOperand());
+ Record.AddStmt(TL.getAttrExprOperand());
}
void TypeLocWriter::VisitDependentSizedExtVectorTypeLoc(
@@ -673,6 +684,11 @@ void TypeLocWriter::VisitVectorTypeLoc(VectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+void TypeLocWriter::VisitDependentVectorTypeLoc(
+ DependentVectorTypeLoc TL) {
+ Record.AddSourceLocation(TL.getNameLoc());
+}
+
void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
@@ -884,6 +900,7 @@ void ASTWriter::WriteTypeAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // CC
Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
Abv->Add(BitCodeAbbrevOp(0)); // NoCallerSavedRegs
+ Abv->Add(BitCodeAbbrevOp(0)); // NoCfCheck
// FunctionProtoType
Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
@@ -1104,6 +1121,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(UNUSED_FILESCOPED_DECLS);
RECORD(PPD_ENTITIES_OFFSETS);
RECORD(VTABLE_USES);
+ RECORD(PPD_SKIPPED_RANGES);
RECORD(REFERENCED_SELECTOR_POOL);
RECORD(TU_UPDATE_LEXICAL);
RECORD(SEMA_DECL_REFS);
@@ -1293,7 +1311,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_PRAGMA_COMMENT);
RECORD(DECL_PRAGMA_DETECT_MISMATCH);
RECORD(DECL_OMP_DECLARE_REDUCTION);
-
+
// Statements and Exprs can occur in the Decls and Types block.
AddStmtsExprs(Stream, Record);
@@ -1316,7 +1334,7 @@ void ASTWriter::WriteBlockInfoBlock() {
Stream.ExitBlock();
}
-/// \brief Prepares a path for being written to an AST file by converting it
+/// Prepares a path for being written to an AST file by converting it
/// to an absolute path and removing nested './'s.
///
/// \return \c true if the path was changed.
@@ -1326,7 +1344,7 @@ static bool cleanPathForOutput(FileManager &FileMgr,
return Changed | llvm::sys::path::remove_dots(Path);
}
-/// \brief Adjusts the given filename to only write out the portion of the
+/// Adjusts the given filename to only write out the portion of the
/// filename that is not part of the system root directory.
///
/// \param Filename the file name to adjust.
@@ -1436,7 +1454,7 @@ ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
return Signature;
}
-/// \brief Write the control block.
+/// Write the control block.
void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
StringRef isysroot,
const std::string &OutputFile) {
@@ -1444,7 +1462,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Stream.EnterSubblock(CONTROL_BLOCK_ID, 5);
RecordData Record;
-
+
// Metadata
auto MetadataAbbrev = std::make_shared<BitCodeAbbrev>();
MetadataAbbrev->Add(BitCodeAbbrevOp(METADATA));
@@ -1454,16 +1472,23 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Timestamps
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // PCHHasObjectFile
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
unsigned MetadataAbbrevCode = Stream.EmitAbbrev(std::move(MetadataAbbrev));
assert((!WritingModule || isysroot.empty()) &&
"writing module as a relocatable PCH?");
{
- RecordData::value_type Record[] = {METADATA, VERSION_MAJOR, VERSION_MINOR,
- CLANG_VERSION_MAJOR, CLANG_VERSION_MINOR,
- !isysroot.empty(), IncludeTimestamps,
- ASTHasCompilerErrors};
+ RecordData::value_type Record[] = {
+ METADATA,
+ VERSION_MAJOR,
+ VERSION_MINOR,
+ CLANG_VERSION_MAJOR,
+ CLANG_VERSION_MINOR,
+ !isysroot.empty(),
+ IncludeTimestamps,
+ Context.getLangOpts().BuildingPCHWithObjectFile,
+ ASTHasCompilerErrors};
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
}
@@ -1732,7 +1757,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
namespace {
-/// \brief An input file.
+/// An input file.
struct InputFileEntry {
const FileEntry *File;
bool IsSystemFile;
@@ -1842,7 +1867,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
// Source Manager Serialization
//===----------------------------------------------------------------------===//
-/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// Create an abbreviation for the SLocEntry that refers to a
/// file.
static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
@@ -1861,7 +1886,7 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
return Stream.EmitAbbrev(std::move(Abbrev));
}
-/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// Create an abbreviation for the SLocEntry that refers to a
/// buffer.
static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
@@ -1876,7 +1901,7 @@ static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
return Stream.EmitAbbrev(std::move(Abbrev));
}
-/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// Create an abbreviation for the SLocEntry that refers to a
/// buffer's blob.
static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream,
bool Compressed) {
@@ -1891,7 +1916,7 @@ static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream,
return Stream.EmitAbbrev(std::move(Abbrev));
}
-/// \brief Create an abbreviation for the SLocEntry that refers to a macro
+/// Create an abbreviation for the SLocEntry that refers to a macro
/// expansion.
static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
@@ -1902,6 +1927,7 @@ static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // End location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Is token range
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Token length
return Stream.EmitAbbrev(std::move(Abbrev));
}
@@ -1911,11 +1937,11 @@ namespace {
// Trait used for the on-disk hash table of header search information.
class HeaderFileInfoTrait {
ASTWriter &Writer;
-
+
// Keep track of the framework names we've used during serialization.
SmallVector<char, 128> FrameworkStringData;
llvm::StringMap<unsigned> FrameworkNameOffset;
-
+
public:
HeaderFileInfoTrait(ASTWriter &Writer) : Writer(Writer) {}
@@ -1928,7 +1954,7 @@ namespace {
using UnresolvedModule =
llvm::PointerIntPair<Module *, 2, ModuleMap::ModuleHeaderRole>;
-
+
struct data_type {
const HeaderFileInfo &HFI;
ArrayRef<ModuleMap::KnownHeader> KnownHeaders;
@@ -1938,19 +1964,19 @@ namespace {
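Giving Tail an in-class initializer (Tail = nullptr above) lets the constructor's init-list shrink to just the members that vary per instance; every constructor picks up the default. In miniature, with hypothetical names:

    class VisitorSketch {
      ObjCCategoryDecl *Tail = nullptr; // NSDMI: applies to every constructor
      ASTReader &Reader;
    public:
      explicit VisitorSketch(ASTReader &R) : Reader(R) {} // Tail needs no mention
    };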
using hash_value_type = unsigned;
using offset_type = unsigned;
-
+
hash_value_type ComputeHash(key_type_ref key) {
// The hash is based only on size/time of the file, so that the reader can
// match even when symlinking or excess path elements ("foo/../", "../")
// change the form of the name. However, complete path is still the key.
return llvm::hash_combine(key.Size, key.ModTime);
}
-
+
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
unsigned KeyLen = key.Filename.size() + 1 + 8 + 8;
LE.write<uint16_t>(KeyLen);
unsigned DataLen = 1 + 2 + 4 + 4;
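llvm::support::endian::Writer changed from a class template parameterized on endianness to a class taking the endianness as a constructor argument; the remaining hunks in this file follow the same migration. A minimal usage sketch:

    using namespace llvm::support;
    endian::Writer LE(Out, little);           // endianness is now a runtime argument
    LE.write<uint16_t>(KeyLen);               // typed writes are unchanged
    endian::write<uint32_t>(Out, 0, little);  // one-shot helper for a single value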
@@ -1966,7 +1992,7 @@ namespace {
void EmitKey(raw_ostream& Out, key_type_ref key, unsigned KeyLen) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
LE.write<uint64_t>(key.Size);
KeyLen -= 8;
LE.write<uint64_t>(key.ModTime);
@@ -1978,16 +2004,16 @@ namespace {
data_type_ref Data, unsigned DataLen) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
uint64_t Start = Out.tell(); (void)Start;
-
+
unsigned char Flags = (Data.HFI.isImport << 5)
| (Data.HFI.isPragmaOnce << 4)
| (Data.HFI.DirInfo << 1)
| Data.HFI.IndexHeaderMapHeader;
LE.write<uint8_t>(Flags);
LE.write<uint16_t>(Data.HFI.NumIncludes);
-
+
if (!Data.HFI.ControllingMacro)
LE.write<uint32_t>(Data.HFI.ControllingMacroID);
else
@@ -2000,10 +2026,10 @@ namespace {
= FrameworkNameOffset.find(Data.HFI.Framework);
if (Pos == FrameworkNameOffset.end()) {
Offset = FrameworkStringData.size() + 1;
- FrameworkStringData.append(Data.HFI.Framework.begin(),
+ FrameworkStringData.append(Data.HFI.Framework.begin(),
Data.HFI.Framework.end());
FrameworkStringData.push_back(0);
-
+
FrameworkNameOffset[Data.HFI.Framework] = Offset;
} else
Offset = Pos->second;
@@ -2027,14 +2053,14 @@ namespace {
assert(Out.tell() - Start == DataLen && "Wrong data length");
}
-
+
const char *strings_begin() const { return FrameworkStringData.begin(); }
const char *strings_end() const { return FrameworkStringData.end(); }
};
} // namespace
-/// \brief Write the header search block for the list of files that
+/// Write the header search block for the list of files known to header search.
///
/// \param HS The header search structure to save.
void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
@@ -2100,7 +2126,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
SmallVector<const FileEntry *, 16> FilesByUID;
HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
-
+
if (FilesByUID.size() > HS.header_file_size())
FilesByUID.resize(HS.header_file_size());
@@ -2148,7 +2174,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
llvm::raw_svector_ostream Out(TableData);
// Make sure that no bucket is at offset 0
- endian::Writer<little>(Out).write<uint32_t>(0);
+ endian::write<uint32_t>(Out, 0, little);
BucketOffset = Generator.Emit(Out, GeneratorTrait);
}
@@ -2162,13 +2188,13 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned TableAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
-
+
// Write the header search table
RecordData::value_type Record[] = {HEADER_SEARCH_TABLE, BucketOffset,
NumHeaderSearchEntries, TableData.size()};
TableData.append(GeneratorTrait.strings_begin(),GeneratorTrait.strings_end());
Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData);
-
+
// Free all of the strings we had to duplicate.
for (unsigned I = 0, N = SavedStrings.size(); I != N; ++I)
free(const_cast<char *>(SavedStrings[I]));
@@ -2198,7 +2224,7 @@ static void emitBlob(llvm::BitstreamWriter &Stream, StringRef Blob,
Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record, Blob);
}
-/// \brief Writes the block containing the serialized form of the
+/// Writes the block containing the serialized form of the
/// source manager.
///
/// TODO: We should probably use an on-disk hash table (stored in a
@@ -2268,7 +2294,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(InputFileIDs[Content->OrigEntry]);
Record.push_back(File.NumCreatedFIDs);
-
+
FileDeclIDsTy::iterator FDI = FileDeclIDs.find(FID);
if (FDI != FileDeclIDs.end()) {
Record.push_back(FDI->second->FirstDeclIndex);
@@ -2277,9 +2303,9 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(0);
Record.push_back(0);
}
-
+
Stream.EmitRecordWithAbbrev(SLocFileAbbrv, Record);
-
+
if (Content->BufferOverridden || Content->IsTransient)
EmitBlob = true;
} else {
@@ -2318,6 +2344,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
? SourceLocation()
: Expansion.getExpansionLocEnd(),
Record);
+ Record.push_back(Expansion.isExpansionTokenRange());
// Compute the token length for this macro expansion.
unsigned NextOffset = SourceMgr.getNextLocalOffset();
@@ -2420,7 +2447,7 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule,
return false;
}
-/// \brief Writes the block containing the serialized form of the
+/// Writes the block containing the serialized form of the
/// preprocessor.
void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
PreprocessingRecord *PPRec = PP.getPreprocessingRecord();
@@ -2480,8 +2507,8 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
MacroIdentifiers.push_back(Id.second);
// Sort the set of macro definitions that need to be serialized by the
// name of the macro, to provide a stable ordering.
- std::sort(MacroIdentifiers.begin(), MacroIdentifiers.end(),
- llvm::less_ptr<IdentifierInfo>());
+ llvm::sort(MacroIdentifiers.begin(), MacroIdentifiers.end(),
+ llvm::less_ptr<IdentifierInfo>());
// Emit the macro directives as a list and associate the offset with the
// identifier they belong to.
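llvm::sort forwards to std::sort, but in builds configured with LLVM_ENABLE_EXPENSIVE_CHECKS it shuffles the range first, so comparators that are not strict weak orderings (or that depend on pointer values) fail reproducibly instead of producing output that varies run to run. The call shape is a drop-in replacement:

    llvm::sort(MacroIdentifiers.begin(), MacroIdentifiers.end(),
               llvm::less_ptr<IdentifierInfo>());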
@@ -2540,7 +2567,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Record.clear();
}
- /// \brief Offsets of each of the macros into the bitstream, indexed by
+ /// Offsets of each of the macros into the bitstream, indexed by
/// the local macro ID
///
/// For each identifier that is associated with a macro, this map
@@ -2640,8 +2667,8 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
// If the preprocessor has a preprocessing record, emit it.
unsigned NumPreprocessingRecords = 0;
using namespace llvm;
-
- // Set up the abbreviation for
+
+  // Set up the abbreviation for inclusion directives.
unsigned InclusionAbbrev = 0;
{
auto Abbrev = std::make_shared<BitCodeAbbrev>();
@@ -2653,15 +2680,15 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
InclusionAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
}
-
- unsigned FirstPreprocessorEntityID
- = (Chain ? PPRec.getNumLoadedPreprocessedEntities() : 0)
+
+ unsigned FirstPreprocessorEntityID
+ = (Chain ? PPRec.getNumLoadedPreprocessedEntities() : 0)
+ NUM_PREDEF_PP_ENTITY_IDS;
unsigned NextPreprocessorEntityID = FirstPreprocessorEntityID;
RecordData Record;
for (PreprocessingRecord::iterator E = PPRec.local_begin(),
EEnd = PPRec.local_end();
- E != EEnd;
+ E != EEnd;
(void)++E, ++NumPreprocessingRecords, ++NextPreprocessorEntityID) {
Record.clear();
@@ -2702,7 +2729,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
Stream.EmitRecordWithBlob(InclusionAbbrev, Record, Buffer);
continue;
}
-
+
llvm_unreachable("Unhandled PreprocessedEntity in ASTWriter");
}
Stream.ExitBlock();
@@ -2726,6 +2753,26 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
Stream.EmitRecordWithBlob(PPEOffsetAbbrev, Record,
bytes(PreprocessedEntityOffsets));
}
+
+ // Write the skipped region table for the preprocessing record.
+ ArrayRef<SourceRange> SkippedRanges = PPRec.getSkippedRanges();
+ if (SkippedRanges.size() > 0) {
+ std::vector<PPSkippedRange> SerializedSkippedRanges;
+ SerializedSkippedRanges.reserve(SkippedRanges.size());
+ for (auto const& Range : SkippedRanges)
+ SerializedSkippedRanges.emplace_back(Range);
+
+ using namespace llvm;
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(PPD_SKIPPED_RANGES));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned PPESkippedRangeAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
+
+ Record.clear();
+ Record.push_back(PPD_SKIPPED_RANGES);
+ Stream.EmitRecordWithBlob(PPESkippedRangeAbbrev, Record,
+ bytes(SerializedSkippedRanges));
+ }
}
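Each skipped preprocessor region (#if 0 blocks and friends) is serialized as a fixed-size entry, and the whole vector goes out as a single blob. A plausible shape for the entry, assuming it packs the raw begin/end source-location encodings (the real struct lives in clang/Serialization/ASTBitCodes.h):

    struct PPSkippedRange {
      uint32_t Begin, End;  // raw SourceLocation encodings (assumed layout)
      PPSkippedRange(SourceRange R)
          : Begin(R.getBegin().getRawEncoding()),
            End(R.getEnd().getRawEncoding()) {}
    };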
unsigned ASTWriter::getLocalOrImportedSubmoduleID(Module *Mod) {
@@ -2755,21 +2802,21 @@ unsigned ASTWriter::getSubmoduleID(Module *Mod) {
return ID;
}
-/// \brief Compute the number of modules within the given tree (including the
+/// Compute the number of modules within the given tree (including the
/// given module).
static unsigned getNumberOfModules(Module *Mod) {
unsigned ChildModules = 0;
for (auto Sub = Mod->submodule_begin(), SubEnd = Mod->submodule_end();
Sub != SubEnd; ++Sub)
ChildModules += getNumberOfModules(*Sub);
-
+
return ChildModules + 1;
}
void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Enter the submodule description block.
Stream.EnterSubblock(SUBMODULE_BLOCK_ID, /*bits for abbreviations*/5);
-
+
// Write the abbreviations needed for the submodules block.
using namespace llvm;
@@ -2786,6 +2833,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExplicit...
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExportWild...
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ConfigMacrosExh...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ModuleMapIsPriv...
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
unsigned DefinitionAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
@@ -2862,7 +2910,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
getNumberOfModules(WritingModule),
FirstSubmoduleID - NUM_PREDEF_SUBMODULE_IDS};
Stream.EmitRecord(SUBMODULE_METADATA, Record);
-
+
// Write all of the submodules.
std::queue<Module *> Q;
Q.push(WritingModule);
@@ -2890,7 +2938,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Mod->InferSubmodules,
Mod->InferExplicitSubmodules,
Mod->InferExportWildcard,
- Mod->ConfigMacrosExhaustive};
+ Mod->ConfigMacrosExhaustive,
+ Mod->ModuleMapIsPrivate};
Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
}
@@ -2938,7 +2987,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, H->getName());
}
- // Emit the imports.
+ // Emit the imports.
if (!Mod->Imports.empty()) {
RecordData Record;
for (auto *I : Mod->Imports)
@@ -2946,7 +2995,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
}
- // Emit the exports.
+ // Emit the exports.
if (!Mod->Exports.empty()) {
RecordData Record;
for (const auto &E : Mod->Exports) {
@@ -2996,12 +3045,12 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
RecordData::value_type Record[] = {SUBMODULE_EXPORT_AS};
Stream.EmitRecordWithBlob(ExportAsAbbrev, Record, Mod->ExportAsModule);
}
-
+
// Queue up the submodules of this module.
for (auto *M : Mod->submodules())
Q.push(M);
}
-
+
Stream.ExitBlock();
assert((NextSubmoduleID - FirstSubmoduleID ==
@@ -3040,7 +3089,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
unsigned &DiagStateID = DiagStateIDMap[State];
Record.push_back(DiagStateID);
-
+
if (DiagStateID == 0) {
DiagStateID = ++CurrID;
@@ -3071,8 +3120,11 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
!FileIDAndFile.second.HasLocalTransitions)
continue;
++NumLocations;
- AddSourceLocation(Diag.SourceMgr->getLocForStartOfFile(FileIDAndFile.first),
- Record);
+
+ SourceLocation Loc = Diag.SourceMgr->getComposedLoc(FileIDAndFile.first, 0);
+ assert(!Loc.isInvalid() && "start loc for valid FileID is invalid");
+ AddSourceLocation(Loc, Record);
+
Record.push_back(FileIDAndFile.second.StateTransitions.size());
for (auto &StatePoint : FileIDAndFile.second.StateTransitions) {
Record.push_back(StatePoint.Offset);
@@ -3099,7 +3151,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
// Type Serialization
//===----------------------------------------------------------------------===//
-/// \brief Write the representation of a type to the AST stream.
+/// Write the representation of a type to the AST stream.
void ASTWriter::WriteType(QualType T) {
TypeIdx &IdxRef = TypeIdxs[T];
if (IdxRef.getIndex() == 0) // we haven't seen this type before.
@@ -3131,11 +3183,11 @@ void ASTWriter::WriteType(QualType T) {
// Declaration Serialization
//===----------------------------------------------------------------------===//
-/// \brief Write the block containing all of the declaration IDs
+/// Write the block containing all of the declaration IDs
/// lexically declared within the given DeclContext.
///
/// \returns the offset of the DECL_CONTEXT_LEXICAL block within the
-/// bistream, or 0 if no block was written.
+/// bitstream, or 0 if no block was written.
uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
DeclContext *DC) {
if (DC->decls_empty())
@@ -3190,8 +3242,8 @@ void ASTWriter::WriteFileDeclIDsMap() {
SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs(
FileDeclIDs.begin(), FileDeclIDs.end());
- std::sort(SortedFileDeclIDs.begin(), SortedFileDeclIDs.end(),
- llvm::less_first());
+ llvm::sort(SortedFileDeclIDs.begin(), SortedFileDeclIDs.end(),
+ llvm::less_first());
// Join the vectors of DeclIDs from all files.
SmallVector<DeclID, 256> FileGroupedDeclIDs;
@@ -3214,6 +3266,9 @@ void ASTWriter::WriteFileDeclIDsMap() {
void ASTWriter::WriteComments() {
Stream.EnterSubblock(COMMENTS_BLOCK_ID, 3);
+ auto _ = llvm::make_scope_exit([this] { Stream.ExitBlock(); });
+ if (!PP->getPreprocessorOpts().WriteCommentListToPCH)
+ return;
ArrayRef<RawComment *> RawComments = Context->Comments.getComments();
RecordData Record;
for (const auto *I : RawComments) {
@@ -3224,7 +3279,6 @@ void ASTWriter::WriteComments() {
Record.push_back(I->isAlmostTrailingComment());
Stream.EmitRecord(COMMENTS_RAW_COMMENT, Record);
}
- Stream.ExitBlock();
}
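llvm::make_scope_exit builds a RAII guard whose callback runs when the guard is destroyed, so Stream.ExitBlock() now fires on the new early return as well as on the normal path. The idiom, reduced (shouldWrite is a hypothetical stand-in for the WriteCommentListToPCH check):

    Stream.EnterSubblock(COMMENTS_BLOCK_ID, 3);
    auto Guard = llvm::make_scope_exit([&] { Stream.ExitBlock(); });
    if (!shouldWrite())
      return;            // guard still calls Stream.ExitBlock()
    // ... emit records; ExitBlock runs when Guard goes out of scope ...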
//===----------------------------------------------------------------------===//
@@ -3261,7 +3315,7 @@ public:
data_type_ref Methods) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
      unsigned KeyLen = 2 + (Sel.getNumArgs() ? Sel.getNumArgs() * 4 : 4);
LE.write<uint16_t>(KeyLen);
unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
@@ -3280,7 +3334,7 @@ public:
void EmitKey(raw_ostream& Out, Selector Sel, unsigned) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
uint64_t Start = Out.tell();
assert((Start >> 32) == 0 && "Selector key offset too large");
Writer.SetSelectorOffset(Sel, Start);
@@ -3297,7 +3351,7 @@ public:
data_type_ref Methods, unsigned DataLen) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
uint64_t Start = Out.tell(); (void)Start;
LE.write<uint32_t>(Methods.ID);
unsigned NumInstanceMethods = 0;
@@ -3343,7 +3397,7 @@ public:
} // namespace
-/// \brief Write ObjC data: selectors and the method pool.
+/// Write ObjC data: selectors and the method pool.
///
/// The method pool contains both instance and factory methods, stored
/// in an on-disk hash table indexed by the selector. The hash table also
@@ -3409,7 +3463,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
ASTMethodPoolTrait Trait(*this);
llvm::raw_svector_ostream Out(MethodPool);
// Make sure that no bucket is at offset 0
- endian::Writer<little>(Out).write<uint32_t>(0);
+ endian::write<uint32_t>(Out, 0, little);
BucketOffset = Generator.Emit(Out, Trait);
}
@@ -3447,7 +3501,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
}
}
-/// \brief Write the selectors referenced in @selector expression into AST file.
+/// Write the selectors referenced in @selector expression into AST file.
void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) {
using namespace llvm;
@@ -3522,8 +3576,8 @@ class ASTIdentifierTableTrait {
bool IsModule;
bool NeedDecls;
ASTWriter::RecordData *InterestingIdentifierOffsets;
-
- /// \brief Determines whether this is an "interesting" identifier that needs a
+
+ /// Determines whether this is an "interesting" identifier that needs a
/// full IdentifierInfo structure written into the hash table. Notably, this
/// doesn't check whether the name has macros defined; use PublicMacroIterator
/// to check that.
@@ -3558,7 +3612,7 @@ public:
bool needDecls() const { return NeedDecls; }
static hash_value_type ComputeHash(const IdentifierInfo* II) {
- return llvm::HashString(II->getName());
+ return llvm::djbHash(II->getName());
}
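These on-disk tables need a fixed hash function shared by writer and reader, so the retired llvm::HashString call sites now use the Bernstein hash llvm::djbHash directly (GlobalModuleIndex.cpp below makes the matching include swap from StringExtras.h to Support/DJB.h). A minimal sketch:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/DJB.h"
    #include <cstdint>

    uint32_t computeKeyHash(llvm::StringRef Key) {
      // djb2: a small, stable string hash suitable for on-disk tables.
      return llvm::djbHash(Key);
    }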
bool isInterestingIdentifier(const IdentifierInfo *II) {
@@ -3591,7 +3645,7 @@ public:
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
assert((uint16_t)DataLen == DataLen && (uint16_t)KeyLen == KeyLen);
LE.write<uint16_t>(DataLen);
@@ -3620,7 +3674,7 @@ public:
IdentID ID, unsigned) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
auto MacroOffset = Writer.getMacroDirectivesOffset(II);
if (!isInterestingIdentifier(II, MacroOffset)) {
@@ -3665,12 +3719,12 @@ public:
} // namespace
-/// \brief Write the identifier table into the AST file.
+/// Write the identifier table into the AST file.
///
/// The identifier table consists of a blob containing string data
/// (the actual identifiers themselves) and a separate "offsets" index
/// that maps identifier IDs to locations within the blob.
-void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
+void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
IdentifierResolver &IdResolver,
bool IsModule) {
using namespace llvm;
@@ -3695,7 +3749,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
IIs.push_back(ID.second);
// Sort the identifiers lexicographically before getting references to them so
// that their order is stable.
- std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ llvm::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
for (const IdentifierInfo *II : IIs)
if (Trait.isInterestingNonMacroIdentifier(II))
getIdentifierRef(II);
@@ -3724,7 +3778,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
llvm::raw_svector_ostream Out(IdentifierTable);
// Make sure that no bucket is at offset 0
- endian::Writer<little>(Out).write<uint32_t>(0);
+ endian::write<uint32_t>(Out, 0, little);
BucketOffset = Generator.Emit(Out, Trait);
}
@@ -3820,8 +3874,7 @@ public:
using namespace llvm::support;
- endian::Writer<little>(Out)
- .write<uint32_t>(Writer.getChain()->getModuleFileID(F));
+ endian::write<uint32_t>(Out, Writer.getChain()->getModuleFileID(F), little);
}
std::pair<unsigned, unsigned> EmitKeyDataLength(raw_ostream &Out,
@@ -3829,7 +3882,7 @@ public:
data_type_ref Lookup) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
unsigned KeyLen = 1;
switch (Name.getKind()) {
case DeclarationName::Identifier:
@@ -3863,7 +3916,7 @@ public:
void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
LE.write<uint8_t>(Name.getKind());
switch (Name.getKind()) {
case DeclarationName::Identifier:
@@ -3895,7 +3948,7 @@ public:
unsigned DataLen) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
uint64_t Start = Out.tell(); (void)Start;
for (unsigned I = Lookup.first, N = Lookup.second; I != N; ++I)
LE.write<uint32_t>(DeclIDs[I]);
@@ -3993,7 +4046,7 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
}
// Sort the names into a stable order.
- std::sort(Names.begin(), Names.end());
+ llvm::sort(Names.begin(), Names.end());
if (auto *D = dyn_cast<CXXRecordDecl>(DC)) {
// We need to establish an ordering of constructor and conversion function
@@ -4096,7 +4149,7 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
Generator.emit(LookupTable, Trait, Lookups ? &Lookups->Table : nullptr);
}
-/// \brief Write the block containing all of the declaration IDs
+/// Write the block containing all of the declaration IDs
/// visible from the given DeclContext.
///
/// \returns the offset of the DECL_CONTEXT_VISIBLE block within the
@@ -4130,7 +4183,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
std::make_pair(Entry.first, Entry.second.getLookupResult()));
}
- std::sort(LookupResults.begin(), LookupResults.end(), llvm::less_first());
+ llvm::sort(LookupResults.begin(), LookupResults.end(), llvm::less_first());
for (auto &NameAndResult : LookupResults) {
DeclarationName Name = NameAndResult.first;
DeclContext::lookup_result Result = NameAndResult.second;
@@ -4186,7 +4239,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
return Offset;
}
-/// \brief Write an UPDATE_VISIBLE block for the given context.
+/// Write an UPDATE_VISIBLE block for the given context.
///
/// UPDATE_VISIBLE blocks contain the declarations that are added to an existing
/// DeclContext in a dependent AST file. As such, they only exist for the TU
@@ -4211,13 +4264,13 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable);
}
-/// \brief Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
+/// Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
RecordData::value_type Record[] = {Opts.getInt()};
Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
}
-/// \brief Write an OPENCL_EXTENSIONS block for the given OpenCLOptions.
+/// Write an OPENCL_EXTENSIONS block for the given OpenCLOptions.
void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
if (!SemaRef.Context.getLangOpts().OpenCL)
return;
@@ -4274,16 +4327,16 @@ void ASTWriter::WriteCUDAPragmas(Sema &SemaRef) {
void ASTWriter::WriteObjCCategories() {
SmallVector<ObjCCategoriesInfo, 2> CategoriesMap;
RecordData Categories;
-
+
for (unsigned I = 0, N = ObjCClassesWithCategories.size(); I != N; ++I) {
unsigned Size = 0;
unsigned StartIndex = Categories.size();
-
+
ObjCInterfaceDecl *Class = ObjCClassesWithCategories[I];
-
+
// Allocate space for the size.
Categories.push_back(0);
-
+
// Add the categories.
for (ObjCInterfaceDecl::known_categories_iterator
Cat = Class->known_categories_begin(),
@@ -4292,10 +4345,10 @@ void ASTWriter::WriteObjCCategories() {
assert(getDeclID(*Cat) != 0 && "Bogus category");
AddDeclRef(*Cat, Categories);
}
-
+
// Update the size.
Categories[StartIndex] = Size;
-
+
// Record this interface -> category map.
ObjCCategoriesInfo CatInfo = { getDeclID(Class), StartIndex };
CategoriesMap.push_back(CatInfo);
@@ -4344,7 +4397,7 @@ void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) {
Stream.EmitRecord(LATE_PARSED_TEMPLATE, Record);
}
-/// \brief Write the state of 'pragma clang optimize' at the end of the module.
+/// Write the state of 'pragma clang optimize' at the end of the module.
void ASTWriter::WriteOptimizePragmaOptions(Sema &SemaRef) {
RecordData Record;
SourceLocation PragmaLoc = SemaRef.getOptimizeOffPragmaLocation();
@@ -4352,14 +4405,14 @@ void ASTWriter::WriteOptimizePragmaOptions(Sema &SemaRef) {
Stream.EmitRecord(OPTIMIZE_PRAGMA_OPTIONS, Record);
}
-/// \brief Write the state of 'pragma ms_struct' at the end of the module.
+/// Write the state of 'pragma ms_struct' at the end of the module.
void ASTWriter::WriteMSStructPragmaOptions(Sema &SemaRef) {
RecordData Record;
Record.push_back(SemaRef.MSStructPragmaOn ? PMSST_ON : PMSST_OFF);
Stream.EmitRecord(MSSTRUCT_PRAGMA_OPTIONS, Record);
}
-/// \brief Write the state of 'pragma pointers_to_members' at the end of the
+/// Write the state of 'pragma pointers_to_members' at the end of the
/// module.
void ASTWriter::WriteMSPointersToMembersPragmaOptions(Sema &SemaRef) {
RecordData Record;
@@ -4368,7 +4421,7 @@ void ASTWriter::WriteMSPointersToMembersPragmaOptions(Sema &SemaRef) {
Stream.EmitRecord(POINTERS_TO_MEMBERS_PRAGMA_OPTIONS, Record);
}
-/// \brief Write the state of 'pragma pack' at the end of the module.
+/// Write the state of 'pragma pack' at the end of the module.
void ASTWriter::WritePackPragmaOptions(Sema &SemaRef) {
// Don't serialize pragma pack state for modules, since it should only take
// effect on a per-submodule basis.
@@ -4427,7 +4480,7 @@ void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
// General Serialization Routines
//===----------------------------------------------------------------------===//
-/// \brief Emit the list of attributes to the specified record.
+/// Emit the list of attributes to the specified record.
void ASTRecordWriter::AddAttributes(ArrayRef<const Attr *> Attrs) {
auto &Record = *this;
Record.push_back(Attrs.size());
@@ -4501,7 +4554,7 @@ void ASTWriter::AddVersionTuple(const VersionTuple &Version,
Record.push_back(0);
}
-/// \brief Note that the identifier II occurs at the given offset
+/// Note that the identifier II occurs at the given offset
/// within the identifier table.
void ASTWriter::SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset) {
IdentID ID = IdentifierIDs[II];
@@ -4511,7 +4564,7 @@ void ASTWriter::SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset) {
IdentifierOffsets[ID - FirstIdentID] = Offset;
}
-/// \brief Note that the selector Sel occurs at the given offset
+/// Note that the selector Sel occurs at the given offset
/// within the method pool/selector table.
void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
unsigned ID = SelectorIDs[Sel];
@@ -4555,7 +4608,7 @@ ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef,
WritingAST = true;
ASTHasCompilerErrors = hasErrors;
-
+
// Emit the file header.
Stream.Emit((unsigned)'C', 8);
Stream.Emit((unsigned)'P', 8);
@@ -4603,7 +4656,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// Make sure that the AST reader knows to finalize itself.
if (Chain)
Chain->finalizeForWriting();
-
+
ASTContext &Context = SemaRef.Context;
Preprocessor &PP = SemaRef.PP;
@@ -4644,7 +4697,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// headers.
RecordData TentativeDefinitions;
AddLazyVectorDecls(*this, SemaRef.TentativeDefinitions, TentativeDefinitions);
-
+
// Build a record containing all of the file scoped decls in this file.
RecordData UnusedFileScopedDecls;
if (!isModule)
@@ -4732,13 +4785,15 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// analyze later in AST.
RecordData DeleteExprsToAnalyze;
- for (const auto &DeleteExprsInfo :
- SemaRef.getMismatchingDeleteExpressions()) {
- AddDeclRef(DeleteExprsInfo.first, DeleteExprsToAnalyze);
- DeleteExprsToAnalyze.push_back(DeleteExprsInfo.second.size());
- for (const auto &DeleteLoc : DeleteExprsInfo.second) {
- AddSourceLocation(DeleteLoc.first, DeleteExprsToAnalyze);
- DeleteExprsToAnalyze.push_back(DeleteLoc.second);
+ if (!isModule) {
+ for (const auto &DeleteExprsInfo :
+ SemaRef.getMismatchingDeleteExpressions()) {
+ AddDeclRef(DeleteExprsInfo.first, DeleteExprsToAnalyze);
+ DeleteExprsToAnalyze.push_back(DeleteExprsInfo.second.size());
+ for (const auto &DeleteLoc : DeleteExprsInfo.second) {
+ AddSourceLocation(DeleteLoc.first, DeleteExprsToAnalyze);
+ DeleteExprsToAnalyze.push_back(DeleteLoc.second);
+ }
}
}
@@ -4765,7 +4820,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
NewGlobalKindDeclPairs.push_back(GetDeclRef(D));
}
}
-
+
auto Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(llvm::BitCodeAbbrevOp(TU_UPDATE_LEXICAL));
Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
@@ -4787,7 +4842,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// If we have any extern "C" names, write out a visible update for them.
if (Context.ExternCContext)
WriteDeclContextVisibleUpdate(Context.ExternCContext);
-
+
// If the translation unit has an anonymous namespace, and we don't already
// have an update block for it, write it as an update block.
// FIXME: Why do we not do this if there's already an update block?
@@ -4826,7 +4881,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
IIs.push_back(II);
}
// Sort the identifiers to visit based on their name.
- std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ llvm::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
for (const IdentifierInfo *II : IIs) {
for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
DEnd = SemaRef.IdResolver.end();
@@ -4876,7 +4931,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// declaration-id:i32
// c++-base-specifiers-id:i32
// type-id:i32)
- //
+ //
// module-kind is the ModuleKind enum value. If it is MK_PrebuiltModule or
// MK_ExplicitModule, then the module-name is the module name. Otherwise,
// it is the module file name.
@@ -4890,7 +4945,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
for (ModuleFile &M : Chain->ModuleMgr) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
LE.write<uint8_t>(static_cast<uint8_t>(M.Kind));
StringRef Name =
M.Kind == MK_PrebuiltModule || M.Kind == MK_ExplicitModule
@@ -4970,7 +5025,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteOpenCLExtensionDecls(SemaRef);
WriteCUDAPragmas(SemaRef);
- // If we're emitting a module, write out the submodule information.
+ // If we're emitting a module, write out the submodule information.
if (WritingModule)
WriteSubmodules(WritingModule);
@@ -5020,7 +5075,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// Write the record containing CUDA-specific declaration references.
if (!CUDASpecialDeclRefs.empty())
Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
-
+
// Write the delegating constructors.
if (!DelegatingCtorDecls.empty())
Stream.EmitRecord(DELEGATING_CTORS, DelegatingCtorDecls);
@@ -5063,7 +5118,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
};
// Sort and deduplicate module IDs.
- std::sort(Imports.begin(), Imports.end(), Cmp);
+ llvm::sort(Imports.begin(), Imports.end(), Cmp);
Imports.erase(std::unique(Imports.begin(), Imports.end(), Eq),
Imports.end());
@@ -5167,6 +5222,8 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: {
auto *RD = cast<CXXRecordDecl>(D);
UpdatedDeclContexts.insert(RD->getPrimaryContext());
+ Record.push_back(RD->isParamDestroyedInCallee());
+ Record.push_back(RD->getArgPassingRestrictions());
Record.AddCXXDefinitionData(RD);
Record.AddOffset(WriteDeclContextLexicalBlock(
*Context, const_cast<CXXRecordDecl *>(RD)));
@@ -5323,7 +5380,7 @@ MacroID ASTWriter::getMacroRef(MacroInfo *MI, const IdentifierInfo *Name) {
MacroID ASTWriter::getMacroID(MacroInfo *MI) {
if (!MI || MI->isBuiltinMacro())
return 0;
-
+
assert(MacroIDs.find(MI) != MacroIDs.end() && "Macro not emitted!");
return MacroIDs[MI];
}
@@ -5406,12 +5463,11 @@ void ASTRecordWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo) {
return;
}
+ AddTypeRef(TInfo->getType());
AddTypeLoc(TInfo->getTypeLoc());
}
void ASTRecordWriter::AddTypeLoc(TypeLoc TL) {
- AddTypeRef(TL.getType());
-
TypeLocWriter TLW(*this);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
TLW.Visit(TL);
@@ -5467,12 +5523,12 @@ DeclID ASTWriter::GetDeclRef(const Decl *D) {
if (!D) {
return 0;
}
-
+
// If D comes from an AST file, its declaration ID is already known and
// fixed.
if (D->isFromASTFile())
return D->getGlobalID();
-
+
assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
DeclID &ID = DeclIDs[D];
if (ID == 0) {
@@ -5516,7 +5572,9 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
return;
// FIXME: ParmVarDecls that are part of a function type of a parameter of
// a function/objc method, should not have TU as lexical context.
- if (isa<ParmVarDecl>(D))
+ // TemplateTemplateParmDecls that are part of an alias template, should not
+ // have TU as lexical context.
+ if (isa<ParmVarDecl>(D) || isa<TemplateTemplateParmDecl>(D))
return;
SourceManager &SM = Context->getSourceManager();
@@ -5734,6 +5792,7 @@ void ASTRecordWriter::AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
Record->push_back(Kind == NestedNameSpecifier::TypeSpecWithTemplate);
+ AddTypeRef(NNS.getTypeLoc().getType());
AddTypeLoc(NNS.getTypeLoc());
AddSourceLocation(NNS.getLocalSourceRange().getEnd());
break;
@@ -5792,7 +5851,7 @@ void ASTRecordWriter::AddTemplateName(TemplateName Name) {
AddTemplateName(subst->getReplacement());
break;
}
-
+
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage *SubstPack
= Name.getAsSubstTemplateTemplateParmPack();
@@ -5855,7 +5914,7 @@ void ASTRecordWriter::AddTemplateParameterList(
AddDeclRef(P);
}
-/// \brief Emit a template argument list.
+/// Emit a template argument list.
void ASTRecordWriter::AddTemplateArgumentList(
const TemplateArgumentList *TemplateArgs) {
assert(TemplateArgs && "No TemplateArgs!");
@@ -5892,7 +5951,7 @@ void ASTRecordWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base) {
Record->push_back(Base.getInheritConstructors());
AddTypeSourceInfo(Base.getTypeSourceInfo());
AddSourceRange(Base.getSourceRange());
- AddSourceLocation(Base.isPackExpansion()? Base.getEllipsisLoc()
+ AddSourceLocation(Base.isPackExpansion()? Base.getEllipsisLoc()
: SourceLocation());
}
@@ -5965,7 +6024,9 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
Record->push_back(Data.Polymorphic);
Record->push_back(Data.Abstract);
Record->push_back(Data.IsStandardLayout);
- Record->push_back(Data.HasNoNonEmptyBases);
+ Record->push_back(Data.IsCXX11StandardLayout);
+ Record->push_back(Data.HasBasesWithFields);
+ Record->push_back(Data.HasBasesWithNonStaticDataMembers);
Record->push_back(Data.HasPrivateFields);
Record->push_back(Data.HasProtectedFields);
Record->push_back(Data.HasPublicFields);
@@ -5986,11 +6047,12 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
Record->push_back(Data.DefaultedMoveAssignmentIsDeleted);
Record->push_back(Data.DefaultedDestructorIsDeleted);
Record->push_back(Data.HasTrivialSpecialMembers);
+ Record->push_back(Data.HasTrivialSpecialMembersForCall);
Record->push_back(Data.DeclaredNonTrivialSpecialMembers);
+ Record->push_back(Data.DeclaredNonTrivialSpecialMembersForCall);
Record->push_back(Data.HasIrrelevantDestructor);
Record->push_back(Data.HasConstexprNonCopyMoveConstructor);
Record->push_back(Data.HasDefaultedDefaultConstructor);
- Record->push_back(Data.CanPassInRegisters);
Record->push_back(Data.DefaultedDefaultConstructorIsConstexpr);
Record->push_back(Data.HasConstexprDefaultConstructor);
Record->push_back(Data.HasNonLiteralTypeFieldsOrBases);
@@ -6024,9 +6086,9 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
AddUnresolvedSet(Data.Conversions.get(*Writer->Context));
AddUnresolvedSet(Data.VisibleConversions.get(*Writer->Context));
- // Data.Definition is the owning decl, no need to write it.
+ // Data.Definition is the owning decl, no need to write it.
AddDeclRef(D->getFirstFriend());
-
+
// Add lambda-specific data.
if (Data.IsLambda) {
auto &Lambda = D->getLambdaData();
@@ -6319,7 +6381,7 @@ void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
assert(!WritingAST && "Already writing the AST!");
if (!IFD->isFromASTFile())
return; // Declaration not imported from PCH.
-
+
assert(IFD->getDefinition() && "Category on a class without a definition?");
ObjCClassesWithCategories.insert(
const_cast<ObjCInterfaceDecl *>(IFD->getDefinition()));
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index bb72a3b383ea..77e578f6bc57 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
@@ -264,7 +265,8 @@ void ASTDeclWriter::Visit(Decl *D) {
// abbreviation infrastructure requires that arrays are encoded last, so
// we handle it here in the case of those classes derived from DeclaratorDecl
if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
- Record.AddTypeSourceInfo(DD->getTypeSourceInfo());
+ if (auto *TInfo = DD->getTypeSourceInfo())
+ Record.AddTypeLoc(TInfo->getTypeLoc());
}
// Handle FunctionDecl's body here and write it after all other Stmts/Exprs
@@ -429,6 +431,8 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
Record.push_back(D->isScoped());
Record.push_back(D->isScopedUsingClassTag());
Record.push_back(D->isFixed());
+ Record.push_back(D->getODRHash());
+
if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
Record.AddDeclRef(MemberInfo->getInstantiatedFrom());
Record.push_back(MemberInfo->getTemplateSpecializationKind());
@@ -465,6 +469,11 @@ void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
Record.push_back(D->isAnonymousStructOrUnion());
Record.push_back(D->hasObjectMember());
Record.push_back(D->hasVolatileMember());
+ Record.push_back(D->isNonTrivialToPrimitiveDefaultInitialize());
+ Record.push_back(D->isNonTrivialToPrimitiveCopy());
+ Record.push_back(D->isNonTrivialToPrimitiveDestroy());
+ Record.push_back(D->isParamDestroyedInCallee());
+ Record.push_back(D->getArgPassingRestrictions());
if (D->getDeclContext() == D->getLexicalDeclContext() &&
!D->hasAttrs() &&
@@ -507,6 +516,9 @@ void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
Record.push_back(D->hasExtInfo());
if (D->hasExtInfo())
Record.AddQualifierInfo(*D->getExtInfo());
+ // The location information is deferred until the end of the record.
+ Record.AddTypeRef(D->getTypeSourceInfo() ? D->getTypeSourceInfo()->getType()
+ : QualType());
}
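Together with the AddTypeSourceInfo/AddTypeLoc change earlier in the patch, the effect is to split a declarator's TypeSourceInfo into a fixed-position type reference and a trailing location array. A sketch of the resulting call pattern, using the writer API from the surrounding code:

    // The QualType lands at a fixed offset in the record (the new TSIType
    // abbreviation field), while the variable-length TypeLoc stream is
    // deferred to the end, where the abbreviation machinery requires arrays.
    void writeDeclaratorParts(ASTRecordWriter &Record, DeclaratorDecl *DD) {
      TypeSourceInfo *TInfo = DD->getTypeSourceInfo();
      Record.AddTypeRef(TInfo ? TInfo->getType() : QualType()); // fixed field
      if (TInfo)
        Record.AddTypeLoc(TInfo->getTypeLoc());                 // trailing array
    }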
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
@@ -528,12 +540,14 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(D->HasWrittenPrototype);
Record.push_back(D->IsDeleted);
Record.push_back(D->IsTrivial);
+ Record.push_back(D->IsTrivialForCall);
Record.push_back(D->IsDefaulted);
Record.push_back(D->IsExplicitlyDefaulted);
Record.push_back(D->HasImplicitReturnZero);
Record.push_back(D->IsConstexpr);
Record.push_back(D->UsesSEHTry);
Record.push_back(D->HasSkippedBody);
+ Record.push_back(D->IsMultiVersion);
Record.push_back(D->IsLateTemplateParsed);
Record.push_back(D->getLinkageInternal());
Record.AddSourceLocation(D->getLocEnd());
@@ -912,6 +926,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->isExceptionVariable());
Record.push_back(D->isNRVOVariable());
Record.push_back(D->isCXXForRangeDecl());
+ Record.push_back(D->isObjCForDecl());
Record.push_back(D->isARCPseudoStrong());
Record.push_back(D->isInline());
Record.push_back(D->isInlineSpecified());
@@ -1192,6 +1207,7 @@ void ASTDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
VisitRedeclarable(D);
VisitNamedDecl(D);
Record.AddDeclRef(D->getTargetDecl());
+ Record.push_back(D->getIdentifierNamespace());
Record.AddDeclRef(D->UsingOrNextShadow);
Record.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D));
Code = serialization::DECL_USING_SHADOW;
@@ -1625,7 +1641,7 @@ void ASTDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
Code = serialization::DECL_STATIC_ASSERT;
}
-/// \brief Emit the DeclContext part of a declaration context decl.
+/// Emit the DeclContext part of a declaration context decl.
void ASTDeclWriter::VisitDeclContext(DeclContext *DC) {
Record.AddOffset(Writer.WriteDeclContextLexicalBlock(Context, DC));
Record.AddOffset(Writer.WriteDeclContextVisibleBlock(Context, DC));
@@ -1763,11 +1779,11 @@ void ASTWriter::WriteDeclAbbrevs() {
// DeclaratorDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// FieldDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
// Type Source Info
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
DeclFieldAbbrev = Stream.EmitAbbrev(std::move(Abv));
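For orientation in the WriteDeclAbbrevs hunks: Fixed(n) reserves exactly n bits, VBR(n) encodes an unbounded integer in n-bit chunks, and an Array operand must be the last one in the abbreviation, which is why the TypeLoc payload sits behind the new TSIType field above. An illustrative (not verbatim) abbreviation:

    auto Abv = std::make_shared<llvm::BitCodeAbbrev>();
    Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, 1)); // one flag bit
    Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));   // small IDs stay small
    Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Array));    // trailing payload...
    Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));   // ...of VBR6 elements
    unsigned MyAbbrev = Stream.EmitAbbrev(std::move(Abv));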
@@ -1796,6 +1812,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// DeclaratorDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// FieldDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
@@ -1803,7 +1820,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getAccessControl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getSynthesize
// Type Source Info
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
DeclObjCIvarAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -1851,6 +1867,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScoped
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScopedUsingClassTag
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isFixed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));// ODRHash
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InstantiatedMembEnum
// DC
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
@@ -1896,6 +1913,18 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // AnonymousStructUnion
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasObjectMember
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasVolatileMember
+
+ // isNonTrivialToPrimitiveDefaultInitialize
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // isNonTrivialToPrimitiveCopy
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // isNonTrivialToPrimitiveDestroy
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // isParamDestroyedInCallee
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+ // getArgPassingRestrictions
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2));
+
// DC
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset
@@ -1927,6 +1956,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// DeclaratorDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// VarDecl
Abv->Add(BitCodeAbbrevOp(0)); // SClass
Abv->Add(BitCodeAbbrevOp(0)); // TSCSpec
@@ -1943,7 +1973,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedDefaultArg
Abv->Add(BitCodeAbbrevOp(0)); // HasUninstantiatedDefaultArg
// Type Source Info
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
DeclParmVarAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2003,6 +2032,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// DeclaratorDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// VarDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // SClass
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // TSCSpec
@@ -2011,6 +2041,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isObjCForDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // isInline
Abv->Add(BitCodeAbbrevOp(0)); // isInlineSpecified
@@ -2022,7 +2053,6 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // IsInitICE (local)
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
// Type Source Info
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
DeclVarAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2053,6 +2083,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// DeclaratorDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerLocStart
Abv->Add(BitCodeAbbrevOp(0)); // HasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// FunctionDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 11)); // IDNS
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // StorageClass
@@ -2065,12 +2096,14 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(1)); // HasWrittenProto
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Deleted
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Trivial
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // TrivialForCall
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Defaulted
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitlyDefaulted
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ImplicitReturnZero
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Constexpr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // UsesSEHTry
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // SkippedBody
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // MultiVersion
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // LateParsed
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LocEnd
@@ -2162,6 +2195,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// CastExpr
Abv->Add(BitCodeAbbrevOp(0)); // PathSize
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 6)); // CastKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // PartOfExplicitCast
// ImplicitCastExpr
ExprImplicitCastAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2209,6 +2243,9 @@ static bool isRequiredDecl(const Decl *D, ASTContext &Context,
}
void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
+ PrettyDeclStackTraceEntry CrashInfo(Context, D, SourceLocation(),
+ "serializing");
+
// Determine the ID for this declaration.
serialization::DeclID ID;
assert(!D->isFromASTFile() && "should not be emitting imported decl");
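The new PrettyDeclStackTraceEntry (note the matching include added at the top of this file) is a RAII crash-report frame: while it is alive, a crash anywhere below it prints which declaration was being serialized. A minimal sketch:

    #include "clang/AST/PrettyDeclStackTrace.h"

    void serializeOne(clang::ASTContext &Context, clang::Decl *D) {
      clang::PrettyDeclStackTraceEntry CrashInfo(Context, D,
                                                 clang::SourceLocation(),
                                                 "serializing");
      // Any crash from here on reports "serializing <decl>" in the pretty
      // stack trace, which makes writer crashes on large TUs easier to triage.
    }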
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index c5f4495d2f01..3efb6482dd42 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Implements serialization for Statements and Expressions.
+/// Implements serialization for Statements and Expressions.
///
//===----------------------------------------------------------------------===//
@@ -444,6 +444,13 @@ void ASTStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
Code = serialization::EXPR_INTEGER_LITERAL;
}
+void ASTStmtWriter::VisitFixedPointLiteral(FixedPointLiteral *E) {
+ VisitExpr(E);
+ Record.AddSourceLocation(E->getLocation());
+ Record.AddAPInt(E->getValue());
+ Code = serialization::EXPR_INTEGER_LITERAL;
+}
+
void ASTStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
VisitExpr(E);
Record.push_back(E->getRawSemantics());
@@ -509,6 +516,7 @@ void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
Record.AddStmt(E->getSubExpr());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
+ Record.push_back(E->canOverflow());
Code = serialization::EXPR_UNARY_OPERATOR;
}
@@ -705,6 +713,7 @@ ASTStmtWriter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
+ Record.push_back(E->isPartOfExplicitCast());
if (E->path_size() == 0)
AbbrevToUse = Writer.getExprImplicitCastAbbrev();
@@ -1698,6 +1707,7 @@ void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
Record.AddStmt(E->getSourceExpr());
Record.AddSourceLocation(E->getLocation());
+ Record.push_back(E->isUnique());
Code = serialization::EXPR_OPAQUE_VALUE;
}
@@ -2675,7 +2685,7 @@ void ASTWriter::ClearSwitchCaseIDs() {
SwitchCaseIDs.clear();
}
-/// \brief Write the given substatement or subexpression to the
+/// Write the given substatement or subexpression to the
/// bitstream.
void ASTWriter::WriteSubStmt(Stmt *S) {
RecordData Record;
@@ -2719,7 +2729,7 @@ void ASTWriter::WriteSubStmt(Stmt *S) {
SubStmtEntries[S] = Offset;
}
-/// \brief Flush all of the statements that have been added to the
+/// Flush all of the statements that have been added to the
/// queue via AddStmt().
void ASTRecordWriter::FlushStmts() {
// We expect to be the only consumer of the two temporary statement maps,
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 20c114297b99..3733638d2977 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -21,9 +21,9 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/DJB.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -38,26 +38,26 @@ using namespace serialization;
//----------------------------------------------------------------------------//
namespace {
enum {
- /// \brief The block containing the index.
+ /// The block containing the index.
GLOBAL_INDEX_BLOCK_ID = llvm::bitc::FIRST_APPLICATION_BLOCKID
};
- /// \brief Describes the record types in the index.
+ /// Describes the record types in the index.
enum IndexRecordTypes {
- /// \brief Contains version information and potentially other metadata,
+ /// Contains version information and potentially other metadata,
/// used to determine if we can read this global index file.
INDEX_METADATA,
- /// \brief Describes a module, including its file name and dependencies.
+ /// Describes a module, including its file name and dependencies.
MODULE,
- /// \brief The index for identifiers.
+ /// The index for identifiers.
IDENTIFIER_INDEX
};
}
-/// \brief The name of the global index file.
+/// The name of the global index file.
static const char * const IndexFileName = "modules.idx";
-/// \brief The global index file version.
+/// The global index file version.
static const unsigned CurrentVersion = 1;
//----------------------------------------------------------------------------//
@@ -66,7 +66,7 @@ static const unsigned CurrentVersion = 1;
namespace {
-/// \brief Trait used to read the identifier index from the on-disk hash
+/// Trait used to read the identifier index from the on-disk hash
/// table.
class IdentifierIndexReaderTrait {
public:
@@ -81,7 +81,7 @@ public:
}
static hash_value_type ComputeHash(const internal_key_type& a) {
- return llvm::HashString(a);
+ return llvm::djbHash(a);
}
static std::pair<unsigned, unsigned>
@@ -245,7 +245,7 @@ GlobalModuleIndex::readIndex(StringRef Path) {
return std::make_pair(nullptr, EC_NotFound);
std::unique_ptr<llvm::MemoryBuffer> Buffer = std::move(BufferOrErr.get());
- /// \brief The main bitstream cursor for the main block.
+ /// The main bitstream cursor for the main block.
llvm::BitstreamCursor Cursor(*Buffer);
// Sniff for the signature.
@@ -289,7 +289,7 @@ void GlobalModuleIndex::getModuleDependencies(
bool GlobalModuleIndex::lookupIdentifier(StringRef Name, HitSet &Hits) {
Hits.clear();
-
+
// If there's no identifier index, there is nothing we can do.
if (!IdentifierIndex)
return false;
@@ -368,12 +368,12 @@ LLVM_DUMP_METHOD void GlobalModuleIndex::dump() {
//----------------------------------------------------------------------------//
namespace {
- /// \brief Provides information about a specific module file.
+ /// Provides information about a specific module file.
struct ModuleFileInfo {
- /// \brief The numberic ID for this module file.
+ /// The numeric ID for this module file.
unsigned ID;
- /// \brief The set of modules on which this module depends. Each entry is
+ /// The set of modules on which this module depends. Each entry is
/// a module ID.
SmallVector<unsigned, 4> Dependencies;
ASTFileSignature Signature;
@@ -387,7 +387,7 @@ namespace {
: StoredSize(Size), StoredModTime(ModTime), StoredSignature(Sig) {}
};
- /// \brief Builder that generates the global module index file.
+ /// Builder that generates the global module index file.
class GlobalModuleIndexBuilder {
FileManager &FileMgr;
const PCHContainerReader &PCHContainerRdr;
@@ -398,26 +398,26 @@ namespace {
/// Information about each of the known module files.
ModuleFilesMap ModuleFiles;
- /// \brief Mapping from the imported module file to the imported
+ /// Mapping from the imported module file to the imported
/// information.
typedef std::multimap<const FileEntry *, ImportedModuleFileInfo>
ImportedModuleFilesMap;
- /// \brief Information about each importing of a module file.
+ /// Information about each importing of a module file.
ImportedModuleFilesMap ImportedModuleFiles;
- /// \brief Mapping from identifiers to the list of module file IDs that
+ /// Mapping from identifiers to the list of module file IDs that
/// consider this identifier to be interesting.
typedef llvm::StringMap<SmallVector<unsigned, 2> > InterestingIdentifierMap;
- /// \brief A mapping from all interesting identifiers to the set of module
+ /// A mapping from all interesting identifiers to the set of module
/// files in which those identifiers are considered interesting.
InterestingIdentifierMap InterestingIdentifiers;
-
- /// \brief Write the block-info block for the global module index file.
+
+ /// Write the block-info block for the global module index file.
void emitBlockInfoBlock(llvm::BitstreamWriter &Stream);
- /// \brief Retrieve the module file information for the given file.
+ /// Retrieve the module file information for the given file.
ModuleFileInfo &getModuleFileInfo(const FileEntry *File) {
llvm::MapVector<const FileEntry *, ModuleFileInfo>::iterator Known
= ModuleFiles.find(File);
@@ -435,12 +435,12 @@ namespace {
FileManager &FileMgr, const PCHContainerReader &PCHContainerRdr)
: FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr) {}
- /// \brief Load the contents of the given module file into the builder.
+ /// Load the contents of the given module file into the builder.
///
/// \returns true if an error occurred, false otherwise.
bool loadModuleFile(const FileEntry *File);
- /// \brief Write the index to the given bitstream.
+ /// Write the index to the given bitstream.
/// \returns true if an error occurred, false otherwise.
bool writeIndex(llvm::BitstreamWriter &Stream);
};
@@ -493,7 +493,7 @@ namespace {
: public serialization::reader::ASTIdentifierLookupTraitBase {
public:
- /// \brief The identifier and whether it is "interesting".
+ /// The identifier and whether it is "interesting".
typedef std::pair<StringRef, bool> data_type;
data_type ReadData(const internal_key_type& k,
@@ -608,7 +608,7 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Skip the import location
++Idx;
- // Load stored size/modification time.
+ // Load stored size/modification time.
off_t StoredSize = (off_t)Record[Idx++];
time_t StoredModTime = (time_t)Record[Idx++];
@@ -685,7 +685,7 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
namespace {
-/// \brief Trait used to generate the identifier index as an on-disk hash
+/// Trait used to generate the identifier index as an on-disk hash
/// table.
class IdentifierIndexWriterTrait {
public:
@@ -697,20 +697,20 @@ public:
typedef unsigned offset_type;
static hash_value_type ComputeHash(key_type_ref Key) {
- return llvm::HashString(Key);
+ return llvm::djbHash(Key);
}
std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref Key, data_type_ref Data) {
using namespace llvm::support;
- endian::Writer<little> LE(Out);
+ endian::Writer LE(Out, little);
unsigned KeyLen = Key.size();
unsigned DataLen = Data.size() * 4;
LE.write<uint16_t>(KeyLen);
LE.write<uint16_t>(DataLen);
return std::make_pair(KeyLen, DataLen);
}
-
+
void EmitKey(raw_ostream& Out, key_type_ref Key, unsigned KeyLen) {
Out.write(Key.data(), KeyLen);
}
@@ -719,7 +719,7 @@ public:
unsigned DataLen) {
using namespace llvm::support;
for (unsigned I = 0, N = Data.size(); I != N; ++I)
- endian::Writer<little>(Out).write<uint32_t>(Data[I]);
+ endian::write<uint32_t>(Out, Data[I], little);
}
};
@@ -740,7 +740,7 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
}
using namespace llvm;
-
+
// Emit the file header.
Stream.Emit((unsigned)'B', 8);
Stream.Emit((unsigned)'C', 8);
@@ -789,7 +789,7 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
I != IEnd; ++I) {
Generator.insert(I->first(), I->second, Trait);
}
-
+
// Create the on-disk hash table in a buffer.
SmallString<4096> IdentifierTable;
uint32_t BucketOffset;
@@ -797,7 +797,7 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
using namespace llvm::support;
llvm::raw_svector_ostream Out(IdentifierTable);
// Make sure that no bucket is at offset 0
- endian::Writer<little>(Out).write<uint32_t>(0);
+ endian::write<uint32_t>(Out, 0, little);
BucketOffset = Generator.Emit(Out, Trait);
}
@@ -902,7 +902,7 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
// Rename the newly-written index file to the proper name.
if (llvm::sys::fs::rename(IndexTmpPath, IndexPath)) {
- // Rename failed; just remove the
+ // Rename failed; just remove the temporary file.
llvm::sys::fs::remove(IndexTmpPath);
return EC_IOError;
}
@@ -913,10 +913,10 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
namespace {
class GlobalIndexIdentifierIterator : public IdentifierIterator {
- /// \brief The current position within the identifier lookup table.
+ /// The current position within the identifier lookup table.
IdentifierIndexTable::key_iterator Current;
- /// \brief The end position within the identifier lookup table.
+ /// The end position within the identifier lookup table.
IdentifierIndexTable::key_iterator End;
public:
diff --git a/lib/Serialization/Module.cpp b/lib/Serialization/Module.cpp
index 5a44d26fe399..f0ada809ad99 100644
--- a/lib/Serialization/Module.cpp
+++ b/lib/Serialization/Module.cpp
@@ -1,4 +1,4 @@
-//===--- Module.cpp - Module description ------------------------*- C++ -*-===//
+//===- Module.cpp - Module description ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,12 @@
// been loaded from an AST file.
//
//===----------------------------------------------------------------------===//
+
#include "clang/Serialization/Module.h"
#include "ASTReaderInternals.h"
+#include "clang/Serialization/ContinuousRangeMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -32,7 +36,8 @@ dumpLocalRemap(StringRef Name,
if (Map.begin() == Map.end())
return;
- typedef ContinuousRangeMap<Key, Offset, InitialCapacity> MapType;
+ using MapType = ContinuousRangeMap<Key, Offset, InitialCapacity>;
+
llvm::errs() << " " << Name << ":\n";
for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
I != IEnd; ++I) {
diff --git a/lib/Serialization/MultiOnDiskHashTable.h b/lib/Serialization/MultiOnDiskHashTable.h
index 44d1616a0110..ded7cd146449 100644
--- a/lib/Serialization/MultiOnDiskHashTable.h
+++ b/lib/Serialization/MultiOnDiskHashTable.h
@@ -37,7 +37,7 @@
namespace clang {
namespace serialization {
-/// \brief A collection of on-disk hash tables, merged when relevant for performance.
+/// A collection of on-disk hash tables, merged when relevant for performance.
template<typename Info> class MultiOnDiskHashTable {
public:
/// A handle to a file, used when overriding tables.
@@ -57,7 +57,7 @@ private:
template<typename ReaderInfo, typename WriterInfo>
friend class MultiOnDiskHashTableGenerator;
- /// \brief A hash table stored on disk.
+ /// A hash table stored on disk.
struct OnDiskTable {
using HashTable = llvm::OnDiskIterableChainedHashTable<Info>;
@@ -79,14 +79,14 @@ private:
using Table = llvm::PointerUnion<OnDiskTable *, MergedTable *>;
using TableVector = llvm::TinyPtrVector<void *>;
- /// \brief The current set of on-disk and merged tables.
+ /// The current set of on-disk and merged tables.
/// We manually store the opaque value of the Table because TinyPtrVector
/// can't cope with holding a PointerUnion directly.
/// There can be at most one MergedTable in this vector, and if present,
/// it is the first table.
TableVector Tables;
- /// \brief Files corresponding to overridden tables that we've not yet
+ /// Files corresponding to overridden tables that we've not yet
/// discarded.
llvm::TinyPtrVector<file_type> PendingOverrides;
@@ -102,7 +102,7 @@ private:
llvm::mapped_iterator<TableVector::iterator, AsOnDiskTable>;
using table_range = llvm::iterator_range<table_iterator>;
- /// \brief The current set of on-disk tables.
+ /// The current set of on-disk tables.
table_range tables() {
auto Begin = Tables.begin(), End = Tables.end();
if (getMergedTable())
@@ -117,7 +117,7 @@ private:
.template dyn_cast<MergedTable*>();
}
- /// \brief Delete all our current on-disk tables.
+ /// Delete all our current on-disk tables.
void clear() {
for (auto *T : tables())
delete T;
@@ -194,7 +194,7 @@ public:
~MultiOnDiskHashTable() { clear(); }
- /// \brief Add the table \p Data loaded from file \p File.
+ /// Add the table \p Data loaded from file \p File.
void add(file_type File, storage_type Data, Info InfoObj = Info()) {
using namespace llvm::support;
@@ -225,7 +225,7 @@ public:
Tables.push_back(NewTable.getOpaqueValue());
}
- /// \brief Find and read the lookup results for \p EKey.
+ /// Find and read the lookup results for \p EKey.
data_type find(const external_key_type &EKey) {
data_type Result;
@@ -257,7 +257,7 @@ public:
return Result;
}
- /// \brief Read all the lookup results into a single value. This only makes
+ /// Read all the lookup results into a single value. This only makes
/// sense if merging values across keys is meaningful.
data_type findAll() {
data_type Result;
@@ -288,7 +288,7 @@ public:
}
};
-/// \brief Writer for the on-disk hash table.
+/// Writer for the on-disk hash table.
template<typename ReaderInfo, typename WriterInfo>
class MultiOnDiskHashTableGenerator {
using BaseTable = MultiOnDiskHashTable<ReaderInfo>;
@@ -312,7 +312,7 @@ public:
// Write our header information.
{
- endian::Writer<little> Writer(OutStream);
+ endian::Writer Writer(OutStream, little);
// Reserve four bytes for the bucket offset.
Writer.write<uint32_t>(0);
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
index 048418ef62db..62b7fab0739a 100644
--- a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
+++ b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
@@ -18,7 +18,7 @@
namespace clang { namespace ento {
-/// \brief Returns true if leak diagnostics should directly reference
+/// Returns true if leak diagnostics should directly reference
/// the allocation site (where possible).
///
/// The default is false.
diff --git a/lib/StaticAnalyzer/Checkers/AllocationState.h b/lib/StaticAnalyzer/Checkers/AllocationState.h
new file mode 100644
index 000000000000..a6908bd7a651
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/AllocationState.h
@@ -0,0 +1,34 @@
+//===--- AllocationState.h ------------------------------------- *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONSTATE_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONSTATE_H
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+namespace ento {
+
+namespace allocation_state {
+
+ProgramStateRef markReleased(ProgramStateRef State, SymbolRef Sym,
+ const Expr *Origin);
+
+/// This function provides an additional visitor that augments the bug report
+/// with information relevant to memory errors caused by the misuse of
+/// AF_InnerBuffer symbols.
+std::unique_ptr<BugReporterVisitor> getInnerPointerBRVisitor(SymbolRef Sym);
+
+} // end namespace allocation_state
+
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index 90d5c0e36a47..e4cdc500de6a 100644
--- a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -15,8 +15,10 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
@@ -29,8 +31,17 @@ class AnalysisOrderChecker
check::PostStmt<CastExpr>,
check::PreStmt<ArraySubscriptExpr>,
check::PostStmt<ArraySubscriptExpr>,
+ check::PreStmt<CXXNewExpr>,
+ check::PostStmt<CXXNewExpr>,
+ check::PreStmt<OffsetOfExpr>,
+ check::PostStmt<OffsetOfExpr>,
+ check::PreCall,
+ check::PostCall,
+ check::NewAllocator,
check::Bind,
- check::RegionChanges> {
+ check::RegionChanges,
+ check::LiveSymbols> {
+
bool isCallbackEnabled(AnalyzerOptions &Opts, StringRef CallbackName) const {
return Opts.getBooleanOption("*", false, this) ||
Opts.getBooleanOption(CallbackName, false, this);
@@ -72,11 +83,60 @@ public:
llvm::errs() << "PostStmt<ArraySubscriptExpr>\n";
}
+ void checkPreStmt(const CXXNewExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtCXXNewExpr"))
+ llvm::errs() << "PreStmt<CXXNewExpr>\n";
+ }
+
+ void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtCXXNewExpr"))
+ llvm::errs() << "PostStmt<CXXNewExpr>\n";
+ }
+
+ void checkPreStmt(const OffsetOfExpr *OOE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtOffsetOfExpr"))
+ llvm::errs() << "PreStmt<OffsetOfExpr>\n";
+ }
+
+ void checkPostStmt(const OffsetOfExpr *OOE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtOffsetOfExpr"))
+ llvm::errs() << "PostStmt<OffsetOfExpr>\n";
+ }
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreCall")) {
+ llvm::errs() << "PreCall";
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
+ llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << '\n';
+ }
+ }
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostCall")) {
+ llvm::errs() << "PostCall";
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Call.getDecl()))
+ llvm::errs() << " (" << ND->getQualifiedNameAsString() << ')';
+ llvm::errs() << '\n';
+ }
+ }
+
+ void checkNewAllocator(const CXXNewExpr *CNE, SVal Target,
+ CheckerContext &C) const {
+ if (isCallbackEnabled(C, "NewAllocator"))
+ llvm::errs() << "NewAllocator\n";
+ }
+
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
if (isCallbackEnabled(C, "Bind"))
llvm::errs() << "Bind\n";
}
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SymReaper) const {
+ if (isCallbackEnabled(State, "LiveSymbols"))
+ llvm::errs() << "LiveSymbols\n";
+ }
+
ProgramStateRef
checkRegionChanges(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
diff --git a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index 64c30e7a82c1..aadc6bac8d00 100644
--- a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -122,6 +122,8 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
E = CE.blocks_exhausted_end(); I != E; ++I) {
const BlockEdge &BE = I->first;
const CFGBlock *Exit = BE.getDst();
+ if (Exit->empty())
+ continue;
const CFGElement &CE = Exit->front();
if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
SmallString<128> bufI;
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index b944f90539d4..933380d494a4 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -33,8 +33,8 @@ class ArrayBoundCheckerV2 :
enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
- void reportOOB(CheckerContext &C, ProgramStateRef errorState,
- OOB_Kind kind) const;
+ void reportOOB(CheckerContext &C, ProgramStateRef errorState, OOB_Kind kind,
+ std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
public:
void checkLocation(SVal l, bool isLoad, const Stmt*S,
@@ -125,7 +125,6 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// have some flexibility in defining the base region, we can achieve
// various levels of conservatism in our buffer overflow checking.
ProgramStateRef state = checkerContext.getState();
- ProgramStateRef originalState = state;
SValBuilder &svalBuilder = checkerContext.getSValBuilder();
const RegionRawOffsetV2 &rawOffset =
@@ -205,8 +204,10 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// If we are under constrained and the index variables are tainted, report.
if (state_exceedsUpperBound && state_withinUpperBound) {
- if (state->isTainted(rawOffset.getByteOffset())) {
- reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
+ SVal ByteOffset = rawOffset.getByteOffset();
+ if (state->isTainted(ByteOffset)) {
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted,
+ llvm::make_unique<TaintBugVisitor>(ByteOffset));
return;
}
} else if (state_exceedsUpperBound) {
@@ -222,13 +223,12 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
}
while (false);
- if (state != originalState)
- checkerContext.addTransition(state);
+ checkerContext.addTransition(state);
}
-void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
- ProgramStateRef errorState,
- OOB_Kind kind) const {
+void ArrayBoundCheckerV2::reportOOB(
+ CheckerContext &checkerContext, ProgramStateRef errorState, OOB_Kind kind,
+ std::unique_ptr<BugReporterVisitor> Visitor) const {
ExplodedNode *errorNode = checkerContext.generateErrorNode(errorState);
if (!errorNode)
@@ -255,8 +255,9 @@ void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
break;
}
- checkerContext.emitReport(
- llvm::make_unique<BugReport>(*BT, os.str(), errorNode));
+ auto BR = llvm::make_unique<BugReport>(*BT, os.str(), errorNode);
+ BR->addVisitor(std::move(Visitor));
+ checkerContext.emitReport(std::move(BR));
}
#ifndef NDEBUG
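The reworked reportOOB threads an optional BugReporterVisitor through to the report: the tainted-index path passes a TaintBugVisitor so the diagnostic walks back to where the offset became tainted, while the other callers pass nothing. A condensed sketch of the pattern (BugReport::addVisitor is, to the best of my reading, a no-op for a null visitor, so the existing call sites need no changes):

    auto BR = llvm::make_unique<BugReport>(*BT, os.str(), errorNode);
    BR->addVisitor(std::move(Visitor)); // may be null; then nothing is attached
    checkerContext.emitReport(std::move(BR));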
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 371187747f03..7d6358acbbac 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -436,8 +436,7 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
return;
// Get the value of the "theType" argument.
- const LocationContext *LCtx = C.getLocationContext();
- SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
+ SVal TheTypeVal = C.getSVal(CE->getArg(1));
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
@@ -457,7 +456,7 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
// size of the type being created.
- SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
+ SVal TheValueExpr = C.getSVal(CE->getArg(2));
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
@@ -571,7 +570,7 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
// Get the argument's value.
const Expr *Arg = CE->getArg(0);
- SVal ArgVal = state->getSVal(Arg, C.getLocationContext());
+ SVal ArgVal = C.getSVal(Arg);
Optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
if (!DefArgVal)
return;
@@ -977,8 +976,7 @@ assumeCollectionNonEmpty(CheckerContext &C, ProgramStateRef State,
if (!State)
return nullptr;
- SymbolRef CollectionS =
- State->getSVal(FCS->getCollection(), C.getLocationContext()).getAsSymbol();
+ SymbolRef CollectionS = C.getSVal(FCS->getCollection()).getAsSymbol();
return assumeCollectionNonEmpty(C, State, CollectionS, Assumption);
}
@@ -1166,7 +1164,7 @@ void ObjCLoopChecker::checkDeadSymbols(SymbolReaper &SymReaper,
namespace {
/// \class ObjCNonNilReturnValueChecker
-/// \brief The checker restricts the return values of APIs known to
+/// The checker restricts the return values of APIs known to
/// never (or almost never) return 'nil'.
class ObjCNonNilReturnValueChecker
: public Checker<check::PostObjCMessage,
@@ -1206,7 +1204,7 @@ ProgramStateRef
ObjCNonNilReturnValueChecker::assumeExprIsNonNull(const Expr *NonNullExpr,
ProgramStateRef State,
CheckerContext &C) const {
- SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
+ SVal Val = C.getSVal(NonNullExpr);
if (Optional<DefinedOrUnknownSVal> DV = Val.getAs<DefinedOrUnknownSVal>())
return State->assume(*DV, true);
return State;
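
The repeated change in this file is purely a shorthand: CheckerContext::getSVal(E) looks E up in the current state with the current location context. A minimal sketch of the equivalence (illustration only; getArgVal is a made-up name):

#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include <cassert>

using namespace clang;
using namespace ento;

static SVal getArgVal(CheckerContext &C, const CallExpr *CE, unsigned Idx) {
  // The long spelling the patch removes...
  SVal Old = C.getState()->getSVal(CE->getArg(Idx), C.getLocationContext());
  // ...and the shorthand it switches to; both read the same binding.
  SVal New = C.getSVal(CE->getArg(Idx));
  assert(Old == New);
  return New;
}
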
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 097d4198800d..0e781d08e24c 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -43,7 +43,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
case Builtin::BI__builtin_assume: {
assert (CE->arg_begin() != CE->arg_end());
- SVal ArgSVal = state->getSVal(CE->getArg(0), LCtx);
+ SVal ArgSVal = C.getSVal(CE->getArg(0));
if (ArgSVal.isUndef())
return true; // Return true to model purity.
@@ -68,7 +68,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// __builtin_addressof is going from a reference to a pointer, but those
// are represented the same way in the analyzer.
assert (CE->arg_begin() != CE->arg_end());
- SVal X = state->getSVal(*(CE->arg_begin()), LCtx);
+ SVal X = C.getSVal(*(CE->arg_begin()));
C.addTransition(state->BindExpr(CE, LCtx, X));
return true;
}
@@ -83,8 +83,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
// cannot represent values like symbol*8.
- DefinedOrUnknownSVal Size =
- state->getSVal(*(CE->arg_begin()), LCtx).castAs<DefinedOrUnknownSVal>();
+ auto Size = C.getSVal(*(CE->arg_begin())).castAs<DefinedOrUnknownSVal>();
SValBuilder& svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
@@ -97,7 +96,8 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
return true;
}
- case Builtin::BI__builtin_object_size: {
+ case Builtin::BI__builtin_object_size:
+ case Builtin::BI__builtin_constant_p: {
// This must be resolvable at compile time, so we defer to the constant
// evaluator for a value.
SVal V = UnknownVal();
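
Both case labels now share one strategy: a value that must be a compile-time constant is obtained from the AST constant evaluator rather than modeled symbolically. A minimal sketch of that strategy, using the Expr::EvaluateAsInt signature of this era (illustration only; evalCompileTimeBuiltin is a made-up helper):

#include "clang/AST/Expr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/APSInt.h"

using namespace clang;
using namespace ento;

static SVal evalCompileTimeBuiltin(const CallExpr *CE, CheckerContext &C) {
  llvm::APSInt Result;
  // If the frontend can fold the call, use the folded value...
  if (CE->EvaluateAsInt(Result, C.getASTContext()))
    return C.getSValBuilder().makeIntVal(Result);
  // ...otherwise (e.g. __builtin_constant_p on a symbol) treat it as unknown.
  return UnknownVal();
}
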
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index 7ab9c6114eae..5bb4770b5675 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -37,9 +37,11 @@ add_clang_library(clangStaticAnalyzerCheckers
DynamicTypeChecker.cpp
ExprInspectionChecker.cpp
FixedAddressChecker.cpp
+ GCDAntipatternChecker.cpp
GenericTaintChecker.cpp
GTestChecker.cpp
IdenticalExprChecker.cpp
+ InnerPointerChecker.cpp
IteratorChecker.cpp
IvarInvalidationChecker.cpp
LLVMConventionsChecker.cpp
@@ -49,6 +51,7 @@ add_clang_library(clangStaticAnalyzerCheckers
MallocChecker.cpp
MallocOverflowSecurityChecker.cpp
MallocSizeofChecker.cpp
+ MmapWriteExecChecker.cpp
MisusedMovedObjectChecker.cpp
MPI-Checker/MPIBugReporter.cpp
MPI-Checker/MPIChecker.cpp
@@ -61,6 +64,7 @@ add_clang_library(clangStaticAnalyzerCheckers
NullabilityChecker.cpp
NumberObjectConversionChecker.cpp
ObjCAtSyncChecker.cpp
+ ObjCAutoreleaseWriteChecker.cpp
ObjCContainersASTChecker.cpp
ObjCContainersChecker.cpp
ObjCMissingSuperCallChecker.cpp
@@ -75,6 +79,7 @@ add_clang_library(clangStaticAnalyzerCheckers
RetainCountChecker.cpp
ReturnPointerRangeChecker.cpp
ReturnUndefChecker.cpp
+ RunLoopAutoreleaseLeakChecker.cpp
SimpleStreamChecker.cpp
StackAddrEscapeChecker.cpp
StdLibraryFunctionsChecker.cpp
@@ -82,11 +87,13 @@ add_clang_library(clangStaticAnalyzerCheckers
TaintTesterChecker.cpp
TestAfterDivZeroChecker.cpp
TraversalChecker.cpp
+ TrustNonnullChecker.cpp
UndefBranchChecker.cpp
UndefCapturedBlockVarChecker.cpp
UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
+ UninitializedObjectChecker.cpp
UnixAPIChecker.cpp
UnreachableCodeChecker.cpp
VforkChecker.cpp
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 28ad7e9e5071..278452ec994a 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -97,14 +97,17 @@ public:
void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
void evalStrncpy(CheckerContext &C, const CallExpr *CE) const;
void evalStpcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrlcpy(CheckerContext &C, const CallExpr *CE) const;
void evalStrcpyCommon(CheckerContext &C,
const CallExpr *CE,
bool returnEnd,
bool isBounded,
- bool isAppending) const;
+ bool isAppending,
+ bool returnPtr = true) const;
void evalStrcat(CheckerContext &C, const CallExpr *CE) const;
void evalStrncat(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrlcat(CheckerContext &C, const CallExpr *CE) const;
void evalStrcmp(CheckerContext &C, const CallExpr *CE) const;
void evalStrncmp(CheckerContext &C, const CallExpr *CE) const;
@@ -155,6 +158,10 @@ public:
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
+ static bool memsetAux(const Expr *DstBuffer, const Expr *CharE,
+ const Expr *Size, CheckerContext &C,
+ ProgramStateRef &State);
+
// Re-usable checks
ProgramStateRef checkNonNull(CheckerContext &C,
ProgramStateRef state,
@@ -194,6 +201,14 @@ public:
const Stmt *First,
const Stmt *Second) const;
+ void emitNullArgBug(CheckerContext &C, ProgramStateRef State, const Stmt *S,
+ StringRef WarningMsg) const;
+ void emitOutOfBoundsBug(CheckerContext &C, ProgramStateRef State,
+ const Stmt *S, StringRef WarningMsg) const;
+ void emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
+ const Stmt *S, StringRef WarningMsg) const;
+ void emitAdditionOverflowBug(CheckerContext &C, ProgramStateRef State) const;
+
ProgramStateRef checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
@@ -239,30 +254,14 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
std::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
if (stateNull && !stateNonNull) {
- if (!Filter.CheckCStringNullArg)
- return nullptr;
-
- ExplodedNode *N = C.generateErrorNode(stateNull);
- if (!N)
- return nullptr;
-
- if (!BT_Null)
- BT_Null.reset(new BuiltinBug(
- Filter.CheckNameCStringNullArg, categories::UnixAPI,
- "Null pointer argument in call to byte string function"));
-
- SmallString<80> buf;
- llvm::raw_svector_ostream os(buf);
- assert(CurrentFunctionDescription);
- os << "Null pointer argument in call to " << CurrentFunctionDescription;
-
- // Generate a report for this bug.
- BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null.get());
- auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ if (Filter.CheckCStringNullArg) {
+ SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ assert(CurrentFunctionDescription);
+ os << "Null pointer argument in call to " << CurrentFunctionDescription;
- report->addRange(S->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, S, *report);
- C.emitReport(std::move(report));
+ emitNullArgBug(C, stateNull, S, os.str());
+ }
return nullptr;
}
@@ -305,21 +304,14 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
if (StOutBound && !StInBound) {
- ExplodedNode *N = C.generateErrorNode(StOutBound);
- if (!N)
+ // These checks are enabled either explicitly by the CString out-of-bounds
+ // checker or implicitly by the Malloc checker.
+ // In the latter case we only do modeling but do not emit a warning.
+ if (!Filter.CheckCStringOutOfBounds)
return nullptr;
-
- if (!BT_Bounds) {
- BT_Bounds.reset(new BuiltinBug(
- Filter.CheckNameCStringOutOfBounds, "Out-of-bound array access",
- "Byte string function accesses out-of-bound array element"));
- }
- BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
-
- // Generate a report for this bug.
- std::unique_ptr<BugReport> report;
+ // Emit a bug report.
if (warningMsg) {
- report = llvm::make_unique<BugReport>(*BT, warningMsg, N);
+ emitOutOfBoundsBug(C, StOutBound, S, warningMsg);
} else {
assert(CurrentFunctionDescription);
assert(CurrentFunctionDescription[0] != '\0');
@@ -329,15 +321,8 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
os << toUppercase(CurrentFunctionDescription[0])
<< &CurrentFunctionDescription[1]
<< " accesses out-of-bound array element";
- report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ emitOutOfBoundsBug(C, StOutBound, S, os.str());
}
-
- // FIXME: It would be nice to eventually make this diagnostic more clear,
- // e.g., by referencing the original declaration or by saying *why* this
- // reference is outside the range.
-
- report->addRange(S->getSourceRange());
- C.emitReport(std::move(report));
return nullptr;
}
@@ -366,7 +351,7 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
// Check that the first buffer is non-null.
- SVal BufVal = state->getSVal(FirstBuf, LCtx);
+ SVal BufVal = C.getSVal(FirstBuf);
state = checkNonNull(C, state, FirstBuf, BufVal);
if (!state)
return nullptr;
@@ -378,15 +363,17 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
// Get the access length and make sure it is known.
// FIXME: This assumes the caller has already checked that the access length
// is positive. And that it's unsigned.
- SVal LengthVal = state->getSVal(Size, LCtx);
+ SVal LengthVal = C.getSVal(Size);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return state;
// Compute the offset of the last element to be accessed: size-1.
NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
- NonLoc LastOffset = svalBuilder
- .evalBinOpNN(state, BO_Sub, *Length, One, sizeTy).castAs<NonLoc>();
+ SVal Offset = svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy);
+ if (Offset.isUnknown())
+ return nullptr;
+ NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
@@ -555,6 +542,79 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
C.emitReport(std::move(report));
}
+void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
+ const Stmt *S, StringRef WarningMsg) const {
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ if (!BT_Null)
+ BT_Null.reset(new BuiltinBug(
+ Filter.CheckNameCStringNullArg, categories::UnixAPI,
+ "Null pointer argument in call to byte string function"));
+
+ BuiltinBug *BT = static_cast<BuiltinBug *>(BT_Null.get());
+ auto Report = llvm::make_unique<BugReport>(*BT, WarningMsg, N);
+ Report->addRange(S->getSourceRange());
+ bugreporter::trackNullOrUndefValue(N, S, *Report);
+ C.emitReport(std::move(Report));
+ }
+}
+
+void CStringChecker::emitOutOfBoundsBug(CheckerContext &C,
+ ProgramStateRef State, const Stmt *S,
+ StringRef WarningMsg) const {
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ if (!BT_Bounds)
+ BT_Bounds.reset(new BuiltinBug(
+ Filter.CheckCStringOutOfBounds ? Filter.CheckNameCStringOutOfBounds
+ : Filter.CheckNameCStringNullArg,
+ "Out-of-bound array access",
+ "Byte string function accesses out-of-bound array element"));
+
+ BuiltinBug *BT = static_cast<BuiltinBug *>(BT_Bounds.get());
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+ auto Report = llvm::make_unique<BugReport>(*BT, WarningMsg, N);
+ Report->addRange(S->getSourceRange());
+ C.emitReport(std::move(Report));
+ }
+}
+
+void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
+ const Stmt *S,
+ StringRef WarningMsg) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ if (!BT_NotCString)
+ BT_NotCString.reset(new BuiltinBug(
+ Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
+ "Argument is not a null-terminated string."));
+
+ auto Report = llvm::make_unique<BugReport>(*BT_NotCString, WarningMsg, N);
+
+ Report->addRange(S->getSourceRange());
+ C.emitReport(std::move(Report));
+ }
+}
+
+void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
+ ProgramStateRef State) const {
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ if (!BT_AdditionOverflow)
+ BT_AdditionOverflow.reset(
+ new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
+ "Sum of expressions causes overflow."));
+
+ // This isn't a great error message, but this should never occur in real
+ // code anyway -- you'd have to create a buffer longer than a size_t can
+ // represent, which is sort of a contradiction.
+ const char *WarningMsg =
+ "This expression will create a string whose length is too big to "
+ "be represented as a size_t";
+
+ auto Report = llvm::make_unique<BugReport>(*BT_AdditionOverflow, WarningMsg, N);
+ C.emitReport(std::move(Report));
+ }
+}
+
ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
@@ -598,26 +658,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
if (stateOverflow && !stateOkay) {
// We have an overflow. Emit a bug report.
- ExplodedNode *N = C.generateErrorNode(stateOverflow);
- if (!N)
- return nullptr;
-
- if (!BT_AdditionOverflow)
- BT_AdditionOverflow.reset(
- new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
- "Sum of expressions causes overflow"));
-
- // This isn't a great error message, but this should never occur in real
- // code anyway -- you'd have to create a buffer longer than a size_t can
- // represent, which is sort of a contradiction.
- const char *warning =
- "This expression will create a string whose length is too big to "
- "be represented as a size_t";
-
- // Generate a report for this bug.
- C.emitReport(
- llvm::make_unique<BugReport>(*BT_AdditionOverflow, warning, N));
-
+ emitAdditionOverflowBug(C, stateOverflow);
return nullptr;
}
@@ -717,15 +758,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
// C string. In the context of locations, the only time we can issue such
// a warning is for labels.
if (Optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
- if (!Filter.CheckCStringNotNullTerm)
- return UndefinedVal();
-
- if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
- if (!BT_NotCString)
- BT_NotCString.reset(new BuiltinBug(
- Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
- "Argument is not a null-terminated string."));
-
+ if (Filter.CheckCStringNotNullTerm) {
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
@@ -733,14 +766,9 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
<< " is the address of the label '" << Label->getLabel()->getName()
<< "', which is not a null-terminated string";
- // Generate a report for this bug.
- auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
-
- report->addRange(Ex->getSourceRange());
- C.emitReport(std::move(report));
+ emitNotCStringBug(C, state, Ex, os.str());
}
return UndefinedVal();
-
}
// If it's not a region and not a label, give up.
@@ -777,15 +805,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
// Other regions (mostly non-data) can't have a reliable C string length.
// In this case, an error is emitted and UndefinedVal is returned.
// The caller should always be prepared to handle this case.
- if (!Filter.CheckCStringNotNullTerm)
- return UndefinedVal();
-
- if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
- if (!BT_NotCString)
- BT_NotCString.reset(new BuiltinBug(
- Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
- "Argument is not a null-terminated string."));
-
+ if (Filter.CheckCStringNotNullTerm) {
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
@@ -797,13 +817,8 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
else
os << "not a null-terminated string";
- // Generate a report for this bug.
- auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
-
- report->addRange(Ex->getSourceRange());
- C.emitReport(std::move(report));
+ emitNotCStringBug(C, state, Ex, os.str());
}
-
return UndefinedVal();
}
}
@@ -852,9 +867,10 @@ bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
// Compute the offset of the last element to be accessed: size-1.
NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
- NonLoc LastOffset =
- svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy)
- .castAs<NonLoc>();
+ SVal Offset = svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy);
+ if (Offset.isUnknown())
+ return true; // see the comment at the top of this function
+ NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
@@ -987,6 +1003,95 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
}
}
+bool CStringChecker::memsetAux(const Expr *DstBuffer, const Expr *CharE,
+ const Expr *Size, CheckerContext &C,
+ ProgramStateRef &State) {
+ SVal MemVal = C.getSVal(DstBuffer);
+ SVal CharVal = C.getSVal(CharE);
+ SVal SizeVal = C.getSVal(Size);
+ const MemRegion *MR = MemVal.getAsRegion();
+ if (!MR)
+ return false;
+
+ // We're about to model memset by producing a "default binding" in the Store.
+ // Our current implementation - RegionStore - doesn't support default bindings
+ // that don't cover the whole base region. So we first get the offset and
+ // the base region to figure out whether the buffer's offset is 0.
+ RegionOffset Offset = MR->getAsOffset();
+ const MemRegion *BR = Offset.getRegion();
+
+ Optional<NonLoc> SizeNL = SizeVal.getAs<NonLoc>();
+ if (!SizeNL)
+ return false;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = C.getASTContext();
+
+ // void *memset(void *dest, int ch, size_t count);
+ // For now we can only handle the case where the offset is 0 and the char
+ // value is concrete.
+ if (Offset.isValid() && !Offset.hasSymbolicOffset() &&
+ Offset.getOffset() == 0) {
+ // Get the base region's extent.
+ auto *SubReg = cast<SubRegion>(BR);
+ DefinedOrUnknownSVal Extent = SubReg->getExtent(svalBuilder);
+
+ ProgramStateRef StateWholeReg, StateNotWholeReg;
+ std::tie(StateWholeReg, StateNotWholeReg) =
+ State->assume(svalBuilder.evalEQ(State, Extent, *SizeNL));
+
+ // With the semantic of 'memset()', we should convert the CharVal to
+ // unsigned char.
+ CharVal = svalBuilder.evalCast(CharVal, Ctx.UnsignedCharTy, Ctx.IntTy);
+
+ ProgramStateRef StateNullChar, StateNonNullChar;
+ std::tie(StateNullChar, StateNonNullChar) =
+ assumeZero(C, State, CharVal, Ctx.UnsignedCharTy);
+
+ if (StateWholeReg && !StateNotWholeReg && StateNullChar &&
+ !StateNonNullChar) {
+ // If the 'memset()' acts on the whole region of the destination buffer and
+ // the value of its second argument is zero, bind the second argument's
+ // value to the destination buffer with a 'default binding'.
+ // FIXME: Since there is no perfect way to bind the non-zero character, we
+ // can only deal with zero value here. In the future, we need to deal with
+ // the binding of non-zero value in the case of whole region.
+ State = State->bindDefaultZero(svalBuilder.makeLoc(BR),
+ C.getLocationContext());
+ } else {
+ // If the destination buffer's extent is not equal to the value of the
+ // third argument, just invalidate the buffer.
+ State = InvalidateBuffer(C, State, DstBuffer, MemVal,
+ /*IsSourceBuffer*/ false, Size);
+ }
+
+ if (StateNullChar && !StateNonNullChar) {
+ // If the value of the second argument of 'memset()' is zero, set the
+ // string length of destination buffer to 0 directly.
+ State = setCStringLength(State, MR,
+ svalBuilder.makeZeroVal(Ctx.getSizeType()));
+ } else if (!StateNullChar && StateNonNullChar) {
+ SVal NewStrLen = svalBuilder.getMetadataSymbolVal(
+ CStringChecker::getTag(), MR, DstBuffer, Ctx.getSizeType(),
+ C.getLocationContext(), C.blockCount());
+
+ // If the value of the second argument is not zero, then the string length
+ // is at least the size argument.
+ SVal NewStrLenGESize = svalBuilder.evalBinOp(
+ State, BO_GE, NewStrLen, SizeVal, svalBuilder.getConditionType());
+
+ State = setCStringLength(
+ State->assume(NewStrLenGESize.castAs<DefinedOrUnknownSVal>(), true),
+ MR, NewStrLen);
+ }
+ } else {
+ // If the offset is not zero or cannot be determined, we can do
+ // nothing but invalidate the buffer.
+ State = InvalidateBuffer(C, State, DstBuffer, MemVal,
+ /*IsSourceBuffer*/ false, Size);
+ }
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// evaluation of individual function calls.
//===----------------------------------------------------------------------===//
@@ -1384,6 +1489,18 @@ void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
/* isAppending = */ false);
}
+void CStringChecker::evalStrlcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // char *strlcpy(char *dst, const char *src, size_t n);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ true,
+ /* isBounded = */ true,
+ /* isAppending = */ false,
+ /* returnPtr = */ false);
+}
+
void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
@@ -1406,9 +1523,21 @@ void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
/* isAppending = */ true);
}
+void CStringChecker::evalStrlcat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // char *strlcat(char *s1, const char *s2, size_t n);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ true,
+ /* isAppending = */ true,
+ /* returnPtr = */ false);
+}
+
void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
bool returnEnd, bool isBounded,
- bool isAppending) const {
+ bool isAppending, bool returnPtr) const {
CurrentFunctionDescription = "string copy function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
@@ -1446,6 +1575,11 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
SVal maxLastElementIndex = UnknownVal();
const char *boundWarning = nullptr;
+ state = CheckOverlap(C, state, isBounded ? CE->getArg(2) : CE->getArg(1),
+ Dst, srcExpr);
+
+ if (!state)
+ return;
+
// If the function is strncpy, strncat, etc... it is bounded.
if (isBounded) {
// Get the max number of characters to copy.
@@ -1518,7 +1652,11 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the size is known to be zero, we're done.
if (StateZeroSize && !StateNonZeroSize) {
- StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
+ if (returnPtr) {
+ StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
+ } else {
+ StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, *lenValNL);
+ }
C.addTransition(StateZeroSize);
return;
}
@@ -1649,16 +1787,22 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
finalStrLength = amountCopied;
}
- // The final result of the function will either be a pointer past the last
- // copied element, or a pointer to the start of the destination buffer.
- SVal Result = (returnEnd ? UnknownVal() : DstVal);
+ SVal Result;
+
+ if (returnPtr) {
+ // The final result of the function will either be a pointer past the last
+ // copied element, or a pointer to the start of the destination buffer.
+ Result = (returnEnd ? UnknownVal() : DstVal);
+ } else {
+ Result = finalStrLength;
+ }
assert(state);
// If the destination is a MemRegion, try to check for a buffer overflow and
// record the new string length.
if (Optional<loc::MemRegionVal> dstRegVal =
- DstVal.getAs<loc::MemRegionVal>()) {
+ DstVal.getAs<loc::MemRegionVal>()) {
QualType ptrTy = Dst->getType();
// If we have an exact value on a bounded copy, use that to check for
@@ -1666,9 +1810,9 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
if (boundWarning) {
if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
- *maxLastNL, ptrTy);
+ *maxLastNL, ptrTy);
state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
- boundWarning);
+ boundWarning);
if (!state)
return;
}
@@ -1677,7 +1821,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// Then, if the final length is known...
if (Optional<NonLoc> knownStrLength = finalStrLength.getAs<NonLoc>()) {
SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
- *knownStrLength, ptrTy);
+ *knownStrLength, ptrTy);
// ...and we haven't checked the bound, we'll check the actual copy.
if (!boundWarning) {
@@ -1689,7 +1833,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
}
// If this is a stpcpy-style copy, the last element is the return value.
- if (returnEnd)
+ if (returnPtr && returnEnd)
Result = lastElement;
}
@@ -1701,12 +1845,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
state = InvalidateBuffer(C, state, Dst, *dstRegVal,
- /*IsSourceBuffer*/false, nullptr);
+ /*IsSourceBuffer*/false, nullptr);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true,
- nullptr);
+ nullptr);
// Set the C string length of the destination, if we know it.
if (isBounded && !isAppending) {
@@ -1722,12 +1866,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
assert(state);
- // If this is a stpcpy-style copy, but we were unable to check for a buffer
- // overflow, we still need a result. Conjure a return value.
- if (returnEnd && Result.isUnknown()) {
- Result = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ if (returnPtr) {
+ // If this is a stpcpy-style copy, but we were unable to check for a buffer
+ // overflow, we still need a result. Conjure a return value.
+ if (returnEnd && Result.isUnknown()) {
+ Result = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ }
}
-
// Set the return value.
state = state->BindExpr(CE, LCtx, Result);
C.addTransition(state);
@@ -1750,7 +1895,7 @@ void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrcasecmp(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
@@ -1759,7 +1904,7 @@ void CStringChecker::evalStrcasecmp(CheckerContext &C,
}
void CStringChecker::evalStrncasecmp(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
@@ -1768,7 +1913,7 @@ void CStringChecker::evalStrncasecmp(CheckerContext &C,
}
void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
- bool isBounded, bool ignoreCase) const {
+ bool isBounded, bool ignoreCase) const {
CurrentFunctionDescription = "string comparison function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
@@ -1813,7 +1958,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// and we only need to check one size.
if (StSameBuf) {
StSameBuf = StSameBuf->BindExpr(CE, LCtx,
- svalBuilder.makeZeroVal(CE->getType()));
+ svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(StSameBuf);
// If the two arguments are GUARANTEED to be the same, we're done!
@@ -1832,7 +1977,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
bool canComputeResult = false;
SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
- C.blockCount());
+ C.blockCount());
if (s1StrLiteral && s2StrLiteral) {
StringRef s1StrRef = s1StrLiteral->getString();
@@ -1867,7 +2012,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// Use StringRef's comparison methods to compute the actual result.
int compareRes = ignoreCase ? s1StrRef.compare_lower(s2StrRef)
- : s1StrRef.compare(s2StrRef);
+ : s1StrRef.compare(s2StrRef);
// The strcmp function returns an integer greater than, equal to, or less
// than zero, [c11, p7.24.4.2].
@@ -1881,7 +2026,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
BinaryOperatorKind op = (compareRes == 1) ? BO_GT : BO_LT;
SVal compareWithZero =
svalBuilder.evalBinOp(state, op, resultVal, zeroVal,
- svalBuilder.getConditionType());
+ svalBuilder.getConditionType());
DefinedSVal compareWithZeroVal = compareWithZero.castAs<DefinedSVal>();
state = state->assume(compareWithZeroVal, true);
}
@@ -1933,17 +2078,17 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
State = InvalidateBuffer(C, State, SearchStrPtr, Result,
- /*IsSourceBuffer*/false, nullptr);
+ /*IsSourceBuffer*/false, nullptr);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
State = State->bindLoc(*SearchStrLoc,
- SVB.conjureSymbolVal(getTag(),
- CE,
- LCtx,
- CharPtrTy,
- C.blockCount()),
- LCtx);
+ SVB.conjureSymbolVal(getTag(),
+ CE,
+ LCtx,
+ CharPtrTy,
+ C.blockCount()),
+ LCtx);
} else {
assert(SearchStrVal.isUnknown());
// Conjure a symbolic value. It's the best we can do.
@@ -1961,12 +2106,12 @@ void CStringChecker::evalStdCopy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStdCopyBackward(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallExpr *CE) const {
evalStdCopyCommon(C, CE);
}
void CStringChecker::evalStdCopyCommon(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
@@ -1983,7 +2128,7 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
const Expr *Dst = CE->getArg(2);
SVal DstVal = State->getSVal(Dst, LCtx);
State = InvalidateBuffer(C, State, Dst, DstVal, /*IsSource=*/false,
- /*Size=*/nullptr);
+ /*Size=*/nullptr);
SValBuilder &SVB = C.getSValBuilder();
@@ -2000,6 +2145,7 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
CurrentFunctionDescription = "memory set function";
const Expr *Mem = CE->getArg(0);
+ const Expr *CharE = CE->getArg(1);
const Expr *Size = CE->getArg(2);
ProgramStateRef State = C.getState();
@@ -2032,9 +2178,11 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
State = CheckBufferAccess(C, State, Size, Mem);
if (!State)
return;
- State = InvalidateBuffer(C, State, Mem, C.getSVal(Mem),
- /*IsSourceBuffer*/false, Size);
- if (!State)
+
+ // Depending on the argument values, bind the value of the second argument
+ // to the destination buffer and set the string length, or just invalidate
+ // the destination buffer.
+ if (!memsetAux(Mem, CharE, Size, C, State))
return;
State = State->BindExpr(CE, LCtx, MemVal);
@@ -2082,10 +2230,14 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
evalFunction = &CStringChecker::evalStrncpy;
else if (C.isCLibraryFunction(FDecl, "stpcpy"))
evalFunction = &CStringChecker::evalStpcpy;
+ else if (C.isCLibraryFunction(FDecl, "strlcpy"))
+ evalFunction = &CStringChecker::evalStrlcpy;
else if (C.isCLibraryFunction(FDecl, "strcat"))
evalFunction = &CStringChecker::evalStrcat;
else if (C.isCLibraryFunction(FDecl, "strncat"))
evalFunction = &CStringChecker::evalStrncat;
+ else if (C.isCLibraryFunction(FDecl, "strlcat"))
+ evalFunction = &CStringChecker::evalStrlcat;
else if (C.isCLibraryFunction(FDecl, "strlen"))
evalFunction = &CStringChecker::evalstrLength;
else if (C.isCLibraryFunction(FDecl, "strnlen"))
@@ -2149,10 +2301,10 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!MR)
continue;
- SVal StrVal = state->getSVal(Init, C.getLocationContext());
+ SVal StrVal = C.getSVal(Init);
assert(StrVal.isValid() && "Initializer string is unknown or undefined");
DefinedOrUnknownSVal strLength =
- getCStringLength(C, state, Init, StrVal).castAs<DefinedOrUnknownSVal>();
+ getCStringLength(C, state, Init, StrVal).castAs<DefinedOrUnknownSVal>();
state = state->set<CStringLength>(MR, strLength);
}
@@ -2162,11 +2314,11 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
ProgramStateRef
CStringChecker::checkRegionChanges(ProgramStateRef state,
- const InvalidatedSymbols *,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
- const LocationContext *LCtx,
- const CallEvent *Call) const {
+ const InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx,
+ const CallEvent *Call) const {
CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return state;
@@ -2176,7 +2328,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
// First build sets for the changed regions and their super-regions.
for (ArrayRef<const MemRegion *>::iterator
- I = Regions.begin(), E = Regions.end(); I != E; ++I) {
+ I = Regions.begin(), E = Regions.end(); I != E; ++I) {
const MemRegion *MR = *I;
Invalidated.insert(MR);
@@ -2191,7 +2343,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
// Then loop over the entries in the current state.
for (CStringLengthTy::iterator I = Entries.begin(),
- E = Entries.end(); I != E; ++I) {
+ E = Entries.end(); I != E; ++I) {
const MemRegion *MR = I.getKey();
// Is this entry for a super-region of a changed region?
@@ -2215,22 +2367,22 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
}
void CStringChecker::checkLiveSymbols(ProgramStateRef state,
- SymbolReaper &SR) const {
+ SymbolReaper &SR) const {
// Mark all symbols in our string length map as valid.
CStringLengthTy Entries = state->get<CStringLength>();
for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
+ I != E; ++I) {
SVal Len = I.getData();
for (SymExpr::symbol_iterator si = Len.symbol_begin(),
- se = Len.symbol_end(); si != se; ++si)
+ se = Len.symbol_end(); si != se; ++si)
SR.markInUse(*si);
}
}
void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
- CheckerContext &C) const {
+ CheckerContext &C) const {
if (!SR.hasDeadSymbols())
return;
@@ -2241,7 +2393,7 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
+ I != E; ++I) {
SVal Len = I.getData();
if (SymbolRef Sym = Len.getAsSymbol()) {
if (SR.isDead(Sym))
@@ -2260,11 +2412,11 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
}
-REGISTER_CHECKER(CStringNullArg)
-REGISTER_CHECKER(CStringOutOfBounds)
-REGISTER_CHECKER(CStringBufferOverlap)
+REGISTER_CHECKER(CStringNullArg)
+REGISTER_CHECKER(CStringOutOfBounds)
+REGISTER_CHECKER(CStringBufferOverlap)
REGISTER_CHECKER(CStringNotNullTerm)
-void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
- registerCStringNullArg(Mgr);
-}
+void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
+ Mgr.registerChecker<CStringChecker>();
+}
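
The new returnPtr flag exists because the BSD strlcpy/strlcat, unlike strcpy/strcat, return a length (the size of the string they tried to create) rather than a pointer into the destination; evalStrcpyCommon therefore binds finalStrLength instead of DstVal for them. A minimal usage sketch (illustration only; strlcpy is assumed to come from the platform's string.h, as on the BSDs):

#include <string.h>

// strlcpy returns strlen(src), so comparing the result against the bound is
// the idiomatic truncation check that the modeled semantics support.
static bool copyWasTruncated(char *dst, size_t dstsize, const char *src) {
  size_t n = strlcpy(dst, src, dstsize);
  return n >= dstsize;
}
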
diff --git a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index 4b5e97b69295..8b4aa857e775 100644
--- a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -80,6 +80,18 @@ class WalkAST: public StmtVisitor<WalkAST> {
/// of bytes to copy.
bool containsBadStrncatPattern(const CallExpr *CE);
+ /// Identify erroneous patterns in the last argument to strlcpy - the number
+ /// of bytes to copy. The bad pattern is a size that is known to be larger
+ /// than the destination buffer can handle, e.g.:
+ /// char dst[2];
+ /// size_t cpy = 4;
+ /// strlcpy(dst, "abcd", sizeof("abcd") - 1);
+ /// strlcpy(dst, "abcd", 4);
+ /// strlcpy(dst + 3, "abcd", 2);
+ /// strlcpy(dst, "abcd", cpy);
+ bool containsBadStrlcpyPattern(const CallExpr *CE);
+
public:
WalkAST(const CheckerBase *Checker, BugReporter &BR, AnalysisDeclContext *AC)
: Checker(Checker), BR(BR), AC(AC) {}
@@ -130,6 +142,54 @@ bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
return false;
}
+bool WalkAST::containsBadStrlcpyPattern(const CallExpr *CE) {
+ if (CE->getNumArgs() != 3)
+ return false;
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *LenArg = CE->getArg(2);
+
+ const auto *DstArgDecl = dyn_cast<DeclRefExpr>(DstArg->IgnoreParenImpCasts());
+ const auto *LenArgDecl = dyn_cast<DeclRefExpr>(LenArg->IgnoreParenLValueCasts());
+ uint64_t DstOff = 0;
+ // Case: size_t dstlen = sizeof(dst); strlcpy(dst, src, dstlen);
+ // Follow the initializer of a local length variable, if there is one,
+ // guarding against the referenced declaration not being a VarDecl.
+ if (LenArgDecl) {
+ if (const auto *LenArgVal = dyn_cast<VarDecl>(LenArgDecl->getDecl()))
+ if (LenArgVal->getInit())
+ LenArg = LenArgVal->getInit();
+ }
+
+ // Case: the bound is an integral constant.
+ // Try to figure out whether the bound can be larger than the destination
+ // can handle, when the destination's size can be determined.
+ if (const auto *IL = dyn_cast<IntegerLiteral>(LenArg->IgnoreParenImpCasts())) {
+ uint64_t ILRawVal = IL->getValue().getZExtValue();
+
+ // Case: pointer arithmetic on the destination buffer (e.g. dst + 3),
+ // where offsetting from the base decreases the usable buffer
+ // length accordingly.
+ if (!DstArgDecl) {
+ if (const auto *BE = dyn_cast<BinaryOperator>(DstArg->IgnoreParenImpCasts())) {
+ DstArgDecl = dyn_cast<DeclRefExpr>(BE->getLHS()->IgnoreParenImpCasts());
+ if (BE->getOpcode() == BO_Add) {
+ if ((IL = dyn_cast<IntegerLiteral>(BE->getRHS()->IgnoreParenImpCasts()))) {
+ DstOff = IL->getValue().getZExtValue();
+ }
+ }
+ }
+ }
+ if (DstArgDecl) {
+ if (const auto *Buffer = dyn_cast<ConstantArrayType>(DstArgDecl->getType())) {
+ ASTContext &C = BR.getContext();
+ uint64_t BufferLen = C.getTypeSize(Buffer) / 8;
+ if ((BufferLen - DstOff) < ILRawVal)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
void WalkAST::VisitCallExpr(CallExpr *CE) {
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
@@ -159,6 +219,25 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
"C String API", os.str(), Loc,
LenArg->getSourceRange());
}
+ } else if (CheckerContext::isCLibraryFunction(FD, "strlcpy")) {
+ if (containsBadStrlcpyPattern(CE)) {
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *LenArg = CE->getArg(2);
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::createBegin(LenArg, BR.getSourceManager(), AC);
+
+ StringRef DstName = getPrintableName(DstArg);
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "The third argument is larger than the size of the input buffer. ";
+ if (!DstName.empty())
+ os << "Replace with the value 'sizeof(" << DstName << ")` or lower";
+
+ BR.EmitBasicReport(FD, Checker, "Anti-pattern in the argument",
+ "C String API", os.str(), Loc,
+ LenArg->getSourceRange());
+ }
}
// Recurse and check children.
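
Concretely, the new syntactic check fires on bounds that are provably larger than what the destination (minus any constant offset) can hold, per the patterns listed in the doc comment above. A short sketch of code it is meant to flag (illustration only; note the first two calls really do overflow at run time):

#include <string.h>

static void demo() {
  char dst[2];
  strlcpy(dst, "abcd", 4);           // warns: 4 > sizeof(dst)
  strlcpy(dst + 1, "abcd", 2);       // warns: 2 > sizeof(dst) - 1
  strlcpy(dst, "abcd", sizeof(dst)); // fine: bound matches the destination
}
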
diff --git a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
index 668e772fe1b3..d1d37c75dfcc 100644
--- a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
@@ -48,7 +48,7 @@ void CXXSelfAssignmentChecker::checkBeginFunction(CheckerContext &C) const {
auto &State = C.getState();
auto &SVB = C.getSValBuilder();
auto ThisVal =
- State->getSVal(SVB.getCXXThis(MD, LCtx->getCurrentStackFrame()));
+ State->getSVal(SVB.getCXXThis(MD, LCtx->getStackFrame()));
auto Param = SVB.makeLoc(State->getRegion(MD->getParamDecl(0), LCtx));
auto ParamVal = State->getSVal(Param);
ProgramStateRef SelfAssignState = State->bindLoc(Param, ThisVal, LCtx);
diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 3e178152d925..059553b21995 100644
--- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -101,7 +101,7 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
return;
ProgramStateRef state = C.getState();
- const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
+ const MemRegion *R = C.getSVal(E).getAsRegion();
if (!R)
return;
diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 65e81315f095..00e903355720 100644
--- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -78,7 +78,7 @@ bool CastToStructVisitor::VisitCastExpr(const CastExpr *CE) {
// Don't warn for references
const ValueDecl *VD = nullptr;
if (const auto *SE = dyn_cast<DeclRefExpr>(U->getSubExpr()))
- VD = dyn_cast<ValueDecl>(SE->getDecl());
+ VD = SE->getDecl();
else if (const auto *SE = dyn_cast<MemberExpr>(U->getSubExpr()))
VD = SE->getMemberDecl();
if (!VD || VD->getType()->isReferenceType())
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 2818c9d9fd4a..f4d2e32cef11 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -126,7 +126,7 @@ public:
const CallEvent *Call,
PointerEscapeKind Kind) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
- void checkEndFunction(CheckerContext &Ctx) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &Ctx) const;
private:
void diagnoseMissingReleases(CheckerContext &C) const;
@@ -398,7 +398,7 @@ void ObjCDeallocChecker::checkPostObjCMessage(
/// Check for missing releases even when -dealloc does not call
/// '[super dealloc]'.
void ObjCDeallocChecker::checkEndFunction(
- CheckerContext &C) const {
+ const ReturnStmt *RS, CheckerContext &C) const {
diagnoseMissingReleases(C);
}
@@ -535,7 +535,7 @@ void ObjCDeallocChecker::diagnoseMissingReleases(CheckerContext &C) const {
continue;
// Prevents diagnosing multiple times for the same instance variable
- // at, for example, both a return and at the end of of the function.
+ // at, for example, both a return and at the end of the function.
NewUnreleased = F.remove(NewUnreleased, IvarSymbol);
if (State->getStateManager()
@@ -645,7 +645,7 @@ ObjCDeallocChecker::findPropertyOnDeallocatingInstance(
bool ObjCDeallocChecker::diagnoseExtraRelease(SymbolRef ReleasedValue,
const ObjCMethodCall &M,
CheckerContext &C) const {
- // Try to get the region from which the the released value was loaded.
+ // Try to get the region from which the released value was loaded.
// Note that, unlike diagnosing for missing releases, here we don't track
// values that must not be released in the state. This is because even if
// these values escape, it is still an error under the rules of MRR to
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 6dbacad7f2ea..202233acffab 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -37,6 +37,9 @@ static bool isArc4RandomAvailable(const ASTContext &Ctx) {
namespace {
struct ChecksFilter {
+ DefaultBool check_bcmp;
+ DefaultBool check_bcopy;
+ DefaultBool check_bzero;
DefaultBool check_gets;
DefaultBool check_getpw;
DefaultBool check_mktemp;
@@ -47,6 +50,9 @@ struct ChecksFilter {
DefaultBool check_FloatLoopCounter;
DefaultBool check_UncheckedReturn;
+ CheckName checkName_bcmp;
+ CheckName checkName_bcopy;
+ CheckName checkName_bzero;
CheckName checkName_gets;
CheckName checkName_getpw;
CheckName checkName_mktemp;
@@ -89,6 +95,9 @@ public:
// Checker-specific methods.
void checkLoopConditionForFloat(const ForStmt *FS);
+ void checkCall_bcmp(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_bcopy(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_bzero(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_gets(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD);
@@ -129,6 +138,9 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
// Set the evaluation function by switching on the callee name.
FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("bcmp", &WalkAST::checkCall_bcmp)
+ .Case("bcopy", &WalkAST::checkCall_bcopy)
+ .Case("bzero", &WalkAST::checkCall_bzero)
.Case("gets", &WalkAST::checkCall_gets)
.Case("getpw", &WalkAST::checkCall_getpw)
.Case("mktemp", &WalkAST::checkCall_mktemp)
@@ -296,6 +308,132 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
}
//===----------------------------------------------------------------------===//
+// Check: Any use of bcmp.
+// CWE-477: Use of Obsolete Functions
+// bcmp was deprecated in POSIX.1-2008
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_bcmp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_bcmp)
+ return;
+
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return;
+
+ // Verify that the function takes three arguments.
+ if (FPT->getNumParams() != 3)
+ return;
+
+ for (int i = 0; i < 2; i++) {
+ // Verify that the first and second argument types are void*.
+ const PointerType *PT = FPT->getParamType(i)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().VoidTy)
+ return;
+ }
+
+ // Verify the third argument type is integer.
+ if (!FPT->getParamType(2)->isIntegralOrUnscopedEnumerationType())
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_bcmp,
+ "Use of deprecated function in call to 'bcmp()'",
+ "Security",
+ "The bcmp() function is obsoleted by memcmp().",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of bcopy.
+// CWE-477: Use of Obsolete Functions
+// bcopy was deprecated in POSIX.1-2008
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_bcopy(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_bcopy)
+ return;
+
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return;
+
+ // Verify that the function takes three arguments.
+ if (FPT->getNumParams() != 3)
+ return;
+
+ for (int i = 0; i < 2; i++) {
+ // Verify that the first and second argument types are void*.
+ const PointerType *PT = FPT->getParamType(i)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().VoidTy)
+ return;
+ }
+
+ // Verify the third argument type is integer.
+ if (!FPT->getParamType(2)->isIntegralOrUnscopedEnumerationType())
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_bcopy,
+ "Use of deprecated function in call to 'bcopy()'",
+ "Security",
+ "The bcopy() function is obsoleted by memcpy() "
+ "or memmove().",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of bzero.
+// CWE-477: Use of Obsolete Functions
+// bzero was deprecated in POSIX.1-2008
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_bzero(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_bzero)
+ return;
+
+ const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>();
+ if (!FPT)
+ return;
+
+ // Verify that the function takes two arguments.
+ if (FPT->getNumParams() != 2)
+ return;
+
+ // Verify the first argument type is void*.
+ const PointerType *PT = FPT->getParamType(0)->getAs<PointerType>();
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().VoidTy)
+ return;
+
+ // Verify the second argument type is integer.
+ if (!FPT->getParamType(1)->isIntegralOrUnscopedEnumerationType())
+ return;
+
+ // Issue a warning.
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), filter.checkName_bzero,
+ "Use of deprecated function in call to 'bzero()'",
+ "Security",
+ "The bzero() function is obsoleted by memset().",
+ CELoc, CE->getCallee()->getSourceRange());
+}
+
+
+//===----------------------------------------------------------------------===//
// Check: Any use of 'gets' is insecure.
// Originally: <rdar://problem/6335715>
// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
@@ -510,6 +648,17 @@ void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
if (!checkCall_strCommon(CE, FD))
return;
+ // Don't warn about copying a string literal into a fixed-size array that
+ // is statically large enough to hold it.
+ const auto *Target = CE->getArg(0)->IgnoreImpCasts(),
+ *Source = CE->getArg(1)->IgnoreImpCasts();
+ if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Target))
+ if (const auto *Array = dyn_cast<ConstantArrayType>(DeclRef->getType())) {
+ uint64_t ArraySize = BR.getContext().getTypeSize(Array) / 8;
+ if (const auto *String = dyn_cast<StringLiteral>(Source)) {
+ if (ArraySize >= String->getLength() + 1)
+ return;
+ }
+ }
+
// Issue a warning.
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
@@ -764,6 +913,9 @@ public:
checker->filter.checkName_##name = mgr.getCurrentCheckName(); \
}
+REGISTER_CHECKER(bcmp)
+REGISTER_CHECKER(bcopy)
+REGISTER_CHECKER(bzero)
REGISTER_CHECKER(gets)
REGISTER_CHECKER(getpw)
REGISTER_CHECKER(mkstemp)
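
The three new checks suggest the standard replacements for the functions POSIX.1-2008 dropped. A minimal migration sketch (illustration only); note that bcopy and memmove take source and destination in opposite orders:

#include <string.h>

static void migrate(void *dst, const void *src, size_t n) {
  memmove(dst, src, n); // replaces bcopy(src, dst, n) -- arguments swapped
  memset(dst, 0, n);    // replaces bzero(dst, n)
  if (memcmp(dst, src, n) == 0) { // replaces bcmp(dst, src, n) == 0
    /* buffers match */
  }
}
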
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 95b6c4d3775d..7862a4c25681 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -42,6 +42,7 @@ class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
check::PreCall,
check::PostCall,
check::BranchCondition,
+ check::NewAllocator,
check::Location,
check::Bind,
check::DeadSymbols,
@@ -58,7 +59,7 @@ class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
check::Event<ImplicitNullDerefEvent>,
check::ASTDecl<FunctionDecl> > {
public:
- /// \brief Pre-visit the Statement.
+ /// Pre-visit the Statement.
///
/// The method will be called before the analyzer core processes the
/// statement. The notification is performed for every explored CFGElement,
@@ -71,7 +72,7 @@ public:
/// check::PreStmt<ReturnStmt>
void checkPreStmt(const ReturnStmt *DS, CheckerContext &C) const {}
- /// \brief Post-visit the Statement.
+ /// Post-visit the Statement.
///
/// The method will be called after the analyzer core processes the
/// statement. The notification is performed for every explored CFGElement,
@@ -81,7 +82,7 @@ public:
/// check::PostStmt<DeclStmt>
void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
- /// \brief Pre-visit the Objective C message.
+ /// Pre-visit the Objective C message.
///
/// This will be called before the analyzer core processes the method call.
/// This is called for any action which produces an Objective-C message send,
@@ -90,13 +91,13 @@ public:
/// check::PreObjCMessage
void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
- /// \brief Post-visit the Objective C message.
+ /// Post-visit the Objective C message.
/// \sa checkPreObjCMessage()
///
/// check::PostObjCMessage
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
- /// \brief Visit an Objective-C message whose receiver is nil.
+ /// Visit an Objective-C message whose receiver is nil.
///
/// This will be called when the analyzer core processes a method call whose
/// receiver is definitely nil. In this case, check{Pre/Post}ObjCMessage and
@@ -105,7 +106,7 @@ public:
/// check::ObjCMessageNil
void checkObjCMessageNil(const ObjCMethodCall &M, CheckerContext &C) const {}
- /// \brief Pre-visit an abstract "call" event.
+ /// Pre-visit an abstract "call" event.
///
/// This is used for checkers that want to check arguments or attributed
/// behavior for functions and methods no matter how they are being invoked.
@@ -117,16 +118,32 @@ public:
/// check::PreCall
void checkPreCall(const CallEvent &Call, CheckerContext &C) const {}
- /// \brief Post-visit an abstract "call" event.
+ /// Post-visit an abstract "call" event.
/// \sa checkPreObjCMessage()
///
/// check::PostCall
void checkPostCall(const CallEvent &Call, CheckerContext &C) const {}
- /// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
+ /// Pre-visit of the condition statement of a branch (such as IfStmt).
void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
- /// \brief Called on a load from and a store to a location.
+ /// Post-visit the C++ operator new's allocation call.
+ ///
+ /// Execution of C++ operator new consists of the following phases: (1) call
+ /// default or overridden operator new() to allocate memory, (2) cast the
+ /// return value of operator new() from void pointer type to class pointer
+ /// type, (3) assuming that the value is non-null, call the object's
+ /// constructor over this pointer, (4) declare that the value of the
+ /// new-expression is this pointer. This callback is called between steps
+ /// (2) and (3). Post-call for the allocator is called after step (1).
+ /// Pre-statement for the new-expression is called on step (4) when the value
+ /// of the expression is evaluated.
+ /// \param NE The C++ new-expression that triggered the allocation.
+ /// \param Target The allocated region, casted to the class type.
+ void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
+ CheckerContext &) const {}
+
+ /// Called on a load from and a store to a location.
///
/// The method will be called each time a location (pointer) value is
/// accessed.
@@ -138,7 +155,7 @@ public:
void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
CheckerContext &) const {}
- /// \brief Called on binding of a value to a location.
+ /// Called on binding of a value to a location.
///
/// \param Loc The value of the location (pointer).
/// \param Val The value which will be stored at the location Loc.
@@ -147,7 +164,7 @@ public:
/// check::Bind
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
- /// \brief Called whenever a symbol becomes dead.
+ /// Called whenever a symbol becomes dead.
///
/// This callback should be used by the checkers to aggressively clean
/// up/reduce the checker state, which is important for reducing the overall
@@ -164,20 +181,20 @@ public:
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
- /// \brief Called when the analyzer core starts analyzing a function,
+ /// Called when the analyzer core starts analyzing a function,
/// regardless of whether it is analyzed at the top level or is inlined.
///
/// check::BeginFunction
void checkBeginFunction(CheckerContext &Ctx) const {}
- /// \brief Called when the analyzer core reaches the end of a
+ /// Called when the analyzer core reaches the end of a
/// function being analyzed regardless of whether it is analyzed at the top
/// level or is inlined.
///
/// check::EndFunction
- void checkEndFunction(CheckerContext &Ctx) const {}
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &Ctx) const {}
- /// \brief Called after all the paths in the ExplodedGraph reach end of path
+ /// Called after all the paths in the ExplodedGraph reach end of path
/// - the symbolic execution graph is fully explored.
///
/// This callback should be used in cases when a checker needs to have a
@@ -190,14 +207,14 @@ public:
BugReporter &BR,
ExprEngine &Eng) const {}
- /// \brief Called after analysis of a TranslationUnit is complete.
+ /// Called after analysis of a TranslationUnit is complete.
///
/// check::EndOfTranslationUnit
void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
AnalysisManager &Mgr,
BugReporter &BR) const {}
- /// \brief Evaluates function call.
+ /// Evaluates function call.
///
/// The analysis core treats all function calls in the same way. However, some
/// functions have special meaning, which should be reflected in the program
@@ -212,7 +229,7 @@ public:
/// eval::Call
bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
- /// \brief Handles assumptions on symbolic values.
+ /// Handles assumptions on symbolic values.
///
/// This method is called when a symbolic expression is assumed to be true or
/// false. For example, the assumptions are performed when evaluating a
@@ -231,7 +248,7 @@ public:
/// check::LiveSymbols
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
- /// \brief Called when the contents of one or more regions change.
+ /// Called when the contents of one or more regions change.
///
/// This can occur in many different ways: an explicit bind, a blanket
/// invalidation of the region contents, or by passing a region to a function
@@ -263,7 +280,7 @@ public:
return State;
}
- /// \brief Called when pointers escape.
+ /// Called when pointers escape.
///
/// This notifies the checkers about pointer escape, which occurs whenever
/// the analyzer cannot track the symbol any more. For example, as a
@@ -283,7 +300,7 @@ public:
return State;
}
- /// \brief Called when const pointers escape.
+ /// Called when const pointers escape.
///
/// Note: in most cases checkPointerEscape callback is sufficient.
/// \sa checkPointerEscape
@@ -297,7 +314,7 @@ public:
/// check::Event<ImplicitNullDerefEvent>
void checkEvent(ImplicitNullDerefEvent Event) const {}
- /// \brief Check every declaration in the AST.
+ /// Check every declaration in the AST.
///
/// An AST traversal callback, which should only be used when the checker is
/// not path sensitive. It will be called for every Declaration in the AST and
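
[Annotation] The documentation hunk above covers the two callback changes most likely to require checker updates: check::NewAllocator is new, and checkEndFunction() gains a ReturnStmt parameter. A minimal sketch of a checker subscribing to both — hypothetical class name, empty bodies, and the usual registration boilerplate assumed:

#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

namespace {
class LifecycleSketchChecker
    : public Checker<check::NewAllocator, check::EndFunction> {
public:
  // Runs between allocation (step 1) and the constructor call (step 3);
  // Target is the allocated region already cast to the class type.
  void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
                         CheckerContext &C) const {
    // e.g. start tracking the freshly allocated region here
  }

  // Updated signature: RS is the return statement that exits the function,
  // or (an assumption of this sketch) null when control falls off the end.
  void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const {
    // e.g. flush per-function checker state here
  }
};
} // end anonymous namespace
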
diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 9e9939ae25c0..b38992b0e030 100644
--- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -105,7 +105,7 @@ void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
// After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
const Expr *ArgExpr = CE->getArg(0);
- SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+ SVal ArgVal = C.getSVal(ArgExpr);
if (const MemRegion *R = ArgVal.getAsRegion()) {
R = R->StripCasts();
@@ -132,7 +132,7 @@ void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
if (!II_chdir)
II_chdir = &Ctx.Idents.get("chdir");
- // Ingnore chroot and chdir.
+ // Ignore chroot and chdir.
if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
return;
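
[Annotation] The getSVal() cleanups repeated throughout this patch rely on CheckerContext providing a shorthand that folds in the current state and location context. A sketch of the equivalence the rewrite depends on (helper name is illustrative):

#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

// Both spellings bind the same value; the patch migrates to the second.
static SVal getArgValue(CheckerContext &C, const Expr *ArgExpr) {
  SVal Verbose = C.getState()->getSVal(ArgExpr, C.getLocationContext());
  SVal Short = C.getSVal(ArgExpr); // equivalent shorthand
  (void)Verbose;
  return Short;
}
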
diff --git a/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp b/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
index e04e2ab2c320..d3489282ab62 100644
--- a/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
@@ -39,7 +39,7 @@ class DeleteWithNonVirtualDtorChecker
: public Checker<check::PreStmt<CXXDeleteExpr>> {
mutable std::unique_ptr<BugType> BT;
- class DeleteBugVisitor : public BugReporterVisitorImpl<DeleteBugVisitor> {
+ class DeleteBugVisitor : public BugReporterVisitor {
public:
DeleteBugVisitor() : Satisfied(false) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
@@ -110,8 +110,6 @@ DeleteWithNonVirtualDtorChecker::DeleteBugVisitor::VisitNode(
if (Satisfied)
return nullptr;
- ProgramStateRef State = N->getState();
- const LocationContext *LC = N->getLocationContext();
const Stmt *S = PathDiagnosticLocation::getStmt(N);
if (!S)
return nullptr;
@@ -128,7 +126,7 @@ DeleteWithNonVirtualDtorChecker::DeleteBugVisitor::VisitNode(
}
// Region associated with the current cast expression.
- const MemRegion *M = State->getSVal(CastE, LC).getAsRegion();
+ const MemRegion *M = N->getSVal(CastE).getAsRegion();
if (!M)
return nullptr;
diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 598502305633..bc39c92ea970 100644
--- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -24,22 +24,23 @@ using namespace ento;
namespace {
class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
- void reportBug(const char *Msg,
- ProgramStateRef StateZero,
- CheckerContext &C) const ;
+ void reportBug(const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
+ std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
};
} // end anonymous namespace
-void DivZeroChecker::reportBug(const char *Msg,
- ProgramStateRef StateZero,
- CheckerContext &C) const {
+void DivZeroChecker::reportBug(
+ const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
+ std::unique_ptr<BugReporterVisitor> Visitor) const {
if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
if (!BT)
BT.reset(new BuiltinBug(this, "Division by zero"));
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
+ R->addVisitor(std::move(Visitor));
bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
C.emitReport(std::move(R));
}
@@ -57,7 +58,7 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
if (!B->getRHS()->getType()->isScalarType())
return;
- SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
+ SVal Denom = C.getSVal(B->getRHS());
Optional<DefinedSVal> DV = Denom.getAs<DefinedSVal>();
// Divide-by-undefined handled in the generic checking for uses of
@@ -78,7 +79,8 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
bool TaintedD = C.getState()->isTainted(*DV);
if ((stateNotZero && stateZero && TaintedD)) {
- reportBug("Division by a tainted value, possibly zero", stateZero, C);
+ reportBug("Division by a tainted value, possibly zero", stateZero, C,
+ llvm::make_unique<TaintBugVisitor>(*DV));
return;
}
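
[Annotation] With the optional visitor parameter in place, DivZeroChecker can attach the taint visitor (the per-checker copy is deleted from GenericTaintChecker further down) so the path diagnostic walks back to where the taint was introduced. An illustrative test case, assuming GenericTaintChecker models scanf() as a taint source:

#include <cstdio>

int divideByInput(void) {
  int denom;
  scanf("%d", &denom); // note: "Taint originated here"
  return 100 / denom;  // warning: Division by a tainted value, possibly zero
}
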
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index 109897be2931..4e4d81cd6714 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -38,8 +38,7 @@ class DynamicTypeChecker : public Checker<check::PostStmt<ImplicitCastExpr>> {
new BugType(this, "Dynamic and static type mismatch", "Type Error"));
}
- class DynamicTypeBugVisitor
- : public BugReporterVisitorImpl<DynamicTypeBugVisitor> {
+ class DynamicTypeBugVisitor : public BugReporterVisitor {
public:
DynamicTypeBugVisitor(const MemRegion *Reg) : Reg(Reg) {}
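
[Annotation] The same mechanical base-class change — the CRTP helper BugReporterVisitorImpl<T> replaced by plain BugReporterVisitor — recurs across several checkers in this patch. The resulting visitor shape, sketched with a hypothetical class name (signatures taken from the visitors elsewhere in this diff):

#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"

using namespace clang;
using namespace ento;

class SketchBugVisitor : public BugReporterVisitor {
public:
  void Profile(llvm::FoldingSetNodeID &ID) const override {
    static int Tag = 0;
    ID.AddPointer(&Tag); // uniquely identifies this visitor kind
  }

  std::shared_ptr<PathDiagnosticPiece>
  VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN,
            BugReporterContext &BRC, BugReport &BR) override {
    return nullptr; // return an event piece at nodes worth annotating
  }
};
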
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index db9179e018a1..126e57645a43 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -59,7 +59,7 @@ class DynamicTypePropagation:
const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
CheckerContext &C) const;
- /// \brief Return a better dynamic type if one can be derived from the cast.
+ /// Return a better dynamic type if one can be derived from the cast.
const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
CheckerContext &C) const;
@@ -74,7 +74,7 @@ class DynamicTypePropagation:
new BugType(this, "Generics", categories::CoreFoundationObjectiveC));
}
- class GenericsBugVisitor : public BugReporterVisitorImpl<GenericsBugVisitor> {
+ class GenericsBugVisitor : public BugReporterVisitor {
public:
GenericsBugVisitor(SymbolRef S) : Sym(S) {}
@@ -562,7 +562,7 @@ void DynamicTypePropagation::checkPostStmt(const CastExpr *CE,
DestObjectPtrType->isUnspecialized())
return;
- SymbolRef Sym = State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+ SymbolRef Sym = C.getSVal(CE).getAsSymbol();
if (!Sym)
return;
@@ -631,7 +631,7 @@ static const Expr *stripCastsAndSugar(const Expr *E) {
}
static bool isObjCTypeParamDependent(QualType Type) {
- // It is illegal to typedef parameterized types inside an interface. Therfore
+ // It is illegal to typedef parameterized types inside an interface. Therefore
// an Objective-C type can only be dependent on a type parameter when the type
// parameter is structurally present in the type itself.
class IsObjCTypeParamDependentTypeVisitor
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 0005ec470d20..8de653c10f7e 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -149,7 +149,7 @@ void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
// A specific instantiation of an inlined function may have more constrained
// values than can generally be assumed. Skip the check.
- if (LC->getCurrentStackFrame()->getParent() != nullptr)
+ if (LC->getStackFrame()->getParent() != nullptr)
return;
reportBug(getArgumentValueString(CE, C), C);
@@ -178,7 +178,7 @@ void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
// when we are analyzing it as an inlined function. This means that
// clang_analyzer_checkInlined(true) should always print TRUE, but
// clang_analyzer_checkInlined(false) should never actually print anything.
- if (LC->getCurrentStackFrame()->getParent() == nullptr)
+ if (LC->getStackFrame()->getParent() == nullptr)
return;
reportBug(getArgumentValueString(CE, C), C);
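
[Annotation] For context: these functions implement the analyzer's debugging builtins, and the getCurrentStackFrame() -> getStackFrame() change is a pure rename. A sketch of how analyzer tests exercise the two builtins, following the semantics described in the comments above:

void clang_analyzer_eval(int);
void clang_analyzer_checkInlined(int);

static int identity(int x) {
  clang_analyzer_checkInlined(1); // prints TRUE only when analyzed as inlined
  return x;
}

void top(void) {
  clang_analyzer_eval(identity(42) == 42); // expected-warning {{TRUE}}
}
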
diff --git a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 3fe89f96a43b..059203fca730 100644
--- a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -44,8 +44,7 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
if (!T->isPointerType())
return;
- ProgramStateRef state = C.getState();
- SVal RV = state->getSVal(B->getRHS(), C.getLocationContext());
+ SVal RV = C.getSVal(B->getRHS());
if (!RV.isConstant() || RV.isZeroConstant())
return;
diff --git a/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
new file mode 100644
index 000000000000..5cb51b01f044
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -0,0 +1,229 @@
+//===- GCDAntipatternChecker.cpp ---------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines GCDAntipatternChecker which checks against a common
+// antipattern when synchronous API is emulated from asynchronous callbacks
+// using a semaphore:
+//
+// dispatch_semaphore_t sema = dispatch_semaphore_create(0);
+//
+// AnyCFunctionCall(^{
+// // code…
+// dispatch_semaphore_signal(sema);
+// })
+// dispatch_semaphore_wait(sema, *)
+//
+// Such code is a common performance problem, due to the inability of GCD to
+// properly handle QoS when a combination of queues and semaphores is used.
+// Good code would either use an asynchronous API (when available), or perform
+// the necessary action in the asynchronous callback.
+//
+// Currently, the check is performed using a simple heuristic AST pattern
+// matching.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/Support/Debug.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+// ID of a node at which the diagnostic would be emitted.
+const char *WarnAtNode = "waitcall";
+
+class GCDAntipatternChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const;
+};
+
+auto callsName(const char *FunctionName)
+ -> decltype(callee(functionDecl())) {
+ return callee(functionDecl(hasName(FunctionName)));
+}
+
+auto equalsBoundArgDecl(int ArgIdx, const char *DeclName)
+ -> decltype(hasArgument(0, expr())) {
+ return hasArgument(ArgIdx, ignoringParenCasts(declRefExpr(
+ to(varDecl(equalsBoundNode(DeclName))))));
+}
+
+auto bindAssignmentToDecl(const char *DeclName) -> decltype(hasLHS(expr())) {
+ return hasLHS(ignoringParenImpCasts(
+ declRefExpr(to(varDecl().bind(DeclName)))));
+}
+
+/// The pattern is very common in tests, and it is OK to use it there.
+/// We use two heuristics for detecting tests: the method name starts with
+/// "test" (used in XCTest), and the class name contains "mock" or "test"
+/// (used in helpers which are not tests themselves, but are used exclusively
+/// in tests).
+static bool isTest(const Decl *D) {
+ if (const auto* ND = dyn_cast<NamedDecl>(D)) {
+ std::string DeclName = ND->getNameAsString();
+ if (StringRef(DeclName).startswith("test"))
+ return true;
+ }
+ if (const auto *OD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const auto *CD = dyn_cast<ObjCContainerDecl>(OD->getParent())) {
+ std::string ContainerName = CD->getNameAsString();
+ StringRef CN(ContainerName);
+ if (CN.contains_lower("test") || CN.contains_lower("mock"))
+ return true;
+ }
+ }
+ return false;
+}
+
+static auto findGCDAntiPatternWithSemaphore() -> decltype(compoundStmt()) {
+
+ const char *SemaphoreBinding = "semaphore_name";
+ auto SemaphoreCreateM = callExpr(allOf(
+ callsName("dispatch_semaphore_create"),
+ hasArgument(0, ignoringParenCasts(integerLiteral(equals(0))))));
+
+ auto SemaphoreBindingM = anyOf(
+ forEachDescendant(
+ varDecl(hasDescendant(SemaphoreCreateM)).bind(SemaphoreBinding)),
+ forEachDescendant(binaryOperator(bindAssignmentToDecl(SemaphoreBinding),
+ hasRHS(SemaphoreCreateM))));
+
+ auto HasBlockArgumentM = hasAnyArgument(hasType(
+ hasCanonicalType(blockPointerType())
+ ));
+
+ auto ArgCallsSignalM = hasAnyArgument(stmt(hasDescendant(callExpr(
+ allOf(
+ callsName("dispatch_semaphore_signal"),
+ equalsBoundArgDecl(0, SemaphoreBinding)
+ )))));
+
+ auto HasBlockAndCallsSignalM = allOf(HasBlockArgumentM, ArgCallsSignalM);
+
+ auto HasBlockCallingSignalM =
+ forEachDescendant(
+ stmt(anyOf(
+ callExpr(HasBlockAndCallsSignalM),
+ objcMessageExpr(HasBlockAndCallsSignalM)
+ )));
+
+ auto SemaphoreWaitM = forEachDescendant(
+ callExpr(
+ allOf(
+ callsName("dispatch_semaphore_wait"),
+ equalsBoundArgDecl(0, SemaphoreBinding)
+ )
+ ).bind(WarnAtNode));
+
+ return compoundStmt(
+ SemaphoreBindingM, HasBlockCallingSignalM, SemaphoreWaitM);
+}
+
+static auto findGCDAntiPatternWithGroup() -> decltype(compoundStmt()) {
+
+ const char *GroupBinding = "group_name";
+ auto DispatchGroupCreateM = callExpr(callsName("dispatch_group_create"));
+
+ auto GroupBindingM = anyOf(
+ forEachDescendant(
+ varDecl(hasDescendant(DispatchGroupCreateM)).bind(GroupBinding)),
+ forEachDescendant(binaryOperator(bindAssignmentToDecl(GroupBinding),
+ hasRHS(DispatchGroupCreateM))));
+
+ auto GroupEnterM = forEachDescendant(
+ stmt(callExpr(allOf(callsName("dispatch_group_enter"),
+ equalsBoundArgDecl(0, GroupBinding)))));
+
+ auto HasBlockArgumentM = hasAnyArgument(hasType(
+ hasCanonicalType(blockPointerType())
+ ));
+
+ auto ArgCallsSignalM = hasAnyArgument(stmt(hasDescendant(callExpr(
+ allOf(
+ callsName("dispatch_group_leave"),
+ equalsBoundArgDecl(0, GroupBinding)
+ )))));
+
+ auto HasBlockAndCallsLeaveM = allOf(HasBlockArgumentM, ArgCallsSignalM);
+
+ auto AcceptsBlockM =
+ forEachDescendant(
+ stmt(anyOf(
+ callExpr(HasBlockAndCallsLeaveM),
+ objcMessageExpr(HasBlockAndCallsLeaveM)
+ )));
+
+ auto GroupWaitM = forEachDescendant(
+ callExpr(
+ allOf(
+ callsName("dispatch_group_wait"),
+ equalsBoundArgDecl(0, GroupBinding)
+ )
+ ).bind(WarnAtNode));
+
+ return compoundStmt(GroupBindingM, GroupEnterM, AcceptsBlockM, GroupWaitM);
+}
+
+static void emitDiagnostics(const BoundNodes &Nodes,
+ const char* Type,
+ BugReporter &BR,
+ AnalysisDeclContext *ADC,
+ const GCDAntipatternChecker *Checker) {
+ const auto *SW = Nodes.getNodeAs<CallExpr>(WarnAtNode);
+ assert(SW);
+
+ std::string Diagnostics;
+ llvm::raw_string_ostream OS(Diagnostics);
+ OS << "Waiting on a callback using a " << Type << " creates useless threads "
+ << "and is subject to priority inversion; consider "
+ << "using a synchronous API or changing the caller to be asynchronous";
+
+ BR.EmitBasicReport(
+ ADC->getDecl(),
+ Checker,
+ /*Name=*/"GCD performance anti-pattern",
+ /*Category=*/"Performance",
+ OS.str(),
+ PathDiagnosticLocation::createBegin(SW, BR.getSourceManager(), ADC),
+ SW->getSourceRange());
+}
+
+void GCDAntipatternChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const {
+ if (isTest(D))
+ return;
+
+ AnalysisDeclContext *ADC = AM.getAnalysisDeclContext(D);
+
+ auto SemaphoreMatcherM = findGCDAntiPatternWithSemaphore();
+ auto Matches = match(SemaphoreMatcherM, *D->getBody(), AM.getASTContext());
+ for (BoundNodes Match : Matches)
+ emitDiagnostics(Match, "semaphore", BR, ADC, this);
+
+ auto GroupMatcherM = findGCDAntiPatternWithGroup();
+ Matches = match(GroupMatcherM, *D->getBody(), AM.getASTContext());
+ for (BoundNodes Match : Matches)
+ emitDiagnostics(Match, "group", BR, ADC, this);
+}
+
+}
+
+void ento::registerGCDAntipattern(CheckerManager &Mgr) {
+ Mgr.registerChecker<GCDAntipatternChecker>();
+}
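
[Annotation] Putting the matchers together, the new checker flags code shaped like the example in the file header. A minimal trigger, assuming Apple's libdispatch headers and Clang's blocks extension (-fblocks); AnyCFunctionCall is a stand-in for any API taking a completion block:

#include <dispatch/dispatch.h>

extern void AnyCFunctionCall(dispatch_block_t block);

void emulateSyncCall(void) {
  dispatch_semaphore_t sema = dispatch_semaphore_create(0);
  AnyCFunctionCall(^{
    dispatch_semaphore_signal(sema);
  });
  // warning: Waiting on a callback using a semaphore creates useless
  // threads and is subject to priority inversion; [...]
  dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
}
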
diff --git a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index f0be41b293e4..3ef95e673b87 100644
--- a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -161,7 +161,7 @@ void GTestChecker::modelAssertionResultCopyConstructor(
const CXXConstructorCall *Call, CheckerContext &C) const {
assert(Call->getNumArgs() == 1);
- // The first parameter of the the copy constructor must be the other
+ // The first parameter of the copy constructor must be the other
// instance to initialize this instance's fields from.
SVal OtherVal = Call->getArgSVal(0);
SVal ThisVal = Call->getCXXThisVal();
diff --git a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 43966656cd8d..899586745a0b 100644
--- a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -48,24 +48,24 @@ private:
BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
}
- /// \brief Catch taint related bugs. Check if tainted data is passed to a
+ /// Catch taint related bugs. Check if tainted data is passed to a
/// system call etc.
bool checkPre(const CallExpr *CE, CheckerContext &C) const;
- /// \brief Add taint sources on a pre-visit.
+ /// Add taint sources on a pre-visit.
void addSourcesPre(const CallExpr *CE, CheckerContext &C) const;
- /// \brief Propagate taint generated at pre-visit.
+ /// Propagate taint generated at pre-visit.
bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
- /// \brief Add taint sources on a post visit.
+ /// Add taint sources on a post visit.
void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
/// Check if the region the expression evaluates to is the standard input,
/// and thus, is tainted.
static bool isStdin(const Expr *E, CheckerContext &C);
- /// \brief Given a pointer argument, return the value it points to.
+ /// Given a pointer argument, return the value it points to.
static Optional<SVal> getPointedToSVal(CheckerContext &C, const Expr *Arg);
/// Functions defining the attack surface.
@@ -100,26 +100,9 @@ private:
bool generateReportIfTainted(const Expr *E, const char Msg[],
CheckerContext &C) const;
- /// The bug visitor prints a diagnostic message at the location where a given
- /// variable was tainted.
- class TaintBugVisitor
- : public BugReporterVisitorImpl<TaintBugVisitor> {
- private:
- const SVal V;
-
- public:
- TaintBugVisitor(const SVal V) : V(V) {}
- void Profile(llvm::FoldingSetNodeID &ID) const override { ID.Add(V); }
-
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
- };
-
typedef SmallVector<unsigned, 2> ArgVector;
- /// \brief A struct used to specify taint propagation rules for a function.
+ /// A struct used to specify taint propagation rules for a function.
///
/// If any of the possible taint source arguments is tainted, all of the
/// destination arguments should also be tainted. Use InvalidArgIndex in the
@@ -183,7 +166,7 @@ private:
return (V && State->isTainted(*V));
}
- /// \brief Pre-process a function which propagates taint according to the
+ /// Pre-process a function which propagates taint according to the
/// taint rule.
ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
@@ -214,28 +197,6 @@ const char GenericTaintChecker::MsgTaintedBufferSize[] =
/// points to data, which should be tainted on return.
REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
-std::shared_ptr<PathDiagnosticPiece>
-GenericTaintChecker::TaintBugVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN, BugReporterContext &BRC, BugReport &BR) {
-
- // Find the ExplodedNode where the taint was first introduced
- if (!N->getState()->isTainted(V) || PrevN->getState()->isTainted(V))
- return nullptr;
-
- const Stmt *S = PathDiagnosticLocation::getStmt(N);
- if (!S)
- return nullptr;
-
- const LocationContext *NCtx = N->getLocationContext();
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(S, BRC.getSourceManager(), NCtx);
- if (!L.isValid() || !L.asLocation().isValid())
- return nullptr;
-
- return std::make_shared<PathDiagnosticEventPiece>(
- L, "Taint originated here");
-}
-
GenericTaintChecker::TaintPropagationRule
GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
const FunctionDecl *FDecl,
@@ -468,7 +429,7 @@ bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const{
Optional<SVal> GenericTaintChecker::getPointedToSVal(CheckerContext &C,
const Expr *Arg) {
ProgramStateRef State = C.getState();
- SVal AddrVal = State->getSVal(Arg->IgnoreParens(), C.getLocationContext());
+ SVal AddrVal = C.getSVal(Arg->IgnoreParens());
if (AddrVal.isUnknownOrUndef())
return None;
@@ -621,7 +582,7 @@ ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
ProgramStateRef State = C.getState();
- SVal Val = State->getSVal(E, C.getLocationContext());
+ SVal Val = C.getSVal(E);
// stdin is a pointer, so it would be a region.
const MemRegion *MemReg = Val.getAsRegion();
@@ -646,7 +607,8 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
if (const PointerType * PtrTy =
dyn_cast<PointerType>(D->getType().getTypePtr()))
- if (PtrTy->getPointeeType() == C.getASTContext().getFILEType())
+ if (PtrTy->getPointeeType().getCanonicalType() ==
+ C.getASTContext().getFILEType().getCanonicalType())
return true;
}
return false;
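
[Annotation] The isStdin() change compares canonical types so that typedef sugar between stdin's declared type and the ASTContext's FILE type cannot defeat the match. A hypothetical sugar chain illustrating what strict QualType equality would miss:

// Hypothetical declarations; real C libraries differ in the details.
typedef struct _IO_FILE FILE; // the canonical FILE record type
typedef FILE __libc_file;     // an extra layer of typedef sugar
extern __libc_file *stdin;    // pointee is the sugared name, not FILE

// Sugared comparison:   getPointeeType() == getFILEType()  -> mismatch
// Canonical comparison: both desugar to 'struct _IO_FILE'  -> match
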
diff --git a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index cf57b8dca063..f102ca96a5c1 100644
--- a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This defines IdenticalExprChecker, a check that warns about
+/// This defines IdenticalExprChecker, a check that warns about
/// unintended use of identical expressions.
///
/// It checks for use of identical expressions with comparison operators and
@@ -296,7 +296,7 @@ bool FindIdenticalExprVisitor::VisitConditionalOperator(
return true;
}
-/// \brief Determines whether two statement trees are identical regarding
+/// Determines whether two statement trees are identical regarding
/// operators and symbols.
///
/// Exceptions: expressions containing macros or functions with possible side
diff --git a/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
new file mode 100644
index 000000000000..ed877ab34518
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -0,0 +1,252 @@
+//=== InnerPointerChecker.cpp -------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a check that marks a raw pointer to a C++ container's
+// inner buffer as released when the object is destroyed. This information
+// can be used by MallocChecker to detect use-after-free problems.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AllocationState.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+using PtrSet = llvm::ImmutableSet<SymbolRef>;
+
+// Associate container objects with a set of raw pointer symbols.
+REGISTER_MAP_WITH_PROGRAMSTATE(RawPtrMap, const MemRegion *, PtrSet)
+
+// This is a trick to gain access to PtrSet's Factory.
+namespace clang {
+namespace ento {
+template <>
+struct ProgramStateTrait<PtrSet> : public ProgramStatePartialTrait<PtrSet> {
+ static void *GDMIndex() {
+ static int Index = 0;
+ return &Index;
+ }
+};
+} // end namespace ento
+} // end namespace clang
+
+namespace {
+
+class InnerPointerChecker
+ : public Checker<check::DeadSymbols, check::PostCall> {
+
+ CallDescription AppendFn, AssignFn, ClearFn, CStrFn, DataFn, EraseFn,
+ InsertFn, PopBackFn, PushBackFn, ReplaceFn, ReserveFn, ResizeFn,
+ ShrinkToFitFn, SwapFn;
+
+public:
+ class InnerPointerBRVisitor : public BugReporterVisitor {
+ SymbolRef PtrToBuf;
+
+ public:
+ InnerPointerBRVisitor(SymbolRef Sym) : PtrToBuf(Sym) {}
+
+ static void *getTag() {
+ static int Tag = 0;
+ return &Tag;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ ID.AddPointer(getTag());
+ }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ // FIXME: Scan the map once in the visitor's constructor and do a direct
+ // lookup by region.
+ bool isSymbolTracked(ProgramStateRef State, SymbolRef Sym) {
+ RawPtrMapTy Map = State->get<RawPtrMap>();
+ for (const auto Entry : Map) {
+ if (Entry.second.contains(Sym))
+ return true;
+ }
+ return false;
+ }
+ };
+
+ InnerPointerChecker()
+ : AppendFn("append"), AssignFn("assign"), ClearFn("clear"),
+ CStrFn("c_str"), DataFn("data"), EraseFn("erase"), InsertFn("insert"),
+ PopBackFn("pop_back"), PushBackFn("push_back"), ReplaceFn("replace"),
+ ReserveFn("reserve"), ResizeFn("resize"),
+ ShrinkToFitFn("shrink_to_fit"), SwapFn("swap") {}
+
+ /// Check whether the function called on the container object is a
+ /// member function that potentially invalidates pointers referring
+ /// to the object's internal buffer.
+ bool mayInvalidateBuffer(const CallEvent &Call) const;
+
+ /// Record the connection between the symbol returned by c_str() and the
+ /// corresponding string object region in the ProgramState. Mark the symbol
+ /// released if the string object is destroyed.
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+ /// Clean up the ProgramState map.
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+// [string.require]
+//
+// "References, pointers, and iterators referring to the elements of a
+// basic_string sequence may be invalidated by the following uses of that
+// basic_string object:
+//
+// -- TODO: As an argument to any standard library function taking a reference
+// to non-const basic_string as an argument. For example, as an argument to
+// non-member functions swap(), operator>>(), and getline(), or as an argument
+// to basic_string::swap().
+//
+// -- Calling non-const member functions, except operator[], at, front, back,
+// begin, rbegin, end, and rend."
+//
+bool InnerPointerChecker::mayInvalidateBuffer(const CallEvent &Call) const {
+ if (const auto *MemOpCall = dyn_cast<CXXMemberOperatorCall>(&Call)) {
+ OverloadedOperatorKind Opc = MemOpCall->getOriginExpr()->getOperator();
+ if (Opc == OO_Equal || Opc == OO_PlusEqual)
+ return true;
+ return false;
+ }
+ return (isa<CXXDestructorCall>(Call) || Call.isCalled(AppendFn) ||
+ Call.isCalled(AssignFn) || Call.isCalled(ClearFn) ||
+ Call.isCalled(EraseFn) || Call.isCalled(InsertFn) ||
+ Call.isCalled(PopBackFn) || Call.isCalled(PushBackFn) ||
+ Call.isCalled(ReplaceFn) || Call.isCalled(ReserveFn) ||
+ Call.isCalled(ResizeFn) || Call.isCalled(ShrinkToFitFn) ||
+ Call.isCalled(SwapFn));
+}
+
+void InnerPointerChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *ICall = dyn_cast<CXXInstanceCall>(&Call);
+ if (!ICall)
+ return;
+
+ SVal Obj = ICall->getCXXThisVal();
+ const auto *ObjRegion = dyn_cast_or_null<TypedValueRegion>(Obj.getAsRegion());
+ if (!ObjRegion)
+ return;
+
+ auto *TypeDecl = ObjRegion->getValueType()->getAsCXXRecordDecl();
+ if (TypeDecl->getName() != "basic_string")
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ if (Call.isCalled(CStrFn) || Call.isCalled(DataFn)) {
+ SVal RawPtr = Call.getReturnValue();
+ if (SymbolRef Sym = RawPtr.getAsSymbol(/*IncludeBaseRegions=*/true)) {
+ // Start tracking this raw pointer by adding it to the set of symbols
+ // associated with this container object in the program state map.
+ PtrSet::Factory &F = State->getStateManager().get_context<PtrSet>();
+ const PtrSet *SetPtr = State->get<RawPtrMap>(ObjRegion);
+ PtrSet Set = SetPtr ? *SetPtr : F.getEmptySet();
+ assert(C.wasInlined || !Set.contains(Sym));
+ Set = F.add(Set, Sym);
+ State = State->set<RawPtrMap>(ObjRegion, Set);
+ C.addTransition(State);
+ }
+ return;
+ }
+
+ if (mayInvalidateBuffer(Call)) {
+ if (const PtrSet *PS = State->get<RawPtrMap>(ObjRegion)) {
+ // Mark all pointer symbols associated with the deleted object released.
+ const Expr *Origin = Call.getOriginExpr();
+ for (const auto Symbol : *PS) {
+ // NOTE: `Origin` may be null, and will be stored so in the symbol's
+ // `RefState` in MallocChecker's `RegionState` program state map.
+ State = allocation_state::markReleased(State, Symbol, Origin);
+ }
+ State = State->remove<RawPtrMap>(ObjRegion);
+ C.addTransition(State);
+ return;
+ }
+ }
+}
+
+void InnerPointerChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ PtrSet::Factory &F = State->getStateManager().get_context<PtrSet>();
+ RawPtrMapTy RPM = State->get<RawPtrMap>();
+ for (const auto Entry : RPM) {
+ if (!SymReaper.isLiveRegion(Entry.first)) {
+ // Due to incomplete destructor support, some dead regions might
+ // remain in the program state map. Clean them up.
+ State = State->remove<RawPtrMap>(Entry.first);
+ }
+ if (const PtrSet *OldSet = State->get<RawPtrMap>(Entry.first)) {
+ PtrSet CleanedUpSet = *OldSet;
+ for (const auto Symbol : Entry.second) {
+ if (!SymReaper.isLive(Symbol))
+ CleanedUpSet = F.remove(CleanedUpSet, Symbol);
+ }
+ State = CleanedUpSet.isEmpty()
+ ? State->remove<RawPtrMap>(Entry.first)
+ : State->set<RawPtrMap>(Entry.first, CleanedUpSet);
+ }
+ }
+ C.addTransition(State);
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+InnerPointerChecker::InnerPointerBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ if (!isSymbolTracked(N->getState(), PtrToBuf) ||
+ isSymbolTracked(PrevN->getState(), PtrToBuf))
+ return nullptr;
+
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
+ if (!S)
+ return nullptr;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << "Dangling inner pointer obtained here";
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true,
+ nullptr);
+}
+
+namespace clang {
+namespace ento {
+namespace allocation_state {
+
+std::unique_ptr<BugReporterVisitor> getInnerPointerBRVisitor(SymbolRef Sym) {
+ return llvm::make_unique<InnerPointerChecker::InnerPointerBRVisitor>(Sym);
+}
+
+} // end namespace allocation_state
+} // end namespace ento
+} // end namespace clang
+
+void ento::registerInnerPointerChecker(CheckerManager &Mgr) {
+ registerNewDeleteChecker(Mgr);
+ Mgr.registerChecker<InnerPointerChecker>();
+}
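
[Annotation] In combination with MallocChecker (hence the registerNewDeleteChecker() dependency above), the recorded state lets the analyzer flag dangling inner pointers. A canonical case this new checker is meant to catch; the exact use-after-free wording comes from MallocChecker:

#include <cstdio>
#include <string>

void useAfterInvalidation(void) {
  std::string S = "hello";
  const char *P = S.c_str(); // note: "Dangling inner pointer obtained here"
  S.clear();                 // mayInvalidateBuffer(): P is marked released
  printf("%s", P);           // use-after-free reported via MallocChecker
}
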
diff --git a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
index 0f9b749506fa..56c250cd1678 100644
--- a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
@@ -46,19 +46,25 @@
// use setter and getters functions which separate the three cases. To store
// them we use a pointer union of symbol and memory region.
//
-// The checker works the following way: We record the past-end iterator for
-// all containers whenever their `.end()` is called. Since the Constraint
-// Manager cannot handle SVals we need to take over its role. We post-check
-// equality and non-equality comparisons and propagate the position of the
-// iterator to the other side of the comparison if it is past-end and we are in
-// the 'equal' branch (true-branch for `==` and false-branch for `!=`).
+// The checker works the following way: We record the begin and the
+// past-end iterator for all containers whenever their `.begin()` and `.end()`
+// are called. Since the Constraint Manager cannot handle such SVals we need
+// to take over its role. We post-check equality and non-equality comparisons
+// and record that the two sides are equal if we are in the 'equal' branch
+// (true-branch for `==` and false-branch for `!=`).
//
// In case of type-I or type-II iterators we get a concrete integer as a result
// of the comparison (1 or 0) but in case of type-III we only get a Symbol. In
// this latter case we record the symbol and reload it in evalAssume() and do
// the propagation there. We also handle (maybe double) negated comparisons
-// which are represented in the form of (x == 0 or x !=0 ) where x is the
+// which are represented in the form of (x == 0 or x != 0) where x is the
// comparison itself.
+//
+// Since `SimpleConstraintManager` cannot handle complex symbolic expressions
+// we only use expressions of the format S, S+n or S-n for iterator positions
+// where S is a conjured symbol and n is an unsigned concrete integer. When
+// making an assumption e.g. `S1 + n == S2 + m` we store `S1 - S2 == m - n` as
+// a constraint which we later retrieve when doing an actual comparison.
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -80,7 +86,7 @@ private:
const MemRegion *Cont;
// Abstract offset
- SymbolRef Offset;
+ const SymbolRef Offset;
IteratorPosition(const MemRegion *C, SymbolRef Of)
: Cont(C), Offset(Of) {}
@@ -113,31 +119,39 @@ public:
typedef llvm::PointerUnion<const MemRegion *, SymbolRef> RegionOrSymbol;
-// Structure to record the symbolic end position of a container
+// Structure to record the symbolic begin and end position of a container
struct ContainerData {
private:
- SymbolRef End;
+ const SymbolRef Begin, End;
- ContainerData(SymbolRef E) : End(E) {}
+ ContainerData(SymbolRef B, SymbolRef E) : Begin(B), End(E) {}
public:
+ static ContainerData fromBegin(SymbolRef B) {
+ return ContainerData(B, nullptr);
+ }
+
static ContainerData fromEnd(SymbolRef E) {
- return ContainerData(E);
+ return ContainerData(nullptr, E);
}
+ SymbolRef getBegin() const { return Begin; }
SymbolRef getEnd() const { return End; }
- ContainerData newEnd(SymbolRef E) const { return ContainerData(E); }
+ ContainerData newBegin(SymbolRef B) const { return ContainerData(B, End); }
+
+ ContainerData newEnd(SymbolRef E) const { return ContainerData(Begin, E); }
bool operator==(const ContainerData &X) const {
- return End == X.End;
+ return Begin == X.Begin && End == X.End;
}
bool operator!=(const ContainerData &X) const {
- return End != X.End;
+ return Begin != X.Begin || End != X.End;
}
void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.Add(Begin);
ID.Add(End);
}
};
@@ -167,8 +181,9 @@ public:
class IteratorChecker
: public Checker<check::PreCall, check::PostCall,
+ check::PreStmt<CXXOperatorCallExpr>,
check::PostStmt<MaterializeTemporaryExpr>,
- check::DeadSymbols,
+ check::LiveSymbols, check::DeadSymbols,
eval::Assume> {
std::unique_ptr<BugType> OutOfRangeBugType;
@@ -176,10 +191,22 @@ class IteratorChecker
void handleComparison(CheckerContext &C, const SVal &RetVal, const SVal &LVal,
const SVal &RVal, OverloadedOperatorKind Op) const;
void verifyDereference(CheckerContext &C, const SVal &Val) const;
+ void handleIncrement(CheckerContext &C, const SVal &RetVal, const SVal &Iter,
+ bool Postfix) const;
+ void handleDecrement(CheckerContext &C, const SVal &RetVal, const SVal &Iter,
+ bool Postfix) const;
+ void handleRandomIncrOrDecr(CheckerContext &C, OverloadedOperatorKind Op,
+ const SVal &RetVal, const SVal &LHS,
+ const SVal &RHS) const;
+ void handleBegin(CheckerContext &C, const Expr *CE, const SVal &RetVal,
+ const SVal &Cont) const;
void handleEnd(CheckerContext &C, const Expr *CE, const SVal &RetVal,
const SVal &Cont) const;
void assignToContainer(CheckerContext &C, const Expr *CE, const SVal &RetVal,
const MemRegion *Cont) const;
+ void verifyRandomIncrOrDecr(CheckerContext &C, OverloadedOperatorKind Op,
+ const SVal &RetVal, const SVal &LHS,
+ const SVal &RHS) const;
void reportOutOfRangeBug(const StringRef &Message, const SVal &Val,
CheckerContext &C, ExplodedNode *ErrNode) const;
@@ -196,8 +223,10 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPreStmt(const CXXOperatorCallExpr *COCE, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
ProgramStateRef evalAssume(ProgramStateRef State, SVal Cond,
bool Assumption) const;
@@ -217,9 +246,13 @@ namespace {
bool isIteratorType(const QualType &Type);
bool isIterator(const CXXRecordDecl *CRD);
+bool isBeginCall(const FunctionDecl *Func);
bool isEndCall(const FunctionDecl *Func);
bool isSimpleComparisonOperator(OverloadedOperatorKind OK);
bool isDereferenceOperator(OverloadedOperatorKind OK);
+bool isIncrementOperator(OverloadedOperatorKind OK);
+bool isDecrementOperator(OverloadedOperatorKind OK);
+bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK);
BinaryOperator::Opcode getOpcode(const SymExpr *SE);
const RegionOrSymbol getRegionOrSymbol(const SVal &Val);
const ProgramStateRef processComparison(ProgramStateRef State,
@@ -230,7 +263,11 @@ const ProgramStateRef saveComparison(ProgramStateRef State,
const SVal &RVal, bool Eq);
const IteratorComparison *loadComparison(ProgramStateRef State,
const SymExpr *Condition);
+SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont);
SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont);
+ProgramStateRef createContainerBegin(ProgramStateRef State,
+ const MemRegion *Cont,
+ const SymbolRef Sym);
ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
const SymbolRef Sym);
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
@@ -255,6 +292,7 @@ const ContainerData *getContainerData(ProgramStateRef State,
ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
const ContainerData &CData);
bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos);
+bool isZero(ProgramStateRef State, const NonLoc &Val);
} // namespace
IteratorChecker::IteratorChecker() {
@@ -272,6 +310,22 @@ void IteratorChecker::checkPreCall(const CallEvent &Call,
if (Func->isOverloadedOperator()) {
if (ChecksEnabled[CK_IteratorRangeChecker] &&
+ isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ // Check for out-of-range incrementions and decrementions
+ if (Call.getNumArgs() >= 1) {
+ verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ }
+ } else {
+ if (Call.getNumArgs() >= 2) {
+ verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getArgSVal(1));
+ }
+ }
+ } else if (ChecksEnabled[CK_IteratorRangeChecker] &&
isDereferenceOperator(Func->getOverloadedOperator())) {
// Check for dereference of out-of-range iterators
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
@@ -300,6 +354,36 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
handleComparison(C, Call.getReturnValue(), Call.getArgSVal(0),
Call.getArgSVal(1), Op);
}
+ } else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ if (Call.getNumArgs() >= 1) {
+ handleRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ Call.getReturnValue(),
+ InstCall->getCXXThisVal(), Call.getArgSVal(0));
+ }
+ } else {
+ if (Call.getNumArgs() >= 2) {
+ handleRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getArgSVal(1));
+ }
+ }
+ } else if (isIncrementOperator(Func->getOverloadedOperator())) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleIncrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
+ Call.getNumArgs());
+ } else {
+ handleIncrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ }
+ } else if (isDecrementOperator(Func->getOverloadedOperator())) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ handleDecrement(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
+ Call.getNumArgs());
+ } else {
+ handleDecrement(C, Call.getReturnValue(), Call.getArgSVal(0),
+ Call.getNumArgs());
+ }
}
} else {
const auto *OrigExpr = Call.getOriginExpr();
@@ -315,6 +399,11 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
return;
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ if (isBeginCall(Func)) {
+ handleBegin(C, OrigExpr, Call.getReturnValue(),
+ InstCall->getCXXThisVal());
+ return;
+ }
if (isEndCall(Func)) {
handleEnd(C, OrigExpr, Call.getReturnValue(),
InstCall->getCXXThisVal());
@@ -351,19 +440,80 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
}
}
+void IteratorChecker::checkPreStmt(const CXXOperatorCallExpr *COCE,
+ CheckerContext &C) const {
+ const auto *ThisExpr = COCE->getArg(0);
+
+ auto State = C.getState();
+ const auto *LCtx = C.getLocationContext();
+
+ const auto CurrentThis = State->getSVal(ThisExpr, LCtx);
+ if (const auto *Reg = CurrentThis.getAsRegion()) {
+ if (!Reg->getAs<CXXTempObjectRegion>())
+ return;
+ const auto OldState = C.getPredecessor()->getFirstPred()->getState();
+ const auto OldThis = OldState->getSVal(ThisExpr, LCtx);
+ // FIXME: This solution is unreliable. It may happen that another checker
+ // subscribes to the pre-statement check of `CXXOperatorCallExpr`
+ // and adds a transition before us. The proper fix is to make the
+ // CFG provide a `ConstructionContext` for the `CXXOperatorCallExpr`,
+ // which would turn the corresponding `CFGStmt` element into a
+ // `CFGCXXRecordTypedCall` element, which will allow `ExprEngine` to
+ // foresee that the `begin()`/`end()` call constructs the object
+ // directly in the temporary region that `CXXOperatorCallExpr` takes
+ // as its implicit object argument.
+ const auto *Pos = getIteratorPosition(OldState, OldThis);
+ if (!Pos)
+ return;
+ State = setIteratorPosition(State, CurrentThis, *Pos);
+ C.addTransition(State);
+ }
+}
+
void IteratorChecker::checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const {
/* Transfer iterator state to temporary objects */
auto State = C.getState();
- const auto *LCtx = C.getLocationContext();
const auto *Pos =
- getIteratorPosition(State, State->getSVal(MTE->GetTemporaryExpr(), LCtx));
+ getIteratorPosition(State, C.getSVal(MTE->GetTemporaryExpr()));
if (!Pos)
return;
- State = setIteratorPosition(State, State->getSVal(MTE, LCtx), *Pos);
+ State = setIteratorPosition(State, C.getSVal(MTE), *Pos);
C.addTransition(State);
}
+void IteratorChecker::checkLiveSymbols(ProgramStateRef State,
+ SymbolReaper &SR) const {
+ // Keep symbolic expressions of iterator positions, container begins and ends
+ // alive
+ auto RegionMap = State->get<IteratorRegionMap>();
+ for (const auto Reg : RegionMap) {
+ const auto Offset = Reg.second.getOffset();
+ for (auto i = Offset->symbol_begin(); i != Offset->symbol_end(); ++i)
+ if (isa<SymbolData>(*i))
+ SR.markLive(*i);
+ }
+
+ auto SymbolMap = State->get<IteratorSymbolMap>();
+ for (const auto Sym : SymbolMap) {
+ const auto Offset = Sym.second.getOffset();
+ for (auto i = Offset->symbol_begin(); i != Offset->symbol_end(); ++i)
+ if (isa<SymbolData>(*i))
+ SR.markLive(*i);
+ }
+
+ auto ContMap = State->get<ContainerMap>();
+ for (const auto Cont : ContMap) {
+ const auto CData = Cont.second;
+ if (CData.getBegin()) {
+ SR.markLive(CData.getBegin());
+ }
+ if (CData.getEnd()) {
+ SR.markLive(CData.getEnd());
+ }
+ }
+}
+
void IteratorChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
// Cleanup
@@ -471,13 +621,209 @@ void IteratorChecker::verifyDereference(CheckerContext &C,
static CheckerProgramPointTag Tag("IteratorRangeChecker",
"IteratorOutOfRange");
auto *N = C.generateNonFatalErrorNode(State, &Tag);
- if (!N) {
+ if (!N)
return;
- }
reportOutOfRangeBug("Iterator accessed outside of its range.", Val, C, N);
}
}
+void IteratorChecker::handleIncrement(CheckerContext &C, const SVal &RetVal,
+ const SVal &Iter, bool Postfix) const {
+ // Increment the symbolic expression which represents the position of the
+ // iterator
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (Pos) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto OldOffset = Pos->getOffset();
+ auto NewOffset =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(OldOffset),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(OldOffset)).getAsSymbol();
+ auto NewPos = Pos->setTo(NewOffset);
+ State = setIteratorPosition(State, Iter, NewPos);
+ State = setIteratorPosition(State, RetVal, Postfix ? *Pos : NewPos);
+ C.addTransition(State);
+ }
+}
+
+void IteratorChecker::handleDecrement(CheckerContext &C, const SVal &RetVal,
+ const SVal &Iter, bool Postfix) const {
+ // Decrement the symbolic expression which represents the position of the
+ // iterator
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (Pos) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto OldOffset = Pos->getOffset();
+ auto NewOffset =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(OldOffset),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(OldOffset)).getAsSymbol();
+ auto NewPos = Pos->setTo(NewOffset);
+ State = setIteratorPosition(State, Iter, NewPos);
+ State = setIteratorPosition(State, RetVal, Postfix ? *Pos : NewPos);
+ C.addTransition(State);
+ }
+}
+
+// This function tells the analyzer's engine that symbols produced by our
+// checker, most notably iterator positions, are relatively small.
+// A distance between items in the container should not be very large.
+// By assuming that it is within around 1/8 of the address space,
+// we can help the analyzer perform operations on these symbols
+// without being afraid of integer overflows.
+// FIXME: Should we provide it as an API, so that all checkers could use it?
+static ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
+ long Scale) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ BasicValueFactory &BV = SVB.getBasicValueFactory();
+
+ QualType T = Sym->getType();
+ assert(T->isSignedIntegerOrEnumerationType());
+ APSIntType AT = BV.getAPSIntType(T);
+
+ ProgramStateRef NewState = State;
+
+ llvm::APSInt Max = AT.getMaxValue() / AT.getValue(Scale);
+ SVal IsCappedFromAbove =
+ SVB.evalBinOpNN(State, BO_LE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Max), SVB.getConditionType());
+ if (auto DV = IsCappedFromAbove.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ llvm::APSInt Min = -Max;
+ SVal IsCappedFromBelow =
+ SVB.evalBinOpNN(State, BO_GE, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Min), SVB.getConditionType());
+ if (auto DV = IsCappedFromBelow.getAs<DefinedSVal>()) {
+ NewState = NewState->assume(*DV, true);
+ if (!NewState)
+ return State;
+ }
+
+ return NewState;
+}
+
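
[Annotation] With the Scale of 4 passed by this function's callers, a signed 64-bit offset symbol is capped to +/-(INT64_MAX / 4), i.e. roughly 1/8 of the full 2^64 address space, as the comment states. A quick numeric check of that bound:

#include <cstdint>
#include <cstdio>

int main(void) {
  const long Scale = 4;
  const int64_t Max = INT64_MAX / Scale; // upper cap assumed for the symbol
  const int64_t Min = -Max;              // symmetric lower cap
  // Leaves ~2.3e18 of headroom on either side before overflow is possible.
  printf("assumed range: [%lld, %lld]\n", (long long)Min, (long long)Max);
  return 0;
}
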
+void IteratorChecker::handleRandomIncrOrDecr(CheckerContext &C,
+ OverloadedOperatorKind Op,
+ const SVal &RetVal,
+ const SVal &LHS,
+ const SVal &RHS) const {
+ // Increment or decrement the symbolic expression which represents the
+ // position of the iterator
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, LHS);
+ if (!Pos)
+ return;
+
+ const auto *value = &RHS;
+ if (auto loc = RHS.getAs<Loc>()) {
+ const auto val = State->getRawSVal(*loc);
+ value = &val;
+ }
+
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+ auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
+ const auto OldOffset = Pos->getOffset();
+ SymbolRef NewOffset;
+ if (const auto intValue = value->getAs<nonloc::ConcreteInt>()) {
+ // For concrete integers we can calculate the new position
+ NewOffset = SVB.evalBinOp(State, BinOp, nonloc::SymbolVal(OldOffset),
+ *intValue,
+ SymMgr.getType(OldOffset)).getAsSymbol();
+ } else {
+ // For other symbols create a new symbol to keep expressions simple
+ const auto &LCtx = C.getLocationContext();
+ NewOffset = SymMgr.conjureSymbol(nullptr, LCtx, SymMgr.getType(OldOffset),
+ C.blockCount());
+ State = assumeNoOverflow(State, NewOffset, 4);
+ }
+ auto NewPos = Pos->setTo(NewOffset);
+ auto &TgtVal = (Op == OO_PlusEqual || Op == OO_MinusEqual) ? LHS : RetVal;
+ State = setIteratorPosition(State, TgtVal, NewPos);
+ C.addTransition(State);
+}
+
+void IteratorChecker::verifyRandomIncrOrDecr(CheckerContext &C,
+ OverloadedOperatorKind Op,
+ const SVal &RetVal,
+ const SVal &LHS,
+ const SVal &RHS) const {
+ auto State = C.getState();
+
+ // If the iterator is initially inside its range, then the operation is valid
+ const auto *Pos = getIteratorPosition(State, LHS);
+ if (!Pos || !isOutOfRange(State, *Pos))
+ return;
+
+ auto value = RHS;
+ if (auto loc = RHS.getAs<Loc>()) {
+ value = State->getRawSVal(*loc);
+ }
+
+ // Incrementing or decrementing by 0 is never a bug
+ if (isZero(State, value.castAs<NonLoc>()))
+ return;
+
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+ auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
+ const auto OldOffset = Pos->getOffset();
+ const auto intValue = value.getAs<nonloc::ConcreteInt>();
+ if (!intValue)
+ return;
+
+ auto NewOffset = SVB.evalBinOp(State, BinOp, nonloc::SymbolVal(OldOffset),
+ *intValue,
+ SymMgr.getType(OldOffset)).getAsSymbol();
+ auto NewPos = Pos->setTo(NewOffset);
+
+ // If out of range, the only valid operation is to step into the range
+ if (isOutOfRange(State, NewPos)) {
+ auto *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ reportOutOfRangeBug("Iterator accessed past its end.", LHS, C, N);
+ }
+}
+
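
[Annotation] verifyRandomIncrOrDecr() only reports when the iterator starts out of range and the concrete step fails to bring it back inside. An illustrative subject, assuming a position at end() counts as out of range for this check:

#include <vector>

void stepFromEnd(const std::vector<int> &V) {
  auto I = V.end();
  auto Back = I - 1; // steps back into the range: accepted
  auto Past = I + 2; // stays past the end: "Iterator accessed past its end."
  (void)Back;
  (void)Past;
}
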
+void IteratorChecker::handleBegin(CheckerContext &C, const Expr *CE,
+ const SVal &RetVal, const SVal &Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
+ ContReg = CBOR->getSuperRegion();
+ }
+
+ // If the container already has a begin symbol then use it. Otherwise first
+ // create a new one.
+ auto State = C.getState();
+ auto BeginSym = getContainerBegin(State, ContReg);
+ if (!BeginSym) {
+ auto &SymMgr = C.getSymbolManager();
+ BeginSym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, BeginSym, 4);
+ State = createContainerBegin(State, ContReg, BeginSym);
+ }
+ State = setIteratorPosition(State, RetVal,
+ IteratorPosition::getPosition(ContReg, BeginSym));
+ C.addTransition(State);
+}
+
void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
const SVal &RetVal, const SVal &Cont) const {
const auto *ContReg = Cont.getAsRegion();
@@ -496,6 +842,7 @@ void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
auto &SymMgr = C.getSymbolManager();
EndSym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, EndSym, 4);
State = createContainerEnd(State, ContReg, EndSym);
}
State = setIteratorPosition(State, RetVal,
@@ -514,6 +861,7 @@ void IteratorChecker::assignToContainer(CheckerContext &C, const Expr *CE,
auto &SymMgr = C.getSymbolManager();
auto Sym = SymMgr.conjureSymbol(CE, C.getLocationContext(),
C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, Sym, 4);
State = setIteratorPosition(State, RetVal,
IteratorPosition::getPosition(Cont, Sym));
C.addTransition(State);
@@ -529,9 +877,12 @@ void IteratorChecker::reportOutOfRangeBug(const StringRef &Message,
namespace {
+bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
bool isGreaterOrEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
BinaryOperator::Opcode Opc);
+bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
+ BinaryOperator::Opcode Opc);
bool isIteratorType(const QualType &Type) {
if (Type->isPointerType())
@@ -585,6 +936,13 @@ bool isIterator(const CXXRecordDecl *CRD) {
HasPostIncrOp && HasDerefOp;
}
+bool isBeginCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ return IdInfo->getName().endswith_lower("begin");
+}
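+// (The case-insensitive suffix match also covers cbegin, rbegin and crbegin.)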
+
bool isEndCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
@@ -601,11 +959,24 @@ bool isDereferenceOperator(OverloadedOperatorKind OK) {
OK == OO_Subscript;
}
+bool isIncrementOperator(OverloadedOperatorKind OK) {
+ return OK == OO_PlusPlus;
+}
+
+bool isDecrementOperator(OverloadedOperatorKind OK) {
+ return OK == OO_MinusMinus;
+}
+
+bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK) {
+ return OK == OO_Plus || OK == OO_PlusEqual || OK == OO_Minus ||
+ OK == OO_MinusEqual;
+}
+
BinaryOperator::Opcode getOpcode(const SymExpr *SE) {
if (const auto *BSE = dyn_cast<BinarySymExpr>(SE)) {
return BSE->getOpcode();
} else if (const auto *SC = dyn_cast<SymbolConjured>(SE)) {
- const auto *COE = dyn_cast<CXXOperatorCallExpr>(SC->getStmt());
+ const auto *COE = dyn_cast_or_null<CXXOperatorCallExpr>(SC->getStmt());
if (!COE)
return BO_Comma; // Extremal value, neither EQ nor NE
if (COE->getOperator() == OO_EqualEqual) {
@@ -660,6 +1031,14 @@ const IteratorComparison *loadComparison(ProgramStateRef State,
return State->get<IteratorComparisonMap>(Condition);
}
+SymbolRef getContainerBegin(ProgramStateRef State, const MemRegion *Cont) {
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (!CDataPtr)
+ return nullptr;
+
+ return CDataPtr->getBegin();
+}
+
SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
const auto *CDataPtr = getContainerData(State, Cont);
if (!CDataPtr)
@@ -668,6 +1047,22 @@ SymbolRef getContainerEnd(ProgramStateRef State, const MemRegion *Cont) {
return CDataPtr->getEnd();
}
+ProgramStateRef createContainerBegin(ProgramStateRef State,
+ const MemRegion *Cont,
+ const SymbolRef Sym) {
+ // Only create if it does not exist
+ const auto *CDataPtr = getContainerData(State, Cont);
+ if (CDataPtr) {
+ if (CDataPtr->getBegin()) {
+ return State;
+ }
+ const auto CData = CDataPtr->newBegin(Sym);
+ return setContainerData(State, Cont, CData);
+ }
+ const auto CData = ContainerData::fromBegin(Sym);
+ return setContainerData(State, Cont, CData);
+}
+
ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
const SymbolRef Sym) {
// Only create if it does not exist
@@ -675,14 +1070,12 @@ ProgramStateRef createContainerEnd(ProgramStateRef State, const MemRegion *Cont,
if (CDataPtr) {
if (CDataPtr->getEnd()) {
return State;
- } else {
- const auto CData = CDataPtr->newEnd(Sym);
- return setContainerData(State, Cont, CData);
}
- } else {
- const auto CData = ContainerData::fromEnd(Sym);
+ const auto CData = CDataPtr->newEnd(Sym);
return setContainerData(State, Cont, CData);
}
+ const auto CData = ContainerData::fromEnd(Sym);
+ return setContainerData(State, Cont, CData);
}
const ContainerData *getContainerData(ProgramStateRef State,
@@ -767,17 +1160,39 @@ ProgramStateRef relateIteratorPositions(ProgramStateRef State,
const IteratorPosition &Pos1,
const IteratorPosition &Pos2,
bool Equal) {
- // Try to compare them and get a defined value
auto &SVB = State->getStateManager().getSValBuilder();
+
+ // FIXME: This code should be reworked as follows:
+ // 1. Subtract the operands using evalBinOp().
+ // 2. Assume that the result doesn't overflow.
+ // 3. Compare the result to 0.
+ // 4. Assume the result of the comparison.
const auto comparison =
SVB.evalBinOp(State, BO_EQ, nonloc::SymbolVal(Pos1.getOffset()),
- nonloc::SymbolVal(Pos2.getOffset()), SVB.getConditionType())
- .getAs<DefinedSVal>();
- if (comparison) {
- return State->assume(*comparison, Equal);
+ nonloc::SymbolVal(Pos2.getOffset()),
+ SVB.getConditionType());
+
+ assert(comparison.getAs<DefinedSVal>() &&
+ "Symbol comparison must be a `DefinedSVal`");
+
+ auto NewState = State->assume(comparison.castAs<DefinedSVal>(), Equal);
+ if (const auto CompSym = comparison.getAsSymbol()) {
+ assert(isa<SymIntExpr>(CompSym) &&
+ "Symbol comparison must be a `SymIntExpr`");
+ assert(BinaryOperator::isComparisonOp(
+ cast<SymIntExpr>(CompSym)->getOpcode()) &&
+ "Symbol comparison must be a comparison");
+ return assumeNoOverflow(NewState, cast<SymIntExpr>(CompSym)->getLHS(), 2);
}
- return State;
+ return NewState;
+}
+
+bool isZero(ProgramStateRef State, const NonLoc &Val) {
+ auto &BVF = State->getBasicVals();
+ return compare(State, Val,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(0))),
+ BO_EQ);
}
bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
@@ -789,6 +1204,13 @@ bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
// Out of range means less than the begin symbol or greater or equal to the
// end symbol.
+ const auto Beg = CData->getBegin();
+ if (Beg) {
+ if (isLess(State, Pos.getOffset(), Beg)) {
+ return true;
+ }
+ }
+
const auto End = CData->getEnd();
if (End) {
if (isGreaterOrEqual(State, Pos.getOffset(), End)) {
@@ -799,25 +1221,30 @@ bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
return false;
}
+bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
+ return compare(State, Sym1, Sym2, BO_LT);
+}
+
bool isGreaterOrEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
return compare(State, Sym1, Sym2, BO_GE);
}
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
BinaryOperator::Opcode Opc) {
- auto &SMgr = State->getStateManager();
- auto &SVB = SMgr.getSValBuilder();
+ return compare(State, nonloc::SymbolVal(Sym1), nonloc::SymbolVal(Sym2), Opc);
+}
+
+bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
+ BinaryOperator::Opcode Opc) {
+ auto &SVB = State->getStateManager().getSValBuilder();
const auto comparison =
- SVB.evalBinOp(State, Opc, nonloc::SymbolVal(Sym1),
- nonloc::SymbolVal(Sym2), SVB.getConditionType())
- .getAs<DefinedSVal>();
+ SVB.evalBinOp(State, Opc, NL1, NL2, SVB.getConditionType());
- if(comparison) {
- return !!State->assume(*comparison, true);
- }
+ assert(comparison.getAs<DefinedSVal>() &&
+ "Symbol comparison must be a `DefinedSVal`");
- return false;
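+  // Note: this is a "must" relation. The comparison is reported to hold only
+  // if assuming its negation leaves no feasible state.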
+ return !State->assume(comparison.castAs<DefinedSVal>(), false);
}
} // namespace
@@ -831,3 +1258,4 @@ bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
}
REGISTER_CHECKER(IteratorRangeChecker)
+
diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 8076ca09591f..2fb627184eb9 100644
--- a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -20,7 +20,7 @@
// been called on them. An invalidation method should either invalidate all
// the ivars or call another invalidation method (on self).
//
-// Partial invalidor annotation allows to addess cases when ivars are
+// Partial invalidator annotation allows addressing cases when ivars are
// invalidated by other methods, which might or might not be called from
// the invalidation method. The checker checks that each invalidation
// method and all the partial methods cumulatively invalidate all ivars.
@@ -402,13 +402,13 @@ visit(const ObjCImplementationDecl *ImplD) const {
// Find the setter and the getter.
const ObjCMethodDecl *SetterD = PD->getSetterMethodDecl();
if (SetterD) {
- SetterD = cast<ObjCMethodDecl>(SetterD->getCanonicalDecl());
+ SetterD = SetterD->getCanonicalDecl();
PropSetterToIvarMap[SetterD] = ID;
}
const ObjCMethodDecl *GetterD = PD->getGetterMethodDecl();
if (GetterD) {
- GetterD = cast<ObjCMethodDecl>(GetterD->getCanonicalDecl());
+ GetterD = GetterD->getCanonicalDecl();
PropGetterToIvarMap[GetterD] = ID;
}
}
@@ -606,7 +606,7 @@ void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCMessageExpr(
const ObjCMessageExpr *ME) {
const ObjCMethodDecl *MD = ME->getMethodDecl();
if (MD) {
- MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MD = MD->getCanonicalDecl();
MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
if (IvI != PropertyGetterToIvarMap.end())
markInvalidated(IvI->second);
@@ -630,7 +630,7 @@ void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCPropertyRefExpr(
if (PA->isImplicitProperty()) {
const ObjCMethodDecl *MD = PA->getImplicitPropertySetter();
if (MD) {
- MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MD = MD->getCanonicalDecl();
MethToIvarMapTy::const_iterator IvI =PropertyGetterToIvarMap.find(MD);
if (IvI != PropertyGetterToIvarMap.end())
markInvalidated(IvI->second);
@@ -702,7 +702,7 @@ void IvarInvalidationCheckerImpl::MethodCrawler::VisitObjCMessageExpr(
// Check if we call a setter and set the property to 'nil'.
if (MD && (ME->getNumArgs() == 1) && isZero(ME->getArg(0))) {
- MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+ MD = MD->getCanonicalDecl();
MethToIvarMapTy::const_iterator IvI = PropertySetterToIvarMap.find(MD);
if (IvI != PropertySetterToIvarMap.end()) {
markInvalidated(IvI->second);
diff --git a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 655ce33390c9..849b1193c042 100644
--- a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -113,8 +113,7 @@ NonLocalizedStringChecker::NonLocalizedStringChecker() {
}
namespace {
-class NonLocalizedStringBRVisitor final
- : public BugReporterVisitorImpl<NonLocalizedStringBRVisitor> {
+class NonLocalizedStringBRVisitor final : public BugReporterVisitor {
const MemRegion *NonLocalizedString;
bool Satisfied;
@@ -1017,8 +1016,7 @@ NonLocalizedStringBRVisitor::VisitNode(const ExplodedNode *Succ,
if (!LiteralExpr)
return nullptr;
- ProgramStateRef State = Succ->getState();
- SVal LiteralSVal = State->getSVal(LiteralExpr, Succ->getLocationContext());
+ SVal LiteralSVal = Succ->getSVal(LiteralExpr);
if (LiteralSVal.getAsRegion() != NonLocalizedString)
return nullptr;
@@ -1108,7 +1106,7 @@ void EmptyLocalizationContextChecker::checkASTDecl(
void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
const ObjCMessageExpr *ME) {
- // FIXME: We may be able to use PPCallbacks to check for empy context
+ // FIXME: We may be able to use PPCallbacks to check for empty context
// comments as part of preprocessing and avoid this re-lexing hack.
const ObjCInterfaceDecl *OD = ME->getReceiverInterface();
if (!OD)
@@ -1389,7 +1387,7 @@ void PluralMisuseChecker::MethodCrawler::reportPluralMisuseError(
// Generate the bug report.
BR.EmitBasicReport(AC->getDecl(), Checker, "Plural Misuse",
"Localizability Issue (Apple)",
- "Plural cases are not supported accross all languages. "
+ "Plural cases are not supported across all languages. "
"Use a .stringsdict file instead",
PathDiagnosticLocation(S, BR.getSourceManager(), AC));
}
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
index 0ee91cca4793..40eb0631d7c5 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -78,7 +78,7 @@ private:
/// Bug visitor class to find the node where the request region was previously
/// used in order to include it into the BugReport path.
- class RequestNodeVisitor : public BugReporterVisitorImpl<RequestNodeVisitor> {
+ class RequestNodeVisitor : public BugReporterVisitor {
public:
RequestNodeVisitor(const MemRegion *const MemoryRegion,
const std::string &ErrText)
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index f8473dbd7647..b8ef6701c0df 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -120,8 +120,7 @@ private:
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
- class SecKeychainBugVisitor
- : public BugReporterVisitorImpl<SecKeychainBugVisitor> {
+ class SecKeychainBugVisitor : public BugReporterVisitor {
protected:
// The allocated region symbol tracked by the main analysis.
SymbolRef Sym;
@@ -202,7 +201,7 @@ static bool isBadDeallocationArgument(const MemRegion *Arg) {
static SymbolRef getAsPointeeSymbol(const Expr *Expr,
CheckerContext &C) {
ProgramStateRef State = C.getState();
- SVal ArgV = State->getSVal(Expr, C.getLocationContext());
+ SVal ArgV = C.getSVal(Expr);
if (Optional<loc::MemRegionVal> X = ArgV.getAs<loc::MemRegionVal>()) {
StoreManager& SM = C.getStoreManager();
@@ -297,7 +296,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
// Check the argument to the deallocator.
const Expr *ArgExpr = CE->getArg(paramIdx);
- SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext());
+ SVal ArgSVal = C.getSVal(ArgExpr);
// Undef is reported by another checker.
if (ArgSVal.isUndef())
@@ -426,8 +425,7 @@ void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
// allocated value symbol, since our diagnostics depend on the value
// returned by the call. Ex: Data should only be freed if noErr was
// returned during allocation.)
- SymbolRef RetStatusSymbol =
- State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+ SymbolRef RetStatusSymbol = C.getSVal(CE).getAsSymbol();
C.getSymbolManager().addSymbolDependency(V, RetStatusSymbol);
// Track the allocated value in the checker state.
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 851114004b96..8f07f413e81f 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -30,6 +30,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "AllocationState.h"
#include <climits>
#include <utility>
@@ -45,7 +46,8 @@ enum AllocationFamily {
AF_CXXNew,
AF_CXXNewArray,
AF_IfNameIndex,
- AF_Alloca
+ AF_Alloca,
+ AF_InnerBuffer
};
class RefState {
@@ -134,10 +136,10 @@ enum ReallocPairKind {
};
/// \class ReallocPair
-/// \brief Stores information about the symbol being reallocated by a call to
+/// Stores information about the symbol being reallocated by a call to
/// 'realloc' to allow modeling failed reallocation later in the path.
struct ReallocPair {
- // \brief The symbol which realloc reallocated.
+ // The symbol which realloc reallocated.
SymbolRef ReallocatedSym;
ReallocPairKind Kind;
@@ -162,6 +164,7 @@ class MallocChecker : public Checker<check::DeadSymbols,
check::PreCall,
check::PostStmt<CallExpr>,
check::PostStmt<CXXNewExpr>,
+ check::NewAllocator,
check::PreStmt<CXXDeleteExpr>,
check::PostStmt<BlockExpr>,
check::PostObjCMessage,
@@ -207,6 +210,8 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
+ void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
+ CheckerContext &C) const;
void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
@@ -253,20 +258,20 @@ private:
void initIdentifierInfo(ASTContext &C) const;
- /// \brief Determine family of a deallocation expression.
+ /// Determine family of a deallocation expression.
AllocationFamily getAllocationFamily(CheckerContext &C, const Stmt *S) const;
- /// \brief Print names of allocators and deallocators.
+ /// Print names of allocators and deallocators.
///
/// \returns true on success.
bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
const Expr *E) const;
- /// \brief Print expected name of an allocator based on the deallocator's
+ /// Print expected name of an allocator based on the deallocator's
/// family derived from the DeallocExpr.
void printExpectedAllocName(raw_ostream &os, CheckerContext &C,
const Expr *DeallocExpr) const;
- /// \brief Print expected name of a deallocator based on the allocator's
+ /// Print expected name of a deallocator based on the allocator's
/// family.
void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) const;
@@ -281,10 +286,18 @@ private:
bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
///@}
- /// \brief Perform a zero-allocation check.
+ /// Process C++ operator new()'s allocation, which is the part of C++
+ /// new-expression that goes before the constructor.
+ void processNewAllocation(const CXXNewExpr *NE, CheckerContext &C,
+ SVal Target) const;
+
+ /// Perform a zero-allocation check.
+ /// The optional \p RetVal parameter specifies the newly allocated pointer
+ /// value; if unspecified, the value of expression \p E is used.
ProgramStateRef ProcessZeroAllocation(CheckerContext &C, const Expr *E,
const unsigned AllocationSizeArg,
- ProgramStateRef State) const;
+ ProgramStateRef State,
+ Optional<SVal> RetVal = None) const;
ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
const CallExpr *CE,
@@ -300,7 +313,7 @@ private:
AllocationFamily Family = AF_Malloc);
static ProgramStateRef addExtentSize(CheckerContext &C, const CXXNewExpr *NE,
- ProgramStateRef State);
+ ProgramStateRef State, SVal Target);
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
@@ -309,9 +322,12 @@ private:
const ProgramStateRef &State) const;
/// Update the RefState to reflect the new memory allocation.
+ /// The optional \p RetVal parameter specifies the newly allocated pointer
+ /// value; if unspecified, the value of expression \p E is used.
static ProgramStateRef
MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
- AllocationFamily Family = AF_Malloc);
+ AllocationFamily Family = AF_Malloc,
+ Optional<SVal> RetVal = None);
ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
const OwnershipAttr* Att,
@@ -337,7 +353,7 @@ private:
static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State);
- ///\brief Check if the memory associated with this symbol was released.
+ ///Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const;
bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
@@ -415,8 +431,7 @@ private:
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
- class MallocBugVisitor final
- : public BugReporterVisitorImpl<MallocBugVisitor> {
+ class MallocBugVisitor final : public BugReporterVisitor {
protected:
enum NotificationMode {
Normal,
@@ -432,15 +447,24 @@ private:
// A symbol from when the primary region should have been reallocated.
SymbolRef FailedReallocSymbol;
+ // A C++ destructor stack frame in which memory was released. Used for
+ // miscellaneous false positive suppression.
+ const StackFrameContext *ReleaseDestructorLC;
+
bool IsLeak;
public:
MallocBugVisitor(SymbolRef S, bool isLeak = false)
- : Sym(S), Mode(Normal), FailedReallocSymbol(nullptr), IsLeak(isLeak) {}
+ : Sym(S), Mode(Normal), FailedReallocSymbol(nullptr),
+ ReleaseDestructorLC(nullptr), IsLeak(isLeak) {}
+
+ static void *getTag() {
+ static int Tag = 0;
+ return &Tag;
+ }
void Profile(llvm::FoldingSetNodeID &ID) const override {
- static int X = 0;
- ID.AddPointer(&X);
+ ID.AddPointer(getTag());
ID.AddPointer(Sym);
}
@@ -456,8 +480,13 @@ private:
inline bool isReleased(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// Did not track -> released. Other state (allocated) -> released.
- return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt)) &&
- (S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
+ // The statement associated with the release might be missing.
+ bool IsReleased = (S && S->isReleased()) &&
+ (!SPrev || !SPrev->isReleased());
+ assert(!IsReleased ||
+ (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt))) ||
+ (!Stmt && S->getAllocationFamily() == AF_InnerBuffer));
+ return IsReleased;
}
inline bool isRelinquished(const RefState *S, const RefState *SPrev,
@@ -486,7 +515,7 @@ private:
BugReporterContext &BRC,
BugReport &BR) override;
- std::unique_ptr<PathDiagnosticPiece>
+ std::shared_ptr<PathDiagnosticPiece>
getEndPath(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
BugReport &BR) override {
if (!IsLeak)
@@ -496,7 +525,7 @@ private:
PathDiagnosticLocation::createEndOfPath(EndPathNode,
BRC.getSourceManager());
// Do not add the statement itself as a range in case of leak.
- return llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
+ return std::make_shared<PathDiagnosticEventPiece>(L, BR.getDescription(),
false);
}
@@ -758,7 +787,7 @@ llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
return None;
const Expr *FlagsEx = CE->getArg(CE->getNumArgs() - 1);
- const SVal V = State->getSVal(FlagsEx, C.getLocationContext());
+ const SVal V = C.getSVal(FlagsEx);
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
@@ -949,13 +978,15 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
}
// Performs a zero-sized allocation check.
-ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
- const Expr *E,
- const unsigned AllocationSizeArg,
- ProgramStateRef State) const {
+ProgramStateRef MallocChecker::ProcessZeroAllocation(
+ CheckerContext &C, const Expr *E, const unsigned AllocationSizeArg,
+ ProgramStateRef State, Optional<SVal> RetVal) const {
if (!State)
return nullptr;
+ if (!RetVal)
+ RetVal = C.getSVal(E);
+
const Expr *Arg = nullptr;
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
@@ -972,8 +1003,7 @@ ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
assert(Arg);
- Optional<DefinedSVal> DefArgVal =
- State->getSVal(Arg, C.getLocationContext()).getAs<DefinedSVal>();
+ Optional<DefinedSVal> DefArgVal = C.getSVal(Arg).getAs<DefinedSVal>();
if (!DefArgVal)
return State;
@@ -988,8 +1018,7 @@ ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
State->assume(SvalBuilder.evalEQ(State, *DefArgVal, Zero));
if (TrueState && !FalseState) {
- SVal retVal = State->getSVal(E, C.getLocationContext());
- SymbolRef Sym = retVal.getAsLocSymbol();
+ SymbolRef Sym = RetVal->getAsLocSymbol();
if (!Sym)
return State;
@@ -1050,9 +1079,9 @@ static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
return false;
}
-void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
- CheckerContext &C) const {
-
+void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
+ CheckerContext &C,
+ SVal Target) const {
if (NE->getNumPlacementArgs())
for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
E = NE->placement_arg_end(); I != E; ++I)
@@ -1072,37 +1101,48 @@ void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
// MallocUpdateRefState() instead of MallocMemAux() which breakes the
// existing binding.
State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
- : AF_CXXNew);
- State = addExtentSize(C, NE, State);
- State = ProcessZeroAllocation(C, NE, 0, State);
+ : AF_CXXNew, Target);
+ State = addExtentSize(C, NE, State, Target);
+ State = ProcessZeroAllocation(C, NE, 0, State, Target);
C.addTransition(State);
}
+void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
+ CheckerContext &C) const {
+ if (!C.getAnalysisManager().getAnalyzerOptions().mayInlineCXXAllocator())
+ processNewAllocation(NE, C, C.getSVal(NE));
+}
+
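+// When inlining of allocators is disabled, the allocation is modeled in
+// checkPostStmt() above. Otherwise checkNewAllocator() fires between the
+// operator new call and the constructor, and models the allocation unless
+// the allocator itself was inlined (and therefore already modeled).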
+void MallocChecker::checkNewAllocator(const CXXNewExpr *NE, SVal Target,
+ CheckerContext &C) const {
+ if (!C.wasInlined)
+ processNewAllocation(NE, C, Target);
+}
+
// Sets the extent value of the MemRegion allocated by
// new expression NE to its size in bytes.
//
ProgramStateRef MallocChecker::addExtentSize(CheckerContext &C,
const CXXNewExpr *NE,
- ProgramStateRef State) {
+ ProgramStateRef State,
+ SVal Target) {
if (!State)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
SVal ElementCount;
- const LocationContext *LCtx = C.getLocationContext();
const SubRegion *Region;
if (NE->isArray()) {
const Expr *SizeExpr = NE->getArraySize();
- ElementCount = State->getSVal(SizeExpr, C.getLocationContext());
+ ElementCount = C.getSVal(SizeExpr);
// Store the extent size for the (symbolic)region
// containing the elements.
- Region = (State->getSVal(NE, LCtx))
- .getAsRegion()
+ Region = Target.getAsRegion()
->getAs<SubRegion>()
- ->getSuperRegion()
+ ->StripCasts()
->getAs<SubRegion>();
} else {
ElementCount = svalBuilder.makeIntVal(1, true);
- Region = (State->getSVal(NE, LCtx)).getAsRegion()->getAs<SubRegion>();
+ Region = Target.getAsRegion()->getAs<SubRegion>();
}
assert(Region);
@@ -1199,7 +1239,8 @@ MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
if (I != E) {
- return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), State);
+ return MallocMemAux(C, CE, CE->getArg(I->getASTIndex()), UndefinedVal(),
+ State);
}
return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), State);
}
@@ -1212,8 +1253,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
if (!State)
return nullptr;
- return MallocMemAux(C, CE, State->getSVal(SizeEx, C.getLocationContext()),
- Init, State, Family);
+ return MallocMemAux(C, CE, C.getSVal(SizeEx), Init, State, Family);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
@@ -1239,7 +1279,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
State = State->BindExpr(CE, C.getLocationContext(), RetVal);
// Fill the region with the initialization value.
- State = State->bindDefault(RetVal, Init, LCtx);
+ State = State->bindDefaultInitial(RetVal, Init, LCtx);
// Set the region's extent equal to the Size parameter.
const SymbolicRegion *R =
@@ -1263,18 +1303,22 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
const Expr *E,
ProgramStateRef State,
- AllocationFamily Family) {
+ AllocationFamily Family,
+ Optional<SVal> RetVal) {
if (!State)
return nullptr;
// Get the return value.
- SVal retVal = State->getSVal(E, C.getLocationContext());
+ if (!RetVal)
+ RetVal = C.getSVal(E);
// We expect the malloc functions to return a pointer.
- if (!retVal.getAs<Loc>())
+ if (!RetVal->getAs<Loc>())
return nullptr;
- SymbolRef Sym = retVal.getAsLocSymbol();
+ SymbolRef Sym = RetVal->getAsLocSymbol();
+ // This is a return value of a function that was not inlined, such as malloc()
+ // or new(). We've checked that in the caller. Therefore, it must be a symbol.
assert(Sym);
// Set the symbol's state to Allocated.
@@ -1294,9 +1338,9 @@ ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
bool ReleasedAllocated = false;
for (const auto &Arg : Att->args()) {
- ProgramStateRef StateI = FreeMemAux(C, CE, State, Arg,
- Att->getOwnKind() == OwnershipAttr::Holds,
- ReleasedAllocated);
+ ProgramStateRef StateI = FreeMemAux(
+ C, CE, State, Arg.getASTIndex(),
+ Att->getOwnKind() == OwnershipAttr::Holds, ReleasedAllocated);
if (StateI)
State = StateI;
}
@@ -1429,6 +1473,7 @@ void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
case AF_CXXNew: os << "'new'"; return;
case AF_CXXNewArray: os << "'new[]'"; return;
case AF_IfNameIndex: os << "'if_nameindex()'"; return;
+ case AF_InnerBuffer: os << "container-specific allocator"; return;
case AF_Alloca:
case AF_None: llvm_unreachable("not a deallocation expression");
}
@@ -1441,6 +1486,7 @@ void MallocChecker::printExpectedDeallocName(raw_ostream &os,
case AF_CXXNew: os << "'delete'"; return;
case AF_CXXNewArray: os << "'delete[]'"; return;
case AF_IfNameIndex: os << "'if_freenameindex()'"; return;
+ case AF_InnerBuffer: os << "container-specific deallocator"; return;
case AF_Alloca:
case AF_None: llvm_unreachable("suspicious argument");
}
@@ -1457,7 +1503,7 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
if (!State)
return nullptr;
- SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext());
+ SVal ArgVal = C.getSVal(ArgExpr);
if (!ArgVal.getAs<DefinedOrUnknownSVal>())
return nullptr;
DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
@@ -1615,7 +1661,9 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
return Optional<MallocChecker::CheckKind>();
}
case AF_CXXNew:
- case AF_CXXNewArray: {
+ case AF_CXXNewArray:
+ // FIXME: Add new CheckKind for AF_InnerBuffer.
+ case AF_InnerBuffer: {
if (IsALeakCheck) {
if (ChecksEnabled[CK_NewDeleteLeaksChecker])
return CK_NewDeleteLeaksChecker;
@@ -1945,6 +1993,11 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
R->markInteresting(Sym);
R->addRange(Range);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
+
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ if (RS->getAllocationFamily() == AF_InnerBuffer)
+ R->addVisitor(allocation_state::getInnerPointerBRVisitor(Sym));
+
C.emitReport(std::move(R));
}
}
@@ -2047,8 +2100,8 @@ void MallocChecker::ReportFunctionPointerFree(CheckerContext &C, SVal ArgVal,
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_BadFree[*CheckKind])
- BT_BadFree[*CheckKind].reset(
- new BugType(CheckNames[*CheckKind], "Bad free", "Memory Error"));
+ BT_BadFree[*CheckKind].reset(new BugType(
+ CheckNames[*CheckKind], "Bad free", categories::MemoryError));
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
@@ -2084,8 +2137,7 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
return nullptr;
const Expr *arg0Expr = CE->getArg(0);
- const LocationContext *LCtx = C.getLocationContext();
- SVal Arg0Val = State->getSVal(arg0Expr, LCtx);
+ SVal Arg0Val = C.getSVal(arg0Expr);
if (!Arg0Val.getAs<DefinedOrUnknownSVal>())
return nullptr;
DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>();
@@ -2099,7 +2151,7 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
const Expr *Arg1 = CE->getArg(1);
// Get the value of the size argument.
- SVal TotalSize = State->getSVal(Arg1, LCtx);
+ SVal TotalSize = C.getSVal(Arg1);
if (SuffixWithN)
TotalSize = evalMulForBufferSize(C, Arg1, CE->getArg(2));
if (!TotalSize.getAs<DefinedOrUnknownSVal>())
@@ -2133,7 +2185,7 @@ ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
// Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
assert(!PrtIsNull);
SymbolRef FromPtr = arg0Val.getAsSymbol();
- SVal RetVal = State->getSVal(CE, LCtx);
+ SVal RetVal = C.getSVal(CE);
SymbolRef ToPtr = RetVal.getAsSymbol();
if (!FromPtr || !ToPtr)
return nullptr;
@@ -2216,7 +2268,7 @@ MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
// Do not show local variables belonging to a function other than
// where the error is reported.
if (!VR ||
- (VR->getStackFrame() == LeakContext->getCurrentStackFrame()))
+ (VR->getStackFrame() == LeakContext->getStackFrame()))
ReferenceRegion = MR;
}
}
@@ -2406,7 +2458,7 @@ void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
// Check if we are returning a symbol.
ProgramStateRef State = C.getState();
- SVal RetVal = State->getSVal(E, C.getLocationContext());
+ SVal RetVal = C.getSVal(E);
SymbolRef Sym = RetVal.getAsSymbol();
if (!Sym)
// If we are returning a field of the allocated struct or an array element,
@@ -2436,8 +2488,7 @@ void MallocChecker::checkPostStmt(const BlockExpr *BE,
ProgramStateRef state = C.getState();
const BlockDataRegion *R =
- cast<BlockDataRegion>(state->getSVal(BE,
- C.getLocationContext()).getAsRegion());
+ cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
@@ -2793,36 +2844,133 @@ static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
return nullptr;
}
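+/// Heuristically determine whether \p DD is the destructor of a
+/// reference-counting smart pointer, matching class names like "shared_ptr",
+/// "intrusive_ptr" or "RefCountedPointer" (the name must mention both a
+/// pointer and reference counting to qualify).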
+static bool isReferenceCountingPointerDestructor(const CXXDestructorDecl *DD) {
+ if (const IdentifierInfo *II = DD->getParent()->getIdentifier()) {
+ StringRef N = II->getName();
+ if (N.contains_lower("ptr") || N.contains_lower("pointer")) {
+ if (N.contains_lower("ref") || N.contains_lower("cnt") ||
+ N.contains_lower("intrusive") || N.contains_lower("shared")) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
std::shared_ptr<PathDiagnosticPiece> MallocChecker::MallocBugVisitor::VisitNode(
const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
BugReport &BR) {
+
ProgramStateRef state = N->getState();
ProgramStateRef statePrev = PrevN->getState();
const RefState *RS = state->get<RegionState>(Sym);
const RefState *RSPrev = statePrev->get<RegionState>(Sym);
- if (!RS)
- return nullptr;
const Stmt *S = PathDiagnosticLocation::getStmt(N);
- if (!S)
+ // When dealing with containers, we sometimes want to give a note
+ // even if the statement is missing.
+ if (!S && (!RS || RS->getAllocationFamily() != AF_InnerBuffer))
return nullptr;
+ const LocationContext *CurrentLC = N->getLocationContext();
+
+ // If we find an atomic fetch_add or fetch_sub within the destructor in which
+ // the pointer was released (before the release), this is likely a destructor
+ // of a shared pointer.
+ // Because we don't model atomics, and also because we don't know that the
+ // original reference count is positive, we should not report use-after-frees
+ // on objects deleted in such destructors. This can probably be improved
+ // through better shared pointer modeling.
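+  // A typical suppressed pattern looks like this (sketch):
+  //
+  //   ~SmartPtr() {
+  //     if (__c11_atomic_fetch_sub(&RefCnt, 1, __ATOMIC_ACQ_REL) == 1)
+  //       delete Ptr; // suppressed: the original count is unknown
+  //   }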
+ if (ReleaseDestructorLC) {
+    // S may be null here (e.g. when the release belongs to a container
+    // destructor in the AF_InnerBuffer case), so use the null-tolerant cast.
+    if (const auto *AE = dyn_cast_or_null<AtomicExpr>(S)) {
+ AtomicExpr::AtomicOp Op = AE->getOp();
+ if (Op == AtomicExpr::AO__c11_atomic_fetch_add ||
+ Op == AtomicExpr::AO__c11_atomic_fetch_sub) {
+ if (ReleaseDestructorLC == CurrentLC ||
+ ReleaseDestructorLC->isParentOf(CurrentLC)) {
+ BR.markInvalid(getTag(), S);
+ }
+ }
+ }
+ }
+
// FIXME: We will eventually need to handle non-statement-based events
// (__attribute__((cleanup))).
// Find out if this is an interesting point and what is the kind.
- const char *Msg = nullptr;
+ StringRef Msg;
StackHintGeneratorForSymbol *StackHint = nullptr;
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+
if (Mode == Normal) {
if (isAllocated(RS, RSPrev, S)) {
Msg = "Memory is allocated";
StackHint = new StackHintGeneratorForSymbol(Sym,
"Returned allocated memory");
} else if (isReleased(RS, RSPrev, S)) {
- Msg = "Memory is released";
+ const auto Family = RS->getAllocationFamily();
+ switch (Family) {
+ case AF_Alloca:
+ case AF_Malloc:
+ case AF_CXXNew:
+ case AF_CXXNewArray:
+ case AF_IfNameIndex:
+ Msg = "Memory is released";
+ break;
+ case AF_InnerBuffer: {
+ OS << "Inner pointer invalidated by call to ";
+ if (N->getLocation().getKind() == ProgramPoint::PostImplicitCallKind) {
+ OS << "destructor";
+ } else {
+ OS << "'";
+ const Stmt *S = RS->getStmt();
+ if (const auto *MemCallE = dyn_cast<CXXMemberCallExpr>(S)) {
+ OS << MemCallE->getMethodDecl()->getNameAsString();
+ } else if (const auto *OpCallE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ OS << OpCallE->getDirectCallee()->getNameAsString();
+ }
+ OS << "'";
+ }
+ Msg = OS.str();
+ break;
+ }
+ case AF_None:
+ llvm_unreachable("Unhandled allocation family!");
+ }
StackHint = new StackHintGeneratorForSymbol(Sym,
"Returning; memory was released");
+
+ // See if we're releasing memory while inlining a destructor
+ // (or one of its callees). This turns on various common
+ // false positive suppressions.
+ bool FoundAnyDestructor = false;
+ for (const LocationContext *LC = CurrentLC; LC; LC = LC->getParent()) {
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(LC->getDecl())) {
+ if (isReferenceCountingPointerDestructor(DD)) {
+ // This immediately looks like a reference-counting destructor.
+ // We're bad at guessing the original reference count of the object,
+ // so suppress the report for now.
+ BR.markInvalid(getTag(), DD);
+ } else if (!FoundAnyDestructor) {
+ assert(!ReleaseDestructorLC &&
+ "There can be only one release point!");
+ // Suspect that it's a reference counting pointer destructor.
+          // On one of the next nodes we might find out that it has atomic
+ // reference counting operations within it (see the code above),
+ // and if so, we'd conclude that it likely is a reference counting
+ // pointer destructor.
+ ReleaseDestructorLC = LC->getStackFrame();
+ // It is unlikely that releasing memory is delegated to a destructor
+ // inside a destructor of a shared pointer, because it's fairly hard
+ // to pass the information that the pointer indeed needs to be
+ // released into it. So we're only interested in the innermost
+ // destructor.
+ FoundAnyDestructor = true;
+ }
+ }
+ }
} else if (isRelinquished(RS, RSPrev, S)) {
Msg = "Memory ownership is transferred";
StackHint = new StackHintGeneratorForSymbol(Sym, "");
@@ -2856,13 +3004,24 @@ std::shared_ptr<PathDiagnosticPiece> MallocChecker::MallocBugVisitor::VisitNode(
}
}
- if (!Msg)
+ if (Msg.empty())
return nullptr;
assert(StackHint);
// Generate the extra diagnostic.
- PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
- N->getLocationContext());
+ PathDiagnosticLocation Pos;
+ if (!S) {
+ assert(RS->getAllocationFamily() == AF_InnerBuffer);
+ auto PostImplCall = N->getLocation().getAs<PostImplicitCall>();
+ if (!PostImplCall)
+ return nullptr;
+ Pos = PathDiagnosticLocation(PostImplCall->getLocation(),
+ BRC.getSourceManager());
+ } else {
+ Pos = PathDiagnosticLocation(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ }
+
return std::make_shared<PathDiagnosticEventPiece>(Pos, Msg, true, StackHint);
}
@@ -2890,6 +3049,20 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
}
}
+namespace clang {
+namespace ento {
+namespace allocation_state {
+
+ProgramStateRef
+markReleased(ProgramStateRef State, SymbolRef Sym, const Expr *Origin) {
+ AllocationFamily Family = AF_InnerBuffer;
+ return State->set<RegionState>(Sym, RefState::getReleased(Family, Origin));
+}
+
+} // end namespace allocation_state
+} // end namespace ento
+} // end namespace clang
+
void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
registerCStringCheckerBasic(mgr);
MallocChecker *checker = mgr.registerChecker<MallocChecker>();
@@ -2900,8 +3073,13 @@ void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
mgr.getCurrentCheckName();
// We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
// checker.
- if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker])
+ if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker]) {
checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
+ // FIXME: This does not set the correct name, but without this workaround
+ // no name will be set at all.
+ checker->CheckNames[MallocChecker::CK_NewDeleteChecker] =
+ mgr.getCurrentCheckName();
+ }
}
#define REGISTER_CHECKER(name) \
diff --git a/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp
index 497978f07815..19c1d077afa1 100644
--- a/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp
@@ -46,7 +46,7 @@ class MisusedMovedObjectChecker
: public Checker<check::PreCall, check::PostCall, check::EndFunction,
check::DeadSymbols, check::RegionChanges> {
public:
- void checkEndFunction(CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
void checkPreCall(const CallEvent &MC, CheckerContext &C) const;
void checkPostCall(const CallEvent &MC, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
@@ -61,7 +61,7 @@ public:
private:
enum MisuseKind {MK_FunCall, MK_Copy, MK_Move};
- class MovedBugVisitor : public BugReporterVisitorImpl<MovedBugVisitor> {
+ class MovedBugVisitor : public BugReporterVisitor {
public:
MovedBugVisitor(const MemRegion *R) : Region(R), Found(false) {}
@@ -101,8 +101,6 @@ static ProgramStateRef removeFromState(ProgramStateRef State,
const MemRegion *Region) {
if (!Region)
return State;
- // Note: The isSubRegionOf function is not reflexive.
- State = State->remove<TrackedRegionMap>(Region);
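+  // isSubRegionOf() is reflexive, so the loop below removes Region itself as
+  // well as all of its subregions.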
for (auto &E : State->get<TrackedRegionMap>()) {
if (E.first->isSubRegionOf(Region))
State = State->remove<TrackedRegionMap>(E.first);
@@ -224,7 +222,8 @@ ExplodedNode *MisusedMovedObjectChecker::reportBug(const MemRegion *Region,
// Removing the function parameters' MemRegion from the state. This is needed
// for PODs where the trivial destructor is not even created or executed.
-void MisusedMovedObjectChecker::checkEndFunction(CheckerContext &C) const {
+void MisusedMovedObjectChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &C) const {
auto State = C.getState();
TrackedRegionMapTy Objects = State->get<TrackedRegionMap>();
if (Objects.isEmpty())
diff --git a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
new file mode 100644
index 000000000000..5060b0e0a6e0
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -0,0 +1,88 @@
+//===-- MmapWriteExecChecker.cpp - Check for the prot argument ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker tests the third ('prot') argument of mmap calls to check
+// whether it is both writable and executable at the same time. The checker
+// is somewhat optional, since such mappings are fairly common in, for
+// example, JIT libraries.
+//
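+// For example:
+//
+//   void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
+//                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); // warning: W^X
+//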
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+namespace {
+class MmapWriteExecChecker : public Checker<check::PreCall> {
+ CallDescription MmapFn;
+ CallDescription MprotectFn;
+ static int ProtWrite;
+ static int ProtExec;
+ static int ProtRead;
+ mutable std::unique_ptr<BugType> BT;
+public:
+ MmapWriteExecChecker() : MmapFn("mmap", 6), MprotectFn("mprotect", 3) {}
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ int ProtExecOv;
+ int ProtReadOv;
+};
+}
+
+int MmapWriteExecChecker::ProtWrite = 0x02;
+int MmapWriteExecChecker::ProtExec = 0x04;
+int MmapWriteExecChecker::ProtRead = 0x01;
+
+void MmapWriteExecChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (Call.isCalled(MmapFn) || Call.isCalled(MprotectFn)) {
+ SVal ProtVal = Call.getArgSVal(2);
+    Optional<nonloc::ConcreteInt> ProtLoc = ProtVal.getAs<nonloc::ConcreteInt>();
+    // The prot argument is not necessarily a concrete value; bail out rather
+    // than dereference an empty Optional.
+    if (!ProtLoc)
+      return;
+    int64_t Prot = ProtLoc->getValue().getSExtValue();
+ if (ProtExecOv != ProtExec)
+ ProtExec = ProtExecOv;
+ if (ProtReadOv != ProtRead)
+ ProtRead = ProtReadOv;
+
+    // If the configured PROT_READ and PROT_EXEC values coincide, the two
+    // flags cannot be told apart, so bail out.
+ if (ProtRead == ProtExec)
+ return;
+
+ if ((Prot & (ProtWrite | ProtExec)) == (ProtWrite | ProtExec)) {
+ if (!BT)
+ BT.reset(new BugType(this, "W^X check fails, Write Exec prot flags set", "Security"));
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+
+ auto Report = llvm::make_unique<BugReport>(
+ *BT, "Both PROT_WRITE and PROT_EXEC flags are set. This can "
+ "lead to exploitable memory regions, which could be overwritten "
+ "with malicious code", N);
+ Report->addRange(Call.getArgSourceRange(2));
+ C.emitReport(std::move(Report));
+ }
+ }
+}
+
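+// MmapProtExec and MmapProtRead are checker options so that targets whose
+// PROT_EXEC or PROT_READ constants differ from the defaults (0x04 and 0x01)
+// can override them.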
+void ento::registerMmapWriteExecChecker(CheckerManager &mgr) {
+ MmapWriteExecChecker *Mwec =
+ mgr.registerChecker<MmapWriteExecChecker>();
+ Mwec->ProtExecOv =
+ mgr.getAnalyzerOptions().getOptionAsInteger("MmapProtExec", 0x04, Mwec);
+ Mwec->ProtReadOv =
+ mgr.getAnalyzerOptions().getOptionAsInteger("MmapProtRead", 0x01, Mwec);
+}
diff --git a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 559c75d7a5b0..2bd68b625c1f 100644
--- a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -186,8 +186,7 @@ static void setFlag(ProgramStateRef state, SVal val, CheckerContext &C) {
}
static QualType parameterTypeFromSVal(SVal val, CheckerContext &C) {
- const StackFrameContext *
- SFC = C.getLocationContext()->getCurrentStackFrame();
+ const StackFrameContext * SFC = C.getStackFrame();
if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>()) {
const MemRegion* R = X->getRegion();
if (const VarRegion *VR = R->getAs<VarRegion>())
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 6d05159e51b0..01d2c0491b85 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -44,13 +44,9 @@ public:
};
} // end anonymous namespace
-void NonNullParamChecker::checkPreCall(const CallEvent &Call,
- CheckerContext &C) const {
+/// \return Bitvector of the call's arguments covered by a nonnull attribute
+/// (an attribute without an explicit index covers all of them).
+static llvm::SmallBitVector getNonNullAttrs(const CallEvent &Call) {
const Decl *FD = Call.getDecl();
- if (!FD)
- return;
-
- // Merge all non-null attributes
unsigned NumArgs = Call.getNumArgs();
llvm::SmallBitVector AttrNonNull(NumArgs);
for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
@@ -58,49 +54,54 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
AttrNonNull.set(0, NumArgs);
break;
}
- for (unsigned Val : NonNull->args()) {
- if (Val >= NumArgs)
+ for (const ParamIdx &Idx : NonNull->args()) {
+ unsigned IdxAST = Idx.getASTIndex();
+ if (IdxAST >= NumArgs)
continue;
- AttrNonNull.set(Val);
+ AttrNonNull.set(IdxAST);
}
}
+ return AttrNonNull;
+}
- ProgramStateRef state = C.getState();
+void NonNullParamChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!Call.getDecl())
+ return;
+
+ llvm::SmallBitVector AttrNonNull = getNonNullAttrs(Call);
+ unsigned NumArgs = Call.getNumArgs();
- CallEvent::param_type_iterator TyI = Call.param_type_begin(),
- TyE = Call.param_type_end();
+ ProgramStateRef state = C.getState();
+ ArrayRef<ParmVarDecl*> parms = Call.parameters();
for (unsigned idx = 0; idx < NumArgs; ++idx) {
+ // For vararg functions, a corresponding parameter decl may not exist.
+ bool HasParam = idx < parms.size();
// Check if the parameter is a reference. We want to report when reference
// to a null pointer is passed as a parameter.
- bool haveRefTypeParam = false;
- if (TyI != TyE) {
- haveRefTypeParam = (*TyI)->isReferenceType();
- TyI++;
- }
-
+ bool haveRefTypeParam =
+ HasParam ? parms[idx]->getType()->isReferenceType() : false;
bool haveAttrNonNull = AttrNonNull[idx];
- if (!haveAttrNonNull) {
- // Check if the parameter is also marked 'nonnull'.
- ArrayRef<ParmVarDecl*> parms = Call.parameters();
- if (idx < parms.size())
- haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
- }
- if (!haveRefTypeParam && !haveAttrNonNull)
+ // Check if the parameter is also marked 'nonnull'.
+ if (!haveAttrNonNull && HasParam)
+ haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
+
+ if (!haveAttrNonNull && !haveRefTypeParam)
continue;
// If the value is unknown or undefined, we can't perform this check.
const Expr *ArgE = Call.getArgExpr(idx);
SVal V = Call.getArgSVal(idx);
- Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
+ auto DV = V.getAs<DefinedSVal>();
if (!DV)
continue;
- // Process the case when the argument is not a location.
assert(!haveRefTypeParam || DV->getAs<Loc>());
+ // Process the case when the argument is not a location.
if (haveAttrNonNull && !DV->getAs<Loc>()) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
@@ -112,66 +113,63 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
continue;
- if (Optional<nonloc::CompoundVal> CSV =
- DV->getAs<nonloc::CompoundVal>()) {
- nonloc::CompoundVal::iterator CSV_I = CSV->begin();
- assert(CSV_I != CSV->end());
- V = *CSV_I;
- DV = V.getAs<DefinedSVal>();
- assert(++CSV_I == CSV->end());
- // FIXME: Handle (some_union){ some_other_union_val }, which turns into
- // a LazyCompoundVal inside a CompoundVal.
- if (!V.getAs<Loc>())
- continue;
- // Retrieve the corresponding expression.
- if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
- if (const InitListExpr *IE =
- dyn_cast<InitListExpr>(CE->getInitializer()))
- ArgE = dyn_cast<Expr>(*(IE->begin()));
-
- } else {
- // FIXME: Handle LazyCompoundVals?
+ auto CSV = DV->getAs<nonloc::CompoundVal>();
+
+ // FIXME: Handle LazyCompoundVals?
+ if (!CSV)
continue;
- }
+
+ V = *(CSV->begin());
+ DV = V.getAs<DefinedSVal>();
+ assert(++CSV->begin() == CSV->end());
+ // FIXME: Handle (some_union){ some_other_union_val }, which turns into
+ // a LazyCompoundVal inside a CompoundVal.
+ if (!V.getAs<Loc>())
+ continue;
+
+ // Retrieve the corresponding expression.
+ if (const auto *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
+ if (const auto *IE = dyn_cast<InitListExpr>(CE->getInitializer()))
+ ArgE = dyn_cast<Expr>(*(IE->begin()));
}
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
- if (stateNull) {
- if (!stateNotNull) {
- // Generate an error node. Check for a null node in case
- // we cache out.
- if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
-
- std::unique_ptr<BugReport> R;
- if (haveAttrNonNull)
- R = genReportNullAttrNonNull(errorNode, ArgE);
- else if (haveRefTypeParam)
- R = genReportReferenceToNullPointer(errorNode, ArgE);
-
- // Highlight the range of the argument that was null.
- R->addRange(Call.getArgSourceRange(idx));
-
- // Emit the bug report.
- C.emitReport(std::move(R));
- }
-
- // Always return. Either we cached out or we just emitted an error.
- return;
+ // Generate an error node. Check for a null node in case
+ // we cache out.
+ if (stateNull && !stateNotNull) {
+ if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
+
+ std::unique_ptr<BugReport> R;
+ if (haveAttrNonNull)
+ R = genReportNullAttrNonNull(errorNode, ArgE);
+ else if (haveRefTypeParam)
+ R = genReportReferenceToNullPointer(errorNode, ArgE);
+
+ // Highlight the range of the argument that was null.
+ R->addRange(Call.getArgSourceRange(idx));
+
+ // Emit the bug report.
+ C.emitReport(std::move(R));
}
+
+ // Always return. Either we cached out or we just emitted an error.
+ return;
+ }
+
+ if (stateNull) {
if (ExplodedNode *N = C.generateSink(stateNull, C.getPredecessor())) {
ImplicitNullDerefEvent event = {
- V, false, N, &C.getBugReporter(),
- /*IsDirectDereference=*/haveRefTypeParam};
+ V, false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/haveRefTypeParam};
dispatchEvent(event);
}
}
// If a pointer value passed the check we should assume that it is
// indeed not null from this point forward.
- assert(stateNotNull);
state = stateNotNull;
}
diff --git a/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 0b4ecb41d20f..6f3180eb839a 100644
--- a/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -54,7 +54,7 @@ private:
} // namespace
-/// Lazily initialize cache for required identifier informations.
+/// Lazily initialize cache for required identifier information.
void NonnullGlobalConstantsChecker::initIdentifierInfo(ASTContext &Ctx) const {
if (NSStringII)
return;
@@ -73,9 +73,9 @@ void NonnullGlobalConstantsChecker::checkLocation(SVal location, bool isLoad,
return;
ProgramStateRef State = C.getState();
- SVal V = State->getSVal(location.castAs<Loc>());
if (isGlobalConstString(location)) {
+ SVal V = State->getSVal(location.castAs<Loc>());
Optional<DefinedOrUnknownSVal> Constr = V.getAs<DefinedOrUnknownSVal>();
if (Constr) {
diff --git a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index fa9a317683ba..7d1ca61c97a9 100644
--- a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -30,6 +30,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -40,21 +41,6 @@ using namespace clang;
using namespace ento;
namespace {
-// Do not reorder! The getMostNullable method relies on the order.
-// Optimization: Most pointers expected to be unspecified. When a symbol has an
-// unspecified or nonnull type non of the rules would indicate any problem for
-// that symbol. For this reason only nullable and contradicted nullability are
-// stored for a symbol. When a symbol is already contradicted, it can not be
-// casted back to nullable.
-enum class Nullability : char {
- Contradicted, // Tracked nullability is contradicted by an explicit cast. Do
- // not report any nullability related issue for this symbol.
- // This nullability is propagated aggressively to avoid false
- // positive results. See the comment on getMostNullable method.
- Nullable,
- Unspecified,
- Nonnull
-};
/// Returns the most nullable nullability. This is used for message expressions
/// like [receiver method], where the nullability of this expression is either
@@ -142,8 +128,7 @@ public:
DefaultBool NeedTracking;
private:
- class NullabilityBugVisitor
- : public BugReporterVisitorImpl<NullabilityBugVisitor> {
+ class NullabilityBugVisitor : public BugReporterVisitor {
public:
NullabilityBugVisitor(const MemRegion *M) : Region(M) {}
@@ -265,7 +250,7 @@ REGISTER_MAP_WITH_PROGRAMSTATE(NullabilityMap, const MemRegion *,
// initial direct violation has been discovered, and (3) warning after a direct
// violation that has been implicitly or explicitly suppressed (for
// example, with a cast of NULL to _Nonnull). In essence, once an invariant
-// violation is detected on a path, this checker will be esentially turned off
+// violation is detected on a path, this checker will be essentially turned off
// for the rest of the analysis
//
// The analyzer takes this approach (rather than generating a sink node) to
@@ -345,17 +330,6 @@ NullabilityChecker::NullabilityBugVisitor::VisitNode(const ExplodedNode *N,
nullptr);
}
-static Nullability getNullabilityAnnotation(QualType Type) {
- const auto *AttrType = Type->getAs<AttributedType>();
- if (!AttrType)
- return Nullability::Unspecified;
- if (AttrType->getAttrKind() == AttributedType::attr_nullable)
- return Nullability::Nullable;
- else if (AttrType->getAttrKind() == AttributedType::attr_nonnull)
- return Nullability::Nonnull;
- return Nullability::Unspecified;
-}
-
/// Returns true when the value stored at the given location is null
/// and the passed in type is nonnnull.
static bool checkValueAtLValForInvariantViolation(ProgramStateRef State,
@@ -560,8 +534,7 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
if (State->get<InvariantViolated>())
return;
- auto RetSVal =
- State->getSVal(S, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
+ auto RetSVal = C.getSVal(S).getAs<DefinedOrUnknownSVal>();
if (!RetSVal)
return;
@@ -873,7 +846,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
// are either item retrieval related or not interesting nullability wise.
// Using this fact, to keep the code easier to read just ignore the return
// value of every instance method of dictionaries.
- if (M.isInstanceMessage() && Name.find("Dictionary") != StringRef::npos) {
+ if (M.isInstanceMessage() && Name.contains("Dictionary")) {
State =
State->set<NullabilityMap>(ReturnRegion, Nullability::Contradicted);
C.addTransition(State);
@@ -881,7 +854,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
}
// For similar reasons ignore some methods of Cocoa arrays.
StringRef FirstSelectorSlot = M.getSelector().getNameForSlot(0);
- if (Name.find("Array") != StringRef::npos &&
+ if (Name.contains("Array") &&
(FirstSelectorSlot == "firstObject" ||
FirstSelectorSlot == "lastObject")) {
State =
@@ -894,7 +867,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
// encodings are used. Using lossless encodings is so frequent that ignoring
// this class of methods reduced the emitted diagnostics by about 30% on
// some projects (and all of that was false positives).
- if (Name.find("String") != StringRef::npos) {
+ if (Name.contains("String")) {
for (auto Param : M.parameters()) {
if (Param->getName() == "encoding") {
State = State->set<NullabilityMap>(ReturnRegion,
@@ -977,8 +950,7 @@ void NullabilityChecker::checkPostStmt(const ExplicitCastExpr *CE,
if (DestNullability == Nullability::Unspecified)
return;
- auto RegionSVal =
- State->getSVal(CE, C.getLocationContext()).getAs<DefinedOrUnknownSVal>();
+ auto RegionSVal = C.getSVal(CE).getAs<DefinedOrUnknownSVal>();
const MemRegion *Region = getTrackRegion(*RegionSVal);
if (!Region)
return;
diff --git a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index 40e379cb2efc..d1749cfdbe27 100644
--- a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -270,8 +270,10 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
hasRHS(SuspiciousNumberObjectExprM)));
auto ConversionThroughBranchingM =
- ifStmt(hasCondition(SuspiciousNumberObjectExprM))
- .bind("pedantic");
+ ifStmt(allOf(
+ hasCondition(SuspiciousNumberObjectExprM),
+ unless(hasConditionVariableStatement(declStmt())
+ ))).bind("pedantic");
auto ConversionThroughCallM =
callExpr(hasAnyArgument(allOf(hasType(SuspiciousScalarTypeM),
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index cbaa5c23592d..b7339fe79f69 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -39,7 +39,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
const Expr *Ex = S->getSynchExpr();
ProgramStateRef state = C.getState();
- SVal V = state->getSVal(Ex, C.getLocationContext());
+ SVal V = C.getSVal(Ex);
// Uninitialized value used for the mutex?
if (V.getAs<UndefinedVal>()) {
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
new file mode 100644
index 000000000000..81bcda51b8f8
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -0,0 +1,209 @@
+//===- ObjCAutoreleaseWriteChecker.cpp ----------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCAutoreleaseWriteChecker which warns against writes
+// into autoreleased out parameters which cause crashes.
+// An example of a problematic write is a write to {@code error} in the example
+// below:
+//
+// - (BOOL) mymethod:(NSError *__autoreleasing *)error list:(NSArray*) list {
+// [list enumerateObjectsUsingBlock:^(id obj, NSUInteger idx, BOOL *stop) {
+// NSString *myString = obj;
+// if ([myString isEqualToString:@"error"] && error)
+// *error = [NSError errorWithDomain:@"MyDomain" code:-1 userInfo:nil];
+// }];
+// return false;
+// }
+//
+// Such code will crash on read from `*error` due to the autorelease pool
+// in `enumerateObjectsUsingBlock` implementation freeing the error object
+// on exit from the function.
+//
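+// A sketch of the commonly suggested fix (illustrative only; `localError` is
+// a hypothetical name): write through a __block strong local declared outside
+// the block, and copy into the out parameter after the enumeration finishes:
+//
+// - (BOOL) mymethod:(NSError *__autoreleasing *)error list:(NSArray*) list {
+//   __block NSError *localError = nil;
+//   [list enumerateObjectsUsingBlock:^(id obj, NSUInteger idx, BOOL *stop) {
+//     if ([obj isEqualToString:@"error"])
+//       localError = [NSError errorWithDomain:@"MyDomain" code:-1
+//                                    userInfo:nil];
+//   }];
+//   if (error)
+//     *error = localError;
+//   return false;
+// }
+//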
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/Twine.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+const char *ProblematicWriteBind = "problematicwrite";
+const char *CapturedBind = "capturedbind";
+const char *ParamBind = "parambind";
+const char *IsMethodBind = "ismethodbind";
+
+class ObjCAutoreleaseWriteChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const;
+private:
+ std::vector<std::string> SelectorsWithAutoreleasingPool = {
+ // Common to NSArray, NSSet, NSOrderedSet
+ "enumerateObjectsUsingBlock:",
+ "enumerateObjectsWithOptions:usingBlock:",
+
+ // Common to NSArray and NSOrderedSet
+ "enumerateObjectsAtIndexes:options:usingBlock:",
+ "indexOfObjectAtIndexes:options:passingTest:",
+ "indexesOfObjectsAtIndexes:options:passingTest:",
+ "indexOfObjectPassingTest:",
+ "indexOfObjectWithOptions:passingTest:",
+ "indexesOfObjectsPassingTest:",
+ "indexesOfObjectsWithOptions:passingTest:",
+
+ // NSDictionary
+ "enumerateKeysAndObjectsUsingBlock:",
+ "enumerateKeysAndObjectsWithOptions:usingBlock:",
+ "keysOfEntriesPassingTest:",
+ "keysOfEntriesWithOptions:passingTest:",
+
+ // NSSet
+ "objectsPassingTest:",
+ "objectsWithOptions:passingTest:",
+ "enumerateIndexPathsWithOptions:usingBlock:",
+
+ // NSIndexSet
+ "enumerateIndexesWithOptions:usingBlock:",
+ "enumerateIndexesUsingBlock:",
+ "enumerateIndexesInRange:options:usingBlock:",
+ "enumerateRangesUsingBlock:",
+ "enumerateRangesWithOptions:usingBlock:",
+ "enumerateRangesInRange:options:usingBlock:",
+ "indexPassingTest:",
+ "indexesPassingTest:",
+ "indexWithOptions:passingTest:",
+ "indexesWithOptions:passingTest:",
+ "indexInRange:options:passingTest:",
+ "indexesInRange:options:passingTest:"
+ };
+
+ std::vector<std::string> FunctionsWithAutoreleasingPool = {
+ "dispatch_async", "dispatch_group_async", "dispatch_barrier_async"};
+};
+}
+
+static inline std::vector<llvm::StringRef> toRefs(std::vector<std::string> V) {
+ return std::vector<llvm::StringRef>(V.begin(), V.end());
+}
+
+static auto callsNames(std::vector<std::string> FunctionNames)
+ -> decltype(callee(functionDecl())) {
+ return callee(functionDecl(hasAnyName(toRefs(FunctionNames))));
+}
+
+static void emitDiagnostics(BoundNodes &Match, const Decl *D, BugReporter &BR,
+ AnalysisManager &AM,
+ const ObjCAutoreleaseWriteChecker *Checker) {
+ AnalysisDeclContext *ADC = AM.getAnalysisDeclContext(D);
+
+ const auto *PVD = Match.getNodeAs<ParmVarDecl>(ParamBind);
+ QualType Ty = PVD->getType();
+ if (Ty->getPointeeType().getObjCLifetime() != Qualifiers::OCL_Autoreleasing)
+ return;
+ const char *ActionMsg = "Write to";
+ const auto *MarkedStmt = Match.getNodeAs<Expr>(ProblematicWriteBind);
+ bool IsCapture = false;
+
+ // Prefer to warn on write, but if not available, warn on capture.
+ if (!MarkedStmt) {
+ MarkedStmt = Match.getNodeAs<Expr>(CapturedBind);
+ assert(MarkedStmt);
+ ActionMsg = "Capture of";
+ IsCapture = true;
+ }
+
+ SourceRange Range = MarkedStmt->getSourceRange();
+ PathDiagnosticLocation Location = PathDiagnosticLocation::createBegin(
+ MarkedStmt, BR.getSourceManager(), ADC);
+ bool IsMethod = Match.getNodeAs<ObjCMethodDecl>(IsMethodBind) != nullptr;
+ const char *Name = IsMethod ? "method" : "function";
+
+ BR.EmitBasicReport(
+ ADC->getDecl(), Checker,
+ /*Name=*/(llvm::Twine(ActionMsg)
+ + " autoreleasing out parameter inside autorelease pool").str(),
+ /*Category=*/"Memory",
+ (llvm::Twine(ActionMsg) + " autoreleasing out parameter " +
+ (IsCapture ? "'" + PVD->getName() + "'" + " " : "") + "inside " +
+ "autorelease pool that may exit before " + Name + " returns; consider "
+ "writing first to a strong local variable declared outside of the block")
+ .str(),
+ Location,
+ Range);
+}
+
+void ObjCAutoreleaseWriteChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const {
+
+ auto DoublePointerParamM =
+ parmVarDecl(hasType(hasCanonicalType(pointerType(
+ pointee(hasCanonicalType(objcObjectPointerType()))))))
+ .bind(ParamBind);
+
+ auto ReferencedParamM =
+ declRefExpr(to(parmVarDecl(DoublePointerParamM))).bind(CapturedBind);
+
+ // Write into a bound object, e.g. *ParamBind = X.
+ auto WritesIntoM = binaryOperator(
+ hasLHS(unaryOperator(
+ hasOperatorName("*"),
+ hasUnaryOperand(
+ ignoringParenImpCasts(ReferencedParamM))
+ )),
+ hasOperatorName("=")
+ ).bind(ProblematicWriteBind);
+
+ auto ArgumentCaptureM = hasAnyArgument(
+ ignoringParenImpCasts(ReferencedParamM));
+ auto CapturedInParamM = stmt(anyOf(
+ callExpr(ArgumentCaptureM),
+ objcMessageExpr(ArgumentCaptureM)));
+
+ // WritesIntoM happens inside a block passed as an argument.
+ auto WritesOrCapturesInBlockM = hasAnyArgument(allOf(
+ hasType(hasCanonicalType(blockPointerType())),
+ forEachDescendant(
+ stmt(anyOf(WritesIntoM, CapturedInParamM))
+ )));
+
+ auto BlockPassedToMarkedFuncM = stmt(anyOf(
+ callExpr(allOf(
+ callsNames(FunctionsWithAutoreleasingPool), WritesOrCapturesInBlockM)),
+ objcMessageExpr(allOf(
+ hasAnySelector(toRefs(SelectorsWithAutoreleasingPool)),
+ WritesOrCapturesInBlockM))
+ ));
+
+ auto HasParamAndWritesInMarkedFuncM = allOf(
+ hasAnyParameter(DoublePointerParamM),
+ forEachDescendant(BlockPassedToMarkedFuncM));
+
+ auto MatcherM = decl(anyOf(
+ objcMethodDecl(HasParamAndWritesInMarkedFuncM).bind(IsMethodBind),
+ functionDecl(HasParamAndWritesInMarkedFuncM),
+ blockDecl(HasParamAndWritesInMarkedFuncM)));
+
+ auto Matches = match(MatcherM, *D, AM.getASTContext());
+ for (BoundNodes Match : Matches)
+ emitDiagnostics(Match, D, BR, AM, this);
+}
+
+void ento::registerAutoreleaseWriteChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ObjCAutoreleaseWriteChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 58ebf72660b6..fb05ca630b45 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -39,7 +39,7 @@ class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
}
inline SymbolRef getArraySym(const Expr *E, CheckerContext &C) const {
- SVal ArrayRef = C.getState()->getSVal(E, C.getLocationContext());
+ SVal ArrayRef = C.getSVal(E);
SymbolRef ArraySym = ArrayRef.getAsSymbol();
return ArraySym;
}
@@ -66,13 +66,13 @@ REGISTER_MAP_WITH_PROGRAMSTATE(ArraySizeMap, SymbolRef, DefinedSVal)
void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- SVal SizeV = State->getSVal(Size, C.getLocationContext());
+ SVal SizeV = C.getSVal(Size);
// Undefined is reported by another checker.
if (SizeV.isUnknownOrUndef())
return;
// Get the ArrayRef symbol.
- SVal ArrayRef = State->getSVal(Array, C.getLocationContext());
+ SVal ArrayRef = C.getSVal(Array);
SymbolRef ArraySym = ArrayRef.getAsSymbol();
if (!ArraySym)
return;
@@ -128,7 +128,7 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
// Get the index.
const Expr *IdxExpr = CE->getArg(1);
- SVal IdxVal = State->getSVal(IdxExpr, C.getLocationContext());
+ SVal IdxVal = C.getSVal(IdxExpr);
if (IdxVal.isUnknownOrUndef())
return;
DefinedSVal Idx = IdxVal.castAs<DefinedSVal>();
diff --git a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 32a1adb587bf..d01c6ae6e093 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -81,7 +81,7 @@ private:
}
-/// \brief Determine whether the given class has a superclass that we want
+/// Determine whether the given class has a superclass that we want
/// to check. The name of the found superclass is stored in SuperclassName.
///
/// \param D The declaration to check for superclasses.
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index ffa3a2700616..629520437369 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -86,11 +86,11 @@ public:
namespace {
enum SelfFlagEnum {
- /// \brief No flag set.
+ /// No flag set.
SelfFlag_None = 0x0,
- /// \brief Value came from 'self'.
+ /// Value came from 'self'.
SelfFlag_Self = 0x1,
- /// \brief Value came from the result of an initializer (e.g. [super init]).
+ /// Value came from the result of an initializer (e.g. [super init]).
SelfFlag_InitRes = 0x2
};
}
@@ -98,7 +98,7 @@ enum SelfFlagEnum {
REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned)
REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool)
-/// \brief A call receiving a reference to 'self' invalidates the object that
+/// A call receiving a reference to 'self' invalidates the object that
/// 'self' contains. This keeps the "self flags" assigned to the 'self'
/// object before the call so we can assign them to the new object that 'self'
/// points to after the call.
@@ -128,11 +128,11 @@ static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
return getSelfFlags(val, C) & flag;
}
-/// \brief Returns true of the value of the expression is the object that 'self'
+/// Returns true if the value of the expression is the object that 'self'
/// points to and is an object that did not come from the result of calling
/// an initializer.
static bool isInvalidSelf(const Expr *E, CheckerContext &C) {
- SVal exprVal = C.getState()->getSVal(E, C.getLocationContext());
+ SVal exprVal = C.getSVal(E);
if (!hasSelfFlag(exprVal, SelfFlag_Self, C))
return false; // value did not come from 'self'.
if (hasSelfFlag(exprVal, SelfFlag_InitRes, C))
@@ -183,7 +183,7 @@ void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
// value out when we return from this method.
state = state->set<CalledInit>(true);
- SVal V = state->getSVal(Msg.getOriginExpr(), C.getLocationContext());
+ SVal V = C.getSVal(Msg.getOriginExpr());
addSelfFlag(state, V, SelfFlag_InitRes, C);
return;
}
@@ -407,7 +407,7 @@ static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
return ID != nullptr;
}
-/// \brief Returns true if the location is 'self'.
+/// Returns true if the location is 'self'.
static bool isSelfVar(SVal location, CheckerContext &C) {
AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
if (!analCtx->getSelfDecl())
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index 69b19a785938..fcba3b33f3e0 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -62,9 +62,7 @@ private:
REGISTER_SET_WITH_PROGRAMSTATE(CalledSuperDealloc, SymbolRef)
namespace {
-class SuperDeallocBRVisitor final
- : public BugReporterVisitorImpl<SuperDeallocBRVisitor> {
-
+class SuperDeallocBRVisitor final : public BugReporterVisitor {
SymbolRef ReceiverSymbol;
bool Satisfied;
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 6c0c53dd64cb..f69f3492edb1 100644
--- a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -67,7 +67,7 @@ public:
visitor.TraverseDecl(const_cast<TranslationUnitDecl *>(TUD));
}
- /// \brief Look for records of overly padded types. If padding *
+ /// Look for records of overly padded types. If padding *
/// PadMultiplier exceeds AllowedPad, then generate a report.
/// PadMultiplier is used to share code with the array padding
/// checker.
@@ -97,7 +97,7 @@ public:
reportRecord(RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
}
- /// \brief Look for arrays of overly padded types. If the padding of the
+ /// Look for arrays of overly padded types. If the padding of the
/// array type exceeds AllowedPad, then generate a report.
void visitVariable(const VarDecl *VD) const {
const ArrayType *ArrTy = VD->getType()->getAsArrayTypeUnsafe();
@@ -237,7 +237,7 @@ public:
};
std::transform(RD->field_begin(), RD->field_end(),
std::back_inserter(Fields), GatherSizesAndAlignments);
- std::sort(Fields.begin(), Fields.end());
+ llvm::sort(Fields.begin(), Fields.end());
// This lets us skip over vptrs and non-virtual bases,
// so that we can just worry about the fields in our object.
// Note that this does cause us to miss some cases where we
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 8caf6df4d970..63f82b275ba2 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -154,8 +154,7 @@ void PointerArithChecker::reportPointerArithMisuse(const Expr *E,
return;
ProgramStateRef State = C.getState();
- const MemRegion *Region =
- State->getSVal(E, C.getLocationContext()).getAsRegion();
+ const MemRegion *Region = C.getSVal(E).getAsRegion();
if (!Region)
return;
if (PointedNeeded)
@@ -227,7 +226,7 @@ void PointerArithChecker::checkPostStmt(const CallExpr *CE,
if (AllocFunctions.count(FunI) == 0)
return;
- SVal SV = State->getSVal(CE, C.getLocationContext());
+ SVal SV = C.getSVal(CE);
const MemRegion *Region = SV.getAsRegion();
if (!Region)
return;
@@ -248,7 +247,7 @@ void PointerArithChecker::checkPostStmt(const CXXNewExpr *NE,
AllocKind Kind = getKindOfNewOp(NE, FD);
ProgramStateRef State = C.getState();
- SVal AllocedVal = State->getSVal(NE, C.getLocationContext());
+ SVal AllocedVal = C.getSVal(NE);
const MemRegion *Region = AllocedVal.getAsRegion();
if (!Region)
return;
@@ -263,7 +262,7 @@ void PointerArithChecker::checkPostStmt(const CastExpr *CE,
const Expr *CastedExpr = CE->getSubExpr();
ProgramStateRef State = C.getState();
- SVal CastedVal = State->getSVal(CastedExpr, C.getLocationContext());
+ SVal CastedVal = C.getSVal(CastedExpr);
const MemRegion *Region = CastedVal.getAsRegion();
if (!Region)
@@ -281,7 +280,7 @@ void PointerArithChecker::checkPreStmt(const CastExpr *CE,
const Expr *CastedExpr = CE->getSubExpr();
ProgramStateRef State = C.getState();
- SVal CastedVal = State->getSVal(CastedExpr, C.getLocationContext());
+ SVal CastedVal = C.getSVal(CastedExpr);
const MemRegion *Region = CastedVal.getAsRegion();
if (!Region)
@@ -304,12 +303,15 @@ void PointerArithChecker::checkPreStmt(const UnaryOperator *UOp,
void PointerArithChecker::checkPreStmt(const ArraySubscriptExpr *SubsExpr,
CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- SVal Idx = State->getSVal(SubsExpr->getIdx(), C.getLocationContext());
+ SVal Idx = C.getSVal(SubsExpr->getIdx());
// Indexing with 0 is OK.
if (Idx.isZeroConstant())
return;
+
+ // Indexing vector-type expressions is also OK.
+ if (SubsExpr->getBase()->getType()->isVectorType())
+ return;
reportPointerArithMisuse(SubsExpr->getBase(), C);
}
@@ -324,14 +326,14 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *BOp,
ProgramStateRef State = C.getState();
if (Rhs->getType()->isIntegerType() && Lhs->getType()->isPointerType()) {
- SVal RHSVal = State->getSVal(Rhs, C.getLocationContext());
+ SVal RHSVal = C.getSVal(Rhs);
if (State->isNull(RHSVal).isConstrainedTrue())
return;
reportPointerArithMisuse(Lhs, C, !BOp->isAdditiveOp());
}
// The int += ptr; case is not valid C++.
if (Lhs->getType()->isIntegerType() && Rhs->getType()->isPointerType()) {
- SVal LHSVal = State->getSVal(Lhs, C.getLocationContext());
+ SVal LHSVal = C.getSVal(Lhs);
if (State->isNull(LHSVal).isConstrainedTrue())
return;
reportPointerArithMisuse(Rhs, C);
diff --git a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 2d33ebc2610d..9aa5348e4c34 100644
--- a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -39,10 +39,8 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
if (B->getOpcode() != BO_Sub)
return;
- ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
- SVal LV = state->getSVal(B->getLHS(), LCtx);
- SVal RV = state->getSVal(B->getRHS(), LCtx);
+ SVal LV = C.getSVal(B->getLHS());
+ SVal RV = C.getSVal(B->getRHS());
const MemRegion *LR = LV.getAsRegion();
const MemRegion *RR = RV.getAsRegion();
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index dab29be1c8fb..10ab952e069b 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -109,8 +109,6 @@ REGISTER_MAP_WITH_PROGRAMSTATE(DestroyRetVal, const MemRegion *, SymbolRef)
void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
- ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
StringRef FName = C.getCalleeName(CE);
if (FName.empty())
return;
@@ -121,34 +119,31 @@ void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
if (FName == "pthread_mutex_lock" ||
FName == "pthread_rwlock_rdlock" ||
FName == "pthread_rwlock_wrlock")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
- false, PthreadSemantics);
+ AcquireLock(C, CE, C.getSVal(CE->getArg(0)), false, PthreadSemantics);
else if (FName == "lck_mtx_lock" ||
FName == "lck_rw_lock_exclusive" ||
FName == "lck_rw_lock_shared")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
- false, XNUSemantics);
+ AcquireLock(C, CE, C.getSVal(CE->getArg(0)), false, XNUSemantics);
else if (FName == "pthread_mutex_trylock" ||
FName == "pthread_rwlock_tryrdlock" ||
FName == "pthread_rwlock_trywrlock")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ AcquireLock(C, CE, C.getSVal(CE->getArg(0)),
true, PthreadSemantics);
else if (FName == "lck_mtx_try_lock" ||
FName == "lck_rw_try_lock_exclusive" ||
FName == "lck_rw_try_lock_shared")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
- true, XNUSemantics);
+ AcquireLock(C, CE, C.getSVal(CE->getArg(0)), true, XNUSemantics);
else if (FName == "pthread_mutex_unlock" ||
FName == "pthread_rwlock_unlock" ||
FName == "lck_mtx_unlock" ||
FName == "lck_rw_done")
- ReleaseLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+ ReleaseLock(C, CE, C.getSVal(CE->getArg(0)));
else if (FName == "pthread_mutex_destroy")
- DestroyLock(C, CE, state->getSVal(CE->getArg(0), LCtx), PthreadSemantics);
+ DestroyLock(C, CE, C.getSVal(CE->getArg(0)), PthreadSemantics);
else if (FName == "lck_mtx_destroy")
- DestroyLock(C, CE, state->getSVal(CE->getArg(0), LCtx), XNUSemantics);
+ DestroyLock(C, CE, C.getSVal(CE->getArg(0)), XNUSemantics);
else if (FName == "pthread_mutex_init")
- InitLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+ InitLock(C, CE, C.getSVal(CE->getArg(0)));
}
// When a lock is destroyed, in some semantics (like PthreadSemantics) we are not
@@ -232,7 +227,7 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
if (sym)
state = resolvePossiblyDestroyedMutex(state, lockR, sym);
- SVal X = state->getSVal(CE, C.getLocationContext());
+ SVal X = C.getSVal(CE);
if (X.isUnknownOrUndef())
return;
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index e47494a3e90b..2c1e139330d6 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -555,7 +555,7 @@ public:
}
const RetainSummary *find(IdentifierInfo* II, Selector S) {
- // FIXME: Class method lookup. Right now we dont' have a good way
+ // FIXME: Class method lookup. Right now we don't have a good way
// of going between IdentifierInfo* and the class hierarchy.
MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
@@ -883,21 +883,22 @@ RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
//===----------------------------------------------------------------------===//
static bool isRetain(const FunctionDecl *FD, StringRef FName) {
- return FName.endswith("Retain");
+ return FName.startswith_lower("retain") || FName.endswith_lower("retain");
}
static bool isRelease(const FunctionDecl *FD, StringRef FName) {
- return FName.endswith("Release");
+ return FName.startswith_lower("release") || FName.endswith_lower("release");
}
static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
- return FName.endswith("Autorelease");
+ return FName.startswith_lower("autorelease") ||
+ FName.endswith_lower("autorelease");
}
static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
// FIXME: Remove FunctionDecl parameter.
// FIXME: Is it really okay if MakeCollectable isn't a suffix?
- return FName.find("MakeCollectable") != StringRef::npos;
+ return FName.find_lower("MakeCollectable") != StringRef::npos;
}
static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
@@ -1787,8 +1788,7 @@ namespace {
//===---------===//
// Bug Reports. //
//===---------===//
-
- class CFRefReportVisitor : public BugReporterVisitorImpl<CFRefReportVisitor> {
+ class CFRefReportVisitor : public BugReporterVisitor {
protected:
SymbolRef Sym;
const SummaryLogTy &SummaryLog;
@@ -1809,7 +1809,7 @@ namespace {
BugReporterContext &BRC,
BugReport &BR) override;
- std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) override;
};
@@ -1820,18 +1820,9 @@ namespace {
const SummaryLogTy &log)
: CFRefReportVisitor(sym, GCEnabled, log) {}
- std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) override;
-
- std::unique_ptr<BugReporterVisitor> clone() const override {
- // The curiously-recurring template pattern only works for one level of
- // subclassing. Rather than make a new template base for
- // CFRefReportVisitor, we simply override clone() to do the right thing.
- // This could be trouble someday if BugReporterVisitorImpl is ever
- // used for something else besides a convenient implementation of clone().
- return llvm::make_unique<CFRefLeakReportVisitor>(*this);
- }
};
class CFRefReport : public BugReport {
@@ -1929,6 +1920,14 @@ static bool isNumericLiteralExpression(const Expr *E) {
isa<CXXBoolLiteralExpr>(E);
}
+static Optional<std::string> describeRegion(const MemRegion *MR) {
+ if (const auto *VR = dyn_cast_or_null<VarRegion>(MR))
+ return std::string(VR->getDecl()->getName());
+ // Once we support more storage locations for bindings,
+ // this would need to be improved.
+ return None;
+}
+
/// Returns true if this stack frame is for an Objective-C method that is a
/// property getter or setter whose body has been synthesized by the analyzer.
static bool isSynthesizedAccessor(const StackFrameContext *SFC) {
@@ -1969,8 +1968,8 @@ CFRefReportVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN,
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
if (isa<ObjCIvarRefExpr>(S) &&
- isSynthesizedAccessor(LCtx->getCurrentStackFrame())) {
- S = LCtx->getCurrentStackFrame()->getCallSite();
+ isSynthesizedAccessor(LCtx->getStackFrame())) {
+ S = LCtx->getStackFrame()->getCallSite();
}
if (isa<ObjCArrayLiteral>(S)) {
@@ -2298,7 +2297,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
// Do not show local variables belonging to a function other than
// where the error is reported.
- if (!VR || VR->getStackFrame() == LeakContext->getCurrentStackFrame())
+ if (!VR || VR->getStackFrame() == LeakContext->getStackFrame())
FirstBinding = R;
}
@@ -2356,14 +2355,14 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
InterestingMethodContext);
}
-std::unique_ptr<PathDiagnosticPiece>
+std::shared_ptr<PathDiagnosticPiece>
CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *EndN, BugReport &BR) {
BR.markInteresting(Sym);
return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
}
-std::unique_ptr<PathDiagnosticPiece>
+std::shared_ptr<PathDiagnosticPiece>
CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *EndN, BugReport &BR) {
@@ -2393,9 +2392,9 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << "Object leaked: ";
- if (FirstBinding) {
- os << "object allocated and stored into '"
- << FirstBinding->getString() << '\'';
+ Optional<std::string> RegionDescription = describeRegion(FirstBinding);
+ if (RegionDescription) {
+ os << "object allocated and stored into '" << *RegionDescription << '\'';
}
else
os << "allocated object";
@@ -2450,7 +2449,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << " is not referenced later in this execution path and has a retain "
"count of +" << RV->getCount();
- return llvm::make_unique<PathDiagnosticEventPiece>(L, os.str());
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
void CFRefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
@@ -2513,7 +2512,8 @@ void CFRefLeakReport::deriveAllocLocation(CheckerContext &Ctx,SymbolRef sym) {
UniqueingDecl = AllocNode->getLocationContext()->getDecl();
}
-void CFRefLeakReport::createDescription(CheckerContext &Ctx, bool GCEnabled, bool IncludeAllocationLine) {
+void CFRefLeakReport::createDescription(CheckerContext &Ctx, bool GCEnabled,
+ bool IncludeAllocationLine) {
assert(Location.isValid() && UniqueingDecl && UniqueingLocation.isValid());
Description.clear();
llvm::raw_string_ostream os(Description);
@@ -2522,8 +2522,9 @@ void CFRefLeakReport::createDescription(CheckerContext &Ctx, bool GCEnabled, boo
os << "(when using garbage collection) ";
os << "of an object";
- if (AllocBinding) {
- os << " stored into '" << AllocBinding->getString() << '\'';
+ Optional<std::string> RegionDescription = describeRegion(AllocBinding);
+ if (RegionDescription) {
+ os << " stored into '" << *RegionDescription << '\'';
if (IncludeAllocationLine) {
FullSourceLoc SL(AllocStmt->getLocStart(), Ctx.getSourceManager());
os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
@@ -2742,7 +2743,7 @@ public:
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkBeginFunction(CheckerContext &C) const;
- void checkEndFunction(CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
@@ -2799,9 +2800,7 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
return;
ProgramStateRef state = C.getState();
- const BlockDataRegion *R =
- cast<BlockDataRegion>(state->getSVal(BE,
- C.getLocationContext()).getAsRegion());
+ auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
@@ -2851,7 +2850,7 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
}
ProgramStateRef state = C.getState();
- SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
+ SymbolRef Sym = C.getSVal(CE).getAsLocSymbol();
if (!Sym)
return;
const RefVal* T = getRefBinding(state, Sym);
@@ -2874,7 +2873,7 @@ void RetainCountChecker::processObjCLiterals(CheckerContext &C,
ProgramStateRef state = C.getState();
const ExplodedNode *pred = C.getPredecessor();
for (const Stmt *Child : Ex->children()) {
- SVal V = state->getSVal(Child, pred->getLocationContext());
+ SVal V = pred->getSVal(Child);
if (SymbolRef sym = V.getAsSymbol())
if (const RefVal* T = getRefBinding(state, sym)) {
RefVal::Kind hasErr = (RefVal::Kind) 0;
@@ -2913,10 +2912,9 @@ void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
CheckerContext &C) const {
const ExplodedNode *Pred = C.getPredecessor();
- const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
- if (SymbolRef Sym = State->getSVal(Ex, LCtx).getAsSymbol()) {
+ if (SymbolRef Sym = Pred->getSVal(Ex).getAsSymbol()) {
QualType ResultTy = Ex->getType();
State = setRefBinding(State, Sym,
RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
@@ -3993,7 +3991,8 @@ void RetainCountChecker::checkBeginFunction(CheckerContext &Ctx) const {
Ctx.addTransition(state);
}
-void RetainCountChecker::checkEndFunction(CheckerContext &Ctx) const {
+void RetainCountChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &Ctx) const {
ProgramStateRef state = Ctx.getState();
RefBindingsTy B = state->get<RefBindings>();
ExplodedNode *Pred = Ctx.getPredecessor();
diff --git a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 19fa0fb193cc..1952715a9b7c 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -40,7 +40,7 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
if (!RetE)
return;
- SVal V = state->getSVal(RetE, C.getLocationContext());
+ SVal V = C.getSVal(RetE);
const MemRegion *R = V.getAsRegion();
const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
diff --git a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
new file mode 100644
index 000000000000..64b61a0213d2
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -0,0 +1,217 @@
+//=- RunLoopAutoreleaseLeakChecker.cpp --------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+//===----------------------------------------------------------------------===//
+//
+// A checker for detecting leaks resulting from allocating temporary
+// autoreleased objects before starting the main run loop.
+//
+// Checks for two antipatterns:
+// 1. ObjCMessageExpr followed by [[NSRunLoop mainRunLoop] run] in the same
+// autorelease pool.
+// 2. ObjCMessageExpr followed by [[NSRunLoop mainRunLoop] run] in no
+// autorelease pool.
+//
+// Any temporary objects autoreleased in code called in those expressions
+// will not be deallocated until the program exits, and are effectively leaks.
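+//
+// A minimal sketch of antipattern (2), with hypothetical code:
+//
+//   int main(int argc, char *argv[]) {
+//     NSString *s = [NSString stringWithFormat:@"%d", argc]; // autoreleased
+//     NSLog(@"%@", s);
+//     [[NSRunLoop mainRunLoop] run]; // never returns; the pool never drains
+//     return 0;
+//   }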
+//
+//===----------------------------------------------------------------------===//
+//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+const char * RunLoopBind = "NSRunLoopM";
+const char * RunLoopRunBind = "RunLoopRunM";
+const char * OtherMsgBind = "OtherMessageSentM";
+const char * AutoreleasePoolBind = "AutoreleasePoolM";
+
+class RunLoopAutoreleaseLeakChecker : public Checker<
+ check::ASTCodeBody> {
+
+public:
+ void checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const;
+
+};
+
+} // end anonymous namespace
+
+
+using TriBoolTy = Optional<bool>;
+using MemoizationMapTy = llvm::DenseMap<const Stmt *, Optional<TriBoolTy>>;
+
+static TriBoolTy
+seenBeforeRec(const Stmt *Parent, const Stmt *A, const Stmt *B,
+ MemoizationMapTy &Memoization) {
+ for (const Stmt *C : Parent->children()) {
+ if (C == A)
+ return true;
+
+ if (C == B)
+ return false;
+
+ Optional<TriBoolTy> &Cached = Memoization[C];
+ if (!Cached)
+ Cached = seenBeforeRec(C, A, B, Memoization);
+
+ if (Cached->hasValue())
+ return Cached->getValue();
+ }
+
+ return None;
+}
+
+/// \return Whether {@code A} occurs before {@code B} in traversal of
+/// {@code Parent}.
+/// Conceptually a very incomplete/unsound approximation of the happens-before
+/// relationship (when it returns true, A is likely to be evaluated before B),
+/// but useful enough in this case.
+static bool seenBefore(const Stmt *Parent, const Stmt *A, const Stmt *B) {
+ MemoizationMapTy Memoization;
+ TriBoolTy Val = seenBeforeRec(Parent, A, B, Memoization);
+ return Val.getValue();
+}
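+
+// For example, for a body `{ [self foo]; [self bar]; }`, seenBefore(Body,
+// FooMsg, BarMsg) is true: the traversal reaches the foo message first.
+// (Illustrative; FooMsg/BarMsg stand for the corresponding ObjCMessageExprs.)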
+
+static void emitDiagnostics(BoundNodes &Match,
+ const Decl *D,
+ BugReporter &BR,
+ AnalysisManager &AM,
+ const RunLoopAutoreleaseLeakChecker *Checker) {
+
+ assert(D->hasBody());
+ const Stmt *DeclBody = D->getBody();
+
+ AnalysisDeclContext *ADC = AM.getAnalysisDeclContext(D);
+
+ const auto *ME = Match.getNodeAs<ObjCMessageExpr>(OtherMsgBind);
+ assert(ME);
+
+ const auto *AP =
+ Match.getNodeAs<ObjCAutoreleasePoolStmt>(AutoreleasePoolBind);
+ bool HasAutoreleasePool = (AP != nullptr);
+
+ const auto *RL = Match.getNodeAs<ObjCMessageExpr>(RunLoopBind);
+ const auto *RLR = Match.getNodeAs<Stmt>(RunLoopRunBind);
+ assert(RLR && "Run loop launch not found");
+
+ assert(ME != RLR);
+ if (HasAutoreleasePool && seenBefore(AP, RLR, ME))
+ return;
+
+ if (!HasAutoreleasePool && seenBefore(DeclBody, RLR, ME))
+ return;
+
+ PathDiagnosticLocation Location = PathDiagnosticLocation::createBegin(
+ ME, BR.getSourceManager(), ADC);
+ SourceRange Range = ME->getSourceRange();
+
+ BR.EmitBasicReport(ADC->getDecl(), Checker,
+ /*Name=*/"Memory leak inside autorelease pool",
+ /*Category=*/"Memory",
+ /*Str=*/
+ (Twine("Temporary objects allocated in the") +
+ " autorelease pool " +
+ (HasAutoreleasePool ? "" : "of last resort ") +
+ "followed by the launch of " +
+ (RL ? "main run loop " : "xpc_main ") +
+ "may never get released; consider moving them to a "
+ "separate autorelease pool")
+ .str(),
+ Location, Range);
+}
+
+static StatementMatcher getRunLoopRunM(StatementMatcher Extra = anything()) {
+ StatementMatcher MainRunLoopM =
+ objcMessageExpr(hasSelector("mainRunLoop"),
+ hasReceiverType(asString("NSRunLoop")),
+ Extra)
+ .bind(RunLoopBind);
+
+ StatementMatcher MainRunLoopRunM = objcMessageExpr(hasSelector("run"),
+ hasReceiver(MainRunLoopM),
+ Extra).bind(RunLoopRunBind);
+
+ StatementMatcher XPCRunM =
+ callExpr(callee(functionDecl(hasName("xpc_main")))).bind(RunLoopRunBind);
+ return anyOf(MainRunLoopRunM, XPCRunM);
+}
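+// The matcher above binds either `[[NSRunLoop mainRunLoop] run]` or a call to
+// `xpc_main` to RunLoopRunBind; the `Extra` parameter lets callers add
+// constraints, e.g. "not inside an autorelease pool" (see below).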
+
+static StatementMatcher getOtherMessageSentM(StatementMatcher Extra = anything()) {
+ return objcMessageExpr(unless(anyOf(equalsBoundNode(RunLoopBind),
+ equalsBoundNode(RunLoopRunBind))),
+ Extra)
+ .bind(OtherMsgBind);
+}
+
+static void
+checkTempObjectsInSamePool(const Decl *D, AnalysisManager &AM, BugReporter &BR,
+ const RunLoopAutoreleaseLeakChecker *Chkr) {
+ StatementMatcher RunLoopRunM = getRunLoopRunM();
+ StatementMatcher OtherMessageSentM = getOtherMessageSentM();
+
+ StatementMatcher RunLoopInAutorelease =
+ autoreleasePoolStmt(
+ hasDescendant(RunLoopRunM),
+ hasDescendant(OtherMessageSentM)).bind(AutoreleasePoolBind);
+
+ DeclarationMatcher GroupM = decl(hasDescendant(RunLoopInAutorelease));
+
+ auto Matches = match(GroupM, *D, AM.getASTContext());
+ for (BoundNodes Match : Matches)
+ emitDiagnostics(Match, D, BR, AM, Chkr);
+}
+
+static void
+checkTempObjectsInNoPool(const Decl *D, AnalysisManager &AM, BugReporter &BR,
+ const RunLoopAutoreleaseLeakChecker *Chkr) {
+
+ auto NoPoolM = unless(hasAncestor(autoreleasePoolStmt()));
+
+ StatementMatcher RunLoopRunM = getRunLoopRunM(NoPoolM);
+ StatementMatcher OtherMessageSentM = getOtherMessageSentM(NoPoolM);
+
+ DeclarationMatcher GroupM = functionDecl(
+ isMain(),
+ hasDescendant(RunLoopRunM),
+ hasDescendant(OtherMessageSentM)
+ );
+
+ auto Matches = match(GroupM, *D, AM.getASTContext());
+
+ for (BoundNodes Match : Matches)
+ emitDiagnostics(Match, D, BR, AM, Chkr);
+
+}
+
+void RunLoopAutoreleaseLeakChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const {
+ checkTempObjectsInSamePool(D, AM, BR, this);
+ checkTempObjectsInNoPool(D, AM, BR, this);
+}
+
+void ento::registerRunLoopAutoreleaseLeakChecker(CheckerManager &mgr) {
+ mgr.registerChecker<RunLoopAutoreleaseLeakChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 25975628c553..feae9e59b343 100644
--- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -47,7 +47,7 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
- void checkEndFunction(CheckerContext &Ctx) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &Ctx) const;
private:
void checkReturnedBlockCaptures(const BlockDataRegion &B,
@@ -120,7 +120,7 @@ bool StackAddrEscapeChecker::isArcManagedBlock(const MemRegion *R,
bool StackAddrEscapeChecker::isNotInCurrentFrame(const MemRegion *R,
CheckerContext &C) {
const StackSpaceRegion *S = cast<StackSpaceRegion>(R->getMemorySpace());
- return S->getStackFrame() != C.getLocationContext()->getCurrentStackFrame();
+ return S->getStackFrame() != C.getStackFrame();
}
bool StackAddrEscapeChecker::isSemaphoreCaptured(const BlockDecl &B) const {
@@ -255,8 +255,7 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
return;
RetE = RetE->IgnoreParens();
- const LocationContext *LCtx = C.getLocationContext();
- SVal V = C.getState()->getSVal(RetE, LCtx);
+ SVal V = C.getSVal(RetE);
const MemRegion *R = V.getAsRegion();
if (!R)
return;
@@ -288,7 +287,8 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
EmitStackError(C, R, RetE);
}
-void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
+void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &Ctx) const {
if (!ChecksEnabled[CK_StackAddrEscapeChecker])
return;
@@ -304,8 +304,7 @@ void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
public:
SmallVector<std::pair<const MemRegion *, const MemRegion *>, 10> V;
- CallBack(CheckerContext &CC)
- : Ctx(CC), CurSFC(CC.getLocationContext()->getCurrentStackFrame()) {}
+ CallBack(CheckerContext &CC) : Ctx(CC), CurSFC(CC.getStackFrame()) {}
bool HandleBinding(StoreManager &SMgr, Store S, const MemRegion *Region,
SVal Val) override {
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 915514b42133..d77975559e3f 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -242,22 +242,19 @@ void StreamChecker::Fclose(CheckerContext &C, const CallExpr *CE) const {
void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(3)), state, C))
return;
}
void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(3)), state, C))
return;
}
void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!(state = CheckNullStream(state->getSVal(CE->getArg(0),
- C.getLocationContext()), state, C)))
+ if (!(state = CheckNullStream(C.getSVal(CE->getArg(0)), state, C)))
return;
// Check the legality of the 'whence' argument of 'fseek'.
SVal Whence = state->getSVal(CE->getArg(2), C.getLocationContext());
@@ -283,57 +280,49 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
- state, C))
+ if (!CheckNullStream(C.getSVal(CE->getArg(0)), state, C))
return;
}
@@ -363,8 +352,7 @@ ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
ProgramStateRef state,
CheckerContext &C) const {
- SymbolRef Sym =
- state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
+ SymbolRef Sym = C.getSVal(CE->getArg(0)).getAsSymbol();
if (!Sym)
return state;
diff --git a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index 5268bbf5562e..f4c0edbab3f0 100644
--- a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -55,7 +55,7 @@ public:
}
};
-class DivisionBRVisitor : public BugReporterVisitorImpl<DivisionBRVisitor> {
+class DivisionBRVisitor : public BugReporterVisitor {
private:
SymbolRef ZeroSymbol;
const StackFrameContext *SFC;
@@ -85,7 +85,7 @@ class TestAfterDivZeroChecker
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
- void checkEndFunction(CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
void setDivZeroMap(SVal Var, CheckerContext &C) const;
bool hasDivZeroMap(SVal Var, const CheckerContext &C) const;
bool isZero(SVal S, CheckerContext &C) const;
@@ -114,8 +114,7 @@ DivisionBRVisitor::VisitNode(const ExplodedNode *Succ, const ExplodedNode *Pred,
if (!E)
return nullptr;
- ProgramStateRef State = Succ->getState();
- SVal S = State->getSVal(E, Succ->getLocationContext());
+ SVal S = Succ->getSVal(E);
if (ZeroSymbol == S.getAsSymbol() && SFC == Succ->getStackFrame()) {
Satisfied = true;
@@ -181,7 +180,8 @@ void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
}
}
-void TestAfterDivZeroChecker::checkEndFunction(CheckerContext &C) const {
+void TestAfterDivZeroChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
DivZeroMapTy DivZeroes = State->get<DivZeroMap>();
diff --git a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
index 8ad962875b06..ee185b813611 100644
--- a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -30,7 +30,7 @@ class TraversalDumper : public Checker< check::BranchCondition,
public:
void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
void checkBeginFunction(CheckerContext &C) const;
- void checkEndFunction(CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
};
}
@@ -56,7 +56,8 @@ void TraversalDumper::checkBeginFunction(CheckerContext &C) const {
llvm::outs() << "--BEGIN FUNCTION--\n";
}
-void TraversalDumper::checkEndFunction(CheckerContext &C) const {
+void TraversalDumper::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &C) const {
llvm::outs() << "--END FUNCTION--\n";
}
diff --git a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
new file mode 100644
index 000000000000..f3d68014224d
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -0,0 +1,90 @@
+//== TrustNonnullChecker.cpp - Checker for trusting annotations -*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker adds an assumption that methods annotated with _Nonnull
+// which come from system headers actually return a non-null pointer.
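+//
+// For example, given a (hypothetical) system-header declaration
+//
+//   - (NSString * _Nonnull)name;
+//
+// the analyzer assumes the value of `[obj name]` is non-nil, provided the
+// receiver `obj` is itself known to be non-null.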
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class TrustNonnullChecker : public Checker<check::PostCall> {
+private:
+ /// \returns Whether we trust the result of the method call to be
+ /// a non-null pointer.
+ bool isNonNullPtr(const CallEvent &Call, CheckerContext &C) const {
+ QualType ExprRetType = Call.getResultType();
+ if (!ExprRetType->isAnyPointerType())
+ return false;
+
+ if (getNullabilityAnnotation(ExprRetType) == Nullability::Nonnull)
+ return true;
+
+ // The logic for ObjC instance method calls is more complicated,
+ // as the return value is nil when the receiver is nil.
+ if (!isa<ObjCMethodCall>(&Call))
+ return false;
+
+ const auto *MCall = cast<ObjCMethodCall>(&Call);
+ const ObjCMethodDecl *MD = MCall->getDecl();
+
+ // Distrust protocols.
+ if (isa<ObjCProtocolDecl>(MD->getDeclContext()))
+ return false;
+
+ QualType DeclRetType = MD->getReturnType();
+ if (getNullabilityAnnotation(DeclRetType) != Nullability::Nonnull)
+ return false;
+
+ // For class messages it is sufficient for the declaration to be
+ // annotated _Nonnull.
+ if (!MCall->isInstanceMessage())
+ return true;
+
+ // Alternatively, the analyzer could know that the receiver is not null.
+ SVal Receiver = MCall->getReceiverSVal();
+ ConditionTruthVal TV = C.getState()->isNonNull(Receiver);
+ if (TV.isConstrainedTrue())
+ return true;
+
+ return false;
+ }
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ // Only trust annotations that come from system headers; protocol
+ // declarations are rejected separately in isNonNullPtr().
+ if (!Call.isInSystemHeader())
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ if (isNonNullPtr(Call, C))
+ if (auto L = Call.getReturnValue().getAs<Loc>())
+ State = State->assume(*L, /*Assumption=*/true);
+
+ C.addTransition(State);
+ }
+};
+
+} // end anonymous namespace
+
+
+void ento::registerTrustNonnullChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<TrustNonnullChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 0a274292aa39..934ee63318fa 100644
--- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -59,7 +59,7 @@ public:
void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
CheckerContext &Ctx) const {
- SVal X = Ctx.getState()->getSVal(Condition, Ctx.getLocationContext());
+ SVal X = Ctx.getSVal(Condition);
if (X.isUndef()) {
// Generate a sink node, which implicitly marks both outgoing branches as
// infeasible.
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 17fe8610da06..6a93c10c7644 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -55,9 +55,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
return;
ProgramStateRef state = C.getState();
- const BlockDataRegion *R =
- cast<BlockDataRegion>(state->getSVal(BE,
- C.getLocationContext()).getAsRegion());
+ auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 172ce346f1ba..b9a93bedca2e 100644
--- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -37,12 +37,11 @@ public:
static bool isArrayIndexOutOfBounds(CheckerContext &C, const Expr *Ex) {
ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
if (!isa<ArraySubscriptExpr>(Ex))
return false;
- SVal Loc = state->getSVal(Ex, LCtx);
+ SVal Loc = C.getSVal(Ex);
if (!Loc.isValid())
return false;
@@ -64,11 +63,18 @@ static bool isShiftOverflow(const BinaryOperator *B, CheckerContext &C) {
B->getRHS(), C.getASTContext().getIntWidth(B->getLHS()->getType()));
}
+static bool isLeftShiftResultUnrepresentable(const BinaryOperator *B,
+ CheckerContext &C) {
+ SValBuilder &SB = C.getSValBuilder();
+ ProgramStateRef State = C.getState();
+ const llvm::APSInt *LHS = SB.getKnownValue(State, C.getSVal(B->getLHS()));
+ const llvm::APSInt *RHS = SB.getKnownValue(State, C.getSVal(B->getRHS()));
+ return (unsigned)RHS->getZExtValue() > LHS->countLeadingZeros();
+}
+
void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
CheckerContext &C) const {
- ProgramStateRef state = C.getState();
- const LocationContext *LCtx = C.getLocationContext();
- if (state->getSVal(B, LCtx).isUndef()) {
+ if (C.getSVal(B).isUndef()) {
// Do not report assignments of uninitialized values inside swap functions.
// This should allow to swap partially uninitialized structs
@@ -92,11 +98,11 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
const Expr *Ex = nullptr;
bool isLeft = true;
- if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
+ if (C.getSVal(B->getLHS()).isUndef()) {
Ex = B->getLHS()->IgnoreParenCasts();
isLeft = true;
}
- else if (state->getSVal(B->getRHS(), LCtx).isUndef()) {
+ else if (C.getSVal(B->getRHS()).isUndef()) {
Ex = B->getRHS()->IgnoreParenCasts();
isLeft = false;
}
@@ -141,6 +147,19 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
C.isNegative(B->getLHS())) {
OS << "The result of the left shift is undefined because the left "
"operand is negative";
+ } else if (B->getOpcode() == BinaryOperatorKind::BO_Shl &&
+ isLeftShiftResultUnrepresentable(B, C)) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SB = C.getSValBuilder();
+ const llvm::APSInt *LHS =
+ SB.getKnownValue(State, C.getSVal(B->getLHS()));
+ const llvm::APSInt *RHS =
+ SB.getKnownValue(State, C.getSVal(B->getRHS()));
+ OS << "The result of the left shift is undefined due to shifting \'"
+ << LHS->getSExtValue() << "\' by \'" << RHS->getZExtValue()
+ << "\', which is unrepresentable in the unsigned version of "
+ << "the return type \'" << B->getLHS()->getType().getAsString()
+ << "\'";
} else {
OS << "The result of the '"
<< BinaryOperator::getOpcodeStr(B->getOpcode())
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index c3dcf1fac197..2ef6855ba6b7 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -51,17 +51,20 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
if (!N)
return;
- const char *str = "Assigned value is garbage or undefined";
-
+ static const char *const DefaultMsg =
+ "Assigned value is garbage or undefined";
if (!BT)
- BT.reset(new BuiltinBug(this, str));
+ BT.reset(new BuiltinBug(this, DefaultMsg));
// Generate a report for this bug.
+ llvm::SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+
const Expr *ex = nullptr;
while (StoreE) {
if (const UnaryOperator *U = dyn_cast<UnaryOperator>(StoreE)) {
- str = "The expression is an uninitialized value. "
+ OS << "The expression is an uninitialized value. "
"The computed value will also be garbage";
ex = U->getSubExpr();
@@ -70,9 +73,8 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(StoreE)) {
if (B->isCompoundAssignmentOp()) {
- ProgramStateRef state = C.getState();
- if (state->getSVal(B->getLHS(), C.getLocationContext()).isUndef()) {
- str = "The left expression of the compound assignment is an "
+ if (C.getSVal(B->getLHS()).isUndef()) {
+ OS << "The left expression of the compound assignment is an "
"uninitialized value. The computed value will also be garbage";
ex = B->getLHS();
break;
@@ -88,10 +90,26 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
ex = VD->getInit();
}
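+
+ // If the bind happens within a compiler-synthesized (implicit) constructor,
+ // name the member being copied, because the synthesized body itself is not
+ // visible in the user's code.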
+ if (const auto *CD =
+ dyn_cast<CXXConstructorDecl>(C.getStackFrame()->getDecl())) {
+ if (CD->isImplicit()) {
+ for (auto I : CD->inits()) {
+ if (I->getInit()->IgnoreImpCasts() == StoreE) {
+ OS << "Value assigned to field '" << I->getMember()->getName()
+ << "' in implicit constructor is garbage or undefined";
+ break;
+ }
+ }
+ }
+ }
+
break;
}
- auto R = llvm::make_unique<BugReport>(*BT, str, N);
+ if (OS.str().empty())
+ OS << DefaultMsg;
+
+ auto R = llvm::make_unique<BugReport>(*BT, OS.str(), N);
if (ex) {
R->addRange(ex->getSourceRange());
bugreporter::trackNullOrUndefValue(N, ex, *R);
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp
new file mode 100644
index 000000000000..398228a9d887
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp
@@ -0,0 +1,688 @@
+//===----- UninitializedObjectChecker.cpp ------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that reports uninitialized fields in objects
+// created after a constructor call.
+//
+// This checker has two options:
+// - "Pedantic" (boolean). If its not set or is set to false, the checker
+// won't emit warnings for objects that don't have at least one initialized
+// field. This may be set with
+//
+// `-analyzer-config alpha.cplusplus.UninitializedObject:Pedantic=true`.
+//
+// - "NotesAsWarnings" (boolean). If set to true, the checker will emit a
+// warning for each uninitialized field, as opposed to emitting one warning
+// per constructor call and listing the uninitialized fields that belong
+// to it in notes. Defaults to false.
+//
+// `-analyzer-config alpha.cplusplus.UninitializedObject:NotesAsWarnings=true`.
+//
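+// As an illustration, a full command line enabling the checker with the
+// "Pedantic" option might look like this (hypothetical input file test.cpp):
+//
+//   clang --analyze test.cpp \
+//     -Xclang -analyzer-checker=alpha.cplusplus.UninitializedObject \
+//     -Xclang -analyzer-config \
+//     -Xclang alpha.cplusplus.UninitializedObject:Pedantic=true
+//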
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace clang::ento;
+
+namespace {
+
+class UninitializedObjectChecker : public Checker<check::EndFunction> {
+ std::unique_ptr<BuiltinBug> BT_uninitField;
+
+public:
+ // These fields will be initialized when registering the checker.
+ bool IsPedantic;
+ bool ShouldConvertNotesToWarnings;
+
+ UninitializedObjectChecker()
+ : BT_uninitField(new BuiltinBug(this, "Uninitialized fields")) {}
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
+};
+
+/// Represents a field chain. A field chain is a vector of fields where the
+/// first element of the chain is the object under checking (not stored), and
+/// every other element is a field whose preceding element is the object that
+/// contains it.
+///
+/// Note that this class is immutable, and new fields may only be added through
+/// constructor calls.
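+///
+/// As an illustration (a hypothetical example):
+///
+///   struct B { int x; };
+///   struct A { B b; A() {} };
+///
+/// the chain for A's uninitialized field holds the regions of 'b' and 'x',
+/// and is printed as "this->b.x".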
+class FieldChainInfo {
+ using FieldChain = llvm::ImmutableList<const FieldRegion *>;
+
+ FieldChain Chain;
+
+ const bool IsDereferenced = false;
+
+public:
+ FieldChainInfo() = default;
+
+ FieldChainInfo(const FieldChainInfo &Other, const bool IsDereferenced)
+ : Chain(Other.Chain), IsDereferenced(IsDereferenced) {}
+
+ FieldChainInfo(const FieldChainInfo &Other, const FieldRegion *FR,
+ const bool IsDereferenced = false);
+
+ bool contains(const FieldRegion *FR) const { return Chain.contains(FR); }
+ bool isPointer() const;
+
+ /// If this is a fieldchain whose last element is an uninitialized region of a
+ /// pointer type, `IsDereferenced` will store whether the pointer itself or
+ /// the pointee is uninitialized.
+ bool isDereferenced() const;
+ const FieldDecl *getEndOfChain() const;
+ void print(llvm::raw_ostream &Out) const;
+
+private:
+ /// Prints every element except the last to `Out`. Since ImmutableLists store
+ /// elements in reverse order, and have no reverse iterators, we use a
+ /// recursive function to print the fieldchain correctly. The last element in
+ /// the chain is to be printed by `print`.
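+ /// For example, the chain for "this->a.b.c" is stored as [c, b, a];
+ /// `printTail` emits "a.b." and `print` appends the final "c".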
+ static void printTail(llvm::raw_ostream &Out,
+ const llvm::ImmutableListImpl<const FieldRegion *> *L);
+ friend struct FieldChainInfoComparator;
+};
+
+struct FieldChainInfoComparator {
+ bool operator()(const FieldChainInfo &lhs, const FieldChainInfo &rhs) const {
+ assert(!lhs.Chain.isEmpty() && !rhs.Chain.isEmpty() &&
+ "Attempted to store an empty fieldchain!");
+ return *lhs.Chain.begin() < *rhs.Chain.begin();
+ }
+};
+
+using UninitFieldSet = std::set<FieldChainInfo, FieldChainInfoComparator>;
+
+/// Searches for and stores uninitialized fields in a non-union object.
+class FindUninitializedFields {
+ ProgramStateRef State;
+ const TypedValueRegion *const ObjectR;
+
+ const bool IsPedantic;
+ bool IsAnyFieldInitialized = false;
+
+ UninitFieldSet UninitFields;
+
+public:
+ FindUninitializedFields(ProgramStateRef State,
+ const TypedValueRegion *const R, bool IsPedantic);
+ const UninitFieldSet &getUninitFields();
+
+private:
+ /// Adds a FieldChainInfo object to UninitFields. Returns true if an insertion
+ /// took place.
+ bool addFieldToUninits(FieldChainInfo LocalChain);
+
+ // For the purposes of this checker, we'll regard the object under checking as
+ // a directed tree, where
+ // * the root is the object under checking
+ // * every node is an object that is
+ // - a union
+ // - a non-union record
+ // - a pointer/reference
+ // - an array
+ // - of a primitive type, which we'll define later in a helper function.
+ // * the parent of each node is the object that contains it
+ // * every leaf is an array, a primitive object, a nullptr or an undefined
+ // pointer.
+ //
+ // Example:
+ //
+ // struct A {
+ // struct B {
+ // int x, y = 0;
+ // };
+ // B b;
+ // int *iptr = new int;
+ // B* bptr;
+ //
+ // A() {}
+ // };
+ //
+ // The directed tree:
+ //
+ //                ->x
+ //               /
+ //            ->b--->y
+ //           /
+ //          A-->iptr->(int value)
+ //           \
+ //            ->bptr
+ //
+ // From this we'll construct a vector of fieldchains, where each fieldchain
+ // represents an uninitialized field. An uninitialized field may be a
+ // primitive object, a pointer, a pointee or a union without a single
+ // initialized field.
+ // In the above example, for the default constructor call we'll end up with
+ // these fieldchains:
+ //
+ // this->b.x
+ // this->iptr (pointee uninit)
+ // this->bptr (pointer uninit)
+ //
+ // We'll traverse each node of the above graph with the appropriate one of
+ // these methods:
+
+ /// This method checks a region of a union object, and returns true if no
+ /// field is initialized within the region.
+ bool isUnionUninit(const TypedValueRegion *R);
+
+ /// This method checks a region of a non-union object, and returns true if
+ /// an uninitialized field is found within the region.
+ bool isNonUnionUninit(const TypedValueRegion *R, FieldChainInfo LocalChain);
+
+ /// This method checks a region of a pointer or reference object, and returns
+ /// true if the ptr/ref object itself or any field within the pointee's region
+ /// is uninitialized.
+ bool isPointerOrReferenceUninit(const FieldRegion *FR,
+ FieldChainInfo LocalChain);
+
+ /// This method returns true if the value of a primitive object is
+ /// uninitialized.
+ bool isPrimitiveUninit(const SVal &V);
+
+ // Note that we don't have a method for arrays -- the elements of an array
+ // are often left uninitialized intentionally even when the array is of a
+ // C++ record type, so we'll assume that an array is always initialized.
+ // TODO: Add support for nonloc::LocAsInteger.
+};
+
+} // end of anonymous namespace
+
+// Static variable instantiations.
+
+static llvm::ImmutableListFactory<const FieldRegion *> Factory;
+
+// Utility function declarations.
+
+/// Returns the object that was constructed by CtorDecl, or None if that isn't
+/// possible.
+static Optional<nonloc::LazyCompoundVal>
+getObjectVal(const CXXConstructorDecl *CtorDecl, CheckerContext &Context);
+
+/// Checks whether the constructor under checking is called by another
+/// constructor.
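+/// For example, if B's constructor constructs a base or member of type A, the
+/// end-of-function analysis of A's constructor is skipped at that point: the
+/// fields of the A subobject are checked as part of the B object instead.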
+static bool isCalledByConstructor(const CheckerContext &Context);
+
+/// Returns whether FD can be (transitively) dereferenced to a void pointer
+/// type (void*, void**, ...). The type of the region behind a void pointer
+/// isn't known, and thus FD cannot be analyzed.
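+/// For example, this returns true for fields of type 'void *' and 'void **',
+/// and false for 'int *'.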
+static bool isVoidPointer(const FieldDecl *FD);
+
+/// Returns true if T is a primitive type. We define this category so that
+/// objects that we'd only like to analyze as much as checking whether their
+/// value is undefined or not (such as ints and doubles) can be analyzed with
+/// ease. This also helps ensure that every special field type is handled
+/// correctly.
+static bool isPrimitiveType(const QualType &T) {
+ return T->isBuiltinType() || T->isEnumeralType() || T->isMemberPointerType();
+}
+
+/// Constructs a note message for a given FieldChainInfo object.
+static void printNoteMessage(llvm::raw_ostream &Out,
+ const FieldChainInfo &Chain);
+
+/// Returns Field's name. This is a helper function to get the correct name
+/// even if Field is a captured lambda variable.
+static StringRef getVariableName(const FieldDecl *Field);
+
+//===----------------------------------------------------------------------===//
+// Methods for UninitializedObjectChecker.
+//===----------------------------------------------------------------------===//
+
+void UninitializedObjectChecker::checkEndFunction(
+ const ReturnStmt *RS, CheckerContext &Context) const {
+
+ const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(
+ Context.getLocationContext()->getDecl());
+ if (!CtorDecl)
+ return;
+
+ if (!CtorDecl->isUserProvided())
+ return;
+
+ if (CtorDecl->getParent()->isUnion())
+ return;
+
+ // This avoids essentially the same error being reported multiple times.
+ if (isCalledByConstructor(Context))
+ return;
+
+ Optional<nonloc::LazyCompoundVal> Object = getObjectVal(CtorDecl, Context);
+ if (!Object)
+ return;
+
+ FindUninitializedFields F(Context.getState(), Object->getRegion(),
+ IsPedantic);
+
+ const UninitFieldSet &UninitFields = F.getUninitFields();
+
+ if (UninitFields.empty())
+ return;
+
+ // There are uninitialized fields in the record.
+
+ ExplodedNode *Node = Context.generateNonFatalErrorNode(Context.getState());
+ if (!Node)
+ return;
+
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const Stmt *CallSite = Context.getStackFrame()->getCallSite();
+ if (CallSite)
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+ CallSite, Context.getSourceManager(), Node->getLocationContext());
+
+ // For Plist consumers that don't support notes just yet, we'll convert notes
+ // to warnings.
+ if (ShouldConvertNotesToWarnings) {
+ for (const auto &Chain : UninitFields) {
+ SmallString<100> WarningBuf;
+ llvm::raw_svector_ostream WarningOS(WarningBuf);
+
+ printNoteMessage(WarningOS, Chain);
+
+ auto Report = llvm::make_unique<BugReport>(
+ *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
+ Node->getLocationContext()->getDecl());
+ Context.emitReport(std::move(Report));
+ }
+ return;
+ }
+
+ SmallString<100> WarningBuf;
+ llvm::raw_svector_ostream WarningOS(WarningBuf);
+ WarningOS << UninitFields.size() << " uninitialized field"
+ << (UninitFields.size() == 1 ? "" : "s")
+ << " at the end of the constructor call";
+
+ auto Report = llvm::make_unique<BugReport>(
+ *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
+ Node->getLocationContext()->getDecl());
+
+ for (const auto &Chain : UninitFields) {
+ SmallString<200> NoteBuf;
+ llvm::raw_svector_ostream NoteOS(NoteBuf);
+
+ printNoteMessage(NoteOS, Chain);
+
+ Report->addNote(NoteOS.str(),
+ PathDiagnosticLocation::create(Chain.getEndOfChain(),
+ Context.getSourceManager()));
+ }
+ Context.emitReport(std::move(Report));
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for FindUninitializedFields.
+//===----------------------------------------------------------------------===//
+
+FindUninitializedFields::FindUninitializedFields(
+ ProgramStateRef State, const TypedValueRegion *const R, bool IsPedantic)
+ : State(State), ObjectR(R), IsPedantic(IsPedantic) {}
+
+const UninitFieldSet &FindUninitializedFields::getUninitFields() {
+ isNonUnionUninit(ObjectR, FieldChainInfo());
+
+ if (!IsPedantic && !IsAnyFieldInitialized)
+ UninitFields.clear();
+
+ return UninitFields;
+}
+
+bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain) {
+ if (State->getStateManager().getContext().getSourceManager().isInSystemHeader(
+ Chain.getEndOfChain()->getLocation()))
+ return false;
+
+ return UninitFields.insert(Chain).second;
+}
+
+bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
+ FieldChainInfo LocalChain) {
+ assert(R->getValueType()->isRecordType() &&
+ !R->getValueType()->isUnionType() &&
+ "This method only checks non-union record objects!");
+
+ const RecordDecl *RD =
+ R->getValueType()->getAs<RecordType>()->getDecl()->getDefinition();
+ assert(RD && "Referred record has no definition");
+
+ bool ContainsUninitField = false;
+
+ // Are all of this non-union's fields initialized?
+ for (const FieldDecl *I : RD->fields()) {
+
+ const auto FieldVal =
+ State->getLValue(I, loc::MemRegionVal(R)).castAs<loc::MemRegionVal>();
+ const auto *FR = FieldVal.getRegionAs<FieldRegion>();
+ QualType T = I->getType();
+
+ // If LocalChain already contains FR, then we encountered a cyclic
+ // reference. In this case, region FR is already under checking at an
+ // earlier node in the directed tree.
+ if (LocalChain.contains(FR))
+ return false;
+
+ if (T->isStructureOrClassType()) {
+ if (isNonUnionUninit(FR, {LocalChain, FR}))
+ ContainsUninitField = true;
+ continue;
+ }
+
+ if (T->isUnionType()) {
+ if (isUnionUninit(FR)) {
+ if (addFieldToUninits({LocalChain, FR}))
+ ContainsUninitField = true;
+ } else
+ IsAnyFieldInitialized = true;
+ continue;
+ }
+
+ if (T->isArrayType()) {
+ IsAnyFieldInitialized = true;
+ continue;
+ }
+
+ if (T->isPointerType() || T->isReferenceType()) {
+ if (isPointerOrReferenceUninit(FR, LocalChain))
+ ContainsUninitField = true;
+ continue;
+ }
+
+ if (isPrimitiveType(T)) {
+ SVal V = State->getSVal(FieldVal);
+
+ if (isPrimitiveUninit(V)) {
+ if (addFieldToUninits({LocalChain, FR}))
+ ContainsUninitField = true;
+ }
+ continue;
+ }
+
+ llvm_unreachable("All cases are handled!");
+ }
+
+ // Checking bases.
+ // FIXME: As of now, because of `isCalledByConstructor`, objects whose type
+ // is a descendant of another type will emit warnings for uninitialized
+ // inherited members.
+ // This is not the only way to analyze bases of an object -- if we didn't
+ // filter them out, and didn't analyze the bases, this checker would run for
+ // each base of the object in order of base initialization and in theory
+ // would find every uninitialized field. This approach could also make
+ // handling diamond inheritance easier.
+ //
+ // This rule (that a descendant type's constructor is responsible for
+ // initializing inherited data members) is not obvious, and it should be.
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ if (!CXXRD)
+ return ContainsUninitField;
+
+ for (const CXXBaseSpecifier &BaseSpec : CXXRD->bases()) {
+ const auto *BaseRegion = State->getLValue(BaseSpec, R)
+ .castAs<loc::MemRegionVal>()
+ .getRegionAs<TypedValueRegion>();
+
+ if (isNonUnionUninit(BaseRegion, LocalChain))
+ ContainsUninitField = true;
+ }
+
+ return ContainsUninitField;
+}
+
+bool FindUninitializedFields::isUnionUninit(const TypedValueRegion *R) {
+ assert(R->getValueType()->isUnionType() &&
+ "This method only checks union objects!");
+ // TODO: Implement support for union fields.
+ return false;
+}
+
+// Note that pointers/references don't contain fields themselves, so in this
+// function we won't add anything to LocalChain.
+bool FindUninitializedFields::isPointerOrReferenceUninit(
+ const FieldRegion *FR, FieldChainInfo LocalChain) {
+
+ assert((FR->getDecl()->getType()->isPointerType() ||
+ FR->getDecl()->getType()->isReferenceType()) &&
+ "This method only checks pointer/reference objects!");
+
+ SVal V = State->getSVal(FR);
+
+ if (V.isUnknown() || V.isZeroConstant()) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ if (V.isUndef()) {
+ return addFieldToUninits({LocalChain, FR});
+ }
+
+ const FieldDecl *FD = FR->getDecl();
+
+ // TODO: The dynamic type of a void pointer may be retrieved with
+ // `getDynamicTypeInfo`.
+ if (isVoidPointer(FD)) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ assert(V.getAs<Loc>() && "V should be Loc at this point!");
+
+ // At this point the pointer itself is initialized and points to a valid
+ // location; we'll now check the pointee.
+ SVal DerefdV = State->getSVal(V.castAs<Loc>());
+
+ // TODO: Dereferencing should be done according to the dynamic type.
+ while (Optional<Loc> L = DerefdV.getAs<Loc>()) {
+ DerefdV = State->getSVal(*L);
+ }
+
+ // If V is a pointer pointing to a record type.
+ if (Optional<nonloc::LazyCompoundVal> RecordV =
+ DerefdV.getAs<nonloc::LazyCompoundVal>()) {
+
+ const TypedValueRegion *R = RecordV->getRegion();
+
+ // We can't reason about symbolic regions, so assume it's initialized.
+ // Note that this also avoids a potential infinite recursion, because
+ // constructors for list-like classes are checked without being called, and
+ // the Static Analyzer will construct a symbolic region for fields like
+ // 'Node *next;'.
+ if (R->getSymbolicBase()) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ const QualType T = R->getValueType();
+
+ if (T->isStructureOrClassType())
+ return isNonUnionUninit(R, {LocalChain, FR});
+
+ if (T->isUnionType()) {
+ if (isUnionUninit(R)) {
+ return addFieldToUninits({LocalChain, FR, /*IsDereferenced*/ true});
+ } else {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+ }
+
+ if (T->isArrayType()) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ llvm_unreachable("All cases are handled!");
+ }
+
+ // TODO: If possible, it should be asserted that the DerefdV at this point is
+ // primitive.
+
+ if (isPrimitiveUninit(DerefdV))
+ return addFieldToUninits({LocalChain, FR, /*IsDereferenced*/ true});
+
+ IsAnyFieldInitialized = true;
+ return false;
+}
+
+bool FindUninitializedFields::isPrimitiveUninit(const SVal &V) {
+ if (V.isUndef())
+ return true;
+
+ IsAnyFieldInitialized = true;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for FieldChainInfo.
+//===----------------------------------------------------------------------===//
+
+FieldChainInfo::FieldChainInfo(const FieldChainInfo &Other,
+ const FieldRegion *FR, const bool IsDereferenced)
+ : FieldChainInfo(Other, IsDereferenced) {
+ assert(!contains(FR) && "Can't add a field that is already a part of the "
+ "fieldchain! Is this a cyclic reference?");
+ Chain = Factory.add(FR, Other.Chain);
+}
+
+bool FieldChainInfo::isPointer() const {
+ assert(!Chain.isEmpty() && "Empty fieldchain!");
+ return (*Chain.begin())->getDecl()->getType()->isPointerType();
+}
+
+bool FieldChainInfo::isDereferenced() const {
+ assert(isPointer() && "Only pointers may or may not be dereferenced!");
+ return IsDereferenced;
+}
+
+const FieldDecl *FieldChainInfo::getEndOfChain() const {
+ assert(!Chain.isEmpty() && "Empty fieldchain!");
+ return (*Chain.begin())->getDecl();
+}
+
+// TODO: This function constructs an incorrect fieldchain string in the
+// following case:
+//
+// struct Base { int x; };
+// struct D1 : Base {}; struct D2 : Base {};
+//
+// struct MostDerived : D1, D2 {
+// MostDerived() {}
+// };
+//
+// A call to MostDerived::MostDerived() will cause two notes that say
+// "uninitialized field 'this->x'", but we can't refer to 'x' directly --
+// we need explicit namespace resolution to tell whether the uninit field
+// was 'D1::x' or 'D2::x'.
+void FieldChainInfo::print(llvm::raw_ostream &Out) const {
+ if (Chain.isEmpty())
+ return;
+
+ const llvm::ImmutableListImpl<const FieldRegion *> *L =
+ Chain.getInternalPointer();
+ printTail(Out, L->getTail());
+ Out << getVariableName(L->getHead()->getDecl());
+}
+
+void FieldChainInfo::printTail(
+ llvm::raw_ostream &Out,
+ const llvm::ImmutableListImpl<const FieldRegion *> *L) {
+ if (!L)
+ return;
+
+ printTail(Out, L->getTail());
+ const FieldDecl *Field = L->getHead()->getDecl();
+ Out << getVariableName(Field);
+ Out << (Field->getType()->isPointerType() ? "->" : ".");
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static bool isVoidPointer(const FieldDecl *FD) {
+ QualType T = FD->getType();
+
+ while (!T.isNull()) {
+ if (T->isVoidPointerType())
+ return true;
+ T = T->getPointeeType();
+ }
+ return false;
+}
+
+static Optional<nonloc::LazyCompoundVal>
+getObjectVal(const CXXConstructorDecl *CtorDecl, CheckerContext &Context) {
+
+ Loc ThisLoc = Context.getSValBuilder().getCXXThis(CtorDecl->getParent(),
+ Context.getStackFrame());
+ // Getting the value for 'this'.
+ SVal This = Context.getState()->getSVal(ThisLoc);
+
+ // Getting the value for '*this'.
+ SVal Object = Context.getState()->getSVal(This.castAs<Loc>());
+
+ return Object.getAs<nonloc::LazyCompoundVal>();
+}
+
+// TODO: When the constructor is called by another constructor, we should also
+// check whether the two are in any relation to one another. In its current
+// state, this introduces some false negatives.
+static bool isCalledByConstructor(const CheckerContext &Context) {
+ const LocationContext *LC = Context.getLocationContext()->getParent();
+
+ while (LC) {
+ if (isa<CXXConstructorDecl>(LC->getDecl()))
+ return true;
+
+ LC = LC->getParent();
+ }
+ return false;
+}
+
+static void printNoteMessage(llvm::raw_ostream &Out,
+ const FieldChainInfo &Chain) {
+ if (Chain.isPointer()) {
+ if (Chain.isDereferenced())
+ Out << "uninitialized pointee 'this->";
+ else
+ Out << "uninitialized pointer 'this->";
+ } else
+ Out << "uninitialized field 'this->";
+ Chain.print(Out);
+ Out << "'";
+}
+
+static StringRef getVariableName(const FieldDecl *Field) {
+ // If Field is a captured lambda variable, Field->getName() will return an
+ // empty string. We can however acquire its name from the lambda's
+ // captures.
+ const auto *CXXParent = dyn_cast<CXXRecordDecl>(Field->getParent());
+
+ if (CXXParent && CXXParent->isLambda()) {
+ assert(CXXParent->captures_begin());
+ auto It = CXXParent->captures_begin() + Field->getFieldIndex();
+ return It->getCapturedVar()->getName();
+ }
+
+ return Field->getName();
+}
+
+void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
+ auto Chk = Mgr.registerChecker<UninitializedObjectChecker>();
+ Chk->IsPedantic = Mgr.getAnalyzerOptions().getBooleanOption(
+ "Pedantic", /*DefaultVal*/ false, Chk);
+ Chk->ShouldConvertNotesToWarnings = Mgr.getAnalyzerOptions().getBooleanOption(
+ "NotesAsWarnings", /*DefaultVal*/ false, Chk);
+}
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 7f9a00ff876d..a6b50dc37740 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -194,7 +194,7 @@ void UnixAPIChecker::CheckOpenVariant(CheckerContext &C,
// Now check if oflags has O_CREAT set.
const Expr *oflagsEx = CE->getArg(FlagsArgIndex);
- const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
+ const SVal V = C.getSVal(oflagsEx);
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
@@ -248,8 +248,7 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
// Check if the first argument is stack allocated. If so, issue a warning
// because that's likely to be bad news.
ProgramStateRef state = C.getState();
- const MemRegion *R =
- state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+ const MemRegion *R = C.getSVal(CE->getArg(0)).getAsRegion();
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
@@ -336,7 +335,7 @@ void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
ProgramStateRef state = C.getState();
ProgramStateRef trueState = nullptr, falseState = nullptr;
const Expr *arg = CE->getArg(sizeArg);
- SVal argVal = state->getSVal(arg, C.getLocationContext());
+ SVal argVal = C.getSVal(arg);
if (argVal.isUnknownOrUndef())
return;
@@ -364,7 +363,7 @@ void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
unsigned int i;
for (i = 0; i < nArgs; i++) {
const Expr *arg = CE->getArg(i);
- SVal argVal = state->getSVal(arg, C.getLocationContext());
+ SVal argVal = C.getSVal(arg);
if (argVal.isUnknownOrUndef()) {
if (i == 0)
continue;
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 6f21e868b174..dbd12cc9b65a 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -132,7 +132,8 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
ci != ce; ++ci) {
if (Optional<CFGStmt> S = (*ci).getAs<CFGStmt>())
if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
- if (CE->getBuiltinCallee() == Builtin::BI__builtin_unreachable) {
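+ // A call to __builtin_assume(false) (or an equivalent assume builtin with
+ // a false argument) marks the code that follows as unreachable, just like
+ // __builtin_unreachable.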
+ if (CE->getBuiltinCallee() == Builtin::BI__builtin_unreachable ||
+ CE->isBuiltinAssumeFalse(Eng.getContext())) {
foundUnreachable = true;
break;
}
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 40217bdee892..2584f2011819 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -32,19 +32,18 @@ class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
mutable std::unique_ptr<BugType> BT;
enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted, VLA_Negative };
- void reportBug(VLASize_Kind Kind,
- const Expr *SizeE,
- ProgramStateRef State,
- CheckerContext &C) const;
+ void reportBug(VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
+ CheckerContext &C,
+ std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+
public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
};
} // end anonymous namespace
-void VLASizeChecker::reportBug(VLASize_Kind Kind,
- const Expr *SizeE,
- ProgramStateRef State,
- CheckerContext &C) const {
+void VLASizeChecker::reportBug(
+ VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
+ CheckerContext &C, std::unique_ptr<BugReporterVisitor> Visitor) const {
// Generate an error node.
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
@@ -73,6 +72,7 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind,
}
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
+ report->addVisitor(std::move(Visitor));
report->addRange(SizeE->getSourceRange());
bugreporter::trackNullOrUndefValue(N, SizeE, *report);
C.emitReport(std::move(report));
@@ -94,7 +94,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// FIXME: Handle multi-dimensional VLAs.
const Expr *SE = VLA->getSizeExpr();
ProgramStateRef state = C.getState();
- SVal sizeV = state->getSVal(SE, C.getLocationContext());
+ SVal sizeV = C.getSVal(SE);
if (sizeV.isUndef()) {
reportBug(VLA_Garbage, SE, state, C);
@@ -108,7 +108,8 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// Check if the size is tainted.
if (state->isTainted(sizeV)) {
- reportBug(VLA_Tainted, SE, nullptr, C);
+ reportBug(VLA_Tainted, SE, nullptr, C,
+ llvm::make_unique<TaintBugVisitor>(sizeV));
return;
}
diff --git a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index 06c4ef71d80b..bd657340fcfb 100644
--- a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -56,7 +56,6 @@ public:
private:
const MemRegion *getVAListAsRegion(SVal SV, const Expr *VAExpr,
bool &IsSymbolic, CheckerContext &C) const;
- StringRef getVariableNameFromRegion(const MemRegion *Reg) const;
const ExplodedNode *getStartCallSite(const ExplodedNode *N,
const MemRegion *Reg) const;
@@ -64,13 +63,13 @@ private:
CheckerContext &C) const;
void reportLeakedVALists(const RegionVector &LeakedVALists, StringRef Msg1,
StringRef Msg2, CheckerContext &C, ExplodedNode *N,
- bool ForceReport = false) const;
+ bool ReportUninit = false) const;
void checkVAListStartCall(const CallEvent &Call, CheckerContext &C,
bool IsCopy) const;
void checkVAListEndCall(const CallEvent &Call, CheckerContext &C) const;
- class ValistBugVisitor : public BugReporterVisitorImpl<ValistBugVisitor> {
+ class ValistBugVisitor : public BugReporterVisitor {
public:
ValistBugVisitor(const MemRegion *Reg, bool IsLeak = false)
: Reg(Reg), IsLeak(IsLeak) {}
@@ -79,7 +78,7 @@ private:
ID.AddPointer(&X);
ID.AddPointer(Reg);
}
- std::unique_ptr<PathDiagnosticPiece>
+ std::shared_ptr<PathDiagnosticPiece>
getEndPath(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
BugReport &BR) override {
if (!IsLeak)
@@ -88,8 +87,7 @@ private:
PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(
EndPathNode, BRC.getSourceManager());
// Do not add the statement itself as a range in case of leak.
- return llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
- false);
+ return std::make_shared<PathDiagnosticEventPiece>(L, BR.getDescription(), false);
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
@@ -189,7 +187,7 @@ void ValistChecker::checkPreStmt(const VAArgExpr *VAA,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
const Expr *VASubExpr = VAA->getSubExpr();
- SVal VAListSVal = State->getSVal(VASubExpr, C.getLocationContext());
+ SVal VAListSVal = C.getSVal(VASubExpr);
bool Symbolic;
const MemRegion *VAList =
getVAListAsRegion(VAListSVal, VASubExpr, Symbolic, C);
@@ -267,15 +265,19 @@ void ValistChecker::reportUninitializedAccess(const MemRegion *VAList,
void ValistChecker::reportLeakedVALists(const RegionVector &LeakedVALists,
StringRef Msg1, StringRef Msg2,
CheckerContext &C, ExplodedNode *N,
- bool ForceReport) const {
+ bool ReportUninit) const {
if (!(ChecksEnabled[CK_Unterminated] ||
- (ChecksEnabled[CK_Uninitialized] && ForceReport)))
+ (ChecksEnabled[CK_Uninitialized] && ReportUninit)))
return;
for (auto Reg : LeakedVALists) {
if (!BT_leakedvalist) {
- BT_leakedvalist.reset(new BugType(CheckNames[CK_Unterminated],
- "Leaked va_list",
- categories::MemoryError));
+ // FIXME: maybe creating a new check name for this type of bug is a better
+ // solution.
+ BT_leakedvalist.reset(
+ new BugType(CheckNames[CK_Unterminated].getName().empty()
+ ? CheckNames[CK_Uninitialized]
+ : CheckNames[CK_Unterminated],
+ "Leaked va_list", categories::MemoryError));
BT_leakedvalist->setSuppressOnSink(true);
}
@@ -375,7 +377,7 @@ void ValistChecker::checkVAListEndCall(const CallEvent &Call,
std::shared_ptr<PathDiagnosticPiece> ValistChecker::ValistBugVisitor::VisitNode(
const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
- BugReport &BR) {
+ BugReport &) {
ProgramStateRef State = N->getState();
ProgramStateRef StatePrev = PrevN->getState();
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index c5010f53785a..5b602468cdd4 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -48,7 +48,7 @@ public:
DefaultBool IsPureOnly;
void checkBeginFunction(CheckerContext &C) const;
- void checkEndFunction(CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
private:
@@ -57,7 +57,7 @@ private:
void reportBug(StringRef Msg, bool PureError, const MemRegion *Reg,
CheckerContext &C) const;
- class VirtualBugVisitor : public BugReporterVisitorImpl<VirtualBugVisitor> {
+ class VirtualBugVisitor : public BugReporterVisitor {
private:
const MemRegion *ObjectRegion;
bool Found;
@@ -108,7 +108,7 @@ VirtualCallChecker::VirtualBugVisitor::VisitNode(const ExplodedNode *N,
if (!MD)
return nullptr;
auto ThiSVal =
- State->getSVal(SVB.getCXXThis(MD, LCtx->getCurrentStackFrame()));
+ State->getSVal(SVB.getCXXThis(MD, LCtx->getStackFrame()));
const MemRegion *Reg = ThiSVal.castAs<loc::MemRegionVal>().getRegion();
if (!Reg)
return nullptr;
@@ -167,7 +167,8 @@ void VirtualCallChecker::checkBeginFunction(CheckerContext &C) const {
}
// The EndFunction callback when leave a constructor or a destructor.
-void VirtualCallChecker::checkEndFunction(CheckerContext &C) const {
+void VirtualCallChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &C) const {
registerCtorDtorCallInState(false, C);
}
@@ -230,7 +231,7 @@ void VirtualCallChecker::registerCtorDtorCallInState(bool IsBeginFunction,
// Enter a constructor, set the corresponding memregion be true.
if (isa<CXXConstructorDecl>(MD)) {
auto ThiSVal =
- State->getSVal(SVB.getCXXThis(MD, LCtx->getCurrentStackFrame()));
+ State->getSVal(SVB.getCXXThis(MD, LCtx->getStackFrame()));
const MemRegion *Reg = ThiSVal.getAsRegion();
if (IsBeginFunction)
State = State->set<CtorDtorMap>(Reg, ObjectState::CtorCalled);
@@ -244,7 +245,7 @@ void VirtualCallChecker::registerCtorDtorCallInState(bool IsBeginFunction,
// Enter a Destructor, set the corresponding memregion be true.
if (isa<CXXDestructorDecl>(MD)) {
auto ThiSVal =
- State->getSVal(SVB.getCXXThis(MD, LCtx->getCurrentStackFrame()));
+ State->getSVal(SVB.getCXXThis(MD, LCtx->getStackFrame()));
const MemRegion *Reg = ThiSVal.getAsRegion();
if (IsBeginFunction)
State = State->set<CtorDtorMap>(Reg, ObjectState::DtorCalled);
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 1cc08f0d9fe7..dc0d3ec8493a 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -26,9 +26,12 @@ AnalysisManager::AnalysisManager(
// Adding LoopExit elements to the CFG is a requirement for loop
// unrolling.
Options.includeLoopExitInCFG() || Options.shouldUnrollLoops(),
+ Options.includeScopesInCFG(),
Options.shouldSynthesizeBodies(),
Options.shouldConditionalizeStaticInitializers(),
/*addCXXNewAllocator=*/true,
+ Options.includeRichConstructorsInCFG(),
+ Options.shouldElideConstructors(),
injector),
Ctx(ASTCtx), Diags(diags), LangOpts(lang), PathConsumers(PDC),
CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 48e3e22af04a..9b2dc32e0600 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -1,4 +1,4 @@
-//===-- AnalyzerOptions.cpp - Analysis Engine Options -----------*- C++ -*-===//
+//===- AnalyzerOptions.cpp - Analysis Engine Options ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,8 +16,15 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <utility>
+#include <vector>
using namespace clang;
using namespace ento;
@@ -55,9 +62,32 @@ AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
return UserMode;
}
+AnalyzerOptions::ExplorationStrategyKind
+AnalyzerOptions::getExplorationStrategy() {
+ if (ExplorationStrategy == ExplorationStrategyKind::NotSet) {
+ StringRef StratStr =
+ Config
+ .insert(std::make_pair("exploration_strategy", "unexplored_first_queue"))
+ .first->second;
+ ExplorationStrategy =
+ llvm::StringSwitch<ExplorationStrategyKind>(StratStr)
+ .Case("dfs", ExplorationStrategyKind::DFS)
+ .Case("bfs", ExplorationStrategyKind::BFS)
+ .Case("unexplored_first",
+ ExplorationStrategyKind::UnexploredFirst)
+ .Case("unexplored_first_queue",
+ ExplorationStrategyKind::UnexploredFirstQueue)
+ .Case("bfs_block_dfs_contents",
+ ExplorationStrategyKind::BFSBlockDFSContents)
+ .Default(ExplorationStrategyKind::NotSet);
+ assert(ExplorationStrategy != ExplorationStrategyKind::NotSet &&
+ "User mode is invalid.");
+ }
+ return ExplorationStrategy;
+}
+
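+// The strategy can be selected on the command line, e.g. (sketch):
+//   clang --analyze <file> -Xclang -analyzer-config \
+//     -Xclang exploration_strategy=dfs
+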
IPAKind AnalyzerOptions::getIPAMode() {
if (IPAMode == IPAK_NotSet) {
-
// Use the User Mode to set the default IPA value.
// Note, we have to add the string to the Config map for the ConfigDumper
// checker to function properly.
@@ -169,7 +199,7 @@ bool AnalyzerOptions::getBooleanOption(Optional<bool> &V, StringRef Name,
bool AnalyzerOptions::includeTemporaryDtorsInCFG() {
return getBooleanOption(IncludeTemporaryDtorsInCFG,
"cfg-temporary-dtors",
- /* Default = */ false);
+ /* Default = */ true);
}
bool AnalyzerOptions::includeImplicitDtorsInCFG() {
@@ -185,7 +215,19 @@ bool AnalyzerOptions::includeLifetimeInCFG() {
bool AnalyzerOptions::includeLoopExitInCFG() {
return getBooleanOption(IncludeLoopExitInCFG, "cfg-loopexit",
- /* Default = */ false);
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::includeRichConstructorsInCFG() {
+ return getBooleanOption(IncludeRichConstructorsInCFG,
+ "cfg-rich-constructors",
+ /* Default = */ true);
+}
+
+bool AnalyzerOptions::includeScopesInCFG() {
+ return getBooleanOption(IncludeScopesInCFG,
+ "cfg-scopes",
+ /* Default = */ false);
}
bool AnalyzerOptions::mayInlineCXXStandardLibrary() {
@@ -203,7 +245,7 @@ bool AnalyzerOptions::mayInlineTemplateFunctions() {
bool AnalyzerOptions::mayInlineCXXAllocator() {
return getBooleanOption(InlineCXXAllocator,
"c++-allocator-inlining",
- /*Default=*/false);
+ /*Default=*/true);
}
bool AnalyzerOptions::mayInlineCXXContainerMethods() {
@@ -218,6 +260,11 @@ bool AnalyzerOptions::mayInlineCXXSharedPtrDtor() {
/*Default=*/false);
}
+bool AnalyzerOptions::mayInlineCXXTemporaryDtors() {
+ return getBooleanOption(InlineCXXTemporaryDtors,
+ "c++-temp-dtor-inlining",
+ /*Default=*/true);
+}
bool AnalyzerOptions::mayInlineObjCMethod() {
return getBooleanOption(ObjCInliningMode,
@@ -249,6 +296,12 @@ bool AnalyzerOptions::shouldSuppressFromCXXStandardLibrary() {
/* Default = */ true);
}
+bool AnalyzerOptions::shouldCrosscheckWithZ3() {
+ return getBooleanOption(CrosscheckWithZ3,
+ "crosscheck-with-z3",
+ /* Default = */ false);
+}
+
bool AnalyzerOptions::shouldReportIssuesInMainSourceFile() {
return getBooleanOption(ReportIssuesInMainSourceFile,
"report-in-main-source-file",
@@ -262,6 +315,18 @@ bool AnalyzerOptions::shouldWriteStableReportFilename() {
/* Default = */ false);
}
+bool AnalyzerOptions::shouldSerializeStats() {
+ return getBooleanOption(SerializeStats,
+ "serialize-stats",
+ /* Default = */ false);
+}
+
+bool AnalyzerOptions::shouldElideConstructors() {
+ return getBooleanOption(ElideConstructors,
+ "elide-constructors",
+ /* Default = */ true);
+}
+
int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal,
const CheckerBase *C,
bool SearchInParents) {
@@ -299,7 +364,6 @@ unsigned AnalyzerOptions::getAlwaysInlineSize() {
unsigned AnalyzerOptions::getMaxInlinableSize() {
if (!MaxInlinableSize.hasValue()) {
-
int DefaultValue = 0;
UserModeKind HighLevelMode = getUserMode();
switch (HighLevelMode) {
@@ -324,6 +388,12 @@ unsigned AnalyzerOptions::getGraphTrimInterval() {
return GraphTrimInterval.getValue();
}
+unsigned AnalyzerOptions::getMaxSymbolComplexity() {
+ if (!MaxSymbolComplexity.hasValue())
+ MaxSymbolComplexity = getOptionAsInteger("max-symbol-complexity", 35);
+ return MaxSymbolComplexity.getValue();
+}
+
unsigned AnalyzerOptions::getMaxTimesInlineLarge() {
if (!MaxTimesInlineLarge.hasValue())
MaxTimesInlineLarge = getOptionAsInteger("max-times-inline-large", 32);
@@ -392,3 +462,34 @@ bool AnalyzerOptions::shouldDisplayNotesAsEvents() {
getBooleanOption("notes-as-events", /*Default=*/false);
return DisplayNotesAsEvents.getValue();
}
+
+bool AnalyzerOptions::shouldAggressivelySimplifyBinaryOperation() {
+ if (!AggressiveBinaryOperationSimplification.hasValue())
+ AggressiveBinaryOperationSimplification =
+ getBooleanOption("aggressive-binary-operation-simplification",
+ /*Default=*/false);
+ return AggressiveBinaryOperationSimplification.getValue();
+}
+
+StringRef AnalyzerOptions::getCTUDir() {
+ if (!CTUDir.hasValue()) {
+ CTUDir = getOptionAsString("ctu-dir", "");
+ if (!llvm::sys::fs::is_directory(*CTUDir))
+ CTUDir = "";
+ }
+ return CTUDir.getValue();
+}
+
+bool AnalyzerOptions::naiveCTUEnabled() {
+ if (!NaiveCTU.hasValue()) {
+ NaiveCTU = getBooleanOption("experimental-enable-naive-ctu-analysis",
+ /*Default=*/false);
+ }
+ return NaiveCTU.getValue();
+}
+
+StringRef AnalyzerOptions::getCTUIndexName() {
+ if (!CTUIndexName.hasValue())
+ CTUIndexName = getOptionAsString("ctu-index-name", "externalFnMap.txt");
+ return CTUIndexName.getValue();
+}
diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index ec7a7e9e4b1c..db4c1432ccc3 100644
--- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -1,4 +1,4 @@
-//=== BasicValueFactory.cpp - Basic values for Path Sens analysis --*- C++ -*-//
+//===- BasicValueFactory.cpp - Basic values for Path Sens analysis --------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,9 +13,18 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
using namespace clang;
using namespace ento;
@@ -40,10 +49,11 @@ void PointerToMemberData::Profile(
ID.AddPointer(L.getInternalPointer());
}
-typedef std::pair<SVal, uintptr_t> SValData;
-typedef std::pair<SVal, SVal> SValPair;
+using SValData = std::pair<SVal, uintptr_t>;
+using SValPair = std::pair<SVal, SVal>;
namespace llvm {
+
template<> struct FoldingSetTrait<SValData> {
static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) {
X.first.Profile(ID);
@@ -57,20 +67,21 @@ template<> struct FoldingSetTrait<SValPair> {
X.second.Profile(ID);
}
};
-}
-typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> >
- PersistentSValsTy;
+} // namespace llvm
-typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> >
- PersistentSValPairsTy;
+using PersistentSValsTy =
+ llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData>>;
+
+using PersistentSValPairsTy =
+ llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair>>;
BasicValueFactory::~BasicValueFactory() {
// Note that the dstor for the contents of APSIntSet will never be called,
// so we iterate over the set and invoke the dstor for each APSInt. This
// frees an aux. memory allocated to represent very large constants.
- for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
- I->getValue().~APSInt();
+ for (const auto &I : APSIntSet)
+ I.getValue().~APSInt();
delete (PersistentSValsTy*) PersistentSVals;
delete (PersistentSValPairsTy*) PersistentSValPairs;
@@ -79,7 +90,8 @@ BasicValueFactory::~BasicValueFactory() {
const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
llvm::FoldingSetNodeID ID;
void *InsertPos;
- typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
+
+ using FoldNodeTy = llvm::FoldingSetNodeWrapper<llvm::APSInt>;
X.Profile(ID);
FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
@@ -107,14 +119,12 @@ const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
}
const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
-
return getValue(getAPSIntType(T).getValue(X));
}
const CompoundValData*
BasicValueFactory::getCompoundValData(QualType T,
llvm::ImmutableList<SVal> Vals) {
-
llvm::FoldingSetNodeID ID;
CompoundValData::Profile(ID, T, Vals);
void *InsertPos;
@@ -150,7 +160,7 @@ BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
}
const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
- const DeclaratorDecl *DD, llvm::ImmutableList<const CXXBaseSpecifier*> L) {
+ const DeclaratorDecl *DD, llvm::ImmutableList<const CXXBaseSpecifier *> L) {
llvm::FoldingSetNodeID ID;
PointerToMemberData::Profile(ID, DD, L);
void *InsertPos;
@@ -167,7 +177,7 @@ const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
return D;
}
-const clang::ento::PointerToMemberData *BasicValueFactory::accumCXXBase(
+const PointerToMemberData *BasicValueFactory::accumCXXBase(
llvm::iterator_range<CastExpr::path_const_iterator> PathRange,
const nonloc::PointerToMember &PTM) {
nonloc::PointerToMember::PTMDataType PTMDT = PTM.getPTMData();
@@ -195,10 +205,9 @@ const clang::ento::PointerToMemberData *BasicValueFactory::accumCXXBase(
const llvm::APSInt*
BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
-
switch (Op) {
default:
- assert (false && "Invalid Opcode.");
+ assert(false && "Invalid Opcode.");
case BO_Mul:
return &getValue( V1 * V2 );
@@ -220,11 +229,9 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
return &getValue( V1 - V2 );
case BO_Shl: {
-
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
- // FIXME: Expand these checks to include all undefined behavior.
if (V1.isSigned() && V1.isNegative())
return nullptr;
@@ -236,16 +243,16 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
if (Amt >= V1.getBitWidth())
return nullptr;
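+
+ // Shifting a signed value left by more than its number of leading zeros
+ // shifts set bits out of the unsigned variant of the type, making the
+ // result unrepresentable and the shift undefined.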
+ if (V1.isSigned() && Amt > V1.countLeadingZeros())
+ return nullptr;
+
return &getValue( V1.operator<<( (unsigned) Amt ));
}
case BO_Shr: {
-
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
- // FIXME: Expand these checks to include all undefined behavior.
-
if (V2.isSigned() && V2.isNegative())
return nullptr;
@@ -288,10 +295,8 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
}
}
-
const std::pair<SVal, uintptr_t>&
BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
-
// Lazily create the folding set.
if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
@@ -302,7 +307,8 @@ BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
- typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
+ using FoldNodeTy = llvm::FoldingSetNodeWrapper<SValData>;
+
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
@@ -316,7 +322,6 @@ BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
const std::pair<SVal, SVal>&
BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
-
// Lazily create the folding set.
if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
@@ -327,7 +332,8 @@ BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
- typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
+ using FoldNodeTy = llvm::FoldingSetNodeWrapper<SValPair>;
+
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index dc284888eb03..f990eb6a058d 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -1,4 +1,4 @@
-// BugReporter.cpp - Generate PathDiagnostics for Bugs ------------*- C++ -*--//
+//===- BugReporter.cpp - Generate PathDiagnostics for bugs ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,28 +13,62 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
-#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
#include <memory>
#include <queue>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
using namespace clang;
using namespace ento;
@@ -47,7 +81,7 @@ STATISTIC(MaxValidBugClassSize,
"The maximum number of bug reports in the same equivalence class "
"where at least one report is valid (not suppressed)");
-BugReporterVisitor::~BugReporterVisitor() {}
+BugReporterVisitor::~BugReporterVisitor() = default;
void BugReporterContext::anchor() {}
@@ -127,10 +161,9 @@ static void removeRedundantMsgs(PathPieces &path) {
if (i == N-1)
break;
- if (PathDiagnosticEventPiece *nextEvent =
+ if (auto *nextEvent =
dyn_cast<PathDiagnosticEventPiece>(path.front().get())) {
- PathDiagnosticEventPiece *event =
- cast<PathDiagnosticEventPiece>(piece.get());
+ auto *event = cast<PathDiagnosticEventPiece>(piece.get());
// Check to see if we should keep one of the two pieces. If we
// come up with a preference, record which piece to keep, and consume
// another piece from the path.
@@ -152,15 +185,16 @@ static void removeRedundantMsgs(PathPieces &path) {
/// A map from PathDiagnosticPiece to the LocationContext of the inlined
/// function call it represents.
-typedef llvm::DenseMap<const PathPieces *, const LocationContext *>
- LocationContextMap;
+using LocationContextMap =
+ llvm::DenseMap<const PathPieces *, const LocationContext *>;
/// Recursively scan through a path and prune out calls and macros pieces
/// that aren't needed. Return true if afterwards the path contains
/// "interesting stuff" which means it shouldn't be pruned from the parent path.
static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
- LocationContextMap &LCM) {
- bool containsSomethingInteresting = false;
+ LocationContextMap &LCM,
+ bool IsInteresting = false) {
+ bool containsSomethingInteresting = IsInteresting;
const unsigned N = pieces.size();
for (unsigned i = 0 ; i < N ; ++i) {
@@ -174,12 +208,8 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
auto &call = cast<PathDiagnosticCallPiece>(*piece);
// Check if the location context is interesting.
assert(LCM.count(&call.path));
- if (R->isInteresting(LCM[&call.path])) {
- containsSomethingInteresting = true;
- break;
- }
-
- if (!removeUnneededCalls(call.path, R, LCM))
+ if (!removeUnneededCalls(call.path, R, LCM,
+ R->isInteresting(LCM[&call.path])))
continue;
containsSomethingInteresting = true;
@@ -187,7 +217,7 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
}
case PathDiagnosticPiece::Macro: {
auto &macro = cast<PathDiagnosticMacroPiece>(*piece);
- if (!removeUnneededCalls(macro.subPieces, R, LCM))
+ if (!removeUnneededCalls(macro.subPieces, R, LCM, IsInteresting))
continue;
containsSomethingInteresting = true;
break;
@@ -225,13 +255,11 @@ static bool hasImplicitBody(const Decl *D) {
static void
adjustCallLocations(PathPieces &Pieces,
PathDiagnosticLocation *LastCallLocation = nullptr) {
- for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E; ++I) {
- PathDiagnosticCallPiece *Call = dyn_cast<PathDiagnosticCallPiece>(I->get());
+ for (const auto &I : Pieces) {
+ auto *Call = dyn_cast<PathDiagnosticCallPiece>(I.get());
- if (!Call) {
- assert((*I)->getLocation().asLocation().isValid());
+ if (!Call)
continue;
- }
if (LastCallLocation) {
bool CallerIsImplicit = hasImplicitBody(Call->getCaller());
@@ -314,29 +342,19 @@ static void removePiecesWithInvalidLocations(PathPieces &Pieces) {
//===----------------------------------------------------------------------===//
namespace {
-class NodeMapClosure : public BugReport::NodeResolver {
- InterExplodedGraphMap &M;
-public:
- NodeMapClosure(InterExplodedGraphMap &m) : M(m) {}
-
- const ExplodedNode *getOriginalNode(const ExplodedNode *N) override {
- return M.lookup(N);
- }
-};
class PathDiagnosticBuilder : public BugReporterContext {
BugReport *R;
PathDiagnosticConsumer *PDC;
- NodeMapClosure NMC;
+
public:
const LocationContext *LC;
PathDiagnosticBuilder(GRBugReporter &br,
BugReport *r, InterExplodedGraphMap &Backmap,
PathDiagnosticConsumer *pdc)
- : BugReporterContext(br),
- R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
- {}
+ : BugReporterContext(br, Backmap), R(r), PDC(pdc),
+ LC(r->getErrorNode()->getLocationContext()) {}
PathDiagnosticLocation ExecutionContinues(const ExplodedNode *N);
@@ -353,19 +371,18 @@ public:
return getParentMap().getParent(S);
}
- NodeMapClosure& getNodeResolver() override { return NMC; }
-
PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
PathDiagnosticConsumer::PathGenerationScheme getGenerationScheme() const {
- return PDC ? PDC->getGenerationScheme() : PathDiagnosticConsumer::Extensive;
+ return PDC ? PDC->getGenerationScheme() : PathDiagnosticConsumer::Minimal;
}
bool supportsLogicalOpControlFlow() const {
return PDC ? PDC->supportsLogicalOpControlFlow() : true;
}
};
-} // end anonymous namespace
+
+} // namespace
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
@@ -379,7 +396,6 @@ PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream &os,
const ExplodedNode *N) {
-
// Slow, but probably doesn't matter.
if (os.str().empty())
os << ' ';
@@ -433,12 +449,12 @@ static PathDiagnosticLocation
getEnclosingStmtLocation(const Stmt *S, SourceManager &SMgr, const ParentMap &P,
const LocationContext *LC, bool allowNestedContexts) {
if (!S)
- return PathDiagnosticLocation();
+ return {};
while (const Stmt *Parent = getEnclosingParent(S, P)) {
switch (Parent->getStmtClass()) {
case Stmt::BinaryOperatorClass: {
- const BinaryOperator *B = cast<BinaryOperator>(Parent);
+ const auto *B = cast<BinaryOperator>(Parent);
if (B->isLogicalOp())
return PathDiagnosticLocation(allowNestedContexts ? B : S, SMgr, LC);
break;
@@ -504,46 +520,21 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
}
//===----------------------------------------------------------------------===//
-// "Visitors only" path diagnostic generation algorithm.
-//===----------------------------------------------------------------------===//
-static bool GenerateVisitorsOnlyPathDiagnostic(
- PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
- ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
- // All path generation skips the very first node (the error node).
- // This is because there is special handling for the end-of-path note.
- N = N->getFirstPred();
- if (!N)
- return true;
-
- BugReport *R = PDB.getBugReport();
- while (const ExplodedNode *Pred = N->getFirstPred()) {
- for (auto &V : visitors)
- // Visit all the node pairs, but throw the path pieces away.
- V->VisitNode(N, Pred, PDB, *R);
-
- N = Pred;
- }
-
- return R->isValid();
-}
-
-//===----------------------------------------------------------------------===//
// "Minimal" path diagnostic generation algorithm.
//===----------------------------------------------------------------------===//
-typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
-typedef SmallVector<StackDiagPair, 6> StackDiagVector;
+using StackDiagPair =
+ std::pair<PathDiagnosticCallPiece *, const ExplodedNode *>;
+using StackDiagVector = SmallVector<StackDiagPair, 6>;
static void updateStackPiecesWithMessage(PathDiagnosticPiece &P,
StackDiagVector &CallStack) {
// If the piece contains a special message, add it to all the call
// pieces on the active stack.
- if (PathDiagnosticEventPiece *ep = dyn_cast<PathDiagnosticEventPiece>(&P)) {
-
+ if (auto *ep = dyn_cast<PathDiagnosticEventPiece>(&P)) {
if (ep->hasCallStackHint())
- for (StackDiagVector::iterator I = CallStack.begin(),
- E = CallStack.end(); I != E; ++I) {
- PathDiagnosticCallPiece *CP = I->first;
- const ExplodedNode *N = I->second;
+ for (const auto &I : CallStack) {
+ PathDiagnosticCallPiece *CP = I.first;
+ const ExplodedNode *N = I.second;
std::string stackMsg = ep->getCallStackMessage(N);
// The last message on the path to final bug is the most important
@@ -557,693 +548,271 @@ static void updateStackPiecesWithMessage(PathDiagnosticPiece &P,
static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
-static bool GenerateMinimalPathDiagnostic(
- PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
- LocationContextMap &LCM,
- ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
-
- SourceManager& SMgr = PDB.getSourceManager();
- const LocationContext *LC = PDB.LC;
- const ExplodedNode *NextNode = N->pred_empty()
- ? nullptr : *(N->pred_begin());
-
- StackDiagVector CallStack;
-
- while (NextNode) {
- N = NextNode;
- PDB.LC = N->getLocationContext();
- NextNode = N->getFirstPred();
-
- ProgramPoint P = N->getLocation();
-
- do {
- if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
- auto C = PathDiagnosticCallPiece::construct(N, *CE, SMgr);
- // Record the mapping from call piece to LocationContext.
- LCM[&C->path] = CE->getCalleeContext();
- auto *P = C.get();
- PD.getActivePath().push_front(std::move(C));
- PD.pushActivePath(&P->path);
- CallStack.push_back(StackDiagPair(P, N));
- break;
- }
-
- if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
- // Flush all locations, and pop the active path.
- bool VisitedEntireCall = PD.isWithinCall();
- PD.popActivePath();
-
- // Either we just added a bunch of stuff to the top-level path, or
- // we have a previous CallExitEnd. If the former, it means that the
- // path terminated within a function call. We must then take the
- // current contents of the active path and place it within
- // a new PathDiagnosticCallPiece.
- PathDiagnosticCallPiece *C;
- if (VisitedEntireCall) {
- C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front().get());
- } else {
- const Decl *Caller = CE->getLocationContext()->getDecl();
- C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
- // Record the mapping from call piece to LocationContext.
- LCM[&C->path] = CE->getCalleeContext();
- }
-
- C->setCallee(*CE, SMgr);
- if (!CallStack.empty()) {
- assert(CallStack.back().first == C);
- CallStack.pop_back();
- }
- break;
- }
-
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
- const CFGBlock *Src = BE->getSrc();
- const CFGBlock *Dst = BE->getDst();
- const Stmt *T = Src->getTerminator();
-
- if (!T)
- break;
-
- PathDiagnosticLocation Start =
- PathDiagnosticLocation::createBegin(T, SMgr,
- N->getLocationContext());
-
- switch (T->getStmtClass()) {
- default:
- break;
-
- case Stmt::GotoStmtClass:
- case Stmt::IndirectGotoStmtClass: {
- const Stmt *S = PathDiagnosticLocation::getNextStmt(N);
-
- if (!S)
- break;
-
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
- const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
-
- os << "Control jumps to line "
- << End.asLocation().getExpansionLineNumber();
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- break;
- }
-
- case Stmt::SwitchStmtClass: {
- // Figure out what case arm we took.
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- if (const Stmt *S = Dst->getLabel()) {
- PathDiagnosticLocation End(S, SMgr, LC);
-
- switch (S->getStmtClass()) {
- default:
- os << "No cases match in the switch statement. "
- "Control jumps to line "
- << End.asLocation().getExpansionLineNumber();
- break;
- case Stmt::DefaultStmtClass:
- os << "Control jumps to the 'default' case at line "
- << End.asLocation().getExpansionLineNumber();
- break;
-
- case Stmt::CaseStmtClass: {
- os << "Control jumps to 'case ";
- const CaseStmt *Case = cast<CaseStmt>(S);
- const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
-
- // Determine if it is an enum.
- bool GetRawInt = true;
-
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
- // FIXME: Maybe this should be an assertion. Are there cases
- // were it is not an EnumConstantDecl?
- const EnumConstantDecl *D =
- dyn_cast<EnumConstantDecl>(DR->getDecl());
-
- if (D) {
- GetRawInt = false;
- os << *D;
- }
- }
-
- if (GetRawInt)
- os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
-
- os << ":' at line "
- << End.asLocation().getExpansionLineNumber();
- break;
- }
- }
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- else {
- os << "'Default' branch taken. ";
- const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
-
- break;
- }
-
- case Stmt::BreakStmtClass:
- case Stmt::ContinueStmtClass: {
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
- PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- break;
- }
-
- // Determine control-flow for ternary '?'.
- case Stmt::BinaryConditionalOperatorClass:
- case Stmt::ConditionalOperatorClass: {
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
- os << "'?' condition is ";
-
- if (*(Src->succ_begin()+1) == Dst)
- os << "false";
- else
- os << "true";
-
- PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
- if (const Stmt *S = End.asStmt())
- End = PDB.getEnclosingStmtLocation(S);
-
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- break;
- }
-
- // Determine control-flow for short-circuited '&&' and '||'.
- case Stmt::BinaryOperatorClass: {
- if (!PDB.supportsLogicalOpControlFlow())
- break;
- const BinaryOperator *B = cast<BinaryOperator>(T);
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
- os << "Left side of '";
-
- if (B->getOpcode() == BO_LAnd) {
- os << "&&" << "' is ";
-
- if (*(Src->succ_begin()+1) == Dst) {
- os << "false";
- PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
- PathDiagnosticLocation Start =
- PathDiagnosticLocation::createOperatorLoc(B, SMgr);
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- else {
- os << "true";
- PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
- PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- }
- else {
- assert(B->getOpcode() == BO_LOr);
- os << "||" << "' is ";
-
- if (*(Src->succ_begin()+1) == Dst) {
- os << "false";
- PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
- PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- else {
- os << "true";
- PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
- PathDiagnosticLocation Start =
- PathDiagnosticLocation::createOperatorLoc(B, SMgr);
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- }
-
- break;
- }
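+/// Generate the diagnostic piece for following a CFG edge out of a switch
+/// terminator: describe which case label (if any) control jumps to in \p Dst.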
+static std::shared_ptr<PathDiagnosticControlFlowPiece>
+generateDiagForSwitchOP(const ExplodedNode *N, const CFGBlock *Dst,
+                        const SourceManager &SM, const LocationContext *LC,
+                        PathDiagnosticBuilder &PDB,
+                        PathDiagnosticLocation &Start) {
+ // Figure out what case arm we took.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PathDiagnosticLocation End;
- case Stmt::DoStmtClass: {
- if (*(Src->succ_begin()) == Dst) {
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- os << "Loop condition is true. ";
- PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
-
- if (const Stmt *S = End.asStmt())
- End = PDB.getEnclosingStmtLocation(S);
-
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- else {
- PathDiagnosticLocation End = PDB.ExecutionContinues(N);
-
- if (const Stmt *S = End.asStmt())
- End = PDB.getEnclosingStmtLocation(S);
-
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(
- Start, End, "Loop condition is false. Exiting loop"));
- }
-
- break;
- }
-
- case Stmt::WhileStmtClass:
- case Stmt::ForStmtClass: {
- if (*(Src->succ_begin()+1) == Dst) {
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- os << "Loop condition is false. ";
- PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
- if (const Stmt *S = End.asStmt())
- End = PDB.getEnclosingStmtLocation(S);
-
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
- os.str()));
- }
- else {
- PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- if (const Stmt *S = End.asStmt())
- End = PDB.getEnclosingStmtLocation(S);
-
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(
- Start, End, "Loop condition is true. Entering loop body"));
- }
-
- break;
- }
+ if (const Stmt *S = Dst->getLabel()) {
+ End = PathDiagnosticLocation(S, SM, LC);
- case Stmt::IfStmtClass: {
- PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ switch (S->getStmtClass()) {
+ default:
+ os << "No cases match in the switch statement. "
+ "Control jumps to line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+ case Stmt::DefaultStmtClass:
+ os << "Control jumps to the 'default' case at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
- if (const Stmt *S = End.asStmt())
- End = PDB.getEnclosingStmtLocation(S);
+ case Stmt::CaseStmtClass: {
+ os << "Control jumps to 'case ";
+ const auto *Case = cast<CaseStmt>(S);
+ const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
- if (*(Src->succ_begin()+1) == Dst)
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(
- Start, End, "Taking false branch"));
- else
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(
- Start, End, "Taking true branch"));
+ // Determine if it is an enum.
+ bool GetRawInt = true;
- break;
- }
- }
- }
- } while(0);
-
- if (NextNode) {
- // Add diagnostic pieces from custom visitors.
- BugReport *R = PDB.getBugReport();
- llvm::FoldingSet<PathDiagnosticPiece> DeduplicationSet;
- for (auto &V : visitors) {
- if (auto p = V->VisitNode(N, NextNode, PDB, *R)) {
- if (DeduplicationSet.GetOrInsertNode(p.get()) != p.get())
- continue;
+ if (const auto *DR = dyn_cast<DeclRefExpr>(LHS)) {
+ // FIXME: Maybe this should be an assertion. Are there cases
+ // where it is not an EnumConstantDecl?
+ const auto *D = dyn_cast<EnumConstantDecl>(DR->getDecl());
- updateStackPiecesWithMessage(*p, CallStack);
- PD.getActivePath().push_front(std::move(p));
+ if (D) {
+ GetRawInt = false;
+ os << *D;
}
}
- }
- }
-
- if (!PDB.getBugReport()->isValid())
- return false;
-
- // After constructing the full PathDiagnostic, do a pass over it to compact
- // PathDiagnosticPieces that occur within a macro.
- CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// "Extensive" PathDiagnostic generation.
-//===----------------------------------------------------------------------===//
-
-static bool IsControlFlowExpr(const Stmt *S) {
- const Expr *E = dyn_cast<Expr>(S);
-
- if (!E)
- return false;
-
- E = E->IgnoreParenCasts();
-
- if (isa<AbstractConditionalOperator>(E))
- return true;
-
- if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
- if (B->isLogicalOp())
- return true;
- return false;
-}
-
-namespace {
-class ContextLocation : public PathDiagnosticLocation {
- bool IsDead;
-public:
- ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
- : PathDiagnosticLocation(L), IsDead(isdead) {}
-
- void markDead() { IsDead = true; }
- bool isDead() const { return IsDead; }
-};
-
-static PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
- const LocationContext *LC,
- bool firstCharOnly = false) {
- if (const Stmt *S = L.asStmt()) {
- const Stmt *Original = S;
- while (1) {
- // Adjust the location for some expressions that are best referenced
- // by one of their subexpressions.
- switch (S->getStmtClass()) {
- default:
- break;
- case Stmt::ParenExprClass:
- case Stmt::GenericSelectionExprClass:
- S = cast<Expr>(S)->IgnoreParens();
- firstCharOnly = true;
- continue;
- case Stmt::BinaryConditionalOperatorClass:
- case Stmt::ConditionalOperatorClass:
- S = cast<AbstractConditionalOperator>(S)->getCond();
- firstCharOnly = true;
- continue;
- case Stmt::ChooseExprClass:
- S = cast<ChooseExpr>(S)->getCond();
- firstCharOnly = true;
- continue;
- case Stmt::BinaryOperatorClass:
- S = cast<BinaryOperator>(S)->getLHS();
- firstCharOnly = true;
- continue;
- }
+ if (GetRawInt)
+ os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
+ os << ":' at line " << End.asLocation().getExpansionLineNumber();
break;
}
-
- if (S != Original)
- L = PathDiagnosticLocation(S, L.getManager(), LC);
- }
-
- if (firstCharOnly)
- L = PathDiagnosticLocation::createSingleLocation(L);
-
- return L;
-}
-
-class EdgeBuilder {
- std::vector<ContextLocation> CLocs;
- typedef std::vector<ContextLocation>::iterator iterator;
- PathDiagnostic &PD;
- PathDiagnosticBuilder &PDB;
- PathDiagnosticLocation PrevLoc;
-
- bool IsConsumedExpr(const PathDiagnosticLocation &L);
-
- bool containsLocation(const PathDiagnosticLocation &Container,
- const PathDiagnosticLocation &Containee);
-
- PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
-
-
-
- void popLocation() {
- if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
- // For contexts, we only one the first character as the range.
- rawAddEdge(cleanUpLocation(CLocs.back(), PDB.LC, true));
}
- CLocs.pop_back();
+ } else {
+ os << "'Default' branch taken. ";
+ End = PDB.ExecutionContinues(os, N);
+ }
+ return std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
+ os.str());
+}
+
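+/// Generate the diagnostic piece for a goto or indirect goto: report the
+/// line that control jumps to next.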
+static std::shared_ptr<PathDiagnosticControlFlowPiece>
+generateDiagForGotoOP(const Stmt *S, PathDiagnosticBuilder &PDB,
+                      PathDiagnosticLocation &Start) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
+ os << "Control jumps to line " << End.asLocation().getExpansionLineNumber();
+ return std::make_shared<PathDiagnosticControlFlowPiece>(Start, End, os.str());
+}
+
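+/// Generate the diagnostic piece for a short-circuited '&&' or '||':
+/// describe whether the left side of \p T evaluated to true or false on
+/// the CFG edge from \p Src to \p Dst.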
+static std::shared_ptr<PathDiagnosticControlFlowPiece>
+generateDiagForBinaryOP(const ExplodedNode *N, const Stmt *T,
+                        const CFGBlock *Src, const CFGBlock *Dst,
+                        const SourceManager &SM, PathDiagnosticBuilder &PDB,
+                        const LocationContext *LC) {
+ const auto *B = cast<BinaryOperator>(T);
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Left side of '";
+ PathDiagnosticLocation Start, End;
+
+ if (B->getOpcode() == BO_LAnd) {
+ os << "&&"
+ << "' is ";
+
+ if (*(Src->succ_begin() + 1) == Dst) {
+ os << "false";
+ End = PathDiagnosticLocation(B->getLHS(), SM, LC);
+ Start = PathDiagnosticLocation::createOperatorLoc(B, SM);
+ } else {
+ os << "true";
+ Start = PathDiagnosticLocation(B->getLHS(), SM, LC);
+ End = PDB.ExecutionContinues(N);
+ }
+ } else {
+ assert(B->getOpcode() == BO_LOr);
+ os << "||"
+ << "' is ";
+
+ if (*(Src->succ_begin() + 1) == Dst) {
+ os << "false";
+ Start = PathDiagnosticLocation(B->getLHS(), SM, LC);
+ End = PDB.ExecutionContinues(N);
+ } else {
+ os << "true";
+ End = PathDiagnosticLocation(B->getLHS(), SM, LC);
+ Start = PathDiagnosticLocation::createOperatorLoc(B, SM);
+ }
}
+ return std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
+ os.str());
+}
-public:
- EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
- : PD(pd), PDB(pdb) {
-
- // If the PathDiagnostic already has pieces, add the enclosing statement
- // of the first piece as a context as well.
- if (!PD.path.empty()) {
- PrevLoc = (*PD.path.begin())->getLocation();
-
- if (const Stmt *S = PrevLoc.asStmt())
- addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
- }
- }
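+/// Generate a "minimal" control-flow piece for a CFG block edge: inspect the
+/// terminator of the source block (goto, switch, ternary, logical operator,
+/// loop, or if) and prepend a piece describing the branch taken to the
+/// active path of \p PD.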
+static void generateMinimalDiagForBlockEdge(const ExplodedNode *N,
+                                            BlockEdge BE,
+                                            const SourceManager &SM,
+                                            PathDiagnosticBuilder &PDB,
+                                            PathDiagnostic &PD) {
+ const LocationContext *LC = N->getLocationContext();
+ const CFGBlock *Src = BE.getSrc();
+ const CFGBlock *Dst = BE.getDst();
+ const Stmt *T = Src->getTerminator();
+ if (!T)
+ return;
- ~EdgeBuilder() {
- while (!CLocs.empty()) popLocation();
+ auto Start = PathDiagnosticLocation::createBegin(T, SM, LC);
+ switch (T->getStmtClass()) {
+ default:
+ break;
- // Finally, add an initial edge from the start location of the first
- // statement (if it doesn't already exist).
- PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
- PDB.LC,
- PDB.getSourceManager());
- if (L.isValid())
- rawAddEdge(L);
+ case Stmt::GotoStmtClass:
+ case Stmt::IndirectGotoStmtClass: {
+ if (const Stmt *S = PathDiagnosticLocation::getNextStmt(N))
+ PD.getActivePath().push_front(generateDiagForGotoOP(S, PDB, Start));
+ break;
}
- void flushLocations() {
- while (!CLocs.empty())
- popLocation();
- PrevLoc = PathDiagnosticLocation();
+ case Stmt::SwitchStmtClass: {
+ PD.getActivePath().push_front(
+ generateDiagForSwitchOP(N, Dst, SM, LC, PDB, Start));
+ break;
}
- void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false,
- bool IsPostJump = false);
-
- void rawAddEdge(PathDiagnosticLocation NewLoc);
-
- void addContext(const Stmt *S);
- void addContext(const PathDiagnosticLocation &L);
- void addExtendedContext(const Stmt *S);
-};
-} // end anonymous namespace
-
-
-PathDiagnosticLocation
-EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
- if (const Stmt *S = L.asStmt()) {
- if (IsControlFlowExpr(S))
- return L;
-
- return PDB.getEnclosingStmtLocation(S);
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(Start, End, os.str()));
+ break;
}
- return L;
-}
-
-bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
- const PathDiagnosticLocation &Containee) {
-
- if (Container == Containee)
- return true;
+ // Determine control-flow for ternary '?'.
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "'?' condition is ";
- if (Container.asDecl())
- return true;
+ if (*(Src->succ_begin() + 1) == Dst)
+ os << "false";
+ else
+ os << "true";
- if (const Stmt *S = Containee.asStmt())
- if (const Stmt *ContainerS = Container.asStmt()) {
- while (S) {
- if (S == ContainerS)
- return true;
- S = PDB.getParent(S);
- }
- return false;
- }
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- // Less accurate: compare using source ranges.
- SourceRange ContainerR = Container.asRange();
- SourceRange ContaineeR = Containee.asRange();
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
- SourceManager &SM = PDB.getSourceManager();
- SourceLocation ContainerRBeg = SM.getExpansionLoc(ContainerR.getBegin());
- SourceLocation ContainerREnd = SM.getExpansionLoc(ContainerR.getEnd());
- SourceLocation ContaineeRBeg = SM.getExpansionLoc(ContaineeR.getBegin());
- SourceLocation ContaineeREnd = SM.getExpansionLoc(ContaineeR.getEnd());
-
- unsigned ContainerBegLine = SM.getExpansionLineNumber(ContainerRBeg);
- unsigned ContainerEndLine = SM.getExpansionLineNumber(ContainerREnd);
- unsigned ContaineeBegLine = SM.getExpansionLineNumber(ContaineeRBeg);
- unsigned ContaineeEndLine = SM.getExpansionLineNumber(ContaineeREnd);
-
- assert(ContainerBegLine <= ContainerEndLine);
- assert(ContaineeBegLine <= ContaineeEndLine);
-
- return (ContainerBegLine <= ContaineeBegLine &&
- ContainerEndLine >= ContaineeEndLine &&
- (ContainerBegLine != ContaineeBegLine ||
- SM.getExpansionColumnNumber(ContainerRBeg) <=
- SM.getExpansionColumnNumber(ContaineeRBeg)) &&
- (ContainerEndLine != ContaineeEndLine ||
- SM.getExpansionColumnNumber(ContainerREnd) >=
- SM.getExpansionColumnNumber(ContaineeREnd)));
-}
-
-void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
- if (!PrevLoc.isValid()) {
- PrevLoc = NewLoc;
- return;
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(Start, End, os.str()));
+ break;
}
- const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc, PDB.LC);
- const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc, PDB.LC);
+ // Determine control-flow for short-circuited '&&' and '||'.
+ case Stmt::BinaryOperatorClass: {
+ if (!PDB.supportsLogicalOpControlFlow())
+ break;
- if (PrevLocClean.asLocation().isInvalid()) {
- PrevLoc = NewLoc;
- return;
+ std::shared_ptr<PathDiagnosticControlFlowPiece> Diag =
+ generateDiagForBinaryOP(N, T, Src, Dst, SM, PDB, LC);
+ PD.getActivePath().push_front(Diag);
+ break;
}
- if (NewLocClean.asLocation() == PrevLocClean.asLocation())
- return;
-
- // FIXME: Ignore intra-macro edges for now.
- if (NewLocClean.asLocation().getExpansionLoc() ==
- PrevLocClean.asLocation().getExpansionLoc())
- return;
-
- PD.getActivePath().push_front(
- std::make_shared<PathDiagnosticControlFlowPiece>(NewLocClean,
- PrevLocClean));
- PrevLoc = NewLoc;
-}
-
-void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd,
- bool IsPostJump) {
-
- if (!alwaysAdd && NewLoc.asLocation().isMacroID())
- return;
+ case Stmt::DoStmtClass:
+ if (*(Src->succ_begin()) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
- const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+ os << "Loop condition is true. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
- while (!CLocs.empty()) {
- ContextLocation &TopContextLoc = CLocs.back();
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
- // Is the top location context the same as the one for the new location?
- if (TopContextLoc == CLoc) {
- if (alwaysAdd) {
- if (IsConsumedExpr(TopContextLoc))
- TopContextLoc.markDead();
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
+ os.str()));
+ } else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- rawAddEdge(NewLoc);
- }
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
- if (IsPostJump)
- TopContextLoc.markDead();
- return;
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(
+ Start, End, "Loop condition is false. Exiting loop"));
}
+ break;
- if (containsLocation(TopContextLoc, CLoc)) {
- if (alwaysAdd) {
- rawAddEdge(NewLoc);
-
- if (IsConsumedExpr(CLoc)) {
- CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/true));
- return;
- }
- }
+ case Stmt::WhileStmtClass:
+ case Stmt::ForStmtClass:
+ if (*(Src->succ_begin() + 1) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is false. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(Start, End,
+ os.str()));
+ } else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
- CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/IsPostJump));
- return;
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(
+ Start, End, "Loop condition is true. Entering loop body"));
}
- // Context does not contain the location. Flush it.
- popLocation();
- }
-
- // If we reach here, there is no enclosing context. Just add the edge.
- rawAddEdge(NewLoc);
-}
+ break;
-bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
- if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
- return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
+ case Stmt::IfStmtClass: {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- return false;
-}
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
-void EdgeBuilder::addExtendedContext(const Stmt *S) {
- if (!S)
- return;
-
- const Stmt *Parent = PDB.getParent(S);
- while (Parent) {
- if (isa<CompoundStmt>(Parent))
- Parent = PDB.getParent(Parent);
+ if (*(Src->succ_begin() + 1) == Dst)
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(
+ Start, End, "Taking false branch"));
else
- break;
- }
+ PD.getActivePath().push_front(
+ std::make_shared<PathDiagnosticControlFlowPiece>(
+ Start, End, "Taking true branch"));
- if (Parent) {
- switch (Parent->getStmtClass()) {
- case Stmt::DoStmtClass:
- case Stmt::ObjCAtSynchronizedStmtClass:
- addContext(Parent);
- default:
- break;
- }
+ break;
}
-
- addContext(S);
-}
-
-void EdgeBuilder::addContext(const Stmt *S) {
- if (!S)
- return;
-
- PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
- addContext(L);
-}
-
-void EdgeBuilder::addContext(const PathDiagnosticLocation &L) {
- while (!CLocs.empty()) {
- const PathDiagnosticLocation &TopContextLoc = CLocs.back();
-
- // Is the top location context the same as the one for the new location?
- if (TopContextLoc == L)
- return;
-
- if (containsLocation(TopContextLoc, L)) {
- CLocs.push_back(L);
- return;
- }
-
- // Context does not contain the location. Flush it.
- popLocation();
}
-
- CLocs.push_back(L);
}
// Cone-of-influence: support the reverse propagation of "interesting" symbols
@@ -1257,7 +826,7 @@ void EdgeBuilder::addContext(const PathDiagnosticLocation &L) {
// because the constraint solver sometimes simplifies certain symbolic values
// into constants when appropriate, and this complicates reasoning about
// interesting values.
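// For example, if the interesting value flowed out of 'x = a + b', the
// values of 'a' and 'b' are marked interesting as well, so earlier notes
// about them are retained.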
-typedef llvm::DenseSet<const Expr *> InterestingExprs;
+using InterestingExprs = llvm::DenseSet<const Expr *>;
static void reversePropagateIntererstingSymbols(BugReport &R,
InterestingExprs &IE,
@@ -1276,7 +845,7 @@ static void reversePropagateIntererstingSymbols(BugReport &R,
case Stmt::BinaryOperatorClass:
case Stmt::UnaryOperatorClass: {
for (const Stmt *SubStmt : Ex->children()) {
- if (const Expr *child = dyn_cast_or_null<Expr>(SubStmt)) {
+ if (const auto *child = dyn_cast_or_null<Expr>(SubStmt)) {
IE.insert(child);
SVal ChildV = State->getSVal(child, LCtx);
R.markInteresting(ChildV);
@@ -1296,10 +865,10 @@ static void reversePropagateInterestingSymbols(BugReport &R,
const LocationContext *CallerCtx)
{
// FIXME: Handle non-CallExpr-based CallEvents.
- const StackFrameContext *Callee = CalleeCtx->getCurrentStackFrame();
+ const StackFrameContext *Callee = CalleeCtx->getStackFrame();
const Stmt *CallSite = Callee->getCallSite();
- if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
+ if (const auto *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
FunctionDecl::param_const_iterator PI = FD->param_begin(),
PE = FD->param_end();
CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
@@ -1339,16 +908,6 @@ static bool isJumpToFalseBranch(const BlockEdge *BE) {
return (*(Src->succ_begin()+1) == BE->getDst());
}
-/// Return true if the terminator is a loop and the destination is the
-/// false branch.
-static bool isLoopJumpPastBody(const Stmt *Term, const BlockEdge *BE) {
- if (!isLoop(Term))
- return false;
-
- // Did we take the false branch?
- return isJumpToFalseBranch(BE);
-}
-
static bool isContainedByStmt(ParentMap &PM, const Stmt *S, const Stmt *SubS) {
while (SubS) {
if (SubS == S)
@@ -1376,7 +935,7 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
const Stmt *LoopBody = nullptr;
switch (Term->getStmtClass()) {
case Stmt::CXXForRangeStmtClass: {
- const CXXForRangeStmt *FR = cast<CXXForRangeStmt>(Term);
+ const auto *FR = cast<CXXForRangeStmt>(Term);
if (isContainedByStmt(PM, FR->getInc(), S))
return true;
if (isContainedByStmt(PM, FR->getLoopVarStmt(), S))
@@ -1385,14 +944,14 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
break;
}
case Stmt::ForStmtClass: {
- const ForStmt *FS = cast<ForStmt>(Term);
+ const auto *FS = cast<ForStmt>(Term);
if (isContainedByStmt(PM, FS->getInc(), S))
return true;
LoopBody = FS->getBody();
break;
}
case Stmt::ObjCForCollectionStmtClass: {
- const ObjCForCollectionStmt *FC = cast<ObjCForCollectionStmt>(Term);
+ const auto *FC = cast<ObjCForCollectionStmt>(Term);
LoopBody = FC->getBody();
break;
}
@@ -1405,210 +964,7 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
return isContainedByStmt(PM, LoopBody, S);
}
-//===----------------------------------------------------------------------===//
-// Top-level logic for generating extensive path diagnostics.
-//===----------------------------------------------------------------------===//
-
-static bool GenerateExtensivePathDiagnostic(
- PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
- LocationContextMap &LCM,
- ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
- EdgeBuilder EB(PD, PDB);
- const SourceManager& SM = PDB.getSourceManager();
- StackDiagVector CallStack;
- InterestingExprs IE;
-
- const ExplodedNode *NextNode = N->pred_empty() ? nullptr : *(N->pred_begin());
- while (NextNode) {
- N = NextNode;
- NextNode = N->getFirstPred();
- ProgramPoint P = N->getLocation();
-
- do {
- if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
- if (const Expr *Ex = PS->getStmtAs<Expr>())
- reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(), Ex,
- N->getLocationContext());
- }
-
- if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
- const Stmt *S = CE->getCalleeContext()->getCallSite();
- if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
- reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(), Ex,
- N->getLocationContext());
- }
-
- auto C = PathDiagnosticCallPiece::construct(N, *CE, SM);
- LCM[&C->path] = CE->getCalleeContext();
-
- EB.addEdge(C->callReturn, /*AlwaysAdd=*/true, /*IsPostJump=*/true);
- EB.flushLocations();
-
- auto *P = C.get();
- PD.getActivePath().push_front(std::move(C));
- PD.pushActivePath(&P->path);
- CallStack.push_back(StackDiagPair(P, N));
- break;
- }
-
- // Pop the call hierarchy if we are done walking the contents
- // of a function call.
- if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
- // Add an edge to the start of the function.
- const Decl *D = CE->getCalleeContext()->getDecl();
- PathDiagnosticLocation pos =
- PathDiagnosticLocation::createBegin(D, SM);
- EB.addEdge(pos);
-
- // Flush all locations, and pop the active path.
- bool VisitedEntireCall = PD.isWithinCall();
- EB.flushLocations();
- PD.popActivePath();
- PDB.LC = N->getLocationContext();
-
- // Either we just added a bunch of stuff to the top-level path, or
- // we have a previous CallExitEnd. If the former, it means that the
- // path terminated within a function call. We must then take the
- // current contents of the active path and place it within
- // a new PathDiagnosticCallPiece.
- PathDiagnosticCallPiece *C;
- if (VisitedEntireCall) {
- C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front().get());
- } else {
- const Decl *Caller = CE->getLocationContext()->getDecl();
- C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
- LCM[&C->path] = CE->getCalleeContext();
- }
-
- C->setCallee(*CE, SM);
- EB.addContext(C->getLocation());
-
- if (!CallStack.empty()) {
- assert(CallStack.back().first == C);
- CallStack.pop_back();
- }
- break;
- }
-
- // Note that is important that we update the LocationContext
- // after looking at CallExits. CallExit basically adds an
- // edge in the *caller*, so we don't want to update the LocationContext
- // too soon.
- PDB.LC = N->getLocationContext();
-
- // Block edges.
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
- // Does this represent entering a call? If so, look at propagating
- // interesting symbols across call boundaries.
- if (NextNode) {
- const LocationContext *CallerCtx = NextNode->getLocationContext();
- const LocationContext *CalleeCtx = PDB.LC;
- if (CallerCtx != CalleeCtx) {
- reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(),
- CalleeCtx, CallerCtx);
- }
- }
-
- // Are we jumping to the head of a loop? Add a special diagnostic.
- if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
- PathDiagnosticLocation L(Loop, SM, PDB.LC);
- const CompoundStmt *CS = nullptr;
-
- if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
- CS = dyn_cast<CompoundStmt>(FS->getBody());
- else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
- CS = dyn_cast<CompoundStmt>(WS->getBody());
-
- auto p = std::make_shared<PathDiagnosticEventPiece>(
- L, "Looping back to the head of the loop");
- p->setPrunable(true);
-
- EB.addEdge(p->getLocation(), true);
- PD.getActivePath().push_front(std::move(p));
-
- if (CS) {
- PathDiagnosticLocation BL =
- PathDiagnosticLocation::createEndBrace(CS, SM);
- EB.addEdge(BL);
- }
- }
-
- const CFGBlock *BSrc = BE->getSrc();
- ParentMap &PM = PDB.getParentMap();
-
- if (const Stmt *Term = BSrc->getTerminator()) {
- // Are we jumping past the loop body without ever executing the
- // loop (because the condition was false)?
- if (isLoopJumpPastBody(Term, &*BE) &&
- !isInLoopBody(PM,
- getStmtBeforeCond(PM,
- BSrc->getTerminatorCondition(),
- N),
- Term)) {
- PathDiagnosticLocation L(Term, SM, PDB.LC);
- auto PE = std::make_shared<PathDiagnosticEventPiece>(
- L, "Loop body executed 0 times");
- PE->setPrunable(true);
-
- EB.addEdge(PE->getLocation(), true);
- PD.getActivePath().push_front(std::move(PE));
- }
-
- // In any case, add the terminator as the current statement
- // context for control edges.
- EB.addContext(Term);
- }
-
- break;
- }
-
- if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
- Optional<CFGElement> First = BE->getFirstElement();
- if (Optional<CFGStmt> S = First ? First->getAs<CFGStmt>() : None) {
- const Stmt *stmt = S->getStmt();
- if (IsControlFlowExpr(stmt)) {
- // Add the proper context for '&&', '||', and '?'.
- EB.addContext(stmt);
- }
- else
- EB.addExtendedContext(PDB.getEnclosingStmtLocation(stmt).asStmt());
- }
-
- break;
- }
-
-
- } while (0);
-
- if (!NextNode)
- continue;
-
- // Add pieces from custom visitors.
- BugReport *R = PDB.getBugReport();
- llvm::FoldingSet<PathDiagnosticPiece> DeduplicationSet;
- for (auto &V : visitors) {
- if (auto p = V->VisitNode(N, NextNode, PDB, *R)) {
- if (DeduplicationSet.GetOrInsertNode(p.get()) != p.get())
- continue;
-
- const PathDiagnosticLocation &Loc = p->getLocation();
- EB.addEdge(Loc, true);
- updateStackPiecesWithMessage(*p, CallStack);
- PD.getActivePath().push_front(std::move(p));
-
- if (const Stmt *S = Loc.asStmt())
- EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
- }
- }
- }
-
- return PDB.getBugReport()->isValid();
-}
-
-/// \brief Adds a sanitized control-flow diagnostic edge to a path.
+/// Adds a sanitized control-flow diagnostic edge to a path.
static void addEdgeToPath(PathPieces &path,
PathDiagnosticLocation &PrevLoc,
PathDiagnosticLocation NewLoc,
@@ -1639,8 +995,7 @@ static void addEdgeToPath(PathPieces &path,
/// which returns the element for ObjCForCollectionStmts.
static const Stmt *getTerminatorCondition(const CFGBlock *B) {
const Stmt *S = B->getTerminatorCondition();
- if (const ObjCForCollectionStmt *FS =
- dyn_cast_or_null<ObjCForCollectionStmt>(S))
+ if (const auto *FS = dyn_cast_or_null<ObjCForCollectionStmt>(S))
return FS->getElement();
return S;
}
@@ -1652,269 +1007,256 @@ static const char StrLoopRangeEmpty[] =
static const char StrLoopCollectionEmpty[] =
"Loop body skipped when collection is empty";
-static bool GenerateAlternateExtensivePathDiagnostic(
- PathDiagnostic &PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N,
- LocationContextMap &LCM,
- ArrayRef<std::unique_ptr<BugReporterVisitor>> visitors) {
-
- BugReport *report = PDB.getBugReport();
+static std::unique_ptr<FilesToLineNumsMap>
+findExecutedLines(SourceManager &SM, const ExplodedNode *N);
+
+/// Generate diagnostics for the node \p N and write them into \p PD.
+/// \p AddPathEdges Whether the diagnostic consumer can render path arrows
+/// showing both row and column, and hence whether explicit control-flow
+/// edges should be added to the path.
+static void generatePathDiagnosticsForNode(const ExplodedNode *N,
+ PathDiagnostic &PD,
+ PathDiagnosticLocation &PrevLoc,
+ PathDiagnosticBuilder &PDB,
+ LocationContextMap &LCM,
+ StackDiagVector &CallStack,
+ InterestingExprs &IE,
+ bool AddPathEdges) {
+ ProgramPoint P = N->getLocation();
const SourceManager& SM = PDB.getSourceManager();
- StackDiagVector CallStack;
- InterestingExprs IE;
- PathDiagnosticLocation PrevLoc = PD.getLocation();
+ // Have we encountered an entrance to a call? It may be
+ // the case that we have not encountered a matching
+ // call exit before this point. This means that the path
+ // terminated within the call itself.
+ if (auto CE = P.getAs<CallEnter>()) {
+ if (AddPathEdges) {
+ // Add an edge to the start of the function.
+ const StackFrameContext *CalleeLC = CE->getCalleeContext();
+ const Decl *D = CalleeLC->getDecl();
+ // Add the edge only when the callee has body. We jump to the beginning
+ // of the *declaration*, however we expect it to be followed by the
+ // body. This isn't the case for autosynthesized property accessors in
+ // Objective-C. No need for a similar extra check for CallExit points
+ // because the exit edge comes from a statement (i.e. return),
+ // not from declaration.
+ if (D->hasBody())
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createBegin(D, SM), CalleeLC);
+ }
- const ExplodedNode *NextNode = N->getFirstPred();
- while (NextNode) {
- N = NextNode;
- NextNode = N->getFirstPred();
- ProgramPoint P = N->getLocation();
-
- do {
- // Have we encountered an entrance to a call? It may be
- // the case that we have not encountered a matching
- // call exit before this point. This means that the path
- // terminated within the call itself.
- if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
- // Add an edge to the start of the function.
- const StackFrameContext *CalleeLC = CE->getCalleeContext();
- const Decl *D = CalleeLC->getDecl();
- // Add the edge only when the callee has body. We jump to the beginning
- // of the *declaration*, however we expect it to be followed by the
- // body. This isn't the case for autosynthesized property accessors in
- // Objective-C. No need for a similar extra check for CallExit points
- // because the exit edge comes from a statement (i.e. return),
- // not from declaration.
- if (D->hasBody())
- addEdgeToPath(PD.getActivePath(), PrevLoc,
- PathDiagnosticLocation::createBegin(D, SM), CalleeLC);
+ // Did we visit an entire call?
+ bool VisitedEntireCall = PD.isWithinCall();
+ PD.popActivePath();
- // Did we visit an entire call?
- bool VisitedEntireCall = PD.isWithinCall();
- PD.popActivePath();
+ PathDiagnosticCallPiece *C;
+ if (VisitedEntireCall) {
+ C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front().get());
+ } else {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+
+ if (AddPathEdges) {
+ // Since we just transferred the path over to the call piece,
+ // reset the mapping from active to location context.
+ assert(PD.getActivePath().size() == 1 &&
+ PD.getActivePath().front().get() == C);
+ LCM[&PD.getActivePath()] = nullptr;
+ }
- PathDiagnosticCallPiece *C;
- if (VisitedEntireCall) {
- PathDiagnosticPiece *P = PD.getActivePath().front().get();
- C = cast<PathDiagnosticCallPiece>(P);
- } else {
- const Decl *Caller = CE->getLocationContext()->getDecl();
- C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
-
- // Since we just transferred the path over to the call piece,
- // reset the mapping from active to location context.
- assert(PD.getActivePath().size() == 1 &&
- PD.getActivePath().front().get() == C);
- LCM[&PD.getActivePath()] = nullptr;
-
- // Record the location context mapping for the path within
- // the call.
- assert(LCM[&C->path] == nullptr ||
- LCM[&C->path] == CE->getCalleeContext());
- LCM[&C->path] = CE->getCalleeContext();
-
- // If this is the first item in the active path, record
- // the new mapping from active path to location context.
- const LocationContext *&NewLC = LCM[&PD.getActivePath()];
- if (!NewLC)
- NewLC = N->getLocationContext();
-
- PDB.LC = NewLC;
- }
- C->setCallee(*CE, SM);
+ // Record the location context mapping for the path within
+ // the call.
+ assert(LCM[&C->path] == nullptr ||
+ LCM[&C->path] == CE->getCalleeContext());
+ LCM[&C->path] = CE->getCalleeContext();
- // Update the previous location in the active path.
- PrevLoc = C->getLocation();
+ // If this is the first item in the active path, record
+ // the new mapping from active path to location context.
+ const LocationContext *&NewLC = LCM[&PD.getActivePath()];
+ if (!NewLC)
+ NewLC = N->getLocationContext();
- if (!CallStack.empty()) {
- assert(CallStack.back().first == C);
- CallStack.pop_back();
- }
- break;
- }
+ PDB.LC = NewLC;
+ }
+ C->setCallee(*CE, SM);
- // Query the location context here and the previous location
- // as processing CallEnter may change the active path.
- PDB.LC = N->getLocationContext();
+ // Update the previous location in the active path.
+ PrevLoc = C->getLocation();
- // Record the mapping from the active path to the location
- // context.
- assert(!LCM[&PD.getActivePath()] ||
- LCM[&PD.getActivePath()] == PDB.LC);
- LCM[&PD.getActivePath()] = PDB.LC;
-
- // Have we encountered an exit from a function call?
- if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
- const Stmt *S = CE->getCalleeContext()->getCallSite();
- // Propagate the interesting symbols accordingly.
- if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
- reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(), Ex,
- N->getLocationContext());
- }
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ return;
+ }
- // We are descending into a call (backwards). Construct
- // a new call piece to contain the path pieces for that call.
- auto C = PathDiagnosticCallPiece::construct(N, *CE, SM);
- // Record the location context for this call piece.
- LCM[&C->path] = CE->getCalleeContext();
+ if (AddPathEdges) {
+ // Query the location context here and the previous location
+ // as processing CallEnter may change the active path.
+ PDB.LC = N->getLocationContext();
- // Add the edge to the return site.
- addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, PDB.LC);
- auto *P = C.get();
- PD.getActivePath().push_front(std::move(C));
- PrevLoc.invalidate();
+ // Record the mapping from the active path to the location
+ // context.
+ assert(!LCM[&PD.getActivePath()] || LCM[&PD.getActivePath()] == PDB.LC);
+ LCM[&PD.getActivePath()] = PDB.LC;
+ }
- // Make the contents of the call the active path for now.
- PD.pushActivePath(&P->path);
- CallStack.push_back(StackDiagPair(P, N));
- break;
- }
+ // Have we encountered an exit from a function call?
+ if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
- if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
- // For expressions, make sure we propagate the
- // interesting symbols correctly.
- if (const Expr *Ex = PS->getStmtAs<Expr>())
- reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(), Ex,
- N->getLocationContext());
-
- // Add an edge. If this is an ObjCForCollectionStmt do
- // not add an edge here as it appears in the CFG both
- // as a terminator and as a terminator condition.
- if (!isa<ObjCForCollectionStmt>(PS->getStmt())) {
- PathDiagnosticLocation L =
- PathDiagnosticLocation(PS->getStmt(), SM, PDB.LC);
- addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
- }
- break;
+ // We are descending into a call (backwards). Construct
+ // a new call piece to contain the path pieces for that call.
+ auto C = PathDiagnosticCallPiece::construct(N, *CE, SM);
+ // Record the mapping from call piece to LocationContext.
+ LCM[&C->path] = CE->getCalleeContext();
+
+ if (AddPathEdges) {
+ const Stmt *S = CE->getCalleeContext()->getCallSite();
+ // Propagate the interesting symbols accordingly.
+ if (const auto *Ex = dyn_cast_or_null<Expr>(S)) {
+ reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(), Ex,
+ N->getLocationContext());
}
+ // Add the edge to the return site.
+ addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, PDB.LC);
+ PrevLoc.invalidate();
+ }
- // Block edges.
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
- // Does this represent entering a call? If so, look at propagating
- // interesting symbols across call boundaries.
- if (NextNode) {
- const LocationContext *CallerCtx = NextNode->getLocationContext();
- const LocationContext *CalleeCtx = PDB.LC;
- if (CallerCtx != CalleeCtx) {
- reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(),
- CalleeCtx, CallerCtx);
- }
- }
+ auto *P = C.get();
+ PD.getActivePath().push_front(std::move(C));
- // Are we jumping to the head of a loop? Add a special diagnostic.
- if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
- PathDiagnosticLocation L(Loop, SM, PDB.LC);
- const Stmt *Body = nullptr;
-
- if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
- Body = FS->getBody();
- else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
- Body = WS->getBody();
- else if (const ObjCForCollectionStmt *OFS =
- dyn_cast<ObjCForCollectionStmt>(Loop)) {
- Body = OFS->getBody();
- } else if (const CXXForRangeStmt *FRS =
- dyn_cast<CXXForRangeStmt>(Loop)) {
- Body = FRS->getBody();
- }
- // do-while statements are explicitly excluded here
+ // Make the contents of the call the active path for now.
+ PD.pushActivePath(&P->path);
+ CallStack.push_back(StackDiagPair(P, N));
+ return;
+ }
- auto p = std::make_shared<PathDiagnosticEventPiece>(
- L, "Looping back to the head "
- "of the loop");
- p->setPrunable(true);
+ if (auto PS = P.getAs<PostStmt>()) {
+ if (!AddPathEdges)
+ return;
- addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
- PD.getActivePath().push_front(std::move(p));
+ // For expressions, make sure we propagate the
+ // interesting symbols correctly.
+ if (const Expr *Ex = PS->getStmtAs<Expr>())
+ reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(), Ex,
+ N->getLocationContext());
+
+ // Add an edge. If this is an ObjCForCollectionStmt do
+ // not add an edge here as it appears in the CFG both
+ // as a terminator and as a terminator condition.
+ if (!isa<ObjCForCollectionStmt>(PS->getStmt())) {
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation(PS->getStmt(), SM, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
+ }
- if (const CompoundStmt *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
- addEdgeToPath(PD.getActivePath(), PrevLoc,
- PathDiagnosticLocation::createEndBrace(CS, SM),
- PDB.LC);
- }
- }
+ } else if (auto BE = P.getAs<BlockEdge>()) {
- const CFGBlock *BSrc = BE->getSrc();
- ParentMap &PM = PDB.getParentMap();
-
- if (const Stmt *Term = BSrc->getTerminator()) {
- // Are we jumping past the loop body without ever executing the
- // loop (because the condition was false)?
- if (isLoop(Term)) {
- const Stmt *TermCond = getTerminatorCondition(BSrc);
- bool IsInLoopBody =
- isInLoopBody(PM, getStmtBeforeCond(PM, TermCond, N), Term);
-
- const char *str = nullptr;
-
- if (isJumpToFalseBranch(&*BE)) {
- if (!IsInLoopBody) {
- if (isa<ObjCForCollectionStmt>(Term)) {
- str = StrLoopCollectionEmpty;
- } else if (isa<CXXForRangeStmt>(Term)) {
- str = StrLoopRangeEmpty;
- } else {
- str = StrLoopBodyZero;
- }
- }
- } else {
- str = StrEnteringLoop;
- }
+ if (!AddPathEdges) {
+ generateMinimalDiagForBlockEdge(N, *BE, SM, PDB, PD);
+ return;
+ }
- if (str) {
- PathDiagnosticLocation L(TermCond ? TermCond : Term, SM, PDB.LC);
- auto PE = std::make_shared<PathDiagnosticEventPiece>(L, str);
- PE->setPrunable(true);
- addEdgeToPath(PD.getActivePath(), PrevLoc,
- PE->getLocation(), PDB.LC);
- PD.getActivePath().push_front(std::move(PE));
- }
- } else if (isa<BreakStmt>(Term) || isa<ContinueStmt>(Term) ||
- isa<GotoStmt>(Term)) {
- PathDiagnosticLocation L(Term, SM, PDB.LC);
- addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
- }
- }
- break;
+ // Does this represent entering a call? If so, look at propagating
+ // interesting symbols across call boundaries.
+ if (const ExplodedNode *NextNode = N->getFirstPred()) {
+ const LocationContext *CallerCtx = NextNode->getLocationContext();
+ const LocationContext *CalleeCtx = PDB.LC;
+ if (CallerCtx != CalleeCtx && AddPathEdges) {
+ reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+ N->getState().get(),
+ CalleeCtx, CallerCtx);
}
- } while (0);
+ }
- if (!NextNode)
- continue;
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, SM, PDB.LC);
+ const Stmt *Body = nullptr;
+
+ if (const auto *FS = dyn_cast<ForStmt>(Loop))
+ Body = FS->getBody();
+ else if (const auto *WS = dyn_cast<WhileStmt>(Loop))
+ Body = WS->getBody();
+ else if (const auto *OFS = dyn_cast<ObjCForCollectionStmt>(Loop)) {
+ Body = OFS->getBody();
+ } else if (const auto *FRS = dyn_cast<CXXForRangeStmt>(Loop)) {
+ Body = FRS->getBody();
+ }
+ // do-while statements are explicitly excluded here
- // Add pieces from custom visitors.
- llvm::FoldingSet<PathDiagnosticPiece> DeduplicationSet;
- for (auto &V : visitors) {
- if (auto p = V->VisitNode(N, NextNode, PDB, *report)) {
- if (DeduplicationSet.GetOrInsertNode(p.get()) != p.get())
- continue;
+ auto p = std::make_shared<PathDiagnosticEventPiece>(
+ L, "Looping back to the head "
+ "of the loop");
+ p->setPrunable(true);
- addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
- updateStackPiecesWithMessage(*p, CallStack);
- PD.getActivePath().push_front(std::move(p));
+ addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
+ PD.getActivePath().push_front(std::move(p));
+
+ if (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createEndBrace(CS, SM),
+ PDB.LC);
}
}
- }
- // Add an edge to the start of the function.
- // We'll prune it out later, but it helps make diagnostics more uniform.
- const StackFrameContext *CalleeLC = PDB.LC->getCurrentStackFrame();
- const Decl *D = CalleeLC->getDecl();
- addEdgeToPath(PD.getActivePath(), PrevLoc,
- PathDiagnosticLocation::createBegin(D, SM),
- CalleeLC);
+ const CFGBlock *BSrc = BE->getSrc();
+ ParentMap &PM = PDB.getParentMap();
+
+ if (const Stmt *Term = BSrc->getTerminator()) {
+ // Are we jumping past the loop body without ever executing the
+ // loop (because the condition was false)?
+ if (isLoop(Term)) {
+ const Stmt *TermCond = getTerminatorCondition(BSrc);
+ bool IsInLoopBody =
+ isInLoopBody(PM, getStmtBeforeCond(PM, TermCond, N), Term);
+
+ const char *str = nullptr;
+
+ if (isJumpToFalseBranch(&*BE)) {
+ if (!IsInLoopBody) {
+ if (isa<ObjCForCollectionStmt>(Term)) {
+ str = StrLoopCollectionEmpty;
+ } else if (isa<CXXForRangeStmt>(Term)) {
+ str = StrLoopRangeEmpty;
+ } else {
+ str = StrLoopBodyZero;
+ }
+ }
+ } else {
+ str = StrEnteringLoop;
+ }
- return report->isValid();
+ if (str) {
+ PathDiagnosticLocation L(TermCond ? TermCond : Term, SM, PDB.LC);
+ auto PE = std::make_shared<PathDiagnosticEventPiece>(L, str);
+ PE->setPrunable(true);
+ addEdgeToPath(PD.getActivePath(), PrevLoc,
+ PE->getLocation(), PDB.LC);
+ PD.getActivePath().push_front(std::move(PE));
+ }
+ } else if (isa<BreakStmt>(Term) || isa<ContinueStmt>(Term) ||
+ isa<GotoStmt>(Term)) {
+ PathDiagnosticLocation L(Term, SM, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
+ }
+ }
+ }
}
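For context, a minimal sketch of how a driver would walk the ExplodedGraph backwards from the error node and invoke this per-node generator; it mirrors the shared loop of the removed generators and is not the exact clang driver (ErrorNode and the accumulator arguments are assumed to be set up as in the surrounding code):

    const ExplodedNode *NextNode = ErrorNode->getFirstPred();
    while (NextNode) {
      const ExplodedNode *N = NextNode;
      NextNode = N->getFirstPred();
      generatePathDiagnosticsForNode(N, PD, PrevLoc, PDB, LCM, CallStack, IE,
                                     /*AddPathEdges=*/true);
      // Pieces from custom visitors would be deduplicated and prepended
      // here, exactly as in the loops this function was extracted from.
    }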
-static const Stmt *getLocStmt(PathDiagnosticLocation L) {
- if (!L.isValid())
- return nullptr;
- return L.asStmt();
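+/// Create a path diagnostic shell for \p R, carrying its descriptions and
+/// uniqueing information but no path pieces yet.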
+static std::unique_ptr<PathDiagnostic>
+generateEmptyDiagnosticForReport(BugReport *R, SourceManager &SM) {
+ BugType &BT = R->getBugType();
+ return llvm::make_unique<PathDiagnostic>(
+ BT.getCheckName(), R->getDeclWithIssue(),
+ BT.getName(), R->getDescription(),
+ R->getShortDescription(/*Fallback=*/false), BT.getCategory(),
+ R->getUniqueingLocation(), R->getUniqueingDecl(),
+ findExecutedLines(SM, R->getErrorNode()));
}
static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
@@ -1941,7 +1283,7 @@ static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
switch (S->getStmtClass()) {
case Stmt::BinaryOperatorClass: {
- const BinaryOperator *BO = cast<BinaryOperator>(S);
+ const auto *BO = cast<BinaryOperator>(S);
if (!BO->isLogicalOp())
return false;
return BO->getLHS() == Cond || BO->getRHS() == Cond;
@@ -1963,7 +1305,7 @@ static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
case Stmt::BinaryConditionalOperatorClass:
return cast<BinaryConditionalOperator>(S)->getCond() == Cond;
case Stmt::ConditionalOperatorClass: {
- const ConditionalOperator *CO = cast<ConditionalOperator>(S);
+ const auto *CO = cast<ConditionalOperator>(S);
return CO->getCond() == Cond ||
CO->getLHS() == Cond ||
CO->getRHS() == Cond;
@@ -1971,7 +1313,7 @@ static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
case Stmt::ObjCForCollectionStmtClass:
return cast<ObjCForCollectionStmt>(S)->getElement() == Cond;
case Stmt::CXXForRangeStmtClass: {
- const CXXForRangeStmt *FRS = cast<CXXForRangeStmt>(S);
+ const auto *FRS = cast<CXXForRangeStmt>(S);
return FRS->getCond() == Cond || FRS->getRangeInit() == Cond;
}
default:
@@ -1980,16 +1322,15 @@ static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
}
static bool isIncrementOrInitInForLoop(const Stmt *S, const Stmt *FL) {
- if (const ForStmt *FS = dyn_cast<ForStmt>(FL))
+ if (const auto *FS = dyn_cast<ForStmt>(FL))
return FS->getInc() == S || FS->getInit() == S;
- if (const CXXForRangeStmt *FRS = dyn_cast<CXXForRangeStmt>(FL))
+ if (const auto *FRS = dyn_cast<CXXForRangeStmt>(FL))
return FRS->getInc() == S || FRS->getRangeStmt() == S ||
FRS->getLoopVarStmt() == S || FRS->getRangeInit() == S;
return false;
}
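A worked example of the predicate above, using an illustrative loop:

// Given:  for (int i = 0; i < n; ++i) { use(i); }
// isIncrementOrInitInForLoop(S, FL) returns true when S is the init
// `int i = 0` or the increment `++i`, and false for the condition `i < n`
// or the body. Range-based for statements are matched analogously against
// their increment, range statement, loop variable, and range initializer.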
-typedef llvm::DenseSet<const PathDiagnosticCallPiece *>
- OptimizedCallsSet;
+using OptimizedCallsSet = llvm::DenseSet<const PathDiagnosticCallPiece *>;
/// Adds synthetic edges from top-level statements to their subexpressions.
///
@@ -2001,8 +1342,7 @@ static void addContextEdges(PathPieces &pieces, SourceManager &SM,
PathPieces::iterator Prev = pieces.end();
for (PathPieces::iterator I = pieces.begin(), E = Prev; I != E;
Prev = I, ++I) {
- PathDiagnosticControlFlowPiece *Piece =
- dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
+ auto *Piece = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
if (!Piece)
continue;
@@ -2023,7 +1363,7 @@ static void addContextEdges(PathPieces &pieces, SourceManager &SM,
// This is important for nested logical expressions (||, &&, ?:) where we
// want to show all the levels of context.
while (true) {
- const Stmt *Dst = getLocStmt(Piece->getEndLocation());
+ const Stmt *Dst = Piece->getEndLocation().getStmtOrNull();
// We are looking at an edge. Is the destination within a larger
// expression?
@@ -2046,9 +1386,11 @@ static void addContextEdges(PathPieces &pieces, SourceManager &SM,
auto *PrevPiece = dyn_cast<PathDiagnosticControlFlowPiece>(Prev->get());
if (PrevPiece) {
- if (const Stmt *PrevSrc = getLocStmt(PrevPiece->getStartLocation())) {
+ if (const Stmt *PrevSrc =
+ PrevPiece->getStartLocation().getStmtOrNull()) {
const Stmt *PrevSrcParent = getStmtParent(PrevSrc, PM);
- if (PrevSrcParent == getStmtParent(getLocStmt(DstContext), PM)) {
+ if (PrevSrcParent ==
+ getStmtParent(DstContext.getStmtOrNull(), PM)) {
PrevPiece->setEndLocation(DstContext);
break;
}
@@ -2067,7 +1409,7 @@ static void addContextEdges(PathPieces &pieces, SourceManager &SM,
}
}
-/// \brief Move edges from a branch condition to a branch target
+/// Move edges from a branch condition to a branch target
/// when the condition is simple.
///
/// This restructures some of the work of addContextEdges. That function
@@ -2077,17 +1419,15 @@ static void addContextEdges(PathPieces &pieces, SourceManager &SM,
/// the branch to the branch condition, and (3) an edge from the branch
/// condition to the branch target. We keep (1), but may wish to remove (2)
/// and move the source of (3) to the branch if the branch condition is simple.
-///
static void simplifySimpleBranches(PathPieces &pieces) {
for (PathPieces::iterator I = pieces.begin(), E = pieces.end(); I != E; ++I) {
-
- auto *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
+ const auto *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
if (!PieceI)
continue;
- const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
- const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+ const Stmt *s1Start = PieceI->getStartLocation().getStmtOrNull();
+ const Stmt *s1End = PieceI->getEndLocation().getStmtOrNull();
if (!s1Start || !s1End)
continue;
@@ -2102,7 +1442,7 @@ static void simplifySimpleBranches(PathPieces &pieces) {
if (NextI == E)
break;
- auto *EV = dyn_cast<PathDiagnosticEventPiece>(NextI->get());
+ const auto *EV = dyn_cast<PathDiagnosticEventPiece>(NextI->get());
if (EV) {
StringRef S = EV->getString();
if (S == StrEnteringLoop || S == StrLoopBodyZero ||
@@ -2120,8 +1460,8 @@ static void simplifySimpleBranches(PathPieces &pieces) {
if (!PieceNextI)
continue;
- const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
- const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+ const Stmt *s2Start = PieceNextI->getStartLocation().getStmtOrNull();
+ const Stmt *s2End = PieceNextI->getEndLocation().getStmtOrNull();
if (!s2Start || !s2End || s1End != s2Start)
continue;
@@ -2152,7 +1492,7 @@ static void simplifySimpleBranches(PathPieces &pieces) {
static Optional<size_t> getLengthOnSingleLine(SourceManager &SM,
SourceRange Range) {
SourceRange ExpansionRange(SM.getExpansionLoc(Range.getBegin()),
- SM.getExpansionRange(Range.getEnd()).second);
+ SM.getExpansionRange(Range.getEnd()).getEnd());
FileID FID = SM.getFileID(ExpansionRange.getBegin());
if (FID != SM.getFileID(ExpansionRange.getEnd()))
@@ -2204,22 +1544,21 @@ static void removeContextCycles(PathPieces &Path, SourceManager &SM,
ParentMap &PM) {
for (PathPieces::iterator I = Path.begin(), E = Path.end(); I != E; ) {
// Pattern match the current piece and its successor.
- PathDiagnosticControlFlowPiece *PieceI =
- dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
+ const auto *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
if (!PieceI) {
++I;
continue;
}
- const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
- const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+ const Stmt *s1Start = PieceI->getStartLocation().getStmtOrNull();
+ const Stmt *s1End = PieceI->getEndLocation().getStmtOrNull();
PathPieces::iterator NextI = I; ++NextI;
if (NextI == E)
break;
- PathDiagnosticControlFlowPiece *PieceNextI =
+ const auto *PieceNextI =
dyn_cast<PathDiagnosticControlFlowPiece>(NextI->get());
if (!PieceNextI) {
@@ -2236,8 +1575,8 @@ static void removeContextCycles(PathPieces &Path, SourceManager &SM,
}
}
- const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
- const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+ const Stmt *s2Start = PieceNextI->getStartLocation().getStmtOrNull();
+ const Stmt *s2End = PieceNextI->getEndLocation().getStmtOrNull();
if (s1Start && s2Start && s1Start == s2End && s2Start == s1End) {
const size_t MAX_SHORT_LINE_LENGTH = 80;
@@ -2256,10 +1595,8 @@ static void removeContextCycles(PathPieces &Path, SourceManager &SM,
}
}
-/// \brief Return true if X is contained by Y.
-static bool lexicalContains(ParentMap &PM,
- const Stmt *X,
- const Stmt *Y) {
+/// Return true if X is contained by Y.
+static bool lexicalContains(ParentMap &PM, const Stmt *X, const Stmt *Y) {
while (X) {
if (X == Y)
return true;
@@ -2269,24 +1606,21 @@ static bool lexicalContains(ParentMap &PM,
}
// Remove short edges on the same line less than 3 columns in difference.
-static void removePunyEdges(PathPieces &path,
- SourceManager &SM,
+static void removePunyEdges(PathPieces &path, SourceManager &SM,
ParentMap &PM) {
-
bool erased = false;
for (PathPieces::iterator I = path.begin(), E = path.end(); I != E;
erased ? I : ++I) {
-
erased = false;
- auto *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
+ const auto *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
if (!PieceI)
continue;
- const Stmt *start = getLocStmt(PieceI->getStartLocation());
- const Stmt *end = getLocStmt(PieceI->getEndLocation());
+ const Stmt *start = PieceI->getStartLocation().getStmtOrNull();
+ const Stmt *end = PieceI->getEndLocation().getStmtOrNull();
if (!start || !end)
continue;
@@ -2327,7 +1661,7 @@ static void removePunyEdges(PathPieces &path,
static void removeIdenticalEvents(PathPieces &path) {
for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ++I) {
- auto *PieceI = dyn_cast<PathDiagnosticEventPiece>(I->get());
+ const auto *PieceI = dyn_cast<PathDiagnosticEventPiece>(I->get());
if (!PieceI)
continue;
@@ -2336,7 +1670,7 @@ static void removeIdenticalEvents(PathPieces &path) {
if (NextI == E)
return;
- auto *PieceNextI = dyn_cast<PathDiagnosticEventPiece>(NextI->get());
+ const auto *PieceNextI = dyn_cast<PathDiagnosticEventPiece>(NextI->get());
if (!PieceNextI)
continue;
@@ -2377,8 +1711,8 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
continue;
}
- const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
- const Stmt *s1End = getLocStmt(PieceI->getEndLocation());
+ const Stmt *s1Start = PieceI->getStartLocation().getStmtOrNull();
+ const Stmt *s1End = PieceI->getEndLocation().getStmtOrNull();
const Stmt *level1 = getStmtParent(s1Start, PM);
const Stmt *level2 = getStmtParent(s1End, PM);
@@ -2386,15 +1720,15 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
if (NextI == E)
break;
- auto *PieceNextI = dyn_cast<PathDiagnosticControlFlowPiece>(NextI->get());
+ const auto *PieceNextI = dyn_cast<PathDiagnosticControlFlowPiece>(NextI->get());
if (!PieceNextI) {
++I;
continue;
}
- const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
- const Stmt *s2End = getLocStmt(PieceNextI->getEndLocation());
+ const Stmt *s2Start = PieceNextI->getStartLocation().getStmtOrNull();
+ const Stmt *s2End = PieceNextI->getEndLocation().getStmtOrNull();
const Stmt *level3 = getStmtParent(s2Start, PM);
const Stmt *level4 = getStmtParent(s2End, PM);
@@ -2412,7 +1746,6 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
//
// NOTE: this will be limited later in cases where we add barriers
// to prevent this optimization.
- //
if (level1 && level1 == level2 && level1 == level3 && level1 == level4) {
PieceI->setEndLocation(PieceNextI->getEndLocation());
path.erase(NextI);
@@ -2427,7 +1760,6 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
//
// NOTE: this will be limited later in cases where we add barriers
// to prevent this optimization.
- //
if (s1End && s1End == s2Start && level2) {
bool removeEdge = false;
// Remove edges into the increment or initialization of a
@@ -2493,8 +1825,7 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
//
// (X -> element)
if (s1End == s2Start) {
- const ObjCForCollectionStmt *FS =
- dyn_cast_or_null<ObjCForCollectionStmt>(level3);
+ const auto *FS = dyn_cast_or_null<ObjCForCollectionStmt>(level3);
if (FS && FS->getCollection()->IgnoreParens() == s2Start &&
s2End == FS->getElement()) {
PieceI->setEndLocation(PieceNextI->getEndLocation());
@@ -2532,8 +1863,7 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
/// statement had an invalid source location), this function does nothing.
// FIXME: We should just generate invalid edges anyway and have the optimizer
// deal with them.
-static void dropFunctionEntryEdge(PathPieces &Path,
- LocationContextMap &LCM,
+static void dropFunctionEntryEdge(PathPieces &Path, LocationContextMap &LCM,
SourceManager &SM) {
const auto *FirstEdge =
dyn_cast<PathDiagnosticControlFlowPiece>(Path.front().get());
@@ -2548,11 +1878,134 @@ static void dropFunctionEntryEdge(PathPieces &Path,
Path.pop_front();
}
+using VisitorsDiagnosticsTy = llvm::DenseMap<const ExplodedNode *,
+ std::vector<std::shared_ptr<PathDiagnosticPiece>>>;
+
+/// This function is responsible for generating diagnostic pieces that are
+/// *not* provided by bug report visitors.
+/// These diagnostics may differ depending on the consumer's settings,
+/// and are therefore constructed separately for each consumer.
+///
+/// There are two path diagnostic generation modes: one that adds edges (used
+/// for plists) and one that does not (used for HTML and text).
+/// When edges are added (\p ActiveScheme is Extensive), the path is modified
+/// to insert artificially generated edges.
+/// Otherwise, more detailed diagnostics are emitted for block edges,
+/// explaining the transitions in words.
+static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
+ PathDiagnosticConsumer::PathGenerationScheme ActiveScheme,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *ErrorNode,
+ const VisitorsDiagnosticsTy &VisitorsDiagnostics) {
+
+ bool GenerateDiagnostics = (ActiveScheme != PathDiagnosticConsumer::None);
+ bool AddPathEdges = (ActiveScheme == PathDiagnosticConsumer::Extensive);
+ SourceManager &SM = PDB.getSourceManager();
+ BugReport *R = PDB.getBugReport();
+ AnalyzerOptions &Opts = PDB.getBugReporter().getAnalyzerOptions();
+ StackDiagVector CallStack;
+ InterestingExprs IE;
+ LocationContextMap LCM;
+ std::unique_ptr<PathDiagnostic> PD = generateEmptyDiagnosticForReport(R, SM);
+
+ if (GenerateDiagnostics) {
+ auto EndNotes = VisitorsDiagnostics.find(ErrorNode);
+ std::shared_ptr<PathDiagnosticPiece> LastPiece;
+ if (EndNotes != VisitorsDiagnostics.end()) {
+ assert(!EndNotes->second.empty());
+ LastPiece = EndNotes->second[0];
+ } else {
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, ErrorNode, *R);
+ }
+ PD->setEndOfPath(LastPiece);
+ }
+
+ PathDiagnosticLocation PrevLoc = PD->getLocation();
+ const ExplodedNode *NextNode = ErrorNode->getFirstPred();
+ while (NextNode) {
+ if (GenerateDiagnostics)
+ generatePathDiagnosticsForNode(
+ NextNode, *PD, PrevLoc, PDB, LCM, CallStack, IE, AddPathEdges);
+
+ auto VisitorNotes = VisitorsDiagnostics.find(NextNode);
+ NextNode = NextNode->getFirstPred();
+ if (!GenerateDiagnostics || VisitorNotes == VisitorsDiagnostics.end())
+ continue;
+
+ // This is a workaround for our inability to put a shared
+ // PathDiagnosticPiece into a FoldingSet.
+ std::set<llvm::FoldingSetNodeID> DeduplicationSet;
+
+ // Add pieces from custom visitors.
+ for (const auto &Note : VisitorNotes->second) {
+ llvm::FoldingSetNodeID ID;
+ Note->Profile(ID);
+ auto P = DeduplicationSet.insert(ID);
+ if (!P.second)
+ continue;
+
+ if (AddPathEdges)
+ addEdgeToPath(PD->getActivePath(), PrevLoc, Note->getLocation(),
+ PDB.LC);
+ updateStackPiecesWithMessage(*Note, CallStack);
+ PD->getActivePath().push_front(Note);
+ }
+ }
+
+ if (AddPathEdges) {
+ // Add an edge to the start of the function.
+ // We'll prune it out later, but it helps make diagnostics more uniform.
+ const StackFrameContext *CalleeLC = PDB.LC->getStackFrame();
+ const Decl *D = CalleeLC->getDecl();
+ addEdgeToPath(PD->getActivePath(), PrevLoc,
+ PathDiagnosticLocation::createBegin(D, SM), CalleeLC);
+ }
+
+ if (!AddPathEdges && GenerateDiagnostics)
+ CompactPathDiagnostic(PD->getMutablePieces(), SM);
+
+ // Finally, prune the diagnostic path of uninteresting stuff.
+ if (!PD->path.empty()) {
+ if (R->shouldPrunePath() && Opts.shouldPrunePaths()) {
+ bool stillHasNotes =
+ removeUnneededCalls(PD->getMutablePieces(), R, LCM);
+ assert(stillHasNotes);
+ (void)stillHasNotes;
+ }
+
+ // Redirect all call pieces to have valid locations.
+ adjustCallLocations(PD->getMutablePieces());
+ removePiecesWithInvalidLocations(PD->getMutablePieces());
+
+ if (AddPathEdges) {
+
+ // Reduce the number of edges from a very conservative set
+ // to an aesthetically pleasing subset that conveys the
+ // necessary information.
+ OptimizedCallsSet OCS;
+ while (optimizeEdges(PD->getMutablePieces(), SM, OCS, LCM)) {}
+
+ // Drop the very first function-entry edge. It's not really necessary
+ // for top-level functions.
+ dropFunctionEntryEdge(PD->getMutablePieces(), LCM, SM);
+ }
+
+ // Remove messages that are basically the same, and edges that may not
+ // make sense.
+ // We have to do this after edge optimization in the Extensive mode.
+ removeRedundantMsgs(PD->getMutablePieces());
+ removeEdgesToDefaultInitializers(PD->getMutablePieces());
+ }
+ return PD;
+}
+
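A minimal sketch of how this function is driven per consumer; it mirrors the
loop in generatePathDiagnostics further below, with Reporter, R, ErrorGraph,
Out, and VisitorNotes assumed from that context:

for (PathDiagnosticConsumer *PC : consumers) {
  // One builder per consumer, since each consumer has its own settings.
  PathDiagnosticBuilder PDB(Reporter, R, ErrorGraph.BackMap, PC);
  (*Out)[PC] = generatePathDiagnosticForConsumer(
      PC->getGenerationScheme(), PDB, ErrorGraph.ErrorNode, VisitorNotes);
}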
//===----------------------------------------------------------------------===//
// Methods for BugType and subclasses.
//===----------------------------------------------------------------------===//
-void BugType::anchor() { }
+
+void BugType::anchor() {}
void BugType::FlushReports(BugReporter &BR) {}
@@ -2570,14 +2023,17 @@ void BugReport::addVisitor(std::unique_ptr<BugReporterVisitor> visitor) {
llvm::FoldingSetNodeID ID;
visitor->Profile(ID);
- void *InsertPos;
- if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos))
+ void *InsertPos = nullptr;
+ if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) {
return;
+ }
- CallbacksSet.InsertNode(visitor.get(), InsertPos);
Callbacks.push_back(std::move(visitor));
- ++ConfigurationChangeToken;
+}
+
+void BugReport::clearVisitors() {
+ Callbacks.clear();
}
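A short usage sketch of the deduplication above; the visitor type is taken
from this patch, but the call sequence is illustrative:

R->addVisitor(llvm::make_unique<ConditionBRVisitor>());
R->addVisitor(llvm::make_unique<ConditionBRVisitor>()); // same Profile: dropped
R->clearVisitors(); // removes all callbacks; a fresh set can be registered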
BugReport::~BugReport() {
@@ -2595,7 +2051,7 @@ const Decl *BugReport::getDeclWithIssue() const {
return nullptr;
const LocationContext *LC = N->getLocationContext();
- return LC->getCurrentStackFrame()->getDecl();
+ return LC->getStackFrame()->getDecl();
}
void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
@@ -2623,11 +2079,9 @@ void BugReport::markInteresting(SymbolRef sym) {
if (!sym)
return;
- // If the symbol wasn't already in our set, note a configuration change.
- if (getInterestingSymbols().insert(sym).second)
- ++ConfigurationChangeToken;
+ getInterestingSymbols().insert(sym);
- if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym))
+ if (const auto *meta = dyn_cast<SymbolMetadata>(sym))
getInterestingRegions().insert(meta->getRegion());
}
@@ -2635,12 +2089,10 @@ void BugReport::markInteresting(const MemRegion *R) {
if (!R)
return;
- // If the base region wasn't already in our set, note a configuration change.
R = R->getBaseRegion();
- if (getInterestingRegions().insert(R).second)
- ++ConfigurationChangeToken;
+ getInterestingRegions().insert(R);
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R))
getInterestingSymbols().insert(SR->getSymbol());
}
@@ -2674,7 +2126,7 @@ bool BugReport::isInteresting(const MemRegion *R) {
bool b = getInterestingRegions().count(R);
if (b)
return true;
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R))
return getInterestingSymbols().count(SR->getSymbol());
return false;
}
@@ -2734,7 +2186,7 @@ llvm::iterator_range<BugReport::ranges_iterator> BugReport::getRanges() {
// If no custom ranges, add the range of the statement corresponding to
// the error node.
if (Ranges.empty()) {
- if (const Expr *E = dyn_cast_or_null<Expr>(getStmt()))
+ if (const auto *E = dyn_cast_or_null<Expr>(getStmt()))
addRange(E->getSourceRange());
else
return llvm::make_range(ranges_iterator(), ranges_iterator());
@@ -2762,9 +2214,11 @@ PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
// Methods for BugReporter and subclasses.
//===----------------------------------------------------------------------===//
-BugReportEquivClass::~BugReportEquivClass() { }
-GRBugReporter::~GRBugReporter() { }
-BugReporterData::~BugReporterData() {}
+BugReportEquivClass::~BugReportEquivClass() = default;
+
+GRBugReporter::~GRBugReporter() = default;
+
+BugReporterData::~BugReporterData() = default;
ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
@@ -2775,11 +2229,8 @@ BugReporter::~BugReporter() {
FlushReports();
// Free the bug reports we are tracking.
- typedef std::vector<BugReportEquivClass *> ContTy;
- for (ContTy::iterator I = EQClassesVector.begin(), E = EQClassesVector.end();
- I != E; ++I) {
- delete *I;
- }
+ for (const auto I : EQClassesVector)
+ delete I;
}
void BugReporter::FlushReports() {
@@ -2791,18 +2242,13 @@ void BugReporter::FlushReports() {
// FIXME: Only NSErrorChecker needs BugType's FlushReports.
// Turn NSErrorChecker into a proper checker and remove this.
SmallVector<const BugType *, 16> bugTypes(BugTypes.begin(), BugTypes.end());
- for (SmallVectorImpl<const BugType *>::iterator
- I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I)
- const_cast<BugType*>(*I)->FlushReports(*this);
+ for (const auto I : bugTypes)
+ const_cast<BugType*>(I)->FlushReports(*this);
// We need to flush reports in deterministic order to ensure the order
// of the reports is consistent between runs.
- typedef std::vector<BugReportEquivClass *> ContVecTy;
- for (ContVecTy::iterator EI=EQClassesVector.begin(), EE=EQClassesVector.end();
- EI != EE; ++EI){
- BugReportEquivClass& EQ = **EI;
- FlushReport(EQ);
- }
+ for (const auto EQ : EQClassesVector)
+ FlushReport(*EQ);
// BugReporter owns and deletes only BugTypes created implicitly through
// EmitBasicReport.
@@ -2819,6 +2265,7 @@ void BugReporter::FlushReports() {
//===----------------------------------------------------------------------===//
namespace {
+
/// A wrapper around a report graph, which contains only a single path, and its
/// node maps.
class ReportGraph {
@@ -2833,10 +2280,12 @@ public:
class TrimmedGraph {
InterExplodedGraphMap InverseMap;
- typedef llvm::DenseMap<const ExplodedNode *, unsigned> PriorityMapTy;
+ using PriorityMapTy = llvm::DenseMap<const ExplodedNode *, unsigned>;
+
PriorityMapTy PriorityMap;
- typedef std::pair<const ExplodedNode *, size_t> NodeIndexPair;
+ using NodeIndexPair = std::pair<const ExplodedNode *, size_t>;
+
SmallVector<NodeIndexPair, 32> ReportNodes;
std::unique_ptr<ExplodedGraph> G;
@@ -2874,7 +2323,8 @@ public:
bool popNextReportGraph(ReportGraph &GraphWrapper);
};
-}
+
+} // namespace
TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
ArrayRef<const ExplodedNode *> Nodes) {
@@ -2930,8 +2380,8 @@ TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
}
// Sort the error paths from longest to shortest.
- std::sort(ReportNodes.begin(), ReportNodes.end(),
- PriorityCompare<true>(PriorityMap));
+ llvm::sort(ReportNodes.begin(), ReportNodes.end(),
+ PriorityCompare<true>(PriorityMap));
}
bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
@@ -2987,23 +2437,21 @@ bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
return true;
}
-
/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
/// and collapses PathDiagnosticPieces that are expanded by macros.
static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
- typedef std::vector<
- std::pair<std::shared_ptr<PathDiagnosticMacroPiece>, SourceLocation>>
- MacroStackTy;
+ using MacroStackTy =
+ std::vector<
+ std::pair<std::shared_ptr<PathDiagnosticMacroPiece>, SourceLocation>>;
- typedef std::vector<std::shared_ptr<PathDiagnosticPiece>> PiecesTy;
+ using PiecesTy = std::vector<std::shared_ptr<PathDiagnosticPiece>>;
MacroStackTy MacroStack;
PiecesTy Pieces;
for (PathPieces::const_iterator I = path.begin(), E = path.end();
- I!=E; ++I) {
-
- auto &piece = *I;
+ I != E; ++I) {
+ const auto &piece = *I;
// Recursively compact calls.
if (auto *call = dyn_cast<PathDiagnosticCallPiece>(&*piece)) {
@@ -3082,43 +2530,69 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
path.insert(path.end(), Pieces.begin(), Pieces.end());
}
-bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
- PathDiagnosticConsumer &PC,
- ArrayRef<BugReport *> &bugReports) {
- assert(!bugReports.empty());
+/// Generate notes from all visitors.
+/// Notes associated with \p ErrorNode are generated using \c getEndPath,
+/// and the rest are generated with \c VisitNode.
+static std::unique_ptr<VisitorsDiagnosticsTy>
+generateVisitorsDiagnostics(BugReport *R, const ExplodedNode *ErrorNode,
+ BugReporterContext &BRC) {
+ auto Notes = llvm::make_unique<VisitorsDiagnosticsTy>();
+ BugReport::VisitorList visitors;
- bool HasValid = false;
- bool HasInvalid = false;
- SmallVector<const ExplodedNode *, 32> errorNodes;
- for (ArrayRef<BugReport*>::iterator I = bugReports.begin(),
- E = bugReports.end(); I != E; ++I) {
- if ((*I)->isValid()) {
- HasValid = true;
- errorNodes.push_back((*I)->getErrorNode());
- } else {
- // Keep the errorNodes list in sync with the bugReports list.
- HasInvalid = true;
- errorNodes.push_back(nullptr);
+ // Run visitors on all nodes starting from the node *before* the last one.
+ // The last node is reserved for notes generated with \c getEndPath.
+ const ExplodedNode *NextNode = ErrorNode->getFirstPred();
+ while (NextNode) {
+
+ // At each iteration, move all visitors from report to visitor list.
+ for (BugReport::visitor_iterator I = R->visitor_begin(),
+ E = R->visitor_end();
+ I != E; ++I) {
+ visitors.push_back(std::move(*I));
}
- }
+ R->clearVisitors();
- // If all the reports have been marked invalid by a previous path generation,
- // we're done.
- if (!HasValid)
- return false;
+ const ExplodedNode *Pred = NextNode->getFirstPred();
+ if (!Pred) {
+ std::shared_ptr<PathDiagnosticPiece> LastPiece;
+ for (auto &V : visitors) {
+ V->finalizeVisitor(BRC, ErrorNode, *R);
- typedef PathDiagnosticConsumer::PathGenerationScheme PathGenerationScheme;
- PathGenerationScheme ActiveScheme = PC.getGenerationScheme();
+ if (auto Piece = V->getEndPath(BRC, ErrorNode, *R)) {
+ assert(!LastPiece &&
+ "There can only be one final piece in a diagnostic.");
+ LastPiece = std::move(Piece);
+ (*Notes)[ErrorNode].push_back(LastPiece);
+ }
+ }
+ break;
+ }
- if (ActiveScheme == PathDiagnosticConsumer::Extensive) {
- AnalyzerOptions &options = getAnalyzerOptions();
- if (options.getBooleanOption("path-diagnostics-alternate", true)) {
- ActiveScheme = PathDiagnosticConsumer::AlternateExtensive;
+ for (auto &V : visitors) {
+ auto P = V->VisitNode(NextNode, Pred, BRC, *R);
+ if (P)
+ (*Notes)[NextNode].push_back(std::move(P));
}
+
+ if (!R->isValid())
+ break;
+
+ NextNode = Pred;
}
- TrimmedGraph TrimG(&getGraph(), errorNodes);
- ReportGraph ErrorGraph;
+ return Notes;
+}
+
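For reference, a minimal visitor that this loop can drive; the class name and
its no-op behavior are illustrative, while the overridden signatures match
BugReporterVisitor as used elsewhere in this patch:

class ExampleVisitor final : public BugReporterVisitor {
public:
  void Profile(llvm::FoldingSetNodeID &ID) const override {
    static int Tag = 0;
    ID.AddPointer(&Tag); // identity used for deduplication in addVisitor()
  }
  std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
                                                 const ExplodedNode *PrevN,
                                                 BugReporterContext &BRC,
                                                 BugReport &BR) override {
    return nullptr; // return a piece to attach a note at node N
  }
};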
+/// Find a non-invalidated report for a given equivalence class,
+/// and return it together with a cache of visitor notes.
+/// If none is found, return a null report paired with an empty cache.
+static
+std::pair<BugReport*, std::unique_ptr<VisitorsDiagnosticsTy>> findValidReport(
+ TrimmedGraph &TrimG,
+ ReportGraph &ErrorGraph,
+ ArrayRef<BugReport *> &bugReports,
+ AnalyzerOptions &Opts,
+ GRBugReporter &Reporter) {
while (TrimG.popNextReportGraph(ErrorGraph)) {
// Find the BugReport with the original location.
@@ -3126,125 +2600,85 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
BugReport *R = bugReports[ErrorGraph.Index];
assert(R && "No original report found for sliced graph.");
assert(R->isValid() && "Report selected by trimmed graph marked invalid.");
+ const ExplodedNode *ErrorNode = ErrorGraph.ErrorNode;
- // Start building the path diagnostic...
- PathDiagnosticBuilder PDB(*this, R, ErrorGraph.BackMap, &PC);
- const ExplodedNode *N = ErrorGraph.ErrorNode;
+ // Register refutation visitors first; if they mark the bug invalid,
+ // no further analysis is required.
+ R->addVisitor(llvm::make_unique<LikelyFalsePositiveSuppressionBRVisitor>());
// Register additional node visitors.
R->addVisitor(llvm::make_unique<NilReceiverBRVisitor>());
R->addVisitor(llvm::make_unique<ConditionBRVisitor>());
- R->addVisitor(llvm::make_unique<LikelyFalsePositiveSuppressionBRVisitor>());
R->addVisitor(llvm::make_unique<CXXSelfAssignmentBRVisitor>());
- BugReport::VisitorList visitors;
- unsigned origReportConfigToken, finalReportConfigToken;
- LocationContextMap LCM;
-
- // While generating diagnostics, it's possible the visitors will decide
- // new symbols and regions are interesting, or add other visitors based on
- // the information they find. If they do, we need to regenerate the path
- // based on our new report configuration.
- do {
- // Get a clean copy of all the visitors.
- for (BugReport::visitor_iterator I = R->visitor_begin(),
- E = R->visitor_end(); I != E; ++I)
- visitors.push_back((*I)->clone());
-
- // Clear out the active path from any previous work.
- PD.resetPath();
- origReportConfigToken = R->getConfigurationChangeToken();
-
- // Generate the very last diagnostic piece - the piece is visible before
- // the trace is expanded.
- std::unique_ptr<PathDiagnosticPiece> LastPiece;
- for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
- I != E; ++I) {
- if (std::unique_ptr<PathDiagnosticPiece> Piece =
- (*I)->getEndPath(PDB, N, *R)) {
- assert (!LastPiece &&
- "There can only be one final piece in a diagnostic.");
- LastPiece = std::move(Piece);
- }
- }
+ BugReporterContext BRC(Reporter, ErrorGraph.BackMap);
- if (ActiveScheme != PathDiagnosticConsumer::None) {
- if (!LastPiece)
- LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
- assert(LastPiece);
- PD.setEndOfPath(std::move(LastPiece));
- }
+ // Run all visitors on a given graph, once.
+ std::unique_ptr<VisitorsDiagnosticsTy> visitorNotes =
+ generateVisitorsDiagnostics(R, ErrorNode, BRC);
- // Make sure we get a clean location context map so we don't
- // hold onto old mappings.
- LCM.clear();
+ if (R->isValid()) {
+ if (Opts.shouldCrosscheckWithZ3()) {
+ // If crosscheck is enabled, remove all visitors, add the refutation
+ // visitor, and check again.
+ R->clearVisitors();
+ R->addVisitor(llvm::make_unique<FalsePositiveRefutationBRVisitor>());
- switch (ActiveScheme) {
- case PathDiagnosticConsumer::AlternateExtensive:
- GenerateAlternateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
- break;
- case PathDiagnosticConsumer::Extensive:
- GenerateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
- break;
- case PathDiagnosticConsumer::Minimal:
- GenerateMinimalPathDiagnostic(PD, PDB, N, LCM, visitors);
- break;
- case PathDiagnosticConsumer::None:
- GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors);
- break;
+ // We don't overwrite the notes inserted by other visitors because the
+ // refutation visitor does not add any new notes to the path.
+ generateVisitorsDiagnostics(R, ErrorGraph.ErrorNode, BRC);
}
- // Clean up the visitors we used.
- visitors.clear();
-
- // Did anything change while generating this path?
- finalReportConfigToken = R->getConfigurationChangeToken();
- } while (finalReportConfigToken != origReportConfigToken);
-
- if (!R->isValid())
- continue;
-
- // Finally, prune the diagnostic path of uninteresting stuff.
- if (!PD.path.empty()) {
- if (R->shouldPrunePath() && getAnalyzerOptions().shouldPrunePaths()) {
- bool stillHasNotes = removeUnneededCalls(PD.getMutablePieces(), R, LCM);
- assert(stillHasNotes);
- (void)stillHasNotes;
- }
+ // Check if the bug is still valid
+ if (R->isValid())
+ return std::make_pair(R, std::move(visitorNotes));
+ }
+ }
- // Redirect all call pieces to have valid locations.
- adjustCallLocations(PD.getMutablePieces());
- removePiecesWithInvalidLocations(PD.getMutablePieces());
+ return std::make_pair(nullptr, llvm::make_unique<VisitorsDiagnosticsTy>());
+}
- if (ActiveScheme == PathDiagnosticConsumer::AlternateExtensive) {
- SourceManager &SM = getSourceManager();
+std::unique_ptr<DiagnosticForConsumerMapTy>
+GRBugReporter::generatePathDiagnostics(
+ ArrayRef<PathDiagnosticConsumer *> consumers,
+ ArrayRef<BugReport *> &bugReports) {
+ assert(!bugReports.empty());
- // Reduce the number of edges from a very conservative set
- // to an aesthetically pleasing subset that conveys the
- // necessary information.
- OptimizedCallsSet OCS;
- while (optimizeEdges(PD.getMutablePieces(), SM, OCS, LCM)) {}
+ auto Out = llvm::make_unique<DiagnosticForConsumerMapTy>();
+ bool HasValid = false;
+ SmallVector<const ExplodedNode *, 32> errorNodes;
+ for (const auto I : bugReports) {
+ if (I->isValid()) {
+ HasValid = true;
+ errorNodes.push_back(I->getErrorNode());
+ } else {
+ // Keep the errorNodes list in sync with the bugReports list.
+ errorNodes.push_back(nullptr);
+ }
+ }
- // Drop the very first function-entry edge. It's not really necessary
- // for top-level functions.
- dropFunctionEntryEdge(PD.getMutablePieces(), LCM, SM);
- }
+ // If all the reports have been marked invalid by a previous path generation,
+ // we're done.
+ if (!HasValid)
+ return Out;
- // Remove messages that are basically the same, and edges that may not
- // make sense.
- // We have to do this after edge optimization in the Extensive mode.
- removeRedundantMsgs(PD.getMutablePieces());
- removeEdgesToDefaultInitializers(PD.getMutablePieces());
+ TrimmedGraph TrimG(&getGraph(), errorNodes);
+ ReportGraph ErrorGraph;
+ auto ReportInfo = findValidReport(TrimG, ErrorGraph, bugReports,
+ getAnalyzerOptions(), *this);
+ BugReport *R = ReportInfo.first;
+
+ if (R && R->isValid()) {
+ const ExplodedNode *ErrorNode = ErrorGraph.ErrorNode;
+ for (PathDiagnosticConsumer *PC : consumers) {
+ PathDiagnosticBuilder PDB(*this, R, ErrorGraph.BackMap, PC);
+ std::unique_ptr<PathDiagnostic> PD = generatePathDiagnosticForConsumer(
+ PC->getGenerationScheme(), PDB, ErrorNode, *ReportInfo.second);
+ (*Out)[PC] = std::move(PD);
}
-
- // We found a report and didn't suppress it.
- return true;
}
- // We suppressed all the reports in this equivalence class.
- assert(!HasInvalid && "Inconsistent suppression");
- (void)HasInvalid;
- return false;
+ return Out;
}
void BugReporter::Register(BugType *BT) {
@@ -3294,20 +2728,21 @@ void BugReporter::emitReport(std::unique_ptr<BugReport> R) {
EQ->AddReport(std::move(R));
}
-
//===----------------------------------------------------------------------===//
// Emitting reports in equivalence classes.
//===----------------------------------------------------------------------===//
namespace {
+
struct FRIEC_WLItem {
const ExplodedNode *N;
ExplodedNode::const_succ_iterator I, E;
FRIEC_WLItem(const ExplodedNode *n)
- : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+ : N(n), I(N->succ_begin()), E(N->succ_end()) {}
};
-}
+
+} // namespace
static const CFGBlock *findBlockForNode(const ExplodedNode *N) {
ProgramPoint P = N->getLocation();
@@ -3397,7 +2832,6 @@ static bool isInevitablySinking(const ExplodedNode *N) {
static BugReport *
FindReportInEquivalenceClass(BugReportEquivClass& EQ,
SmallVectorImpl<BugReport*> &bugReports) {
-
BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
assert(I != E);
BugType& BT = I->getBugType();
@@ -3407,10 +2841,10 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
// to 'Nodes'. Any of the reports will serve as a "representative" report.
if (!BT.isSuppressOnSink()) {
BugReport *R = &*I;
- for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
- const ExplodedNode *N = I->getErrorNode();
+ for (auto &I : EQ) {
+ const ExplodedNode *N = I.getErrorNode();
if (N) {
- R = &*I;
+ R = &I;
bugReports.push_back(R);
}
}
@@ -3451,8 +2885,9 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
// At this point we know that 'N' is not a sink and it has at least one
// successor. Use a DFS worklist to find a non-sink end-of-path node.
- typedef FRIEC_WLItem WLItem;
- typedef SmallVector<WLItem, 10> DFSWorkList;
+ using WLItem = FRIEC_WLItem;
+ using DFSWorkList = SmallVector<WLItem, 10>;
+
llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
DFSWorkList WL;
@@ -3502,90 +2937,166 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
void BugReporter::FlushReport(BugReportEquivClass& EQ) {
SmallVector<BugReport*, 10> bugReports;
- BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
- if (exampleReport) {
- for (PathDiagnosticConsumer *PDC : getPathDiagnosticConsumers()) {
- FlushReport(exampleReport, *PDC, bugReports);
+ BugReport *report = FindReportInEquivalenceClass(EQ, bugReports);
+ if (!report)
+ return;
+
+ ArrayRef<PathDiagnosticConsumer*> Consumers = getPathDiagnosticConsumers();
+ std::unique_ptr<DiagnosticForConsumerMapTy> Diagnostics =
+ generateDiagnosticForConsumerMap(report, Consumers, bugReports);
+
+ for (auto &P : *Diagnostics) {
+ PathDiagnosticConsumer *Consumer = P.first;
+ std::unique_ptr<PathDiagnostic> &PD = P.second;
+
+ // If the path is empty, generate a single step path with the location
+ // of the issue.
+ if (PD->path.empty()) {
+ PathDiagnosticLocation L = report->getLocation(getSourceManager());
+ auto piece = llvm::make_unique<PathDiagnosticEventPiece>(
+ L, report->getDescription());
+ for (SourceRange Range : report->getRanges())
+ piece->addRange(Range);
+ PD->setEndOfPath(std::move(piece));
}
- }
-}
-void BugReporter::FlushReport(BugReport *exampleReport,
- PathDiagnosticConsumer &PD,
- ArrayRef<BugReport*> bugReports) {
+ PathPieces &Pieces = PD->getMutablePieces();
+ if (getAnalyzerOptions().shouldDisplayNotesAsEvents()) {
+ // For path diagnostic consumers that don't support extra notes,
+ // we may optionally convert those to path notes.
+ for (auto I = report->getNotes().rbegin(),
+ E = report->getNotes().rend(); I != E; ++I) {
+ PathDiagnosticNotePiece *Piece = I->get();
+ auto ConvertedPiece = std::make_shared<PathDiagnosticEventPiece>(
+ Piece->getLocation(), Piece->getString());
+ for (const auto &R: Piece->getRanges())
+ ConvertedPiece->addRange(R);
+
+ Pieces.push_front(std::move(ConvertedPiece));
+ }
+ } else {
+ for (auto I = report->getNotes().rbegin(),
+ E = report->getNotes().rend(); I != E; ++I)
+ Pieces.push_front(*I);
+ }
- // FIXME: Make sure we use the 'R' for the path that was actually used.
- // Probably doesn't make a difference in practice.
- BugType& BT = exampleReport->getBugType();
+ // Get the meta data.
+ const BugReport::ExtraTextList &Meta = report->getExtraText();
+ for (const auto &i : Meta)
+ PD->addMeta(i);
- std::unique_ptr<PathDiagnostic> D(new PathDiagnostic(
- exampleReport->getBugType().getCheckName(),
- exampleReport->getDeclWithIssue(), exampleReport->getBugType().getName(),
- exampleReport->getDescription(),
- exampleReport->getShortDescription(/*Fallback=*/false), BT.getCategory(),
- exampleReport->getUniqueingLocation(),
- exampleReport->getUniqueingDecl()));
+ Consumer->HandlePathDiagnostic(std::move(PD));
+ }
+}
- if (exampleReport->isPathSensitive()) {
- // Generate the full path diagnostic, using the generation scheme
- // specified by the PathDiagnosticConsumer. Note that we have to generate
- // path diagnostics even for consumers which do not support paths, because
- // the BugReporterVisitors may mark this bug as a false positive.
- assert(!bugReports.empty());
+/// Insert all lines participating in the function signature \p Signature
+/// into \p ExecutedLines.
+static void populateExecutedLinesWithFunctionSignature(
+ const Decl *Signature, SourceManager &SM,
+ std::unique_ptr<FilesToLineNumsMap> &ExecutedLines) {
+ SourceRange SignatureSourceRange;
+ const Stmt* Body = Signature->getBody();
+ if (const auto FD = dyn_cast<FunctionDecl>(Signature)) {
+ SignatureSourceRange = FD->getSourceRange();
+ } else if (const auto OD = dyn_cast<ObjCMethodDecl>(Signature)) {
+ SignatureSourceRange = OD->getSourceRange();
+ } else {
+ return;
+ }
+ SourceLocation Start = SignatureSourceRange.getBegin();
+ SourceLocation End = Body ? Body->getSourceRange().getBegin()
+ : SignatureSourceRange.getEnd();
+ unsigned StartLine = SM.getExpansionLineNumber(Start);
+ unsigned EndLine = SM.getExpansionLineNumber(End);
- MaxBugClassSize.updateMax(bugReports.size());
+ FileID FID = SM.getFileID(SM.getExpansionLoc(Start));
+ for (unsigned Line = StartLine; Line <= EndLine; Line++)
+ (*ExecutedLines)[FID.getHashValue()].insert(Line);
+}
- if (!generatePathDiagnostic(*D.get(), PD, bugReports))
- return;
+static void populateExecutedLinesWithStmt(
+ const Stmt *S, SourceManager &SM,
+ std::unique_ptr<FilesToLineNumsMap> &ExecutedLines) {
+ SourceLocation Loc = S->getSourceRange().getBegin();
+ SourceLocation ExpansionLoc = SM.getExpansionLoc(Loc);
+ FileID FID = SM.getFileID(ExpansionLoc);
+ unsigned LineNo = SM.getExpansionLineNumber(ExpansionLoc);
+ (*ExecutedLines)[FID.getHashValue()].insert(LineNo);
+}
- MaxValidBugClassSize.updateMax(bugReports.size());
+/// \return all executed lines including function signatures on the path
+/// starting from \p N.
+static std::unique_ptr<FilesToLineNumsMap>
+findExecutedLines(SourceManager &SM, const ExplodedNode *N) {
+ auto ExecutedLines = llvm::make_unique<FilesToLineNumsMap>();
- // Examine the report and see if the last piece is in a header. Reset the
- // report location to the last piece in the main source file.
- AnalyzerOptions &Opts = getAnalyzerOptions();
- if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
- D->resetDiagnosticLocationToMainFile();
- }
-
- // If the path is empty, generate a single step path with the location
- // of the issue.
- if (D->path.empty()) {
- PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager());
- auto piece = llvm::make_unique<PathDiagnosticEventPiece>(
- L, exampleReport->getDescription());
- for (SourceRange Range : exampleReport->getRanges())
- piece->addRange(Range);
- D->setEndOfPath(std::move(piece));
- }
-
- PathPieces &Pieces = D->getMutablePieces();
- if (getAnalyzerOptions().shouldDisplayNotesAsEvents()) {
- // For path diagnostic consumers that don't support extra notes,
- // we may optionally convert those to path notes.
- for (auto I = exampleReport->getNotes().rbegin(),
- E = exampleReport->getNotes().rend(); I != E; ++I) {
- PathDiagnosticNotePiece *Piece = I->get();
- auto ConvertedPiece = std::make_shared<PathDiagnosticEventPiece>(
- Piece->getLocation(), Piece->getString());
- for (const auto &R: Piece->getRanges())
- ConvertedPiece->addRange(R);
+ while (N) {
+ if (N->getFirstPred() == nullptr) {
+ // First node: show signature of the entrance point.
+ const Decl *D = N->getLocationContext()->getDecl();
+ populateExecutedLinesWithFunctionSignature(D, SM, ExecutedLines);
+ } else if (auto CE = N->getLocationAs<CallEnter>()) {
+ // Inlined function: show signature.
+ const Decl* D = CE->getCalleeContext()->getDecl();
+ populateExecutedLinesWithFunctionSignature(D, SM, ExecutedLines);
+ } else if (const Stmt *S = PathDiagnosticLocation::getStmt(N)) {
+ populateExecutedLinesWithStmt(S, SM, ExecutedLines);
+
+ // Show extra context for some parent kinds.
+ const Stmt *P = N->getParentMap().getParent(S);
+
+ // The path exploration can die before the node with the associated
+ // return statement is generated, but we do want to show the whole
+ // return.
+ if (const auto *RS = dyn_cast_or_null<ReturnStmt>(P)) {
+ populateExecutedLinesWithStmt(RS, SM, ExecutedLines);
+ P = N->getParentMap().getParent(RS);
+ }
- Pieces.push_front(std::move(ConvertedPiece));
+ if (P && (isa<SwitchCase>(P) || isa<LabelStmt>(P)))
+ populateExecutedLinesWithStmt(P, SM, ExecutedLines);
}
- } else {
- for (auto I = exampleReport->getNotes().rbegin(),
- E = exampleReport->getNotes().rend(); I != E; ++I)
- Pieces.push_front(*I);
+
+ N = N->getFirstPred();
}
+ return ExecutedLines;
+}
+
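A worked example of the traversal above, assuming a single-file analysis:

// 1  int f() {          // entrance point: signature line 1 recorded
// 2    int x = 0;       // statement node: line 2 recorded
// 3    return x;        // statement node: line 3 recorded
// 4  }
// Result: ExecutedLines maps the file's FileID hash to {1, 2, 3}.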
+std::unique_ptr<DiagnosticForConsumerMapTy>
+BugReporter::generateDiagnosticForConsumerMap(
+ BugReport *report, ArrayRef<PathDiagnosticConsumer *> consumers,
+ ArrayRef<BugReport *> bugReports) {
- // Get the meta data.
- const BugReport::ExtraTextList &Meta = exampleReport->getExtraText();
- for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
- e = Meta.end(); i != e; ++i) {
- D->addMeta(*i);
+ if (!report->isPathSensitive()) {
+ auto Out = llvm::make_unique<DiagnosticForConsumerMapTy>();
+ for (auto *Consumer : consumers)
+ (*Out)[Consumer] = generateEmptyDiagnosticForReport(report,
+ getSourceManager());
+ return Out;
}
- PD.HandlePathDiagnostic(std::move(D));
+ // Generate the full path sensitive diagnostic, using the generation scheme
+ // specified by the PathDiagnosticConsumer. Note that we have to generate
+ // path diagnostics even for consumers which do not support paths, because
+ // the BugReporterVisitors may mark this bug as a false positive.
+ assert(!bugReports.empty());
+ MaxBugClassSize.updateMax(bugReports.size());
+ std::unique_ptr<DiagnosticForConsumerMapTy> Out =
+ generatePathDiagnostics(consumers, bugReports);
+
+ if (Out->empty())
+ return Out;
+
+ MaxValidBugClassSize.updateMax(bugReports.size());
+
+ // Examine the report and see if the last piece is in a header. Reset the
+ // report location to the last piece in the main source file.
+ AnalyzerOptions &Opts = getAnalyzerOptions();
+ for (auto const &P : *Out)
+ if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
+ P.second->resetDiagnosticLocationToMainFile();
+
+ return Out;
}
void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
@@ -3596,12 +3107,12 @@ void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
EmitBasicReport(DeclWithIssue, Checker->getCheckName(), Name, Category, Str,
Loc, Ranges);
}
+
void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
CheckName CheckName,
StringRef name, StringRef category,
StringRef str, PathDiagnosticLocation Loc,
ArrayRef<SourceRange> Ranges) {
-
// 'BT' is owned by BugReporter.
BugType *BT = getBugTypeForName(CheckName, name, category);
auto R = llvm::make_unique<BugReport>(*BT, str, Loc);
@@ -3622,84 +3133,3 @@ BugType *BugReporter::getBugTypeForName(CheckName CheckName, StringRef name,
BT = new BugType(CheckName, name, category);
return BT;
}
-
-LLVM_DUMP_METHOD void PathPieces::dump() const {
- unsigned index = 0;
- for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
- llvm::errs() << "[" << index++ << "] ";
- (*I)->dump();
- llvm::errs() << "\n";
- }
-}
-
-LLVM_DUMP_METHOD void PathDiagnosticCallPiece::dump() const {
- llvm::errs() << "CALL\n--------------\n";
-
- if (const Stmt *SLoc = getLocStmt(getLocation()))
- SLoc->dump();
- else if (const NamedDecl *ND = dyn_cast<NamedDecl>(getCallee()))
- llvm::errs() << *ND << "\n";
- else
- getLocation().dump();
-}
-
-LLVM_DUMP_METHOD void PathDiagnosticEventPiece::dump() const {
- llvm::errs() << "EVENT\n--------------\n";
- llvm::errs() << getString() << "\n";
- llvm::errs() << " ---- at ----\n";
- getLocation().dump();
-}
-
-LLVM_DUMP_METHOD void PathDiagnosticControlFlowPiece::dump() const {
- llvm::errs() << "CONTROL\n--------------\n";
- getStartLocation().dump();
- llvm::errs() << " ---- to ----\n";
- getEndLocation().dump();
-}
-
-LLVM_DUMP_METHOD void PathDiagnosticMacroPiece::dump() const {
- llvm::errs() << "MACRO\n--------------\n";
- // FIXME: Print which macro is being invoked.
-}
-
-LLVM_DUMP_METHOD void PathDiagnosticNotePiece::dump() const {
- llvm::errs() << "NOTE\n--------------\n";
- llvm::errs() << getString() << "\n";
- llvm::errs() << " ---- at ----\n";
- getLocation().dump();
-}
-
-LLVM_DUMP_METHOD void PathDiagnosticLocation::dump() const {
- if (!isValid()) {
- llvm::errs() << "<INVALID>\n";
- return;
- }
-
- switch (K) {
- case RangeK:
- // FIXME: actually print the range.
- llvm::errs() << "<range>\n";
- break;
- case SingleLocK:
- asLocation().dump();
- llvm::errs() << "\n";
- break;
- case StmtK:
- if (S)
- S->dump();
- else
- llvm::errs() << "<NULL STMT>\n";
- break;
- case DeclK:
- if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(D))
- llvm::errs() << *ND << "\n";
- else if (isa<BlockDecl>(D))
- // FIXME: Make this nicer.
- llvm::errs() << "<block>\n";
- else if (D)
- llvm::errs() << "<unknown decl>\n";
- else
- llvm::errs() << "<NULL DECL>\n";
- break;
- }
-}
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 972f4c5f3da2..c87bc685d8b9 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -1,4 +1,4 @@
-// BugReporterVisitors.cpp - Helpers for reporting bugs -----------*- C++ -*--//
+//===- BugReporterVisitors.cpp - Helpers for reporting bugs ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,37 +11,83 @@
// enhance the diagnostics reported for a bug.
//
//===----------------------------------------------------------------------===//
+
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <deque>
+#include <memory>
+#include <string>
+#include <utility>
using namespace clang;
using namespace ento;
-using llvm::FoldingSetNodeID;
-
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
bool bugreporter::isDeclRefExprToReference(const Expr *E) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
return DRE->getDecl()->getType()->isReferenceType();
- }
return false;
}
+static const Expr *peelOffPointerArithmetic(const BinaryOperator *B) {
+ if (B->isAdditiveOp() && B->getType()->isPointerType()) {
+ if (B->getLHS()->getType()->isPointerType()) {
+ return B->getLHS();
+ } else if (B->getRHS()->getType()->isPointerType()) {
+ return B->getRHS();
+ }
+ }
+ return nullptr;
+}
+
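Illustrative cases for the helper above, where p and q are pointers and n and
m are integers:

// p + n  -> returns p        (additive op, pointer-typed result, pointer LHS)
// n + p  -> returns p        (pointer operand on the RHS)
// n + m  -> returns nullptr  (no pointer operand)
// p - q  -> returns nullptr  (pointer difference has integer type)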
/// Given that expression S represents a pointer that would be dereferenced,
/// try to find a sub-expression from which the pointer came.
/// This is used for tracking down origins of a null or undefined value:
@@ -55,33 +101,27 @@ bool bugreporter::isDeclRefExprToReference(const Expr *E) {
/// x->y.z ==> x (lvalue)
/// foo()->y.z ==> foo() (rvalue)
const Expr *bugreporter::getDerefExpr(const Stmt *S) {
- const Expr *E = dyn_cast<Expr>(S);
+ const auto *E = dyn_cast<Expr>(S);
if (!E)
return nullptr;
while (true) {
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (const auto *CE = dyn_cast<CastExpr>(E)) {
if (CE->getCastKind() == CK_LValueToRValue) {
// This cast represents the load we're looking for.
break;
}
E = CE->getSubExpr();
- } else if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E)) {
+ } else if (const auto *B = dyn_cast<BinaryOperator>(E)) {
// Pointer arithmetic: '*(x + 2)' -> 'x', etc.
- if (B->getType()->isPointerType()) {
- if (B->getLHS()->getType()->isPointerType()) {
- E = B->getLHS();
- } else if (B->getRHS()->getType()->isPointerType()) {
- E = B->getRHS();
- } else {
- break;
- }
+ if (const Expr *Inner = peelOffPointerArithmetic(B)) {
+ E = Inner;
} else {
// Probably more arithmetic can be pattern-matched here,
// but for now give up.
break;
}
- } else if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
+ } else if (const auto *U = dyn_cast<UnaryOperator>(E)) {
if (U->getOpcode() == UO_Deref || U->getOpcode() == UO_AddrOf ||
(U->isIncrementDecrementOp() && U->getType()->isPointerType())) {
// Operators '*' and '&' don't actually mean anything.
@@ -94,14 +134,16 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
}
}
// Pattern match for a few useful cases: a[0], p->f, *p etc.
- else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
E = ME->getBase();
- } else if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+ } else if (const auto *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
E = IvarRef->getBase();
- } else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(E)) {
+ } else if (const auto *AE = dyn_cast<ArraySubscriptExpr>(E)) {
E = AE->getBase();
- } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ } else if (const auto *PE = dyn_cast<ParenExpr>(E)) {
E = PE->getSubExpr();
+ } else if (const auto *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ E = EWC->getSubExpr();
} else {
// Other arbitrary stuff.
break;
@@ -111,7 +153,7 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
// Special case: remove the final lvalue-to-rvalue cast, but do not recurse
// deeper into the sub-expression. This way we return the lvalue from which
// our pointer rvalue was loaded.
- if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E))
+ if (const auto *CE = dyn_cast<ImplicitCastExpr>(E))
if (CE->getCastKind() == CK_LValueToRValue)
E = CE->getSubExpr();
@@ -120,14 +162,14 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
- if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+ if (const auto *BE = dyn_cast<BinaryOperator>(S))
return BE->getRHS();
return nullptr;
}
const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+ if (const auto *RS = dyn_cast<ReturnStmt>(S))
return RS->getRetValue();
return nullptr;
}
@@ -136,13 +178,18 @@ const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
// Definitions for bug reporter visitors.
//===----------------------------------------------------------------------===//
-std::unique_ptr<PathDiagnosticPiece>
+std::shared_ptr<PathDiagnosticPiece>
BugReporterVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *EndPathNode, BugReport &BR) {
return nullptr;
}
-std::unique_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
+void
+BugReporterVisitor::finalizeVisitor(BugReporterContext &BRC,
+ const ExplodedNode *EndPathNode,
+ BugReport &BR) {}
+
+std::shared_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
PathDiagnosticLocation L =
PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
@@ -151,16 +198,465 @@ std::unique_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
// Only add the statement itself as a range if we didn't specify any
// special ranges for this report.
- auto P = llvm::make_unique<PathDiagnosticEventPiece>(
+ auto P = std::make_shared<PathDiagnosticEventPiece>(
L, BR.getDescription(), Ranges.begin() == Ranges.end());
for (SourceRange Range : Ranges)
P->addRange(Range);
- return std::move(P);
+ return P;
+}
+
+/// \return The name of the macro expanded at location \p Loc.
+static StringRef getMacroName(SourceLocation Loc,
+ BugReporterContext &BRC) {
+ return Lexer::getImmediateMacroName(
+ Loc,
+ BRC.getSourceManager(),
+ BRC.getASTContext().getLangOpts());
+}
+
+/// \return Whether given spelling location corresponds to an expansion
+/// of a function-like macro.
+static bool isFunctionMacroExpansion(SourceLocation Loc,
+ const SourceManager &SM) {
+ if (!Loc.isMacroID())
+ return false;
+ while (SM.isMacroArgExpansion(Loc))
+ Loc = SM.getImmediateExpansionRange(Loc).getBegin();
+ std::pair<FileID, unsigned> TLInfo = SM.getDecomposedLoc(Loc);
+ SrcMgr::SLocEntry SE = SM.getSLocEntry(TLInfo.first);
+ const SrcMgr::ExpansionInfo &EInfo = SE.getExpansion();
+ return EInfo.isFunctionMacroExpansion();
+}
+
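Illustrative behavior of the check above:

// #define CHECK(x) ((x) ? (x) : 0)   // function-like macro
// #define LIMIT    42                // object-like macro
// A location spelled inside an expansion of CHECK yields true; one inside
// an expansion of LIMIT yields false, as does any non-macro location.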
+/// \return Whether \c RegionOfInterest was modified at \p N,
+/// where \p ValueAfter is the value of the region in the state
+/// associated with the return from the current frame.
+static bool wasRegionOfInterestModifiedAt(
+ const SubRegion *RegionOfInterest,
+ const ExplodedNode *N,
+ SVal ValueAfter) {
+ ProgramStateRef State = N->getState();
+ ProgramStateManager &Mgr = N->getState()->getStateManager();
+
+ if (!N->getLocationAs<PostStore>()
+ && !N->getLocationAs<PostInitializer>()
+ && !N->getLocationAs<PostStmt>())
+ return false;
+
+ // Writing into region of interest.
+ if (auto PS = N->getLocationAs<PostStmt>())
+ if (auto *BO = PS->getStmtAs<BinaryOperator>())
+ if (BO->isAssignmentOp() && RegionOfInterest->isSubRegionOf(
+ N->getSVal(BO->getLHS()).getAsRegion()))
+ return true;
+
+ // SVal after the state is possibly different.
+ SVal ValueAtN = N->getState()->getSVal(RegionOfInterest);
+ if (!Mgr.getSValBuilder().areEqual(State, ValueAtN, ValueAfter).isConstrainedTrue() &&
+ (!ValueAtN.isUndef() || !ValueAfter.isUndef()))
+ return true;
+
+ return false;
}
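+// Sketch of the two ways a node counts as modifying the region (the
+// statement below is illustrative, not from this patch): a PostStmt node
+// for `x = 0;` whose LHS region covers the region of interest, or any
+// node where the value bound to the region is no longer provably equal
+// to ValueAfter.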
namespace {
+
+/// Put a diagnostic on the return statement of each inlined function
+/// into which the region of interest \p RegionOfInterest was passed but
+/// never written, when that region has later caused an undefined read or
+/// a null pointer dereference outside the call.
+class NoStoreFuncVisitor final : public BugReporterVisitor {
+ const SubRegion *RegionOfInterest;
+ static constexpr const char *DiagnosticsMsg =
+ "Returning without writing to '";
+
+ /// Frames writing into \c RegionOfInterest.
+ /// This visitor generates a note only if a function does not write into
+ /// a region of interest. This information is not immediately available
+ /// by looking at the node associated with the exit from the function
+ /// (usually the return statement). To avoid recomputing the same information
+ /// many times (going up the path for each node and checking whether the
+ /// region was written into) we instead lazily compute the
+ /// stack frames along the path which write into the region of interest.
+ llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingRegion;
+ llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingCalculated;
+
+public:
+ NoStoreFuncVisitor(const SubRegion *R) : RegionOfInterest(R) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+ }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override {
+
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *SCtx = Ctx->getStackFrame();
+ ProgramStateRef State = N->getState();
+ auto CallExitLoc = N->getLocationAs<CallExitBegin>();
+
+ // Diagnostics are generated only at the point where a call exits.
+ if (!CallExitLoc)
+ return nullptr;
+
+ CallEventRef<> Call =
+ BRC.getStateManager().getCallEventManager().getCaller(SCtx, State);
+ const PrintingPolicy &PP = BRC.getASTContext().getPrintingPolicy();
+ const SourceManager &SM = BRC.getSourceManager();
+
+ // The region of interest corresponds to an ivar: we are exiting a method
+ // which could have written into that ivar, but did not.
+ if (const auto *MC = dyn_cast<ObjCMethodCall>(Call))
+ if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest))
+ if (potentiallyWritesIntoIvar(Call->getRuntimeDefinition().getDecl(),
+ IvarR->getDecl()) &&
+ !isRegionOfInterestModifiedInFrame(N))
+ return notModifiedMemberDiagnostics(
+ Ctx, SM, PP, *CallExitLoc, Call,
+ MC->getReceiverSVal().getAsRegion());
+
+ if (const auto *CCall = dyn_cast<CXXConstructorCall>(Call)) {
+ const MemRegion *ThisRegion = CCall->getCXXThisVal().getAsRegion();
+ if (RegionOfInterest->isSubRegionOf(ThisRegion)
+ && !CCall->getDecl()->isImplicit()
+ && !isRegionOfInterestModifiedInFrame(N))
+ return notModifiedMemberDiagnostics(Ctx, SM, PP, *CallExitLoc,
+ CCall, ThisRegion);
+ }
+
+ ArrayRef<ParmVarDecl *> parameters = getCallParameters(Call);
+ for (unsigned I = 0; I < Call->getNumArgs() && I < parameters.size(); ++I) {
+ const ParmVarDecl *PVD = parameters[I];
+ SVal S = Call->getArgSVal(I);
+ unsigned IndirectionLevel = 1;
+ QualType T = PVD->getType();
+ while (const MemRegion *R = S.getAsRegion()) {
+ if (RegionOfInterest->isSubRegionOf(R)
+ && !isPointerToConst(PVD->getType())) {
+
+ if (isRegionOfInterestModifiedInFrame(N))
+ return nullptr;
+
+ return notModifiedParameterDiagnostics(
+ Ctx, SM, PP, *CallExitLoc, Call, PVD, R, IndirectionLevel);
+ }
+ QualType PT = T->getPointeeType();
+ if (PT.isNull() || PT->isVoidType()) break;
+ S = State->getSVal(R, PT);
+ T = PT;
+ IndirectionLevel++;
+ }
+ }
+
+ return nullptr;
+ }
+
+private:
+
+ /// \return Whether the method declaration \p Parent
+ /// syntactically has a binary operation writing into the ivar \p Ivar.
+ bool potentiallyWritesIntoIvar(const Decl *Parent,
+ const ObjCIvarDecl *Ivar) {
+ using namespace ast_matchers;
+ if (!Parent || !Parent->getBody())
+ return false;
+ StatementMatcher WriteIntoIvarM = binaryOperator(
+ hasOperatorName("="), hasLHS(ignoringParenImpCasts(objcIvarRefExpr(
+ hasDeclaration(equalsNode(Ivar))))));
+ StatementMatcher ParentM = stmt(hasDescendant(WriteIntoIvarM));
+ auto Matches = match(ParentM, *Parent->getBody(), Parent->getASTContext());
+ return !Matches.empty();
+ }
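+ // Illustrative match for the matcher above (method assumed, not from
+ // this patch):
+ //   - (void)reset { _cachedValue = 0; }
+ // hasDescendant lets arbitrarily nested `_ivar = ...` stores match.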
+
+ /// Check and lazily calculate whether the region of interest is
+ /// modified in the stack frame to which \p N belongs.
+ /// The calculation is cached in FramesModifyingRegion.
+ bool isRegionOfInterestModifiedInFrame(const ExplodedNode *N) {
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *SCtx = Ctx->getStackFrame();
+ if (!FramesModifyingCalculated.count(SCtx))
+ findModifyingFrames(N);
+ return FramesModifyingRegion.count(SCtx);
+ }
+
+
+ /// Write to \c FramesModifyingRegion all stack frames along
+ /// the path which modify \c RegionOfInterest.
+ void findModifyingFrames(const ExplodedNode *N) {
+ assert(N->getLocationAs<CallExitBegin>());
+ ProgramStateRef LastReturnState = N->getState();
+ SVal ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *OriginalSCtx = Ctx->getStackFrame();
+
+ do {
+ ProgramStateRef State = N->getState();
+ auto CallExitLoc = N->getLocationAs<CallExitBegin>();
+ if (CallExitLoc) {
+ LastReturnState = State;
+ ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
+ }
+
+ FramesModifyingCalculated.insert(
+ N->getLocationContext()->getStackFrame());
+
+ if (wasRegionOfInterestModifiedAt(RegionOfInterest, N, ValueAtReturn)) {
+ const StackFrameContext *SCtx = N->getStackFrame();
+ while (!SCtx->inTopFrame()) {
+ auto p = FramesModifyingRegion.insert(SCtx);
+ if (!p.second)
+ break; // Frame and all its parents already inserted.
+ SCtx = SCtx->getParent()->getStackFrame();
+ }
+ }
+
+ // Stop calculation at the call to the current function.
+ if (auto CE = N->getLocationAs<CallEnter>())
+ if (CE->getCalleeContext() == OriginalSCtx)
+ break;
+
+ N = N->getFirstPred();
+ } while (N);
+ }
+
+ /// Get parameters associated with runtime definition in order
+ /// to get the correct parameter name.
+ ArrayRef<ParmVarDecl *> getCallParameters(CallEventRef<> Call) {
+ // Use runtime definition, if available.
+ RuntimeDefinition RD = Call->getRuntimeDefinition();
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(RD.getDecl()))
+ return FD->parameters();
+
+ return Call->parameters();
+ }
+
+ /// \return whether \p Ty points to a const type, or is a const reference.
+ bool isPointerToConst(QualType Ty) {
+ return !Ty->getPointeeType().isNull() &&
+ Ty->getPointeeType().getCanonicalType().isConstQualified();
+ }
+
+ /// \return Diagnostic piece for the member field not modified
+ /// in a given function.
+ std::shared_ptr<PathDiagnosticPiece> notModifiedMemberDiagnostics(
+ const LocationContext *Ctx,
+ const SourceManager &SM,
+ const PrintingPolicy &PP,
+ CallExitBegin &CallExitLoc,
+ CallEventRef<> Call,
+ const MemRegion *ArgRegion) {
+ const char *TopRegionName = isa<ObjCMethodCall>(Call) ? "self" : "this";
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << DiagnosticsMsg;
+ bool Success = prettyPrintRegionName(TopRegionName, "->", /*IsReference=*/true,
+ /*IndirectionLevel=*/1, ArgRegion, os, PP);
+
+ // Return nothing if we have failed to pretty-print.
+ if (!Success)
+ return nullptr;
+
+ os << "'";
+ PathDiagnosticLocation L =
+ getPathDiagnosticLocation(CallExitLoc.getReturnStmt(), SM, Ctx, Call);
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ }
+
+ /// \return Diagnostic piece for the parameter \p PVD not modified
+ /// in a given function.
+ /// \p IndirectionLevel How many times \c ArgRegion has to be dereferenced
+ /// before we get to the super region of \c RegionOfInterest
+ std::shared_ptr<PathDiagnosticPiece>
+ notModifiedParameterDiagnostics(const LocationContext *Ctx,
+ const SourceManager &SM,
+ const PrintingPolicy &PP,
+ CallExitBegin &CallExitLoc,
+ CallEventRef<> Call,
+ const ParmVarDecl *PVD,
+ const MemRegion *ArgRegion,
+ unsigned IndirectionLevel) {
+ PathDiagnosticLocation L = getPathDiagnosticLocation(
+ CallExitLoc.getReturnStmt(), SM, Ctx, Call);
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << DiagnosticsMsg;
+ bool IsReference = PVD->getType()->isReferenceType();
+ const char *Sep = IsReference && IndirectionLevel == 1 ? "." : "->";
+ bool Success = prettyPrintRegionName(
+ PVD->getQualifiedNameAsString().c_str(),
+ Sep, IsReference, IndirectionLevel, ArgRegion, os, PP);
+
+ // Print the parameter name if the pretty-printing has failed.
+ if (!Success)
+ PVD->printQualifiedName(os);
+ os << "'";
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ }
+
+ /// \return a path diagnostic location for the optionally
+ /// present return statement \p RS.
+ PathDiagnosticLocation getPathDiagnosticLocation(const ReturnStmt *RS,
+ const SourceManager &SM,
+ const LocationContext *Ctx,
+ CallEventRef<> Call) {
+ if (RS)
+ return PathDiagnosticLocation::createBegin(RS, SM, Ctx);
+ return PathDiagnosticLocation(
+ Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(), SM);
+ }
+
+ /// Pretty-print region \p ArgRegion starting from parent to \p os.
+ /// \return whether printing has succeeded
+ bool prettyPrintRegionName(StringRef TopRegionName,
+ StringRef Sep,
+ bool IsReference,
+ int IndirectionLevel,
+ const MemRegion *ArgRegion,
+ llvm::raw_svector_ostream &os,
+ const PrintingPolicy &PP) {
+ SmallVector<const MemRegion *, 5> Subregions;
+ const MemRegion *R = RegionOfInterest;
+ while (R != ArgRegion) {
+ if (!(isa<FieldRegion>(R) || isa<CXXBaseObjectRegion>(R) ||
+ isa<ObjCIvarRegion>(R)))
+ return false; // Pattern-matching failed.
+ Subregions.push_back(R);
+ R = cast<SubRegion>(R)->getSuperRegion();
+ }
+ bool IndirectReference = !Subregions.empty();
+
+ if (IndirectReference)
+ IndirectionLevel--; // Due to "->" symbol.
+
+ if (IsReference)
+ IndirectionLevel--; // Due to reference semantics.
+
+ bool ShouldSurround = IndirectReference && IndirectionLevel > 0;
+
+ if (ShouldSurround)
+ os << "(";
+ for (int i = 0; i < IndirectionLevel; i++)
+ os << "*";
+ os << TopRegionName;
+ if (ShouldSurround)
+ os << ")";
+
+ for (auto I = Subregions.rbegin(), E = Subregions.rend(); I != E; ++I) {
+ if (const auto *FR = dyn_cast<FieldRegion>(*I)) {
+ os << Sep;
+ FR->getDecl()->getDeclName().print(os, PP);
+ Sep = ".";
+ } else if (const auto *IR = dyn_cast<ObjCIvarRegion>(*I)) {
+ os << "->";
+ IR->getDecl()->getDeclName().print(os, PP);
+ Sep = ".";
+ } else if (isa<CXXBaseObjectRegion>(*I)) {
+ continue; // Just keep going up to the base region.
+ } else {
+ llvm_unreachable("Previous check has missed an unexpected region");
+ }
+ }
+ return true;
+ }
+};
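+// Illustrative source this visitor annotates (names assumed, not from
+// this patch):
+//   void init(int *p) { if (skip) return; *p = 1; }
+//   void use() { int x; init(&x); if (x) {} }  // x may be uninitialized
+// On the path taking the early return, the note attached to init()'s
+// return statement would read like "Returning without writing to '*p'".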
+
+/// Suppress null-pointer-dereference bugs where the dereferenced null
+/// value was returned from a macro.
+class MacroNullReturnSuppressionVisitor final : public BugReporterVisitor {
+ const SubRegion *RegionOfInterest;
+ const SVal ValueAtDereference;
+
+ // Do not invalidate reports in which the value was modified
+ // after it was assigned from the macro.
+ bool WasModified = false;
+
+public:
+ MacroNullReturnSuppressionVisitor(const SubRegion *R,
+ const SVal V) : RegionOfInterest(R),
+ ValueAtDereference(V) {}
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override {
+ if (WasModified)
+ return nullptr;
+
+ auto BugPoint = BR.getErrorNode()->getLocation().getAs<StmtPoint>();
+ if (!BugPoint)
+ return nullptr;
+
+ const SourceManager &SMgr = BRC.getSourceManager();
+ if (auto Loc = matchAssignment(N, BRC)) {
+ if (isFunctionMacroExpansion(*Loc, SMgr)) {
+ std::string MacroName = getMacroName(*Loc, BRC);
+ SourceLocation BugLoc = BugPoint->getStmt()->getLocStart();
+ if (!BugLoc.isMacroID() || getMacroName(BugLoc, BRC) != MacroName)
+ BR.markInvalid(getTag(), MacroName.c_str());
+ }
+ }
+
+ if (wasRegionOfInterestModifiedAt(RegionOfInterest, N, ValueAtDereference))
+ WasModified = true;
+
+ return nullptr;
+ }
+
+ static void addMacroVisitorIfNecessary(
+ const ExplodedNode *N, const MemRegion *R,
+ bool EnableNullFPSuppression, BugReport &BR,
+ const SVal V) {
+ AnalyzerOptions &Options = N->getState()->getStateManager()
+ .getOwningEngine()->getAnalysisManager().options;
+ if (EnableNullFPSuppression && Options.shouldSuppressNullReturnPaths()
+ && V.getAs<Loc>())
+ BR.addVisitor(llvm::make_unique<MacroNullReturnSuppressionVisitor>(
+ R->getAs<SubRegion>(), V));
+ }
+
+ void* getTag() const {
+ static int Tag = 0;
+ return static_cast<void *>(&Tag);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ ID.AddPointer(getTag());
+ }
+
+private:
+ /// \return Source location of the right-hand side of an assignment
+ /// into \c RegionOfInterest, or an empty Optional if none is found.
+ Optional<SourceLocation> matchAssignment(const ExplodedNode *N,
+ BugReporterContext &BRC) {
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
+ ProgramStateRef State = N->getState();
+ auto *LCtx = N->getLocationContext();
+ if (!S)
+ return None;
+
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
+ if (const Expr *RHS = VD->getInit())
+ if (RegionOfInterest->isSubRegionOf(
+ State->getLValue(VD, LCtx).getAsRegion()))
+ return RHS->getLocStart();
+ } else if (const auto *BO = dyn_cast<BinaryOperator>(S)) {
+ const MemRegion *R = N->getSVal(BO->getLHS()).getAsRegion();
+ const Expr *RHS = BO->getRHS();
+ if (BO->isAssignmentOp() && RegionOfInterest->isSubRegionOf(R)) {
+ return RHS->getLocStart();
+ }
+ }
+ return None;
+ }
+};
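+// Illustrative suppression (macro assumed, not from this patch):
+//   #define SAFE_ALLOC(n) ((n) > 0 ? malloc(n) : NULL)
+//   int *p = (int *)SAFE_ALLOC(n);
+//   *p = 0;  // dereference happens outside SAFE_ALLOC
+// Since the null value originates in a function-like macro and the
+// dereference site is not inside that same macro, the report is dropped.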
+
/// Emits an extra note at the return statement of an interesting stack frame.
///
/// The returned value is marked as an interesting value, and if it's null,
@@ -168,19 +664,20 @@ namespace {
///
/// This visitor is intended to be used when another visitor discovers that an
/// interesting value comes from an inlined function call.
-class ReturnVisitor : public BugReporterVisitorImpl<ReturnVisitor> {
+class ReturnVisitor : public BugReporterVisitor {
const StackFrameContext *StackFrame;
enum {
Initial,
MaybeUnsuppress,
Satisfied
- } Mode;
+ } Mode = Initial;
bool EnableNullFPSuppression;
+ bool ShouldInvalidate = true;
public:
ReturnVisitor(const StackFrameContext *Frame, bool Suppressed)
- : StackFrame(Frame), Mode(Initial), EnableNullFPSuppression(Suppressed) {}
+ : StackFrame(Frame), EnableNullFPSuppression(Suppressed) {}
static void *getTag() {
static int Tag = 0;
@@ -235,7 +732,7 @@ public:
// Check the return value.
ProgramStateRef State = Node->getState();
- SVal RetVal = State->getSVal(S, Node->getLocationContext());
+ SVal RetVal = Node->getSVal(S);
// Handle cases where a reference is returned and then immediately used.
if (cast<Expr>(S)->isGLValue())
@@ -274,7 +771,7 @@ public:
if (!SP)
return nullptr;
- const ReturnStmt *Ret = dyn_cast<ReturnStmt>(SP->getStmt());
+ const auto *Ret = dyn_cast<ReturnStmt>(SP->getStmt());
if (!Ret)
return nullptr;
@@ -329,8 +826,7 @@ public:
// If we have counter-suppression enabled, make sure we keep visiting
// future nodes. We want to emit a path note as well, in case
// the report is resurrected as valid later on.
- ExprEngine &Eng = BRC.getBugReporter().getEngine();
- AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ AnalyzerOptions &Options = BRC.getAnalyzerOptions();
if (EnableNullFPSuppression && hasCounterSuppression(Options))
Mode = MaybeUnsuppress;
@@ -352,8 +848,8 @@ public:
}
} else {
// FIXME: We should have a more generalized location printing mechanism.
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(RetE))
- if (const DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(DR->getDecl()))
+ if (const auto *DR = dyn_cast<DeclRefExpr>(RetE))
+ if (const auto *DD = dyn_cast<DeclaratorDecl>(DR->getDecl()))
Out << " (loaded from '" << *DD << "')";
}
@@ -368,8 +864,7 @@ public:
visitNodeMaybeUnsuppress(const ExplodedNode *N, const ExplodedNode *PrevN,
BugReporterContext &BRC, BugReport &BR) {
#ifndef NDEBUG
- ExprEngine &Eng = BRC.getBugReporter().getEngine();
- AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ AnalyzerOptions &Options = BRC.getAnalyzerOptions();
assert(hasCounterSuppression(Options));
#endif
@@ -406,7 +901,7 @@ public:
if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true,
EnableNullFPSuppression))
- BR.removeInvalidation(ReturnVisitor::getTag(), StackFrame);
+ ShouldInvalidate = false;
// If we /can't/ track the null pointer, we should err on the side of
// false negatives, and continue towards marking this report invalid.
@@ -432,18 +927,16 @@ public:
llvm_unreachable("Invalid visit mode!");
}
- std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override {
- if (EnableNullFPSuppression)
+ void finalizeVisitor(BugReporterContext &BRC, const ExplodedNode *N,
+ BugReport &BR) override {
+ if (EnableNullFPSuppression && ShouldInvalidate)
BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
- return nullptr;
}
};
-} // end anonymous namespace
+} // namespace
-void FindLastStoreBRVisitor ::Profile(llvm::FoldingSetNodeID &ID) const {
+void FindLastStoreBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
ID.AddPointer(&tag);
ID.AddPointer(R);
@@ -466,7 +959,7 @@ static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
return false;
const MemSpaceRegion *VarSpace = VR->getMemorySpace();
- const StackSpaceRegion *FrameSpace = dyn_cast<StackSpaceRegion>(VarSpace);
+ const auto *FrameSpace = dyn_cast<StackSpaceRegion>(VarSpace);
if (!FrameSpace) {
// If we ever directly evaluate global DeclStmts, this assertion will be
// invalid, but this still seems preferable to silently accepting an
@@ -477,14 +970,131 @@ static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
assert(VR->getDecl()->hasLocalStorage());
const LocationContext *LCtx = N->getLocationContext();
- return FrameSpace->getStackFrame() == LCtx->getCurrentStackFrame();
+ return FrameSpace->getStackFrame() == LCtx->getStackFrame();
+}
+
+/// Show diagnostics for initializing or declaring a region \p R with a bad value.
+static void showBRDiagnostics(const char *action, llvm::raw_svector_ostream &os,
+ const MemRegion *R, SVal V, const DeclStmt *DS) {
+ if (R->canPrintPretty()) {
+ R->printPretty(os);
+ os << " ";
+ }
+
+ if (V.getAs<loc::ConcreteInt>()) {
+ bool b = false;
+ if (R->isBoundable()) {
+ if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (TR->getValueType()->isObjCObjectPointerType()) {
+ os << action << "nil";
+ b = true;
+ }
+ }
+ }
+ if (!b)
+ os << action << "a null pointer value";
+
+ } else if (auto CVal = V.getAs<nonloc::ConcreteInt>()) {
+ os << action << CVal->getValue();
+ } else if (DS) {
+ if (V.isUndef()) {
+ if (isa<VarRegion>(R)) {
+ const auto *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (VD->getInit()) {
+ os << (R->canPrintPretty() ? "initialized" : "Initializing")
+ << " to a garbage value";
+ } else {
+ os << (R->canPrintPretty() ? "declared" : "Declaring")
+ << " without an initial value";
+ }
+ }
+ } else {
+ os << (R->canPrintPretty() ? "initialized" : "Initialized")
+ << " here";
+ }
+ }
+}
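+// For an illustrative declaration `int *p = 0;`, the pieces above combine
+// into a message like "'p' initialized to a null pointer value"; when the
+// region cannot be pretty-printed, the capitalized forms ("Initializing",
+// "Declaring") start the message instead.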
+
+/// Display diagnostics for passing a bad value via the parameter \p VR.
+static void showBRParamDiagnostics(llvm::raw_svector_ostream& os,
+ const VarRegion *VR,
+ SVal V) {
+ const auto *Param = cast<ParmVarDecl>(VR->getDecl());
+
+ os << "Passing ";
+
+ if (V.getAs<loc::ConcreteInt>()) {
+ if (Param->getType()->isObjCObjectPointerType())
+ os << "nil object reference";
+ else
+ os << "null pointer value";
+ } else if (V.isUndef()) {
+ os << "uninitialized value";
+ } else if (auto CI = V.getAs<nonloc::ConcreteInt>()) {
+ os << "the value " << CI->getValue();
+ } else {
+ os << "value";
+ }
+
+ // Printed parameter indexes are 1-based, not 0-based.
+ unsigned Idx = Param->getFunctionScopeIndex() + 1;
+ os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
+ if (VR->canPrintPretty()) {
+ os << " ";
+ VR->printPretty(os);
+ }
+}
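+// For an illustrative call `f(NULL)` into `void f(int *p)`, this prints
+// a message like "Passing null pointer value via 1st parameter 'p'".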
+
+/// Show default diagnostics for storing a bad value into the region \p R.
+static void showBRDefaultDiagnostics(llvm::raw_svector_ostream& os,
+ const MemRegion *R,
+ SVal V) {
+ if (V.getAs<loc::ConcreteInt>()) {
+ bool b = false;
+ if (R->isBoundable()) {
+ if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (TR->getValueType()->isObjCObjectPointerType()) {
+ os << "nil object reference stored";
+ b = true;
+ }
+ }
+ }
+ if (!b) {
+ if (R->canPrintPretty())
+ os << "Null pointer value stored";
+ else
+ os << "Storing null pointer value";
+ }
+
+ } else if (V.isUndef()) {
+ if (R->canPrintPretty())
+ os << "Uninitialized value stored";
+ else
+ os << "Storing uninitialized value";
+
+ } else if (auto CV = V.getAs<nonloc::ConcreteInt>()) {
+ if (R->canPrintPretty())
+ os << "The value " << CV->getValue() << " is assigned";
+ else
+ os << "Assigning " << CV->getValue();
+
+ } else {
+ if (R->canPrintPretty())
+ os << "Value assigned";
+ else
+ os << "Assigning value";
+ }
+
+ if (R->canPrintPretty()) {
+ os << " to ";
+ R->printPretty(os);
+ }
}
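+// Fallback wording sketch: storing a known-null value into a
+// pretty-printable region 'p' yields "Null pointer value stored to 'p'",
+// while an unprintable region yields "Storing null pointer value".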
std::shared_ptr<PathDiagnosticPiece>
FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
const ExplodedNode *Pred,
BugReporterContext &BRC, BugReport &BR) {
-
if (Satisfied)
return nullptr;
@@ -493,7 +1103,7 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
bool IsParam = false;
// First see if we reached the declaration of the region.
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (const auto *VR = dyn_cast<VarRegion>(R)) {
if (isInitializationOfVar(Pred, VR)) {
StoreSite = Pred;
InitE = VR->getDecl()->getInit();
@@ -539,8 +1149,8 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
// 'this' should never be NULL, but this visitor isn't just for NULL and
// UndefinedVal.)
if (Optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
+ if (const auto *VR = dyn_cast<VarRegion>(R)) {
+ const auto *Param = cast<ParmVarDecl>(VR->getDecl());
ProgramStateManager &StateMgr = BRC.getStateManager();
CallEventManager &CallMgr = StateMgr.getCallEventManager();
@@ -554,7 +1164,7 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
// If this is a CXXTempObjectRegion, the Expr responsible for its creation
// is wrapped inside of it.
- if (const CXXTempObjectRegion *TmpR = dyn_cast<CXXTempObjectRegion>(R))
+ if (const auto *TmpR = dyn_cast<CXXTempObjectRegion>(R))
InitE = TmpR->getExpr();
}
@@ -584,8 +1194,8 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (Optional<PostStmt> PS = StoreSite->getLocationAs<PostStmt>()) {
const Stmt *S = PS->getStmt();
const char *action = nullptr;
- const DeclStmt *DS = dyn_cast<DeclStmt>(S);
- const VarRegion *VR = dyn_cast<VarRegion>(R);
+ const auto *DS = dyn_cast<DeclStmt>(S);
+ const auto *VR = dyn_cast<VarRegion>(R);
if (DS) {
action = R->canPrintPretty() ? "initialized to " :
@@ -596,8 +1206,8 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (VR) {
// See if we can get the BlockVarRegion.
ProgramStateRef State = StoreSite->getState();
- SVal V = State->getSVal(S, PS->getLocationContext());
- if (const BlockDataRegion *BDR =
+ SVal V = StoreSite->getSVal(S);
+ if (const auto *BDR =
dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
if (Optional<KnownSVal> KV =
@@ -608,122 +1218,16 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
}
}
}
+ if (action)
+ showBRDiagnostics(action, os, R, V, DS);
- if (action) {
- if (R->canPrintPretty()) {
- R->printPretty(os);
- os << " ";
- }
-
- if (V.getAs<loc::ConcreteInt>()) {
- bool b = false;
- if (R->isBoundable()) {
- if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
- if (TR->getValueType()->isObjCObjectPointerType()) {
- os << action << "nil";
- b = true;
- }
- }
- }
-
- if (!b)
- os << action << "a null pointer value";
- } else if (Optional<nonloc::ConcreteInt> CVal =
- V.getAs<nonloc::ConcreteInt>()) {
- os << action << CVal->getValue();
- }
- else if (DS) {
- if (V.isUndef()) {
- if (isa<VarRegion>(R)) {
- const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
- if (VD->getInit()) {
- os << (R->canPrintPretty() ? "initialized" : "Initializing")
- << " to a garbage value";
- } else {
- os << (R->canPrintPretty() ? "declared" : "Declaring")
- << " without an initial value";
- }
- }
- }
- else {
- os << (R->canPrintPretty() ? "initialized" : "Initialized")
- << " here";
- }
- }
- }
} else if (StoreSite->getLocation().getAs<CallEnter>()) {
- if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
-
- os << "Passing ";
-
- if (V.getAs<loc::ConcreteInt>()) {
- if (Param->getType()->isObjCObjectPointerType())
- os << "nil object reference";
- else
- os << "null pointer value";
- } else if (V.isUndef()) {
- os << "uninitialized value";
- } else if (Optional<nonloc::ConcreteInt> CI =
- V.getAs<nonloc::ConcreteInt>()) {
- os << "the value " << CI->getValue();
- } else {
- os << "value";
- }
-
- // Printed parameter indexes are 1-based, not 0-based.
- unsigned Idx = Param->getFunctionScopeIndex() + 1;
- os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
- if (R->canPrintPretty()) {
- os << " ";
- R->printPretty(os);
- }
- }
+ if (const auto *VR = dyn_cast<VarRegion>(R))
+ showBRParamDiagnostics(os, VR, V);
}
- if (os.str().empty()) {
- if (V.getAs<loc::ConcreteInt>()) {
- bool b = false;
- if (R->isBoundable()) {
- if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
- if (TR->getValueType()->isObjCObjectPointerType()) {
- os << "nil object reference stored";
- b = true;
- }
- }
- }
- if (!b) {
- if (R->canPrintPretty())
- os << "Null pointer value stored";
- else
- os << "Storing null pointer value";
- }
-
- } else if (V.isUndef()) {
- if (R->canPrintPretty())
- os << "Uninitialized value stored";
- else
- os << "Storing uninitialized value";
-
- } else if (Optional<nonloc::ConcreteInt> CV =
- V.getAs<nonloc::ConcreteInt>()) {
- if (R->canPrintPretty())
- os << "The value " << CV->getValue() << " is assigned";
- else
- os << "Assigning " << CV->getValue();
-
- } else {
- if (R->canPrintPretty())
- os << "Value assigned";
- else
- os << "Assigning value";
- }
-
- if (R->canPrintPretty()) {
- os << " to ";
- R->printPretty(os);
- }
- }
+ if (os.str().empty())
+ showBRDefaultDiagnostics(os, R, V);
// Construct a new PathDiagnosticPiece.
ProgramPoint P = StoreSite->getLocation();
@@ -778,7 +1282,6 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
// Check if in the previous state it was feasible for this constraint
// to *not* be true.
if (isUnderconstrained(PrevN)) {
-
IsSatisfied = true;
// As a sanity check, make sure that the negation of the constraint
@@ -816,20 +1319,20 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
SuppressInlineDefensiveChecksVisitor::
SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
- : V(Value), IsSatisfied(false), IsTrackingTurnedOn(false) {
-
- // Check if the visitor is disabled.
- SubEngine *Eng = N->getState()->getStateManager().getOwningEngine();
- assert(Eng && "Cannot file a bug report without an owning engine");
- AnalyzerOptions &Options = Eng->getAnalysisManager().options;
- if (!Options.shouldSuppressInlinedDefensiveChecks())
- IsSatisfied = true;
+ : V(Value) {
+ // Check if the visitor is disabled.
+ SubEngine *Eng = N->getState()->getStateManager().getOwningEngine();
+ assert(Eng && "Cannot file a bug report without an owning engine");
+ AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+ if (!Options.shouldSuppressInlinedDefensiveChecks())
+ IsSatisfied = true;
- assert(N->getState()->isNull(V).isConstrainedTrue() &&
- "The visitor only tracks the cases where V is constrained to 0");
+ assert(N->getState()->isNull(V).isConstrainedTrue() &&
+ "The visitor only tracks the cases where V is constrained to 0");
}
-void SuppressInlineDefensiveChecksVisitor::Profile(FoldingSetNodeID &ID) const {
+void SuppressInlineDefensiveChecksVisitor::Profile(
+ llvm::FoldingSetNodeID &ID) const {
static int id = 0;
ID.AddPointer(&id);
ID.Add(V);
@@ -878,10 +1381,6 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
if (!BugPoint)
return nullptr;
- SourceLocation BugLoc = BugPoint->getStmt()->getLocStart();
- if (BugLoc.isMacroID())
- return nullptr;
-
ProgramPoint CurPoint = Succ->getLocation();
const Stmt *CurTerminatorStmt = nullptr;
if (auto BE = CurPoint.getAs<BlockEdge>()) {
@@ -902,14 +1401,14 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
SourceLocation TerminatorLoc = CurTerminatorStmt->getLocStart();
if (TerminatorLoc.isMacroID()) {
- const SourceManager &SMgr = BRC.getSourceManager();
- std::pair<FileID, unsigned> TLInfo = SMgr.getDecomposedLoc(TerminatorLoc);
- SrcMgr::SLocEntry SE = SMgr.getSLocEntry(TLInfo.first);
- const SrcMgr::ExpansionInfo &EInfo = SE.getExpansion();
- if (EInfo.isFunctionMacroExpansion()) {
+ SourceLocation BugLoc = BugPoint->getStmt()->getLocStart();
+
+ // Suppress reports unless we are in that same macro.
+ if (!BugLoc.isMacroID() ||
+ getMacroName(BugLoc, BRC) != getMacroName(TerminatorLoc, BRC)) {
BR.markInvalid("Suppress Macro IDC", CurLC);
- return nullptr;
}
+ return nullptr;
}
}
return nullptr;
@@ -917,8 +1416,8 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
static const MemRegion *getLocationRegionIfReference(const Expr *E,
const ExplodedNode *N) {
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
if (!VD->getType()->isReferenceType())
return nullptr;
ProgramStateManager &StateMgr = N->getState()->getStateManager();
@@ -939,12 +1438,12 @@ static const MemRegion *getLocationRegionIfReference(const Expr *E,
static const Expr *peelOffOuterExpr(const Expr *Ex,
const ExplodedNode *N) {
Ex = Ex->IgnoreParenCasts();
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Ex))
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(Ex))
return peelOffOuterExpr(EWC->getSubExpr(), N);
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Ex))
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ex))
return peelOffOuterExpr(OVE->getSourceExpr(), N);
- if (auto *POE = dyn_cast<PseudoObjectExpr>(Ex)) {
- auto *PropRef = dyn_cast<ObjCPropertyRefExpr>(POE->getSyntacticForm());
+ if (const auto *POE = dyn_cast<PseudoObjectExpr>(Ex)) {
+ const auto *PropRef = dyn_cast<ObjCPropertyRefExpr>(POE->getSyntacticForm());
if (PropRef && PropRef->isMessagingGetter()) {
const Expr *GetterMessageSend =
POE->getSemanticExpr(POE->getNumSemanticExprs() - 1);
@@ -954,7 +1453,7 @@ static const Expr *peelOffOuterExpr(const Expr *Ex,
}
// Peel off the ternary operator.
- if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(Ex)) {
+ if (const auto *CO = dyn_cast<ConditionalOperator>(Ex)) {
// Find a node where the branching occurred and find out which branch
// we took (true/false) by looking at the ExplodedGraph.
const ExplodedNode *NI = N;
@@ -975,6 +1474,68 @@ static const Expr *peelOffOuterExpr(const Expr *Ex,
NI = NI->getFirstPred();
} while (NI);
}
+
+ if (auto *BO = dyn_cast<BinaryOperator>(Ex))
+ if (const Expr *SubEx = peelOffPointerArithmetic(BO))
+ return peelOffOuterExpr(SubEx, N);
+
+ return Ex;
+}
+
+/// Walk through nodes until we get one that matches the statement exactly.
+/// Alternatively, if we hit a known lvalue for the statement, we know we've
+/// gone too far (though we can likely track the lvalue better anyway).
+static const ExplodedNode* findNodeForStatement(const ExplodedNode *N,
+ const Stmt *S,
+ const Expr *Inner) {
+ do {
+ const ProgramPoint &pp = N->getLocation();
+ if (auto ps = pp.getAs<StmtPoint>()) {
+ if (ps->getStmt() == S || ps->getStmt() == Inner)
+ break;
+ } else if (auto CEE = pp.getAs<CallExitEnd>()) {
+ if (CEE->getCalleeContext()->getCallSite() == S ||
+ CEE->getCalleeContext()->getCallSite() == Inner)
+ break;
+ }
+ N = N->getFirstPred();
+ } while (N);
+ return N;
+}
+
+/// Find the ExplodedNode where the lvalue (the value of 'Ex')
+/// was computed.
+static const ExplodedNode* findNodeForExpression(const ExplodedNode *N,
+ const Expr *Inner) {
+ while (N) {
+ if (auto P = N->getLocation().getAs<PostStmt>()) {
+ if (P->getStmt() == Inner)
+ break;
+ }
+ N = N->getFirstPred();
+ }
+ assert(N && "Unable to find the lvalue node.");
+ return N;
+}
+
+/// Performing operator `&' on an lvalue expression is essentially a no-op.
+/// Likewise, taking addresses of fields or elements is unlikely to matter,
+/// so those are peeled off as well.
+static const Expr* peelOffOuterAddrOf(const Expr* Ex) {
+ Ex = Ex->IgnoreParenCasts();
+
+ // FIXME: There's a hack in our Store implementation that always computes
+ // field offsets around null pointers as if they are always equal to 0.
+ // The idea here is to report accesses to fields as null dereferences
+ // even though the pointer value that's being dereferenced is actually
+ // the offset of the field rather than exactly 0.
+ // See the FIXME in StoreManager's getLValueFieldOrIvar() method.
+ // This code interacts heavily with this hack; otherwise the value
+ // would not be null at all for most fields, so we'd be unable to track it.
+ if (const auto *Op = dyn_cast<UnaryOperator>(Ex))
+ if (Op->getOpcode() == UO_AddrOf && Op->getSubExpr()->isLValue())
+ if (const Expr *DerefEx = bugreporter::getDerefExpr(Op->getSubExpr()))
+ return DerefEx;
return Ex;
}
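+// E.g., for an illustrative expression `&p->f`, the address-of is peeled
+// off and getDerefExpr() recovers the dereferenced pointer `p`, which is
+// the value worth tracking (see the null-field-offset FIXME above).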
@@ -985,52 +1546,23 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
if (!S || !N)
return false;
- if (const Expr *Ex = dyn_cast<Expr>(S))
+ if (const auto *Ex = dyn_cast<Expr>(S))
S = peelOffOuterExpr(Ex, N);
const Expr *Inner = nullptr;
- if (const Expr *Ex = dyn_cast<Expr>(S)) {
+ if (const auto *Ex = dyn_cast<Expr>(S)) {
+ Ex = peelOffOuterAddrOf(Ex);
Ex = Ex->IgnoreParenCasts();
- // Performing operator `&' on an lvalue expression is essentially a no-op.
- // Then, if we are taking addresses of fields or elements, these are also
- // unlikely to matter.
- // FIXME: There's a hack in our Store implementation that always computes
- // field offsets around null pointers as if they are always equal to 0.
- // The idea here is to report accesses to fields as null dereferences
- // even though the pointer value that's being dereferenced is actually
- // the offset of the field rather than exactly 0.
- // See the FIXME in StoreManager's getLValueFieldOrIvar() method.
- // This code interacts heavily with this hack; otherwise the value
- // would not be null at all for most fields, so we'd be unable to track it.
- if (const auto *Op = dyn_cast<UnaryOperator>(Ex))
- if (Op->getOpcode() == UO_AddrOf && Op->getSubExpr()->isLValue())
- if (const Expr *DerefEx = getDerefExpr(Op->getSubExpr()))
- Ex = DerefEx;
-
- if (Ex && (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex)))
+ if (Ex && (ExplodedGraph::isInterestingLValueExpr(Ex)
+ || CallEvent::isCallStmt(Ex)))
Inner = Ex;
}
if (IsArg && !Inner) {
assert(N->getLocation().getAs<CallEnter>() && "Tracking arg but not at call");
} else {
- // Walk through nodes until we get one that matches the statement exactly.
- // Alternately, if we hit a known lvalue for the statement, we know we've
- // gone too far (though we can likely track the lvalue better anyway).
- do {
- const ProgramPoint &pp = N->getLocation();
- if (Optional<StmtPoint> ps = pp.getAs<StmtPoint>()) {
- if (ps->getStmt() == S || ps->getStmt() == Inner)
- break;
- } else if (Optional<CallExitEnd> CEE = pp.getAs<CallExitEnd>()) {
- if (CEE->getCalleeContext()->getCallSite() == S ||
- CEE->getCalleeContext()->getCallSite() == Inner)
- break;
- }
- N = N->getFirstPred();
- } while (N);
-
+ N = findNodeForStatement(N, S, Inner);
if (!N)
return false;
}
@@ -1041,51 +1573,43 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
// At this point in the path, the receiver should be live since we are at the
// message send expr. If it is nil, start tracking it.
if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(S, N))
- trackNullOrUndefValue(N, Receiver, report, false, EnableNullFPSuppression);
-
+ trackNullOrUndefValue(N, Receiver, report, /* IsArg=*/ false,
+ EnableNullFPSuppression);
// See if the expression we're interested in refers to a variable.
// If so, we can track both its contents and constraints on its value.
if (Inner && ExplodedGraph::isInterestingLValueExpr(Inner)) {
- const MemRegion *R = nullptr;
-
- // Find the ExplodedNode where the lvalue (the value of 'Ex')
- // was computed. We need this for getting the location value.
- const ExplodedNode *LVNode = N;
- while (LVNode) {
- if (Optional<PostStmt> P = LVNode->getLocation().getAs<PostStmt>()) {
- if (P->getStmt() == Inner)
- break;
- }
- LVNode = LVNode->getFirstPred();
- }
- assert(LVNode && "Unable to find the lvalue node.");
+ const ExplodedNode *LVNode = findNodeForExpression(N, Inner);
ProgramStateRef LVState = LVNode->getState();
- SVal LVal = LVState->getSVal(Inner, LVNode->getLocationContext());
-
- if (LVState->isNull(LVal).isConstrainedTrue()) {
- // In case of C++ references, we want to differentiate between a null
- // reference and reference to null pointer.
- // If the LVal is null, check if we are dealing with null reference.
- // For those, we want to track the location of the reference.
- if (const MemRegion *RR = getLocationRegionIfReference(Inner, N))
- R = RR;
- } else {
- R = LVState->getSVal(Inner, LVNode->getLocationContext()).getAsRegion();
-
- // If this is a C++ reference to a null pointer, we are tracking the
- // pointer. In addition, we should find the store at which the reference
- // got initialized.
- if (const MemRegion *RR = getLocationRegionIfReference(Inner, N)) {
- if (Optional<KnownSVal> KV = LVal.getAs<KnownSVal>())
- report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ SVal LVal = LVNode->getSVal(Inner);
+
+ const MemRegion *RR = getLocationRegionIfReference(Inner, N);
+ bool LVIsNull = LVState->isNull(LVal).isConstrainedTrue();
+
+ // If this is a C++ reference to a null pointer, we are tracking the
+ // pointer. In addition, we should find the store at which the reference
+ // got initialized.
+ if (RR && !LVIsNull) {
+ if (auto KV = LVal.getAs<KnownSVal>())
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
*KV, RR, EnableNullFPSuppression));
- }
}
+ // In case of C++ references, we want to differentiate between a null
+ // reference and reference to null pointer.
+ // If the LVal is null, check if we are dealing with null reference.
+ // For those, we want to track the location of the reference.
+ const MemRegion *R = (RR && LVIsNull) ? RR :
+ LVNode->getSVal(Inner).getAsRegion();
+
if (R) {
// Mark both the variable region and its contents as interesting.
SVal V = LVState->getRawSVal(loc::MemRegionVal(R));
+ report.addVisitor(
+ llvm::make_unique<NoStoreFuncVisitor>(cast<SubRegion>(R)));
+
+ MacroNullReturnSuppressionVisitor::addMacroVisitorIfNecessary(
+ N, R, EnableNullFPSuppression, report, V);
report.markInteresting(R);
report.markInteresting(V);
@@ -1094,21 +1618,21 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
// If the contents are symbolic, find out when they became null.
if (V.getAsLocSymbol(/*IncludeBaseRegions*/ true))
report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
- V.castAs<DefinedSVal>(), false));
+ V.castAs<DefinedSVal>(), false));
// Add visitor, which will suppress inline defensive checks.
- if (Optional<DefinedSVal> DV = V.getAs<DefinedSVal>()) {
+ if (auto DV = V.getAs<DefinedSVal>()) {
if (!DV->isZeroConstant() && LVState->isNull(*DV).isConstrainedTrue() &&
EnableNullFPSuppression) {
report.addVisitor(
llvm::make_unique<SuppressInlineDefensiveChecksVisitor>(*DV,
- LVNode));
+ LVNode));
}
}
- if (Optional<KnownSVal> KV = V.getAs<KnownSVal>())
+ if (auto KV = V.getAs<KnownSVal>())
report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
- *KV, R, EnableNullFPSuppression));
+ *KV, R, EnableNullFPSuppression));
return true;
}
}
@@ -1119,7 +1643,7 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
// If the value came from an inlined function call, we should at least make
// sure that function isn't pruned in our output.
- if (const Expr *E = dyn_cast<Expr>(S))
+ if (const auto *E = dyn_cast<Expr>(S))
S = E->IgnoreParenCasts();
ReturnVisitor::addVisitorIfNecessary(N, S, report, EnableNullFPSuppression);
@@ -1128,40 +1652,40 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
// base value that was dereferenced.
// assert(!V.isUnknownOrUndef());
// Is it a symbolic value?
- if (Optional<loc::MemRegionVal> L = V.getAs<loc::MemRegionVal>()) {
+ if (auto L = V.getAs<loc::MemRegionVal>()) {
+ report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
+
// At this point we are dealing with the region's LValue.
// However, if the rvalue is a symbolic region, we should track it as well.
// Try to use the correct type when looking up the value.
SVal RVal;
- if (const Expr *E = dyn_cast<Expr>(S))
+ if (const auto *E = dyn_cast<Expr>(S))
RVal = state->getRawSVal(L.getValue(), E->getType());
else
RVal = state->getSVal(L->getRegion());
- report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
- if (Optional<KnownSVal> KV = RVal.getAs<KnownSVal>())
+ if (auto KV = RVal.getAs<KnownSVal>())
report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
- *KV, L->getRegion(), EnableNullFPSuppression));
+ *KV, L->getRegion(), EnableNullFPSuppression));
const MemRegion *RegionRVal = RVal.getAsRegion();
if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
report.markInteresting(RegionRVal);
report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
- loc::MemRegionVal(RegionRVal), false));
+ loc::MemRegionVal(RegionRVal), false));
}
}
-
return true;
}
const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
const ExplodedNode *N) {
- const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+ const auto *ME = dyn_cast<ObjCMessageExpr>(S);
if (!ME)
return nullptr;
if (const Expr *Receiver = ME->getInstanceReceiver()) {
ProgramStateRef state = N->getState();
- SVal V = state->getSVal(Receiver, N->getLocationContext());
+ SVal V = N->getSVal(Receiver);
if (state->isNull(V).isConstrainedTrue())
return Receiver;
}
@@ -1184,7 +1708,7 @@ NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
llvm::SmallString<256> Buf;
llvm::raw_svector_ostream OS(Buf);
- if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (const auto *ME = dyn_cast<ObjCMessageExpr>(S)) {
OS << "'";
ME->getSelector().print(OS);
OS << "' not called";
@@ -1217,16 +1741,15 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
const Stmt *Head = WorkList.front();
WorkList.pop_front();
- ProgramStateRef state = N->getState();
- ProgramStateManager &StateMgr = state->getStateManager();
+ ProgramStateManager &StateMgr = N->getState()->getStateManager();
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Head)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(Head)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
const VarRegion *R =
StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
// What did we load?
- SVal V = state->getSVal(S, N->getLocationContext());
+ SVal V = N->getSVal(S);
if (V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
// Register a new visitor with the BugReport.
@@ -1267,7 +1790,6 @@ std::shared_ptr<PathDiagnosticPiece>
ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
const ExplodedNode *Prev,
BugReporterContext &BRC, BugReport &BR) {
-
ProgramPoint progPoint = N->getLocation();
ProgramStateRef CurrentState = N->getState();
ProgramStateRef PrevState = Prev->getState();
@@ -1289,11 +1811,8 @@ ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
}
if (Optional<PostStmt> PS = progPoint.getAs<PostStmt>()) {
- // FIXME: Assuming that BugReporter is a GRBugReporter is a layering
- // violation.
const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
- cast<GRBugReporter>(BRC.getBugReporter()).
- getEngine().geteagerlyAssumeBinOpBifurcationTags();
+ ExprEngine::geteagerlyAssumeBinOpBifurcationTags();
const ProgramPointTag *tag = PS->getTag();
if (tag == tags.first)
@@ -1389,7 +1908,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, bool tookTrue,
return P;
break;
case Stmt::UnaryOperatorClass: {
- const UnaryOperator *UO = cast<UnaryOperator>(CondTmp);
+ const auto *UO = cast<UnaryOperator>(CondTmp);
if (UO->getOpcode() == UO_LNot) {
tookTrueTmp = !tookTrueTmp;
CondTmp = UO->getSubExpr();
@@ -1432,7 +1951,6 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
isa<CXXBoolLiteralExpr>(Ex) ||
isa<IntegerLiteral>(Ex) ||
isa<FloatingLiteral>(Ex))) {
-
StringRef StartName = Lexer::getImmediateMacroNameForDiagnostics(LocStart,
BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
StringRef EndName = Lexer::getImmediateMacroNameForDiagnostics(LocEnd,
@@ -1463,7 +1981,7 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
}
}
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(Ex)) {
const bool quotes = isa<VarDecl>(DR->getDecl());
if (quotes) {
Out << '\'';
@@ -1487,7 +2005,7 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
return quotes;
}
- if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Ex)) {
+ if (const auto *IL = dyn_cast<IntegerLiteral>(Ex)) {
QualType OriginalTy = OriginalExpr->getType();
if (OriginalTy->isPointerType()) {
if (IL->getValue() == 0) {
@@ -1513,7 +2031,6 @@ std::shared_ptr<PathDiagnosticPiece>
ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const BinaryOperator *BExpr,
const bool tookTrue, BugReporterContext &BRC,
BugReport &R, const ExplodedNode *N) {
-
bool shouldInvert = false;
Optional<bool> shouldPrune;
@@ -1618,8 +2135,8 @@ std::shared_ptr<PathDiagnosticPiece> ConditionBRVisitor::VisitConditionVariable(
PathDiagnosticLocation Loc(CondVarExpr, BRC.getSourceManager(), LCtx);
auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Out.str());
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(CondVarExpr)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(CondVarExpr)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
const ProgramState *state = N->getState().get();
if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
if (report.isInteresting(R))
@@ -1635,8 +2152,7 @@ std::shared_ptr<PathDiagnosticPiece>
ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const DeclRefExpr *DR,
const bool tookTrue, BugReporterContext &BRC,
BugReport &report, const ExplodedNode *N) {
-
- const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ const auto *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
return nullptr;
@@ -1684,14 +2200,11 @@ bool ConditionBRVisitor::isPieceMessageGeneric(
Piece->getString() == GenericFalseMessage;
}
-std::unique_ptr<PathDiagnosticPiece>
-LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) {
+void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
+ BugReporterContext &BRC, const ExplodedNode *N, BugReport &BR) {
// Here we suppress false positives coming from system headers. This list is
// based on known issues.
- ExprEngine &Eng = BRC.getBugReporter().getEngine();
- AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ AnalyzerOptions &Options = BRC.getAnalyzerOptions();
const Decl *D = N->getLocationContext()->getDecl();
if (AnalysisDeclContext::isInStdNamespace(D)) {
@@ -1701,8 +2214,7 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
// TR1, Boost, or llvm/ADT.
if (Options.shouldSuppressFromCXXStandardLibrary()) {
BR.markInvalid(getTag(), nullptr);
- return nullptr;
-
+ return;
} else {
// If the complete 'std' suppression is not enabled, suppress reports
// from the 'std' namespace that are known to produce false positives.
@@ -1710,27 +2222,27 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
// The analyzer issues a false use-after-free when std::list::pop_front
// or std::list::pop_back are called multiple times because we cannot
// reason about the internal invariants of the data structure.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
const CXXRecordDecl *CD = MD->getParent();
if (CD->getName() == "list") {
BR.markInvalid(getTag(), nullptr);
- return nullptr;
+ return;
}
}
// The analyzer issues a false positive when the constructor of
// std::__independent_bits_engine from algorithms is used.
- if (const CXXConstructorDecl *MD = dyn_cast<CXXConstructorDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXConstructorDecl>(D)) {
const CXXRecordDecl *CD = MD->getParent();
if (CD->getName() == "__independent_bits_engine") {
BR.markInvalid(getTag(), nullptr);
- return nullptr;
+ return;
}
}
for (const LocationContext *LCtx = N->getLocationContext(); LCtx;
LCtx = LCtx->getParent()) {
- const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LCtx->getDecl());
+ const auto *MD = dyn_cast<CXXMethodDecl>(LCtx->getDecl());
if (!MD)
continue;
@@ -1743,7 +2255,7 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
// data structure.
if (CD->getName() == "basic_string") {
BR.markInvalid(getTag(), nullptr);
- return nullptr;
+ return;
}
// The analyzer issues a false positive on
@@ -1751,7 +2263,7 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
// because it does not reason properly about temporary destructors.
if (CD->getName() == "shared_ptr") {
BR.markInvalid(getTag(), nullptr);
- return nullptr;
+ return;
}
}
}
@@ -1765,18 +2277,15 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
Loc = Loc.getSpellingLoc();
if (SM.getFilename(Loc).endswith("sys/queue.h")) {
BR.markInvalid(getTag(), nullptr);
- return nullptr;
+ return;
}
}
-
- return nullptr;
}
std::shared_ptr<PathDiagnosticPiece>
UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC, BugReport &BR) {
-
ProgramStateRef State = N->getState();
ProgramPoint ProgLoc = N->getLocation();
@@ -1789,18 +2298,17 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State);
unsigned Idx = 0;
- ArrayRef<ParmVarDecl*> parms = Call->parameters();
+ ArrayRef<ParmVarDecl *> parms = Call->parameters();
- for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
- I != E; ++I, ++Idx) {
+ for (const auto *ParamDecl : parms) {
const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion();
+ ++Idx;
// Are we tracking the argument or its subregion?
- if ( !ArgReg || (ArgReg != R && !R->isSubRegionOf(ArgReg->StripCasts())))
+ if (!ArgReg || !R->isSubRegionOf(ArgReg->StripCasts()))
continue;
// Check the function parameter type.
- const ParmVarDecl *ParamDecl = *I;
assert(ParamDecl && "Formal parameter has no decl?");
QualType T = ParamDecl->getType();
@@ -1832,7 +2340,7 @@ CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
if (Satisfied)
return nullptr;
- auto Edge = Succ->getLocation().getAs<BlockEdge>();
+ const auto Edge = Succ->getLocation().getAs<BlockEdge>();
if (!Edge.hasValue())
return nullptr;
@@ -1859,7 +2367,7 @@ CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
const auto Param =
State->getSVal(State->getRegion(Met->getParamDecl(0), LCtx));
const auto This =
- State->getSVal(SVB.getCXXThis(Met, LCtx->getCurrentStackFrame()));
+ State->getSVal(SVB.getCXXThis(Met, LCtx->getStackFrame()));
auto L = PathDiagnosticLocation::create(Met, BRC.getSourceManager());
@@ -1877,3 +2385,82 @@ CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
return std::move(Piece);
}
+
+std::shared_ptr<PathDiagnosticPiece>
+TaintBugVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN,
+ BugReporterContext &BRC, BugReport &BR) {
+
+ // Find the ExplodedNode where the taint was first introduced
+ if (!N->getState()->isTainted(V) || PrevN->getState()->isTainted(V))
+ return nullptr;
+
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
+ if (!S)
+ return nullptr;
+
+ const LocationContext *NCtx = N->getLocationContext();
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createBegin(S, BRC.getSourceManager(), NCtx);
+ if (!L.isValid() || !L.asLocation().isValid())
+ return nullptr;
+
+ return std::make_shared<PathDiagnosticEventPiece>(L, "Taint originated here");
+}
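+// Illustrative trigger (source function assumed, not from this patch):
+// after `scanf("%d", &n);` first taints `n`, the transition from an
+// untainted PrevN to a tainted N anchors the "Taint originated here"
+// note at that statement.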
+
+FalsePositiveRefutationBRVisitor::FalsePositiveRefutationBRVisitor()
+ : Constraints(ConstraintRangeTy::Factory().getEmptyMap()) {}
+
+void FalsePositiveRefutationBRVisitor::finalizeVisitor(
+ BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
+ // Collect new constraints
+ VisitNode(EndPathNode, nullptr, BRC, BR);
+
+ // Create a refutation manager
+ std::unique_ptr<SMTSolver> RefutationSolver = CreateZ3Solver();
+ ASTContext &Ctx = BRC.getASTContext();
+
+ // Add constraints to the solver
+ for (const auto &I : Constraints) {
+ SymbolRef Sym = I.first;
+
+ SMTExprRef SMTConstraints = RefutationSolver->fromBoolean(false);
+ for (const auto &Range : I.second) {
+ SMTConstraints = RefutationSolver->mkOr(
+ SMTConstraints,
+ RefutationSolver->getRangeExpr(Ctx, Sym, Range.From(), Range.To(),
+ /*InRange=*/true));
+ }
+ RefutationSolver->addConstraint(SMTConstraints);
+ }
+
+ // And check for satisfiability
+ if (RefutationSolver->check().isConstrainedFalse())
+ BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
+}
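+// Encoding sketch (symbol and ranges assumed for illustration): for a
+// symbol S constrained to [1, 5] or [10, 10], the loop above builds
+//   false || (S >= 1 && S <= 5) || (S >= 10 && S <= 10)
+// and asserts it; if the conjunction of all per-symbol formulas is
+// unsatisfiable, the path is infeasible and the report is marked invalid.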
+
+std::shared_ptr<PathDiagnosticPiece>
+FalsePositiveRefutationBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ // Collect new constraints
+ const ConstraintRangeTy &NewCs = N->getState()->get<ConstraintRange>();
+ ConstraintRangeTy::Factory &CF =
+ N->getState()->get_context<ConstraintRange>();
+
+ // Add constraints if we don't have them yet
+ for (auto const &C : NewCs) {
+ const SymbolRef &Sym = C.first;
+ if (!Constraints.contains(Sym)) {
+ Constraints = CF.add(Constraints, Sym, C.second);
+ }
+ }
+
+ return nullptr;
+}
+
+void FalsePositiveRefutationBRVisitor::Profile(
+ llvm::FoldingSetNodeID &ID) const {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+}
diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt
index 5ac4f942f373..de994b598e59 100644
--- a/lib/StaticAnalyzer/Core/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -13,7 +13,6 @@ add_clang_library(clangStaticAnalyzerCore
AnalyzerOptions.cpp
BasicValueFactory.cpp
BlockCounter.cpp
- IssueHash.cpp
BugReporter.cpp
BugReporterVisitors.cpp
CallEvent.cpp
@@ -35,6 +34,7 @@ add_clang_library(clangStaticAnalyzerCore
ExprEngineObjC.cpp
FunctionSummary.cpp
HTMLDiagnostics.cpp
+ IssueHash.cpp
LoopUnrolling.cpp
LoopWidening.cpp
MemRegion.cpp
@@ -48,9 +48,11 @@ add_clang_library(clangStaticAnalyzerCore
SVals.cpp
SimpleConstraintManager.cpp
SimpleSValBuilder.cpp
+ SMTConstraintManager.cpp
Store.cpp
SubEngine.cpp
SymbolManager.cpp
+ WorkList.cpp
Z3ConstraintManager.cpp
LINK_LIBS
@@ -58,6 +60,7 @@ add_clang_library(clangStaticAnalyzerCore
clangASTMatchers
clangAnalysis
clangBasic
+ clangCrossTU
clangLex
clangRewrite
${Z3_LINK_FILES}
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 776369be9dba..8db7b06f186d 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -1,4 +1,4 @@
-//===- Calls.cpp - Wrapper for all function and method calls ------*- C++ -*--//
+//===- CallEvent.cpp - Wrapper for all function and method calls ----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,14 +14,52 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/CrossTU/CrossTranslationUnit.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <utility>
#define DEBUG_TYPE "static-analyzer-call-event"
@@ -29,11 +67,13 @@ using namespace clang;
using namespace ento;
QualType CallEvent::getResultType() const {
+ ASTContext &Ctx = getState()->getStateManager().getContext();
const Expr *E = getOriginExpr();
- assert(E && "Calls without origin expressions do not have results");
- QualType ResultTy = E->getType();
+ if (!E)
+ return Ctx.VoidTy;
+ assert(E);
- ASTContext &Ctx = getState()->getStateManager().getContext();
+ QualType ResultTy = E->getType();
// A function that returns a reference to 'int' will have a result type
// of simply 'int'. Check the origin expr's value kind to recover the
@@ -78,7 +118,7 @@ static bool isCallback(QualType T) {
}
static bool isVoidPointerToNonConst(QualType T) {
- if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (const auto *PT = T->getAs<PointerType>()) {
QualType PointeeTy = PT->getPointeeType();
if (PointeeTy.isConstQualified())
return false;
@@ -119,14 +159,14 @@ bool CallEvent::hasVoidPointerToNonConstArg() const {
}
bool CallEvent::isGlobalCFunction(StringRef FunctionName) const {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
if (!FD)
return false;
return CheckerContext::isCLibraryFunction(FD, FunctionName);
}
-/// \brief Returns true if a type is a pointer-to-const or reference-to-const
+/// Returns true if a type is a pointer-to-const or reference-to-const
/// with no further indirection.
static bool isPointerToConst(QualType Ty) {
QualType PointeeTy = Ty->getPointeeType();
@@ -235,7 +275,7 @@ SVal CallEvent::getArgSVal(unsigned Index) const {
SourceRange CallEvent::getArgSourceRange(unsigned Index) const {
const Expr *ArgE = getArgExpr(Index);
if (!ArgE)
- return SourceRange();
+ return {};
return ArgE->getSourceRange();
}
@@ -266,7 +306,6 @@ void CallEvent::dump(raw_ostream &Out) const {
Out << "Unknown call (type " << getKind() << ")";
}
-
bool CallEvent::isCallStmt(const Stmt *S) {
return isa<CallExpr>(S) || isa<ObjCMessageExpr>(S)
|| isa<CXXConstructExpr>(S)
@@ -275,11 +314,11 @@ bool CallEvent::isCallStmt(const Stmt *S) {
QualType CallEvent::getDeclaredResultType(const Decl *D) {
assert(D);
- if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D))
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->getReturnType();
- if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getReturnType();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (const auto *BD = dyn_cast<BlockDecl>(D)) {
// Blocks are difficult because the return type may not be stored in the
// BlockDecl itself. The AST should probably be enhanced, but for now we
// just do what we can.
@@ -296,7 +335,7 @@ QualType CallEvent::getDeclaredResultType(const Decl *D) {
return Ty;
}
- return QualType();
+ return {};
}
llvm_unreachable("unknown callable kind");
@@ -305,11 +344,11 @@ QualType CallEvent::getDeclaredResultType(const Decl *D) {
bool CallEvent::isVariadic(const Decl *D) {
assert(D);
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->isVariadic();
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->isVariadic();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
return BD->isVariadic();
llvm_unreachable("unknown callable kind");
@@ -350,32 +389,53 @@ ArrayRef<ParmVarDecl*> AnyFunctionCall::parameters() const {
RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
const FunctionDecl *FD = getDecl();
+ if (!FD)
+ return {};
+
// Note that the AnalysisDeclContext will have the FunctionDecl with
// the definition (if one exists).
- if (FD) {
- AnalysisDeclContext *AD =
- getLocationContext()->getAnalysisDeclContext()->
- getManager()->getContext(FD);
- bool IsAutosynthesized;
- Stmt* Body = AD->getBody(IsAutosynthesized);
- DEBUG({
- if (IsAutosynthesized)
- llvm::dbgs() << "Using autosynthesized body for " << FD->getName()
- << "\n";
- });
- if (Body) {
- const Decl* Decl = AD->getDecl();
- return RuntimeDefinition(Decl);
- }
+ AnalysisDeclContext *AD =
+ getLocationContext()->getAnalysisDeclContext()->
+ getManager()->getContext(FD);
+ bool IsAutosynthesized;
+ Stmt* Body = AD->getBody(IsAutosynthesized);
+ LLVM_DEBUG({
+ if (IsAutosynthesized)
+ llvm::dbgs() << "Using autosynthesized body for " << FD->getName()
+ << "\n";
+ });
+ if (Body) {
+ const Decl* Decl = AD->getDecl();
+ return RuntimeDefinition(Decl);
+ }
+
+ SubEngine *Engine = getState()->getStateManager().getOwningEngine();
+ AnalyzerOptions &Opts = Engine->getAnalysisManager().options;
+
+ // Try to get CTU definition only if CTUDir is provided.
+ if (!Opts.naiveCTUEnabled())
+ return {};
+
+ cross_tu::CrossTranslationUnitContext &CTUCtx =
+ *Engine->getCrossTranslationUnitContext();
+ llvm::Expected<const FunctionDecl *> CTUDeclOrError =
+ CTUCtx.getCrossTUDefinition(FD, Opts.getCTUDir(), Opts.getCTUIndexName());
+
+ if (!CTUDeclOrError) {
+ handleAllErrors(CTUDeclOrError.takeError(),
+ [&](const cross_tu::IndexError &IE) {
+ CTUCtx.emitCrossTUDiagnostics(IE);
+ });
+ return {};
}
- return RuntimeDefinition();
+ return RuntimeDefinition(*CTUDeclOrError);
}
void AnyFunctionCall::getInitialStackFrameContents(
const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const {
- const FunctionDecl *D = cast<FunctionDecl>(CalleeCtx->getDecl());
+ const auto *D = cast<FunctionDecl>(CalleeCtx->getDecl());
SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
D->parameters());
@@ -442,7 +502,6 @@ bool AnyFunctionCall::argumentsMayEscape() const {
return false;
}
-
const FunctionDecl *SimpleFunctionCall::getDecl() const {
const FunctionDecl *D = getOriginExpr()->getDirectCallee();
if (D)
@@ -451,9 +510,8 @@ const FunctionDecl *SimpleFunctionCall::getDecl() const {
return getSVal(getOriginExpr()->getCallee()).getAsFunctionDecl();
}
-
const FunctionDecl *CXXInstanceCall::getDecl() const {
- const CallExpr *CE = cast_or_null<CallExpr>(getOriginExpr());
+ const auto *CE = cast_or_null<CallExpr>(getOriginExpr());
if (!CE)
return AnyFunctionCall::getDecl();
@@ -470,15 +528,20 @@ void CXXInstanceCall::getExtraInvalidatedValues(
Values.push_back(ThisVal);
// Don't invalidate if the method is const and there are no mutable fields.
- if (const CXXMethodDecl *D = cast_or_null<CXXMethodDecl>(getDecl())) {
+ if (const auto *D = cast_or_null<CXXMethodDecl>(getDecl())) {
if (!D->isConst())
return;
// Get the record decl for the class of 'This'. D->getParent() may return a
// base class decl, rather than the class of the instance which needs to be
// checked for mutable fields.
+ // TODO: We might as well look at the dynamic type of the object.
const Expr *Ex = getCXXThisExpr()->IgnoreParenBaseCasts();
- const CXXRecordDecl *ParentRecord = Ex->getType()->getAsCXXRecordDecl();
- if (!ParentRecord || ParentRecord->hasMutableFields())
+ QualType T = Ex->getType();
+ if (T->isPointerType()) // Arrow or implicit-this syntax?
+ T = T->getPointeeType();
+ const CXXRecordDecl *ParentRecord = T->getAsCXXRecordDecl();
+ assert(ParentRecord);
+ if (ParentRecord->hasMutableFields())
return;
// Preserve CXXThis.
const MemRegion *ThisRegion = ThisVal.getAsRegion();
@@ -501,27 +564,26 @@ SVal CXXInstanceCall::getCXXThisVal() const {
return ThisVal;
}
-
RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// Do we have a decl at all?
const Decl *D = getDecl();
if (!D)
- return RuntimeDefinition();
+ return {};
// If the method is non-virtual, we know we can inline it.
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ const auto *MD = cast<CXXMethodDecl>(D);
if (!MD->isVirtual())
return AnyFunctionCall::getRuntimeDefinition();
// Do we know the implicit 'this' object being called?
const MemRegion *R = getCXXThisVal().getAsRegion();
if (!R)
- return RuntimeDefinition();
+ return {};
// Do we know anything about the type of 'this'?
DynamicTypeInfo DynType = getDynamicTypeInfo(getState(), R);
if (!DynType.isValid())
- return RuntimeDefinition();
+ return {};
// Is the type a C++ class? (This is mostly a defensive check.)
QualType RegionType = DynType.getType()->getPointeeType();
@@ -529,7 +591,7 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl();
if (!RD || !RD->hasDefinition())
- return RuntimeDefinition();
+ return {};
// Find the decl for this method in that class.
const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true);
@@ -547,13 +609,13 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// this is fixed. <rdar://problem/12287087>
//assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo");
- return RuntimeDefinition();
+ return {};
}
// Does the decl that we found have an implementation?
const FunctionDecl *Definition;
if (!Result->hasBody(Definition))
- return RuntimeDefinition();
+ return {};
// We found a definition. If we're not sure that this devirtualization is
// actually what will happen at runtime, make sure to provide the region so
@@ -574,7 +636,7 @@ void CXXInstanceCall::getInitialStackFrameContents(
ProgramStateManager &StateMgr = getState()->getStateManager();
SValBuilder &SVB = StateMgr.getSValBuilder();
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ const auto *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
// If we devirtualized to a different member function, we need to make sure
@@ -587,7 +649,15 @@ void CXXInstanceCall::getInitialStackFrameContents(
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
bool Failed;
ThisVal = StateMgr.getStoreManager().attemptDownCast(ThisVal, Ty, Failed);
- assert(!Failed && "Calling an incorrectly devirtualized method");
+ if (Failed) {
+ // We might have encountered a placement new earlier, so we're
+ // constructing in completely unexpected storage.
+ // Fall back to a generic pointer cast for the this-value.
+ const CXXMethodDecl *StaticMD = cast<CXXMethodDecl>(getDecl());
+ const CXXRecordDecl *StaticClass = StaticMD->getParent();
+ QualType StaticTy = Ctx.getPointerType(Ctx.getRecordType(StaticClass));
+ ThisVal = SVB.evalCast(ThisVal, Ty, StaticTy);
+ }
}
if (!ThisVal.isUnknown())
@@ -595,8 +665,6 @@ void CXXInstanceCall::getInitialStackFrameContents(
}
}
-
-
const Expr *CXXMemberCall::getCXXThisExpr() const {
return getOriginExpr()->getImplicitObjectArgument();
}
@@ -606,19 +674,17 @@ RuntimeDefinition CXXMemberCall::getRuntimeDefinition() const {
// id-expression in the class member access expression is a qualified-id,
// that function is called. Otherwise, its final overrider in the dynamic type
// of the object expression is called.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(getOriginExpr()->getCallee()))
+ if (const auto *ME = dyn_cast<MemberExpr>(getOriginExpr()->getCallee()))
if (ME->hasQualifier())
return AnyFunctionCall::getRuntimeDefinition();
return CXXInstanceCall::getRuntimeDefinition();
}
-
const Expr *CXXMemberOperatorCall::getCXXThisExpr() const {
return getOriginExpr()->getArg(0);
}
-
const BlockDataRegion *BlockCall::getBlockRegion() const {
const Expr *Callee = getOriginExpr()->getCallee();
const MemRegion *DataReg = getSVal(Callee).getAsRegion();
@@ -663,7 +729,6 @@ void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
Params);
}
-
SVal CXXConstructorCall::getCXXThisVal() const {
if (Data)
return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
@@ -672,8 +737,13 @@ SVal CXXConstructorCall::getCXXThisVal() const {
void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values,
RegionAndSymbolInvalidationTraits *ETraits) const {
- if (Data)
- Values.push_back(loc::MemRegionVal(static_cast<const MemRegion *>(Data)));
+ if (Data) {
+ loc::MemRegionVal MV(static_cast<const MemRegion *>(Data));
+ if (SymbolRef Sym = MV.getAsSymbol(true))
+ ETraits->setTrait(Sym,
+ RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
+ Values.push_back(MV);
+ }
}
void CXXConstructorCall::getInitialStackFrameContents(
@@ -684,7 +754,7 @@ void CXXConstructorCall::getInitialStackFrameContents(
SVal ThisVal = getCXXThisVal();
if (!ThisVal.isUnknown()) {
SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ const auto *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
}
@@ -785,7 +855,7 @@ SourceRange ObjCMethodCall::getSourceRange() const {
llvm_unreachable("unknown message kind");
}
-typedef llvm::PointerIntPair<const PseudoObjectExpr *, 2> ObjCMessageDataTy;
+using ObjCMessageDataTy = llvm::PointerIntPair<const PseudoObjectExpr *, 2>;
const PseudoObjectExpr *ObjCMethodCall::getContainingPseudoObjectExpr() const {
assert(Data && "Lazy lookup not yet performed.");
@@ -799,7 +869,7 @@ getSyntacticFromForPseudoObjectExpr(const PseudoObjectExpr *POE) {
// This handles the funny case of assigning to the result of a getter.
// This can happen if the getter returns a non-const reference.
- if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Syntactic))
+ if (const auto *BO = dyn_cast<BinaryOperator>(Syntactic))
Syntactic = BO->getLHS();
return Syntactic;
@@ -807,13 +877,12 @@ getSyntacticFromForPseudoObjectExpr(const PseudoObjectExpr *POE) {
ObjCMessageKind ObjCMethodCall::getMessageKind() const {
if (!Data) {
-
// Find the parent, ignoring implicit casts.
ParentMap &PM = getLocationContext()->getParentMap();
const Stmt *S = PM.getParentIgnoreParenCasts(getOriginExpr());
// Check if parent is a PseudoObjectExpr.
- if (const PseudoObjectExpr *POE = dyn_cast_or_null<PseudoObjectExpr>(S)) {
+ if (const auto *POE = dyn_cast_or_null<PseudoObjectExpr>(S)) {
const Expr *Syntactic = getSyntacticFromForPseudoObjectExpr(POE);
ObjCMessageKind K;
@@ -875,15 +944,14 @@ const ObjCPropertyDecl *ObjCMethodCall::getAccessedProperty() const {
bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
Selector Sel) const {
assert(IDecl);
- const SourceManager &SM =
- getState()->getStateManager().getContext().getSourceManager();
-
+ AnalysisManager &AMgr =
+ getState()->getStateManager().getOwningEngine()->getAnalysisManager();
// If the class interface is declared inside the main file, assume it is not
// subclassed.
// TODO: It could actually be subclassed if the subclass is private as well.
// This is probably very rare.
SourceLocation InterfLoc = IDecl->getEndOfDefinitionLoc();
- if (InterfLoc.isValid() && SM.isInMainFile(InterfLoc))
+ if (InterfLoc.isValid() && AMgr.isInCodeFile(InterfLoc))
return false;
// Assume that property accessors are not overridden.
@@ -905,7 +973,7 @@ bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
return false;
// If outside the main file, assume the method can be overridden.
- if (D->getLocation().isValid() && !SM.isInMainFile(D->getLocation()))
+ if (D->getLocation().isValid() && !AMgr.isInCodeFile(D->getLocation()))
return true;
if (D->isOverriding()) {
@@ -965,7 +1033,6 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
Selector Sel = E->getSelector();
if (E->isInstanceMessage()) {
-
// Find the receiver type.
const ObjCObjectPointerType *ReceiverT = nullptr;
bool CanBeSubClassed = false;
@@ -980,13 +1047,13 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
} else {
Receiver = getReceiverSVal().getAsRegion();
if (!Receiver)
- return RuntimeDefinition();
+ return {};
DynamicTypeInfo DTI = getDynamicTypeInfo(getState(), Receiver);
if (!DTI.isValid()) {
assert(isa<AllocaRegion>(Receiver) &&
"Unhandled untyped region class!");
- return RuntimeDefinition();
+ return {};
}
QualType DynType = DTI.getType();
@@ -1041,11 +1108,9 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
// need to revisit this someday. In terms of memory, this table
// stays around until clang quits, which also may be bad if we
// need to release memory.
- typedef std::pair<const ObjCInterfaceDecl*, Selector>
- PrivateMethodKey;
- typedef llvm::DenseMap<PrivateMethodKey,
- Optional<const ObjCMethodDecl *> >
- PrivateMethodCache;
+ using PrivateMethodKey = std::pair<const ObjCInterfaceDecl *, Selector>;
+ using PrivateMethodCache =
+ llvm::DenseMap<PrivateMethodKey, Optional<const ObjCMethodDecl *>>;
static PrivateMethodCache PMC;
Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];
@@ -1090,7 +1155,6 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
else
return RuntimeDefinition(MD, nullptr);
}
-
} else {
// This is a class method.
// If we have type info for the receiver class, we are calling via
@@ -1101,7 +1165,7 @@ RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
}
}
- return RuntimeDefinition();
+ return {};
}
bool ObjCMethodCall::argumentsMayEscape() const {
@@ -1118,7 +1182,7 @@ bool ObjCMethodCall::argumentsMayEscape() const {
void ObjCMethodCall::getInitialStackFrameContents(
const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const {
- const ObjCMethodDecl *D = cast<ObjCMethodDecl>(CalleeCtx->getDecl());
+ const auto *D = cast<ObjCMethodDecl>(CalleeCtx->getDecl());
SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
D->parameters());
@@ -1135,12 +1199,12 @@ void ObjCMethodCall::getInitialStackFrameContents(
CallEventRef<>
CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
const LocationContext *LCtx) {
- if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE))
+ if (const auto *MCE = dyn_cast<CXXMemberCallExpr>(CE))
return create<CXXMemberCall>(MCE, State, LCtx);
- if (const CXXOperatorCallExpr *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ if (const auto *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
if (MD->isInstance())
return create<CXXMemberOperatorCall>(OpCE, State, LCtx);
@@ -1153,12 +1217,11 @@ CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
return create<SimpleFunctionCall>(CE, State, LCtx);
}
-
CallEventRef<>
CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
ProgramStateRef State) {
const LocationContext *ParentCtx = CalleeCtx->getParent();
- const LocationContext *CallerCtx = ParentCtx->getCurrentStackFrame();
+ const LocationContext *CallerCtx = ParentCtx->getStackFrame();
assert(CallerCtx && "This should not be used for top-level stack frames");
const Stmt *CallSite = CalleeCtx->getCallSite();
@@ -1171,7 +1234,7 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
case Stmt::CXXConstructExprClass:
case Stmt::CXXTemporaryObjectExprClass: {
SValBuilder &SVB = State->getStateManager().getSValBuilder();
- const CXXMethodDecl *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ const auto *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
SVal ThisVal = State->getSVal(ThisPtr);
@@ -1192,12 +1255,11 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
// destructors, though this could change in the future.
const CFGBlock *B = CalleeCtx->getCallSiteBlock();
CFGElement E = (*B)[CalleeCtx->getIndex()];
- assert(E.getAs<CFGImplicitDtor>() &&
+ assert((E.getAs<CFGImplicitDtor>() || E.getAs<CFGTemporaryDtor>()) &&
"All other CFG elements should have exprs");
- assert(!E.getAs<CFGTemporaryDtor>() && "We don't handle temporaries yet");
SValBuilder &SVB = State->getStateManager().getSValBuilder();
- const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CalleeCtx->getDecl());
+ const auto *Dtor = cast<CXXDestructorDecl>(CalleeCtx->getDecl());
Loc ThisPtr = SVB.getCXXThis(Dtor, CalleeCtx);
SVal ThisVal = State->getSVal(ThisPtr);
@@ -1205,7 +1267,7 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
if (Optional<CFGAutomaticObjDtor> AutoDtor = E.getAs<CFGAutomaticObjDtor>())
Trigger = AutoDtor->getTriggerStmt();
else if (Optional<CFGDeleteDtor> DeleteDtor = E.getAs<CFGDeleteDtor>())
- Trigger = cast<Stmt>(DeleteDtor->getDeleteExpr());
+ Trigger = DeleteDtor->getDeleteExpr();
else
Trigger = Dtor->getBody();
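The most consequential change in CallEvent.cpp is in AnyFunctionCall::getRuntimeDefinition(): when no body is available in the current translation unit and naiveCTUEnabled() is set, the engine asks the CrossTranslationUnitContext for an external definition, and a failed index lookup is drained through handleAllErrors rather than escaping as an unchecked llvm::Error. A small sketch of that Expected-consumption pattern, built only on LLVM's Support library, with a string standing in for the FunctionDecl result (lookupExternalDefinition is hypothetical):

    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    // Stand-in for CrossTranslationUnitContext::getCrossTUDefinition().
    static llvm::Expected<std::string> lookupExternalDefinition(bool Found) {
      if (!Found)
        return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                       "no definition in CTU index");
      return std::string("c:@F@foo"); // e.g. the USR of the located decl
    }

    int main() {
      llvm::Expected<std::string> DefOrErr = lookupExternalDefinition(false);
      if (!DefOrErr) {
        // Mirrors the hunk above: consume every error (emitting a
        // diagnostic), then fall back to an empty result instead of
        // aborting on an unhandled llvm::Error.
        llvm::handleAllErrors(DefOrErr.takeError(), [](llvm::StringError &E) {
          llvm::errs() << "CTU: " << E.getMessage() << '\n';
        });
        return 1;
      }
      llvm::errs() << "found " << *DefOrErr << '\n';
      return 0;
    }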
diff --git a/lib/StaticAnalyzer/Core/CheckerContext.cpp b/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 61cbf3854bb2..6cf931abbddd 100644
--- a/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -20,9 +20,8 @@ using namespace clang;
using namespace ento;
const FunctionDecl *CheckerContext::getCalleeDecl(const CallExpr *CE) const {
- ProgramStateRef State = getState();
const Expr *Callee = CE->getCallee();
- SVal L = State->getSVal(Callee, Pred->getLocationContext());
+ SVal L = Pred->getSVal(Callee);
return L.getAsFunctionDecl();
}
diff --git a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index ed41914ebd05..b9facffcc8b5 100644
--- a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -15,8 +15,12 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
+namespace clang {
+
+namespace ento {
+
// Recursively find any substatements containing macros
-bool clang::ento::containsMacro(const Stmt *S) {
+bool containsMacro(const Stmt *S) {
if (S->getLocStart().isMacroID())
return true;
@@ -31,7 +35,7 @@ bool clang::ento::containsMacro(const Stmt *S) {
}
// Recursively find any substatements containing enum constants
-bool clang::ento::containsEnum(const Stmt *S) {
+bool containsEnum(const Stmt *S) {
const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
if (DR && isa<EnumConstantDecl>(DR->getDecl()))
@@ -45,7 +49,7 @@ bool clang::ento::containsEnum(const Stmt *S) {
}
// Recursively find any substatements containing static vars
-bool clang::ento::containsStaticLocal(const Stmt *S) {
+bool containsStaticLocal(const Stmt *S) {
const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
if (DR)
@@ -61,7 +65,7 @@ bool clang::ento::containsStaticLocal(const Stmt *S) {
}
// Recursively find any substatements containing __builtin_offsetof
-bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
+bool containsBuiltinOffsetOf(const Stmt *S) {
if (isa<OffsetOfExpr>(S))
return true;
@@ -74,7 +78,7 @@ bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
// Extract lhs and rhs from assignment statement
std::pair<const clang::VarDecl *, const clang::Expr *>
-clang::ento::parseAssignment(const Stmt *S) {
+parseAssignment(const Stmt *S) {
const VarDecl *VD = nullptr;
const Expr *RHS = nullptr;
@@ -94,3 +98,18 @@ clang::ento::parseAssignment(const Stmt *S) {
return std::make_pair(VD, RHS);
}
+
+Nullability getNullabilityAnnotation(QualType Type) {
+ const auto *AttrType = Type->getAs<AttributedType>();
+ if (!AttrType)
+ return Nullability::Unspecified;
+ if (AttrType->getAttrKind() == AttributedType::attr_nullable)
+ return Nullability::Nullable;
+ else if (AttrType->getAttrKind() == AttributedType::attr_nonnull)
+ return Nullability::Nonnull;
+ return Nullability::Unspecified;
+}
+
+
+} // end namespace ento
+} // end namespace clang
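getNullabilityAnnotation, moved here so multiple checkers can share it, reduces a type's nullability sugar to a three-valued answer: _Nullable maps to Nullable, _Nonnull to Nonnull, and everything else (including the absence of any attribute) to Unspecified. A self-contained mirror of that mapping, with a stand-in enum instead of clang's AttributedType:

    enum class Nullability { Unspecified, Nullable, Nonnull };
    enum class AttrKind { None, Nullable, Nonnull, Other };

    static Nullability getNullabilityAnnotation(AttrKind K) {
      // Only the two nullability attributes are interesting; anything
      // else degrades to Unspecified, just as in the helper above.
      switch (K) {
      case AttrKind::Nullable: return Nullability::Nullable;
      case AttrKind::Nonnull:  return Nullability::Nonnull;
      default:                 return Nullability::Unspecified;
      }
    }

    int main() {
      return static_cast<int>(getNullabilityAnnotation(AttrKind::Nonnull)); // 2
    }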
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 49f3edef2a2d..712872a15d8a 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -1,4 +1,4 @@
-//===--- CheckerManager.cpp - Static Analyzer Checker Manager -------------===//
+//===- CheckerManager.cpp - Static Analyzer Checker Manager ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,10 +13,20 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/Stmt.h"
#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <vector>
using namespace clang;
using namespace ento;
@@ -43,9 +53,9 @@ void CheckerManager::finishedCheckerRegistration() {
#ifndef NDEBUG
// Make sure that for every event that has listeners, there is at least
// one dispatcher registered for it.
- for (llvm::DenseMap<EventTag, EventInfo>::iterator
- I = Events.begin(), E = Events.end(); I != E; ++I)
- assert(I->second.HasDispatcher && "No dispatcher registered for an event");
+ for (const auto &Event : Events)
+ assert(Event.second.HasDispatcher &&
+ "No dispatcher registered for an event");
#endif
}
@@ -65,25 +75,22 @@ void CheckerManager::runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
} else {
// Find the checkers that should run for this Decl and cache them.
checkers = &CachedDeclCheckersMap[DeclKind];
- for (unsigned i = 0, e = DeclCheckers.size(); i != e; ++i) {
- DeclCheckerInfo &info = DeclCheckers[i];
+ for (const auto &info : DeclCheckers)
if (info.IsForDeclFn(D))
checkers->push_back(info.CheckFn);
- }
}
assert(checkers);
- for (CachedDeclCheckers::iterator
- I = checkers->begin(), E = checkers->end(); I != E; ++I)
- (*I)(D, mgr, BR);
+ for (const auto checker : *checkers)
+ checker(D, mgr, BR);
}
void CheckerManager::runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) {
assert(D && D->hasBody());
- for (unsigned i = 0, e = BodyCheckers.size(); i != e; ++i)
- BodyCheckers[i](D, mgr, BR);
+ for (const auto BodyChecker : BodyCheckers)
+ BodyChecker(D, mgr, BR);
}
//===----------------------------------------------------------------------===//
@@ -118,10 +125,8 @@ static void expandGraphWithCheckers(CHECK_CTX checkCtx,
}
NodeBuilder B(*PrevSet, *CurrSet, BldrCtx);
- for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
- NI != NE; ++NI) {
- checkCtx.runChecker(*I, B, *NI);
- }
+ for (const auto &NI : *PrevSet)
+ checkCtx.runChecker(*I, B, NI);
// If all the produced transitions are sinks, stop.
if (CurrSet->empty())
@@ -133,21 +138,23 @@ static void expandGraphWithCheckers(CHECK_CTX checkCtx,
}
namespace {
+
struct CheckStmtContext {
- typedef SmallVectorImpl<CheckerManager::CheckStmtFunc> CheckersTy;
+ using CheckersTy = SmallVectorImpl<CheckerManager::CheckStmtFunc>;
+
bool IsPreVisit;
const CheckersTy &Checkers;
const Stmt *S;
ExprEngine &Eng;
bool WasInlined;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckStmtContext(bool isPreVisit, const CheckersTy &checkers,
const Stmt *s, ExprEngine &eng, bool wasInlined = false)
- : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
- WasInlined(wasInlined) {}
+ : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
+ WasInlined(wasInlined) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckStmtFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -160,9 +167,10 @@ namespace {
checkFn(S, C);
}
};
-}
-/// \brief Run checkers for visiting Stmts.
+} // namespace
+
+/// Run checkers for visiting Stmts.
void CheckerManager::runCheckersForStmt(bool isPreVisit,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
@@ -175,8 +183,9 @@ void CheckerManager::runCheckersForStmt(bool isPreVisit,
}
namespace {
+
struct CheckObjCMessageContext {
- typedef std::vector<CheckerManager::CheckObjCMessageFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckObjCMessageFunc>;
ObjCMessageVisitKind Kind;
bool WasInlined;
@@ -184,19 +193,18 @@ namespace {
const ObjCMethodCall &Msg;
ExprEngine &Eng;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckObjCMessageContext(ObjCMessageVisitKind visitKind,
const CheckersTy &checkers,
const ObjCMethodCall &msg, ExprEngine &eng,
bool wasInlined)
- : Kind(visitKind), WasInlined(wasInlined), Checkers(checkers),
- Msg(msg), Eng(eng) { }
+ : Kind(visitKind), WasInlined(wasInlined), Checkers(checkers), Msg(msg),
+ Eng(eng) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
-
bool IsPreVisit;
switch (Kind) {
@@ -215,9 +223,10 @@ namespace {
checkFn(*Msg.cloneWithState<ObjCMethodCall>(Pred->getState()), C);
}
};
-}
-/// \brief Run checkers for visiting obj-c messages.
+} // namespace
+
+/// Run checkers for visiting obj-c messages.
void CheckerManager::runCheckersForObjCMessage(ObjCMessageVisitKind visitKind,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
@@ -242,24 +251,27 @@ CheckerManager::getObjCMessageCheckers(ObjCMessageVisitKind Kind) {
}
llvm_unreachable("Unknown Kind");
}
+
namespace {
+
// FIXME: This has all the same signatures as CheckObjCMessageContext.
// Is there a way we can merge the two?
struct CheckCallContext {
- typedef std::vector<CheckerManager::CheckCallFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckCallFunc>;
+
bool IsPreVisit, WasInlined;
const CheckersTy &Checkers;
const CallEvent &Call;
ExprEngine &Eng;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckCallContext(bool isPreVisit, const CheckersTy &checkers,
const CallEvent &call, ExprEngine &eng,
bool wasInlined)
- : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
- Call(call), Eng(eng) { }
+ : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+ Call(call), Eng(eng) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckCallFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -269,9 +281,10 @@ namespace {
checkFn(*Call.cloneWithState(Pred->getState()), C);
}
};
-}
-/// \brief Run checkers for visiting an abstract call event.
+} // namespace
+
+/// Run checkers for visiting an abstract call event.
void CheckerManager::runCheckersForCallEvent(bool isPreVisit,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
@@ -286,8 +299,10 @@ void CheckerManager::runCheckersForCallEvent(bool isPreVisit,
}
namespace {
+
struct CheckLocationContext {
- typedef std::vector<CheckerManager::CheckLocationFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckLocationFunc>;
+
const CheckersTy &Checkers;
SVal Loc;
bool IsLoad;
@@ -295,15 +310,15 @@ namespace {
const Stmt *BoundEx;
ExprEngine &Eng;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckLocationContext(const CheckersTy &checkers,
SVal loc, bool isLoad, const Stmt *NodeEx,
const Stmt *BoundEx,
ExprEngine &eng)
- : Checkers(checkers), Loc(loc), IsLoad(isLoad), NodeEx(NodeEx),
- BoundEx(BoundEx), Eng(eng) {}
+ : Checkers(checkers), Loc(loc), IsLoad(isLoad), NodeEx(NodeEx),
+ BoundEx(BoundEx), Eng(eng) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckLocationFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -317,9 +332,10 @@ namespace {
checkFn(Loc, IsLoad, BoundEx, C);
}
};
-}
-/// \brief Run checkers for load/store of a location.
+} // namespace
+
+/// Run checkers for load/store of a location.
void CheckerManager::runCheckersForLocation(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
@@ -333,8 +349,10 @@ void CheckerManager::runCheckersForLocation(ExplodedNodeSet &Dst,
}
namespace {
+
struct CheckBindContext {
- typedef std::vector<CheckerManager::CheckBindFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckBindFunc>;
+
const CheckersTy &Checkers;
SVal Loc;
SVal Val;
@@ -342,13 +360,13 @@ namespace {
ExprEngine &Eng;
const ProgramPoint &PP;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckBindContext(const CheckersTy &checkers,
SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
const ProgramPoint &pp)
- : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckBindFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -358,9 +376,10 @@ namespace {
checkFn(Loc, Val, S, C);
}
};
-}
-/// \brief Run checkers for binding of a value to a location.
+} // namespace
+
+/// Run checkers for binding of a value to a location.
void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SVal location, SVal val,
@@ -373,24 +392,26 @@ void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G,
BugReporter &BR,
ExprEngine &Eng) {
- for (unsigned i = 0, e = EndAnalysisCheckers.size(); i != e; ++i)
- EndAnalysisCheckers[i](G, BR, Eng);
+ for (const auto EndAnalysisChecker : EndAnalysisCheckers)
+ EndAnalysisChecker(G, BR, Eng);
}
namespace {
+
struct CheckBeginFunctionContext {
- typedef std::vector<CheckerManager::CheckBeginFunctionFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckBeginFunctionFunc>;
+
const CheckersTy &Checkers;
ExprEngine &Eng;
const ProgramPoint &PP;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckBeginFunctionContext(const CheckersTy &Checkers, ExprEngine &Eng,
const ProgramPoint &PP)
: Checkers(Checkers), Eng(Eng), PP(PP) {}
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
void runChecker(CheckerManager::CheckBeginFunctionFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
const ProgramPoint &L = PP.withTag(checkFn.Checker);
@@ -399,7 +420,8 @@ struct CheckBeginFunctionContext {
checkFn(C);
}
};
-}
+
+} // namespace
void CheckerManager::runCheckersForBeginFunction(ExplodedNodeSet &Dst,
const BlockEdge &L,
@@ -411,42 +433,42 @@ void CheckerManager::runCheckersForBeginFunction(ExplodedNodeSet &Dst,
expandGraphWithCheckers(C, Dst, Src);
}
-/// \brief Run checkers for end of path.
+/// Run checkers for end of path.
// Note: we do not chain the checker output (like in expandGraphWithCheckers)
// for this callback since end-of-path nodes are expected to be final.
void CheckerManager::runCheckersForEndFunction(NodeBuilderContext &BC,
ExplodedNodeSet &Dst,
ExplodedNode *Pred,
- ExprEngine &Eng) {
-
+ ExprEngine &Eng,
+ const ReturnStmt *RS) {
// We define the builder outside of the loop because if at least one checker
// creates a successor for Pred, we do not need to generate an
// autotransition for it.
NodeBuilder Bldr(Pred, Dst, BC);
- for (unsigned i = 0, e = EndFunctionCheckers.size(); i != e; ++i) {
- CheckEndFunctionFunc checkFn = EndFunctionCheckers[i];
-
+ for (const auto checkFn : EndFunctionCheckers) {
const ProgramPoint &L = BlockEntrance(BC.Block,
Pred->getLocationContext(),
checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L);
- checkFn(C);
+ checkFn(RS, C);
}
}
namespace {
+
struct CheckBranchConditionContext {
- typedef std::vector<CheckerManager::CheckBranchConditionFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckBranchConditionFunc>;
+
const CheckersTy &Checkers;
const Stmt *Condition;
ExprEngine &Eng;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckBranchConditionContext(const CheckersTy &checkers,
const Stmt *Cond, ExprEngine &eng)
- : Checkers(checkers), Condition(Cond), Eng(eng) {}
+ : Checkers(checkers), Condition(Cond), Eng(eng) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckBranchConditionFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -456,9 +478,10 @@ namespace {
checkFn(Condition, C);
}
};
-}
-/// \brief Run checkers for branch condition.
+} // namespace
+
+/// Run checkers for branch condition.
void CheckerManager::runCheckersForBranchCondition(const Stmt *Condition,
ExplodedNodeSet &Dst,
ExplodedNode *Pred,
@@ -469,29 +492,69 @@ void CheckerManager::runCheckersForBranchCondition(const Stmt *Condition,
expandGraphWithCheckers(C, Dst, Src);
}
-/// \brief Run checkers for live symbols.
+namespace {
+
+ struct CheckNewAllocatorContext {
+ using CheckersTy = std::vector<CheckerManager::CheckNewAllocatorFunc>;
+
+ const CheckersTy &Checkers;
+ const CXXNewExpr *NE;
+ SVal Target;
+ bool WasInlined;
+ ExprEngine &Eng;
+
+ CheckNewAllocatorContext(const CheckersTy &Checkers, const CXXNewExpr *NE,
+ SVal Target, bool WasInlined, ExprEngine &Eng)
+ : Checkers(Checkers), NE(NE), Target(Target), WasInlined(WasInlined),
+ Eng(Eng) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ void runChecker(CheckerManager::CheckNewAllocatorFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint L = PostAllocatorCall(NE, Pred->getLocationContext());
+ CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+ checkFn(NE, Target, C);
+ }
+ };
+
+} // namespace
+
+void CheckerManager::runCheckersForNewAllocator(
+ const CXXNewExpr *NE, SVal Target, ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ ExprEngine &Eng, bool WasInlined) {
+ ExplodedNodeSet Src;
+ Src.insert(Pred);
+ CheckNewAllocatorContext C(NewAllocatorCheckers, NE, Target, WasInlined, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+/// Run checkers for live symbols.
void CheckerManager::runCheckersForLiveSymbols(ProgramStateRef state,
SymbolReaper &SymReaper) {
- for (unsigned i = 0, e = LiveSymbolsCheckers.size(); i != e; ++i)
- LiveSymbolsCheckers[i](state, SymReaper);
+ for (const auto LiveSymbolsChecker : LiveSymbolsCheckers)
+ LiveSymbolsChecker(state, SymReaper);
}
namespace {
+
struct CheckDeadSymbolsContext {
- typedef std::vector<CheckerManager::CheckDeadSymbolsFunc> CheckersTy;
+ using CheckersTy = std::vector<CheckerManager::CheckDeadSymbolsFunc>;
+
const CheckersTy &Checkers;
SymbolReaper &SR;
const Stmt *S;
ExprEngine &Eng;
ProgramPoint::Kind ProgarmPointKind;
- CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
- CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
-
CheckDeadSymbolsContext(const CheckersTy &checkers, SymbolReaper &sr,
const Stmt *s, ExprEngine &eng,
ProgramPoint::Kind K)
- : Checkers(checkers), SR(sr), S(s), Eng(eng), ProgarmPointKind(K) { }
+ : Checkers(checkers), SR(sr), S(s), Eng(eng), ProgarmPointKind(K) {}
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
void runChecker(CheckerManager::CheckDeadSymbolsFunc checkFn,
NodeBuilder &Bldr, ExplodedNode *Pred) {
@@ -505,9 +568,10 @@ namespace {
checkFn(SR, C);
}
};
-}
-/// \brief Run checkers for dead symbols.
+} // namespace
+
+/// Run checkers for dead symbols.
void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SymbolReaper &SymReaper,
@@ -518,7 +582,7 @@ void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
expandGraphWithCheckers(C, Dst, Src);
}
-/// \brief Run checkers for region changes.
+/// Run checkers for region changes.
ProgramStateRef
CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
@@ -526,19 +590,18 @@ CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
ArrayRef<const MemRegion *> Regions,
const LocationContext *LCtx,
const CallEvent *Call) {
- for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i) {
+ for (const auto RegionChangesChecker : RegionChangesCheckers) {
// If any checker declares the state infeasible (or if it starts that way),
// bail out.
if (!state)
return nullptr;
- state = RegionChangesCheckers[i](state, invalidated,
- ExplicitRegions, Regions,
- LCtx, Call);
+ state = RegionChangesChecker(state, invalidated, ExplicitRegions, Regions,
+ LCtx, Call);
}
return state;
}
-/// \brief Run checkers to process symbol escape event.
+/// Run checkers to process symbol escape event.
ProgramStateRef
CheckerManager::runCheckersForPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
@@ -549,58 +612,55 @@ CheckerManager::runCheckersForPointerEscape(ProgramStateRef State,
(Kind != PSK_DirectEscapeOnCall &&
Kind != PSK_IndirectEscapeOnCall)) &&
"Call must not be NULL when escaping on call");
- for (unsigned i = 0, e = PointerEscapeCheckers.size(); i != e; ++i) {
- // If any checker declares the state infeasible (or if it starts that
- // way), bail out.
- if (!State)
- return nullptr;
- State = PointerEscapeCheckers[i](State, Escaped, Call, Kind, ETraits);
- }
+ for (const auto PointerEscapeChecker : PointerEscapeCheckers) {
+ // If any checker declares the state infeasible (or if it starts that
+ // way), bail out.
+ if (!State)
+ return nullptr;
+ State = PointerEscapeChecker(State, Escaped, Call, Kind, ETraits);
+ }
return State;
}
-/// \brief Run checkers for handling assumptions on symbolic values.
+/// Run checkers for handling assumptions on symbolic values.
ProgramStateRef
CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
SVal Cond, bool Assumption) {
- for (unsigned i = 0, e = EvalAssumeCheckers.size(); i != e; ++i) {
+ for (const auto EvalAssumeChecker : EvalAssumeCheckers) {
// If any checker declares the state infeasible (or if it starts that way),
// bail out.
if (!state)
return nullptr;
- state = EvalAssumeCheckers[i](state, Cond, Assumption);
+ state = EvalAssumeChecker(state, Cond, Assumption);
}
return state;
}
-/// \brief Run checkers for evaluating a call.
+/// Run checkers for evaluating a call.
/// Only one checker will evaluate the call.
void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const CallEvent &Call,
ExprEngine &Eng) {
const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
- for (ExplodedNodeSet::iterator
- NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) {
- ExplodedNode *Pred = *NI;
+ for (const auto Pred : Src) {
bool anyEvaluated = false;
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
// Check if any of the EvalCall callbacks can evaluate the call.
- for (std::vector<EvalCallFunc>::iterator
- EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end();
- EI != EE; ++EI) {
+ for (const auto EvalCallChecker : EvalCallCheckers) {
ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
- const ProgramPoint &L = ProgramPoint::getProgramPoint(CE, K,
- Pred->getLocationContext(), EI->Checker);
+ const ProgramPoint &L =
+ ProgramPoint::getProgramPoint(CE, K, Pred->getLocationContext(),
+ EvalCallChecker.Checker);
bool evaluated = false;
{ // CheckerContext generates transitions(populates checkDest) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
CheckerContext C(B, Eng, Pred, L);
- evaluated = (*EI)(CE, C);
+ evaluated = EvalCallChecker(CE, C);
}
assert(!(evaluated && anyEvaluated)
&& "There are more than one checkers evaluating the call");
@@ -621,21 +681,20 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
}
}
-/// \brief Run checkers for the entire Translation Unit.
+/// Run checkers for the entire Translation Unit.
void CheckerManager::runCheckersOnEndOfTranslationUnit(
const TranslationUnitDecl *TU,
AnalysisManager &mgr,
BugReporter &BR) {
- for (unsigned i = 0, e = EndOfTranslationUnitCheckers.size(); i != e; ++i)
- EndOfTranslationUnitCheckers[i](TU, mgr, BR);
+ for (const auto EndOfTranslationUnitChecker : EndOfTranslationUnitCheckers)
+ EndOfTranslationUnitChecker(TU, mgr, BR);
}
void CheckerManager::runCheckersForPrintState(raw_ostream &Out,
ProgramStateRef State,
const char *NL, const char *Sep) {
- for (llvm::DenseMap<CheckerTag, CheckerRef>::iterator
- I = CheckerTags.begin(), E = CheckerTags.end(); I != E; ++I)
- I->second->printState(Out, State, NL, Sep);
+ for (const auto &CheckerTag : CheckerTags)
+ CheckerTag.second->printState(Out, State, NL, Sep);
}
//===----------------------------------------------------------------------===//
@@ -661,6 +720,7 @@ void CheckerManager::_registerForPreStmt(CheckStmtFunc checkfn,
StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/true };
StmtCheckers.push_back(info);
}
+
void CheckerManager::_registerForPostStmt(CheckStmtFunc checkfn,
HandlesStmtFunc isForStmtFn) {
StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/false };
@@ -711,6 +771,10 @@ void CheckerManager::_registerForBranchCondition(
BranchConditionCheckers.push_back(checkfn);
}
+void CheckerManager::_registerForNewAllocator(CheckNewAllocatorFunc checkfn) {
+ NewAllocatorCheckers.push_back(checkfn);
+}
+
void CheckerManager::_registerForLiveSymbols(CheckLiveSymbolsFunc checkfn) {
LiveSymbolsCheckers.push_back(checkfn);
}
@@ -760,15 +824,13 @@ CheckerManager::getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit) {
// Find the checkers that should run for this Stmt and cache them.
CachedStmtCheckers &Checkers = CachedStmtCheckersMap[Key];
- for (unsigned i = 0, e = StmtCheckers.size(); i != e; ++i) {
- StmtCheckerInfo &Info = StmtCheckers[i];
+ for (const auto &Info : StmtCheckers)
if (Info.IsPreVisit == isPreVisit && Info.IsForStmtFn(S))
Checkers.push_back(Info.CheckFn);
- }
return Checkers;
}
CheckerManager::~CheckerManager() {
- for (unsigned i = 0, e = CheckerDtors.size(); i != e; ++i)
- CheckerDtors[i]();
+ for (const auto CheckerDtor : CheckerDtors)
+ CheckerDtor();
}
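Besides the loop modernization, CheckerManager.cpp grows a new callback: runCheckersForNewAllocator fires between the allocator call and the constructor of a new-expression, invoking each registered function at a PostAllocatorCall program point with the CXXNewExpr and the SVal of the freshly allocated storage. A sketch of a checker subscribing to it, assuming the usual clang checker headers and the signature implied by checkFn(NE, Target, C) above (registration boilerplate omitted):

    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    // Fires after operator new returns but before the constructor runs.
    class NewAllocObserver : public Checker<check::NewAllocator> {
    public:
      void checkNewAllocator(const CXXNewExpr *NE, SVal Target,
                             CheckerContext &C) const {
        // Target is the pointer to the raw allocation; a checker could
        // track its region or inspect NE->getAllocatedType() here.
        (void)NE; (void)Target; (void)C;
      }
    };
    } // namespace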
diff --git a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
index c9cb189a5b72..645845ec2181 100644
--- a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -1,4 +1,4 @@
-//===--- CheckerRegistry.cpp - Maintains all available checkers -*- C++ -*-===//
+//===- CheckerRegistry.cpp - Maintains all available checkers -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,18 +9,26 @@
#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstddef>
+#include <tuple>
using namespace clang;
using namespace ento;
static const char PackageSeparator = '.';
-typedef llvm::SetVector<const CheckerRegistry::CheckerInfo *> CheckerInfoSet;
+using CheckerInfoSet = llvm::SetVector<const CheckerRegistry::CheckerInfo *>;
static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
const CheckerRegistry::CheckerInfo &b) {
@@ -50,8 +58,7 @@ static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
// Use a binary search to find the possible start of the package.
CheckerRegistry::CheckerInfo packageInfo(nullptr, opt.getName(), "");
auto end = checkers.cend();
- CheckerRegistry::CheckerInfoList::const_iterator i =
- std::lower_bound(checkers.cbegin(), end, packageInfo, checkerNameLT);
+ auto i = std::lower_bound(checkers.cbegin(), end, packageInfo, checkerNameLT);
// If we didn't even find a possible package, give up.
if (i == end)
@@ -73,12 +80,11 @@ static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
size = packageSize->getValue();
// Step through all the checkers in the package.
- for (auto checkEnd = i+size; i != checkEnd; ++i) {
+ for (auto checkEnd = i+size; i != checkEnd; ++i)
if (opt.isEnabled())
collected.insert(&*i);
else
collected.remove(&*i);
- }
}
void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
@@ -97,42 +103,38 @@ void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
SmallVectorImpl<CheckerOptInfo> &opts) const {
// Sort checkers for efficient collection.
- std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+ llvm::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
// Collect checkers enabled by the options.
CheckerInfoSet enabledCheckers;
- for (SmallVectorImpl<CheckerOptInfo>::iterator
- i = opts.begin(), e = opts.end(); i != e; ++i) {
- collectCheckers(Checkers, Packages, *i, enabledCheckers);
- }
+ for (auto &i : opts)
+ collectCheckers(Checkers, Packages, i, enabledCheckers);
// Initialize the CheckerManager with all enabled checkers.
- for (CheckerInfoSet::iterator
- i = enabledCheckers.begin(), e = enabledCheckers.end(); i != e; ++i) {
- checkerMgr.setCurrentCheckName(CheckName((*i)->FullName));
- (*i)->Initialize(checkerMgr);
+ for (const auto *i : enabledCheckers) {
+ checkerMgr.setCurrentCheckName(CheckName(i->FullName));
+ i->Initialize(checkerMgr);
}
}
void CheckerRegistry::validateCheckerOptions(const AnalyzerOptions &opts,
DiagnosticsEngine &diags) const {
- for (auto &config : opts.Config) {
+ for (const auto &config : opts.Config) {
size_t pos = config.getKey().find(':');
if (pos == StringRef::npos)
continue;
bool hasChecker = false;
StringRef checkerName = config.getKey().substr(0, pos);
- for (auto &checker : Checkers) {
+ for (const auto &checker : Checkers) {
if (checker.FullName.startswith(checkerName) &&
(checker.FullName.size() == pos || checker.FullName[pos] == '.')) {
hasChecker = true;
break;
}
}
- if (!hasChecker) {
+ if (!hasChecker)
diags.Report(diag::err_unknown_analyzer_checker) << checkerName;
- }
}
}
@@ -141,7 +143,7 @@ void CheckerRegistry::printHelp(raw_ostream &out,
// FIXME: Alphabetical sort puts 'experimental' in the middle.
// Would it be better to name it '~experimental' or something else
// that's ASCIIbetically last?
- std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+ llvm::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
// FIXME: Print available packages.
@@ -149,28 +151,26 @@ void CheckerRegistry::printHelp(raw_ostream &out,
// Find the maximum option length.
size_t optionFieldWidth = 0;
- for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
- i != e; ++i) {
+ for (const auto &i : Checkers) {
// Limit the amount of padding we are willing to give up for alignment.
// Package.Name Description [Hidden]
- size_t nameLength = i->FullName.size();
+ size_t nameLength = i.FullName.size();
if (nameLength <= maxNameChars)
optionFieldWidth = std::max(optionFieldWidth, nameLength);
}
const size_t initialPad = 2;
- for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
- i != e; ++i) {
- out.indent(initialPad) << i->FullName;
+ for (const auto &i : Checkers) {
+ out.indent(initialPad) << i.FullName;
- int pad = optionFieldWidth - i->FullName.size();
+ int pad = optionFieldWidth - i.FullName.size();
// Break on long option names.
if (pad < 0) {
out << '\n';
pad = optionFieldWidth + initialPad;
}
- out.indent(pad + 2) << i->Desc;
+ out.indent(pad + 2) << i.Desc;
out << '\n';
}
@@ -178,19 +178,13 @@ void CheckerRegistry::printHelp(raw_ostream &out,
void CheckerRegistry::printList(
raw_ostream &out, SmallVectorImpl<CheckerOptInfo> &opts) const {
- std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+ llvm::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
// Collect checkers enabled by the options.
CheckerInfoSet enabledCheckers;
- for (SmallVectorImpl<CheckerOptInfo>::iterator i = opts.begin(),
- e = opts.end();
- i != e; ++i) {
- collectCheckers(Checkers, Packages, *i, enabledCheckers);
- }
+ for (auto &i : opts)
+ collectCheckers(Checkers, Packages, i, enabledCheckers);
- for (CheckerInfoSet::const_iterator i = enabledCheckers.begin(),
- e = enabledCheckers.end();
- i != e; ++i) {
- out << (*i)->FullName << '\n';
- }
+ for (const auto *i : enabledCheckers)
+ out << i->FullName << '\n';
}
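collectCheckers depends on the checker list being sorted, which is why initializeManager, printHelp, and printList all sort first (now via llvm::sort): an option such as "core" resolves to the contiguous run of names sharing that dotted prefix after a single lower_bound probe. A standalone illustration with plain lexicographic ordering (the registry's checkerNameLT is package-aware, so this is a simplification):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      // Already sorted, as initializeManager guarantees.
      std::vector<std::string> Checkers = {
          "core.CallAndMessage", "core.DivideZero", "cplusplus.NewDelete",
          "unix.Malloc"};
      std::string Opt = "core";
      auto I = std::lower_bound(Checkers.begin(), Checkers.end(), Opt);
      for (; I != Checkers.end(); ++I) {
        // A package member starts with "core" followed by '.'; the
        // exact name "core" itself would also match.
        if (I->compare(0, Opt.size(), Opt) != 0 ||
            (I->size() > Opt.size() && (*I)[Opt.size()] != '.'))
          break;
        std::cout << *I << '\n'; // core.CallAndMessage, core.DivideZero
      }
    }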
diff --git a/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/lib/StaticAnalyzer/Core/ConstraintManager.cpp
index 8de2b0e8d271..ef9c44c51be4 100644
--- a/lib/StaticAnalyzer/Core/ConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -1,4 +1,4 @@
-//== ConstraintManager.cpp - Constraints on symbolic values -----*- C++ -*--==//
+//===- ConstraintManager.cpp - Constraints on symbolic values. ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,12 +11,17 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/AST/Type.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
using namespace clang;
using namespace ento;
-ConstraintManager::~ConstraintManager() {}
+ConstraintManager::~ConstraintManager() = default;
static DefinedSVal getLocFromSymbol(const ProgramStateRef &State,
SymbolRef Sym) {
@@ -35,5 +40,5 @@ ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
return ConditionTruthVal(false);
if (!P.first && P.second)
return ConditionTruthVal(true);
- return ConditionTruthVal();
+ return {};
}
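checkNull is a three-valued query built on assumeDual: the symbol is assumed both non-null and null, and a definite answer exists only when exactly one assumption leaves a feasible state. A toy rendering with booleans standing in for feasible program states:

    #include <utility>

    // Mirrors ento::ConditionTruthVal: constrained-true,
    // constrained-false, or unconstrained (unknown).
    struct ConditionTruthVal {
      bool Constrained = false, Val = false;
      ConditionTruthVal() = default;
      explicit ConditionTruthVal(bool B) : Constrained(true), Val(B) {}
    };

    // P.first  : is the "symbol is non-null" state feasible?
    // P.second : is the "symbol is null" state feasible?
    static ConditionTruthVal checkNull(std::pair<bool, bool> P) {
      if (P.first && !P.second)
        return ConditionTruthVal(false); // only non-null survives
      if (!P.first && P.second)
        return ConditionTruthVal(true);  // only null survives
      return {};                         // ambiguous: stay unconstrained
    }

    int main() {
      return checkNull({true, false}).Val ? 1 : 0; // definitely non-null: 0
    }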
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index e2e9ddf5048e..c17b6aae37e2 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -1,4 +1,4 @@
-//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//===- CoreEngine.cpp - Path-Sensitive Dataflow Engine --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,11 +15,27 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <utility>
using namespace clang;
using namespace ento;
@@ -34,146 +50,42 @@ STATISTIC(NumPathsExplored,
"The # of paths explored by the analyzer.");
//===----------------------------------------------------------------------===//
-// Worklist classes for exploration of reachable states.
+// Core analysis engine.
//===----------------------------------------------------------------------===//
-WorkList::Visitor::~Visitor() {}
-
-namespace {
-class DFS : public WorkList {
- SmallVector<WorkListUnit,20> Stack;
-public:
- bool hasWork() const override {
- return !Stack.empty();
- }
-
- void enqueue(const WorkListUnit& U) override {
- Stack.push_back(U);
- }
-
- WorkListUnit dequeue() override {
- assert (!Stack.empty());
- const WorkListUnit& U = Stack.back();
- Stack.pop_back(); // This technically "invalidates" U, but we are fine.
- return U;
- }
-
- bool visitItemsInWorkList(Visitor &V) override {
- for (SmallVectorImpl<WorkListUnit>::iterator
- I = Stack.begin(), E = Stack.end(); I != E; ++I) {
- if (V.visit(*I))
- return true;
- }
- return false;
- }
-};
-
-class BFS : public WorkList {
- std::deque<WorkListUnit> Queue;
-public:
- bool hasWork() const override {
- return !Queue.empty();
- }
-
- void enqueue(const WorkListUnit& U) override {
- Queue.push_back(U);
- }
-
- WorkListUnit dequeue() override {
- WorkListUnit U = Queue.front();
- Queue.pop_front();
- return U;
- }
-
- bool visitItemsInWorkList(Visitor &V) override {
- for (std::deque<WorkListUnit>::iterator
- I = Queue.begin(), E = Queue.end(); I != E; ++I) {
- if (V.visit(*I))
- return true;
- }
- return false;
+static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
+ switch (Opts.getExplorationStrategy()) {
+ case AnalyzerOptions::ExplorationStrategyKind::DFS:
+ return WorkList::makeDFS();
+ case AnalyzerOptions::ExplorationStrategyKind::BFS:
+ return WorkList::makeBFS();
+ case AnalyzerOptions::ExplorationStrategyKind::BFSBlockDFSContents:
+ return WorkList::makeBFSBlockDFSContents();
+ case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirst:
+ return WorkList::makeUnexploredFirst();
+ case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirstQueue:
+ return WorkList::makeUnexploredFirstPriorityQueue();
+ default:
+ llvm_unreachable("Unexpected case");
}
-};
-
-} // end anonymous namespace
-
-// Place the dstor for WorkList here because it contains virtual member
-// functions, and we the code for the dstor generated in one compilation unit.
-WorkList::~WorkList() {}
-
-WorkList *WorkList::makeDFS() { return new DFS(); }
-WorkList *WorkList::makeBFS() { return new BFS(); }
-
-namespace {
- class BFSBlockDFSContents : public WorkList {
- std::deque<WorkListUnit> Queue;
- SmallVector<WorkListUnit,20> Stack;
- public:
- bool hasWork() const override {
- return !Queue.empty() || !Stack.empty();
- }
-
- void enqueue(const WorkListUnit& U) override {
- if (U.getNode()->getLocation().getAs<BlockEntrance>())
- Queue.push_front(U);
- else
- Stack.push_back(U);
- }
-
- WorkListUnit dequeue() override {
- // Process all basic blocks to completion.
- if (!Stack.empty()) {
- const WorkListUnit& U = Stack.back();
- Stack.pop_back(); // This technically "invalidates" U, but we are fine.
- return U;
- }
-
- assert(!Queue.empty());
- // Don't use const reference. The subsequent pop_back() might make it
- // unsafe.
- WorkListUnit U = Queue.front();
- Queue.pop_front();
- return U;
- }
- bool visitItemsInWorkList(Visitor &V) override {
- for (SmallVectorImpl<WorkListUnit>::iterator
- I = Stack.begin(), E = Stack.end(); I != E; ++I) {
- if (V.visit(*I))
- return true;
- }
- for (std::deque<WorkListUnit>::iterator
- I = Queue.begin(), E = Queue.end(); I != E; ++I) {
- if (V.visit(*I))
- return true;
- }
- return false;
- }
-
- };
-} // end anonymous namespace
-
-WorkList* WorkList::makeBFSBlockDFSContents() {
- return new BFSBlockDFSContents();
}
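
The inline DFS/BFS worklists deleted here live on behind WorkList's factory functions, and generateWorkList just maps each ExplorationStrategyKind to the matching factory. A condensed sketch of the pattern with simplified stand-in types (WorkItem and Strategy are hypothetical, not the analyzer's):

#include <cassert>
#include <deque>
#include <memory>
#include <vector>

struct WorkItem { int Id; };  // stand-in for WorkListUnit

class Queue {
public:
  virtual ~Queue() = default;
  virtual bool hasWork() const = 0;
  virtual void enqueue(const WorkItem &U) = 0;
  virtual WorkItem dequeue() = 0;
  static std::unique_ptr<Queue> makeDFS();
  static std::unique_ptr<Queue> makeBFS();
};

namespace {
// LIFO: always explore the most recently discovered node first.
class DFS : public Queue {
  std::vector<WorkItem> Stack;
public:
  bool hasWork() const override { return !Stack.empty(); }
  void enqueue(const WorkItem &U) override { Stack.push_back(U); }
  WorkItem dequeue() override {
    assert(!Stack.empty());
    WorkItem U = Stack.back();
    Stack.pop_back();
    return U;
  }
};
// FIFO: explore nodes in discovery order.
class BFS : public Queue {
  std::deque<WorkItem> Q;
public:
  bool hasWork() const override { return !Q.empty(); }
  void enqueue(const WorkItem &U) override { Q.push_back(U); }
  WorkItem dequeue() override {
    WorkItem U = Q.front();
    Q.pop_front();
    return U;
  }
};
} // namespace

std::unique_ptr<Queue> Queue::makeDFS() { return std::make_unique<DFS>(); }
std::unique_ptr<Queue> Queue::makeBFS() { return std::make_unique<BFS>(); }

enum class Strategy { DFS, BFS };

// The same shape as generateWorkList: strategy enum -> factory call.
static std::unique_ptr<Queue> makeQueue(Strategy S) {
  switch (S) {
  case Strategy::DFS: return Queue::makeDFS();
  case Strategy::BFS: return Queue::makeBFS();
  }
  return nullptr;  // unreachable for valid enum values
}
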
-//===----------------------------------------------------------------------===//
-// Core analysis engine.
-//===----------------------------------------------------------------------===//
+CoreEngine::CoreEngine(SubEngine &subengine, FunctionSummariesTy *FS,
+ AnalyzerOptions &Opts)
+ : SubEng(subengine), WList(generateWorkList(Opts)),
+ BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
ProgramStateRef InitState) {
-
if (G.num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
const CFGBlock *Entry = &(L->getCFG()->getEntry());
- assert (Entry->empty() &&
- "Entry block must be empty.");
+ assert(Entry->empty() && "Entry block must be empty.");
- assert (Entry->succ_size() == 1 &&
- "Entry block must have 1 successor.");
+ assert(Entry->succ_size() == 1 && "Entry block must have 1 successor.");
// Mark the entry block as visited.
FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
@@ -195,7 +107,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
bool IsNew;
ExplodedNode *Node = G.getNode(StartLoc, InitState, false, &IsNew);
- assert (IsNew);
+ assert(IsNew);
G.addRoot(Node);
NodeBuilderContext BuilderCtx(*this, StartLoc.getDst(), Node);
@@ -251,13 +163,12 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
break;
case ProgramPoint::BlockExitKind:
- assert (false && "BlockExit location never occur in forward analysis.");
+ assert(false && "BlockExit locations never occur in forward analysis.");
break;
- case ProgramPoint::CallEnterKind: {
+ case ProgramPoint::CallEnterKind:
HandleCallEnter(Loc.castAs<CallEnter>(), Pred);
break;
- }
case ProgramPoint::CallExitBeginKind:
SubEng.processCallExit(Pred);
@@ -275,7 +186,8 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
Loc.getAs<PostInitializer>() ||
Loc.getAs<PostImplicitCall>() ||
Loc.getAs<CallExitEnd>() ||
- Loc.getAs<LoopExit>());
+ Loc.getAs<LoopExit>() ||
+ Loc.getAs<PostAllocatorCall>());
HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
break;
}
@@ -294,7 +206,6 @@ bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
}
void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
-
const CFGBlock *Blk = L.getDst();
NodeBuilderContext BuilderCtx(*this, Blk, Pred);
@@ -306,18 +217,14 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
// Check if we are entering the EXIT block.
if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
-
- assert (L.getLocationContext()->getCFG()->getExit().size() == 0
- && "EXIT block cannot contain Stmts.");
+ assert(L.getLocationContext()->getCFG()->getExit().empty() &&
+ "EXIT block cannot contain Stmts.");
// Get the return statement.
const ReturnStmt *RS = nullptr;
if (!L.getSrc()->empty()) {
if (Optional<CFGStmt> LastStmt = L.getSrc()->back().getAs<CFGStmt>()) {
- if ((RS = dyn_cast<ReturnStmt>(LastStmt->getStmt()))) {
- if (!RS->getRetValue())
- RS = nullptr;
- }
+ RS = dyn_cast<ReturnStmt>(LastStmt->getStmt());
}
}
@@ -345,12 +252,11 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
ExplodedNode *Pred) {
-
// Increment the block counter.
const LocationContext *LC = Pred->getLocationContext();
unsigned BlockId = L.getBlock()->getBlockID();
BlockCounter Counter = WList->getBlockCounter();
- Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
+ Counter = BCounterFactory.IncrementCount(Counter, LC->getStackFrame(),
BlockId);
WList->setBlockCounter(Counter);
@@ -364,7 +270,6 @@ void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
}
void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
-
if (const Stmt *Term = B->getTerminator()) {
switch (Term->getStmtClass()) {
default:
@@ -397,7 +302,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
return;
- case Stmt::CXXTryStmtClass: {
+ case Stmt::CXXTryStmtClass:
// Generate a node for each of the successors.
// Our logic for EH analysis can certainly be improved.
for (CFGBlock::const_succ_iterator it = B->succ_begin(),
@@ -408,7 +313,6 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
}
}
return;
- }
case Stmt::DoStmtClass:
HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
@@ -433,7 +337,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
case Stmt::IndirectGotoStmtClass: {
// Only 1 successor: the indirect goto dispatch block.
- assert (B->succ_size() == 1);
+ assert(B->succ_size() == 1);
IndirectGotoNodeBuilder
builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
@@ -443,7 +347,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
return;
}
- case Stmt::ObjCForCollectionStmtClass: {
+ case Stmt::ObjCForCollectionStmtClass:
// In the case of ObjCForCollectionStmt, it appears twice in a CFG:
//
// (1) inside a basic block, which represents the binding of the
@@ -456,7 +360,6 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
// contain nil elements.
HandleBranch(Term, Term, B, Pred);
return;
- }
case Stmt::SwitchStmtClass: {
SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
@@ -472,8 +375,8 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
}
}
- assert (B->succ_size() == 1 &&
- "Blocks with no terminator should have at most 1 successor.");
+ assert(B->succ_size() == 1 &&
+ "Blocks with no terminator should have exactly 1 successor.");
generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
Pred->State, Pred);
@@ -518,9 +421,8 @@ void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
enqueue(Dst);
}
-
void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
- ExplodedNode *Pred) {
+ ExplodedNode *Pred) {
assert(B);
assert(!B->empty());
@@ -537,14 +439,13 @@ void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
void CoreEngine::generateNode(const ProgramPoint &Loc,
ProgramStateRef State,
ExplodedNode *Pred) {
-
bool IsNew;
ExplodedNode *Node = G.getNode(Loc, State, false, &IsNew);
if (Pred)
Node->addPredecessor(Pred, G); // Link 'Node' with its predecessor.
else {
- assert (IsNew);
+ assert(IsNew);
G.addRoot(Node); // 'Node' has no predecessor. Make it a root.
}
@@ -555,7 +456,7 @@ void CoreEngine::generateNode(const ProgramPoint &Loc,
void CoreEngine::enqueueStmtNode(ExplodedNode *N,
const CFGBlock *Block, unsigned Idx) {
assert(Block);
- assert (!N->isSink());
+ assert(!N->isSink());
// Check if this node entered a callee.
if (N->getLocation().getAs<CallEnter>()) {
@@ -605,8 +506,7 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N,
ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N,
const ReturnStmt *RS) {
// Create a CallExitBegin node and enqueue it.
- const StackFrameContext *LocCtx
- = cast<StackFrameContext>(N->getLocationContext());
+ const auto *LocCtx = cast<StackFrameContext>(N->getLocationContext());
// Use the callee location context.
CallExitBegin Loc(LocCtx, RS);
@@ -617,40 +517,33 @@ ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N,
return isNew ? Node : nullptr;
}
-
void CoreEngine::enqueue(ExplodedNodeSet &Set) {
- for (ExplodedNodeSet::iterator I = Set.begin(),
- E = Set.end(); I != E; ++I) {
- WList->enqueue(*I);
- }
+ for (const auto I : Set)
+ WList->enqueue(I);
}
void CoreEngine::enqueue(ExplodedNodeSet &Set,
const CFGBlock *Block, unsigned Idx) {
- for (ExplodedNodeSet::iterator I = Set.begin(),
- E = Set.end(); I != E; ++I) {
- enqueueStmtNode(*I, Block, Idx);
- }
+ for (const auto I : Set)
+ enqueueStmtNode(I, Block, Idx);
}
void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set, const ReturnStmt *RS) {
- for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
- ExplodedNode *N = *I;
+ for (auto I : Set) {
// If we are in an inlined call, generate CallExitBegin node.
- if (N->getLocationContext()->getParent()) {
- N = generateCallExitBeginNode(N, RS);
- if (N)
- WList->enqueue(N);
+ if (I->getLocationContext()->getParent()) {
+ I = generateCallExitBeginNode(I, RS);
+ if (I)
+ WList->enqueue(I);
} else {
// TODO: We should run removeDeadBindings here.
- G.addEndOfPath(N);
+ G.addEndOfPath(I);
NumPathsExplored++;
}
}
}
-
-void NodeBuilder::anchor() { }
+void NodeBuilder::anchor() {}
ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
ProgramStateRef State,
@@ -671,16 +564,15 @@ ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
return N;
}
-void NodeBuilderWithSinks::anchor() { }
+void NodeBuilderWithSinks::anchor() {}
StmtNodeBuilder::~StmtNodeBuilder() {
if (EnclosingBldr)
- for (ExplodedNodeSet::iterator I = Frontier.begin(),
- E = Frontier.end(); I != E; ++I )
- EnclosingBldr->addNodes(*I);
+ for (const auto I : Frontier)
+ EnclosingBldr->addNodes(I);
}
-void BranchNodeBuilder::anchor() { }
+void BranchNodeBuilder::anchor() {}
ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
bool branch,
@@ -714,11 +606,9 @@ IndirectGotoNodeBuilder::generateNode(const iterator &I,
return Succ;
}
-
ExplodedNode*
SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
ProgramStateRef St) {
-
bool IsNew;
ExplodedNode *Succ =
Eng.G.getNode(BlockEdge(Src, I.getBlock(), Pred->getLocationContext()),
@@ -731,7 +621,6 @@ SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
return Succ;
}
-
ExplodedNode*
SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
bool IsSink) {
diff --git a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
index a01ff36a8aae..530933916889 100644
--- a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
+++ b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
@@ -1,4 +1,4 @@
-//==- DynamicTypeMap.cpp - Dynamic Type Info related APIs ----------*- C++ -*-//
+//===- DynamicTypeMap.cpp - Dynamic Type Info related APIs ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,13 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
namespace clang {
namespace ento {
@@ -28,15 +35,15 @@ DynamicTypeInfo getDynamicTypeInfo(ProgramStateRef State,
return *GDMType;
// Otherwise, fall back to what we know about the region.
- if (const TypedRegion *TR = dyn_cast<TypedRegion>(Reg))
+ if (const auto *TR = dyn_cast<TypedRegion>(Reg))
return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+ if (const auto *SR = dyn_cast<SymbolicRegion>(Reg)) {
SymbolRef Sym = SR->getSymbol();
return DynamicTypeInfo(Sym->getType());
}
- return DynamicTypeInfo();
+ return {};
}
ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *Reg,
@@ -47,5 +54,28 @@ ProgramStateRef setDynamicTypeInfo(ProgramStateRef State, const MemRegion *Reg,
return NewState;
}
+void printDynamicTypeInfo(ProgramStateRef State, raw_ostream &Out,
+ const char *NL, const char *Sep) {
+ bool First = true;
+ for (const auto &I : State->get<DynamicTypeMap>()) {
+ if (First) {
+ Out << NL << "Dynamic types of regions:" << NL;
+ First = false;
+ }
+ const MemRegion *MR = I.first;
+ const DynamicTypeInfo &DTI = I.second;
+ Out << MR << " : ";
+ if (DTI.isValid()) {
+ Out << DTI.getType()->getPointeeType().getAsString();
+ if (DTI.canBeASubClass()) {
+ Out << " (or its subclass)";
+ }
+ } else {
+ Out << "Invalid type info";
+ }
+ Out << NL;
+ }
+}
+
} // namespace ento
} // namespace clang
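
getDynamicTypeInfo tries three sources in order: an explicit DynamicTypeMap entry, the exact static type of a typed region, and the (possibly subclassed) type of a symbolic region's symbol. A minimal sketch of that fallback chain over hypothetical stand-in types:

#include <map>
#include <optional>
#include <string>

// Stand-ins for MemRegion / DynamicTypeInfo.
struct Region {
  std::string StaticType;  // known for "typed" regions, empty otherwise
  std::string SymbolType;  // known for "symbolic" regions, empty otherwise
};

struct TypeInfo {
  std::string Type;
  bool CanBeSubclass = true;
  bool isValid() const { return !Type.empty(); }
};

// 1. explicit map entry, 2. typed-region fallback, 3. symbol fallback.
TypeInfo getTypeInfo(const std::map<const Region *, TypeInfo> &Map,
                     const Region *R) {
  if (auto It = Map.find(R); It != Map.end())
    return It->second;                                // inferred earlier
  if (!R->StaticType.empty())
    return {R->StaticType, /*CanBeSubclass=*/false};  // exact static type
  if (!R->SymbolType.empty())
    return {R->SymbolType, /*CanBeSubclass=*/true};   // may be a subclass
  return {};                                          // nothing known
}
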
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index c6acb9d1851c..eccaee292c40 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -1,4 +1,4 @@
-//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==//
+//===- Environment.cpp - Map from Stmt* to Locations/Values ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,12 +11,25 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/PathSensitive/Environment.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/ExprObjC.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Stmt.h"
#include "clang/Analysis/AnalysisDeclContext.h"
-#include "clang/Analysis/CFG.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
using namespace clang;
using namespace ento;
@@ -46,16 +59,16 @@ static const Expr *ignoreTransparentExprs(const Expr *E) {
}
static const Stmt *ignoreTransparentExprs(const Stmt *S) {
- if (const Expr *E = dyn_cast<Expr>(S))
+ if (const auto *E = dyn_cast<Expr>(S))
return ignoreTransparentExprs(E);
return S;
}
EnvironmentEntry::EnvironmentEntry(const Stmt *S, const LocationContext *L)
- : std::pair<const Stmt *,
- const StackFrameContext *>(ignoreTransparentExprs(S),
- L ? L->getCurrentStackFrame()
- : nullptr) {}
+ : std::pair<const Stmt *,
+ const StackFrameContext *>(ignoreTransparentExprs(S),
+ L ? L->getStackFrame()
+ : nullptr) {}
SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
const SVal* X = ExprBindings.lookup(E);
@@ -95,7 +108,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
case Stmt::ReturnStmtClass: {
- const ReturnStmt *RS = cast<ReturnStmt>(S);
+ const auto *RS = cast<ReturnStmt>(S);
if (const Expr *RE = RS->getRetValue())
return getSVal(EnvironmentEntry(RE, LCtx), svalBuilder);
return UndefinedVal();
@@ -121,20 +134,25 @@ Environment EnvironmentManager::bindExpr(Environment Env,
}
namespace {
+
class MarkLiveCallback final : public SymbolVisitor {
SymbolReaper &SymReaper;
+
public:
MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+
bool VisitSymbol(SymbolRef sym) override {
SymReaper.markLive(sym);
return true;
}
+
bool VisitMemRegion(const MemRegion *R) override {
SymReaper.markLive(R);
return true;
}
};
-} // end anonymous namespace
+
+} // namespace
// removeDeadBindings:
// - Remove subexpression bindings.
@@ -147,7 +165,6 @@ Environment
EnvironmentManager::removeDeadBindings(Environment Env,
SymbolReaper &SymReaper,
ProgramStateRef ST) {
-
// We construct a new Environment object entirely, as this is cheaper than
// individually removing all the subexpression bindings (which will greatly
// outnumber block-level expression bindings).
@@ -156,14 +173,13 @@ EnvironmentManager::removeDeadBindings(Environment Env,
MarkLiveCallback CB(SymReaper);
ScanReachableSymbols RSScaner(ST, CB);
- llvm::ImmutableMapRef<EnvironmentEntry,SVal>
+ llvm::ImmutableMapRef<EnvironmentEntry, SVal>
EBMapRef(NewEnv.ExprBindings.getRootWithoutRetain(),
F.getTreeFactory());
// Iterate over the block-expr bindings.
for (Environment::iterator I = Env.begin(), E = Env.end();
I != E; ++I) {
-
const EnvironmentEntry &BlkExpr = I.getKey();
const SVal &X = I.getData();
@@ -186,28 +202,41 @@ EnvironmentManager::removeDeadBindings(Environment Env,
}
void Environment::print(raw_ostream &Out, const char *NL,
- const char *Sep) const {
- bool isFirst = true;
+ const char *Sep, const LocationContext *WithLC) const {
+ if (ExprBindings.isEmpty())
+ return;
+
+ if (!WithLC) {
+ // Find the freshest location context.
+ llvm::SmallPtrSet<const LocationContext *, 16> FoundContexts;
+ for (auto I : *this) {
+ const LocationContext *LC = I.first.getLocationContext();
+ if (FoundContexts.count(LC) == 0) {
+ // This context is fresher than all other contexts so far.
+ WithLC = LC;
+ for (const LocationContext *LCI = LC; LCI; LCI = LCI->getParent())
+ FoundContexts.insert(LCI);
+ }
+ }
+ }
- for (Environment::iterator I = begin(), E = end(); I != E; ++I) {
- const EnvironmentEntry &En = I.getKey();
+ assert(WithLC);
- if (isFirst) {
- Out << NL << NL
- << "Expressions:"
- << NL;
- isFirst = false;
- } else {
- Out << NL;
- }
+ LangOptions LO; // FIXME.
+ PrintingPolicy PP(LO);
- const Stmt *S = En.getStmt();
- assert(S != nullptr && "Expected non-null Stmt");
+ Out << NL << NL << "Expressions by stack frame:" << NL;
+ WithLC->dumpStack(Out, "", NL, Sep, [&](const LocationContext *LC) {
+ for (auto I : ExprBindings) {
+ if (I.first.getLocationContext() != LC)
+ continue;
- Out << " (" << (const void*) En.getLocationContext() << ','
- << (const void*) S << ") ";
- LangOptions LO; // FIXME.
- S->printPretty(Out, nullptr, PrintingPolicy(LO));
- Out << " : " << I.getData();
- }
+ const Stmt *S = I.first.getStmt();
+ assert(S != nullptr && "Expected non-null Stmt");
+
+ Out << "(" << (const void *)LC << ',' << (const void *)S << ") ";
+ S->printPretty(Out, nullptr, PP);
+ Out << " : " << I.second << NL;
+ }
+ });
}
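
The WithLC discovery loop above leans on a property of the parent chain: a context not yet seen while walking earlier contexts' ancestors must be deeper, i.e. fresher, than everything seen so far (given that all bindings lie on one stack). A standalone sketch of the idiom with a bare parent-linked node:

#include <set>
#include <vector>

struct Ctx { const Ctx *Parent; };

// Returns the deepest context among Bindings, assuming they all lie on
// one ancestor chain (true for the frames of a single analysis path).
const Ctx *freshestContext(const std::vector<const Ctx *> &Bindings) {
  std::set<const Ctx *> Found;
  const Ctx *Freshest = nullptr;
  for (const Ctx *LC : Bindings) {
    if (Found.count(LC) == 0) {
      // LC is not an ancestor of anything seen yet, so it is fresher.
      Freshest = LC;
      for (const Ctx *P = LC; P; P = P->Parent)
        Found.insert(P);
    }
  }
  return Freshest;
}
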
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 3bc8e09333b9..ece103d9d09a 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -1,4 +1,4 @@
-//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//===- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,13 +13,24 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/Stmt.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <memory>
using namespace clang;
using namespace ento;
@@ -29,7 +40,7 @@ using namespace ento;
//===----------------------------------------------------------------------===//
// An out of line virtual method to provide a home for the class vtable.
-ExplodedNode::Auditor::~Auditor() {}
+ExplodedNode::Auditor::~Auditor() = default;
#ifndef NDEBUG
static ExplodedNode::Auditor* NodeAuditor = nullptr;
@@ -45,10 +56,9 @@ void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
// Cleanup.
//===----------------------------------------------------------------------===//
-ExplodedGraph::ExplodedGraph()
- : NumNodes(0), ReclaimNodeInterval(0) {}
+ExplodedGraph::ExplodedGraph() = default;
-ExplodedGraph::~ExplodedGraph() {}
+ExplodedGraph::~ExplodedGraph() = default;
//===----------------------------------------------------------------------===//
// Node reclamation.
@@ -187,12 +197,9 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
return;
ReclaimCounter = ReclaimNodeInterval;
- for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end();
- it != et; ++it) {
- ExplodedNode *node = *it;
+ for (const auto node : ChangedNodes)
if (shouldCollect(node))
collectNode(node);
- }
ChangedNodes.clear();
}
@@ -210,11 +217,11 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
// 2. The group is empty, in which case the storage value is null.
// 3. The group contains a single node.
// 4. The group contains more than one node.
-typedef BumpVector<ExplodedNode *> ExplodedNodeVector;
-typedef llvm::PointerUnion<ExplodedNode *, ExplodedNodeVector *> GroupStorage;
+using ExplodedNodeVector = BumpVector<ExplodedNode *>;
+using GroupStorage = llvm::PointerUnion<ExplodedNode *, ExplodedNodeVector *>;
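
The comment above describes a small-group optimization: one pointer-sized field is empty, a single node, or a pointer to an out-of-line vector once a second node arrives. A sketch of the same idea using std::variant and a std::deque arena in place of llvm::PointerUnion and the BumpVector allocator:

#include <deque>
#include <variant>
#include <vector>

struct Node {};
using Group = std::variant<std::monostate, Node *, std::vector<Node *> *>;

void addNode(Group &G, Node *N, std::deque<std::vector<Node *>> &Arena) {
  if (std::holds_alternative<std::monostate>(G)) {
    G = N;                         // empty -> single node, no allocation
  } else if (Node **Old = std::get_if<Node *>(&G)) {
    Arena.emplace_back();          // second node: spill to an out-of-line
    Arena.back().push_back(*Old);  // vector (deque keeps references stable)
    Arena.back().push_back(N);
    G = &Arena.back();
  } else {
    std::get<std::vector<Node *> *>(G)->push_back(N);
  }
}
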
void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
- assert (!V->isSink());
+ assert(!V->isSink());
Preds.addNode(V, G);
V->Succs.addNode(this, G);
#ifndef NDEBUG
@@ -346,25 +353,22 @@ std::unique_ptr<ExplodedGraph>
ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
InterExplodedGraphMap *ForwardMap,
InterExplodedGraphMap *InverseMap) const {
-
if (Nodes.empty())
return nullptr;
- typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
+ using Pass1Ty = llvm::DenseSet<const ExplodedNode *>;
Pass1Ty Pass1;
- typedef InterExplodedGraphMap Pass2Ty;
+ using Pass2Ty = InterExplodedGraphMap;
InterExplodedGraphMap Pass2Scratch;
Pass2Ty &Pass2 = ForwardMap ? *ForwardMap : Pass2Scratch;
SmallVector<const ExplodedNode*, 10> WL1, WL2;
// ===- Pass 1 (reverse DFS) -===
- for (ArrayRef<const NodeTy *>::iterator I = Sinks.begin(), E = Sinks.end();
- I != E; ++I) {
- if (*I)
- WL1.push_back(*I);
- }
+ for (const auto Sink : Sinks)
+ if (Sink)
+ WL1.push_back(Sink);
// Process the first worklist until it is empty.
while (!WL1.empty()) {
@@ -445,4 +449,3 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
return G;
}
-
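
trim is a two-pass graph slice: a reverse DFS from the sink nodes marks everything that can reach a sink, and a forward pass over the marked set then rebuilds the smaller graph. A minimal sketch of pass 1 over a bare adjacency-list node (Preds/Succs stand in for the exploded graph's edges):

#include <set>
#include <vector>

struct Node { std::vector<Node *> Preds, Succs; };

// Pass 1: reverse DFS; returns every node lying on some path to a sink.
std::set<const Node *> reachesSink(const std::vector<const Node *> &Sinks) {
  std::set<const Node *> Seen;
  std::vector<const Node *> WL(Sinks.begin(), Sinks.end());
  while (!WL.empty()) {
    const Node *N = WL.back();
    WL.pop_back();
    if (!Seen.insert(N).second)
      continue;                 // already visited
    for (const Node *P : N->Preds)
      WL.push_back(P);
  }
  // Pass 2 (not shown) walks forward from the roots, copying only nodes
  // in Seen and rewiring their edges into the new graph.
  return Seen;
}
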
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 3be37e7ae301..188316c096e3 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1,4 +1,4 @@
-//=-- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//===- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,31 +15,75 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "PrettyStackTraceLocationContext.h"
-#include "clang/AST/CharUnits.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
-#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
-#include "clang/Basic/Builtins.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ConstructionContext.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DOTGraphTraits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
-
-#ifndef NDEBUG
-#include "llvm/Support/GraphWriter.h"
-#endif
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
using namespace clang;
using namespace ento;
-using llvm::APSInt;
#define DEBUG_TYPE "ExprEngine"
@@ -54,14 +98,100 @@ STATISTIC(NumMaxBlockCountReachedInInlined,
STATISTIC(NumTimesRetriedWithoutInlining,
"The # of times we re-evaluated a call without inlining");
-typedef std::pair<const CXXBindTemporaryExpr *, const StackFrameContext *>
- CXXBindTemporaryContext;
-// Keeps track of whether CXXBindTemporaryExpr nodes have been evaluated.
-// The StackFrameContext assures that nested calls due to inlined recursive
-// functions do not interfere.
-REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedTemporariesSet,
- llvm::ImmutableSet<CXXBindTemporaryContext>)
+//===----------------------------------------------------------------------===//
+// Internal program state traits.
+//===----------------------------------------------------------------------===//
+
+// When modeling a C++ constructor, for a variety of reasons we need to track
+// the location of the object for the duration of its ConstructionContext.
+// ObjectsUnderConstruction maps statements within the construction context
+// to the object's location, so that the location can be retrieved at any
+// such statement.
+
+/// ConstructedObjectKey is used to find the path-sensitive
+/// memory region of a freshly constructed object while modeling the AST node
+/// that syntactically represents the object that is being constructed.
+/// Semantics of such nodes may sometimes require access to the region that's
+/// not otherwise present in the program state, or to the very fact that
+/// the construction context was present and contained references to these
+/// AST nodes.
+class ConstructedObjectKey {
+ typedef std::pair<
+ llvm::PointerUnion<const Stmt *, const CXXCtorInitializer *>,
+ const LocationContext *> ConstructedObjectKeyImpl;
+
+ ConstructedObjectKeyImpl Impl;
+
+ const void *getAnyASTNodePtr() const {
+ if (const Stmt *S = getStmt())
+ return S;
+ else
+ return getCXXCtorInitializer();
+ }
+
+public:
+ ConstructedObjectKey(
+ llvm::PointerUnion<const Stmt *, const CXXCtorInitializer *> P,
+ const LocationContext *LC)
+ : Impl(P, LC) {
+ // This is the full list of statements that require additional actions when
+ // encountered. This list may be expanded when new actions are implemented.
+ assert(getCXXCtorInitializer() || isa<DeclStmt>(getStmt()) ||
+ isa<CXXNewExpr>(getStmt()) || isa<CXXBindTemporaryExpr>(getStmt()) ||
+ isa<MaterializeTemporaryExpr>(getStmt()) ||
+ isa<CXXConstructExpr>(getStmt()));
+ }
+
+ const Stmt *getStmt() const {
+ return Impl.first.dyn_cast<const Stmt *>();
+ }
+
+ const CXXCtorInitializer *getCXXCtorInitializer() const {
+ return Impl.first.dyn_cast<const CXXCtorInitializer *>();
+ }
+
+ const LocationContext *getLocationContext() const {
+ return Impl.second;
+ }
+
+ void print(llvm::raw_ostream &OS, PrinterHelper *Helper, PrintingPolicy &PP) {
+ OS << '(' << getLocationContext() << ',' << getAnyASTNodePtr() << ") ";
+ if (const Stmt *S = getStmt()) {
+ S->printPretty(OS, Helper, PP);
+ } else {
+ const CXXCtorInitializer *I = getCXXCtorInitializer();
+ OS << I->getAnyMember()->getNameAsString();
+ }
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(Impl.first.getOpaqueValue());
+ ID.AddPointer(Impl.second);
+ }
+
+ bool operator==(const ConstructedObjectKey &RHS) const {
+ return Impl == RHS.Impl;
+ }
+
+ bool operator<(const ConstructedObjectKey &RHS) const {
+ return Impl < RHS.Impl;
+ }
+};
+
+typedef llvm::ImmutableMap<ConstructedObjectKey, SVal>
+ ObjectsUnderConstructionMap;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ObjectsUnderConstruction,
+ ObjectsUnderConstructionMap)
+
+// Additionally, track a set of destructors that correspond to elided
+// constructors when copy elision occurs.
+typedef std::pair<const CXXBindTemporaryExpr *, const LocationContext *>
+ ElidedDestructorItem;
+typedef llvm::ImmutableSet<ElidedDestructorItem>
+ ElidedDestructorSet;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ElidedDestructors,
+ ElidedDestructorSet)
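
REGISTER_TRAIT_WITH_PROGRAMSTATE attaches a persistent map to every ProgramState: set and remove never mutate, they hand back a new state sharing structure with the old. A toy model of that value-semantics contract, with a copied std::map where the analyzer uses llvm::ImmutableMap (Key and State here are hypothetical stand-ins):

#include <cassert>
#include <map>
#include <optional>

struct Key { int Id; bool operator<(const Key &O) const { return Id < O.Id; } };

struct State {
  std::map<Key, int> ObjectsUnderConstruction;  // the trait's payload

  // Like State->set<ObjectsUnderConstruction>(K, V): returns a new state.
  State set(Key K, int V) const {
    State S = *this;
    S.ObjectsUnderConstruction[K] = V;
    return S;
  }
  // Like State->remove<ObjectsUnderConstruction>(K).
  State remove(Key K) const {
    State S = *this;
    S.ObjectsUnderConstruction.erase(K);
    return S;
  }
  std::optional<int> get(Key K) const {
    auto It = ObjectsUnderConstruction.find(K);
    if (It == ObjectsUnderConstruction.end())
      return std::nullopt;
    return It->second;
  }
};

int main() {
  State S0;
  State S1 = S0.set({1}, 42);  // track an object under construction
  assert(!S0.get({1}));        // S0 is untouched: states are immutable
  assert(*S1.get({1}) == 42);
  State S2 = S1.remove({1});   // finishObjectConstruction analogue
  assert(!S2.get({1}));
}
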
//===----------------------------------------------------------------------===//
// Engine construction and deletion.
@@ -69,25 +199,21 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedTemporariesSet,
static const char* TagProviderName = "ExprEngine";
-ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
+ AnalysisManager &mgr, bool gcEnabled,
SetOfConstDecls *VisitedCalleesIn,
FunctionSummariesTy *FS,
InliningModes HowToInlineIn)
- : AMgr(mgr),
- AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
- Engine(*this, FS),
- G(Engine.getGraph()),
- StateMgr(getContext(), mgr.getStoreManagerCreator(),
- mgr.getConstraintManagerCreator(), G.getAllocator(),
- this),
- SymMgr(StateMgr.getSymbolManager()),
- svalBuilder(StateMgr.getSValBuilder()),
- currStmtIdx(0), currBldrCtx(nullptr),
- ObjCNoRet(mgr.getASTContext()),
- ObjCGCEnabled(gcEnabled), BR(mgr, *this),
- VisitedCallees(VisitedCalleesIn),
- HowToInline(HowToInlineIn)
-{
+ : CTU(CTU), AMgr(mgr),
+ AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+ Engine(*this, FS, mgr.getAnalyzerOptions()), G(Engine.getGraph()),
+ StateMgr(getContext(), mgr.getStoreManagerCreator(),
+ mgr.getConstraintManagerCreator(), G.getAllocator(),
+ this),
+ SymMgr(StateMgr.getSymbolManager()),
+ svalBuilder(StateMgr.getSValBuilder()), ObjCNoRet(mgr.getASTContext()),
+ ObjCGCEnabled(gcEnabled), BR(mgr, *this),
+ VisitedCallees(VisitedCalleesIn), HowToInline(HowToInlineIn) {
unsigned TrimInterval = mgr.options.getGraphTrimInterval();
if (TrimInterval != 0) {
// Enable eager node reclaimation when constructing the ExplodedGraph.
@@ -111,8 +237,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
// FIXME: It would be nice if we had a more general mechanism to add
// such preconditions. Some day.
do {
-
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Precondition: the first argument of 'main' is an integer guaranteed
// to be > 0.
const IdentifierInfo *II = FD->getIdentifier();
@@ -121,7 +246,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
const ParmVarDecl *PD = FD->getParamDecl(0);
QualType T = PD->getType();
- const BuiltinType *BT = dyn_cast<BuiltinType>(T);
+ const auto *BT = dyn_cast<BuiltinType>(T);
if (!BT || !BT->isInteger())
break;
@@ -145,9 +270,9 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
}
break;
}
- while (0);
+ while (false);
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
// Precondition: 'self' is always non-null upon entry to an Objective-C
// method.
const ImplicitParamDecl *SelfD = MD->getSelfDecl();
@@ -161,12 +286,12 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
}
}
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
if (!MD->isStatic()) {
// Precondition: 'this' is always non-null upon entry to the
// top-level function. This is our starting assumption for
// analyzing an "open" program.
- const StackFrameContext *SFC = InitLoc->getCurrentStackFrame();
+ const StackFrameContext *SFC = InitLoc->getStackFrame();
if (SFC->getParent() == nullptr) {
loc::MemRegionVal L = svalBuilder.getCXXThis(MD, SFC);
SVal V = state->getSVal(L);
@@ -237,17 +362,30 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
const Expr *Init = InitWithAdjustments->skipRValueSubobjectAdjustments(
CommaLHSs, Adjustments);
+ // Take the region for Init, i.e. for the whole object. If we do not remember
+ // the region in which the object originally was constructed, come up with
+ // a new temporary region out of thin air and copy the contents of the object
+ // (which are currently present in the Environment, because Init is an rvalue)
+ // into that region. This is not correct, but it is better than nothing.
const TypedValueRegion *TR = nullptr;
- if (const MaterializeTemporaryExpr *MT =
- dyn_cast<MaterializeTemporaryExpr>(Result)) {
- StorageDuration SD = MT->getStorageDuration();
- // If this object is bound to a reference with static storage duration, we
- // put it in a different region to prevent "address leakage" warnings.
- if (SD == SD_Static || SD == SD_Thread)
- TR = MRMgr.getCXXStaticTempObjectRegion(Init);
- }
- if (!TR)
+ if (const auto *MT = dyn_cast<MaterializeTemporaryExpr>(Result)) {
+ if (Optional<SVal> V = getObjectUnderConstruction(State, MT, LC)) {
+ State = finishObjectConstruction(State, MT, LC);
+ State = State->BindExpr(Result, LC, *V);
+ return State;
+ } else {
+ StorageDuration SD = MT->getStorageDuration();
+ // If this object is bound to a reference with static storage duration, we
+ // put it in a different region to prevent "address leakage" warnings.
+ if (SD == SD_Static || SD == SD_Thread) {
+ TR = MRMgr.getCXXStaticTempObjectRegion(Init);
+ } else {
+ TR = MRMgr.getCXXTempObjectRegion(Init, LC);
+ }
+ }
+ } else {
TR = MRMgr.getCXXTempObjectRegion(Init, LC);
+ }
SVal Reg = loc::MemRegionVal(TR);
SVal BaseReg = Reg;
@@ -264,7 +402,9 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
break;
case SubobjectAdjustment::MemberPointerAdjustment:
// FIXME: Unimplemented.
- State = State->bindDefault(Reg, UnknownVal(), LC);
+ State = State->invalidateRegions(Reg, InitWithAdjustments,
+ currBldrCtx->blockCount(), LC, true,
+ nullptr, nullptr, nullptr);
return State;
}
}
@@ -283,7 +423,8 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
currBldrCtx->blockCount());
State = State->bindLoc(BaseReg.castAs<Loc>(), InitVal, LC, false);
- // Then we'd need to take the value that certainly exists and bind it over.
+ // Then we'd need to take the value that certainly exists and bind it
+ // over.
if (InitValWithAdjustments.isUnknown()) {
// Try to recover some path sensitivity in case we couldn't
// compute the value.
@@ -308,6 +449,79 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
return State;
}
+ProgramStateRef ExprEngine::addObjectUnderConstruction(
+ ProgramStateRef State,
+ llvm::PointerUnion<const Stmt *, const CXXCtorInitializer *> P,
+ const LocationContext *LC, SVal V) {
+ ConstructedObjectKey Key(P, LC->getStackFrame());
+ // FIXME: Currently the state might already contain the marker due to
+ // incorrect handling of temporaries bound to default parameters.
+ assert(!State->get<ObjectsUnderConstruction>(Key) ||
+ isa<CXXBindTemporaryExpr>(Key.getStmt()));
+ return State->set<ObjectsUnderConstruction>(Key, V);
+}
+
+Optional<SVal> ExprEngine::getObjectUnderConstruction(
+ ProgramStateRef State,
+ llvm::PointerUnion<const Stmt *, const CXXCtorInitializer *> P,
+ const LocationContext *LC) {
+ ConstructedObjectKey Key(P, LC->getStackFrame());
+ return Optional<SVal>::create(State->get<ObjectsUnderConstruction>(Key));
+}
+
+ProgramStateRef ExprEngine::finishObjectConstruction(
+ ProgramStateRef State,
+ llvm::PointerUnion<const Stmt *, const CXXCtorInitializer *> P,
+ const LocationContext *LC) {
+ ConstructedObjectKey Key(P, LC->getStackFrame());
+ assert(State->contains<ObjectsUnderConstruction>(Key));
+ return State->remove<ObjectsUnderConstruction>(Key);
+}
+
+ProgramStateRef ExprEngine::elideDestructor(ProgramStateRef State,
+ const CXXBindTemporaryExpr *BTE,
+ const LocationContext *LC) {
+ ElidedDestructorItem I(BTE, LC);
+ assert(!State->contains<ElidedDestructors>(I));
+ return State->add<ElidedDestructors>(I);
+}
+
+ProgramStateRef
+ExprEngine::cleanupElidedDestructor(ProgramStateRef State,
+ const CXXBindTemporaryExpr *BTE,
+ const LocationContext *LC) {
+ ElidedDestructorItem I(BTE, LC);
+ assert(State->contains<ElidedDestructors>(I));
+ return State->remove<ElidedDestructors>(I);
+}
+
+bool ExprEngine::isDestructorElided(ProgramStateRef State,
+ const CXXBindTemporaryExpr *BTE,
+ const LocationContext *LC) {
+ ElidedDestructorItem I(BTE, LC);
+ return State->contains<ElidedDestructors>(I);
+}
+
+bool ExprEngine::areAllObjectsFullyConstructed(ProgramStateRef State,
+ const LocationContext *FromLC,
+ const LocationContext *ToLC) {
+ const LocationContext *LC = FromLC;
+ while (LC != ToLC) {
+ assert(LC && "ToLC must be a parent of FromLC!");
+ for (auto I : State->get<ObjectsUnderConstruction>())
+ if (I.first.getLocationContext() == LC)
+ return false;
+
+ for (auto I: State->get<ElidedDestructors>())
+ if (I.second == LC)
+ return false;
+
+ LC = LC->getParent();
+ }
+ return true;
+}
+
+
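
areAllObjectsFullyConstructed inspects a prefix of the location-context chain: walking parent links from FromLC up to, but not including, ToLC, it fails as soon as a pending construction or elided destructor is tagged with a context in that range. A condensed sketch of the walk (Ctx and Pending are stand-ins):

#include <cassert>
#include <set>

struct Ctx { const Ctx *Parent; };

// True iff no pending entry is tagged with a context in [From, To).
bool fullyConstructedBetween(const std::set<const Ctx *> &Pending,
                             const Ctx *From, const Ctx *To) {
  for (const Ctx *LC = From; LC != To; LC = LC->Parent) {
    assert(LC && "To must be an ancestor of From");
    if (Pending.count(LC))
      return false;
  }
  return true;
}
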
//===----------------------------------------------------------------------===//
// Top-level transfer function logic (Dispatcher).
//===----------------------------------------------------------------------===//
@@ -331,8 +545,44 @@ ExprEngine::processRegionChanges(ProgramStateRef state,
LCtx, Call);
}
+static void printObjectsUnderConstructionForContext(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL,
+ const char *Sep,
+ const LocationContext *LC) {
+ PrintingPolicy PP =
+ LC->getAnalysisDeclContext()->getASTContext().getPrintingPolicy();
+ for (auto I : State->get<ObjectsUnderConstruction>()) {
+ ConstructedObjectKey Key = I.first;
+ SVal Value = I.second;
+ if (Key.getLocationContext() != LC)
+ continue;
+ Key.print(Out, nullptr, PP);
+ Out << " : " << Value << NL;
+ }
+
+ for (auto I : State->get<ElidedDestructors>()) {
+ if (I.second != LC)
+ continue;
+ Out << '(' << I.second << ',' << (const void *)I.first << ") ";
+ I.first->printPretty(Out, nullptr, PP);
+ Out << " : (constructor elided)" << NL;
+ }
+}
+
void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) {
+ const char *NL, const char *Sep,
+ const LocationContext *LCtx) {
+ if (LCtx) {
+ if (!State->get<ObjectsUnderConstruction>().isEmpty()) {
+ Out << Sep << "Objects under construction:" << NL;
+
+ LCtx->dumpStack(Out, "", NL, Sep, [&](const LocationContext *LC) {
+ printObjectsUnderConstructionForContext(Out, State, NL, Sep, LC);
+ });
+ }
+ }
+
getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
}
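
Environment::print and printState now share one presentation pattern: dumpStack visits the frame chain outermost-first and invokes a callback per frame, and the callback filters a flat binding map down to entries tagged with that frame. A small sketch of the grouped dump (Frame and the map layout are invented stand-ins):

#include <cstdio>
#include <functional>
#include <map>
#include <utility>

struct Frame { const Frame *Parent; const char *Name; };

// Visit frames outermost-first, like LocationContext::dumpStack.
void dumpStack(const Frame *F, const std::function<void(const Frame *)> &CB) {
  if (!F)
    return;
  dumpStack(F->Parent, CB);
  std::printf("#%s:\n", F->Name);
  CB(F);
}

using Bindings = std::map<std::pair<const Frame *, int>, int>;

void printGrouped(const Frame *Top, const Bindings &B) {
  dumpStack(Top, [&](const Frame *F) {
    for (const auto &Entry : B)
      if (Entry.first.first == F)  // only entries tagged with this frame
        std::printf("  (%d) : %d\n", Entry.first.second, Entry.second);
  });
}
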
@@ -348,10 +598,12 @@ void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
switch (E.getKind()) {
case CFGElement::Statement:
- ProcessStmt(const_cast<Stmt*>(E.castAs<CFGStmt>().getStmt()), Pred);
+ case CFGElement::Constructor:
+ case CFGElement::CXXRecordTypedCall:
+ ProcessStmt(E.castAs<CFGStmt>().getStmt(), Pred);
return;
case CFGElement::Initializer:
- ProcessInitializer(E.castAs<CFGInitializer>().getInitializer(), Pred);
+ ProcessInitializer(E.castAs<CFGInitializer>(), Pred);
return;
case CFGElement::NewAllocator:
ProcessNewAllocator(E.castAs<CFGNewAllocator>().getAllocatorExpr(),
@@ -368,15 +620,16 @@ void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
ProcessLoopExit(E.castAs<CFGLoopExit>().getLoopStmt(), Pred);
return;
case CFGElement::LifetimeEnds:
+ case CFGElement::ScopeBegin:
+ case CFGElement::ScopeEnd:
return;
}
}
static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
- const CFGStmt S,
+ const Stmt *S,
const ExplodedNode *Pred,
const LocationContext *LC) {
-
// Are we never purging state values?
if (AMgr.options.AnalysisPurgeOpt == PurgeNone)
return false;
@@ -386,17 +639,17 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
return true;
// Is this on a non-expression?
- if (!isa<Expr>(S.getStmt()))
+ if (!isa<Expr>(S))
return true;
// Run before processing a call.
- if (CallEvent::isCallStmt(S.getStmt()))
+ if (CallEvent::isCallStmt(S))
return true;
// Is this an expression that is consumed by another expression? If so,
// postpone cleaning out the state.
ParentMap &PM = LC->getAnalysisDeclContext()->getParentMap();
- return !PM.isConsumedExpr(cast<Expr>(S.getStmt()));
+ return !PM.isConsumedExpr(cast<Expr>(S));
}
void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
@@ -426,9 +679,16 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
LC = LC->getParent();
}
- const StackFrameContext *SFC = LC ? LC->getCurrentStackFrame() : nullptr;
+ const StackFrameContext *SFC = LC ? LC->getStackFrame() : nullptr;
SymbolReaper SymReaper(SFC, ReferenceStmt, SymMgr, getStoreManager());
+ for (auto I : CleanedState->get<ObjectsUnderConstruction>()) {
+ if (SymbolRef Sym = I.second.getAsSymbol())
+ SymReaper.markLive(Sym);
+ if (const MemRegion *MR = I.second.getAsRegion())
+ SymReaper.markLive(MR);
+ }
+
getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
// Create a state in which dead bindings are removed from the environment
@@ -457,9 +717,8 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// environment, the store, and the constraints cleaned up but have the
// user-supplied states as the predecessors.
StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
- for (ExplodedNodeSet::const_iterator
- I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
- ProgramStateRef CheckerState = (*I)->getState();
+ for (const auto I : CheckedSet) {
+ ProgramStateRef CheckerState = I->getState();
// The constraint manager has not been cleaned up yet, so clean up now.
CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
@@ -476,35 +735,34 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// generate a transition to that state.
ProgramStateRef CleanedCheckerSt =
StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
- Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, &cleanupTag, K);
+ Bldr.generateNode(DiagnosticStmt, I, CleanedCheckerSt, &cleanupTag, K);
}
}
}
-void ExprEngine::ProcessStmt(const CFGStmt S,
- ExplodedNode *Pred) {
+void ExprEngine::ProcessStmt(const Stmt *currStmt, ExplodedNode *Pred) {
// Reclaim any unnecessary nodes in the ExplodedGraph.
G.reclaimRecentlyAllocatedNodes();
- const Stmt *currStmt = S.getStmt();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
currStmt->getLocStart(),
"Error evaluating statement");
// Remove dead bindings and symbols.
ExplodedNodeSet CleanedStates;
- if (shouldRemoveDeadBindings(AMgr, S, Pred, Pred->getLocationContext())){
- removeDead(Pred, CleanedStates, currStmt, Pred->getLocationContext());
+ if (shouldRemoveDeadBindings(AMgr, currStmt, Pred,
+ Pred->getLocationContext())) {
+ removeDead(Pred, CleanedStates, currStmt,
+ Pred->getLocationContext());
} else
CleanedStates.Add(Pred);
// Visit the statement.
ExplodedNodeSet Dst;
- for (ExplodedNodeSet::iterator I = CleanedStates.begin(),
- E = CleanedStates.end(); I != E; ++I) {
+ for (const auto I : CleanedStates) {
ExplodedNodeSet DstI;
// Visit the statement.
- Visit(currStmt, *I, DstI);
+ Visit(currStmt, I, DstI);
Dst.insert(DstI);
}
@@ -530,36 +788,38 @@ void ExprEngine::ProcessLoopExit(const Stmt* S, ExplodedNode *Pred) {
Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
}
-void ExprEngine::ProcessInitializer(const CFGInitializer Init,
+void ExprEngine::ProcessInitializer(const CFGInitializer CFGInit,
ExplodedNode *Pred) {
- const CXXCtorInitializer *BMI = Init.getInitializer();
+ const CXXCtorInitializer *BMI = CFGInit.getInitializer();
+ const Expr *Init = BMI->getInit()->IgnoreImplicit();
+ const LocationContext *LC = Pred->getLocationContext();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
BMI->getSourceLocation(),
"Error evaluating initializer");
// We don't clean up dead bindings here.
- const StackFrameContext *stackFrame =
- cast<StackFrameContext>(Pred->getLocationContext());
- const CXXConstructorDecl *decl =
- cast<CXXConstructorDecl>(stackFrame->getDecl());
+ const auto *stackFrame = cast<StackFrameContext>(Pred->getLocationContext());
+ const auto *decl = cast<CXXConstructorDecl>(stackFrame->getDecl());
ProgramStateRef State = Pred->getState();
SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame));
- ExplodedNodeSet Tmp(Pred);
+ ExplodedNodeSet Tmp;
SVal FieldLoc;
// Evaluate the initializer, if necessary
if (BMI->isAnyMemberInitializer()) {
// Constructors build the object directly in the field,
// but non-objects must be copied in from the initializer.
- if (auto *CtorExpr = findDirectConstructorForCurrentCFGElement()) {
- assert(BMI->getInit()->IgnoreImplicit() == CtorExpr);
- (void)CtorExpr;
+ if (getObjectUnderConstruction(State, BMI, LC)) {
// The field was directly constructed, so there is no need to bind.
+ // But we still need to stop tracking the object under construction.
+ State = finishObjectConstruction(State, BMI, LC);
+ NodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+ PostStore PS(Init, LC, /*Loc*/ nullptr, /*tag*/ nullptr);
+ Bldr.generateNode(PS, State, Pred);
} else {
- const Expr *Init = BMI->getInit()->IgnoreImplicit();
const ValueDecl *Field;
if (BMI->isIndirectMemberInitializer()) {
Field = BMI->getIndirectMember();
@@ -593,15 +853,12 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
InitVal = State->getSVal(BMI->getInit(), stackFrame);
}
- assert(Tmp.size() == 1 && "have not generated any new nodes yet");
- assert(*Tmp.begin() == Pred && "have not generated any new nodes yet");
- Tmp.clear();
-
PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
}
} else {
assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer());
+ Tmp.insert(Pred);
// We already did all the work when visiting the CXXConstructExpr.
}
@@ -610,9 +867,9 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
ExplodedNodeSet Dst;
NodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
- ExplodedNode *N = *I;
- Bldr.generateNode(PP, N->getState(), N);
+ for (const auto I : Tmp) {
+ ProgramStateRef State = I->getState();
+ Bldr.generateNode(PP, State, I);
}
// Enqueue the new nodes onto the work list.
@@ -688,8 +945,15 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
varType = cast<TypedValueRegion>(Region)->getValueType();
}
+ // FIXME: We need to run the same destructor on every element of the array.
+ // This workaround will just run the first destructor (which will still
+ // invalidate the entire array).
+ EvalCallOptions CallOpts;
+ Region = makeZeroElementRegion(state, loc::MemRegionVal(Region), varType,
+ CallOpts.IsArrayCtorOrDtor).getAsRegion();
+
VisitCXXDestructor(varType, Region, Dtor.getTriggerStmt(), /*IsBase=*/ false,
- Pred, Dst);
+ Pred, Dst, CallOpts);
}
void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
@@ -699,12 +963,12 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
const LocationContext *LCtx = Pred->getLocationContext();
const CXXDeleteExpr *DE = Dtor.getDeleteExpr();
const Stmt *Arg = DE->getArgument();
+ QualType DTy = DE->getDestroyedType();
SVal ArgVal = State->getSVal(Arg, LCtx);
// If the argument to delete is known to be a null value,
// don't run destructor.
if (State->isNull(ArgVal).isConstrainedTrue()) {
- QualType DTy = DE->getDestroyedType();
QualType BTy = getContext().getBaseElementType(DTy);
const CXXRecordDecl *RD = BTy->getAsCXXRecordDecl();
const CXXDestructorDecl *Dtor = RD->getDestructor();
@@ -715,19 +979,30 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
return;
}
- VisitCXXDestructor(DE->getDestroyedType(),
- ArgVal.getAsRegion(),
- DE, /*IsBase=*/ false,
- Pred, Dst);
+ EvalCallOptions CallOpts;
+ const MemRegion *ArgR = ArgVal.getAsRegion();
+ if (DE->isArrayForm()) {
+ // FIXME: We need to run the same destructor on every element of the array.
+ // This workaround will just run the first destructor (which will still
+ // invalidate the entire array).
+ CallOpts.IsArrayCtorOrDtor = true;
+ // Yes, it may even be a multi-dimensional array.
+ while (const auto *AT = getContext().getAsArrayType(DTy))
+ DTy = AT->getElementType();
+ if (ArgR)
+ ArgR = getStoreManager().GetElementZeroRegion(cast<SubRegion>(ArgR), DTy);
+ }
+
+ VisitCXXDestructor(DTy, ArgR, DE, /*IsBase=*/false, Pred, Dst, CallOpts);
}
void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
const LocationContext *LCtx = Pred->getLocationContext();
- const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+ const auto *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
Loc ThisPtr = getSValBuilder().getCXXThis(CurDtor,
- LCtx->getCurrentStackFrame());
+ LCtx->getStackFrame());
SVal ThisVal = Pred->getState()->getSVal(ThisPtr);
// Create the base object region.
@@ -737,51 +1012,94 @@ void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
Base->isVirtual());
VisitCXXDestructor(BaseTy, BaseVal.castAs<loc::MemRegionVal>().getRegion(),
- CurDtor->getBody(), /*IsBase=*/ true, Pred, Dst);
+ CurDtor->getBody(), /*IsBase=*/ true, Pred, Dst, {});
}
void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
const FieldDecl *Member = D.getFieldDecl();
+ QualType T = Member->getType();
ProgramStateRef State = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
- const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+ const auto *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
Loc ThisVal = getSValBuilder().getCXXThis(CurDtor,
- LCtx->getCurrentStackFrame());
+ LCtx->getStackFrame());
SVal FieldVal =
State->getLValue(Member, State->getSVal(ThisVal).castAs<Loc>());
- VisitCXXDestructor(Member->getType(),
- FieldVal.castAs<loc::MemRegionVal>().getRegion(),
- CurDtor->getBody(), /*IsBase=*/false, Pred, Dst);
+ // FIXME: We need to run the same destructor on every element of the array.
+ // This workaround will just run the first destructor (which will still
+ // invalidate the entire array).
+ EvalCallOptions CallOpts;
+ FieldVal = makeZeroElementRegion(State, FieldVal, T,
+ CallOpts.IsArrayCtorOrDtor);
+
+ VisitCXXDestructor(T, FieldVal.castAs<loc::MemRegionVal>().getRegion(),
+ CurDtor->getBody(), /*IsBase=*/false, Pred, Dst, CallOpts);
}
void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- ExplodedNodeSet CleanDtorState;
- StmtNodeBuilder StmtBldr(Pred, CleanDtorState, *currBldrCtx);
+ const CXXBindTemporaryExpr *BTE = D.getBindTemporaryExpr();
ProgramStateRef State = Pred->getState();
- if (State->contains<InitializedTemporariesSet>(
- std::make_pair(D.getBindTemporaryExpr(), Pred->getStackFrame()))) {
+ const LocationContext *LC = Pred->getLocationContext();
+ const MemRegion *MR = nullptr;
+
+ if (Optional<SVal> V =
+ getObjectUnderConstruction(State, D.getBindTemporaryExpr(),
+ Pred->getLocationContext())) {
// FIXME: Currently we insert temporary destructors for default parameters,
- // but we don't insert the constructors.
- State = State->remove<InitializedTemporariesSet>(
- std::make_pair(D.getBindTemporaryExpr(), Pred->getStackFrame()));
+ // but we don't insert the constructors, so the entry in
+ // ObjectsUnderConstruction may be missing.
+ State = finishObjectConstruction(State, D.getBindTemporaryExpr(),
+ Pred->getLocationContext());
+ MR = V->getAsRegion();
}
+
+ // If copy elision has occurred, and the constructor corresponding to the
+ // destructor was elided, we need to skip the destructor as well.
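+ // E.g. in 'S s = S();' the elidable copy constructor may be skipped, in
+ // which case the matching temporary destructor must be skipped as well
+ // (illustrative example).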
+ if (isDestructorElided(State, BTE, LC)) {
+ State = cleanupElidedDestructor(State, BTE, LC);
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ PostImplicitCall PP(D.getDestructorDecl(getContext()),
+ D.getBindTemporaryExpr()->getLocStart(),
+ Pred->getLocationContext());
+ Bldr.generateNode(PP, State, Pred);
+ return;
+ }
+
+ ExplodedNodeSet CleanDtorState;
+ StmtNodeBuilder StmtBldr(Pred, CleanDtorState, *currBldrCtx);
StmtBldr.generateNode(D.getBindTemporaryExpr(), Pred, State);
- QualType varType = D.getBindTemporaryExpr()->getSubExpr()->getType();
+ QualType T = D.getBindTemporaryExpr()->getSubExpr()->getType();
// FIXME: Currently CleanDtorState can be empty here due to temporaries being
// bound to default parameters.
assert(CleanDtorState.size() <= 1);
ExplodedNode *CleanPred =
CleanDtorState.empty() ? Pred : *CleanDtorState.begin();
- // FIXME: Inlining of temporary destructors is not supported yet anyway, so
- // we just put a NULL region for now. This will need to be changed later.
- VisitCXXDestructor(varType, nullptr, D.getBindTemporaryExpr(),
- /*IsBase=*/false, CleanPred, Dst);
+
+ EvalCallOptions CallOpts;
+ CallOpts.IsTemporaryCtorOrDtor = true;
+ if (!MR) {
+ CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
+
+ // If we have no MR, we still need to unwrap the array to avoid destroying
+ // the whole array at once. Regardless, we'd eventually need to model array
+ // destructors properly, element-by-element.
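+ // E.g. a temporary of array type (say, 'using A = S[2]; ... A{};')
+ // unwraps to the element type 'S' here (illustrative).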
+ while (const ArrayType *AT = getContext().getAsArrayType(T)) {
+ T = AT->getElementType();
+ CallOpts.IsArrayCtorOrDtor = true;
+ }
+ } else {
+ // We'd eventually need the makeZeroElementRegion() trick here,
+ // but for now we don't have the respective construction contexts,
+ // so MR would always be null in the array case. Do nothing for now.
+ }
+ VisitCXXDestructor(T, MR, D.getBindTemporaryExpr(),
+ /*IsBase=*/false, CleanPred, Dst, CallOpts);
}
void ExprEngine::processCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
@@ -791,19 +1109,23 @@ void ExprEngine::processCleanupTemporaryBranch(const CXXBindTemporaryExpr *BTE,
const CFGBlock *DstT,
const CFGBlock *DstF) {
BranchNodeBuilder TempDtorBuilder(Pred, Dst, BldCtx, DstT, DstF);
- if (Pred->getState()->contains<InitializedTemporariesSet>(
- std::make_pair(BTE, Pred->getStackFrame()))) {
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *LC = Pred->getLocationContext();
+ if (getObjectUnderConstruction(State, BTE, LC)) {
TempDtorBuilder.markInfeasible(false);
- TempDtorBuilder.generateNode(Pred->getState(), true, Pred);
+ TempDtorBuilder.generateNode(State, true, Pred);
} else {
TempDtorBuilder.markInfeasible(true);
- TempDtorBuilder.generateNode(Pred->getState(), false, Pred);
+ TempDtorBuilder.generateNode(State, false, Pred);
}
}
void ExprEngine::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE,
ExplodedNodeSet &PreVisit,
ExplodedNodeSet &Dst) {
+ // This is a fallback solution in case we didn't have a construction
+ // context when we were constructing the temporary. Otherwise the map should
+ // have been populated there.
if (!getAnalysisManager().options.includeTemporaryDtorsInCFG()) {
// In case we don't have temporary destructors in the CFG, do not mark
// the initialization - we would otherwise never clean it up.
@@ -813,34 +1135,39 @@ void ExprEngine::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE,
StmtNodeBuilder StmtBldr(PreVisit, Dst, *currBldrCtx);
for (ExplodedNode *Node : PreVisit) {
ProgramStateRef State = Node->getState();
-
- if (!State->contains<InitializedTemporariesSet>(
- std::make_pair(BTE, Node->getStackFrame()))) {
- // FIXME: Currently the state might already contain the marker due to
+ const LocationContext *LC = Node->getLocationContext();
+ if (!getObjectUnderConstruction(State, BTE, LC)) {
+ // FIXME: Currently the state might also already contain the marker due to
// incorrect handling of temporaries bound to default parameters; for
// those, we currently skip the CXXBindTemporaryExpr but rely on adding
// temporary destructor nodes.
- State = State->add<InitializedTemporariesSet>(
- std::make_pair(BTE, Node->getStackFrame()));
+ State = addObjectUnderConstruction(State, BTE, LC, UnknownVal());
}
StmtBldr.generateNode(BTE, Node, State);
}
}
-namespace {
-class CollectReachableSymbolsCallback final : public SymbolVisitor {
- InvalidatedSymbols Symbols;
+ProgramStateRef ExprEngine::escapeValue(ProgramStateRef State, SVal V,
+ PointerEscapeKind K) const {
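+ // Collects all symbols reachable from V and reports them to the checkers
+ // as escaped with the given escape kind K.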
+ class CollectReachableSymbolsCallback final : public SymbolVisitor {
+ InvalidatedSymbols Symbols;
-public:
- explicit CollectReachableSymbolsCallback(ProgramStateRef State) {}
- const InvalidatedSymbols &getSymbols() const { return Symbols; }
+ public:
+ explicit CollectReachableSymbolsCallback(ProgramStateRef State) {}
- bool VisitSymbol(SymbolRef Sym) override {
- Symbols.insert(Sym);
- return true;
- }
-};
-} // end anonymous namespace
+ const InvalidatedSymbols &getSymbols() const { return Symbols; }
+
+ bool VisitSymbol(SymbolRef Sym) override {
+ Symbols.insert(Sym);
+ return true;
+ }
+ };
+
+ const CollectReachableSymbolsCallback &Scanner =
+ State->scanReachableSymbols<CollectReachableSymbolsCallback>(V);
+ return getCheckerManager().runCheckersForPointerEscape(
+ State, Scanner.getSymbols(), /*CallEvent*/ nullptr, K, nullptr);
+}
void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet &DstTop) {
@@ -930,8 +1257,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
- case Stmt::CapturedStmtClass:
- {
+ case Stmt::CapturedStmtClass: {
const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
Engine.addAbortedBlock(node, currBldrCtx->getBlock());
break;
@@ -1026,6 +1352,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::AddrLabelExprClass:
case Stmt::AttributedStmtClass:
case Stmt::IntegerLiteralClass:
+ case Stmt::FixedPointLiteralClass:
case Stmt::CharacterLiteralClass:
case Stmt::ImplicitValueInitExprClass:
case Stmt::CXXScalarValueInitExprClass:
@@ -1060,16 +1387,15 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
StmtNodeBuilder Bldr2(PreVisit, Tmp, *currBldrCtx);
const Expr *ArgE;
- if (const CXXDefaultArgExpr *DefE = dyn_cast<CXXDefaultArgExpr>(S))
+ if (const auto *DefE = dyn_cast<CXXDefaultArgExpr>(S))
ArgE = DefE->getExpr();
- else if (const CXXDefaultInitExpr *DefE = dyn_cast<CXXDefaultInitExpr>(S))
+ else if (const auto *DefE = dyn_cast<CXXDefaultInitExpr>(S))
ArgE = DefE->getExpr();
else
llvm_unreachable("unknown constant wrapper kind");
bool IsTemporary = false;
- if (const MaterializeTemporaryExpr *MTE =
- dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
ArgE = MTE->GetTemporaryExpr();
IsTemporary = true;
}
@@ -1079,15 +1405,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ConstantVal = UnknownVal();
const LocationContext *LCtx = Pred->getLocationContext();
- for (ExplodedNodeSet::iterator I = PreVisit.begin(), E = PreVisit.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
+ for (const auto I : PreVisit) {
+ ProgramStateRef State = I->getState();
State = State->BindExpr(S, LCtx, *ConstantVal);
if (IsTemporary)
State = createTemporaryRegionIfNeeded(State, LCtx,
cast<Expr>(S),
cast<Expr>(S));
- Bldr2.generateNode(S, *I, State);
+ Bldr2.generateNode(S, I, State);
}
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
@@ -1108,12 +1433,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr2(preVisit, Tmp, *currBldrCtx);
- const Expr *Ex = cast<Expr>(S);
+ const auto *Ex = cast<Expr>(S);
QualType resultType = Ex->getType();
- for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
- it != et; ++it) {
- ExplodedNode *N = *it;
+ for (const auto N : preVisit) {
const LocationContext *LCtx = N->getLocationContext();
SVal result = svalBuilder.conjureSymbolVal(nullptr, Ex, LCtx,
resultType,
@@ -1127,17 +1450,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
->getType()->isRecordType()))
for (auto Child : Ex->children()) {
assert(Child);
-
SVal Val = State->getSVal(Child, LCtx);
-
- CollectReachableSymbolsCallback Scanner =
- State->scanReachableSymbols<CollectReachableSymbolsCallback>(
- Val);
- const InvalidatedSymbols &EscapedSymbols = Scanner.getSymbols();
-
- State = getCheckerManager().runCheckersForPointerEscape(
- State, EscapedSymbols,
- /*CallEvent*/ nullptr, PSK_EscapeOther, nullptr);
+ State = escapeValue(State, Val, PSK_EscapeOther);
}
Bldr2.generateNode(S, N, State);
@@ -1184,7 +1498,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
case Stmt::BinaryOperatorClass: {
- const BinaryOperator* B = cast<BinaryOperator>(S);
+ const auto *B = cast<BinaryOperator>(S);
if (B->isLogicalOp()) {
Bldr.takeNodes(Pred);
VisitLogicalExpr(B, Pred, Dst);
@@ -1216,12 +1530,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
case Stmt::CXXOperatorCallExprClass: {
- const CXXOperatorCallExpr *OCE = cast<CXXOperatorCallExpr>(S);
+ const auto *OCE = cast<CXXOperatorCallExpr>(S);
// For instance method operators, make sure the 'this' argument has a
// valid region.
const Decl *Callee = OCE->getCalleeDecl();
- if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Callee)) {
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Callee)) {
if (MD->isInstance()) {
ProgramStateRef State = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -1239,34 +1553,38 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// FALLTHROUGH
LLVM_FALLTHROUGH;
}
+
case Stmt::CallExprClass:
case Stmt::CXXMemberCallExprClass:
- case Stmt::UserDefinedLiteralClass: {
+ case Stmt::UserDefinedLiteralClass:
Bldr.takeNodes(Pred);
VisitCallExpr(cast<CallExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
break;
- }
- case Stmt::CXXCatchStmtClass: {
+ case Stmt::CXXCatchStmtClass:
Bldr.takeNodes(Pred);
VisitCXXCatchStmt(cast<CXXCatchStmt>(S), Pred, Dst);
Bldr.addNodes(Dst);
break;
- }
case Stmt::CXXTemporaryObjectExprClass:
- case Stmt::CXXConstructExprClass: {
+ case Stmt::CXXConstructExprClass:
Bldr.takeNodes(Pred);
VisitCXXConstructExpr(cast<CXXConstructExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
break;
- }
case Stmt::CXXNewExprClass: {
Bldr.takeNodes(Pred);
+
+ ExplodedNodeSet PreVisit;
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+
ExplodedNodeSet PostVisit;
- VisitCXXNewExpr(cast<CXXNewExpr>(S), Pred, PostVisit);
+ for (const auto i : PreVisit)
+ VisitCXXNewExpr(cast<CXXNewExpr>(S), i, PostVisit);
+
getCheckerManager().runCheckersForPostStmt(Dst, PostVisit, S, *this);
Bldr.addNodes(Dst);
break;
@@ -1275,12 +1593,11 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXDeleteExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet PreVisit;
- const CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
+ const auto *CDE = cast<CXXDeleteExpr>(S);
getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
- for (ExplodedNodeSet::iterator i = PreVisit.begin(),
- e = PreVisit.end(); i != e ; ++i)
- VisitCXXDeleteExpr(CDE, *i, Dst);
+ for (const auto i : PreVisit)
+ VisitCXXDeleteExpr(CDE, i, Dst);
Bldr.addNodes(Dst);
break;
@@ -1290,7 +1607,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ChooseExprClass: { // __builtin_choose_expr
Bldr.takeNodes(Pred);
- const ChooseExpr *C = cast<ChooseExpr>(S);
+ const auto *C = cast<ChooseExpr>(S);
VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
Bldr.addNodes(Dst);
break;
@@ -1311,8 +1628,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: { // '?' operator
Bldr.takeNodes(Pred);
- const AbstractConditionalOperator *C
- = cast<AbstractConditionalOperator>(S);
+ const auto *C = cast<AbstractConditionalOperator>(S);
VisitGuardedExpr(C, C->getTrueExpr(), C->getFalseExpr(), Pred, Dst);
Bldr.addNodes(Dst);
break;
@@ -1326,7 +1642,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::DeclRefExprClass: {
Bldr.takeNodes(Pred);
- const DeclRefExpr *DE = cast<DeclRefExpr>(S);
+ const auto *DE = cast<DeclRefExpr>(S);
VisitCommonDeclRefExpr(DE, DE->getDecl(), Pred, Dst);
Bldr.addNodes(Dst);
break;
@@ -1347,7 +1663,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXFunctionalCastExprClass:
case Stmt::ObjCBridgedCastExprClass: {
Bldr.takeNodes(Pred);
- const CastExpr *C = cast<CastExpr>(S);
+ const auto *C = cast<CastExpr>(S);
ExplodedNodeSet dstExpr;
VisitCast(C, C->getSubExpr(), Pred, dstExpr);
@@ -1359,14 +1675,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Expr::MaterializeTemporaryExprClass: {
Bldr.takeNodes(Pred);
- const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(S);
+ const auto *MTE = cast<MaterializeTemporaryExpr>(S);
ExplodedNodeSet dstPrevisit;
getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, MTE, *this);
ExplodedNodeSet dstExpr;
- for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
- e = dstPrevisit.end(); i != e ; ++i) {
- CreateCXXTemporaryObject(MTE, *i, dstExpr);
- }
+ for (const auto i : dstPrevisit)
+ CreateCXXTemporaryObject(MTE, i, dstExpr);
getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, MTE, *this);
Bldr.addNodes(Dst);
break;
@@ -1421,11 +1735,19 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
- case Stmt::OffsetOfExprClass:
+ case Stmt::OffsetOfExprClass: {
Bldr.takeNodes(Pred);
- VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Pred, Dst);
+ ExplodedNodeSet PreVisit;
+ getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+
+ ExplodedNodeSet PostVisit;
+ for (const auto Node : PreVisit)
+ VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Node, PostVisit);
+
+ getCheckerManager().runCheckersForPostStmt(Dst, PostVisit, S, *this);
Bldr.addNodes(Dst);
break;
+ }
case Stmt::UnaryExprOrTypeTraitExprClass:
Bldr.takeNodes(Pred);
@@ -1435,7 +1757,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
case Stmt::StmtExprClass: {
- const StmtExpr *SE = cast<StmtExpr>(S);
+ const auto *SE = cast<StmtExpr>(S);
if (SE->getSubStmt()->body_empty()) {
// Empty statement expression.
@@ -1444,7 +1766,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
}
- if (Expr *LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
+ if (const auto *LastExpr =
+ dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
ProgramStateRef state = Pred->getState();
Bldr.generateNode(SE, Pred,
state->BindExpr(SE, Pred->getLocationContext(),
@@ -1456,7 +1779,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::UnaryOperatorClass: {
Bldr.takeNodes(Pred);
- const UnaryOperator *U = cast<UnaryOperator>(S);
+ const auto *U = cast<UnaryOperator>(S);
if (AMgr.options.eagerlyAssumeBinOpBifurcation && (U->getOpcode() == UO_LNot)) {
ExplodedNodeSet Tmp;
VisitUnaryOperator(U, Pred, Tmp);
@@ -1471,7 +1794,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::PseudoObjectExprClass: {
Bldr.takeNodes(Pred);
ProgramStateRef state = Pred->getState();
- const PseudoObjectExpr *PE = cast<PseudoObjectExpr>(S);
+ const auto *PE = cast<PseudoObjectExpr>(S);
if (const Expr *Result = PE->getResultExpr()) {
SVal V = state->getSVal(Result, Pred->getLocationContext());
Bldr.generateNode(S, Pred,
@@ -1490,8 +1813,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
const LocationContext *CalleeLC) {
- const StackFrameContext *CalleeSF = CalleeLC->getCurrentStackFrame();
- const StackFrameContext *CallerSF = CalleeSF->getParent()->getCurrentStackFrame();
+ const StackFrameContext *CalleeSF = CalleeLC->getStackFrame();
+ const StackFrameContext *CallerSF = CalleeSF->getParent()->getStackFrame();
assert(CalleeSF && CallerSF);
ExplodedNode *BeforeProcessingCall = nullptr;
const Stmt *CE = CalleeSF->getCallSite();
@@ -1503,7 +1826,7 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
N = N->pred_empty() ? nullptr : *(N->pred_begin());
// Skip the nodes corresponding to the inlined code.
- if (L.getLocationContext()->getCurrentStackFrame() != CallerSF)
+ if (L.getStackFrame() != CallerSF)
continue;
// We reached the caller. Find the node right before we started
// processing the call.
@@ -1602,10 +1925,10 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
// Check if we stopped at the top level function or not.
// Root node should have the location context of the top most function.
const LocationContext *CalleeLC = Pred->getLocation().getLocationContext();
- const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+ const LocationContext *CalleeSF = CalleeLC->getStackFrame();
const LocationContext *RootLC =
(*G.roots_begin())->getLocation().getLocationContext();
- if (RootLC->getCurrentStackFrame() != CalleeSF) {
+ if (RootLC->getStackFrame() != CalleeSF) {
Engine.FunctionSummaries->markReachedMaxBlockCount(CalleeSF->getDecl());
// Re-run the call evaluation without inlining it, by storing the
@@ -1639,14 +1962,14 @@ static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
const LocationContext *LCtx,
ASTContext &Ctx) {
- const Expr *Ex = dyn_cast<Expr>(Condition);
+ const auto *Ex = dyn_cast<Expr>(Condition);
if (!Ex)
return UnknownVal();
uint64_t bits = 0;
bool bitsInit = false;
- while (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ while (const auto *CE = dyn_cast<CastExpr>(Ex)) {
QualType T = CE->getType();
if (!T->isIntegralOrEnumerationType())
@@ -1674,7 +1997,7 @@ static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
#ifndef NDEBUG
static const Stmt *getRightmostLeaf(const Stmt *Condition) {
while (Condition) {
- const BinaryOperator *BO = dyn_cast<BinaryOperator>(Condition);
+ const auto *BO = dyn_cast<BinaryOperator>(Condition);
if (!BO || !BO->isLogicalOp()) {
return Condition;
}
@@ -1700,10 +2023,10 @@ static const Stmt *getRightmostLeaf(const Stmt *Condition) {
// space.
static const Stmt *ResolveCondition(const Stmt *Condition,
const CFGBlock *B) {
- if (const Expr *Ex = dyn_cast<Expr>(Condition))
+ if (const auto *Ex = dyn_cast<Expr>(Condition))
Condition = Ex->IgnoreParens();
- const BinaryOperator *BO = dyn_cast<BinaryOperator>(Condition);
+ const auto *BO = dyn_cast<BinaryOperator>(Condition);
if (!BO || !BO->isLogicalOp())
return Condition;
@@ -1751,7 +2074,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
return;
}
- if (const Expr *Ex = dyn_cast<Expr>(Condition))
+ if (const auto *Ex = dyn_cast<Expr>(Condition))
Condition = Ex->IgnoreParens();
Condition = ResolveCondition(Condition, BldCtx.getBlock());
@@ -1767,10 +2090,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
return;
BranchNodeBuilder builder(CheckersOutSet, Dst, BldCtx, DstT, DstF);
- for (NodeBuilder::iterator I = CheckersOutSet.begin(),
- E = CheckersOutSet.end(); E != I; ++I) {
- ExplodedNode *PredI = *I;
-
+ for (const auto PredI : CheckersOutSet) {
if (PredI->isSink())
continue;
@@ -1779,7 +2099,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
if (X.isUnknownOrUndef()) {
// Give it a chance to recover from unknown.
- if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (const auto *Ex = dyn_cast<Expr>(Condition)) {
if (Ex->getType()->isIntegralOrEnumerationType()) {
// Try to recover some path-sensitivity. Right now casts of symbolic
// integers that promote their values are currently not tracked well.
@@ -1836,13 +2156,13 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedGlobalsSet,
void ExprEngine::processStaticInitializer(const DeclStmt *DS,
NodeBuilderContext &BuilderCtx,
ExplodedNode *Pred,
- clang::ento::ExplodedNodeSet &Dst,
+ ExplodedNodeSet &Dst,
const CFGBlock *DstT,
const CFGBlock *DstF) {
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
currBldrCtx = &BuilderCtx;
- const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ const auto *VD = cast<VarDecl>(DS->getSingleDecl());
ProgramStateRef state = Pred->getState();
bool initHasRun = state->contains<InitializedGlobalsSet>(VD);
BranchNodeBuilder builder(Pred, Dst, BuilderCtx, DstT, DstF);
@@ -1860,7 +2180,6 @@ void ExprEngine::processStaticInitializer(const DeclStmt *DS,
/// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
-
ProgramStateRef state = builder.getState();
SVal V = state->getSVal(builder.getTarget(), builder.getLocationContext());
@@ -1871,7 +2190,7 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
// (3) We have no clue about the label. Dispatch to all targets.
//
- typedef IndirectGotoNodeBuilder::iterator iterator;
+ using iterator = IndirectGotoNodeBuilder::iterator;
if (Optional<loc::GotoLabel> LV = V.getAs<loc::GotoLabel>()) {
const LabelDecl *L = LV->getLabel();
@@ -1897,26 +2216,10 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
// This is really a catch-all. We don't support symbolics yet.
// FIXME: Implement dispatch for symbolic pointers.
- for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
+ for (iterator I = builder.begin(), E = builder.end(); I != E; ++I)
builder.generateNode(I, state);
}
-#if 0
-static bool stackFrameDoesNotContainInitializedTemporaries(ExplodedNode &Pred) {
- const StackFrameContext* Frame = Pred.getStackFrame();
- const llvm::ImmutableSet<CXXBindTemporaryContext> &Set =
- Pred.getState()->get<InitializedTemporariesSet>();
- return std::find_if(Set.begin(), Set.end(),
- [&](const CXXBindTemporaryContext &Ctx) {
- if (Ctx.second == Frame) {
- Ctx.first->dump();
- llvm::errs() << "\n";
- }
- return Ctx.second == Frame;
- }) == Set.end();
-}
-#endif
-
void ExprEngine::processBeginOfFunction(NodeBuilderContext &BC,
ExplodedNode *Pred,
ExplodedNodeSet &Dst,
@@ -1930,9 +2233,59 @@ void ExprEngine::processBeginOfFunction(NodeBuilderContext &BC,
void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
ExplodedNode *Pred,
const ReturnStmt *RS) {
- // FIXME: Assert that stackFrameDoesNotContainInitializedTemporaries(*Pred)).
- // We currently cannot enable this assert, as lifetime extended temporaries
- // are not modelled correctly.
+ // FIXME: We currently cannot assert that temporaries are clear, because
+ // lifetime extended temporaries are not always modelled correctly. In some
+ // cases, materializing the temporary calls createTemporaryRegionIfNeeded(),
+ // the region changes, and the respective destructor turns from a temporary
+ // destructor into an automatic one. So for now, clean up the state manually
+ // before asserting. Ideally, the code above the assertion
+ // should go away, but the assertion should remain.
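+ // E.g. 'const S &s = S();' extends the temporary's lifetime to that of
+ // the reference, turning its temporary destructor into an automatic one
+ // (illustrative example).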
+ {
+ ExplodedNodeSet CleanUpObjects;
+ NodeBuilder Bldr(Pred, CleanUpObjects, BC);
+ ProgramStateRef State = Pred->getState();
+ const LocationContext *FromLC = Pred->getLocationContext();
+ const LocationContext *ToLC = FromLC->getStackFrame()->getParent();
+ const LocationContext *LC = FromLC;
+ while (LC != ToLC) {
+ assert(LC && "ToLC must be a parent of FromLC!");
+ for (auto I : State->get<ObjectsUnderConstruction>())
+ if (I.first.getLocationContext() == LC) {
+ // The comment above only pardons us for not cleaning up a
+ // CXXBindTemporaryExpr. If any other statements are found here,
+ // it must be a separate problem.
+ assert(isa<CXXBindTemporaryExpr>(I.first.getStmt()));
+ State = State->remove<ObjectsUnderConstruction>(I.first);
+ // Also cleanup the elided destructor info.
+ ElidedDestructorItem Item(
+ cast<CXXBindTemporaryExpr>(I.first.getStmt()),
+ I.first.getLocationContext());
+ State = State->remove<ElidedDestructors>(Item);
+ }
+
+ // Also suppress the assertion for elided destructors when temporary
+ // destructors are not provided at all by the CFG, because there's no
+ // good place to clean them up.
+ if (!AMgr.getAnalyzerOptions().includeTemporaryDtorsInCFG())
+ for (auto I : State->get<ElidedDestructors>())
+ if (I.second == LC)
+ State = State->remove<ElidedDestructors>(I);
+
+ LC = LC->getParent();
+ }
+ if (State != Pred->getState()) {
+ Pred = Bldr.generateNode(Pred->getLocation(), State, Pred);
+ if (!Pred) {
+ // The node with clean temporaries already exists. We might have reached
+ // it on a path on which we initialize different temporaries.
+ return;
+ }
+ }
+ }
+ assert(areAllObjectsFullyConstructed(Pred->getState(),
+ Pred->getLocationContext(),
+ Pred->getStackFrame()->getParent()));
+
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
StateMgr.EndPath(Pred->getState());
@@ -1943,12 +2296,10 @@ void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
removeDeadOnEndOfFunction(BC, Pred, AfterRemovedDead);
// Notify checkers.
- for (ExplodedNodeSet::iterator I = AfterRemovedDead.begin(),
- E = AfterRemovedDead.end(); I != E; ++I) {
- getCheckerManager().runCheckersForEndFunction(BC, Dst, *I, *this);
- }
+ for (const auto I : AfterRemovedDead)
+ getCheckerManager().runCheckersForEndFunction(BC, Dst, I, *this, RS);
} else {
- getCheckerManager().runCheckersForEndFunction(BC, Dst, Pred, *this);
+ getCheckerManager().runCheckersForEndFunction(BC, Dst, Pred, *this, RS);
}
Engine.enqueueEndOfFunction(Dst, RS);
@@ -1957,7 +2308,8 @@ void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
/// ProcessSwitch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
- typedef SwitchNodeBuilder::iterator iterator;
+ using iterator = SwitchNodeBuilder::iterator;
+
ProgramStateRef state = builder.getState();
const Expr *CondE = builder.getCondition();
SVal CondV_untested = state->getSVal(CondE, builder.getLocationContext());
@@ -2046,16 +2398,16 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
// C permits "extern void v", and if you cast the address to a valid type,
// you can even do things with it. We simply pretend
assert(Ex->isGLValue() || VD->getType()->isVoidType());
const LocationContext *LocCtxt = Pred->getLocationContext();
const Decl *D = LocCtxt->getDecl();
- const auto *MD = D ? dyn_cast<CXXMethodDecl>(D) : nullptr;
+ const auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
const auto *DeclRefEx = dyn_cast<DeclRefExpr>(Ex);
- SVal V;
- bool IsReference;
+ Optional<std::pair<SVal, QualType>> VInfo;
+
if (AMgr.options.shouldInlineLambdas() && DeclRefEx &&
DeclRefEx->refersToEnclosingVariableOrCapture() && MD &&
MD->getParent()->isLambda()) {
@@ -2064,25 +2416,23 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField;
CXXRec->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField);
- const FieldDecl *FD = LambdaCaptureFields[VD];
- if (!FD) {
- // When a constant is captured, sometimes no corresponding field is
- // created in the lambda object.
- assert(VD->getType().isConstQualified());
- V = state->getLValue(VD, LocCtxt);
- IsReference = false;
- } else {
+
+ // Sema follows a sequence of complex rules to determine whether the
+ // variable should be captured.
+ if (const FieldDecl *FD = LambdaCaptureFields[VD]) {
Loc CXXThis =
- svalBuilder.getCXXThis(MD, LocCtxt->getCurrentStackFrame());
+ svalBuilder.getCXXThis(MD, LocCtxt->getStackFrame());
SVal CXXThisVal = state->getSVal(CXXThis);
- V = state->getLValue(FD, CXXThisVal);
- IsReference = FD->getType()->isReferenceType();
+ VInfo = std::make_pair(state->getLValue(FD, CXXThisVal), FD->getType());
}
- } else {
- V = state->getLValue(VD, LocCtxt);
- IsReference = VD->getType()->isReferenceType();
}
+ if (!VInfo)
+ VInfo = std::make_pair(state->getLValue(VD, LocCtxt), VD->getType());
+
+ SVal V = VInfo->first;
+ bool IsReference = VInfo->second->isReferenceType();
+
// For references, the 'lvalue' is the pointer address stored in the
// reference region.
if (IsReference) {
@@ -2096,13 +2446,13 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
ProgramPoint::PostLValueKind);
return;
}
- if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+ if (const auto *ED = dyn_cast<EnumConstantDecl>(D)) {
assert(!Ex->isGLValue());
SVal V = svalBuilder.makeIntVal(ED->getInitVal());
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V));
return;
}
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
SVal V = svalBuilder.getFunctionPointer(FD);
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
ProgramPoint::PostLValueKind);
@@ -2118,7 +2468,12 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
currBldrCtx->blockCount());
state = state->assume(V.castAs<DefinedOrUnknownSVal>(), true);
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
- ProgramPoint::PostLValueKind);
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+ if (isa<BindingDecl>(D)) {
+ // FIXME: proper support for bound declarations.
+ // For now, let's just prevent crashing.
return;
}
@@ -2151,9 +2506,17 @@ void ExprEngine::VisitArraySubscriptExpr(const ArraySubscriptExpr *A,
ProgramStateRef state = Node->getState();
if (IsGLValueLike) {
- SVal V = state->getLValue(A->getType(),
- state->getSVal(Idx, LCtx),
- state->getSVal(Base, LCtx));
+ QualType T = A->getType();
+
+ // One of the forbidden LValue types! We still need to have sensible
+ // symbolic locations to represent this stuff. Note that arithmetic on
+ // void pointers is a GCC extension.
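+ // E.g. GNU C allows 'void *p = ...; p[1];', treating 'sizeof(void)' as 1.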
+ if (T->isVoidType())
+ T = getContext().CharTy;
+
+ SVal V = state->getLValue(T,
+ state->getSVal(Idx, LCtx),
+ state->getSVal(Base, LCtx));
Bldr.generateNode(A, Node, state->BindExpr(A, LCtx, V), nullptr,
ProgramPoint::PostLValueKind);
} else if (IsVectorType) {
@@ -2171,41 +2534,36 @@ a vector and not a forbidden lvalue type");
/// VisitMemberExpr - Transfer function for member expressions.
void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
// FIXME: Prechecks eventually go in ::Visit().
ExplodedNodeSet CheckedSet;
getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, M, *this);
- ExplodedNodeSet EvalSet;
- ValueDecl *Member = M->getMemberDecl();
+ ExplodedNodeSet EvalSet;
+ ValueDecl *Member = M->getMemberDecl();
// Handle static member variables and enum constants accessed via
// member syntax.
- if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
- ExplodedNodeSet Dst;
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I != E; ++I) {
- VisitCommonDeclRefExpr(M, Member, Pred, EvalSet);
- }
+ if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
+ for (const auto I : CheckedSet)
+ VisitCommonDeclRefExpr(M, Member, I, EvalSet);
} else {
StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
ExplodedNodeSet Tmp;
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I != E; ++I) {
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ for (const auto I : CheckedSet) {
+ ProgramStateRef state = I->getState();
+ const LocationContext *LCtx = I->getLocationContext();
Expr *BaseExpr = M->getBase();
// Handle C++ method calls.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(Member)) {
if (MD->isInstance())
state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
SVal MDVal = svalBuilder.getFunctionPointer(MD);
state = state->BindExpr(M, LCtx, MDVal);
- Bldr.generateNode(M, *I, state);
+ Bldr.generateNode(M, I, state);
continue;
}
@@ -2213,7 +2571,7 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
- FieldDecl *field = cast<FieldDecl>(Member);
+ const auto *field = cast<FieldDecl>(Member);
SVal L = state->getLValue(field, baseExprVal);
if (M->isGLValue() || M->getType()->isArrayType()) {
@@ -2223,8 +2581,8 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
// pointers as soon as they are used.
if (!M->isGLValue()) {
assert(M->getType()->isArrayType());
- const ImplicitCastExpr *PE =
- dyn_cast<ImplicitCastExpr>((*I)->getParentMap().getParentIgnoreParens(M));
+ const auto *PE =
+ dyn_cast<ImplicitCastExpr>(I->getParentMap().getParentIgnoreParens(M));
if (!PE || PE->getCastKind() != CK_ArrayToPointerDecay) {
llvm_unreachable("should always be wrapped in ArrayToPointerDecay");
}
@@ -2237,11 +2595,11 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
L = UnknownVal();
}
- Bldr.generateNode(M, *I, state->BindExpr(M, LCtx, L), nullptr,
+ Bldr.generateNode(M, I, state->BindExpr(M, LCtx, L), nullptr,
ProgramPoint::PostLValueKind);
} else {
- Bldr.takeNodes(*I);
- evalLoad(Tmp, M, M, *I, state, L);
+ Bldr.takeNodes(I);
+ evalLoad(Tmp, M, M, I, state, L);
Bldr.addNodes(Tmp);
}
}
@@ -2261,10 +2619,9 @@ void ExprEngine::VisitAtomicExpr(const AtomicExpr *AE, ExplodedNode *Pred,
ExplodedNodeSet AfterInvalidateSet;
StmtNodeBuilder Bldr(AfterPreSet, AfterInvalidateSet, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = AfterPreSet.begin(), E = AfterPreSet.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ for (const auto I : AfterPreSet) {
+ ProgramStateRef State = I->getState();
+ const LocationContext *LCtx = I->getLocationContext();
SmallVector<SVal, 8> ValuesToInvalidate;
for (unsigned SI = 0, Count = AE->getNumSubExprs(); SI != Count; SI++) {
@@ -2281,7 +2638,7 @@ void ExprEngine::VisitAtomicExpr(const AtomicExpr *AE, ExplodedNode *Pred,
SVal ResultVal = UnknownVal();
State = State->BindExpr(AE, LCtx, ResultVal);
- Bldr.generateNode(AE, *I, State, nullptr,
+ Bldr.generateNode(AE, I, State, nullptr,
ProgramPoint::PostStmtKind);
}
@@ -2324,15 +2681,7 @@ ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
// Otherwise, find all symbols referenced by 'val' that we are tracking
// and stop tracking them.
- CollectReachableSymbolsCallback Scanner =
- State->scanReachableSymbols<CollectReachableSymbolsCallback>(Val);
- const InvalidatedSymbols &EscapedSymbols = Scanner.getSymbols();
- State = getCheckerManager().runCheckersForPointerEscape(State,
- EscapedSymbols,
- /*CallEvent*/ nullptr,
- PSK_EscapeOnBind,
- nullptr);
-
+ State = escapeValue(State, Val, PSK_EscapeOnBind);
return State;
}
@@ -2343,7 +2692,6 @@ ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call,
RegionAndSymbolInvalidationTraits &ITraits) {
-
if (!Invalidated || Invalidated->empty())
return State;
@@ -2357,16 +2705,13 @@ ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
// If the symbols were invalidated by a call, we want to find out which ones
// were invalidated directly due to being arguments to the call.
InvalidatedSymbols SymbolsDirectlyInvalidated;
- for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
- E = ExplicitRegions.end(); I != E; ++I) {
- if (const SymbolicRegion *R = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ for (const auto I : ExplicitRegions) {
+ if (const SymbolicRegion *R = I->StripCasts()->getAs<SymbolicRegion>())
SymbolsDirectlyInvalidated.insert(R->getSymbol());
}
InvalidatedSymbols SymbolsIndirectlyInvalidated;
- for (InvalidatedSymbols::const_iterator I=Invalidated->begin(),
- E = Invalidated->end(); I!=E; ++I) {
- SymbolRef sym = *I;
+ for (const auto &sym : *Invalidated) {
if (SymbolsDirectlyInvalidated.count(sym))
continue;
SymbolsIndirectlyInvalidated.insert(sym);
@@ -2390,7 +2735,6 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
ExplodedNode *Pred,
SVal location, SVal Val,
bool atDeclInit, const ProgramPoint *PP) {
-
const LocationContext *LC = Pred->getLocationContext();
PostStmt PS(StoreE, LC);
if (!PP)
@@ -2414,9 +2758,7 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
return;
}
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I!=E; ++I) {
- ExplodedNode *PredI = *I;
+ for (const auto PredI : CheckedSet) {
ProgramStateRef state = PredI->getState();
state = processPointerEscapedOnBind(state, location, Val, LC);
@@ -2465,8 +2807,8 @@ void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
if (location.isUndef())
return;
- for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
- evalBind(Dst, StoreE, *NI, location, Val, false);
+ for (const auto I : Tmp)
+ evalBind(Dst, StoreE, I, location, Val, false);
}
void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
@@ -2476,46 +2818,8 @@ void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
ProgramStateRef state,
SVal location,
const ProgramPointTag *tag,
- QualType LoadTy)
-{
+ QualType LoadTy) {
assert(!location.getAs<NonLoc>() && "location cannot be a NonLoc.");
-
- // Are we loading from a region? This actually results in two loads; one
- // to fetch the address of the referenced value and one to fetch the
- // referenced value.
- if (const TypedValueRegion *TR =
- dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
-
- QualType ValTy = TR->getValueType();
- if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
- static SimpleProgramPointTag
- loadReferenceTag(TagProviderName, "Load Reference");
- ExplodedNodeSet Tmp;
- evalLoadCommon(Tmp, NodeEx, BoundEx, Pred, state,
- location, &loadReferenceTag,
- getContext().getPointerType(RT->getPointeeType()));
-
- // Perform the load from the referenced value.
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
- state = (*I)->getState();
- location = state->getSVal(BoundEx, (*I)->getLocationContext());
- evalLoadCommon(Dst, NodeEx, BoundEx, *I, state, location, tag, LoadTy);
- }
- return;
- }
- }
-
- evalLoadCommon(Dst, NodeEx, BoundEx, Pred, state, location, tag, LoadTy);
-}
-
-void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
- const Expr *NodeEx,
- const Expr *BoundEx,
- ExplodedNode *Pred,
- ProgramStateRef state,
- SVal location,
- const ProgramPointTag *tag,
- QualType LoadTy) {
assert(NodeEx);
assert(BoundEx);
// Evaluate the location (checks for bad dereferences).
@@ -2529,9 +2833,9 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
return;
// Proceed with the load.
- for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
- state = (*NI)->getState();
- const LocationContext *LCtx = (*NI)->getLocationContext();
+ for (const auto I : Tmp) {
+ state = I->getState();
+ const LocationContext *LCtx = I->getLocationContext();
SVal V = UnknownVal();
if (location.isValid()) {
@@ -2540,7 +2844,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
V = state->getSVal(location.castAs<Loc>(), LoadTy);
}
- Bldr.generateNode(NodeEx, *NI, state->BindExpr(BoundEx, LCtx, V), tag,
+ Bldr.generateNode(NodeEx, I, state->BindExpr(BoundEx, LCtx, V), tag,
ProgramPoint::PostLoadKind);
}
}
@@ -2597,8 +2901,7 @@ void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
const Expr *Ex) {
StmtNodeBuilder Bldr(Src, Dst, *currBldrCtx);
- for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
- ExplodedNode *Pred = *I;
+ for (const auto Pred : Src) {
// Test if the previous node was as the same expression. This can happen
// when the expression fails to evaluate to anything meaningful and
// (as an optimization) we don't generate a node.
@@ -2648,7 +2951,7 @@ void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
for (const Expr *O : A->outputs()) {
SVal X = state->getSVal(O, Pred->getLocationContext());
- assert (!X.getAs<NonLoc>()); // Should be an Lval, or unknown, undef.
+ assert(!X.getAs<NonLoc>()); // Should be an Lval, or unknown, undef.
if (Optional<Loc> LV = X.getAs<Loc>())
state = state->bindLoc(*LV, UnknownVal(), Pred->getLocationContext());
@@ -2672,16 +2975,15 @@ static ExprEngine* GraphPrintCheckerState;
static SourceManager* GraphPrintSourceManager;
namespace llvm {
-template<>
-struct DOTGraphTraits<ExplodedNode*> :
- public DefaultDOTGraphTraits {
- DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+template<>
+struct DOTGraphTraits<ExplodedNode*> : public DefaultDOTGraphTraits {
+ DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
// FIXME: Since we do not cache error nodes in ExprEngine now, this does not
// work.
static std::string getNodeAttributes(const ExplodedNode *N, void*) {
- return "";
+ return {};
}
// De-duplicate some source location pretty-printing.
@@ -2694,15 +2996,8 @@ struct DOTGraphTraits<ExplodedNode*> :
<< "\\l";
}
}
- static void printLocation2(raw_ostream &Out, SourceLocation SLoc) {
- if (SLoc.isFileID() && GraphPrintSourceManager->isInMainFile(SLoc))
- Out << "line " << GraphPrintSourceManager->getExpansionLineNumber(SLoc);
- else
- SLoc.print(Out, *GraphPrintSourceManager);
- }
static std::string getNodeLabel(const ExplodedNode *N, void*){
-
std::string sbuf;
llvm::raw_string_ostream Out(sbuf);
@@ -2710,14 +3005,13 @@ struct DOTGraphTraits<ExplodedNode*> :
ProgramPoint Loc = N->getLocation();
switch (Loc.getKind()) {
- case ProgramPoint::BlockEntranceKind: {
+ case ProgramPoint::BlockEntranceKind:
Out << "Block Entrance: B"
<< Loc.castAs<BlockEntrance>().getBlock()->getBlockID();
break;
- }
case ProgramPoint::BlockExitKind:
- assert (false);
+ assert(false);
break;
case ProgramPoint::CallEnterKind:
@@ -2808,7 +3102,7 @@ struct DOTGraphTraits<ExplodedNode*> :
const Stmt *Label = E.getDst()->getLabel();
if (Label) {
- if (const CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
+ if (const auto *C = dyn_cast<CaseStmt>(Label)) {
Out << "\\lcase ";
LangOptions LO; // FIXME.
if (C->getLHS())
@@ -2822,7 +3116,7 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << ":";
}
else {
- assert (isa<DefaultStmt>(Label));
+ assert(isa<DefaultStmt>(Label));
Out << "\\ldefault:";
}
}
@@ -2863,6 +3157,8 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << "\\lPostStore\\l";
else if (Loc.getAs<PostLValue>())
Out << "\\lPostLValue\\l";
+ else if (Loc.getAs<PostAllocatorCall>())
+ Out << "\\lPostAllocatorCall\\l";
break;
}
@@ -2872,40 +3168,7 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << "\\|StateID: " << (const void*) state.get()
<< " NodeID: " << (const void*) N << "\\|";
- // Analysis stack backtrace.
- Out << "Location context stack (from current to outer):\\l";
- const LocationContext *LC = Loc.getLocationContext();
- unsigned Idx = 0;
- for (; LC; LC = LC->getParent(), ++Idx) {
- Out << Idx << ". (" << (const void *)LC << ") ";
- switch (LC->getKind()) {
- case LocationContext::StackFrame:
- if (const NamedDecl *D = dyn_cast<NamedDecl>(LC->getDecl()))
- Out << "Calling " << D->getQualifiedNameAsString();
- else
- Out << "Calling anonymous code";
- if (const Stmt *S = cast<StackFrameContext>(LC)->getCallSite()) {
- Out << " at ";
- printLocation2(Out, S->getLocStart());
- }
- break;
- case LocationContext::Block:
- Out << "Invoking block";
- if (const Decl *D = cast<BlockInvocationContext>(LC)->getBlockDecl()) {
- Out << " defined at ";
- printLocation2(Out, D->getLocStart());
- }
- break;
- case LocationContext::Scope:
- Out << "Entering scope";
- // FIXME: Add more info once ScopeContext is activated.
- break;
- }
- Out << "\\l";
- }
- Out << "\\l";
-
- state->printDOT(Out);
+ state->printDOT(Out, N->getLocationContext());
Out << "\\l";
@@ -2916,23 +3179,24 @@ struct DOTGraphTraits<ExplodedNode*> :
return Out.str();
}
};
-} // end llvm namespace
+
+} // namespace llvm
#endif
void ExprEngine::ViewGraph(bool trim) {
#ifndef NDEBUG
if (trim) {
- std::vector<const ExplodedNode*> Src;
+ std::vector<const ExplodedNode *> Src;
// Flush any outstanding reports to make sure we cover all the nodes.
// This does not cause them to get displayed.
- for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I)
- const_cast<BugType*>(*I)->FlushReports(BR);
+ for (const auto I : BR)
+ const_cast<BugType *>(I)->FlushReports(BR);
// Iterate through the reports and get their nodes.
for (BugReporter::EQClasses_iterator
EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
- ExplodedNode *N = const_cast<ExplodedNode*>(EI->begin()->getErrorNode());
+ const auto *N = const_cast<ExplodedNode *>(EI->begin()->getErrorNode());
if (N) Src.push_back(N);
}
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 3e7a50365f50..c7b1a9ac82f0 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -20,7 +20,7 @@ using namespace clang;
using namespace ento;
using llvm::APSInt;
-/// \brief Optionally conjure and return a symbol for offset when processing
+/// Optionally conjure and return a symbol for offset when processing
/// an expression \p Expression.
/// If \p Other is a location, conjure a symbol for \p Symbol
/// (offset) if it is unknown so that memory arithmetic always
@@ -257,13 +257,23 @@ ProgramStateRef ExprEngine::handleLValueBitCast(
ProgramStateRef state, const Expr* Ex, const LocationContext* LCtx,
QualType T, QualType ExTy, const CastExpr* CastE, StmtNodeBuilder& Bldr,
ExplodedNode* Pred) {
+ if (T->isLValueReferenceType()) {
+ assert(!CastE->getType()->isLValueReferenceType());
+ ExTy = getContext().getLValueReferenceType(ExTy);
+ } else if (T->isRValueReferenceType()) {
+ assert(!CastE->getType()->isRValueReferenceType());
+ ExTy = getContext().getRValueReferenceType(ExTy);
+ }
// Delegate to SValBuilder to process.
- SVal V = state->getSVal(Ex, LCtx);
- V = svalBuilder.evalCast(V, T, ExTy);
+ SVal OrigV = state->getSVal(Ex, LCtx);
+ SVal V = svalBuilder.evalCast(OrigV, T, ExTy);
// Negate the result if we're treating the boolean as a signed i1
if (CastE->getCastKind() == CK_BooleanToSignedIntegral)
V = evalMinus(V);
state = state->BindExpr(CastE, LCtx, V);
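+ // If the cast lost the original value (the result is Unknown while the
+ // operand was known), conservatively treat the operand as escaped so that
+ // checkers stop tracking it.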
+ if (V.isUnknown() && !OrigV.isUnknown()) {
+ state = escapeValue(state, OrigV, PSK_EscapeOther);
+ }
Bldr.generateNode(CastE, Pred, state);
return state;
@@ -580,24 +590,12 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
SVal InitVal = state->getSVal(InitEx, LC);
assert(DS->isSingleDecl());
- if (auto *CtorExpr = findDirectConstructorForCurrentCFGElement()) {
- assert(InitEx->IgnoreImplicit() == CtorExpr);
- (void)CtorExpr;
+ if (getObjectUnderConstruction(state, DS, LC)) {
+ state = finishObjectConstruction(state, DS, LC);
// We constructed the object directly in the variable.
// No need to bind anything.
B.generateNode(DS, UpdatedN, state);
} else {
- // We bound the temp obj region to the CXXConstructExpr. Now recover
- // the lazy compound value when the variable is not a reference.
- if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
- !VD->getType()->isReferenceType()) {
- if (Optional<loc::MemRegionVal> M =
- InitVal.getAs<loc::MemRegionVal>()) {
- InitVal = state->getSVal(M->getRegion());
- assert(InitVal.getAs<nonloc::LazyCompoundVal>());
- }
- }
-
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
if (InitVal.isUnknown()) {
@@ -760,7 +758,11 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
for (const ExplodedNode *N = Pred ; N ; N = *N->pred_begin()) {
ProgramPoint PP = N->getLocation();
if (PP.getAs<PreStmtPurgeDeadSymbols>() || PP.getAs<BlockEntrance>()) {
- assert(N->pred_size() == 1);
+ // If the node N has multiple predecessors P, it means that the successors
+ // of each P are all equivalent.
+ // In turn, that means that all predecessors P are equivalent in terms
+ // of observable behavior at N, and we can follow any of them.
+ // FIXME: a more robust solution which does not walk up the tree.
continue;
}
SrcBlock = PP.castAs<BlockEdge>().getSrc();
@@ -1062,6 +1064,7 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
// constant value. If the UnaryOperator has location type, create the
// constant with int type and pointer width.
SVal RHS;
+ SVal Result;
if (U->getType()->isAnyPointerType())
RHS = svalBuilder.makeArrayIndex(1);
@@ -1070,7 +1073,14 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
else
RHS = UnknownVal();
- SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
+ // The use of an operand of type bool with the ++ operator is deprecated
+ // but valid until C++17: the operand is simply set to true. Note that a
+ // '_Bool' operand is likewise set to true by the ++ operator in C.
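+ // E.g. 'bool b = true; ++b;' leaves 'b' equal to true rather than
+ // producing an overflowed or unknown value (pre-C++17).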
+ if (U->getType()->isBooleanType() && U->isIncrementOp())
+ Result = svalBuilder.makeTruthVal(true, U->getType());
+ else
+ Result = evalBinOp(state, Op, V2, RHS, U->getType());
// Conjure a new symbol if necessary to recover precision.
if (Result.isUnknown()){
@@ -1092,7 +1102,6 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
Constraint = svalBuilder.evalEQ(state, SymVal,
svalBuilder.makeZeroVal(U->getType()));
-
state = state->assume(Constraint, false);
assert(state);
}
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index dad93111966f..dc124fc3ff2d 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Analysis/ConstructionContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/ParentMap.h"
@@ -41,19 +42,30 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
const CallEvent &Call) {
SVal ThisVal;
bool AlwaysReturnsLValue;
+ const CXXRecordDecl *ThisRD = nullptr;
if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
assert(Ctor->getDecl()->isTrivial());
assert(Ctor->getDecl()->isCopyOrMoveConstructor());
ThisVal = Ctor->getCXXThisVal();
+ ThisRD = Ctor->getDecl()->getParent();
AlwaysReturnsLValue = false;
} else {
assert(cast<CXXMethodDecl>(Call.getDecl())->isTrivial());
assert(cast<CXXMethodDecl>(Call.getDecl())->getOverloadedOperator() ==
OO_Equal);
ThisVal = cast<CXXInstanceCall>(Call).getCXXThisVal();
+ ThisRD = cast<CXXMethodDecl>(Call.getDecl())->getParent();
AlwaysReturnsLValue = true;
}
+ assert(ThisRD);
+ if (ThisRD->isEmpty()) {
+ // Do nothing for empty classes. Otherwise we would retrieve an UnknownVal
+ // and bind it, and RegionStore would then think that the actual value
+ // in this region at this offset is unknown.
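+ // E.g. copying an instance of 'struct Empty {};' carries no data, so
+ // there is nothing to bind (illustrative).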
+ return;
+ }
+
const LocationContext *LCtx = Pred->getLocationContext();
ExplodedNodeSet Dst;
@@ -84,52 +96,50 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
}
-/// Returns a region representing the first element of a (possibly
-/// multi-dimensional) array.
-///
-/// On return, \p Ty will be set to the base type of the array.
-///
-/// If the type is not an array type at all, the original value is returned.
-static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
- QualType &Ty) {
+SVal ExprEngine::makeZeroElementRegion(ProgramStateRef State, SVal LValue,
+ QualType &Ty, bool &IsArray) {
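+ // E.g. for an 'S a[2][3]', this descends to the region of a[0][0], sets
+ // Ty to 'S' and IsArray to true; non-array types are returned unchanged.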
SValBuilder &SVB = State->getStateManager().getSValBuilder();
ASTContext &Ctx = SVB.getContext();
while (const ArrayType *AT = Ctx.getAsArrayType(Ty)) {
Ty = AT->getElementType();
LValue = State->getLValue(Ty, SVB.makeZeroArrayIndex(), LValue);
+ IsArray = true;
}
return LValue;
}
+std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
+ const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
+ const ConstructionContext *CC, EvalCallOptions &CallOpts) {
+ MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
-const MemRegion *
-ExprEngine::getRegionForConstructedObject(const CXXConstructExpr *CE,
- ExplodedNode *Pred) {
- const LocationContext *LCtx = Pred->getLocationContext();
- ProgramStateRef State = Pred->getState();
-
- // See if we're constructing an existing region by looking at the next
- // element in the CFG.
-
- if (auto Elem = findElementDirectlyInitializedByCurrentConstructor()) {
- if (Optional<CFGStmt> StmtElem = Elem->getAs<CFGStmt>()) {
- auto *DS = cast<DeclStmt>(StmtElem->getStmt());
- if (const auto *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
- if (Var->getInit() && Var->getInit()->IgnoreImplicit() == CE) {
- SVal LValue = State->getLValue(Var, LCtx);
- QualType Ty = Var->getType();
- LValue = makeZeroElementRegion(State, LValue, Ty);
- return LValue.getAsRegion();
- }
- }
- } else if (Optional<CFGInitializer> InitElem = Elem->getAs<CFGInitializer>()) {
- const CXXCtorInitializer *Init = InitElem->getInitializer();
+ // See if we're constructing an existing region by looking at the
+ // current construction context.
+ if (CC) {
+ switch (CC->getKind()) {
+ case ConstructionContext::CXX17ElidedCopyVariableKind:
+ case ConstructionContext::SimpleVariableKind: {
+ const auto *DSCC = cast<VariableConstructionContext>(CC);
+ const auto *DS = DSCC->getDeclStmt();
+ const auto *Var = cast<VarDecl>(DS->getSingleDecl());
+ SVal LValue = State->getLValue(Var, LCtx);
+ QualType Ty = Var->getType();
+ LValue =
+ makeZeroElementRegion(State, LValue, Ty, CallOpts.IsArrayCtorOrDtor);
+ State =
+ addObjectUnderConstruction(State, DSCC->getDeclStmt(), LCtx, LValue);
+ return std::make_pair(State, LValue);
+ }
+ case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
+ case ConstructionContext::SimpleConstructorInitializerKind: {
+ const auto *ICC = cast<ConstructorInitializerConstructionContext>(CC);
+ const auto *Init = ICC->getCXXCtorInitializer();
assert(Init->isAnyMemberInitializer());
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr =
- getSValBuilder().getCXXThis(CurCtor, LCtx->getCurrentStackFrame());
+ getSValBuilder().getCXXThis(CurCtor, LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
const ValueDecl *Field;
@@ -143,92 +153,134 @@ ExprEngine::getRegionForConstructedObject(const CXXConstructExpr *CE,
}
QualType Ty = Field->getType();
- FieldVal = makeZeroElementRegion(State, FieldVal, Ty);
- return FieldVal.getAsRegion();
+ FieldVal = makeZeroElementRegion(State, FieldVal, Ty,
+ CallOpts.IsArrayCtorOrDtor);
+ State = addObjectUnderConstruction(State, Init, LCtx, FieldVal);
+ return std::make_pair(State, FieldVal);
}
-
- // FIXME: This will eventually need to handle new-expressions as well.
- // Don't forget to update the pre-constructor initialization code in
- // ExprEngine::VisitCXXConstructExpr.
- }
- // If we couldn't find an existing region to construct into, assume we're
- // constructing a temporary.
- MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
- return MRMgr.getCXXTempObjectRegion(CE, LCtx);
-}
-
-/// Returns true if the initializer for \Elem can be a direct
-/// constructor.
-static bool canHaveDirectConstructor(CFGElement Elem){
- // DeclStmts and CXXCtorInitializers for fields can be directly constructed.
-
- if (Optional<CFGStmt> StmtElem = Elem.getAs<CFGStmt>()) {
- if (isa<DeclStmt>(StmtElem->getStmt())) {
- return true;
+ case ConstructionContext::NewAllocatedObjectKind: {
+ if (AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ const auto *NECC = cast<NewAllocatedObjectConstructionContext>(CC);
+ const auto *NE = NECC->getCXXNewExpr();
+ SVal V = *getObjectUnderConstruction(State, NE, LCtx);
+ if (const SubRegion *MR =
+ dyn_cast_or_null<SubRegion>(V.getAsRegion())) {
+ if (NE->isArray()) {
+ // TODO: In fact, we need to call the constructor for every
+ // allocated element, not just the first one!
+ CallOpts.IsArrayCtorOrDtor = true;
+ return std::make_pair(
+ State, loc::MemRegionVal(getStoreManager().GetElementZeroRegion(
+ MR, NE->getType()->getPointeeType())));
+ }
+ return std::make_pair(State, V);
+ }
+ // TODO: Detect when the allocator returns a null pointer.
+ // The constructor shall not be called in this case.
+ }
+ break;
}
- }
-
- if (Elem.getKind() == CFGElement::Initializer) {
- return true;
- }
-
- return false;
-}
+ case ConstructionContext::SimpleReturnedValueKind:
+ case ConstructionContext::CXX17ElidedCopyReturnedValueKind: {
+ // The temporary is to be managed by the parent stack frame.
+ // So build it in the parent stack frame if we're not in the
+ // top frame of the analysis.
+ const StackFrameContext *SFC = LCtx->getStackFrame();
+ if (const LocationContext *CallerLCtx = SFC->getParent()) {
+ auto RTC = (*SFC->getCallSiteBlock())[SFC->getIndex()]
+ .getAs<CFGCXXRecordTypedCall>();
+ if (!RTC) {
+ // We were unable to find the correct construction context for the
+ // call in the parent stack frame. This is equivalent to not being
+ // able to find a construction context at all.
+ break;
+ }
+ return prepareForObjectConstruction(
+ cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
+ RTC->getConstructionContext(), CallOpts);
+ } else {
+ // We are on the top frame of the analysis.
+ // TODO: What exactly happens when we are? Does the temporary object
+ // live long enough in the region store in this case? Would checkers
+ // think that this object immediately goes out of scope?
+ CallOpts.IsTemporaryCtorOrDtor = true;
+ SVal V = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
+ return std::make_pair(State, V);
+ }
+ llvm_unreachable("Unhandled return value construction context!");
+ }
+ case ConstructionContext::ElidedTemporaryObjectKind: {
+ assert(AMgr.getAnalyzerOptions().shouldElideConstructors());
+ const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
+ const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
+ const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
+ const CXXConstructExpr *CE = TCC->getConstructorAfterElision();
+
+ // Support pre-C++17 copy elision. We'll have the elidable copy
+ // constructor in the AST and in the CFG, but we'll skip it
+ // and construct directly into the final object. This call
+ // also sets the CallOpts flags for us.
+ SVal V;
+ std::tie(State, V) = prepareForObjectConstruction(
+ CE, State, LCtx, TCC->getConstructionContextAfterElision(), CallOpts);
+
+ // Remember that we've elided the constructor.
+ State = addObjectUnderConstruction(State, CE, LCtx, V);
+
+ // Remember that we've elided the destructor.
+ if (BTE)
+ State = elideDestructor(State, BTE, LCtx);
+
+ // Instead of materialization, shamelessly return
+ // the final object destination.
+ if (MTE)
+ State = addObjectUnderConstruction(State, MTE, LCtx, V);
+
+ return std::make_pair(State, V);
+ }
+ case ConstructionContext::SimpleTemporaryObjectKind: {
+ const auto *TCC = cast<SimpleTemporaryObjectConstructionContext>(CC);
+ const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
+ const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
+ SVal V = UnknownVal();
+
+ if (MTE) {
+ if (const ValueDecl *VD = MTE->getExtendingDecl()) {
+ assert(MTE->getStorageDuration() != SD_FullExpression);
+ if (!VD->getType()->isReferenceType()) {
+ // We're lifetime-extended by a surrounding aggregate.
+ // Automatic destructors aren't quite working in this case
+ // on the CFG side. We should warn the caller about that.
+ // FIXME: Is there a better way to retrieve this information from
+ // the MaterializeTemporaryExpr?
+ CallOpts.IsTemporaryLifetimeExtendedViaAggregate = true;
+ }
+ }
-Optional<CFGElement>
-ExprEngine::findElementDirectlyInitializedByCurrentConstructor() {
- const NodeBuilderContext &CurrBldrCtx = getBuilderContext();
- // See if we're constructing an existing region by looking at the next
- // element in the CFG.
- const CFGBlock *B = CurrBldrCtx.getBlock();
- assert(isa<CXXConstructExpr>(((*B)[currStmtIdx]).castAs<CFGStmt>().getStmt()));
- unsigned int NextStmtIdx = currStmtIdx + 1;
- if (NextStmtIdx >= B->size())
- return None;
-
- CFGElement Next = (*B)[NextStmtIdx];
-
- // Is this a destructor? If so, we might be in the middle of an assignment
- // to a local or member: look ahead one more element to see what we find.
- while (Next.getAs<CFGImplicitDtor>() && NextStmtIdx + 1 < B->size()) {
- ++NextStmtIdx;
- Next = (*B)[NextStmtIdx];
- }
+ if (MTE->getStorageDuration() == SD_Static ||
+ MTE->getStorageDuration() == SD_Thread)
+ V = loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
+ }
- if (canHaveDirectConstructor(Next))
- return Next;
+ if (V.isUnknown())
+ V = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
- return None;
-}
+ if (BTE)
+ State = addObjectUnderConstruction(State, BTE, LCtx, V);
-const CXXConstructExpr *
-ExprEngine::findDirectConstructorForCurrentCFGElement() {
- // Go backward in the CFG to see if the previous element (ignoring
- // destructors) was a CXXConstructExpr. If so, that constructor
- // was constructed directly into an existing region.
- // This process is essentially the inverse of that performed in
- // findElementDirectlyInitializedByCurrentConstructor().
- if (currStmtIdx == 0)
- return nullptr;
-
- const CFGBlock *B = getBuilderContext().getBlock();
- assert(canHaveDirectConstructor((*B)[currStmtIdx]));
-
- unsigned int PreviousStmtIdx = currStmtIdx - 1;
- CFGElement Previous = (*B)[PreviousStmtIdx];
-
- while (Previous.getAs<CFGImplicitDtor>() && PreviousStmtIdx > 0) {
- --PreviousStmtIdx;
- Previous = (*B)[PreviousStmtIdx];
- }
+ if (MTE)
+ State = addObjectUnderConstruction(State, MTE, LCtx, V);
- if (Optional<CFGStmt> PrevStmtElem = Previous.getAs<CFGStmt>()) {
- if (auto *CtorExpr = dyn_cast<CXXConstructExpr>(PrevStmtElem->getStmt())) {
- return CtorExpr;
+ CallOpts.IsTemporaryCtorOrDtor = true;
+ return std::make_pair(State, V);
+ }
}
}
-
- return nullptr;
+ // If we couldn't find an existing region to construct into, assume we're
+ // constructing a temporary. Notify the caller of our failure.
+ CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
+ return std::make_pair(
+ State, loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx)));
}
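
For orientation, a minimal sketch of analyzed source (hypothetical code, not
part of this patch) annotated with the ConstructionContext kind each
constructor resolves to in the switch above; under C++17 mandatory copy
elision the CXX17ElidedCopy* kinds appear instead of the simple ones:

  struct S {
    S() {}
    S(const S &) {}
  };

  struct Wrapper {
    S Member;
    Wrapper() : Member() {} // SimpleConstructorInitializerKind
  };

  S byValue() {
    return S();             // SimpleReturnedValueKind
  }

  void contexts() {
    S Local;                // SimpleVariableKind
    S *Heap = new S();      // NewAllocatedObjectKind
    (void)S();              // SimpleTemporaryObjectKind
    S Copy = byValue();     // ElidedTemporaryObjectKind (pre-C++17, when
                            // shouldElideConstructors() is on)
    delete Heap;
  }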
void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
@@ -237,21 +289,41 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
- const MemRegion *Target = nullptr;
+ SVal Target = UnknownVal();
+
+ if (Optional<SVal> ElidedTarget =
+ getObjectUnderConstruction(State, CE, LCtx)) {
+ // We've previously modeled an elidable constructor by pretending that it in
+ // fact constructs into the correct target. This constructor can therefore
+ // be skipped.
+ Target = *ElidedTarget;
+ StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
+ State = finishObjectConstruction(State, CE, LCtx);
+ if (auto L = Target.getAs<Loc>())
+ State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
+ Bldr.generateNode(CE, Pred, State);
+ return;
+ }
// FIXME: Handle arrays, which run the same constructor for every element.
// For now, we just run the first constructor (which should still invalidate
// the entire array).
+ EvalCallOptions CallOpts;
+ auto C = getCurrentCFGElement().getAs<CFGConstructor>();
+ assert(C || getCurrentCFGElement().getAs<CFGStmt>());
+ const ConstructionContext *CC = C ? C->getConstructionContext() : nullptr;
+
switch (CE->getConstructionKind()) {
case CXXConstructExpr::CK_Complete: {
- Target = getRegionForConstructedObject(CE, Pred);
+ std::tie(State, Target) =
+ prepareForObjectConstruction(CE, State, LCtx, CC, CallOpts);
break;
}
case CXXConstructExpr::CK_VirtualBase:
// Make sure we are not calling virtual base class initializers twice.
// Only the most-derived object should initialize virtual base classes.
- if (const Stmt *Outer = LCtx->getCurrentStackFrame()->getCallSite()) {
+ if (const Stmt *Outer = LCtx->getStackFrame()->getCallSite()) {
const CXXConstructExpr *OuterCtor = dyn_cast<CXXConstructExpr>(Outer);
if (OuterCtor) {
switch (OuterCtor->getConstructionKind()) {
@@ -281,48 +353,59 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// otherwise always available during construction.
if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(CE))) {
MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
- Target = MRMgr.getCXXTempObjectRegion(CE, LCtx);
+ Target = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(CE, LCtx));
+ CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
break;
}
// FALLTHROUGH
case CXXConstructExpr::CK_Delegating: {
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
- LCtx->getCurrentStackFrame());
+ LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
if (CE->getConstructionKind() == CXXConstructExpr::CK_Delegating) {
- Target = ThisVal.getAsRegion();
+ Target = ThisVal;
} else {
// Cast to the base type.
bool IsVirtual =
(CE->getConstructionKind() == CXXConstructExpr::CK_VirtualBase);
SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, CE->getType(),
IsVirtual);
- Target = BaseVal.getAsRegion();
+ Target = BaseVal;
}
break;
}
}
+ if (State != Pred->getState()) {
+ static SimpleProgramPointTag T("ExprEngine",
+ "Prepare for object construction");
+ ExplodedNodeSet DstPrepare;
+ StmtNodeBuilder BldrPrepare(Pred, DstPrepare, *currBldrCtx);
+ BldrPrepare.generateNode(CE, Pred, State, &T, ProgramPoint::PreStmtKind);
+ assert(DstPrepare.size() <= 1);
+ if (DstPrepare.size() == 0)
+ return;
+ Pred = *BldrPrepare.begin();
+ }
+
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXConstructorCall> Call =
- CEMgr.getCXXConstructorCall(CE, Target, State, LCtx);
+ CEMgr.getCXXConstructorCall(CE, Target.getAsRegion(), State, LCtx);
ExplodedNodeSet DstPreVisit;
getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, CE, *this);
+ // FIXME: Is it possible and/or useful to do this before PreStmt?
ExplodedNodeSet PreInitialized;
{
StmtNodeBuilder Bldr(DstPreVisit, PreInitialized, *currBldrCtx);
- if (CE->requiresZeroInitialization()) {
- // Type of the zero doesn't matter.
- SVal ZeroVal = svalBuilder.makeZeroVal(getContext().CharTy);
-
- for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
- E = DstPreVisit.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
+ for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
+ E = DstPreVisit.end();
+ I != E; ++I) {
+ ProgramStateRef State = (*I)->getState();
+ if (CE->requiresZeroInitialization()) {
// FIXME: Once we properly handle constructors in new-expressions, we'll
// need to invalidate the region before setting a default value, to make
// sure there aren't any lingering bindings around. This probably needs
@@ -335,10 +418,11 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// actually make things worse. Placement new makes this tricky as well,
// since it's then possible to be initializing one part of a multi-
// dimensional array.
- State = State->bindDefault(loc::MemRegionVal(Target), ZeroVal, LCtx);
- Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
- ProgramPoint::PreStmtKind);
+ State = State->bindDefaultZero(Target, LCtx);
}
+
+ Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
+ ProgramPoint::PreStmtKind);
}
}
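
The zero-initialization branch corresponds to value-initialization in the
analyzed code; a small sketch (hypothetical code) of when bindDefaultZero
fires before the constructor call is modeled:

  struct POD {
    int X;
    int Y;
  };

  void zeroInit() {
    POD A = POD(); // value-initialization: requiresZeroInitialization() is
                   // true, so the region of 'A' is bound to zero first
    POD B;         // default-initialization: no zeroing, the members start
                   // out undefined
    (void)A;
    (void)B;
  }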
@@ -349,10 +433,9 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
ExplodedNodeSet DstEvaluated;
StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
- bool IsArray = isa<ElementRegion>(Target);
if (CE->getConstructor()->isTrivial() &&
CE->getConstructor()->isCopyOrMoveConstructor() &&
- !IsArray) {
+ !CallOpts.IsArrayCtorOrDtor) {
// FIXME: Handle other kinds of trivial constructors as well.
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
@@ -361,10 +444,10 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
} else {
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call);
+ defaultEvalCall(Bldr, *I, *Call, CallOpts);
}
- // If the CFG was contructed without elements for temporary destructors
+ // If the CFG was constructed without elements for temporary destructors
// and the just-called constructor created a temporary object then
// stop exploration if the temporary object has a noreturn constructor.
// This can lose coverage because the destructor, if it were present
@@ -373,20 +456,30 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
// paths when no-return temporary destructors are used for assertions.
const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
- const MemRegion *Target = Call->getCXXThisVal().getAsRegion();
- if (Target && isa<CXXTempObjectRegion>(Target) &&
- Call->getDecl()->getParent()->isAnyDestructorNoReturn()) {
+ const MemRegion *Target = Call->getCXXThisVal().getAsRegion();
+ if (Target && isa<CXXTempObjectRegion>(Target) &&
+ Call->getDecl()->getParent()->isAnyDestructorNoReturn()) {
+
+ // If we've inlined the constructor, then DstEvaluated would be empty.
+ // In this case we still want a sink, which could be implemented
+ // in processCallExit. But we don't have that implemented at the moment,
+ // so if you hit this assertion, see if you can avoid inlining
+ // the respective constructor when analyzer-config cfg-temporary-dtors
+ // is set to false.
+ // Otherwise there's nothing wrong with inlining such constructor.
+ assert(!DstEvaluated.empty() &&
+ "We should not have inlined this constructor!");
for (ExplodedNode *N : DstEvaluated) {
Bldr.generateSink(CE, N, N->getState());
}
- // There is no need to run the PostCall and PostStmtchecker
+ // There is no need to run the PostCall and PostStmt checker
// callbacks because we just generated sinks on all nodes in the
// frontier.
return;
}
- }
+ }
ExplodedNodeSet DstPostCall;
getCheckerManager().runCheckersForPostCall(DstPostCall, DstEvaluated,
@@ -399,19 +492,11 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
const Stmt *S,
bool IsBaseDtor,
ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
+ ExplodedNodeSet &Dst,
+ const EvalCallOptions &CallOpts) {
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
- // FIXME: We need to run the same destructor on every element of the array.
- // This workaround will just run the first destructor (which will still
- // invalidate the entire array).
- SVal DestVal = UnknownVal();
- if (Dest)
- DestVal = loc::MemRegionVal(Dest);
- DestVal = makeZeroElementRegion(State, DestVal, ObjectType);
- Dest = DestVal.getAsRegion();
-
const CXXRecordDecl *RecordDecl = ObjectType->getAsCXXRecordDecl();
assert(RecordDecl && "Only CXXRecordDecls should have destructors");
const CXXDestructorDecl *DtorDecl = RecordDecl->getDestructor();
@@ -432,7 +517,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call);
+ defaultEvalCall(Bldr, *I, *Call, CallOpts);
ExplodedNodeSet DstPostCall;
getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
@@ -455,15 +540,58 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
*Call, *this);
- ExplodedNodeSet DstInvalidated;
- StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call);
- getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
- *Call, *this);
-}
+ ExplodedNodeSet DstPostCall;
+ StmtNodeBuilder CallBldr(DstPreCall, DstPostCall, *currBldrCtx);
+ for (auto I : DstPreCall) {
+ // FIXME: Provide evalCall for checkers?
+ defaultEvalCall(CallBldr, I, *Call);
+ }
+ // If the call is inlined, DstPostCall will be empty and we bail out now.
+
+ // Store return value of operator new() for future use, until the actual
+ // CXXNewExpr gets processed.
+ ExplodedNodeSet DstPostValue;
+ StmtNodeBuilder ValueBldr(DstPostCall, DstPostValue, *currBldrCtx);
+ for (auto I : DstPostCall) {
+ // FIXME: Because CNE serves as the "call site" for the allocator (due to
+ // lack of a better expression in the AST), the conjured return value symbol
+ // is going to be of the same type (C++ object pointer type). Technically
+ // this is not correct because the operator new's prototype always says that
+ // it returns a 'void *'. So we should change the type of the symbol,
+ // and then evaluate the cast over the symbolic pointer from 'void *' to
+ // the object pointer type. But without changing the symbol's type,
+ // evaluating the no-op symbolic cast over it breaks too much, so we
+ // skip it for now.
+ ProgramStateRef State = I->getState();
+ SVal RetVal = State->getSVal(CNE, LCtx);
+
+ // If this allocation function is not declared as non-throwing, failures
+ // /must/ be signalled by exceptions, and thus the return value will never
+ // be NULL. -fno-exceptions does not influence this semantics.
+ // FIXME: GCC has a -fcheck-new option, which forces it to consider the case
+ // where new can return NULL. If we end up supporting that option, we can
+ // consider adding a check for it here.
+ // C++11 [basic.stc.dynamic.allocation]p3.
+ if (const FunctionDecl *FD = CNE->getOperatorNew()) {
+ QualType Ty = FD->getType();
+ if (const auto *ProtoType = Ty->getAs<FunctionProtoType>())
+ if (!ProtoType->isNothrow())
+ State = State->assume(RetVal.castAs<DefinedOrUnknownSVal>(), true);
+ }
+ ValueBldr.generateNode(
+ CNE, I, addObjectUnderConstruction(State, CNE, LCtx, RetVal));
+ }
+
+ ExplodedNodeSet DstPostPostCallCallback;
+ getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
+ DstPostValue, *Call, *this);
+ for (auto I : DstPostPostCallCallback) {
+ getCheckerManager().runCheckersForNewAllocator(
+ CNE, *getObjectUnderConstruction(I->getState(), CNE, LCtx), Dst, I,
+ *this);
+ }
+}
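
The non-null assumption above is observable in analyzed code along these
lines (hypothetical example):

  #include <new>

  struct S { int X; };

  void allocate() {
    S *A = new S;                 // throwing form: the return value is
                                  // assumed non-null, per C++11
                                  // [basic.stc.dynamic.allocation]p3
    A->X = 1;                     // no null-dereference on this path

    S *B = new (std::nothrow) S;  // nothrow form: no assumption is added,
    if (B)                        // so this check is meaningful
      B->X = 1;
    delete A;
    delete B;
  }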
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
@@ -474,69 +602,74 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
unsigned blockCount = currBldrCtx->blockCount();
const LocationContext *LCtx = Pred->getLocationContext();
- DefinedOrUnknownSVal symVal = UnknownVal();
+ SVal symVal = UnknownVal();
FunctionDecl *FD = CNE->getOperatorNew();
- bool IsStandardGlobalOpNewFunction = false;
- if (FD && !isa<CXXMethodDecl>(FD) && !FD->isVariadic()) {
- if (FD->getNumParams() == 2) {
- QualType T = FD->getParamDecl(1)->getType();
- if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
- // NoThrow placement new behaves as a standard new.
- IsStandardGlobalOpNewFunction = II->getName().equals("nothrow_t");
- }
- else
- // Placement forms are considered non-standard.
- IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
+ bool IsStandardGlobalOpNewFunction =
+ FD->isReplaceableGlobalAllocationFunction();
+
+ ProgramStateRef State = Pred->getState();
+
+ // Retrieve the stored operator new() return value.
+ if (AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ symVal = *getObjectUnderConstruction(State, CNE, LCtx);
+ State = finishObjectConstruction(State, CNE, LCtx);
}
// We assume all standard global 'operator new' functions allocate memory in
// heap. We realize this is an approximation that might not correctly model
// a custom global allocator.
- if (IsStandardGlobalOpNewFunction)
- symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
- else
- symVal = svalBuilder.conjureSymbolVal(nullptr, CNE, LCtx, CNE->getType(),
- blockCount);
+ if (symVal.isUnknown()) {
+ if (IsStandardGlobalOpNewFunction)
+ symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
+ else
+ symVal = svalBuilder.conjureSymbolVal(nullptr, CNE, LCtx, CNE->getType(),
+ blockCount);
+ }
- ProgramStateRef State = Pred->getState();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
- // Invalidate placement args.
- // FIXME: Once we figure out how we want allocators to work,
- // we should be using the usual pre-/(default-)eval-/post-call checks here.
- State = Call->invalidateRegions(blockCount);
- if (!State)
- return;
+ if (!AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ // Invalidate placement args.
+ // FIXME: Once we figure out how we want allocators to work,
+ // we should be using the usual pre-/(default-)eval-/post-call checks here.
+ State = Call->invalidateRegions(blockCount);
+ if (!State)
+ return;
- // If this allocation function is not declared as non-throwing, failures
- // /must/ be signalled by exceptions, and thus the return value will never be
- // NULL. -fno-exceptions does not influence this semantics.
- // FIXME: GCC has a -fcheck-new option, which forces it to consider the case
- // where new can return NULL. If we end up supporting that option, we can
- // consider adding a check for it here.
- // C++11 [basic.stc.dynamic.allocation]p3.
- if (FD) {
- QualType Ty = FD->getType();
- if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
- if (!ProtoType->isNothrow(getContext()))
- State = State->assume(symVal, true);
+ // If this allocation function is not declared as non-throwing, failures
+ // /must/ be signalled by exceptions, and thus the return value will never
+ // be NULL. -fno-exceptions does not influence this semantics.
+ // FIXME: GCC has a -fcheck-new option, which forces it to consider the case
+ // where new can return NULL. If we end up supporting that option, we can
+ // consider adding a check for it here.
+ // C++11 [basic.stc.dynamic.allocation]p3.
+ if (FD) {
+ QualType Ty = FD->getType();
+ if (const auto *ProtoType = Ty->getAs<FunctionProtoType>())
+ if (!ProtoType->isNothrow())
+ if (auto dSymVal = symVal.getAs<DefinedOrUnknownSVal>())
+ State = State->assume(*dSymVal, true);
+ }
}
StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ SVal Result = symVal;
+
if (CNE->isArray()) {
// FIXME: allocating an array requires simulating the constructors.
// For now, just return a symbolicated region.
- const SubRegion *NewReg =
- symVal.castAs<loc::MemRegionVal>().getRegionAs<SubRegion>();
- QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
- const ElementRegion *EleReg =
- getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
- State = State->BindExpr(CNE, Pred->getLocationContext(),
- loc::MemRegionVal(EleReg));
+ if (const SubRegion *NewReg =
+ dyn_cast_or_null<SubRegion>(symVal.getAsRegion())) {
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+ const ElementRegion *EleReg =
+ getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+ Result = loc::MemRegionVal(EleReg);
+ }
+ State = State->BindExpr(CNE, Pred->getLocationContext(), Result);
Bldr.generateNode(CNE, Pred, State);
return;
}
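
For array new the bound value is the zeroth element of the symbolic heap
region, e.g. (hypothetical example; per the FIXME above only the first
constructor is modeled):

  struct T { int V; };

  void arrayNew() {
    T *P = new T[4]; // 'P' is bound to element 0 of the allocated region
    P[0].V = 42;
    delete[] P;
  }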
@@ -545,7 +678,6 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// CXXNewExpr, we need to make sure that the constructed object is not
// immediately invalidated here. (The placement call should happen before
// the constructor call anyway.)
- SVal Result = symVal;
if (FD && FD->isReservedGlobalPlacementOperator()) {
// Non-array placement new should always return the placement location.
SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
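
The reserved placement form handled here returns the placement address
itself rather than a fresh symbol; in analyzed code (hypothetical example):

  #include <new>

  struct S { int X; };

  alignas(S) char Buffer[sizeof(S)];

  void placement() {
    S *P = new (Buffer) S(); // non-array placement new: 'P' is simply the
                             // address of 'Buffer'
    P->X = 1;
    P->~S();
  }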
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index caf86b26b66d..3ee67f3d6882 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -15,8 +15,8 @@
#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
@@ -74,15 +74,14 @@ static std::pair<const Stmt*,
const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
const Stmt *S = nullptr;
const CFGBlock *Blk = nullptr;
- const StackFrameContext *SF =
- Node->getLocation().getLocationContext()->getCurrentStackFrame();
+ const StackFrameContext *SF = Node->getStackFrame();
// Back up through the ExplodedGraph until we reach a statement node in this
// stack frame.
while (Node) {
const ProgramPoint &PP = Node->getLocation();
- if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
+ if (PP.getStackFrame() == SF) {
if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
S = SP->getStmt();
break;
@@ -121,7 +120,7 @@ static std::pair<const Stmt*,
/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
-/// is devirtualized, and the overridding method has a covariant (more specific)
+/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
@@ -193,23 +192,6 @@ static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}
-/// Returns true if the CXXConstructExpr \p E was intended to construct a
-/// prvalue for the region in \p V.
-///
-/// Note that we can't just test for rvalue vs. glvalue because
-/// CXXConstructExprs embedded in DeclStmts and initializers are considered
-/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
-static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
- if (E->isGLValue())
- return false;
-
- const MemRegion *MR = V.getAsRegion();
- if (!MR)
- return false;
-
- return isa<CXXTempObjectRegion>(MR);
-}
-
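
The removed helper distinguished cases like the following, which the
construction-context machinery now handles uniformly (hypothetical example):

  struct S {
    S() {}
    S(const S &) {}
  };

  void prvalues() {
    S Local = S();      // the inner CXXConstructExpr is a prvalue in the
                        // AST, yet it constructs directly into the region
                        // of 'Local'
    const S &Ref = S(); // a genuine temporary, lifetime-extended by 'Ref'
    (void)Ref;
  }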
/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
@@ -221,13 +203,12 @@ static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
// Step 1 CEBNode was generated before the call.
PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
- const StackFrameContext *calleeCtx =
- CEBNode->getLocationContext()->getCurrentStackFrame();
+ const StackFrameContext *calleeCtx = CEBNode->getStackFrame();
// The parent context might not be a stack frame, so make sure we
// look up the first enclosing stack frame.
const StackFrameContext *callerCtx =
- calleeCtx->getParent()->getCurrentStackFrame();
+ calleeCtx->getParent()->getStackFrame();
const Stmt *CE = calleeCtx->getCallSite();
ProgramStateRef state = CEBNode->getState();
@@ -269,13 +250,24 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
loc::MemRegionVal This =
svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
SVal ThisV = state->getSVal(This);
-
- // If the constructed object is a temporary prvalue, get its bindings.
- if (isTemporaryPRValue(CCE, ThisV))
- ThisV = state->getSVal(ThisV.castAs<Loc>());
-
+ ThisV = state->getSVal(ThisV.castAs<Loc>());
state = state->BindExpr(CCE, callerCtx, ThisV);
}
+
+ if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
+ // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
+ // while to reach the actual CXXNewExpr element from here, so keep the
+ // region for later use.
+ // Additionally cast the return value of the inlined operator new
+ // (which is of type 'void *') to the correct object type.
+ SVal AllocV = state->getSVal(CNE, callerCtx);
+ AllocV = svalBuilder.evalCast(
+ AllocV, CNE->getType(),
+ getContext().getPointerType(getContext().VoidTy));
+
+ state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
+ AllocV);
+ }
}
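
A class-specific allocator makes the cast visible; a sketch of analyzed code
(hypothetical, assuming the allocator body is available and
mayInlineCXXAllocator() is enabled):

  #include <cstddef>
  #include <cstdlib>

  struct S {
    int X;
    void *operator new(std::size_t Size) {
      return std::malloc(Size); // inlined; returns 'void *'
    }
  };

  void customAllocator() {
    S *P = new S; // on call exit the inlined allocator's 'void *' return
                  // value is cast to 'S *' and kept as an object under
                  // construction until the CXXNewExpr is evaluated
    P->X = 0;
  }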
// Step 3: BindedRetNode -> CleanedNodes
@@ -315,6 +307,7 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
CallExitEnd Loc(calleeCtx, callerCtx);
bool isNew;
ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
+
ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
CEENode->addPredecessor(*I, G);
if (!isNew)
@@ -331,16 +324,32 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
ExplodedNodeSet DstPostCall;
- getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
- *UpdatedCall, *this,
- /*WasInlined=*/true);
-
+ if (const CXXNewExpr *CNE = dyn_cast_or_null<CXXNewExpr>(CE)) {
+ ExplodedNodeSet DstPostPostCallCallback;
+ getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
+ CEENode, *UpdatedCall, *this,
+ /*WasInlined=*/true);
+ for (auto I : DstPostPostCallCallback) {
+ getCheckerManager().runCheckersForNewAllocator(
+ CNE,
+ *getObjectUnderConstruction(I->getState(), CNE,
+ calleeCtx->getParent()),
+ DstPostCall, I, *this,
+ /*WasInlined=*/true);
+ }
+ } else {
+ getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
+ *UpdatedCall, *this,
+ /*WasInlined=*/true);
+ }
ExplodedNodeSet Dst;
if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
*this,
/*WasInlined=*/true);
- } else if (CE) {
+ } else if (CE &&
+ !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
+ AMgr.getAnalyzerOptions().mayInlineCXXAllocator())) {
getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
*this, /*WasInlined=*/true);
} else {
@@ -407,7 +416,7 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
assert(D);
const LocationContext *CurLC = Pred->getLocationContext();
- const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+ const StackFrameContext *CallerSFC = CurLC->getStackFrame();
const LocationContext *ParentOfCallee = CallerSFC;
if (Call.getKind() == CE_Block &&
!cast<BlockCall>(Call).isConversionFromLambda()) {
@@ -542,19 +551,45 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
}
} else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
SVal ThisV = C->getCXXThisVal();
-
- // If the constructed object is a temporary prvalue, get its bindings.
- if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
- ThisV = State->getSVal(ThisV.castAs<Loc>());
-
+ ThisV = State->getSVal(ThisV.castAs<Loc>());
return State->BindExpr(E, LCtx, ThisV);
}
- // Conjure a symbol if the return value is unknown.
+ SVal R;
QualType ResultTy = Call.getResultType();
- SValBuilder &SVB = getSValBuilder();
unsigned Count = currBldrCtx->blockCount();
- SVal R = SVB.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
+ if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
+ // Conjure a temporary if the function returns an object by value.
+ SVal Target;
+ assert(RTC->getStmt() == Call.getOriginExpr());
+ EvalCallOptions CallOpts; // FIXME: We won't really need those.
+ std::tie(State, Target) =
+ prepareForObjectConstruction(Call.getOriginExpr(), State, LCtx,
+ RTC->getConstructionContext(), CallOpts);
+ assert(Target.getAsRegion());
+ // Invalidate the region so that it doesn't look uninitialized. Don't notify
+ // the checkers.
+ State = State->invalidateRegions(Target.getAsRegion(), E, Count, LCtx,
+ /* CausedByPointerEscape=*/false, nullptr,
+ &Call, nullptr);
+
+ R = State->getSVal(Target.castAs<Loc>(), E->getType());
+ } else {
+ // Conjure a symbol if the return value is unknown.
+
+ // See if we need to conjure a heap pointer instead of
+ // a regular unknown pointer.
+ bool IsHeapPointer = false;
+ if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
+ if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
+ // FIXME: Delegate this to evalCall in MallocChecker?
+ IsHeapPointer = true;
+ }
+
+ R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)
+ : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
+ Count);
+ }
return State->BindExpr(E, LCtx, R);
}
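
Both branches above can be seen on analyzed code like this (hypothetical
example; 'opaque' has no body available, so its evaluation is conservative):

  struct Big { int A, B; };

  Big opaque(); // not inlined

  void conservative() {
    Big X = opaque(); // CFGCXXRecordTypedCall: the construction target is
                      // invalidated and read back instead of binding a
                      // conjured symbol of record type
    int *P = new int; // replaceable global operator new: the conjured
                      // return value is a heap symbol rather than an
                      // ordinary unknown pointer
    (void)X;
    delete P;
  }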
@@ -570,17 +605,12 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}
-enum CallInlinePolicy {
- CIP_Allowed,
- CIP_DisallowedOnce,
- CIP_DisallowedAlways
-};
-
-static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
- const ExplodedNode *Pred,
- AnalyzerOptions &Opts) {
+ExprEngine::CallInlinePolicy
+ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
+ AnalyzerOptions &Opts,
+ const ExprEngine::EvalCallOptions &CallOpts) {
const LocationContext *CurLC = Pred->getLocationContext();
- const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+ const StackFrameContext *CallerSFC = CurLC->getStackFrame();
switch (Call.getKind()) {
case CE_Function:
case CE_Block:
@@ -596,22 +626,24 @@ static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
+ const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
+
+ auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
+ const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
+ : nullptr;
+
+ if (CC && isa<NewAllocatedObjectConstructionContext>(CC) &&
+ !Opts.mayInlineCXXAllocator())
+ return CIP_DisallowedOnce;
+
// FIXME: We don't handle constructors or destructors for arrays properly.
// Even once we do, we still need to be careful about implicitly-generated
// initializers for array fields in default move/copy constructors.
- const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
- if (Target && isa<ElementRegion>(Target))
+ // We still allow construction into ElementRegion targets when they don't
+ // represent array elements.
+ if (CallOpts.IsArrayCtorOrDtor)
return CIP_DisallowedOnce;
- // FIXME: This is a hack. We don't use the correct region for a new
- // expression, so if we inline the constructor its result will just be
- // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
- // and the longer-term possible fix is discussed in PR12014.
- const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
- if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
- if (isa<CXXNewExpr>(Parent))
- return CIP_DisallowedOnce;
-
// Inlining constructors requires including initializers in the CFG.
const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
@@ -626,12 +658,25 @@ static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
return CIP_DisallowedAlways;
- // FIXME: This is a hack. We don't handle temporary destructors
- // right now, so we shouldn't inline their constructors.
- if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
- if (!Target || !isa<DeclRegion>(Target))
+ if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
+ // If we don't handle temporary destructors, we shouldn't inline
+ // their constructors.
+ if (CallOpts.IsTemporaryCtorOrDtor &&
+ !Opts.includeTemporaryDtorsInCFG())
+ return CIP_DisallowedOnce;
+
+ // If we did not find the correct this-region, it would be pointless
+ // to inline the constructor. Instead we will simply invalidate
+ // the fake temporary target.
+ if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
return CIP_DisallowedOnce;
+ // If the temporary is lifetime-extended by binding it to a reference-type
+ // field within an aggregate, automatic destructors don't work properly.
+ if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
+ return CIP_DisallowedOnce;
+ }
+
break;
}
case CE_CXXDestructor: {
@@ -643,13 +688,19 @@ static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
(void)ADC;
- const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
-
// FIXME: We don't handle constructors or destructors for arrays properly.
- const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
- if (Target && isa<ElementRegion>(Target))
+ if (CallOpts.IsArrayCtorOrDtor)
+ return CIP_DisallowedOnce;
+
+ // Allow disabling temporary destructor inlining with a separate option.
+ if (CallOpts.IsTemporaryCtorOrDtor && !Opts.mayInlineCXXTemporaryDtors())
return CIP_DisallowedOnce;
+ // If we did not find the correct this-region, it would be pointless
+ // to inline the destructor. Instead we will simply invalidate
+ // the fake temporary target.
+ if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
+ return CIP_DisallowedOnce;
break;
}
case CE_CXXAllocator:
@@ -731,8 +782,9 @@ static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
-static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
- AnalyzerOptions &Opts) {
+static bool mayInlineDecl(AnalysisManager &AMgr,
+ AnalysisDeclContext *CalleeADC) {
+ AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
// FIXME: Do not inline variadic calls.
if (CallEvent::isVariadic(CalleeADC->getDecl()))
return false;
@@ -755,7 +807,7 @@ static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
// Conditionally control the inlining of methods on objects that look
// like C++ containers.
if (!Opts.mayInlineCXXContainerMethods())
- if (!Ctx.getSourceManager().isInMainFile(FD->getLocation()))
+ if (!AMgr.isInCodeFile(FD->getLocation()))
if (isContainerMethod(Ctx, FD))
return false;
@@ -788,7 +840,8 @@ static bool mayInlineDecl(AnalysisDeclContext *CalleeADC,
}
bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
- const ExplodedNode *Pred) {
+ const ExplodedNode *Pred,
+ const EvalCallOptions &CallOpts) {
if (!D)
return false;
@@ -797,14 +850,6 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
- // Temporary object destructor processing is currently broken, so we never
- // inline them.
- // FIXME: Remove this once temp destructors are working.
- if (isa<CXXDestructorCall>(Call)) {
- if ((*currBldrCtx->getBlock())[currStmtIdx].getAs<CFGTemporaryDtor>())
- return false;
- }
-
// The auto-synthesized bodies are essential to inline as they are
// usually small and commonly used. Note: we should do this check early on to
// ensure we always inline these calls.
@@ -823,7 +868,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
} else {
// We haven't actually checked the static properties of this function yet.
// Do that now, and record our decision in the function summaries.
- if (mayInlineDecl(CalleeADC, Opts)) {
+ if (mayInlineDecl(getAnalysisManager(), CalleeADC)) {
Engine.FunctionSummaries->markMayInline(D);
} else {
Engine.FunctionSummaries->markShouldNotInline(D);
@@ -835,7 +880,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
// FIXME: this checks both static and dynamic properties of the call, which
// means we're redoing a bit of work that could be cached in the function
// summary.
- CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
+ CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
if (CIP != CIP_Allowed) {
if (CIP == CIP_DisallowedAlways) {
assert(!MayInline.hasValue() || MayInline.getValue());
@@ -887,7 +932,8 @@ static bool isTrivialObjectAssignment(const CallEvent &Call) {
}
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
- const CallEvent &CallTemplate) {
+ const CallEvent &CallTemplate,
+ const EvalCallOptions &CallOpts) {
// Make sure we have the most recent state attached to the call.
ProgramStateRef State = Pred->getState();
CallEventRef<> Call = CallTemplate.cloneWithState(State);
@@ -910,7 +956,7 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
} else {
RuntimeDefinition RD = Call->getRuntimeDefinition();
const Decl *D = RD.getDecl();
- if (shouldInlineCall(*Call, D, Pred)) {
+ if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
AnalyzerOptions &Options = getAnalysisManager().options;
diff --git a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index f5e64f4a5a8c..d76b9cbcfaca 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -42,6 +42,47 @@ void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
getCheckerManager().runCheckersForPreStmt(Dst, Pred, S, *this);
}
+/// Generate a node in \p Bldr for an iteration statement using the ObjC
+/// for-loop iterator.
+static void populateObjCForDestinationSet(
+ ExplodedNodeSet &dstLocation, SValBuilder &svalBuilder,
+ const ObjCForCollectionStmt *S, const Stmt *elem, SVal elementV,
+ SymbolManager &SymMgr, const NodeBuilderContext *currBldrCtx,
+ StmtNodeBuilder &Bldr, bool hasElements) {
+
+ for (ExplodedNode *Pred : dstLocation) {
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ SVal hasElementsV = svalBuilder.makeTruthVal(hasElements);
+
+ // FIXME: S is not an expression. We should not be binding values to it.
+ ProgramStateRef nextState = state->BindExpr(S, LCtx, hasElementsV);
+
+ if (auto MV = elementV.getAs<loc::MemRegionVal>())
+ if (const auto *R = dyn_cast<TypedValueRegion>(MV->getRegion())) {
+ // FIXME: The proper thing to do is to really iterate over the
+ // container. We will do this with dispatch logic to the store.
+ // For now, just 'conjure' up a symbolic value.
+ QualType T = R->getValueType();
+ assert(Loc::isLocType(T));
+
+ SVal V;
+ if (hasElements) {
+ SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T,
+ currBldrCtx->blockCount());
+ V = svalBuilder.makeLoc(Sym);
+ } else {
+ V = svalBuilder.makeIntVal(0, T);
+ }
+
+ nextState = nextState->bindLoc(elementV, V, LCtx);
+ }
+
+ Bldr.generateNode(S, Pred, nextState);
+ }
+}
+
void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
@@ -72,60 +113,35 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
// result in state splitting.
const Stmt *elem = S->getElement();
+ const Stmt *collection = S->getCollection();
ProgramStateRef state = Pred->getState();
- SVal elementV;
+ SVal collectionV = state->getSVal(collection, Pred->getLocationContext());
- if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
+ SVal elementV;
+ if (const auto *DS = dyn_cast<DeclStmt>(elem)) {
const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
assert(elemD->getInit() == nullptr);
elementV = state->getLValue(elemD, Pred->getLocationContext());
- }
- else {
+ } else {
elementV = state->getSVal(elem, Pred->getLocationContext());
}
+ bool isContainerNull = state->isNull(collectionV).isConstrainedTrue();
+
ExplodedNodeSet dstLocation;
evalLocation(dstLocation, S, elem, Pred, state, elementV, nullptr, false);
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
- for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
- NE = dstLocation.end(); NI!=NE; ++NI) {
- Pred = *NI;
- ProgramStateRef state = Pred->getState();
- const LocationContext *LCtx = Pred->getLocationContext();
-
- // Handle the case where the container still has elements.
- SVal TrueV = svalBuilder.makeTruthVal(1);
- ProgramStateRef hasElems = state->BindExpr(S, LCtx, TrueV);
-
- // Handle the case where the container has no elements.
- SVal FalseV = svalBuilder.makeTruthVal(0);
- ProgramStateRef noElems = state->BindExpr(S, LCtx, FalseV);
+ if (!isContainerNull)
+ populateObjCForDestinationSet(dstLocation, svalBuilder, S, elem, elementV,
+ SymMgr, currBldrCtx, Bldr,
+ /*hasElements=*/true);
- if (Optional<loc::MemRegionVal> MV = elementV.getAs<loc::MemRegionVal>())
- if (const TypedValueRegion *R =
- dyn_cast<TypedValueRegion>(MV->getRegion())) {
- // FIXME: The proper thing to do is to really iterate over the
- // container. We will do this with dispatch logic to the store.
- // For now, just 'conjure' up a symbolic value.
- QualType T = R->getValueType();
- assert(Loc::isLocType(T));
- SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T,
- currBldrCtx->blockCount());
- SVal V = svalBuilder.makeLoc(Sym);
- hasElems = hasElems->bindLoc(elementV, V, LCtx);
-
- // Bind the location to 'nil' on the false branch.
- SVal nilV = svalBuilder.makeIntVal(0, T);
- noElems = noElems->bindLoc(elementV, nilV, LCtx);
- }
-
- // Create the new nodes.
- Bldr.generateNode(S, Pred, hasElems);
- Bldr.generateNode(S, Pred, noElems);
- }
+ populateObjCForDestinationSet(dstLocation, svalBuilder, S, elem, elementV,
+ SymMgr, currBldrCtx, Bldr,
+ /*hasElements=*/false);
// Finally, run any custom checkers.
// FIXME: Eventually all pre- and post-checks should live in VisitStmt.
diff --git a/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/lib/StaticAnalyzer/Core/FunctionSummary.cpp
index c21735b8b882..94edd84d15d2 100644
--- a/lib/StaticAnalyzer/Core/FunctionSummary.cpp
+++ b/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -1,4 +1,4 @@
-//== FunctionSummary.cpp - Stores summaries of functions. ----------*- C++ -*-//
+//===- FunctionSummary.cpp - Stores summaries of functions. ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,21 +12,20 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+
using namespace clang;
using namespace ento;
unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
unsigned Total = 0;
- for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
- Total += I->second.TotalBasicBlocks;
- }
+ for (const auto &I : Map)
+ Total += I.second.TotalBasicBlocks;
return Total;
}
unsigned FunctionSummariesTy::getTotalNumVisitedBasicBlocks() {
unsigned Total = 0;
- for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
- Total += I->second.VisitedBasicBlocks.count();
- }
+ for (const auto &I : Map)
+ Total += I.second.VisitedBasicBlocks.count();
return Total;
}
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index ebf1487d4bfc..d5e5f96dee0f 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -1,4 +1,4 @@
-//===--- HTMLDiagnostics.cpp - HTML Diagnostics for Paths ----*- C++ -*-===//
+//===- HTMLDiagnostics.cpp - HTML Diagnostics for Paths -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,24 +11,43 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Stmt.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Token.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Errc.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <map>
+#include <memory>
+#include <set>
#include <sstream>
+#include <string>
+#include <system_error>
+#include <utility>
+#include <vector>
using namespace clang;
using namespace ento;
@@ -41,15 +60,19 @@ namespace {
class HTMLDiagnostics : public PathDiagnosticConsumer {
std::string Directory;
- bool createdDir, noDir;
+ bool createdDir = false;
+ bool noDir = false;
const Preprocessor &PP;
AnalyzerOptions &AnalyzerOpts;
const bool SupportsCrossFileDiagnostics;
+
public:
HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts,
const std::string& prefix,
const Preprocessor &pp,
- bool supportsMultipleFiles);
+ bool supportsMultipleFiles)
+ : Directory(prefix), PP(pp), AnalyzerOpts(AnalyzerOpts),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
~HTMLDiagnostics() override { FlushDiagnostics(nullptr); }
@@ -94,20 +117,13 @@ public:
/// \return Javascript for navigating the HTML report using j/k keys.
std::string generateKeyboardNavigationJavascript();
-};
-} // end anonymous namespace
+private:
+ /// \return Javascript for displaying the keyboard shortcuts help.
+ std::string showHelpJavascript();
+};
-HTMLDiagnostics::HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts,
- const std::string& prefix,
- const Preprocessor &pp,
- bool supportsMultipleFiles)
- : Directory(prefix),
- createdDir(false),
- noDir(false),
- PP(pp),
- AnalyzerOpts(AnalyzerOpts),
- SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+} // namespace
void ento::createHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
PathDiagnosticConsumers &C,
@@ -130,24 +146,19 @@ void ento::createHTMLSingleFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
void HTMLDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) {
- for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
- et = Diags.end(); it != et; ++it) {
- ReportDiag(**it, filesMade);
- }
+ for (const auto Diag : Diags)
+ ReportDiag(*Diag, filesMade);
}
void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
FilesMade *filesMade) {
-
// Create the HTML directory if it is missing.
if (!createdDir) {
createdDir = true;
if (std::error_code ec = llvm::sys::fs::create_directories(Directory)) {
llvm::errs() << "warning: could not create directory '"
<< Directory << "': " << ec.message() << '\n';
-
noDir = true;
-
return;
}
}
@@ -174,7 +185,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
SmallString<128> declName("unknown");
int offsetDecl = 0;
if (const Decl *DeclWithIssue = D.getDeclWithIssue()) {
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue))
+ if (const auto *ND = dyn_cast<NamedDecl>(DeclWithIssue))
declName = ND->getDeclName().getAsString();
if (const Stmt *Body = DeclWithIssue->getBody()) {
@@ -212,7 +223,6 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
<< "': " << EC.message() << '\n';
return;
}
-
} else {
int i = 1;
std::error_code EC;
@@ -228,10 +238,8 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
<< "-" << i << ".html";
llvm::sys::path::append(Model, Directory,
filename.str());
- EC = llvm::sys::fs::openFileForWrite(Model,
- FD,
- llvm::sys::fs::F_RW |
- llvm::sys::fs::F_Excl);
+ EC = llvm::sys::fs::openFileForReadWrite(
+ Model, FD, llvm::sys::fs::CD_CreateNew, llvm::sys::fs::OF_None);
if (EC && EC != llvm::errc::file_exists) {
llvm::errs() << "warning: could not create file '" << Model
<< "': " << EC.message() << '\n';
@@ -253,7 +261,6 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
const SourceManager& SMgr, const PathPieces& path, const char *declName) {
-
// Rewrite source files as HTML for every new file the path crosses
std::vector<FileID> FileIDs;
for (auto I : path) {
@@ -309,10 +316,12 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
const RewriteBuffer *Buf = R.getRewriteBufferFor(FileIDs[0]);
if (!Buf)
- return "";
+ return {};
// Add CSS, header, and footer.
- const FileEntry* Entry = SMgr.getFileEntryForID(FileIDs[0]);
+ FileID FID =
+ path.back()->getLocation().asLocation().getExpansionLoc().getFileID();
+ const FileEntry* Entry = SMgr.getFileEntryForID(FID);
FinalizeHTML(D, R, SMgr, path, FileIDs[0], Entry, declName);
std::string file;
@@ -323,6 +332,114 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
return os.str();
}
+/// Write executed lines from \p D in JSON format into \p os.
+static void serializeExecutedLines(
+ const PathDiagnostic &D,
+ const PathPieces &path,
+ llvm::raw_string_ostream &os) {
+ // Copy executed lines from path diagnostics.
+ std::map<unsigned, std::set<unsigned>> ExecutedLines;
+ for (auto I = D.executedLines_begin(),
+ E = D.executedLines_end(); I != E; ++I) {
+ std::set<unsigned> &LinesInFile = ExecutedLines[I->first];
+ for (unsigned LineNo : I->second) {
+ LinesInFile.insert(LineNo);
+ }
+ }
+
+ // We need to include all lines for which any kind of diagnostics appears.
+ for (const auto &P : path) {
+ FullSourceLoc Loc = P->getLocation().asLocation().getExpansionLoc();
+ FileID FID = Loc.getFileID();
+ unsigned LineNo = Loc.getLineNumber();
+ ExecutedLines[FID.getHashValue()].insert(LineNo);
+ }
+
+ os << "var relevant_lines = {";
+ for (auto I = ExecutedLines.begin(),
+ E = ExecutedLines.end(); I != E; ++I) {
+ if (I != ExecutedLines.begin())
+ os << ", ";
+
+ os << "\"" << I->first << "\": {";
+ for (unsigned LineNo : I->second) {
+ if (LineNo != *(I->second.begin()))
+ os << ", ";
+
+ os << "\"" << LineNo << "\": 1";
+ }
+ os << "}";
+ }
+
+ os << "};";
+}
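+
For a path crossing two files the emitted map looks like this (illustrative
output; the keys are FileID hash values and line numbers):

  var relevant_lines = {"1": {"4": 1, "5": 1}, "2": {"12": 1}};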
+
+/// \return JavaScript for an option to only show relevant lines.
+static std::string showRelevantLinesJavascript(
+ const PathDiagnostic &D, const PathPieces &path) {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "<script type='text/javascript'>\n";
+ serializeExecutedLines(D, path, os);
+ os << R"<<<(
+
+var filterCounterexample = function (hide) {
+ var tables = document.getElementsByClassName("code");
+ for (var t=0; t<tables.length; t++) {
+ var table = tables[t];
+ var file_id = table.getAttribute("data-fileid");
+ var lines_in_fid = relevant_lines[file_id];
+ if (!lines_in_fid) {
+ lines_in_fid = {};
+ }
+ var lines = table.getElementsByClassName("codeline");
+ for (var i=0; i<lines.length; i++) {
+ var el = lines[i];
+ var lineNo = el.getAttribute("data-linenumber");
+ if (!lines_in_fid[lineNo]) {
+ if (hide) {
+ el.setAttribute("hidden", "");
+ } else {
+ el.removeAttribute("hidden");
+ }
+ }
+ }
+ }
+}
+
+window.addEventListener("keydown", function (event) {
+ if (event.defaultPrevented) {
+ return;
+ }
+ if (event.key == "S") {
+ var checked = document.getElementsByName("showCounterexample")[0].checked;
+ filterCounterexample(!checked);
+ document.getElementsByName("showCounterexample")[0].checked = !checked;
+ } else {
+ return;
+ }
+ event.preventDefault();
+}, true);
+
+document.addEventListener("DOMContentLoaded", function() {
+ document.querySelector('input[name="showCounterexample"]').onchange=
+ function (event) {
+ filterCounterexample(this.checked);
+ };
+});
+</script>
+
+<form>
+ <input type="checkbox" name="showCounterexample" id="showCounterexample" />
+ <label for="showCounterexample">
+ Show only relevant lines
+ </label>
+</form>
+)<<<";
+
+ return os.str();
+}
+
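For reference, the relevant_lines blob that serializeExecutedLines writes is an ordinary JavaScript object literal keyed by FileID hash value, mapping each executed line number to 1. A minimal standalone C++ sketch of the same serialization loop, detached from the analyzer sources (the FileID keys and line numbers are invented for illustration):

    #include <iostream>
    #include <map>
    #include <set>

    int main() {
      std::map<unsigned, std::set<unsigned>> ExecutedLines = {
          {1, {3, 7, 12}}, {2, {5}}};

      std::cout << "var relevant_lines = {";
      for (auto I = ExecutedLines.begin(), E = ExecutedLines.end(); I != E; ++I) {
        if (I != ExecutedLines.begin())
          std::cout << ", ";
        std::cout << "\"" << I->first << "\": {";
        for (unsigned LineNo : I->second) {
          if (LineNo != *I->second.begin()) // separator before all but the first
            std::cout << ", ";
          std::cout << "\"" << LineNo << "\": 1";
        }
        std::cout << "}";
      }
      std::cout << "};\n";
      // Prints: var relevant_lines = {"1": {"3": 1, "7": 1, "12": 1}, "2": {"5": 1}};
    }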
void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
const SourceManager& SMgr, const PathPieces& path, FileID FID,
const FileEntry *Entry, const char *declName) {
@@ -340,9 +457,15 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
int LineNumber = path.back()->getLocation().asLocation().getExpansionLineNumber();
int ColumnNumber = path.back()->getLocation().asLocation().getExpansionColumnNumber();
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), showHelpJavascript());
+
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID),
generateKeyboardNavigationJavascript());
+  // Checkbox and JavaScript for filtering the output to the counterexample.
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID),
+ showRelevantLinesJavascript(D, path));
+
// Add the name of the file as an <h1> tag.
{
std::string s;
@@ -379,8 +502,8 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
// Output any other meta data.
- for (PathDiagnostic::meta_iterator I=D.meta_begin(), E=D.meta_end();
- I!=E; ++I) {
+ for (PathDiagnostic::meta_iterator I = D.meta_begin(), E = D.meta_end();
+ I != E; ++I) {
os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
}
@@ -388,11 +511,24 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
</table>
<!-- REPORTSUMMARYEXTRA -->
<h3>Annotated Source Code</h3>
-<p><span class='macro'>[?]
- <span class='expansion'>Use j/k keys for keyboard navigation</span>
-</span></p>
+<p>Press <a href="#" onclick="toggleHelp(); return false;">'?'</a>
+ to see keyboard shortcuts</p>
+<input type="checkbox" class="spoilerhider" id="showinvocation" />
+<label for="showinvocation">Show analyzer invocation</label>
+<div class="spoiler">clang -cc1 )<<<";
+ os << html::EscapeText(AnalyzerOpts.FullCompilerInvocation);
+ os << R"<<<(
+</div>
+<div id='tooltiphint' hidden="true">
+ <p>Keyboard shortcuts: </p>
+ <ul>
+ <li>Use 'j/k' keys for keyboard navigation</li>
+ <li>Use 'Shift+S' to show/hide relevant lines</li>
+ <li>Use '?' to toggle this window</li>
+ </ul>
+ <a href="#" onclick="toggleHelp(); return false;">Close</a>
+</div>
)<<<";
-
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
}
@@ -450,6 +586,34 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
}
+std::string HTMLDiagnostics::showHelpJavascript() {
+ return R"<<<(
+<script type='text/javascript'>
+
+var toggleHelp = function() {
+ var hint = document.querySelector("#tooltiphint");
+ var attributeName = "hidden";
+ if (hint.hasAttribute(attributeName)) {
+ hint.removeAttribute(attributeName);
+ } else {
+ hint.setAttribute("hidden", "true");
+ }
+};
+window.addEventListener("keydown", function (event) {
+ if (event.defaultPrevented) {
+ return;
+ }
+ if (event.key == "?") {
+ toggleHelp();
+ } else {
+ return;
+ }
+ event.preventDefault();
+});
+</script>
+)<<<";
+}
+
void HTMLDiagnostics::RewriteFile(Rewriter &R, const SourceManager& SMgr,
const PathPieces& path, FileID FID) {
// Process the path.
@@ -494,7 +658,6 @@ void HTMLDiagnostics::RewriteFile(Rewriter &R, const SourceManager& SMgr,
void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
const PathDiagnosticPiece& P,
unsigned num, unsigned max) {
-
// For now, just draw a box above the line in question, and emit the
// warning.
FullSourceLoc Pos = P.getLocation().asLocation();
@@ -634,9 +797,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
os << "</td><td>";
}
- if (const PathDiagnosticMacroPiece *MP =
- dyn_cast<PathDiagnosticMacroPiece>(&P)) {
-
+ if (const auto *MP = dyn_cast<PathDiagnosticMacroPiece>(&P)) {
os << "Within the expansion of the macro '";
// Get the name of the macro by relexing it.
@@ -707,10 +868,8 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
// Now highlight the ranges.
ArrayRef<SourceRange> Ranges = P.getRanges();
- for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end(); I != E; ++I) {
- HighlightRange(R, LPosInfo.first, *I);
- }
+ for (const auto &Range : Ranges)
+ HighlightRange(R, LPosInfo.first, Range);
}
static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
@@ -726,18 +885,13 @@ static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
unsigned HTMLDiagnostics::ProcessMacroPiece(raw_ostream &os,
const PathDiagnosticMacroPiece& P,
unsigned num) {
-
- for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
- I!=E; ++I) {
-
- if (const PathDiagnosticMacroPiece *MP =
- dyn_cast<PathDiagnosticMacroPiece>(I->get())) {
+ for (const auto &subPiece : P.subPieces) {
+ if (const auto *MP = dyn_cast<PathDiagnosticMacroPiece>(subPiece.get())) {
num = ProcessMacroPiece(os, *MP, num);
continue;
}
- if (PathDiagnosticEventPiece *EP =
- dyn_cast<PathDiagnosticEventPiece>(I->get())) {
+ if (const auto *EP = dyn_cast<PathDiagnosticEventPiece>(subPiece.get())) {
os << "<div class=\"msg msgEvent\" style=\"width:94%; "
"margin-left:5px\">"
"<table class=\"msgT\"><tr>"
@@ -862,7 +1016,7 @@ window.addEventListener("keydown", function (event) {
navigateTo(/*up=*/true);
} else {
return;
- }
+ }
event.preventDefault();
}, true);
</script>
diff --git a/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index a8c4b05cea13..da4574c61515 100644
--- a/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -97,9 +97,7 @@ changeIntBoundNode(internal::Matcher<Decl> VarNodeMatcher) {
unaryOperator(anyOf(hasOperatorName("--"), hasOperatorName("++")),
hasUnaryOperand(ignoringParenImpCasts(
declRefExpr(to(varDecl(VarNodeMatcher)))))),
- binaryOperator(anyOf(hasOperatorName("="), hasOperatorName("+="),
- hasOperatorName("/="), hasOperatorName("*="),
- hasOperatorName("-=")),
+ binaryOperator(isAssignmentOperator(),
hasLHS(ignoringParenImpCasts(
declRefExpr(to(varDecl(VarNodeMatcher)))))));
}
@@ -143,13 +141,15 @@ static internal::Matcher<Stmt> forLoopMatcher() {
return forStmt(
hasCondition(simpleCondition("initVarName")),
// Initialization should match the form: 'int i = 6' or 'i = 42'.
- hasLoopInit(anyOf(
- declStmt(hasSingleDecl(varDecl(
- allOf(hasInitializer(integerLiteral().bind("initNum")),
- equalsBoundNode("initVarName"))))),
- binaryOperator(hasLHS(declRefExpr(to(
- varDecl(equalsBoundNode("initVarName"))))),
- hasRHS(integerLiteral().bind("initNum"))))),
+ hasLoopInit(
+ anyOf(declStmt(hasSingleDecl(
+ varDecl(allOf(hasInitializer(ignoringParenImpCasts(
+ integerLiteral().bind("initNum"))),
+ equalsBoundNode("initVarName"))))),
+ binaryOperator(hasLHS(declRefExpr(to(varDecl(
+ equalsBoundNode("initVarName"))))),
+ hasRHS(ignoringParenImpCasts(
+ integerLiteral().bind("initNum")))))),
// Incrementation should be a simple increment or decrement
// operator call.
hasIncrement(unaryOperator(
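The two LoopUnrolling changes above loosen what counts as a simple loop: any assignment operator to the loop variable now marks the bound as changed, not just the hand-listed =, +=, -=, *=, /=, and the init value may be wrapped in parentheses or implicit casts. Hypothetical snippets illustrating both effects (not from the patch):

    // Now recognized: the init literal is parenthesized, so only
    // ignoringParenImpCasts lets integerLiteral() bind "initNum".
    for (int i = (0); i < 8; ++i) { /* ... */ }

    // Now detected as changing the bound: isAssignmentOperator() catches
    // every compound assignment to the loop variable, including forms the
    // old hand-written list missed, such as <<=.
    for (int j = 0; j < 8; ++j)
      j <<= 1;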
diff --git a/lib/StaticAnalyzer/Core/LoopWidening.cpp b/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 05865c294cb7..9192f49eac6d 100644
--- a/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -14,10 +14,16 @@
///
//===----------------------------------------------------------------------===//
+#include "clang/AST/AST.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
using namespace clang;
using namespace ento;
+using namespace clang::ast_matchers;
+
+const auto MatchRef = "matchref";
/// Return the loop's condition Stmt or NULL if LoopStmt is not a loop
static const Expr *getLoopCondition(const Stmt *LoopStmt) {
@@ -49,7 +55,8 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
// TODO Nested loops are currently widened as a result of the invalidation
// being so imprecise. When the invalidation is improved, the handling
// of nested loops will also need to be improved.
- const StackFrameContext *STC = LCtx->getCurrentStackFrame();
+ ASTContext &ASTCtx = LCtx->getAnalysisDeclContext()->getASTContext();
+ const StackFrameContext *STC = LCtx->getStackFrame();
MemRegionManager &MRMgr = PrevState->getStateManager().getRegionManager();
const MemRegion *Regions[] = {MRMgr.getStackLocalsRegion(STC),
MRMgr.getStackArgumentsRegion(STC),
@@ -59,6 +66,30 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
ITraits.setTrait(Region,
RegionAndSymbolInvalidationTraits::TK_EntireMemSpace);
}
+
+ // References should not be invalidated.
+  auto Matches = match(
+      findAll(stmt(hasDescendant(
+          varDecl(hasType(referenceType())).bind(MatchRef)))),
+      *LCtx->getDecl()->getBody(), ASTCtx);
+ for (BoundNodes Match : Matches) {
+ const VarDecl *VD = Match.getNodeAs<VarDecl>(MatchRef);
+ assert(VD);
+ const VarRegion *VarMem = MRMgr.getVarRegion(VD, LCtx);
+ ITraits.setTrait(VarMem,
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ }
+
+  // The 'this' pointer is not an lvalue, so we should not invalidate it. If
+  // the loop is located in a method, constructor, or destructor, the value of
+  // the 'this' pointer should remain unchanged.
+ if (const CXXMethodDecl *CXXMD = dyn_cast<CXXMethodDecl>(STC->getDecl())) {
+ const CXXThisRegion *ThisR = MRMgr.getCXXThisRegion(
+ CXXMD->getThisType(STC->getAnalysisDeclContext()->getASTContext()),
+ STC);
+ ITraits.setTrait(ThisR,
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ }
+
return PrevState->invalidateRegions(Regions, getLoopCondition(LoopStmt),
BlockCount, LCtx, true, nullptr, nullptr,
&ITraits);
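Both new exemptions in getWidenedLoopState keep widening from clobbering values that cannot legally change: variables of reference type stay bound to their original objects, and 'this' keeps pointing at the enclosing object. An illustrative case, with the code invented for this note:

    struct Counter {
      int Total;
      void count(int &Bound) {
        int Step = 1;
        int &Ref = Step; // reference VarDecl: tagged TK_PreserveContents
        for (int I = 0; I < Bound; ++I)
          Total += Ref;  // the CXXThisRegion is also preserved across widening
      }
    };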
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index cb8ba6de3626..cb2122c7749e 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -1,4 +1,4 @@
-//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--//
+//===- MemRegion.cpp - Abstract memory regions for static analysis --------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,19 +14,51 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CheckedArithmetic.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <string>
+#include <tuple>
+#include <utility>
using namespace clang;
using namespace ento;
+#define DEBUG_TYPE "MemRegion"
+
//===----------------------------------------------------------------------===//
// MemRegion Construction.
//===----------------------------------------------------------------------===//
@@ -37,8 +69,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1,
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, arg1, superRegion);
void *InsertPos;
- RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
- InsertPos));
+ auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
R = A.Allocate<RegionTy>();
@@ -55,8 +86,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, arg1, arg2, superRegion);
void *InsertPos;
- RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
- InsertPos));
+ auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
R = A.Allocate<RegionTy>();
@@ -75,8 +105,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, arg1, arg2, arg3, superRegion);
void *InsertPos;
- RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
- InsertPos));
+ auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
R = A.Allocate<RegionTy>();
@@ -91,27 +120,26 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
// Object destruction.
//===----------------------------------------------------------------------===//
-MemRegion::~MemRegion() {}
+MemRegion::~MemRegion() = default;
-MemRegionManager::~MemRegionManager() {
- // All regions and their data are BumpPtrAllocated. No need to call
- // their destructors.
-}
+// All regions and their data are BumpPtrAllocated. No need to call their
+// destructors.
+MemRegionManager::~MemRegionManager() = default;
//===----------------------------------------------------------------------===//
// Basic methods.
//===----------------------------------------------------------------------===//
bool SubRegion::isSubRegionOf(const MemRegion* R) const {
- const MemRegion* r = getSuperRegion();
- while (r != nullptr) {
+ const MemRegion* r = this;
+ do {
if (r == R)
return true;
- if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+ if (const auto *sr = dyn_cast<SubRegion>(r))
r = sr->getSuperRegion();
else
break;
- }
+ } while (r != nullptr);
return false;
}
@@ -119,16 +147,16 @@ MemRegionManager* SubRegion::getMemRegionManager() const {
const SubRegion* r = this;
do {
const MemRegion *superRegion = r->getSuperRegion();
- if (const SubRegion *sr = dyn_cast<SubRegion>(superRegion)) {
+ if (const auto *sr = dyn_cast<SubRegion>(superRegion)) {
r = sr;
continue;
}
return superRegion->getMemRegionManager();
- } while (1);
+ } while (true);
}
const StackFrameContext *VarRegion::getStackFrame() const {
- const StackSpaceRegion *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+ const auto *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
return SSR ? SSR->getStackFrame() : nullptr;
}
@@ -215,17 +243,17 @@ void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(getCodeRegion());
}
-void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- const StringLiteral* Str,
- const MemRegion* superRegion) {
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const StringLiteral *Str,
+ const MemRegion *superRegion) {
ID.AddInteger(static_cast<unsigned>(StringRegionKind));
ID.AddPointer(Str);
ID.AddPointer(superRegion);
}
-void ObjCStringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
- const ObjCStringLiteral* Str,
- const MemRegion* superRegion) {
+void ObjCStringRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const ObjCStringLiteral *Str,
+ const MemRegion *superRegion) {
ID.AddInteger(static_cast<unsigned>(ObjCStringRegionKind));
ID.AddPointer(Str);
ID.AddPointer(superRegion);
@@ -380,13 +408,19 @@ void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
// Region anchors.
//===----------------------------------------------------------------------===//
-void GlobalsSpaceRegion::anchor() { }
-void NonStaticGlobalSpaceRegion::anchor() { }
-void StackSpaceRegion::anchor() { }
-void TypedRegion::anchor() { }
-void TypedValueRegion::anchor() { }
-void CodeTextRegion::anchor() { }
-void SubRegion::anchor() { }
+void GlobalsSpaceRegion::anchor() {}
+
+void NonStaticGlobalSpaceRegion::anchor() {}
+
+void StackSpaceRegion::anchor() {}
+
+void TypedRegion::anchor() {}
+
+void TypedValueRegion::anchor() {}
+
+void CodeTextRegion::anchor() {}
+
+void SubRegion::anchor() {}
//===----------------------------------------------------------------------===//
// Region pretty-printing.
@@ -408,7 +442,7 @@ void MemRegion::dumpToStream(raw_ostream &os) const {
}
void AllocaRegion::dumpToStream(raw_ostream &os) const {
- os << "alloca{" << static_cast<const void*>(Ex) << ',' << Cnt << '}';
+ os << "alloca{" << static_cast<const void *>(Ex) << ',' << Cnt << '}';
}
void FunctionCodeRegion::dumpToStream(raw_ostream &os) const {
@@ -416,7 +450,7 @@ void FunctionCodeRegion::dumpToStream(raw_ostream &os) const {
}
void BlockCodeRegion::dumpToStream(raw_ostream &os) const {
- os << "block_code{" << static_cast<const void*>(this) << '}';
+ os << "block_code{" << static_cast<const void *>(this) << '}';
}
void BlockDataRegion::dumpToStream(raw_ostream &os) const {
@@ -425,19 +459,19 @@ void BlockDataRegion::dumpToStream(raw_ostream &os) const {
for (BlockDataRegion::referenced_vars_iterator
I = referenced_vars_begin(),
E = referenced_vars_end(); I != E; ++I)
- os << "(" << I.getCapturedRegion() << "," <<
+ os << "(" << I.getCapturedRegion() << "<-" <<
I.getOriginalRegion() << ") ";
os << '}';
}
void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
// FIXME: More elaborate pretty-printing.
- os << "{ " << static_cast<const void*>(CL) << " }";
+ os << "{ " << static_cast<const void *>(CL) << " }";
}
void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
os << "temp_object{" << getValueType().getAsString() << ','
- << static_cast<const void*>(Ex) << '}';
+ << static_cast<const void *>(Ex) << '}';
}
void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
@@ -478,7 +512,11 @@ void SymbolicRegion::dumpToStream(raw_ostream &os) const {
}
void VarRegion::dumpToStream(raw_ostream &os) const {
- os << *cast<VarDecl>(D);
+ const auto *VD = cast<VarDecl>(D);
+ if (const IdentifierInfo *ID = VD->getIdentifier())
+ os << ID->getName();
+ else
+ os << "VarRegion{" << static_cast<const void *>(this) << '}';
}
LLVM_DUMP_METHOD void RegionRawOffset::dump() const {
@@ -622,19 +660,18 @@ std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
// Get variable name.
if (R && R->canPrintPrettyAsExpr()) {
R->printPrettyAsExpr(os);
- if (UseQuotes) {
+ if (UseQuotes)
return (llvm::Twine("'") + os.str() + ArrayIndices + "'").str();
- } else {
+ else
return (llvm::Twine(os.str()) + ArrayIndices).str();
- }
}
return VariableName;
}
SourceRange MemRegion::sourceRange() const {
- const VarRegion *const VR = dyn_cast<VarRegion>(this->getBaseRegion());
- const FieldRegion *const FR = dyn_cast<FieldRegion>(this);
+ const auto *const VR = dyn_cast<VarRegion>(this->getBaseRegion());
+ const auto *const FR = dyn_cast<FieldRegion>(this);
// Check for more specific regions first.
// FieldRegion
@@ -646,9 +683,8 @@ SourceRange MemRegion::sourceRange() const {
return VR->getDecl()->getSourceRange();
}
// Return invalid source range (can be checked by client).
- else {
- return SourceRange{};
- }
+ else
+ return {};
}
//===----------------------------------------------------------------------===//
@@ -738,13 +774,14 @@ const CodeSpaceRegion *MemRegionManager::getCodeRegion() {
//===----------------------------------------------------------------------===//
// Constructing regions.
//===----------------------------------------------------------------------===//
-const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
+
+const StringRegion *
+MemRegionManager::getStringRegion(const StringLiteral *Str) {
return getSubRegion<StringRegion>(
Str, cast<GlobalInternalSpaceRegion>(getGlobalsRegion()));
}
const ObjCStringRegion *
-MemRegionManager::getObjCStringRegion(const ObjCStringLiteral* Str){
+MemRegionManager::getObjCStringRegion(const ObjCStringLiteral *Str) {
return getSubRegion<ObjCStringRegion>(
Str, cast<GlobalInternalSpaceRegion>(getGlobalsRegion()));
}
@@ -757,21 +794,20 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
const DeclContext *DC,
const VarDecl *VD) {
while (LC) {
- if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
+ if (const auto *SFC = dyn_cast<StackFrameContext>(LC)) {
if (cast<DeclContext>(SFC->getDecl()) == DC)
return SFC;
}
- if (const BlockInvocationContext *BC =
- dyn_cast<BlockInvocationContext>(LC)) {
- const BlockDataRegion *BR =
- static_cast<const BlockDataRegion*>(BC->getContextData());
+ if (const auto *BC = dyn_cast<BlockInvocationContext>(LC)) {
+ const auto *BR =
+ static_cast<const BlockDataRegion *>(BC->getContextData());
// FIXME: This can be made more efficient.
for (BlockDataRegion::referenced_vars_iterator
I = BR->referenced_vars_begin(),
E = BR->referenced_vars_end(); I != E; ++I) {
- if (const VarRegion *VR = dyn_cast<VarRegion>(I.getOriginalRegion()))
- if (VR->getDecl() == VD)
- return cast<VarRegion>(I.getCapturedRegion());
+ const VarRegion *VR = I.getOriginalRegion();
+ if (VR->getDecl() == VD)
+ return cast<VarRegion>(I.getCapturedRegion());
}
}
@@ -818,7 +854,7 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
if (V.is<const VarRegion*>())
return V.get<const VarRegion*>();
- const StackFrameContext *STC = V.get<const StackFrameContext*>();
+ const auto *STC = V.get<const StackFrameContext *>();
if (!STC) {
// FIXME: Assign a more sensible memory space to static locals
@@ -836,7 +872,7 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
if (isa<FunctionDecl>(STCD) || isa<ObjCMethodDecl>(STCD))
sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
getFunctionCodeRegion(cast<NamedDecl>(STCD)));
- else if (const BlockDecl *BD = dyn_cast<BlockDecl>(STCD)) {
+ else if (const auto *BD = dyn_cast<BlockDecl>(STCD)) {
// FIXME: The fallback type here is totally bogus -- though it should
// never be queried, it will prevent uniquing with the real
// BlockCodeRegion. Ideally we'd fix the AST so that we always had a
@@ -885,7 +921,7 @@ MemRegionManager::getBlockDataRegion(const BlockCodeRegion *BC,
if (LC) {
// FIXME: Once we implement scope handling, we want the parent region
// to be the scope.
- const StackFrameContext *STC = LC->getCurrentStackFrame();
+ const StackFrameContext *STC = LC->getStackFrame();
assert(STC);
sReg = getStackLocalsRegion(STC);
}
@@ -913,7 +949,7 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
if (CL->isFileScope())
sReg = getGlobalsRegion();
else {
- const StackFrameContext *STC = LC->getCurrentStackFrame();
+ const StackFrameContext *STC = LC->getStackFrame();
assert(STC);
sReg = getStackLocalsRegion(STC);
}
@@ -932,7 +968,7 @@ MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
void *InsertPos;
MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
- ElementRegion* R = cast_or_null<ElementRegion>(data);
+ auto *R = cast_or_null<ElementRegion>(data);
if (!R) {
R = A.Allocate<ElementRegion>();
@@ -954,7 +990,6 @@ MemRegionManager::getBlockCodeRegion(const BlockDecl *BD, CanQualType locTy,
return getSubRegion<BlockCodeRegion>(BD, locTy, AC, getCodeRegion());
}
-
/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
@@ -979,7 +1014,7 @@ MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl *d,
const CXXTempObjectRegion*
MemRegionManager::getCXXTempObjectRegion(Expr const *E,
LocationContext const *LC) {
- const StackFrameContext *SFC = LC->getCurrentStackFrame();
+ const StackFrameContext *SFC = LC->getStackFrame();
assert(SFC);
return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
}
@@ -1017,10 +1052,8 @@ MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *RD,
if (IsVirtual) {
// Virtual base regions should not be layered, since the layout rules
// are different.
- while (const CXXBaseObjectRegion *Base =
- dyn_cast<CXXBaseObjectRegion>(Super)) {
+ while (const auto *Base = dyn_cast<CXXBaseObjectRegion>(Super))
Super = cast<SubRegion>(Base->getSuperRegion());
- }
assert(Super && !isa<MemSpaceRegion>(Super));
}
}
@@ -1031,7 +1064,7 @@ MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *RD,
const CXXThisRegion*
MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
const LocationContext *LC) {
- const PointerType *PT = thisPointerTy->getAs<PointerType>();
+ const auto *PT = thisPointerTy->getAs<PointerType>();
assert(PT);
// Inside the body of a lambda's operator(), a 'this' expression might refer
// to an object in one of the parent location contexts.
@@ -1045,7 +1078,7 @@ MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
LC = LC->getParent();
D = dyn_cast<CXXMethodDecl>(LC->getDecl());
}
- const StackFrameContext *STC = LC->getCurrentStackFrame();
+ const StackFrameContext *STC = LC->getStackFrame();
assert(STC);
return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC));
}
@@ -1053,14 +1086,14 @@ MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
const AllocaRegion*
MemRegionManager::getAllocaRegion(const Expr *E, unsigned cnt,
const LocationContext *LC) {
- const StackFrameContext *STC = LC->getCurrentStackFrame();
+ const StackFrameContext *STC = LC->getStackFrame();
assert(STC);
return getSubRegion<AllocaRegion>(E, cnt, getStackLocalsRegion(STC));
}
const MemSpaceRegion *MemRegion::getMemorySpace() const {
const MemRegion *R = this;
- const SubRegion* SR = dyn_cast<SubRegion>(this);
+ const auto *SR = dyn_cast<SubRegion>(this);
while (SR) {
R = SR->getSuperRegion();
@@ -1121,7 +1154,7 @@ const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
while (true) {
switch (R->getKind()) {
case ElementRegionKind: {
- const ElementRegion *ER = cast<ElementRegion>(R);
+ const auto *ER = cast<ElementRegion>(R);
if (!ER->getIndex().isZeroConstant())
return R;
R = ER->getSuperRegion();
@@ -1139,10 +1172,10 @@ const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
}
const SymbolicRegion *MemRegion::getSymbolicBase() const {
- const SubRegion *SubR = dyn_cast<SubRegion>(this);
+ const auto *SubR = dyn_cast<SubRegion>(this);
while (SubR) {
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR))
+ if (const auto *SymR = dyn_cast<SymbolicRegion>(SubR))
return SymR;
SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
}
@@ -1150,7 +1183,7 @@ const SymbolicRegion *MemRegion::getSymbolicBase() const {
}
RegionRawOffset ElementRegion::getAsArrayOffset() const {
- CharUnits offset = CharUnits::Zero();
+ int64_t offset = 0;
const ElementRegion *ER = this;
const MemRegion *superR = nullptr;
ASTContext &C = getContext();
@@ -1162,7 +1195,7 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
// FIXME: generalize to symbolic offsets.
SVal index = ER->getIndex();
- if (Optional<nonloc::ConcreteInt> CI = index.getAs<nonloc::ConcreteInt>()) {
+ if (auto CI = index.getAs<nonloc::ConcreteInt>()) {
// Update the offset.
int64_t i = CI->getValue().getSExtValue();
@@ -1175,8 +1208,15 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
break;
}
- CharUnits size = C.getTypeSizeInChars(elemType);
- offset += (i * size);
+ int64_t size = C.getTypeSizeInChars(elemType).getQuantity();
+ if (auto NewOffset = llvm::checkedMulAdd(i, size, offset)) {
+ offset = *NewOffset;
+ } else {
+      LLVM_DEBUG(llvm::dbgs() << "ElementRegion::getAsArrayOffset: "
+                              << "offset overflowed, returning unknown\n");
+
+ return nullptr;
+ }
}
// Go to the next ElementRegion (if any).
@@ -1188,10 +1228,9 @@ RegionRawOffset ElementRegion::getAsArrayOffset() const {
}
assert(superR && "super region cannot be NULL");
- return RegionRawOffset(superR, offset);
+ return RegionRawOffset(superR, CharUnits::fromQuantity(offset));
}
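getAsArrayOffset now accumulates the raw offset in an int64_t and routes every 'offset += i * size' step through llvm::checkedMulAdd, returning an unknown offset instead of silently wrapping. A freestanding sketch of the same guard, assuming checkedMulAdd computes a * b + c with overflow detection (rebuilt here on compiler builtins rather than the LLVM header):

    #include <cstdint>
    #include <optional>

    // Stand-in for llvm::checkedMulAdd: A * B + C with overflow detection.
    std::optional<int64_t> checkedMulAdd(int64_t A, int64_t B, int64_t C) {
      int64_t Mul, Sum;
      if (__builtin_mul_overflow(A, B, &Mul) ||
          __builtin_add_overflow(Mul, C, &Sum))
        return std::nullopt;
      return Sum;
    }

    // Mirrors the call site above: update the running offset, or signal
    // the caller to fall back to an unknown offset.
    bool addElementOffset(int64_t &Offset, int64_t Index, int64_t ElemSize) {
      if (auto NewOffset = checkedMulAdd(Index, ElemSize, Offset)) {
        Offset = *NewOffset;
        return true;
      }
      return false;
    }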
-
/// Returns true if \p Base is an immediate base class of \p Child
static bool isImmediateBase(const CXXRecordDecl *Child,
const CXXRecordDecl *Base) {
@@ -1207,47 +1246,46 @@ static bool isImmediateBase(const CXXRecordDecl *Child,
return false;
}
-RegionOffset MemRegion::getAsOffset() const {
- const MemRegion *R = this;
+static RegionOffset calculateOffset(const MemRegion *R) {
const MemRegion *SymbolicOffsetBase = nullptr;
int64_t Offset = 0;
- while (1) {
+ while (true) {
switch (R->getKind()) {
- case CodeSpaceRegionKind:
- case StackLocalsSpaceRegionKind:
- case StackArgumentsSpaceRegionKind:
- case HeapSpaceRegionKind:
- case UnknownSpaceRegionKind:
- case StaticGlobalSpaceRegionKind:
- case GlobalInternalSpaceRegionKind:
- case GlobalSystemSpaceRegionKind:
- case GlobalImmutableSpaceRegionKind:
+ case MemRegion::CodeSpaceRegionKind:
+ case MemRegion::StackLocalsSpaceRegionKind:
+ case MemRegion::StackArgumentsSpaceRegionKind:
+ case MemRegion::HeapSpaceRegionKind:
+ case MemRegion::UnknownSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind:
+ case MemRegion::GlobalInternalSpaceRegionKind:
+ case MemRegion::GlobalSystemSpaceRegionKind:
+ case MemRegion::GlobalImmutableSpaceRegionKind:
// Stores can bind directly to a region space to set a default value.
assert(Offset == 0 && !SymbolicOffsetBase);
goto Finish;
- case FunctionCodeRegionKind:
- case BlockCodeRegionKind:
- case BlockDataRegionKind:
+ case MemRegion::FunctionCodeRegionKind:
+ case MemRegion::BlockCodeRegionKind:
+ case MemRegion::BlockDataRegionKind:
// These will never have bindings, but may end up having values requested
// if the user does some strange casting.
if (Offset != 0)
SymbolicOffsetBase = R;
goto Finish;
- case SymbolicRegionKind:
- case AllocaRegionKind:
- case CompoundLiteralRegionKind:
- case CXXThisRegionKind:
- case StringRegionKind:
- case ObjCStringRegionKind:
- case VarRegionKind:
- case CXXTempObjectRegionKind:
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::StringRegionKind:
+ case MemRegion::ObjCStringRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
// Usual base regions.
goto Finish;
- case ObjCIvarRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
// This is a little strange, but it's a compromise between
// ObjCIvarRegions having unknown compile-time offsets (when using the
// non-fragile runtime) and yet still being distinct, non-overlapping
@@ -1255,15 +1293,15 @@ RegionOffset MemRegion::getAsOffset() const {
// of computing offsets.
goto Finish;
- case CXXBaseObjectRegionKind: {
- const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
+ case MemRegion::CXXBaseObjectRegionKind: {
+ const auto *BOR = cast<CXXBaseObjectRegion>(R);
R = BOR->getSuperRegion();
QualType Ty;
bool RootIsSymbolic = false;
- if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
- Ty = TVR->getDesugaredValueType(getContext());
- } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ if (const auto *TVR = dyn_cast<TypedValueRegion>(R)) {
+ Ty = TVR->getDesugaredValueType(R->getContext());
+ } else if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
// If our base region is symbolic, we don't know what type it really is.
// Pretend the type of the symbol is the true dynamic type.
// (This will at least be self-consistent for the life of the symbol.)
@@ -1296,18 +1334,18 @@ RegionOffset MemRegion::getAsOffset() const {
continue;
CharUnits BaseOffset;
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);
+ const ASTRecordLayout &Layout = R->getContext().getASTRecordLayout(Child);
if (BOR->isVirtual())
BaseOffset = Layout.getVBaseClassOffset(BOR->getDecl());
else
BaseOffset = Layout.getBaseClassOffset(BOR->getDecl());
// The base offset is in chars, not in bits.
- Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
+ Offset += BaseOffset.getQuantity() * R->getContext().getCharWidth();
break;
}
- case ElementRegionKind: {
- const ElementRegion *ER = cast<ElementRegion>(R);
+ case MemRegion::ElementRegionKind: {
+ const auto *ER = cast<ElementRegion>(R);
R = ER->getSuperRegion();
QualType EleTy = ER->getValueType();
@@ -1327,15 +1365,15 @@ RegionOffset MemRegion::getAsOffset() const {
int64_t i = CI->getValue().getSExtValue();
// This type size is in bits.
- Offset += i * getContext().getTypeSize(EleTy);
+ Offset += i * R->getContext().getTypeSize(EleTy);
} else {
// We cannot compute offset for non-concrete index.
SymbolicOffsetBase = R;
}
break;
}
- case FieldRegionKind: {
- const FieldRegion *FR = cast<FieldRegion>(R);
+ case MemRegion::FieldRegionKind: {
+ const auto *FR = cast<FieldRegion>(R);
R = FR->getSuperRegion();
const RecordDecl *RD = FR->getDecl()->getParent();
@@ -1360,7 +1398,7 @@ RegionOffset MemRegion::getAsOffset() const {
if (FR->getDecl() == *FI)
break;
}
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = R->getContext().getASTRecordLayout(RD);
// This is offset in bits.
Offset += Layout.getFieldOffset(idx);
break;
@@ -1374,6 +1412,12 @@ RegionOffset MemRegion::getAsOffset() const {
return RegionOffset(R, Offset);
}
+RegionOffset MemRegion::getAsOffset() const {
+ if (!cachedOffset)
+ cachedOffset = calculateOffset(this);
+ return *cachedOffset;
+}
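getAsOffset is now a thin memoizing wrapper: the first call runs calculateOffset and stores the result, later calls return the cached value. A self-contained sketch of the pattern, assuming cachedOffset is a mutable Optional member added to MemRegion in the header (std::optional stands in for llvm::Optional here):

    #include <optional>

    struct RegionOffset {
      long Offset;
    };

    class Region {
      // Assumed shape of the new member; the real one lives on MemRegion.
      mutable std::optional<RegionOffset> cachedOffset;

      RegionOffset calculateOffset() const {
        return RegionOffset{42}; // placeholder for the super-region walk
      }

    public:
      RegionOffset getAsOffset() const {
        if (!cachedOffset) // computed at most once per region
          cachedOffset = calculateOffset();
        return *cachedOffset;
      }
    };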
+
//===----------------------------------------------------------------------===//
// BlockDataRegion
//===----------------------------------------------------------------------===//
@@ -1419,13 +1463,14 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
BumpVectorContext BC(A);
- typedef BumpVector<const MemRegion*> VarVec;
- VarVec *BV = A.Allocate<VarVec>();
+ using VarVec = BumpVector<const MemRegion *>;
+
+ auto *BV = A.Allocate<VarVec>();
new (BV) VarVec(BC, NumBlockVars);
- VarVec *BVOriginal = A.Allocate<VarVec>();
+ auto *BVOriginal = A.Allocate<VarVec>();
new (BVOriginal) VarVec(BC, NumBlockVars);
- for (const VarDecl *VD : ReferencedBlockVars) {
+ for (const auto *VD : ReferencedBlockVars) {
const VarRegion *VR = nullptr;
const VarRegion *OriginalVR = nullptr;
std::tie(VR, OriginalVR) = getCaptureRegions(VD);
@@ -1443,14 +1488,13 @@ BlockDataRegion::referenced_vars_iterator
BlockDataRegion::referenced_vars_begin() const {
const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
- BumpVector<const MemRegion*> *Vec =
- static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+ auto *Vec = static_cast<BumpVector<const MemRegion *> *>(ReferencedVars);
if (Vec == (void*) 0x1)
return BlockDataRegion::referenced_vars_iterator(nullptr, nullptr);
- BumpVector<const MemRegion*> *VecOriginal =
- static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+ auto *VecOriginal =
+ static_cast<BumpVector<const MemRegion *> *>(OriginalVars);
return BlockDataRegion::referenced_vars_iterator(Vec->begin(),
VecOriginal->begin());
@@ -1460,14 +1504,13 @@ BlockDataRegion::referenced_vars_iterator
BlockDataRegion::referenced_vars_end() const {
const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
- BumpVector<const MemRegion*> *Vec =
- static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+ auto *Vec = static_cast<BumpVector<const MemRegion *> *>(ReferencedVars);
if (Vec == (void*) 0x1)
return BlockDataRegion::referenced_vars_iterator(nullptr, nullptr);
- BumpVector<const MemRegion*> *VecOriginal =
- static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+ auto *VecOriginal =
+ static_cast<BumpVector<const MemRegion *> *>(OriginalVars);
return BlockDataRegion::referenced_vars_iterator(Vec->end(),
VecOriginal->end());
@@ -1495,7 +1538,7 @@ void RegionAndSymbolInvalidationTraits::setTrait(SymbolRef Sym,
void RegionAndSymbolInvalidationTraits::setTrait(const MemRegion *MR,
InvalidationKinds IK) {
assert(MR);
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
setTrait(SR->getSymbol(), IK);
else
MRTraitsMap[MR] |= IK;
@@ -1515,7 +1558,7 @@ bool RegionAndSymbolInvalidationTraits::hasTrait(const MemRegion *MR,
if (!MR)
return false;
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
return hasTrait(SR->getSymbol(), IK);
const_region_iterator I = MRTraitsMap.find(MR);
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 669748c0127a..1b698ec5c086 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -1,4 +1,4 @@
-//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===//
+//===- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,26 +13,52 @@
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMap.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstring>
+#include <memory>
+#include <utility>
+#include <vector>
using namespace clang;
using namespace ento;
bool PathDiagnosticMacroPiece::containsEvent() const {
- for (auto &P : subPieces) {
+ for (const auto &P : subPieces) {
if (isa<PathDiagnosticEventPiece>(*P))
return true;
- if (auto *MP = dyn_cast<PathDiagnosticMacroPiece>(P.get()))
+ if (const auto *MP = dyn_cast<PathDiagnosticMacroPiece>(P.get()))
if (MP->containsEvent())
return true;
}
@@ -43,23 +69,27 @@ static StringRef StripTrailingDots(StringRef s) {
for (StringRef::size_type i = s.size(); i != 0; --i)
if (s[i - 1] != '.')
return s.substr(0, i);
- return "";
+ return {};
}
PathDiagnosticPiece::PathDiagnosticPiece(StringRef s,
Kind k, DisplayHint hint)
- : str(StripTrailingDots(s)), kind(k), Hint(hint),
- LastInMainSourceFile(false) {}
+ : str(StripTrailingDots(s)), kind(k), Hint(hint) {}
PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
- : kind(k), Hint(hint), LastInMainSourceFile(false) {}
+ : kind(k), Hint(hint) {}
-PathDiagnosticPiece::~PathDiagnosticPiece() {}
-PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
-PathDiagnosticCallPiece::~PathDiagnosticCallPiece() {}
-PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
-PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
-PathDiagnosticNotePiece::~PathDiagnosticNotePiece() {}
+PathDiagnosticPiece::~PathDiagnosticPiece() = default;
+
+PathDiagnosticEventPiece::~PathDiagnosticEventPiece() = default;
+
+PathDiagnosticCallPiece::~PathDiagnosticCallPiece() = default;
+
+PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() = default;
+
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() = default;
+
+PathDiagnosticNotePiece::~PathDiagnosticNotePiece() = default;
void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
bool ShouldFlattenMacros) const {
@@ -96,22 +126,20 @@ void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
}
}
-PathDiagnostic::~PathDiagnostic() {}
-
-PathDiagnostic::PathDiagnostic(StringRef CheckName, const Decl *declWithIssue,
- StringRef bugtype, StringRef verboseDesc,
- StringRef shortDesc, StringRef category,
- PathDiagnosticLocation LocationToUnique,
- const Decl *DeclToUnique)
- : CheckName(CheckName),
- DeclWithIssue(declWithIssue),
- BugType(StripTrailingDots(bugtype)),
- VerboseDesc(StripTrailingDots(verboseDesc)),
- ShortDesc(StripTrailingDots(shortDesc)),
- Category(StripTrailingDots(category)),
- UniqueingLoc(LocationToUnique),
- UniqueingDecl(DeclToUnique),
- path(pathImpl) {}
+PathDiagnostic::~PathDiagnostic() = default;
+
+PathDiagnostic::PathDiagnostic(
+ StringRef CheckName, const Decl *declWithIssue, StringRef bugtype,
+ StringRef verboseDesc, StringRef shortDesc, StringRef category,
+ PathDiagnosticLocation LocationToUnique, const Decl *DeclToUnique,
+ std::unique_ptr<FilesToLineNumsMap> ExecutedLines)
+ : CheckName(CheckName), DeclWithIssue(declWithIssue),
+ BugType(StripTrailingDots(bugtype)),
+ VerboseDesc(StripTrailingDots(verboseDesc)),
+ ShortDesc(StripTrailingDots(shortDesc)),
+ Category(StripTrailingDots(category)), UniqueingLoc(LocationToUnique),
+ UniqueingDecl(DeclToUnique), ExecutedLines(std::move(ExecutedLines)),
+ path(pathImpl) {}
static PathDiagnosticCallPiece *
getFirstStackedCallToHeaderFile(PathDiagnosticCallPiece *CP,
@@ -122,11 +150,11 @@ getFirstStackedCallToHeaderFile(PathDiagnosticCallPiece *CP,
if (CallLoc.isMacroID())
return nullptr;
- assert(SMgr.isInMainFile(CallLoc) &&
- "The call piece should be in the main file.");
+ assert(AnalysisManager::isInCodeFile(CallLoc, SMgr) &&
+ "The call piece should not be in a header file.");
// Check if CP represents a path through a function outside of the main file.
- if (!SMgr.isInMainFile(CP->callEnterWithin.asLocation()))
+ if (!AnalysisManager::isInCodeFile(CP->callEnterWithin.asLocation(), SMgr))
return CP;
const PathPieces &Path = CP->path;
@@ -135,10 +163,8 @@ getFirstStackedCallToHeaderFile(PathDiagnosticCallPiece *CP,
// Check if the last piece in the callee path is a call to a function outside
// of the main file.
- if (PathDiagnosticCallPiece *CPInner =
- dyn_cast<PathDiagnosticCallPiece>(Path.back().get())) {
+ if (auto *CPInner = dyn_cast<PathDiagnosticCallPiece>(Path.back().get()))
return getFirstStackedCallToHeaderFile(CPInner, SMgr);
- }
// Otherwise, the last piece is in the main file.
return nullptr;
@@ -154,14 +180,14 @@ void PathDiagnostic::resetDiagnosticLocationToMainFile() {
// We only need to check if the report ends inside headers, if the last piece
// is a call piece.
- if (PathDiagnosticCallPiece *CP = dyn_cast<PathDiagnosticCallPiece>(LastP)) {
+ if (auto *CP = dyn_cast<PathDiagnosticCallPiece>(LastP)) {
CP = getFirstStackedCallToHeaderFile(CP, SMgr);
if (CP) {
// Mark the piece.
CP->setAsLastInMainSourceFile();
// Update the path diagnostic message.
- const NamedDecl *ND = dyn_cast<NamedDecl>(CP->getCallee());
+ const auto *ND = dyn_cast<NamedDecl>(CP->getCallee());
if (ND) {
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
@@ -178,14 +204,12 @@ void PathDiagnostic::resetDiagnosticLocationToMainFile() {
}
}
-void PathDiagnosticConsumer::anchor() { }
+void PathDiagnosticConsumer::anchor() {}
PathDiagnosticConsumer::~PathDiagnosticConsumer() {
// Delete the contents of the FoldingSet if it isn't empty already.
- for (llvm::FoldingSet<PathDiagnostic>::iterator it =
- Diags.begin(), et = Diags.end() ; it != et ; ++it) {
- delete &*it;
- }
+ for (auto &Diag : Diags)
+ delete &Diag;
}
void PathDiagnosticConsumer::HandlePathDiagnostic(
@@ -216,9 +240,8 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
while (!WorkList.empty()) {
const PathPieces &path = *WorkList.pop_back_val();
- for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
- ++I) {
- const PathDiagnosticPiece *piece = I->get();
+ for (const auto &I : path) {
+ const PathDiagnosticPiece *piece = I.get();
FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
if (FID.isInvalid()) {
@@ -230,28 +253,23 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
// Check the source ranges.
ArrayRef<SourceRange> Ranges = piece->getRanges();
- for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end(); I != E; ++I) {
- SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
+ for (const auto &I : Ranges) {
+ SourceLocation L = SMgr.getExpansionLoc(I.getBegin());
if (!L.isFileID() || SMgr.getFileID(L) != FID) {
llvm::errs() << warning.str();
return;
}
- L = SMgr.getExpansionLoc(I->getEnd());
+ L = SMgr.getExpansionLoc(I.getEnd());
if (!L.isFileID() || SMgr.getFileID(L) != FID) {
llvm::errs() << warning.str();
return;
}
}
- if (const PathDiagnosticCallPiece *call =
- dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ if (const auto *call = dyn_cast<PathDiagnosticCallPiece>(piece))
WorkList.push_back(&call->path);
- }
- else if (const PathDiagnosticMacroPiece *macro =
- dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ else if (const auto *macro = dyn_cast<PathDiagnosticMacroPiece>(piece))
WorkList.push_back(&macro->subPieces);
- }
}
}
@@ -381,11 +399,29 @@ static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
return None;
}
+static bool compareCrossTUSourceLocs(FullSourceLoc XL, FullSourceLoc YL) {
+ std::pair<FileID, unsigned> XOffs = XL.getDecomposedLoc();
+ std::pair<FileID, unsigned> YOffs = YL.getDecomposedLoc();
+ const SourceManager &SM = XL.getManager();
+ std::pair<bool, bool> InSameTU = SM.isInTheSameTranslationUnit(XOffs, YOffs);
+ if (InSameTU.first)
+ return XL.isBeforeInTranslationUnitThan(YL);
+ const FileEntry *XFE = SM.getFileEntryForID(XL.getSpellingLoc().getFileID());
+ const FileEntry *YFE = SM.getFileEntryForID(YL.getSpellingLoc().getFileID());
+ if (!XFE || !YFE)
+ return XFE && !YFE;
+ int NameCmp = XFE->getName().compare(YFE->getName());
+ if (NameCmp != 0)
+ return NameCmp == -1;
+ // Last resort: Compare raw file IDs that are possibly expansions.
+ return XL.getFileID() < YL.getFileID();
+}
+
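compareCrossTUSourceLocs keeps report ordering deterministic when the two locations do not share a translation unit, as happens with cross-TU analysis: same-TU pairs use the precise intra-TU order, locations backed by a real FileEntry sort before those without one, file names decide next, and raw FileIDs break the final tie. Note that StringRef::compare returns -1, 0, or 1, so the 'NameCmp == -1' test above is exact. A toy version of the cross-TU branch of that ladder (fields and names invented; sketch only):

    #include <string>

    struct Loc {
      bool HasFileEntry;
      std::string FileName;
      int RawFileID;
    };

    bool crossTUBefore(const Loc &A, const Loc &B) {
      if (!A.HasFileEntry || !B.HasFileEntry)
        return A.HasFileEntry && !B.HasFileEntry; // real files sort first
      if (int Cmp = A.FileName.compare(B.FileName))
        return Cmp < 0;                           // stable order by file name
      return A.RawFileID < B.RawFileID;           // last resort
    }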
static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
FullSourceLoc XL = X.getLocation().asLocation();
FullSourceLoc YL = Y.getLocation().asLocation();
if (XL != YL)
- return XL.isBeforeInTranslationUnitThan(YL);
+ return compareCrossTUSourceLocs(XL, YL);
if (X.getBugType() != Y.getBugType())
return X.getBugType() < Y.getBugType();
if (X.getCategory() != Y.getCategory())
@@ -405,7 +441,8 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
SourceLocation YDL = YD->getLocation();
if (XDL != YDL) {
const SourceManager &SM = XL.getManager();
- return SM.isBeforeInTranslationUnit(XDL, YDL);
+ return compareCrossTUSourceLocs(FullSourceLoc(XDL, SM),
+ FullSourceLoc(YDL, SM));
}
}
PathDiagnostic::meta_iterator XI = X.meta_begin(), XE = X.meta_end();
@@ -429,11 +466,8 @@ void PathDiagnosticConsumer::FlushDiagnostics(
flushed = true;
std::vector<const PathDiagnostic *> BatchDiags;
- for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
- et = Diags.end(); it != et; ++it) {
- const PathDiagnostic *D = &*it;
- BatchDiags.push_back(D);
- }
+ for (const auto &D : Diags)
+ BatchDiags.push_back(&D);
// Sort the diagnostics so that they are always emitted in a deterministic
// order.
@@ -450,11 +484,8 @@ void PathDiagnosticConsumer::FlushDiagnostics(
FlushDiagnosticsImpl(BatchDiags, Files);
// Delete the flushed diagnostics.
- for (std::vector<const PathDiagnostic *>::iterator it = BatchDiags.begin(),
- et = BatchDiags.end(); it != et; ++it) {
- const PathDiagnostic *D = *it;
+ for (const auto D : BatchDiags)
delete D;
- }
// Clear out the FoldingSet.
Diags.clear();
@@ -553,6 +584,8 @@ getLocationForCaller(const StackFrameContext *SFC,
switch (Source.getKind()) {
case CFGElement::Statement:
+ case CFGElement::Constructor:
+ case CFGElement::CXXRecordTypedCall:
return PathDiagnosticLocation(Source.castAs<CFGStmt>().getStmt(),
SM, CallerCtx);
case CFGElement::Initializer: {
@@ -576,8 +609,20 @@ getLocationForCaller(const StackFrameContext *SFC,
return PathDiagnosticLocation::createEnd(CallerBody, SM, CallerCtx);
return PathDiagnosticLocation::create(CallerInfo->getDecl(), SM);
}
- case CFGElement::TemporaryDtor:
- case CFGElement::NewAllocator:
+ case CFGElement::NewAllocator: {
+ const CFGNewAllocator &Alloc = Source.castAs<CFGNewAllocator>();
+ return PathDiagnosticLocation(Alloc.getAllocatorExpr(), SM, CallerCtx);
+ }
+ case CFGElement::TemporaryDtor: {
+    // Temporary destructors are for temporaries. They die immediately, around
+    // the location of the CXXBindTemporaryExpr. If a temporary is
+    // lifetime-extended, it is dealt with via an AutomaticObjectDtor instead.
+ const auto &Dtor = Source.castAs<CFGTemporaryDtor>();
+ return PathDiagnosticLocation::createEnd(Dtor.getBindTemporaryExpr(), SM,
+ CallerCtx);
+ }
+ case CFGElement::ScopeBegin:
+ case CFGElement::ScopeEnd:
llvm_unreachable("not yet implemented!");
case CFGElement::LifetimeEnds:
case CFGElement::LoopExit:
@@ -605,7 +650,7 @@ PathDiagnosticLocation
PathDiagnosticLocation::createEnd(const Stmt *S,
const SourceManager &SM,
LocationOrAnalysisDeclContext LAC) {
- if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S))
+ if (const auto *CS = dyn_cast<CompoundStmt>(S))
return createEndBrace(CS, SM);
return PathDiagnosticLocation(getValidSourceLocation(S, LAC, /*End=*/true),
SM, SingleLocK);
@@ -624,7 +669,6 @@ PathDiagnosticLocation::createConditionalColonLoc(
return PathDiagnosticLocation(CO->getColonLoc(), SM, SingleLocK);
}
-
PathDiagnosticLocation
PathDiagnosticLocation::createMemberLoc(const MemberExpr *ME,
const SourceManager &SM) {
@@ -649,8 +693,7 @@ PathDiagnosticLocation
PathDiagnosticLocation::createDeclBegin(const LocationContext *LC,
const SourceManager &SM) {
// FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
- if (const CompoundStmt *CS =
- dyn_cast_or_null<CompoundStmt>(LC->getDecl()->getBody()))
+ if (const auto *CS = dyn_cast_or_null<CompoundStmt>(LC->getDecl()->getBody()))
if (!CS->body_empty()) {
SourceLocation Loc = (*CS->body_begin())->getLocStart();
return PathDiagnosticLocation(Loc, SM, SingleLocK);
@@ -741,6 +784,8 @@ const Stmt *PathDiagnosticLocation::getStmt(const ExplodedNode *N) {
return CEE->getCalleeContext()->getCallSite();
if (Optional<PostInitializer> PIPP = P.getAs<PostInitializer>())
return PIPP->getInitializer()->getInit();
+ if (Optional<CallExitBegin> CEB = P.getAs<CallExitBegin>())
+ return CEB->getReturnStmt();
return nullptr;
}
@@ -790,11 +835,11 @@ PathDiagnosticLocation
const LocationContext *LC = N->getLocationContext();
// For member expressions, return the location of the '.' or '->'.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
+ if (const auto *ME = dyn_cast<MemberExpr>(S))
return PathDiagnosticLocation::createMemberLoc(ME, SM);
// For binary operators, return the location of the operator.
- if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+ if (const auto *B = dyn_cast<BinaryOperator>(S))
return PathDiagnosticLocation::createOperatorLoc(B, SM);
if (P.getAs<PostStmtPurgeDeadSymbols>())
@@ -856,7 +901,7 @@ PathDiagnosticRange
default:
break;
case Stmt::DeclStmtClass: {
- const DeclStmt *DS = cast<DeclStmt>(S);
+ const auto *DS = cast<DeclStmt>(S);
if (DS->isSingleDecl()) {
// Should always be the case, but we'll be defensive.
return SourceRange(DS->getLocStart(),
@@ -886,9 +931,9 @@ PathDiagnosticRange
break;
}
case DeclK:
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSourceRange();
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (Stmt *Body = FD->getBody())
return Body->getSourceRange();
}
@@ -898,7 +943,7 @@ PathDiagnosticRange
}
}
- return SourceRange(Loc,Loc);
+ return SourceRange(Loc, Loc);
}
void PathDiagnosticLocation::flatten() {
@@ -954,17 +999,55 @@ void PathDiagnosticCallPiece::setCallee(const CallEnter &CE,
// non-autosynthesized callbacks.
// Unless set here, the IsCalleeAnAutosynthesizedPropertyAccessor flag
// defaults to false.
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(Callee))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(Callee))
IsCalleeAnAutosynthesizedPropertyAccessor = (
MD->isPropertyAccessor() &&
CalleeCtx->getAnalysisDeclContext()->isBodyAutosynthesized());
}
-static inline void describeClass(raw_ostream &Out, const CXXRecordDecl *D,
- StringRef Prefix = StringRef()) {
+static void describeTemplateParameters(raw_ostream &Out,
+ const ArrayRef<TemplateArgument> TAList,
+ const LangOptions &LO,
+ StringRef Prefix = StringRef(),
+ StringRef Postfix = StringRef());
+
+static void describeTemplateParameter(raw_ostream &Out,
+ const TemplateArgument &TArg,
+ const LangOptions &LO) {
+ if (TArg.getKind() == TemplateArgument::ArgKind::Pack) {
+ describeTemplateParameters(Out, TArg.getPackAsArray(), LO);
+ } else {
+ TArg.print(PrintingPolicy(LO), Out);
+ }
+}
+
+static void describeTemplateParameters(raw_ostream &Out,
+ const ArrayRef<TemplateArgument> TAList,
+ const LangOptions &LO,
+ StringRef Prefix, StringRef Postfix) {
+ if (TAList.empty())
+ return;
+
+ Out << Prefix;
+ for (int I = 0, Last = TAList.size() - 1; I != Last; ++I) {
+ describeTemplateParameter(Out, TAList[I], LO);
+ Out << ", ";
+ }
+ describeTemplateParameter(Out, TAList[TAList.size() - 1], LO);
+ Out << Postfix;
+}
+
+static void describeClass(raw_ostream &Out, const CXXRecordDecl *D,
+ StringRef Prefix = StringRef()) {
if (!D->getIdentifier())
return;
- Out << Prefix << '\'' << *D << '\'';
+ Out << Prefix << '\'' << *D;
+  if (const auto *T = dyn_cast<ClassTemplateSpecializationDecl>(D))
+ describeTemplateParameters(Out, T->getTemplateArgs().asArray(),
+ D->getASTContext().getLangOpts(), "<", ">");
+
+ Out << '\'';
}
static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
@@ -979,7 +1062,7 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
return ExtendedDescription;
}
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
Out << Prefix;
if (ExtendedDescription && !MD->isUserProvided()) {
if (MD->isExplicitlyDefaulted())
@@ -988,7 +1071,7 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
Out << "implicit ";
}
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
if (CD->isDefaultConstructor())
Out << "default ";
else if (CD->isCopyConstructor())
@@ -998,7 +1081,6 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
Out << "constructor";
describeClass(Out, MD->getParent(), " for ");
-
} else if (isa<CXXDestructorDecl>(MD)) {
if (!MD->isUserProvided()) {
Out << "destructor";
@@ -1007,15 +1089,12 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
// Use ~Foo for explicitly-written destructors.
Out << "'" << *MD << "'";
}
-
} else if (MD->isCopyAssignmentOperator()) {
Out << "copy assignment operator";
describeClass(Out, MD->getParent(), " for ");
-
} else if (MD->isMoveAssignmentOperator()) {
Out << "move assignment operator";
describeClass(Out, MD->getParent(), " for ");
-
} else {
if (MD->getParent()->getIdentifier())
Out << "'" << *MD->getParent() << "::" << *MD << "'";
@@ -1026,7 +1105,16 @@ static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
return true;
}
- Out << Prefix << '\'' << cast<NamedDecl>(*D) << '\'';
+ Out << Prefix << '\'' << cast<NamedDecl>(*D);
+
+ // Adding template parameters.
+  if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ if (const TemplateArgumentList *TAList =
+ FD->getTemplateSpecializationArgs())
+ describeTemplateParameters(Out, TAList->asArray(),
+ FD->getASTContext().getLangOpts(), "<", ">");
+
+ Out << '\'';
return true;
}
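With template arguments now printed by describeClass and describeCodeDecl, call notes distinguish specializations instead of collapsing them to a bare name. A hypothetical example of the effect (the exact note wording comes from the call-piece code, not from this hunk):

    template <typename T> T twice(T V) { return V + V; }

    void caller() {
      // A call note for this event can now read "Calling 'twice<int>'"
      // instead of just "Calling 'twice'"; class template specializations
      // get the same treatment, e.g. "'Buffer<char, 64>'".
      twice<int>(21);
    }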
@@ -1055,7 +1143,7 @@ PathDiagnosticCallPiece::getCallEnterWithinCallerEvent() const {
return nullptr;
if (Callee->isImplicit() || !Callee->hasBody())
return nullptr;
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee))
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(Callee))
if (MD->isDefaulted())
return nullptr;
@@ -1095,13 +1183,10 @@ PathDiagnosticCallPiece::getCallExitEvent() const {
}
static void compute_path_size(const PathPieces &pieces, unsigned &size) {
- for (PathPieces::const_iterator it = pieces.begin(),
- et = pieces.end(); it != et; ++it) {
- const PathDiagnosticPiece *piece = it->get();
- if (const PathDiagnosticCallPiece *cp =
- dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ for (const auto &I : pieces) {
+ const PathDiagnosticPiece *piece = I.get();
+ if (const auto *cp = dyn_cast<PathDiagnosticCallPiece>(piece))
compute_path_size(cp->path, size);
- }
else
++size;
}
@@ -1129,19 +1214,16 @@ void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
// FIXME: Add profiling support for code hints.
ID.AddInteger((unsigned) getDisplayHint());
ArrayRef<SourceRange> Ranges = getRanges();
- for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
- I != E; ++I) {
- ID.AddInteger(I->getBegin().getRawEncoding());
- ID.AddInteger(I->getEnd().getRawEncoding());
+ for (const auto &I : Ranges) {
+ ID.AddInteger(I.getBegin().getRawEncoding());
+ ID.AddInteger(I.getEnd().getRawEncoding());
}
}
void PathDiagnosticCallPiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticPiece::Profile(ID);
- for (PathPieces::const_iterator it = path.begin(),
- et = path.end(); it != et; ++it) {
- ID.Add(**it);
- }
+ for (const auto &I : path)
+ ID.Add(*I);
}
void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
@@ -1151,15 +1233,14 @@ void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticPiece::Profile(ID);
- for (const_iterator I = begin(), E = end(); I != E; ++I)
- ID.Add(*I);
+ for (const auto &I : *this)
+ ID.Add(I);
}
void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticSpotPiece::Profile(ID);
- for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
- I != E; ++I)
- ID.Add(**I);
+ for (const auto &I : subPieces)
+ ID.Add(*I);
}
void PathDiagnosticNotePiece::Profile(llvm::FoldingSetNodeID &ID) const {
@@ -1175,34 +1256,32 @@ void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
void PathDiagnostic::FullProfile(llvm::FoldingSetNodeID &ID) const {
Profile(ID);
- for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E; ++I)
- ID.Add(**I);
+ for (const auto &I : path)
+ ID.Add(*I);
for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
ID.AddString(*I);
}
-StackHintGenerator::~StackHintGenerator() {}
+StackHintGenerator::~StackHintGenerator() = default;
std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
+ if (!N)
+ return getMessageForSymbolNotFound();
+
ProgramPoint P = N->getLocation();
CallExitEnd CExit = P.castAs<CallExitEnd>();
// FIXME: Use CallEvent to abstract this over all calls.
const Stmt *CallSite = CExit.getCalleeContext()->getCallSite();
- const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite);
+ const auto *CE = dyn_cast_or_null<CallExpr>(CallSite);
if (!CE)
- return "";
-
- if (!N)
- return getMessageForSymbolNotFound();
+ return {};
// Check if one of the parameters is set to the interesting symbol.
- ProgramStateRef State = N->getState();
- const LocationContext *LCtx = N->getLocationContext();
unsigned ArgIndex = 0;
for (CallExpr::const_arg_iterator I = CE->arg_begin(),
E = CE->arg_end(); I != E; ++I, ++ArgIndex){
- SVal SV = State->getSVal(*I, LCtx);
+ SVal SV = N->getSVal(*I);
// Check if the variable corresponding to the symbol is passed by value.
SymbolRef AS = SV.getAsLocSymbol();
@@ -1212,7 +1291,10 @@ std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
// Check if the parameter is a pointer to the symbol.
if (Optional<loc::MemRegionVal> Reg = SV.getAs<loc::MemRegionVal>()) {
- SVal PSV = State->getSVal(Reg->getRegion());
+ // Do not attempt to dereference void*.
+ if ((*I)->getType()->isVoidPointerType())
+ continue;
+ SVal PSV = N->getState()->getSVal(Reg->getRegion());
SymbolRef AS = PSV.getAsLocSymbol();
if (AS == Sym) {
return getMessageForArg(*I, ArgIndex);
@@ -1221,7 +1303,7 @@ std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
}
// Check if we are returning the interesting symbol.
- SVal SV = State->getSVal(CE, LCtx);
+ SVal SV = N->getSVal(CE);
SymbolRef RetSym = SV.getAsLocSymbol();
if (RetSym == Sym) {
return getMessageForReturn(CE);
@@ -1243,3 +1325,84 @@ std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE,
return os.str();
}
+
+LLVM_DUMP_METHOD void PathPieces::dump() const {
+ unsigned index = 0;
+ for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
+ llvm::errs() << "[" << index++ << "] ";
+ (*I)->dump();
+ llvm::errs() << "\n";
+ }
+}
+
+LLVM_DUMP_METHOD void PathDiagnosticCallPiece::dump() const {
+ llvm::errs() << "CALL\n--------------\n";
+
+ if (const Stmt *SLoc = getLocation().getStmtOrNull())
+ SLoc->dump();
+ else if (const auto *ND = dyn_cast_or_null<NamedDecl>(getCallee()))
+ llvm::errs() << *ND << "\n";
+ else
+ getLocation().dump();
+}
+
+LLVM_DUMP_METHOD void PathDiagnosticEventPiece::dump() const {
+ llvm::errs() << "EVENT\n--------------\n";
+ llvm::errs() << getString() << "\n";
+ llvm::errs() << " ---- at ----\n";
+ getLocation().dump();
+}
+
+LLVM_DUMP_METHOD void PathDiagnosticControlFlowPiece::dump() const {
+ llvm::errs() << "CONTROL\n--------------\n";
+ getStartLocation().dump();
+ llvm::errs() << " ---- to ----\n";
+ getEndLocation().dump();
+}
+
+LLVM_DUMP_METHOD void PathDiagnosticMacroPiece::dump() const {
+ llvm::errs() << "MACRO\n--------------\n";
+ // FIXME: Print which macro is being invoked.
+}
+
+LLVM_DUMP_METHOD void PathDiagnosticNotePiece::dump() const {
+ llvm::errs() << "NOTE\n--------------\n";
+ llvm::errs() << getString() << "\n";
+ llvm::errs() << " ---- at ----\n";
+ getLocation().dump();
+}
+
+LLVM_DUMP_METHOD void PathDiagnosticLocation::dump() const {
+ if (!isValid()) {
+ llvm::errs() << "<INVALID>\n";
+ return;
+ }
+
+ switch (K) {
+ case RangeK:
+ // FIXME: actually print the range.
+ llvm::errs() << "<range>\n";
+ break;
+ case SingleLocK:
+ asLocation().dump();
+ llvm::errs() << "\n";
+ break;
+ case StmtK:
+ if (S)
+ S->dump();
+ else
+ llvm::errs() << "<NULL STMT>\n";
+ break;
+ case DeclK:
+ if (const auto *ND = dyn_cast_or_null<NamedDecl>(D))
+ llvm::errs() << *ND << "\n";
+ else if (isa<BlockDecl>(D))
+ // FIXME: Make this nicer.
+ llvm::errs() << "<block>\n";
+ else if (D)
+ llvm::errs() << "<unknown decl>\n";
+ else
+ llvm::errs() << "<NULL DECL>\n";
+ break;
+ }
+}
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 66812ed8ff5b..cfe780db9ec9 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -16,9 +16,12 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
using namespace clang;
@@ -30,6 +33,7 @@ namespace {
const std::string OutputFile;
const LangOptions &LangOpts;
const bool SupportsCrossFileDiagnostics;
+ const bool SerializeStatistics;
public:
PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
const std::string& prefix,
@@ -61,7 +65,8 @@ PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
bool supportsMultipleFiles)
: OutputFile(output),
LangOpts(LO),
- SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+ SupportsCrossFileDiagnostics(supportsMultipleFiles),
+ SerializeStatistics(AnalyzerOpts.shouldSerializeStats()) {}
void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
PathDiagnosticConsumers &C,
@@ -79,6 +84,41 @@ void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
PP.getLangOpts(), true));
}
+static void EmitRanges(raw_ostream &o,
+ const ArrayRef<SourceRange> Ranges,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent) {
+
+ if (Ranges.empty())
+ return;
+
+ Indent(o, indent) << "<key>ranges</key>\n";
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (auto &R : Ranges)
+ EmitRange(o, SM,
+ Lexer::getAsCharRange(SM.getExpansionRange(R), SM, LangOpts),
+ FM, indent + 1);
+ --indent;
+ Indent(o, indent) << "</array>\n";
+}
+
+static void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent) {
+ // Output the text.
+ assert(!Message.empty());
+ Indent(o, indent) << "<key>extended_message</key>\n";
+ Indent(o, indent);
+ EmitString(o, Message) << '\n';
+
+ // Output the short text.
+ // FIXME: Really use a short string.
+ Indent(o, indent) << "<key>message</key>\n";
+ Indent(o, indent);
+ EmitString(o, Message) << '\n';
+}
+
static void ReportControlFlow(raw_ostream &o,
const PathDiagnosticControlFlowPiece& P,
const FIDMap& FM,
@@ -133,7 +173,7 @@ static void ReportControlFlow(raw_ostream &o,
Indent(o, indent) << "</dict>\n";
}
-static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
+static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
const FIDMap& FM,
const SourceManager &SM,
const LangOptions &LangOpts,
@@ -158,34 +198,14 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
// Output the ranges (if any).
ArrayRef<SourceRange> Ranges = P.getRanges();
-
- if (!Ranges.empty()) {
- Indent(o, indent) << "<key>ranges</key>\n";
- Indent(o, indent) << "<array>\n";
- ++indent;
- for (auto &R : Ranges)
- EmitRange(o, SM,
- Lexer::getAsCharRange(SM.getExpansionRange(R), SM, LangOpts),
- FM, indent + 1);
- --indent;
- Indent(o, indent) << "</array>\n";
- }
+ EmitRanges(o, Ranges, FM, SM, LangOpts, indent);
// Output the call depth.
Indent(o, indent) << "<key>depth</key>";
EmitInteger(o, depth) << '\n';
// Output the text.
- assert(!P.getString().empty());
- Indent(o, indent) << "<key>extended_message</key>\n";
- Indent(o, indent);
- EmitString(o, P.getString()) << '\n';
-
- // Output the short text.
- // FIXME: Really use a short string.
- Indent(o, indent) << "<key>message</key>\n";
- Indent(o, indent);
- EmitString(o, P.getString()) << '\n';
+ EmitMessage(o, P.getString(), indent);
// Finish up.
--indent;
@@ -241,6 +261,34 @@ static void ReportMacro(raw_ostream &o,
}
}
+static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ // Output the location.
+ FullSourceLoc L = P.getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, L, FM, indent);
+
+ // Output the ranges (if any).
+ ArrayRef<SourceRange> Ranges = P.getRanges();
+ EmitRanges(o, Ranges, FM, SM, LangOpts, indent);
+
+ // Output the text.
+ EmitMessage(o, P.getString(), indent);
+
+ // Finish up.
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+}
+
static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
const FIDMap& FM, const SourceManager &SM,
const LangOptions &LangOpts) {
@@ -266,7 +314,7 @@ static void ReportPiece(raw_ostream &o,
indent, depth);
break;
case PathDiagnosticPiece::Event:
- ReportEvent(o, cast<PathDiagnosticSpotPiece>(P), FM, SM, LangOpts,
+ ReportEvent(o, cast<PathDiagnosticEventPiece>(P), FM, SM, LangOpts,
indent, depth, isKeyEvent);
break;
case PathDiagnosticPiece::Macro:
@@ -274,7 +322,8 @@ static void ReportPiece(raw_ostream &o,
indent, depth);
break;
case PathDiagnosticPiece::Note:
- // FIXME: Extend the plist format to support those.
+ ReportNote(o, cast<PathDiagnosticNotePiece>(P), FM, SM, LangOpts,
+ indent, depth);
break;
}
}
@@ -359,15 +408,39 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
DE = Diags.end(); DI!=DE; ++DI) {
- o << " <dict>\n"
- " <key>path</key>\n";
+ o << " <dict>\n";
const PathDiagnostic *D = *DI;
+ const PathPieces &PP = D->path;
+
+ assert(std::is_partitioned(
+ PP.begin(), PP.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &E)
+ { return E->getKind() == PathDiagnosticPiece::Note; }) &&
+ "PathDiagnostic is not partitioned so that notes precede the rest");
+
+ PathPieces::const_iterator FirstNonNote = std::partition_point(
+ PP.begin(), PP.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &E)
+ { return E->getKind() == PathDiagnosticPiece::Note; });
+
+ PathPieces::const_iterator I = PP.begin();
+
+ if (FirstNonNote != PP.begin()) {
+ o << " <key>notes</key>\n"
+ " <array>\n";
+
+ for (; I != FirstNonNote; ++I)
+ ReportDiag(o, **I, FM, *SM, LangOpts);
+
+ o << " </array>\n";
+ }
+
+ o << " <key>path</key>\n";
o << " <array>\n";
- for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
- I != E; ++I)
+ for (PathPieces::const_iterator E = PP.end(); I != E; ++I)
ReportDiag(o, **I, FM, *SM, LangOpts);
o << " </array>\n";
@@ -484,6 +557,15 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " </array>\n";
+ if (llvm::AreStatisticsEnabled() && SerializeStatistics) {
+ o << " <key>statistics</key>\n";
+ std::string stats;
+ llvm::raw_string_ostream os(stats);
+ llvm::PrintStatisticsJSON(os);
+ os.flush();
+ EmitString(o, html::EscapeText(stats)) << '\n';
+ }
+
// Finish.
o << "</dict>\n</plist>";
}
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 5b6b7339697f..2b401607293b 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -125,16 +126,27 @@ ProgramStateRef ProgramState::bindLoc(Loc LV,
return newState;
}
-ProgramStateRef ProgramState::bindDefault(SVal loc,
- SVal V,
- const LocationContext *LCtx) const {
+ProgramStateRef
+ProgramState::bindDefaultInitial(SVal loc, SVal V,
+ const LocationContext *LCtx) const {
+ ProgramStateManager &Mgr = getStateManager();
+ const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
+ const StoreRef &newStore = Mgr.StoreMgr->BindDefaultInitial(getStore(), R, V);
+ ProgramStateRef new_state = makeWithStore(newStore);
+ return Mgr.getOwningEngine()
+ ? Mgr.getOwningEngine()->processRegionChange(new_state, R, LCtx)
+ : new_state;
+}
+
+ProgramStateRef
+ProgramState::bindDefaultZero(SVal loc, const LocationContext *LCtx) const {
ProgramStateManager &Mgr = getStateManager();
const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
- const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
+ const StoreRef &newStore = Mgr.StoreMgr->BindDefaultZero(getStore(), R);
ProgramStateRef new_state = makeWithStore(newStore);
- return Mgr.getOwningEngine() ?
- Mgr.getOwningEngine()->processRegionChange(new_state, R, LCtx) :
- new_state;
+ return Mgr.getOwningEngine()
+ ? Mgr.getOwningEngine()->processRegionChange(new_state, R, LCtx)
+ : new_state;
}
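
A hedged sketch of the distinction this split introduces; the analyzed code below is hypothetical, and paths like these may exercise either binding:

    // Hypothetical analyzed code, for illustration only:
    struct S { int a, b; };

    void f() {
      S s1;        // bindDefaultInitial: the region is fresh, which the new
                   // "Double initialization!" assert in RegionStore enforces
      S s2 = S();  // value-initialization: bindDefaultZero wipes sub-region
                   // bindings and binds a zero default value instead
    }
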
typedef ArrayRef<const MemRegion *> RegionList;
@@ -254,7 +266,7 @@ SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
}
SVal ProgramState::getSVal(Loc location, QualType T) const {
- SVal V = getRawSVal(cast<Loc>(location), T);
+ SVal V = getRawSVal(location, T);
// If 'V' is a symbolic value that is *perfectly* constrained to
// be a constant value, use that value instead to lessen the burden
@@ -324,9 +336,8 @@ ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
// Get the offset: the minimum value of the array index type.
BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
- // FIXME: This should be using ValueManager::ArrayindexTy...somehow.
if (indexTy.isNull())
- indexTy = Ctx.IntTy;
+ indexTy = svalBuilder.getArrayIndexType();
nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
// Adjust the index.
@@ -354,6 +365,17 @@ ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
return CM.assume(this, inBound.castAs<DefinedSVal>(), Assumption);
}
+ConditionTruthVal ProgramState::isNonNull(SVal V) const {
+ ConditionTruthVal IsNull = isNull(V);
+ if (IsNull.isUnderconstrained())
+ return IsNull;
+ return ConditionTruthVal(!IsNull.getValue());
+}
+
+ConditionTruthVal ProgramState::areEqual(SVal Lhs, SVal Rhs) const {
+ return stateMgr->getSValBuilder().areEqual(this, Lhs, Rhs);
+}
+
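
A sketch of how checker code might use the two new helpers; the function and variable names are invented, and the fragment assumes the usual PathSensitive headers and a checker context:

    // Hypothetical fragment, for illustration only:
    void inspect(ProgramStateRef State, SVal Arg1, SVal Arg2) {
      if (State->isNonNull(Arg1).isConstrainedTrue()) {
        // Arg1 is known to be non-null on this path.
      }
      if (State->areEqual(Arg1, Arg2).isConstrainedTrue()) {
        // Both values are known to be equal on this path.
      }
    }
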
ConditionTruthVal ProgramState::isNull(SVal V) const {
if (V.isZeroConstant())
return true;
@@ -426,24 +448,30 @@ void ProgramState::setStore(const StoreRef &newStore) {
// State pretty-printing.
//===----------------------------------------------------------------------===//
-void ProgramState::print(raw_ostream &Out,
- const char *NL, const char *Sep) const {
+void ProgramState::print(raw_ostream &Out, const char *NL, const char *Sep,
+ const LocationContext *LC) const {
// Print the store.
ProgramStateManager &Mgr = getStateManager();
Mgr.getStoreManager().print(getStore(), Out, NL, Sep);
// Print out the environment.
- Env.print(Out, NL, Sep);
+ Env.print(Out, NL, Sep, LC);
// Print out the constraints.
Mgr.getConstraintManager().print(this, Out, NL, Sep);
+ // Print out the tracked dynamic types.
+ printDynamicTypeInfo(this, Out, NL, Sep);
+
+ // Print out tainted symbols.
+ printTaint(Out, NL, Sep);
+
// Print checker-specific data.
- Mgr.getOwningEngine()->printState(Out, this, NL, Sep);
+ Mgr.getOwningEngine()->printState(Out, this, NL, Sep, LC);
}
-void ProgramState::printDOT(raw_ostream &Out) const {
- print(Out, "\\l", "\\|");
+void ProgramState::printDOT(raw_ostream &Out, const LocationContext *LC) const {
+ print(Out, "\\l", "\\|", LC);
}
LLVM_DUMP_METHOD void ProgramState::dump() const {
@@ -455,7 +483,7 @@ void ProgramState::printTaint(raw_ostream &Out,
TaintMapImpl TM = get<TaintMap>();
if (!TM.isEmpty())
- Out <<"Tainted Symbols:" << NL;
+ Out <<"Tainted symbols:" << NL;
for (TaintMapImpl::iterator I = TM.begin(), E = TM.end(); I != E; ++I) {
Out << I->first << " : " << I->second << NL;
@@ -781,8 +809,7 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
// complete. For example, this would not currently identify
// overlapping fields in a union as tainted. To identify this we can
// check for overlapping/nested byte offsets.
- if (Kind == I.second &&
- (R == I.first || R->isSubRegionOf(I.first)))
+ if (Kind == I.second && R->isSubRegionOf(I.first))
return true;
}
}
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 5a4031c0b4a5..e8c7bdbde385 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -12,10 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#include "RangedConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/Support/raw_ostream.h"
@@ -23,263 +23,203 @@
using namespace clang;
using namespace ento;
-/// A Range represents the closed range [from, to]. The caller must
-/// guarantee that from <= to. Note that Range is immutable, so as not
-/// to subvert RangeSet's immutability.
-namespace {
-class Range : public std::pair<const llvm::APSInt *, const llvm::APSInt *> {
-public:
- Range(const llvm::APSInt &from, const llvm::APSInt &to)
- : std::pair<const llvm::APSInt *, const llvm::APSInt *>(&from, &to) {
- assert(from <= to);
- }
- bool Includes(const llvm::APSInt &v) const {
- return *first <= v && v <= *second;
- }
- const llvm::APSInt &From() const { return *first; }
- const llvm::APSInt &To() const { return *second; }
- const llvm::APSInt *getConcreteValue() const {
- return &From() == &To() ? &From() : nullptr;
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddPointer(&From());
- ID.AddPointer(&To());
- }
-};
-
-class RangeTrait : public llvm::ImutContainerInfo<Range> {
-public:
- // When comparing if one Range is less than another, we should compare
- // the actual APSInt values instead of their pointers. This keeps the order
- // consistent (instead of comparing by pointer values) and can potentially
- // be used to speed up some of the operations in RangeSet.
- static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
- return *lhs.first < *rhs.first ||
- (!(*rhs.first < *lhs.first) && *lhs.second < *rhs.second);
- }
-};
-
-/// RangeSet contains a set of ranges. If the set is empty, then
-/// there the value of a symbol is overly constrained and there are no
-/// possible values for that symbol.
-class RangeSet {
- typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
- PrimRangeSet ranges; // no need to make const, since it is an
- // ImmutableSet - this allows default operator=
- // to work.
-public:
- typedef PrimRangeSet::Factory Factory;
- typedef PrimRangeSet::iterator iterator;
-
- RangeSet(PrimRangeSet RS) : ranges(RS) {}
-
- /// Create a new set with all ranges of this set and RS.
- /// Possible intersections are not checked here.
- RangeSet addRange(Factory &F, const RangeSet &RS) {
- PrimRangeSet Ranges(RS.ranges);
- for (const auto &range : ranges)
- Ranges = F.add(Ranges, range);
- return RangeSet(Ranges);
- }
-
- iterator begin() const { return ranges.begin(); }
- iterator end() const { return ranges.end(); }
-
- bool isEmpty() const { return ranges.isEmpty(); }
-
- /// Construct a new RangeSet representing '{ [from, to] }'.
- RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
- : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
-
- /// Profile - Generates a hash profile of this RangeSet for use
- /// by FoldingSet.
- void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
-
- /// getConcreteValue - If a symbol is contrained to equal a specific integer
- /// constant then this method returns that value. Otherwise, it returns
- /// NULL.
- const llvm::APSInt *getConcreteValue() const {
- return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : nullptr;
- }
+void RangeSet::IntersectInRange(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower, const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
+ // There are six cases for each range R in the set:
+ // 1. R is entirely before the intersection range.
+ // 2. R is entirely after the intersection range.
+ // 3. R contains the entire intersection range.
+ // 4. R starts before the intersection range and ends in the middle.
+ // 5. R starts in the middle of the intersection range and ends after it.
+ // 6. R is entirely contained in the intersection range.
+ // These correspond to each of the conditions below.
+ for (/* i = begin(), e = end() */; i != e; ++i) {
+ if (i->To() < Lower) {
+ continue;
+ }
+ if (i->From() > Upper) {
+ break;
+ }
-private:
- void IntersectInRange(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower, const llvm::APSInt &Upper,
- PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
- PrimRangeSet::iterator &e) const {
- // There are six cases for each range R in the set:
- // 1. R is entirely before the intersection range.
- // 2. R is entirely after the intersection range.
- // 3. R contains the entire intersection range.
- // 4. R starts before the intersection range and ends in the middle.
- // 5. R starts in the middle of the intersection range and ends after it.
- // 6. R is entirely contained in the intersection range.
- // These correspond to each of the conditions below.
- for (/* i = begin(), e = end() */; i != e; ++i) {
- if (i->To() < Lower) {
- continue;
- }
- if (i->From() > Upper) {
+ if (i->Includes(Lower)) {
+ if (i->Includes(Upper)) {
+ newRanges =
+ F.add(newRanges, Range(BV.getValue(Lower), BV.getValue(Upper)));
break;
- }
-
- if (i->Includes(Lower)) {
- if (i->Includes(Upper)) {
- newRanges =
- F.add(newRanges, Range(BV.getValue(Lower), BV.getValue(Upper)));
- break;
- } else
- newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
- } else {
- if (i->Includes(Upper)) {
- newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
- break;
- } else
- newRanges = F.add(newRanges, *i);
- }
+ } else
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
+ } else {
+ if (i->Includes(Upper)) {
+ newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.add(newRanges, *i);
}
}
+}
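
A standalone simplification of the case analysis above, assuming sorted, disjoint ranges over plain int64_t instead of APSInt; the six cases collapse into clamping each overlapping range:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using R = std::pair<int64_t, int64_t>;  // closed range [first, second]

    std::vector<R> intersect(const std::vector<R> &rs, int64_t lo, int64_t hi) {
      std::vector<R> out;
      for (const R &r : rs) {
        if (r.second < lo) continue;        // case 1: entirely before
        if (r.first > hi) break;            // case 2: entirely after
        out.push_back({std::max(r.first, lo), std::min(r.second, hi)});
      }
      return out; // e.g. {[1,10], [20,30]} clipped to [5,25] -> {[5,10], [20,25]}
    }
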
- const llvm::APSInt &getMinValue() const {
- assert(!isEmpty());
- return ranges.begin()->From();
- }
+const llvm::APSInt &RangeSet::getMinValue() const {
+ assert(!isEmpty());
+ return ranges.begin()->From();
+}
- bool pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
- // This function has nine cases, the cartesian product of range-testing
- // both the upper and lower bounds against the symbol's type.
- // Each case requires a different pinning operation.
- // The function returns false if the described range is entirely outside
- // the range of values for the associated symbol.
- APSIntType Type(getMinValue());
- APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower, true);
- APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper, true);
-
- switch (LowerTest) {
+bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
+ // This function has nine cases, the cartesian product of range-testing
+ // both the upper and lower bounds against the symbol's type.
+ // Each case requires a different pinning operation.
+ // The function returns false if the described range is entirely outside
+ // the range of values for the associated symbol.
+ APSIntType Type(getMinValue());
+ APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower, true);
+ APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper, true);
+
+ switch (LowerTest) {
+ case APSIntType::RTR_Below:
+ switch (UpperTest) {
case APSIntType::RTR_Below:
- switch (UpperTest) {
- case APSIntType::RTR_Below:
- // The entire range is outside the symbol's set of possible values.
- // If this is a conventionally-ordered range, the state is infeasible.
- if (Lower <= Upper)
- return false;
-
- // However, if the range wraps around, it spans all possible values.
- Lower = Type.getMinValue();
- Upper = Type.getMaxValue();
- break;
- case APSIntType::RTR_Within:
- // The range starts below what's possible but ends within it. Pin.
- Lower = Type.getMinValue();
- Type.apply(Upper);
- break;
- case APSIntType::RTR_Above:
- // The range spans all possible values for the symbol. Pin.
- Lower = Type.getMinValue();
- Upper = Type.getMaxValue();
- break;
- }
+ // The entire range is outside the symbol's set of possible values.
+ // If this is a conventionally-ordered range, the state is infeasible.
+ if (Lower <= Upper)
+ return false;
+
+ // However, if the range wraps around, it spans all possible values.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
break;
case APSIntType::RTR_Within:
- switch (UpperTest) {
- case APSIntType::RTR_Below:
- // The range wraps around, but all lower values are not possible.
- Type.apply(Lower);
- Upper = Type.getMaxValue();
- break;
- case APSIntType::RTR_Within:
- // The range may or may not wrap around, but both limits are valid.
- Type.apply(Lower);
- Type.apply(Upper);
- break;
- case APSIntType::RTR_Above:
- // The range starts within what's possible but ends above it. Pin.
- Type.apply(Lower);
- Upper = Type.getMaxValue();
- break;
- }
+ // The range starts below what's possible but ends within it. Pin.
+ Lower = Type.getMinValue();
+ Type.apply(Upper);
break;
case APSIntType::RTR_Above:
- switch (UpperTest) {
- case APSIntType::RTR_Below:
- // The range wraps but is outside the symbol's set of possible values.
- return false;
- case APSIntType::RTR_Within:
- // The range starts above what's possible but ends within it (wrap).
- Lower = Type.getMinValue();
- Type.apply(Upper);
- break;
- case APSIntType::RTR_Above:
- // The entire range is outside the symbol's set of possible values.
- // If this is a conventionally-ordered range, the state is infeasible.
- if (Lower <= Upper)
- return false;
-
- // However, if the range wraps around, it spans all possible values.
- Lower = Type.getMinValue();
- Upper = Type.getMaxValue();
- break;
- }
+ // The range spans all possible values for the symbol. Pin.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
+ case APSIntType::RTR_Within:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The range wraps around, but all lower values are not possible.
+ Type.apply(Lower);
+ Upper = Type.getMaxValue();
+ break;
+ case APSIntType::RTR_Within:
+ // The range may or may not wrap around, but both limits are valid.
+ Type.apply(Lower);
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The range starts within what's possible but ends above it. Pin.
+ Type.apply(Lower);
+ Upper = Type.getMaxValue();
break;
}
+ break;
+ case APSIntType::RTR_Above:
+ switch (UpperTest) {
+ case APSIntType::RTR_Below:
+ // The range wraps but is outside the symbol's set of possible values.
+ return false;
+ case APSIntType::RTR_Within:
+ // The range starts above what's possible but ends within it (wrap).
+ Lower = Type.getMinValue();
+ Type.apply(Upper);
+ break;
+ case APSIntType::RTR_Above:
+ // The entire range is outside the symbol's set of possible values.
+ // If this is a conventionally-ordered range, the state is infeasible.
+ if (Lower <= Upper)
+ return false;
- return true;
+ // However, if the range wraps around, it spans all possible values.
+ Lower = Type.getMinValue();
+ Upper = Type.getMaxValue();
+ break;
+ }
+ break;
}
-public:
- // Returns a set containing the values in the receiving set, intersected with
- // the closed range [Lower, Upper]. Unlike the Range type, this range uses
- // modular arithmetic, corresponding to the common treatment of C integer
- // overflow. Thus, if the Lower bound is greater than the Upper bound, the
- // range is taken to wrap around. This is equivalent to taking the
- // intersection with the two ranges [Min, Upper] and [Lower, Max],
- // or, alternatively, /removing/ all integers between Upper and Lower.
- RangeSet Intersect(BasicValueFactory &BV, Factory &F, llvm::APSInt Lower,
- llvm::APSInt Upper) const {
- if (!pin(Lower, Upper))
- return F.getEmptySet();
-
- PrimRangeSet newRanges = F.getEmptySet();
-
- PrimRangeSet::iterator i = begin(), e = end();
- if (Lower <= Upper)
- IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
- else {
- // The order of the next two statements is important!
- // IntersectInRange() does not reset the iteration state for i and e.
- // Therefore, the lower range most be handled first.
- IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
- IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
- }
+ return true;
+}
+
+// Returns a set containing the values in the receiving set, intersected with
+// the closed range [Lower, Upper]. Unlike the Range type, this range uses
+// modular arithmetic, corresponding to the common treatment of C integer
+// overflow. Thus, if the Lower bound is greater than the Upper bound, the
+// range is taken to wrap around. This is equivalent to taking the
+// intersection with the two ranges [Min, Upper] and [Lower, Max],
+// or, alternatively, /removing/ all integers between Upper and Lower.
+RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
+ llvm::APSInt Lower, llvm::APSInt Upper) const {
+ if (!pin(Lower, Upper))
+ return F.getEmptySet();
- return newRanges;
+ PrimRangeSet newRanges = F.getEmptySet();
+
+ PrimRangeSet::iterator i = begin(), e = end();
+ if (Lower <= Upper)
+ IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+ else {
+ // The order of the next two statements is important!
+ // IntersectInRange() does not reset the iteration state for i and e.
+ // Therefore, the lower range must be handled first.
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
+ IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
}
- void print(raw_ostream &os) const {
- bool isFirst = true;
- os << "{ ";
- for (iterator i = begin(), e = end(); i != e; ++i) {
- if (isFirst)
- isFirst = false;
- else
- os << ", ";
-
- os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
- << ']';
+ return newRanges;
+}
+
+// Turn all [A, B] ranges to [-B, -A]. Ranges [MIN, B] are turned to range set
+// [MIN, MIN] U [-B, MAX], when MIN and MAX are the minimal and the maximal
+// signed values of the type.
+RangeSet RangeSet::Negate(BasicValueFactory &BV, Factory &F) const {
+ PrimRangeSet newRanges = F.getEmptySet();
+
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ const llvm::APSInt &from = i->From(), &to = i->To();
+ const llvm::APSInt &newTo = (from.isMinSignedValue() ?
+ BV.getMaxValue(from) :
+ BV.getValue(-from));
+ if (to.isMaxSignedValue() && !newRanges.isEmpty() &&
+ newRanges.begin()->From().isMinSignedValue()) {
+ assert(newRanges.begin()->To().isMinSignedValue() &&
+ "Ranges should not overlap");
+ assert(!from.isMinSignedValue() && "Ranges should not overlap");
+ const llvm::APSInt &newFrom = newRanges.begin()->From();
+ newRanges =
+ F.add(F.remove(newRanges, *newRanges.begin()), Range(newFrom, newTo));
+ } else if (!to.isMinSignedValue()) {
+ const llvm::APSInt &newFrom = BV.getValue(-to);
+ newRanges = F.add(newRanges, Range(newFrom, newTo));
+ }
+ if (from.isMinSignedValue()) {
+ newRanges = F.add(newRanges, Range(BV.getMinValue(from),
+ BV.getMinValue(from)));
}
- os << " }";
}
- bool operator==(const RangeSet &other) const {
- return ranges == other.ranges;
- }
-};
-} // end anonymous namespace
+ return newRanges;
+}
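
A standalone illustration of the wrap-around case Negate() handles, using 8-bit signed arithmetic:

    #include <cstdint>
    #include <cstdio>

    // Negating INT8_MIN (-128) is not representable in int8_t and wraps back
    // to -128, which is why [MIN, B] maps to [MIN, MIN] U [-B, MAX] rather
    // than to a single contiguous range.
    int main() {
      int neg = -static_cast<int>(INT8_MIN);         // 128: out of int8_t range
      std::printf("%d\n", static_cast<int8_t>(neg)); // prints -128
      // Example: Negate({[-128, 5]}) == {[-128, -128], [-5, 127]}.
    }
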
-REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintRange,
- CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef,
- RangeSet))
+void RangeSet::print(raw_ostream &os) const {
+ bool isFirst = true;
+ os << "{ ";
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (isFirst)
+ isFirst = false;
+ else
+ os << ", ";
+
+ os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
+ << ']';
+ }
+ os << " }";
+}
namespace {
class RangeConstraintManager : public RangedConstraintManager {
@@ -344,6 +284,8 @@ private:
RangeSet::Factory F;
RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
+ const RangeSet *getRangeForMinusSymbol(ProgramStateRef State,
+ SymbolRef Sym);
RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
@@ -360,6 +302,7 @@ private:
RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment);
};
} // end anonymous namespace
@@ -400,9 +343,11 @@ bool RangeConstraintManager::canReasonAbout(SVal X) const {
if (BinaryOperator::isEqualityOp(SSE->getOpcode()) ||
BinaryOperator::isRelationalOp(SSE->getOpcode())) {
// We handle Loc <> Loc comparisons, but not (yet) NonLoc <> NonLoc.
+ // We've recently started producing Loc <> NonLoc comparisons (that
+ // result from casts of one of the operands between e.g. intptr_t and
+ // void *), but we can't reason about them yet.
if (Loc::isLocType(SSE->getLHS()->getType())) {
- assert(Loc::isLocType(SSE->getRHS()->getType()));
- return true;
+ return Loc::isLocType(SSE->getRHS()->getType());
}
}
}
@@ -474,7 +419,7 @@ static RangeSet assumeNonZero(
--IntType.getZeroValue());
}
-/// \brief Apply implicit constraints for bitwise OR- and AND-.
+/// Apply implicit constraints for bitwise OR- and AND-.
/// For unsigned types, bitwise OR with a constant always returns
/// a value greater-or-equal than the constant, and bitwise AND
/// returns a value less-or-equal then the constant.
@@ -515,9 +460,15 @@ RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
if (ConstraintRangeTy::data_type *V = State->get<ConstraintRange>(Sym))
return *V;
+ BasicValueFactory &BV = getBasicVals();
+
+ // If Sym is a difference of symbols A - B, then maybe we have range set
+ // stored for B - A.
+ if (const RangeSet *R = getRangeForMinusSymbol(State, Sym))
+ return R->Negate(BV, F);
+
// Lazily generate a new RangeSet representing all possible values for the
// given symbol type.
- BasicValueFactory &BV = getBasicVals();
QualType T = Sym->getType();
RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
@@ -533,6 +484,32 @@ RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
return Result;
}
+// FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
+// obtain the negated symbolic expression instead of constructing the
+// symbol manually. This will allow us to support finding ranges of not
+// only negated SymSymExpr-type expressions, but also of other, simpler
+// expressions which we currently do not know how to negate.
+const RangeSet *
+RangeConstraintManager::getRangeForMinusSymbol(ProgramStateRef State,
+ SymbolRef Sym) {
+ if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
+ if (SSE->getOpcode() == BO_Sub) {
+ QualType T = Sym->getType();
+ SymbolManager &SymMgr = State->getSymbolManager();
+ SymbolRef negSym = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
+ SSE->getLHS(), T);
+ if (const RangeSet *negV = State->get<ConstraintRange>(negSym)) {
+ // Unsigned range set cannot be negated, unless it is [0, 0].
+ if ((negV->getConcreteValue() &&
+ (*negV->getConcreteValue() == 0)) ||
+ T->isSignedIntegerOrEnumerationType())
+ return negV;
+ }
+ }
+ }
+ return nullptr;
+}
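
For illustration, hypothetical analyzed code that benefits from this lookup; once a range is recorded for (a - b), a query about (b - a) can be answered by negating it:

    // Hypothetical analyzed code, for illustration only:
    void g(int a, int b) {
      if (a - b > 3) {
        // (a - b) is constrained to [4, INT_MAX]; getRangeForMinusSymbol()
        // finds that constraint for the symbol (b - a) and Negate() turns
        // it into [INT_MIN + 1, -4] without a separate assumption.
      }
    }
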
+
//===------------------------------------------------------------------------===
// assumeSymX methods: protected interface for RangeConstraintManager.
//===------------------------------------------------------------------------===/
diff --git a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index 55ff15806efe..f99853f07073 100644
--- a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "RangedConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
namespace clang {
@@ -52,17 +52,18 @@ ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
assert(BinaryOperator::isComparisonOp(Op));
// For now, we only support comparing pointers.
- assert(Loc::isLocType(SSE->getLHS()->getType()));
- assert(Loc::isLocType(SSE->getRHS()->getType()));
- QualType DiffTy = SymMgr.getContext().getPointerDiffType();
- SymbolRef Subtraction =
- SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
-
- const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
- Op = BinaryOperator::reverseComparisonOp(Op);
- if (!Assumption)
- Op = BinaryOperator::negateComparisonOp(Op);
- return assumeSymRel(State, Subtraction, Op, Zero);
+ if (Loc::isLocType(SSE->getLHS()->getType()) &&
+ Loc::isLocType(SSE->getRHS()->getType())) {
+ QualType DiffTy = SymMgr.getContext().getPointerDiffType();
+ SymbolRef Subtraction =
+ SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
+
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
+ Op = BinaryOperator::reverseComparisonOp(Op);
+ if (!Assumption)
+ Op = BinaryOperator::negateComparisonOp(Op);
+ return assumeSymRel(State, Subtraction, Op, Zero);
+ }
}
// If we get here, there's nothing else we can do but treat the symbol as
diff --git a/lib/StaticAnalyzer/Core/RangedConstraintManager.h b/lib/StaticAnalyzer/Core/RangedConstraintManager.h
deleted file mode 100644
index a4e6062a4f57..000000000000
--- a/lib/StaticAnalyzer/Core/RangedConstraintManager.h
+++ /dev/null
@@ -1,102 +0,0 @@
-//== RangedConstraintManager.h ----------------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Ranged constraint manager, built on SimpleConstraintManager.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_RANGEDCONSTRAINTMANAGER_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CORE_RANGEDCONSTRAINTMANAGER_H
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h"
-
-namespace clang {
-
-namespace ento {
-
-class RangedConstraintManager : public SimpleConstraintManager {
-public:
- RangedConstraintManager(SubEngine *SE, SValBuilder &SB)
- : SimpleConstraintManager(SE, SB) {}
-
- ~RangedConstraintManager() override;
-
- //===------------------------------------------------------------------===//
- // Implementation for interface from SimpleConstraintManager.
- //===------------------------------------------------------------------===//
-
- ProgramStateRef assumeSym(ProgramStateRef State, SymbolRef Sym,
- bool Assumption) override;
-
- ProgramStateRef assumeSymInclusiveRange(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool InRange) override;
-
- ProgramStateRef assumeSymUnsupported(ProgramStateRef State, SymbolRef Sym,
- bool Assumption) override;
-
-protected:
- /// Assume a constraint between a symbolic expression and a concrete integer.
- virtual ProgramStateRef assumeSymRel(ProgramStateRef State, SymbolRef Sym,
- BinaryOperator::Opcode op,
- const llvm::APSInt &Int);
-
- //===------------------------------------------------------------------===//
- // Interface that subclasses must implement.
- //===------------------------------------------------------------------===//
-
- // Each of these is of the form "$Sym+Adj <> V", where "<>" is the comparison
- // operation for the method being invoked.
-
- virtual ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymLT(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymGT(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymLE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymGE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymWithinInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymOutsideInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
-
- //===------------------------------------------------------------------===//
- // Internal implementation.
- //===------------------------------------------------------------------===//
-private:
- static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment);
-};
-
-} // end GR namespace
-
-} // end clang namespace
-
-#endif
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index e2e69bb28ec2..db6449e6d5f3 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -230,11 +230,6 @@ Optional<SVal> RegionBindingsRef::getDirectBinding(const MemRegion *R) const {
}
Optional<SVal> RegionBindingsRef::getDefaultBinding(const MemRegion *R) const {
- if (R->isBoundable())
- if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R))
- if (TR->getValueType()->isUnionType())
- return UnknownVal();
-
return Optional<SVal>::create(lookup(R, BindingKey::Default));
}
@@ -338,7 +333,7 @@ private:
/// To disable all small-struct-dependent behavior, set the option to "0".
unsigned SmallStructLimit;
- /// \brief A helper used to populate the work list with the given set of
+ /// A helper used to populate the work list with the given set of
/// regions.
void populateWorkList(invalidateRegionsWorker &W,
ArrayRef<SVal> Values,
@@ -409,8 +404,22 @@ public: // Part of public interface to class.
RegionBindingsRef bind(RegionBindingsConstRef B, Loc LV, SVal V);
- // BindDefault is only used to initialize a region with a default value.
- StoreRef BindDefault(Store store, const MemRegion *R, SVal V) override {
+ // BindDefaultInitial is only used to initialize a region with
+ // a default value.
+ StoreRef BindDefaultInitial(Store store, const MemRegion *R,
+ SVal V) override {
+ RegionBindingsRef B = getRegionBindings(store);
+ // Use other APIs when you have to wipe the region that was initialized
+ // earlier.
+ assert(!(B.getDefaultBinding(R) || B.getDirectBinding(R)) &&
+ "Double initialization!");
+ B = B.addBinding(BindingKey::Make(R, BindingKey::Default), V);
+ return StoreRef(B.asImmutableMap().getRootWithoutRetain(), *this);
+ }
+
+ // BindDefaultZero is used for zeroing constructors that may accidentally
+ // overwrite existing bindings.
+ StoreRef BindDefaultZero(Store store, const MemRegion *R) override {
// FIXME: The offsets of empty bases can be tricky because
// of the so-called "empty base class optimization".
// If a base class has been optimized out
@@ -420,24 +429,14 @@ public: // Part of public interface to class.
// and trying to infer them from offsets/alignments
// seems to be error-prone and non-trivial because of the trailing padding.
// As a temporary mitigation we don't create bindings for empty bases.
- if (R->getKind() == MemRegion::CXXBaseObjectRegionKind &&
- cast<CXXBaseObjectRegion>(R)->getDecl()->isEmpty())
- return StoreRef(store, *this);
+ if (const auto *BR = dyn_cast<CXXBaseObjectRegion>(R))
+ if (BR->getDecl()->isEmpty())
+ return StoreRef(store, *this);
RegionBindingsRef B = getRegionBindings(store);
- assert(!B.lookup(R, BindingKey::Direct));
-
- BindingKey Key = BindingKey::Make(R, BindingKey::Default);
- if (B.lookup(Key)) {
- const SubRegion *SR = cast<SubRegion>(R);
- assert(SR->getAsOffset().getOffset() ==
- SR->getSuperRegion()->getAsOffset().getOffset() &&
- "A default value must come from a super-region");
- B = removeSubRegionBindings(B, SR);
- } else {
- B = B.addBinding(Key, V);
- }
-
+ SVal V = svalBuilder.makeZeroVal(Ctx.CharTy);
+ B = removeSubRegionBindings(B, cast<SubRegion>(R));
+ B = B.addBinding(BindingKey::Make(R, BindingKey::Default), V);
return StoreRef(B.asImmutableMap().getRootWithoutRetain(), *this);
}
@@ -474,7 +473,7 @@ public: // Part of public interface to class.
const TypedRegion *R,
SVal DefaultVal);
- /// \brief Create a new store with the specified binding removed.
+ /// Create a new store with the specified binding removed.
/// \param ST the original store, that is the basis for the new store.
/// \param L the location whose binding should be removed.
StoreRef killBinding(Store ST, Loc L) override;
@@ -492,7 +491,7 @@ public: // Part of public interface to class.
bool includedInBindings(Store store, const MemRegion *region) const override;
- /// \brief Return the value bound to specified location in a given state.
+ /// Return the value bound to specified location in a given state.
///
/// The high level logic for this method is this:
/// getBinding (L)
@@ -825,7 +824,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
FieldVector FieldsInSymbolicSubregions;
if (TopKey.hasSymbolicOffset()) {
getSymbolicOffsetFields(TopKey, FieldsInSymbolicSubregions);
- Top = cast<SubRegion>(TopKey.getConcreteOffsetRegion());
+ Top = TopKey.getConcreteOffsetRegion();
TopKey = BindingKey::Make(Top, BindingKey::Default);
}
@@ -871,7 +870,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
} else if (NextKey.hasSymbolicOffset()) {
const MemRegion *Base = NextKey.getConcreteOffsetRegion();
- if (Top->isSubRegionOf(Base)) {
+ if (Top->isSubRegionOf(Base) && Top != Base) {
// Case 3: The next key is symbolic and we just changed something within
// its concrete region. We don't know if the binding is still valid, so
// we'll be conservative and include it.
@@ -881,7 +880,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
} else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
// Case 4: The next key is symbolic, but we changed a known
// super-region. In this case the binding is certainly included.
- if (Top == Base || BaseSR->isSubRegionOf(Top))
+ if (BaseSR->isSubRegionOf(Top))
if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
Bindings.push_back(*I);
}
@@ -1095,7 +1094,7 @@ void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
return;
}
- if (T->isStructureOrClassType()) {
+ if (T->isRecordType()) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
@@ -1342,7 +1341,8 @@ RegionStoreManager::getSizeInElements(ProgramStateRef state,
// If a variable is reinterpreted as a type that doesn't fit into a larger
// type evenly, round it down.
// This is a signed value, since it's used in arithmetic with signed indices.
- return svalBuilder.makeIntVal(RegionSize / EleSize, false);
+ return svalBuilder.makeIntVal(RegionSize / EleSize,
+ svalBuilder.getArrayIndexType());
}
//===----------------------------------------------------------------------===//
@@ -1401,12 +1401,12 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
T = TR->getLocationType()->getPointeeType();
else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
T = SR->getSymbol()->getType()->getPointeeType();
- else if (isa<AllocaRegion>(MR))
- T = Ctx.VoidTy;
}
assert(!T.isNull() && "Unable to auto-detect binding type!");
assert(!T->isVoidType() && "Attempting to dereference a void pointer!");
MR = GetElementZeroRegion(cast<SubRegion>(MR), T);
+ } else {
+ T = cast<TypedValueRegion>(MR)->getValueType();
}
// FIXME: Perhaps this method should just take a 'const MemRegion*' argument
@@ -1446,7 +1446,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
return UnknownVal();
if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
- return CastRetrievedVal(getBindingForField(B, FR), FR, T, false);
+ return CastRetrievedVal(getBindingForField(B, FR), FR, T);
if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
// FIXME: Here we actually perform an implicit conversion from the loaded
@@ -1454,7 +1454,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
// more intelligently. For example, an 'element' can encompass multiple
// bound regions (e.g., several bound bytes), or could be a subset of
// a larger value.
- return CastRetrievedVal(getBindingForElement(B, ER), ER, T, false);
+ return CastRetrievedVal(getBindingForElement(B, ER), ER, T);
}
if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
@@ -1464,7 +1464,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
// reinterpreted, it is possible we stored a different value that could
// fit within the ivar. Either we need to cast these when storing them
// or reinterpret them lazily (as we do here).
- return CastRetrievedVal(getBindingForObjCIvar(B, IVR), IVR, T, false);
+ return CastRetrievedVal(getBindingForObjCIvar(B, IVR), IVR, T);
}
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
@@ -1474,7 +1474,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
// variable is reinterpreted, it is possible we stored a different value
// that could fit within the variable. Either we need to cast these when
// storing them or reinterpret them lazily (as we do here).
- return CastRetrievedVal(getBindingForVar(B, VR), VR, T, false);
+ return CastRetrievedVal(getBindingForVar(B, VR), VR, T);
}
const SVal *V = B.lookup(R, BindingKey::Direct);
@@ -1606,7 +1606,7 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
const MemRegion* superR = R->getSuperRegion();
// Check if the region is an element region of a string literal.
- if (const StringRegion *StrR=dyn_cast<StringRegion>(superR)) {
+ if (const StringRegion *StrR = dyn_cast<StringRegion>(superR)) {
// FIXME: Handle loads from strings where the literal is treated as
// an integer, e.g., *((unsigned int*)"hello")
QualType T = Ctx.getAsArrayType(StrR->getValueType())->getElementType();
@@ -1629,6 +1629,36 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
char c = (i >= length) ? '\0' : Str->getCodeUnit(i);
return svalBuilder.makeIntVal(c, T);
}
+ } else if (const VarRegion *VR = dyn_cast<VarRegion>(superR)) {
+ // Check if the containing array is const and has an initialized value.
+ const VarDecl *VD = VR->getDecl();
+ // Either the array or the array element has to be const.
+ if (VD->getType().isConstQualified() ||
+ R->getElementType().isConstQualified()) {
+ if (const Expr *Init = VD->getInit()) {
+ if (const auto *InitList = dyn_cast<InitListExpr>(Init)) {
+ // The array index has to be known.
+ if (auto CI = R->getIndex().getAs<nonloc::ConcreteInt>()) {
+ int64_t i = CI->getValue().getSExtValue();
+ // If it is known that the index is out of bounds, we can return
+ // an undefined value.
+ if (i < 0)
+ return UndefinedVal();
+
+ if (auto CAT = Ctx.getAsConstantArrayType(VD->getType()))
+ if (CAT->getSize().sle(i))
+ return UndefinedVal();
+
+ // If the index is past the explicit initializers, the element
+ // is value-initialized, i.e. zero.
+ if (i >= InitList->getNumInits())
+ return svalBuilder.makeZeroVal(R->getElementType());
+
+ if (const Expr *ElemInit = InitList->getInit(i))
+ if (Optional<SVal> V = svalBuilder.getConstantVal(ElemInit))
+ return *V;
+ }
+ }
+ }
+ }
}
// Check for loads from a code text region. For such loads, just give up.
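
Hypothetical analyzed code showing the three outcomes of the new constant-array handling:

    // Hypothetical analyzed code, for illustration only:
    const int arr[5] = {1, 2};

    int f() {
      int a = arr[1];  // folded to 2 from the initializer list
      int b = arr[3];  // in bounds but past the explicit initializers: zero
      int c = arr[7];  // index known to be out of bounds: UndefinedVal
      return a + b + c;
    }
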
@@ -1678,7 +1708,34 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
if (const Optional<SVal> &V = B.getDirectBinding(R))
return *V;
- QualType Ty = R->getValueType();
+ // Is the field declared constant, and does it have an in-class initializer?
+ const FieldDecl *FD = R->getDecl();
+ QualType Ty = FD->getType();
+ if (Ty.isConstQualified())
+ if (const Expr *Init = FD->getInClassInitializer())
+ if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
+ return *V;
+
+ // If the containing record was initialized, try to get its constant value.
+ const MemRegion* superR = R->getSuperRegion();
+ if (const auto *VR = dyn_cast<VarRegion>(superR)) {
+ const VarDecl *VD = VR->getDecl();
+ QualType RecordVarTy = VD->getType();
+ unsigned Index = FD->getFieldIndex();
+ // Either the record variable or the field has to be const qualified.
+ if (RecordVarTy.isConstQualified() || Ty.isConstQualified())
+ if (const Expr *Init = VD->getInit())
+ if (const auto *InitList = dyn_cast<InitListExpr>(Init)) {
+ if (Index < InitList->getNumInits()) {
+ if (const Expr *FieldInit = InitList->getInit(Index))
+ if (Optional<SVal> V = svalBuilder.getConstantVal(FieldInit))
+ return *V;
+ } else {
+ return svalBuilder.makeZeroVal(Ty);
+ }
+ }
+ }
+
return getBindingForFieldOrElementCommon(B, R, Ty);
}
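
Hypothetical analyzed code covering the two new constant-folding paths for fields:

    // Hypothetical analyzed code, for illustration only:
    struct P {
      const int x = 3;    // const field with an in-class initializer
      int y;
    };

    void g() {
      P p;                // reading p.x folds to 3 via the in-class initializer
      const P q = {7, 8}; // const record variable: reading q.y folds to 8
    }
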
@@ -1776,7 +1833,7 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
// quickly result in a warning.
bool hasPartialLazyBinding = false;
- const SubRegion *SR = dyn_cast<SubRegion>(R);
+ const SubRegion *SR = R;
while (SR) {
const MemRegion *Base = SR->getSuperRegion();
if (Optional<SVal> D = getBindingForDerivedDefaultValue(B, Base, R, Ty)) {
@@ -2050,6 +2107,9 @@ RegionStoreManager::bind(RegionBindingsConstRef B, Loc L, SVal V) {
R = GetElementZeroRegion(SR, T);
}
+ assert((!isa<CXXThisRegion>(R) || !B.lookup(R)) &&
+ "'this' pointer is not an l-value and is not assignable");
+
// Clear out bindings that may overlap with this binding.
RegionBindingsRef NewB = removeSubRegionBindings(B, cast<SubRegion>(R));
return NewB.addBinding(BindingKey::Make(R, BindingKey::Direct), V);
diff --git a/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
new file mode 100644
index 000000000000..d379562bf325
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
@@ -0,0 +1,181 @@
+//== SMTConstraintManager.cpp -----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+ProgramStateRef SMTConstraintManager::assumeSym(ProgramStateRef State,
+ SymbolRef Sym,
+ bool Assumption) {
+ ASTContext &Ctx = getBasicVals().getContext();
+
+ QualType RetTy;
+ bool hasComparison;
+
+ SMTExprRef Exp = Solver->getExpr(Ctx, Sym, &RetTy, &hasComparison);
+
+ // Create zero comparison for implicit boolean cast, with reversed assumption
+ if (!hasComparison && !RetTy->isBooleanType())
+ return assumeExpr(State, Sym,
+ Solver->getZeroExpr(Ctx, Exp, RetTy, !Assumption));
+
+ return assumeExpr(State, Sym, Assumption ? Exp : Solver->mkNot(Exp));
+}
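For a non-boolean symbol that is branched on directly, the constraint handed to the solver is a synthesized comparison with zero. A hypothetical analyzed snippet, assuming getZeroExpr() builds 'Exp == 0' when its flag is true and 'Exp != 0' when it is false:

  int x = get();   // get() is an assumed external function
  if (x) {         // no explicit comparison and RetTy is 'int', so the
                   // true branch asserts getZeroExpr(..., !true), i.e. x != 0
  }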
+
+ProgramStateRef SMTConstraintManager::assumeSymInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InRange) {
+ ASTContext &Ctx = getBasicVals().getContext();
+ return assumeExpr(State, Sym,
+ Solver->getRangeExpr(Ctx, Sym, From, To, InRange));
+}
+
+ProgramStateRef
+SMTConstraintManager::assumeSymUnsupported(ProgramStateRef State, SymbolRef Sym,
+ bool Assumption) {
+ // Skip anything that is unsupported
+ return State;
+}
+
+ConditionTruthVal SMTConstraintManager::checkNull(ProgramStateRef State,
+ SymbolRef Sym) {
+ ASTContext &Ctx = getBasicVals().getContext();
+
+ QualType RetTy;
+ // The expression may be cast, so we cannot call getZ3DataExpr() directly
+ SMTExprRef VarExp = Solver->getExpr(Ctx, Sym, &RetTy);
+ SMTExprRef Exp = Solver->getZeroExpr(Ctx, VarExp, RetTy, /*Assumption=*/true);
+
+ // Negate the constraint
+ SMTExprRef NotExp =
+ Solver->getZeroExpr(Ctx, VarExp, RetTy, /*Assumption=*/false);
+
+ Solver->reset();
+ addStateConstraints(State);
+
+ Solver->push();
+ Solver->addConstraint(Exp);
+ ConditionTruthVal isSat = Solver->check();
+
+ Solver->pop();
+ Solver->addConstraint(NotExp);
+ ConditionTruthVal isNotSat = Solver->check();
+
+ // Zero is the only possible solution
+ if (isSat.isConstrainedTrue() && isNotSat.isConstrainedFalse())
+ return true;
+
+ // Zero is not a solution
+ if (isSat.isConstrainedFalse() && isNotSat.isConstrainedTrue())
+ return false;
+
+ // Zero may be a solution
+ return ConditionTruthVal();
+}
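The two satisfiability queries combine into a three-valued answer; summarizing the code above:

  (Sym == 0) SAT?   (Sym != 0) SAT?   checkNull result
  yes               no                true  (zero is the only solution)
  no                yes               false (zero is not a solution)
  yes               yes               unknown (zero may be a solution)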
+
+const llvm::APSInt *SMTConstraintManager::getSymVal(ProgramStateRef State,
+ SymbolRef Sym) const {
+ BasicValueFactory &BVF = getBasicVals();
+ ASTContext &Ctx = BVF.getContext();
+
+ if (const SymbolData *SD = dyn_cast<SymbolData>(Sym)) {
+ QualType Ty = Sym->getType();
+ assert(!Ty->isRealFloatingType());
+ llvm::APSInt Value(Ctx.getTypeSize(Ty),
+ !Ty->isSignedIntegerOrEnumerationType());
+
+ SMTExprRef Exp =
+ Solver->fromData(SD->getSymbolID(), Ty, Ctx.getTypeSize(Ty));
+
+ Solver->reset();
+ addStateConstraints(State);
+
+ // Constraints are unsatisfiable
+ ConditionTruthVal isSat = Solver->check();
+ if (!isSat.isConstrainedTrue())
+ return nullptr;
+
+ // The model does not assign an interpretation
+ if (!Solver->getInterpretation(Exp, Value))
+ return nullptr;
+
+ // A value has been obtained, check if it is the only value
+ SMTExprRef NotExp = Solver->fromBinOp(
+ Exp, BO_NE,
+ Ty->isBooleanType() ? Solver->fromBoolean(Value.getBoolValue())
+ : Solver->fromAPSInt(Value),
+ false);
+
+ Solver->addConstraint(NotExp);
+
+ ConditionTruthVal isNotSat = Solver->check();
+ if (isNotSat.isConstrainedTrue())
+ return nullptr;
+
+ // This is the only solution, store it
+ return &BVF.getValue(Value);
+ }
+
+ if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym)) {
+ SymbolRef CastSym = SC->getOperand();
+ QualType CastTy = SC->getType();
+ // Skip the void type
+ if (CastTy->isVoidType())
+ return nullptr;
+
+ const llvm::APSInt *Value;
+ if (!(Value = getSymVal(State, CastSym)))
+ return nullptr;
+ return &BVF.Convert(SC->getType(), *Value);
+ }
+
+ if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
+ const llvm::APSInt *LHS, *RHS;
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE)) {
+ LHS = getSymVal(State, SIE->getLHS());
+ RHS = &SIE->getRHS();
+ } else if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE)) {
+ LHS = &ISE->getLHS();
+ RHS = getSymVal(State, ISE->getRHS());
+ } else if (const SymSymExpr *SSM = dyn_cast<SymSymExpr>(BSE)) {
+ // Early termination to avoid expensive call
+ LHS = getSymVal(State, SSM->getLHS());
+ RHS = LHS ? getSymVal(State, SSM->getRHS()) : nullptr;
+ } else {
+ llvm_unreachable("Unsupported binary expression to get symbol value!");
+ }
+
+ if (!LHS || !RHS)
+ return nullptr;
+
+ llvm::APSInt ConvertedLHS, ConvertedRHS;
+ QualType LTy, RTy;
+ std::tie(ConvertedLHS, LTy) = Solver->fixAPSInt(Ctx, *LHS);
+ std::tie(ConvertedRHS, RTy) = Solver->fixAPSInt(Ctx, *RHS);
+ Solver->doIntTypeConversion<llvm::APSInt, &SMTSolver::castAPSInt>(
+ Ctx, ConvertedLHS, LTy, ConvertedRHS, RTy);
+ return BVF.evalAPSInt(BSE->getOpcode(), ConvertedLHS, ConvertedRHS);
+ }
+
+ llvm_unreachable("Unsupported expression to get symbol value!");
+}
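The SymbolData case uses a standard SMT idiom to test whether the model value is unique; in sketch form, over the interface introduced by this patch:

  // 1. check()                    -> SAT, the model assigns Sym = Value
  // 2. addConstraint(Sym != Value)
  // 3. check()                    -> UNSAT: Value is the only solution, keep it
  //                               -> SAT:   another value exists, return nullptr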
+
+ConditionTruthVal
+SMTConstraintManager::checkModel(ProgramStateRef State,
+ const SMTExprRef &Exp) const {
+ Solver->reset();
+ Solver->addConstraint(Exp);
+ addStateConstraints(State);
+ return Solver->check();
+}
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 04452e3e7cc2..f292dca8e99f 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -1,4 +1,4 @@
-// SValBuilder.cpp - Basic class for all SValBuilder implementations -*- C++ -*-
+//===- SValBuilder.cpp - Basic class for all SValBuilder implementations --===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,12 +13,33 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <tuple>
using namespace clang;
using namespace ento;
@@ -27,7 +48,7 @@ using namespace ento;
// Basic SVal creation.
//===----------------------------------------------------------------------===//
-void SValBuilder::anchor() { }
+void SValBuilder::anchor() {}
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
if (Loc::isLocType(type))
@@ -95,12 +116,12 @@ nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
}
DefinedOrUnknownSVal
-SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
+SValBuilder::getRegionValueSymbolVal(const TypedValueRegion *region) {
QualType T = region->getValueType();
if (T->isNullPtrType())
return makeZeroVal(T);
-
+
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
@@ -149,7 +170,6 @@ DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
return nonloc::SymbolVal(sym);
}
-
DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt,
const LocationContext *LCtx,
QualType type,
@@ -217,10 +237,10 @@ SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
return nonloc::SymbolVal(sym);
}
-DefinedSVal SValBuilder::getMemberPointer(const DeclaratorDecl* DD) {
+DefinedSVal SValBuilder::getMemberPointer(const DeclaratorDecl *DD) {
assert(!DD || isa<CXXMethodDecl>(DD) || isa<FieldDecl>(DD));
- if (auto *MD = dyn_cast_or_null<CXXMethodDecl>(DD)) {
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(DD)) {
    // Sema treats pointers to static member functions as having function
    // pointer type, so return a function pointer for the method.
// We don't need to play a similar trick for static member fields
@@ -277,19 +297,19 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return makeZeroVal(E->getType());
case Stmt::ObjCStringLiteralClass: {
- const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
+ const auto *SL = cast<ObjCStringLiteral>(E);
return makeLoc(getRegionManager().getObjCStringRegion(SL));
}
case Stmt::StringLiteralClass: {
- const StringLiteral *SL = cast<StringLiteral>(E);
+ const auto *SL = cast<StringLiteral>(E);
return makeLoc(getRegionManager().getStringRegion(SL));
}
// Fast-path some expressions to avoid the overhead of going through the AST's
// constant evaluator
case Stmt::CharacterLiteralClass: {
- const CharacterLiteral *C = cast<CharacterLiteral>(E);
+ const auto *C = cast<CharacterLiteral>(E);
return makeIntVal(C->getValue(), C->getType());
}
@@ -297,7 +317,7 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return makeBoolVal(cast<CXXBoolLiteralExpr>(E));
case Stmt::TypeTraitExprClass: {
- const TypeTraitExpr *TE = cast<TypeTraitExpr>(E);
+ const auto *TE = cast<TypeTraitExpr>(E);
return makeTruthVal(TE->getValue(), TE->getType());
}
@@ -310,12 +330,19 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
case Stmt::CXXNullPtrLiteralExprClass:
return makeNull();
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
case Stmt::ImplicitCastExprClass: {
- const CastExpr *CE = cast<CastExpr>(E);
+ const auto *CE = cast<CastExpr>(E);
switch (CE->getCastKind()) {
default:
break;
case CK_ArrayToPointerDecay:
+ case CK_IntegralToPointer:
+ case CK_NoOp:
case CK_BitCast: {
const Expr *SE = CE->getSubExpr();
Optional<SVal> Val = getConstantVal(SE);
@@ -348,20 +375,18 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
}
}
-//===----------------------------------------------------------------------===//
-
SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
BinaryOperator::Opcode Op,
NonLoc LHS, NonLoc RHS,
QualType ResultTy) {
- if (!State->isTainted(RHS) && !State->isTainted(LHS))
- return UnknownVal();
-
const SymExpr *symLHS = LHS.getAsSymExpr();
const SymExpr *symRHS = RHS.getAsSymExpr();
+
// TODO: When the Max Complexity is reached, we should conjure a symbol
// instead of generating an Unknown value and propagate the taint info to it.
- const unsigned MaxComp = 10000; // 100000 28X
+ const unsigned MaxComp = StateMgr.getOwningEngine()
+ ->getAnalysisManager()
+ .options.getMaxSymbolComplexity();
if (symLHS && symRHS &&
(symLHS->computeComplexity() + symRHS->computeComplexity()) < MaxComp)
@@ -378,10 +403,8 @@ SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
return UnknownVal();
}
-
SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type) {
-
if (lhs.isUndef() || rhs.isUndef())
return UndefinedVal();
@@ -413,10 +436,19 @@ SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
type);
}
+ConditionTruthVal SValBuilder::areEqual(ProgramStateRef state, SVal lhs,
+ SVal rhs) {
+ return state->isNonNull(evalEQ(state, lhs, rhs));
+}
+
+SVal SValBuilder::evalEQ(ProgramStateRef state, SVal lhs, SVal rhs) {
+ return evalBinOp(state, BO_EQ, lhs, rhs, getConditionType());
+}
+
DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
DefinedOrUnknownSVal lhs,
DefinedOrUnknownSVal rhs) {
- return evalBinOp(state, BO_EQ, lhs, rhs, getConditionType())
+ return evalEQ(state, static_cast<SVal>(lhs), static_cast<SVal>(rhs))
.castAs<DefinedOrUnknownSVal>();
}
@@ -425,7 +457,7 @@ DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
/// Assumes the input types are canonical.
static bool shouldBeModeledWithNoOp(ASTContext &Context, QualType ToTy,
QualType FromTy) {
- while (Context.UnwrapSimilarPointerTypes(ToTy, FromTy)) {
+ while (Context.UnwrapSimilarTypes(ToTy, FromTy)) {
Qualifiers Quals1, Quals2;
ToTy = Context.getUnqualifiedArrayType(ToTy, Quals1);
FromTy = Context.getUnqualifiedArrayType(FromTy, Quals2);
@@ -440,6 +472,10 @@ static bool shouldBeModeledWithNoOp(ASTContext &Context, QualType ToTy,
// If we are casting to void, the 'From' value can be used to represent the
// 'To' value.
+ //
+ // FIXME: Doing this after unwrapping the types doesn't make any sense. A
+ // cast from 'int**' to 'void**' is not special in the way that a cast from
+ // 'int*' to 'void*' is.
if (ToTy->isVoidType())
return true;
@@ -454,7 +490,6 @@ static bool shouldBeModeledWithNoOp(ASTContext &Context, QualType ToTy,
// of the original value is known to be greater than the max of the target type.
SVal SValBuilder::evalIntegralCast(ProgramStateRef state, SVal val,
QualType castTy, QualType originalTy) {
-
// No truncations if target type is big enough.
if (getContext().getTypeSize(castTy) >= getContext().getTypeSize(originalTy))
return evalCast(val, castTy, originalTy);
@@ -548,8 +583,8 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
}
// Check for casts from array type to another type.
- if (const ArrayType *arrayT =
- dyn_cast<ArrayType>(originalTy.getCanonicalType())) {
+ if (const auto *arrayT =
+ dyn_cast<ArrayType>(originalTy.getCanonicalType())) {
// We will always decay to a pointer.
QualType elemTy = arrayT->getElementType();
val = StateMgr.ArrayToPointer(val.castAs<Loc>(), elemTy);
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
index a83421426a13..559ca2c9840d 100644
--- a/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -1,4 +1,4 @@
-//= RValues.cpp - Abstract RValues for Path-Sens. Value Tracking -*- C++ -*-==//
+//===- SVals.cpp - Abstract RValues for Path-Sens. Value Tracking ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,20 +12,31 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/AST/ExprObjC.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "llvm/Support/raw_ostream.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
using namespace clang;
using namespace ento;
-using llvm::APSInt;
//===----------------------------------------------------------------------===//
// Symbol iteration within an SVal.
//===----------------------------------------------------------------------===//
-
//===----------------------------------------------------------------------===//
// Utility methods.
//===----------------------------------------------------------------------===//
@@ -39,7 +50,7 @@ bool SVal::hasConjuredSymbol() const {
if (Optional<loc::MemRegionVal> RV = getAs<loc::MemRegionVal>()) {
const MemRegion *R = RV->getRegion();
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
SymbolRef sym = SR->getSymbol();
if (isa<SymbolConjured>(sym))
return true;
@@ -53,18 +64,18 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
const MemRegion* R = X->getRegion();
if (const FunctionCodeRegion *CTR = R->getAs<FunctionCodeRegion>())
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CTR->getDecl()))
+ if (const auto *FD = dyn_cast<FunctionDecl>(CTR->getDecl()))
return FD;
}
if (auto X = getAs<nonloc::PointerToMember>()) {
- if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(X->getDecl()))
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(X->getDecl()))
return MD;
}
return nullptr;
}
-/// \brief If this SVal is a location (subclasses Loc) and wraps a symbol,
+/// If this SVal is a location (subclasses Loc) and wraps a symbol,
/// return that SymbolRef. Otherwise return 0.
///
/// Implicit casts (ex: void* -> char*) can turn Symbolic region into Element
@@ -95,8 +106,8 @@ SymbolRef SVal::getLocSymbolInBase() const {
const MemRegion *R = X->getRegion();
- while (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SR))
+ while (const auto *SR = dyn_cast<SubRegion>(R)) {
+ if (const auto *SymR = dyn_cast<SymbolicRegion>(SR))
return SymR->getSymbol();
else
R = SR->getSuperRegion();
@@ -107,7 +118,7 @@ SymbolRef SVal::getLocSymbolInBase() const {
// TODO: The next 3 functions have to be simplified.
-/// \brief If this SVal wraps a symbol return that SymbolRef.
+/// If this SVal wraps a symbol return that SymbolRef.
/// Otherwise, return 0.
///
/// Casts are ignored during lookup.
@@ -189,14 +200,14 @@ nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
nonloc::PointerToMember::iterator nonloc::PointerToMember::begin() const {
const PTMDataType PTMD = getPTMData();
if (PTMD.is<const DeclaratorDecl *>())
- return nonloc::PointerToMember::iterator();
+ return {};
return PTMD.get<const PointerToMemberData *>()->begin();
}
nonloc::PointerToMember::iterator nonloc::PointerToMember::end() const {
const PTMDataType PTMD = getPTMData();
if (PTMD.is<const DeclaratorDecl *>())
- return nonloc::PointerToMember::iterator();
+ return {};
return PTMD.get<const PointerToMemberData *>()->end();
}
@@ -220,7 +231,6 @@ bool SVal::isZeroConstant() const {
return isConstant(0);
}
-
//===----------------------------------------------------------------------===//
// Transfer function dispatch for Non-Locs.
//===----------------------------------------------------------------------===//
@@ -254,7 +264,6 @@ nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
BinaryOperator::Opcode Op,
const loc::ConcreteInt& R) const {
-
assert(BinaryOperator::isComparisonOp(Op) || Op == BO_Sub);
const llvm::APSInt *X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
@@ -291,19 +300,15 @@ void SVal::dumpToStream(raw_ostream &os) const {
void NonLoc::dumpToStream(raw_ostream &os) const {
switch (getSubKind()) {
case nonloc::ConcreteIntKind: {
- const nonloc::ConcreteInt& C = castAs<nonloc::ConcreteInt>();
- if (C.getValue().isUnsigned())
- os << C.getValue().getZExtValue();
- else
- os << C.getValue().getSExtValue();
- os << ' ' << (C.getValue().isUnsigned() ? 'U' : 'S')
- << C.getValue().getBitWidth() << 'b';
+ const auto &Value = castAs<nonloc::ConcreteInt>().getValue();
+ os << Value << ' ' << (Value.isSigned() ? 'S' : 'U')
+ << Value.getBitWidth() << 'b';
break;
}
- case nonloc::SymbolValKind: {
+ case nonloc::SymbolValKind:
os << castAs<nonloc::SymbolVal>().getSymbol();
break;
- }
+
case nonloc::LocAsIntegerKind: {
const nonloc::LocAsInteger& C = castAs<nonloc::LocAsInteger>();
os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
@@ -313,14 +318,14 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
const nonloc::CompoundVal& C = castAs<nonloc::CompoundVal>();
os << "compoundVal{";
bool first = true;
- for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+ for (const auto &I : C) {
if (first) {
os << ' '; first = false;
}
else
os << ", ";
- (*I).dumpToStream(os);
+ I.dumpToStream(os);
}
os << "}";
break;
@@ -353,7 +358,7 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
break;
}
default:
- assert (false && "Pretty-printed not implemented for this NonLoc.");
+ assert(false && "Pretty-printed not implemented for this NonLoc.");
break;
}
}
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 94d29d5a6ba3..beae0dfae289 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -12,8 +12,10 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
using namespace clang;
@@ -157,7 +159,8 @@ SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
return nonloc::SymbolVal(SymMgr.getExtentSymbol(FTR));
if (const SymbolicRegion *SymR = R->getSymbolicBase())
- return nonloc::SymbolVal(SymR->getSymbol());
+ return makeNonLoc(SymR->getSymbol(), BO_NE,
+ BasicVals.getZeroWithPtrWidth(), castTy);
// FALL-THROUGH
LLVM_FALLTHROUGH;
@@ -307,6 +310,197 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
return makeNonLoc(LHS, op, *ConvertedRHS, resultTy);
}
+// See if Sym is known to be in relation Rel with Bound.
+static bool isInRelation(BinaryOperator::Opcode Rel, SymbolRef Sym,
+ llvm::APSInt Bound, ProgramStateRef State) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ SVal Result =
+ SVB.evalBinOpNN(State, Rel, nonloc::SymbolVal(Sym),
+ nonloc::ConcreteInt(Bound), SVB.getConditionType());
+ if (auto DV = Result.getAs<DefinedSVal>()) {
+ return !State->assume(*DV, false);
+ }
+ return false;
+}
+
+// See if Sym is known to be within [min/4, max/4], where min and max
+// are the bounds of the symbol's integral type. With such symbols,
+// some manipulations can be performed without the risk of overflow.
+// assume() doesn't cause infinite recursion because we should be dealing
+// with simpler symbols on every recursive call.
+static bool isWithinConstantOverflowBounds(SymbolRef Sym,
+ ProgramStateRef State) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ BasicValueFactory &BV = SVB.getBasicValueFactory();
+
+ QualType T = Sym->getType();
+ assert(T->isSignedIntegerOrEnumerationType() &&
+ "This only works with signed integers!");
+ APSIntType AT = BV.getAPSIntType(T);
+
+ llvm::APSInt Max = AT.getMaxValue() / AT.getValue(4), Min = -Max;
+ return isInRelation(BO_LE, Sym, Max, State) &&
+ isInRelation(BO_GE, Sym, Min, State);
+}
+
+// Same for the concrete integers: see if I is within [min/4, max/4].
+static bool isWithinConstantOverflowBounds(llvm::APSInt I) {
+ APSIntType AT(I);
+ assert(!AT.isUnsigned() &&
+ "This only works with signed integers!");
+
+ llvm::APSInt Max = AT.getMaxValue() / AT.getValue(4), Min = -Max;
+ return (I <= Max) && (I >= Min);
+}
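A worked instance of the bound, assuming a 32-bit int: Max = INT_MAX / 4 = 536870911, so any two values a and b accepted by these checks satisfy |a + b| <= 1073741822 and |a - b| <= 1073741822, both strictly less than INT_MAX = 2147483647, which is why the rearrangement below cannot overflow.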
+
+static std::pair<SymbolRef, llvm::APSInt>
+decomposeSymbol(SymbolRef Sym, BasicValueFactory &BV) {
+ if (const auto *SymInt = dyn_cast<SymIntExpr>(Sym))
+ if (BinaryOperator::isAdditiveOp(SymInt->getOpcode()))
+ return std::make_pair(SymInt->getLHS(),
+ (SymInt->getOpcode() == BO_Add) ?
+ (SymInt->getRHS()) :
+ (-SymInt->getRHS()));
+
+ // Fail to decompose: "reduce" the problem to the "$x + 0" case.
+ return std::make_pair(Sym, BV.getValue(0, Sym->getType()));
+}
+
+// Simplify "(LSym + LInt) Op (RSym + RInt)" assuming all values are of the
+// same signed integral type and no overflows occur (which should be checked
+// by the caller).
+static NonLoc doRearrangeUnchecked(ProgramStateRef State,
+ BinaryOperator::Opcode Op,
+ SymbolRef LSym, llvm::APSInt LInt,
+ SymbolRef RSym, llvm::APSInt RInt) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ BasicValueFactory &BV = SVB.getBasicValueFactory();
+ SymbolManager &SymMgr = SVB.getSymbolManager();
+
+ QualType SymTy = LSym->getType();
+ assert(SymTy == RSym->getType() &&
+ "Symbols are not of the same type!");
+ assert(APSIntType(LInt) == BV.getAPSIntType(SymTy) &&
+ "Integers are not of the same type as symbols!");
+ assert(APSIntType(RInt) == BV.getAPSIntType(SymTy) &&
+ "Integers are not of the same type as symbols!");
+
+ QualType ResultTy;
+ if (BinaryOperator::isComparisonOp(Op))
+ ResultTy = SVB.getConditionType();
+ else if (BinaryOperator::isAdditiveOp(Op))
+ ResultTy = SymTy;
+ else
+ llvm_unreachable("Operation not suitable for unchecked rearrangement!");
+
+ // FIXME: Can we use assume() without getting into an infinite recursion?
+ if (LSym == RSym)
+ return SVB.evalBinOpNN(State, Op, nonloc::ConcreteInt(LInt),
+ nonloc::ConcreteInt(RInt), ResultTy)
+ .castAs<NonLoc>();
+
+ SymbolRef ResultSym = nullptr;
+ BinaryOperator::Opcode ResultOp;
+ llvm::APSInt ResultInt;
+ if (BinaryOperator::isComparisonOp(Op)) {
+ // Prefer comparing to a non-negative number.
+    // FIXME: Maybe it'd be better to have consistency in
+    // "$x - $y" vs. "$y - $x" because those are the solver's keys.
+ if (LInt > RInt) {
+ ResultSym = SymMgr.getSymSymExpr(RSym, BO_Sub, LSym, SymTy);
+ ResultOp = BinaryOperator::reverseComparisonOp(Op);
+ ResultInt = LInt - RInt; // Opposite order!
+ } else {
+ ResultSym = SymMgr.getSymSymExpr(LSym, BO_Sub, RSym, SymTy);
+ ResultOp = Op;
+ ResultInt = RInt - LInt; // Opposite order!
+ }
+ } else {
+ ResultSym = SymMgr.getSymSymExpr(LSym, Op, RSym, SymTy);
+ ResultInt = (Op == BO_Add) ? (LInt + RInt) : (LInt - RInt);
+ ResultOp = BO_Add;
+ // Bring back the cosmetic difference.
+ if (ResultInt < 0) {
+ ResultInt = -ResultInt;
+ ResultOp = BO_Sub;
+ } else if (ResultInt == 0) {
+ // Shortcut: Simplify "$x + 0" to "$x".
+ return nonloc::SymbolVal(ResultSym);
+ }
+ }
+ const llvm::APSInt &PersistentResultInt = BV.getValue(ResultInt);
+ return nonloc::SymbolVal(
+ SymMgr.getSymIntExpr(ResultSym, ResultOp, PersistentResultInt, ResultTy));
+}
+
+// Rearrange if the symbol type matches the result type and, if the operator
+// is a comparison operator, both the symbol and the constant are within
+// constant overflow bounds.
+static bool shouldRearrange(ProgramStateRef State, BinaryOperator::Opcode Op,
+ SymbolRef Sym, llvm::APSInt Int, QualType Ty) {
+ return Sym->getType() == Ty &&
+ (!BinaryOperator::isComparisonOp(Op) ||
+ (isWithinConstantOverflowBounds(Sym, State) &&
+ isWithinConstantOverflowBounds(Int)));
+}
+
+static Optional<NonLoc> tryRearrange(ProgramStateRef State,
+ BinaryOperator::Opcode Op, NonLoc Lhs,
+ NonLoc Rhs, QualType ResultTy) {
+ ProgramStateManager &StateMgr = State->getStateManager();
+ SValBuilder &SVB = StateMgr.getSValBuilder();
+
+ // We expect all operands to be of the same type, stored in SingleTy.
+ QualType SingleTy;
+
+ auto &Opts =
+ StateMgr.getOwningEngine()->getAnalysisManager().getAnalyzerOptions();
+
+ // FIXME: After putting complexity threshold to the symbols we can always
+ // rearrange additive operations but rearrange comparisons only if
+ // option is set.
+ if (!Opts.shouldAggressivelySimplifyBinaryOperation())
+ return None;
+
+ SymbolRef LSym = Lhs.getAsSymbol();
+ if (!LSym)
+ return None;
+
+ if (BinaryOperator::isComparisonOp(Op)) {
+ SingleTy = LSym->getType();
+ if (ResultTy != SVB.getConditionType())
+ return None;
+ // For comparisons, SingleTy comes from the symbols, not from ResultTy.
+ } else if (BinaryOperator::isAdditiveOp(Op)) {
+ SingleTy = ResultTy;
+ if (LSym->getType() != SingleTy)
+ return None;
+ // Subtracting unsigned integers is a nightmare.
+ if (!SingleTy->isSignedIntegerOrEnumerationType())
+ return None;
+ } else {
+ // Don't rearrange other operations.
+ return None;
+ }
+
+ assert(!SingleTy.isNull() && "We should have figured out the type by now!");
+
+ SymbolRef RSym = Rhs.getAsSymbol();
+ if (!RSym || RSym->getType() != SingleTy)
+ return None;
+
+ BasicValueFactory &BV = State->getBasicVals();
+ llvm::APSInt LInt, RInt;
+ std::tie(LSym, LInt) = decomposeSymbol(LSym, BV);
+ std::tie(RSym, RInt) = decomposeSymbol(RSym, BV);
+ if (!shouldRearrange(State, Op, LSym, LInt, SingleTy) ||
+ !shouldRearrange(State, Op, RSym, RInt, SingleTy))
+ return None;
+
+ // We know that no overflows can occur anymore.
+ return doRearrangeUnchecked(State, Op, LSym, LInt, RSym, RInt);
+}
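A worked example of the whole pipeline, with hypothetical symbols $x and $y of type int:

  ($x + 3) < ($y + 5)
    decomposeSymbol:  LSym = $x, LInt = 3; RSym = $y, RInt = 5
    LInt <= RInt, so: ResultSym = $x - $y, ResultOp = <, ResultInt = 5 - 3
    result:           ($x - $y) < 2

  ($x + 7) - ($y + 3)
    additive case:    ResultSym = $x - $y, ResultInt = 7 - 3, ResultOp = +
    result:           ($x - $y) + 4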
+
SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs,
@@ -559,6 +753,9 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
+ if (Optional<NonLoc> V = tryRearrange(state, op, lhs, rhs, resultTy))
+ return *V;
+
// Give up -- this is not a symbolic expression we can handle.
return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
}
@@ -988,6 +1185,12 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
elementType = resultTy->getPointeeType();
}
+ // Represent arithmetic on void pointers as arithmetic on char pointers.
+ // It is fine for a TypedValueRegion of char value type to represent
+ // a void pointer. Note that arithmetic on void pointers is a GCC extension.
+ if (elementType->isVoidType())
+ elementType = getContext().CharTy;
+
if (Optional<NonLoc> indexV = index.getAs<NonLoc>()) {
return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
superR, getContext()));
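A hypothetical analyzed snippet exercising this hunk (GNU C, since void-pointer arithmetic is a GCC extension):

  void *p = get();   // get() is an assumed external function
  void *q = p + 2;   // modeled as (char *)p + 2: the resulting
                     // ElementRegion has char element type and index 2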
@@ -1023,24 +1226,42 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
ProgramStateRef State;
SValBuilder &SVB;
+ // Cache results for the lifetime of the Simplifier. Results change every
+ // time new constraints are added to the program state, which is the whole
+ // point of simplifying, and for that very reason it's pointless to maintain
+ // the same cache for the duration of the whole analysis.
+ llvm::DenseMap<SymbolRef, SVal> Cached;
+
+ static bool isUnchanged(SymbolRef Sym, SVal Val) {
+ return Sym == Val.getAsSymbol();
+ }
+
public:
Simplifier(ProgramStateRef State)
: State(State), SVB(State->getStateManager().getSValBuilder()) {}
SVal VisitSymbolData(const SymbolData *S) {
if (const llvm::APSInt *I =
- SVB.getKnownValue(State, nonloc::SymbolVal(S)))
+ SVB.getKnownValue(State, SVB.makeSymbolVal(S)))
return Loc::isLocType(S->getType()) ? (SVal)SVB.makeIntLocVal(*I)
: (SVal)SVB.makeIntVal(*I);
- return Loc::isLocType(S->getType()) ? (SVal)SVB.makeLoc(S)
- : nonloc::SymbolVal(S);
+ return SVB.makeSymbolVal(S);
}
// TODO: Support SymbolCast. Support IntSymExpr when/if we actually
// start producing them.
SVal VisitSymIntExpr(const SymIntExpr *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+
SVal LHS = Visit(S->getLHS());
+ if (isUnchanged(S->getLHS(), LHS)) {
+ SVal V = SVB.makeSymbolVal(S);
+ Cached[S] = V;
+ return V;
+ }
SVal RHS;
// By looking at the APSInt in the right-hand side of S, we cannot
// figure out if it should be treated as a Loc or as a NonLoc.
@@ -1059,13 +1280,27 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
} else {
RHS = SVB.makeIntVal(S->getRHS());
}
- return SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType());
+
+ SVal V = SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType());
+ Cached[S] = V;
+ return V;
}
SVal VisitSymSymExpr(const SymSymExpr *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+
SVal LHS = Visit(S->getLHS());
SVal RHS = Visit(S->getRHS());
- return SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType());
+ if (isUnchanged(S->getLHS(), LHS) && isUnchanged(S->getRHS(), RHS)) {
+ SVal V = SVB.makeSymbolVal(S);
+ Cached[S] = V;
+ return V;
+ }
+ SVal V = SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType());
+ Cached[S] = V;
+ return V;
}
SVal VisitSymExpr(SymbolRef S) { return nonloc::SymbolVal(S); }
@@ -1075,13 +1310,20 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
SVal VisitNonLocSymbolVal(nonloc::SymbolVal V) {
// Simplification is much more costly than computing complexity.
// For high complexity, it may be not worth it.
- if (V.getSymbol()->computeComplexity() > 100)
- return V;
return Visit(V.getSymbol());
}
SVal VisitSVal(SVal V) { return V; }
};
- return Simplifier(State).Visit(V);
+ // A crude way of preventing this function from calling itself from evalBinOp.
+ static bool isReentering = false;
+ if (isReentering)
+ return V;
+
+ isReentering = true;
+ SVal SimplifiedV = Simplifier(State).Visit(V);
+ isReentering = false;
+
+ return SimplifiedV;
}
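The function-local static flag works because the analyzer core runs single-threaded, but the manual set/reset pair is fragile under early returns. A sketch of the same guard written with llvm::SaveAndRestore (an alternative, not what the patch does):

  #include "llvm/Support/SaveAndRestore.h"

  static bool isReentering = false;
  if (isReentering)
    return V;
  llvm::SaveAndRestore<bool> Guard(isReentering, true);
  return Simplifier(State).Visit(V);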
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index 173fdd8d0056..5ab5c082269b 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -1,4 +1,4 @@
-//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//===- Store.cpp - Interface for maps from Locations to Values ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,18 +12,37 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
using namespace clang;
using namespace ento;
StoreManager::StoreManager(ProgramStateManager &stateMgr)
- : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
- MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
+ : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
+ MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
StoreRef StoreManager::enterStackFrame(Store OldStore,
const CallEvent &Call,
@@ -33,11 +52,8 @@ StoreRef StoreManager::enterStackFrame(Store OldStore,
SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
Call.getInitialStackFrameContents(LCtx, InitialBindings);
- for (CallEvent::BindingsTy::iterator I = InitialBindings.begin(),
- E = InitialBindings.end();
- I != E; ++I) {
- Store = Bind(Store.getStore(), I->first, I->second);
- }
+ for (const auto &I : InitialBindings)
+ Store = Bind(Store.getStore(), I.first, I.second);
return Store;
}
@@ -49,10 +65,6 @@ const ElementRegion *StoreManager::MakeElementRegion(const SubRegion *Base,
return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}
-StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
- return StoreRef(store, *this);
-}
-
const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
QualType T) {
NonLoc idx = svalBuilder.makeZeroArrayIndex();
@@ -61,7 +73,6 @@ const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
}
const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
-
ASTContext &Ctx = StateMgr.getContext();
// Handle casts to Objective-C objects.
@@ -92,7 +103,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
// Handle casts from compatible types.
if (R->isBoundable())
- if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
if (CanonPointeeTy == ObjTy)
return R;
@@ -164,7 +175,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
// Edge case: we are at 0 bytes off the beginning of baseR. We
// check to see if type we are casting to is the same as the base
// region. If so, just return the base region.
- if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(baseR)) {
+ if (const auto *TR = dyn_cast<TypedValueRegion>(baseR)) {
QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
if (CanonPointeeTy == ObjTy)
@@ -219,7 +230,7 @@ static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
if (!MR)
return true;
- const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
+ const auto *TVR = dyn_cast<TypedValueRegion>(MR);
if (!TVR)
return true;
@@ -253,11 +264,9 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
// Walk through the path to create nested CXXBaseRegions.
SVal Result = Derived;
- for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end();
- I != E; ++I) {
- Result = evalDerivedToBase(Result, I->Base->getType(),
- I->Base->isVirtual());
- }
+ for (const auto &I : Path)
+ Result = evalDerivedToBase(Result, I.Base->getType(),
+ I.Base->isVirtual());
return Result;
}
@@ -286,9 +295,9 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
/// symbolic regions, where the dynamic type is merely bounded (and even then,
/// only ostensibly!), but does not take advantage of any dynamic type info.
static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
- if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR))
+ if (const auto *TVR = dyn_cast<TypedValueRegion>(MR))
return TVR->getValueType()->getAsCXXRecordDecl();
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
return nullptr;
}
@@ -327,7 +336,7 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
}
- if (const CXXBaseObjectRegion *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
+ if (const auto *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
// Drill down the chain to get the derived classes.
MR = BaseR->getSuperRegion();
continue;
@@ -348,7 +357,7 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
if (Uncasted == MR) {
// We reached the bottom of the hierarchy and did not find the derived
- // class. We we must be casting the base to derived, so the cast should
+ // class. We must be casting the base to derived, so the cast should
// fail.
break;
}
@@ -361,27 +370,27 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
return UnknownVal();
}
-
/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
- QualType castTy, bool performTestOnly) {
-
+ QualType castTy) {
if (castTy.isNull() || V.isUnknownOrUndef())
return V;
- ASTContext &Ctx = svalBuilder.getContext();
-
- if (performTestOnly) {
- // Automatically translate references to pointers.
- QualType T = R->getValueType();
- if (const ReferenceType *RT = T->getAs<ReferenceType>())
- T = Ctx.getPointerType(RT->getPointeeType());
-
- assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
- return V;
- }
+ // When retrieving symbolic pointer and expecting a non-void pointer,
+ // wrap them into element regions of the expected type if necessary.
+ // SValBuilder::dispatchCast() doesn't do that, but it is necessary to
+ // make sure that the retrieved value makes sense, because there's no other
+ // cast in the AST that would tell us to cast it to the correct pointer type.
+ // We might need to do that for non-void pointers as well.
+ // FIXME: We really need a single good function to perform casts for us
+ // correctly every time we need it.
+ if (castTy->isPointerType() && !castTy->isVoidPointerType())
+ if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(V.getAsRegion()))
+ if (SR->getSymbol()->getType().getCanonicalType() !=
+ castTy.getCanonicalType())
+ return loc::MemRegionVal(castRegion(SR, castTy));
return svalBuilder.dispatchCast(V, castTy);
}
@@ -421,7 +430,7 @@ SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
// NOTE: We must have this check first because ObjCIvarDecl is a subclass
// of FieldDecl.
- if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+ if (const auto *ID = dyn_cast<ObjCIvarDecl>(D))
return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
@@ -433,7 +442,6 @@ SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal Base) {
-
// If the base is an unknown or undefined value, just return it back.
// FIXME: For absolute pointer addresses, we just return that value back as
// well, although in reality we should return the offset added to that
@@ -448,13 +456,12 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
Base.castAs<loc::MemRegionVal>().getRegionAs<SubRegion>();
// Pointer of any type can be cast and used as array base.
- const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+ const auto *ElemR = dyn_cast<ElementRegion>(BaseRegion);
// Convert the offset to the appropriate size and signedness.
Offset = svalBuilder.convertToArrayIndex(Offset).castAs<NonLoc>();
if (!ElemR) {
- //
// If the base region is not an ElementRegion, create one.
// This can happen in the following example:
//
@@ -462,7 +469,6 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
// p[1] = 8;
//
// Observe that 'p' binds to an AllocaRegion.
- //
return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
BaseRegion, Ctx));
}
@@ -499,7 +505,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
Ctx));
}
-StoreManager::BindingsHandler::~BindingsHandler() {}
+StoreManager::BindingsHandler::~BindingsHandler() = default;
bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
Store store,
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index f2d5ee83f3cc..ed197010ebb7 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -1,4 +1,4 @@
-//== SymbolManager.h - Management of Symbolic Values ------------*- C++ -*--==//
+//===- SymbolManager.cpp - Management of Symbolic Values ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,15 +13,27 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
using namespace clang;
using namespace ento;
-void SymExpr::anchor() { }
+void SymExpr::anchor() {}
LLVM_DUMP_METHOD void SymExpr::dump() const {
dumpToStream(llvm::errs());
@@ -88,7 +100,7 @@ void SymbolMetadata::dumpToStream(raw_ostream &os) const {
<< getRegion() << ',' << T.getAsString() << '}';
}
-void SymbolData::anchor() { }
+void SymbolData::anchor() {}
void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
os << "reg_$" << getSymbolID()
@@ -138,7 +150,7 @@ void SymExpr::symbol_iterator::expand() {
itr.push_back(cast<IntSymExpr>(SE)->getRHS());
return;
case SymExpr::SymSymExprKind: {
- const SymSymExpr *x = cast<SymSymExpr>(SE);
+ const auto *x = cast<SymSymExpr>(SE);
itr.push_back(x->getLHS());
itr.push_back(x->getRHS());
return;
@@ -147,13 +159,6 @@ void SymExpr::symbol_iterator::expand() {
llvm_unreachable("unhandled expansion case");
}
-unsigned SymExpr::computeComplexity() const {
- unsigned R = 0;
- for (symbol_iterator I = symbol_begin(), E = symbol_end(); I != E; ++I)
- R++;
- return R;
-}
-
const SymbolRegionValue*
SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
llvm::FoldingSetNodeID profile;
@@ -192,7 +197,6 @@ const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
const SymbolDerived*
SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
const TypedValueRegion *R) {
-
llvm::FoldingSetNodeID profile;
SymbolDerived::Profile(profile, parentSymbol, R);
void *InsertPos;
@@ -227,7 +231,6 @@ const SymbolMetadata *
SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
const LocationContext *LCtx,
unsigned Count, const void *SymbolTag) {
-
llvm::FoldingSetNodeID profile;
SymbolMetadata::Profile(profile, R, S, T, LCtx, Count, SymbolTag);
void *InsertPos;
@@ -382,11 +385,10 @@ void SymbolReaper::markDependentsLive(SymbolRef sym) {
LI->second = HaveMarkedDependents;
if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
- for (SymbolRefSmallVectorTy::const_iterator I = Deps->begin(),
- E = Deps->end(); I != E; ++I) {
- if (TheLiving.find(*I) != TheLiving.end())
+ for (const auto I : *Deps) {
+ if (TheLiving.find(I) != TheLiving.end())
continue;
- markLive(*I);
+ markLive(I);
}
}
}
@@ -405,7 +407,7 @@ void SymbolReaper::markLive(const MemRegion *region) {
void SymbolReaper::markElementIndicesLive(const MemRegion *region) {
for (auto SR = dyn_cast<SubRegion>(region); SR;
SR = dyn_cast<SubRegion>(SR->getSuperRegion())) {
- if (auto ER = dyn_cast<ElementRegion>(SR)) {
+ if (const auto *ER = dyn_cast<ElementRegion>(SR)) {
SVal Idx = ER->getIndex();
for (auto SI = Idx.symbol_begin(), SE = Idx.symbol_end(); SI != SE; ++SI)
markLive(*SI);
@@ -432,10 +434,10 @@ bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
MR = MR->getBaseRegion();
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
return isLive(SR->getSymbol());
- if (const VarRegion *VR = dyn_cast<VarRegion>(MR))
+ if (const auto *VR = dyn_cast<VarRegion>(MR))
return isLive(VR, true);
// FIXME: This is a gross over-approximation. What we really need is a way to
@@ -533,7 +535,7 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
if (!LCtx)
return false;
- const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
+ const StackFrameContext *CurrentContext = LCtx->getStackFrame();
if (VarContext == CurrentContext) {
// If no statement is provided, everything is live.
@@ -547,7 +549,7 @@ bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
return false;
unsigned &cachedQuery =
- const_cast<SymbolReaper*>(this)->includedRegionCache[VR];
+ const_cast<SymbolReaper *>(this)->includedRegionCache[VR];
if (cachedQuery) {
return cachedQuery == 1;
diff --git a/lib/StaticAnalyzer/Core/WorkList.cpp b/lib/StaticAnalyzer/Core/WorkList.cpp
new file mode 100644
index 000000000000..4b227375da9b
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/WorkList.cpp
@@ -0,0 +1,254 @@
+//===- WorkList.cpp - Analyzer work-list implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines different worklist implementations for the static analyzer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
+#include "llvm/ADT/PriorityQueue.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include <deque>
+#include <vector>
+
+using namespace clang;
+using namespace ento;
+
+#define DEBUG_TYPE "WorkList"
+
+STATISTIC(MaxQueueSize, "Maximum size of the worklist");
+STATISTIC(MaxReachableSize, "Maximum size of auxiliary worklist set");
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class DFS : public WorkList {
+ SmallVector<WorkListUnit, 20> Stack;
+
+public:
+ bool hasWork() const override {
+ return !Stack.empty();
+ }
+
+ void enqueue(const WorkListUnit& U) override {
+ Stack.push_back(U);
+ }
+
+ WorkListUnit dequeue() override {
+ assert(!Stack.empty());
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+};
+
+class BFS : public WorkList {
+ std::deque<WorkListUnit> Queue;
+
+public:
+ bool hasWork() const override {
+ return !Queue.empty();
+ }
+
+ void enqueue(const WorkListUnit& U) override {
+ Queue.push_back(U);
+ }
+
+ WorkListUnit dequeue() override {
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+};
+
+} // namespace
+
+// Define the WorkList destructor here because the class has virtual member
+// functions, and we want its code generated in a single compilation unit.
+WorkList::~WorkList() = default;
+
+std::unique_ptr<WorkList> WorkList::makeDFS() {
+ return llvm::make_unique<DFS>();
+}
+
+std::unique_ptr<WorkList> WorkList::makeBFS() {
+ return llvm::make_unique<BFS>();
+}
+
+namespace {
+
+ class BFSBlockDFSContents : public WorkList {
+ std::deque<WorkListUnit> Queue;
+ SmallVector<WorkListUnit, 20> Stack;
+
+ public:
+ bool hasWork() const override {
+ return !Queue.empty() || !Stack.empty();
+ }
+
+ void enqueue(const WorkListUnit& U) override {
+ if (U.getNode()->getLocation().getAs<BlockEntrance>())
+ Queue.push_front(U);
+ else
+ Stack.push_back(U);
+ }
+
+ WorkListUnit dequeue() override {
+ // Process all basic blocks to completion.
+ if (!Stack.empty()) {
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ assert(!Queue.empty());
+      // Don't use a const reference: the subsequent pop_front() would make
+      // it unsafe.
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+ };
+
+} // namespace
+
+std::unique_ptr<WorkList> WorkList::makeBFSBlockDFSContents() {
+ return llvm::make_unique<BFSBlockDFSContents>();
+}
+
+namespace {
+
+class UnexploredFirstStack : public WorkList {
+ /// Stack of nodes known to have statements we have not traversed yet.
+ SmallVector<WorkListUnit, 20> StackUnexplored;
+
+ /// Stack of all other nodes.
+ SmallVector<WorkListUnit, 20> StackOthers;
+
+ using BlockID = unsigned;
+ using LocIdentifier = std::pair<BlockID, const StackFrameContext *>;
+
+ llvm::DenseSet<LocIdentifier> Reachable;
+
+public:
+ bool hasWork() const override {
+ return !(StackUnexplored.empty() && StackOthers.empty());
+ }
+
+ void enqueue(const WorkListUnit &U) override {
+ const ExplodedNode *N = U.getNode();
+ auto BE = N->getLocation().getAs<BlockEntrance>();
+
+ if (!BE) {
+      // Assume the choice of the order of the preceding block entrance was
+      // correct.
+ StackUnexplored.push_back(U);
+ } else {
+ LocIdentifier LocId = std::make_pair(
+ BE->getBlock()->getBlockID(),
+ N->getLocationContext()->getStackFrame());
+ auto InsertInfo = Reachable.insert(LocId);
+
+ if (InsertInfo.second) {
+ StackUnexplored.push_back(U);
+ } else {
+ StackOthers.push_back(U);
+ }
+ }
+ MaxReachableSize.updateMax(Reachable.size());
+ MaxQueueSize.updateMax(StackUnexplored.size() + StackOthers.size());
+ }
+
+ WorkListUnit dequeue() override {
+ if (!StackUnexplored.empty()) {
+ WorkListUnit &U = StackUnexplored.back();
+ StackUnexplored.pop_back();
+ return U;
+ } else {
+ WorkListUnit &U = StackOthers.back();
+ StackOthers.pop_back();
+ return U;
+ }
+ }
+};
+
+} // namespace
+
+std::unique_ptr<WorkList> WorkList::makeUnexploredFirst() {
+ return llvm::make_unique<UnexploredFirstStack>();
+}
+
+namespace {
+class UnexploredFirstPriorityQueue : public WorkList {
+ using BlockID = unsigned;
+ using LocIdentifier = std::pair<BlockID, const StackFrameContext *>;
+
+ // How many times each location was visited.
+ // Is signed because we negate it later in order to have a reversed
+ // comparison.
+ using VisitedTimesMap = llvm::DenseMap<LocIdentifier, int>;
+
+  // Compare by number of times the location was visited first (negated
+  // to prefer less often visited locations), then by insertion time (prefer
+  // expanding nodes inserted later first, which emulates DFS).
+ using QueuePriority = std::pair<int, unsigned long>;
+ using QueueItem = std::pair<WorkListUnit, QueuePriority>;
+
+ struct ExplorationComparator {
+ bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
+ return LHS.second < RHS.second;
+ }
+ };
+
+ // Number of inserted nodes, used to emulate DFS ordering in the priority
+ // queue when insertions are equal.
+ unsigned long Counter = 0;
+
+  // Number of times each location was reached.
+ VisitedTimesMap NumReached;
+
+ // The top item is the largest one.
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ queue;
+
+public:
+ bool hasWork() const override {
+ return !queue.empty();
+ }
+
+ void enqueue(const WorkListUnit &U) override {
+ const ExplodedNode *N = U.getNode();
+    int NumVisited = 0;
+ if (auto BE = N->getLocation().getAs<BlockEntrance>()) {
+ LocIdentifier LocId = std::make_pair(
+ BE->getBlock()->getBlockID(),
+ N->getLocationContext()->getStackFrame());
+ NumVisited = NumReached[LocId]++;
+ }
+
+ queue.push(std::make_pair(U, std::make_pair(-NumVisited, ++Counter)));
+ }
+
+ WorkListUnit dequeue() override {
+ QueueItem U = queue.top();
+ queue.pop();
+ return U.first;
+ }
+};
+} // namespace
+
+std::unique_ptr<WorkList> WorkList::makeUnexploredFirstPriorityQueue() {
+ return llvm::make_unique<UnexploredFirstPriorityQueue>();
+}
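A minimal sketch of how these factories are consumed (hypothetical call site; the real loop lives in CoreEngine::ExecuteWorkList):

  std::unique_ptr<WorkList> WL = WorkList::makeUnexploredFirst();
  while (WL->hasWork()) {
    WorkListUnit U = WL->dequeue();
    // ... process U.getNode() and enqueue its successors via WL->enqueue()
  }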
diff --git a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
index f9f9057a89cd..7379ded49c80 100644
--- a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
@@ -10,7 +10,11 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTExpr.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTSolver.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTSort.h"
#include "clang/Config/config.h"
@@ -21,30 +25,9 @@ using namespace ento;
#include <z3.h>
-// Forward declarations
-namespace {
-class Z3Expr;
-class ConstraintZ3 {};
-} // end anonymous namespace
-
-typedef llvm::ImmutableSet<std::pair<SymbolRef, Z3Expr>> ConstraintZ3Ty;
-
-// Expansion of REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintZ3, Z3SetPair)
-namespace clang {
-namespace ento {
-template <>
-struct ProgramStateTrait<ConstraintZ3>
- : public ProgramStatePartialTrait<ConstraintZ3Ty> {
- static void *GDMIndex() {
- static int Index;
- return &Index;
- }
-};
-} // end namespace ento
-} // end namespace clang
-
namespace {
+/// Configuration class for Z3
class Z3Config {
friend class Z3Context;
@@ -63,45 +46,60 @@ public:
~Z3Config() { Z3_del_config(Config); }
}; // end class Z3Config
-class Z3Context {
- Z3_context ZC_P;
+// Function used to report errors
+void Z3ErrorHandler(Z3_context Context, Z3_error_code Error) {
+ llvm::report_fatal_error("Z3 error: " +
+ llvm::Twine(Z3_get_error_msg_ex(Context, Error)));
+}
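+// report_fatal_error does not return, so any error reported by Z3 aborts
+// the analysis instead of letting a bad AST propagate.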
+/// Wrapper for Z3 context
+class Z3Context : public SMTContext {
public:
- static Z3_context ZC;
+ Z3_context Context;
- Z3Context() : ZC_P(Z3_mk_context_rc(Z3Config().Config)) { ZC = ZC_P; }
+ Z3Context() : SMTContext() {
+ Context = Z3_mk_context_rc(Z3Config().Config);
+ // The error function is set here because the context is the first object
+ // created by the backend
+ Z3_set_error_handler(Context, Z3ErrorHandler);
+ }
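+  // Note: the temporary Z3Config above is deleted right after the context is
+  // created; the Z3 C API permits this, as the context keeps its own copy of
+  // the configuration settings.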
- ~Z3Context() {
- Z3_del_context(ZC);
- Z3_finalize_memory();
- ZC_P = nullptr;
+ virtual ~Z3Context() {
+ Z3_del_context(Context);
+ Context = nullptr;
}
}; // end class Z3Context
-class Z3Sort {
- friend class Z3Expr;
+/// Wrapper for Z3 Sort
+class Z3Sort : public SMTSort {
+ friend class Z3Solver;
+
+ Z3Context &Context;
Z3_sort Sort;
- Z3Sort() : Sort(nullptr) {}
- Z3Sort(Z3_sort ZS) : Sort(ZS) {
- Z3_inc_ref(Z3Context::ZC, reinterpret_cast<Z3_ast>(Sort));
+public:
+  /// Construct a Z3Sort from an existing Z3_sort; mainly called through
+  /// std::make_shared.
+ Z3Sort(Z3Context &C, Z3_sort ZS) : SMTSort(), Context(C), Sort(ZS) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
-public:
/// Override implicit copy constructor for correct reference counting.
- Z3Sort(const Z3Sort &Copy) : Sort(Copy.Sort) {
- Z3_inc_ref(Z3Context::ZC, reinterpret_cast<Z3_ast>(Sort));
+ Z3Sort(const Z3Sort &Copy)
+ : SMTSort(), Context(Copy.Context), Sort(Copy.Sort) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
/// Provide move constructor
- Z3Sort(Z3Sort &&Move) : Sort(nullptr) { *this = std::move(Move); }
+ Z3Sort(Z3Sort &&Move) : SMTSort(), Context(Move.Context), Sort(nullptr) {
+ *this = std::move(Move);
+ }
+  /// Provide move assignment operator
Z3Sort &operator=(Z3Sort &&Move) {
if (this != &Move) {
if (Sort)
- Z3_dec_ref(Z3Context::ZC, reinterpret_cast<Z3_ast>(Sort));
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
Sort = Move.Sort;
Move.Sort = nullptr;
}
@@ -110,119 +108,78 @@ public:
~Z3Sort() {
if (Sort)
- Z3_dec_ref(Z3Context::ZC, reinterpret_cast<Z3_ast>(Sort));
- }
-
- // Return a boolean sort.
- static Z3Sort getBoolSort() { return Z3Sort(Z3_mk_bool_sort(Z3Context::ZC)); }
-
- // Return an appropriate bitvector sort for the given bitwidth.
- static Z3Sort getBitvectorSort(unsigned BitWidth) {
- return Z3Sort(Z3_mk_bv_sort(Z3Context::ZC, BitWidth));
- }
-
- // Return an appropriate floating-point sort for the given bitwidth.
- static Z3Sort getFloatSort(unsigned BitWidth) {
- Z3_sort Sort;
-
- switch (BitWidth) {
- default:
- llvm_unreachable("Unsupported floating-point bitwidth!");
- break;
- case 16:
- Sort = Z3_mk_fpa_sort_16(Z3Context::ZC);
- break;
- case 32:
- Sort = Z3_mk_fpa_sort_32(Z3Context::ZC);
- break;
- case 64:
- Sort = Z3_mk_fpa_sort_64(Z3Context::ZC);
- break;
- case 128:
- Sort = Z3_mk_fpa_sort_128(Z3Context::ZC);
- break;
- }
- return Z3Sort(Sort);
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
- // Return an appropriate sort for the given AST.
- static Z3Sort getSort(Z3_ast AST) {
- return Z3Sort(Z3_get_sort(Z3Context::ZC, AST));
+ bool isBitvectorSortImpl() const override {
+ return (Z3_get_sort_kind(Context.Context, Sort) == Z3_BV_SORT);
}
- Z3_sort_kind getSortKind() const {
- return Z3_get_sort_kind(Z3Context::ZC, Sort);
+ bool isFloatSortImpl() const override {
+ return (Z3_get_sort_kind(Context.Context, Sort) == Z3_FLOATING_POINT_SORT);
}
- unsigned getBitvectorSortSize() const {
- assert(getSortKind() == Z3_BV_SORT && "Not a bitvector sort!");
- return Z3_get_bv_sort_size(Z3Context::ZC, Sort);
+ bool isBooleanSortImpl() const override {
+ return (Z3_get_sort_kind(Context.Context, Sort) == Z3_BOOL_SORT);
}
- unsigned getFloatSortSize() const {
- assert(getSortKind() == Z3_FLOATING_POINT_SORT &&
- "Not a floating-point sort!");
- return Z3_fpa_get_ebits(Z3Context::ZC, Sort) +
- Z3_fpa_get_sbits(Z3Context::ZC, Sort);
+ unsigned getBitvectorSortSizeImpl() const override {
+ return Z3_get_bv_sort_size(Context.Context, Sort);
}
- bool operator==(const Z3Sort &Other) const {
- return Z3_is_eq_sort(Z3Context::ZC, Sort, Other.Sort);
+ unsigned getFloatSortSizeImpl() const override {
+ return Z3_fpa_get_ebits(Context.Context, Sort) +
+ Z3_fpa_get_sbits(Context.Context, Sort);
+ }
+
+ bool equal_to(SMTSort const &Other) const override {
+ return Z3_is_eq_sort(Context.Context, Sort,
+ static_cast<const Z3Sort &>(Other).Sort);
}
Z3Sort &operator=(const Z3Sort &Move) {
- Z3_inc_ref(Z3Context::ZC, reinterpret_cast<Z3_ast>(Move.Sort));
- Z3_dec_ref(Z3Context::ZC, reinterpret_cast<Z3_ast>(Sort));
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Move.Sort));
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
Sort = Move.Sort;
return *this;
}
- void print(raw_ostream &OS) const {
- OS << Z3_sort_to_string(Z3Context::ZC, Sort);
+ void print(raw_ostream &OS) const override {
+ OS << Z3_sort_to_string(Context.Context, Sort);
}
-
- LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
}; // end class Z3Sort
-class Z3Expr {
- friend class Z3Model;
- friend class Z3Solver;
+static const Z3Sort &toZ3Sort(const SMTSort &S) {
+ return static_cast<const Z3Sort &>(S);
+}
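+// These unchecked downcasts (see also toZ3Expr below) rest on the assumption
+// that a single SMT backend is active, so every SMTSort and SMTExpr reaching
+// this file is in fact the Z3 flavor.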
- Z3_ast AST;
+class Z3Expr : public SMTExpr {
+ friend class Z3Solver;
- Z3Expr(Z3_ast ZA) : AST(ZA) { Z3_inc_ref(Z3Context::ZC, AST); }
+ Z3Context &Context;
- // Return an appropriate floating-point rounding mode.
- static Z3Expr getFloatRoundingMode() {
- // TODO: Don't assume nearest ties to even rounding mode
- return Z3Expr(Z3_mk_fpa_rne(Z3Context::ZC));
- }
+ Z3_ast AST;
- // Determine whether two float semantics are equivalent
- static bool areEquivalent(const llvm::fltSemantics &LHS,
- const llvm::fltSemantics &RHS) {
- return (llvm::APFloat::semanticsPrecision(LHS) ==
- llvm::APFloat::semanticsPrecision(RHS)) &&
- (llvm::APFloat::semanticsMinExponent(LHS) ==
- llvm::APFloat::semanticsMinExponent(RHS)) &&
- (llvm::APFloat::semanticsMaxExponent(LHS) ==
- llvm::APFloat::semanticsMaxExponent(RHS)) &&
- (llvm::APFloat::semanticsSizeInBits(LHS) ==
- llvm::APFloat::semanticsSizeInBits(RHS));
+public:
+ Z3Expr(Z3Context &C, Z3_ast ZA) : SMTExpr(), Context(C), AST(ZA) {
+ Z3_inc_ref(Context.Context, AST);
}
-public:
/// Override implicit copy constructor for correct reference counting.
- Z3Expr(const Z3Expr &Copy) : AST(Copy.AST) { Z3_inc_ref(Z3Context::ZC, AST); }
+ Z3Expr(const Z3Expr &Copy) : SMTExpr(), Context(Copy.Context), AST(Copy.AST) {
+ Z3_inc_ref(Context.Context, AST);
+ }
/// Provide move constructor
- Z3Expr(Z3Expr &&Move) : AST(nullptr) { *this = std::move(Move); }
+ Z3Expr(Z3Expr &&Move) : SMTExpr(), Context(Move.Context), AST(nullptr) {
+ *this = std::move(Move);
+ }
   /// Provide move assignment operator
Z3Expr &operator=(Z3Expr &&Move) {
if (this != &Move) {
if (AST)
- Z3_dec_ref(Z3Context::ZC, AST);
+ Z3_dec_ref(Context.Context, AST);
AST = Move.AST;
Move.AST = nullptr;
}
@@ -231,1388 +188,854 @@ public:
~Z3Expr() {
if (AST)
- Z3_dec_ref(Z3Context::ZC, AST);
- }
-
- /// Get the corresponding IEEE floating-point type for a given bitwidth.
- static const llvm::fltSemantics &getFloatSemantics(unsigned BitWidth) {
- switch (BitWidth) {
- default:
- llvm_unreachable("Unsupported floating-point semantics!");
- break;
- case 16:
- return llvm::APFloat::IEEEhalf();
- case 32:
- return llvm::APFloat::IEEEsingle();
- case 64:
- return llvm::APFloat::IEEEdouble();
- case 128:
- return llvm::APFloat::IEEEquad();
- }
+ Z3_dec_ref(Context.Context, AST);
}
- /// Construct a Z3Expr from a unary operator, given a Z3_context.
- static Z3Expr fromUnOp(const UnaryOperator::Opcode Op, const Z3Expr &Exp) {
- Z3_ast AST;
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ ID.AddInteger(Z3_get_ast_hash(Context.Context, AST));
+ }
- switch (Op) {
- default:
- llvm_unreachable("Unimplemented opcode");
- break;
+ /// Comparison of AST equality, not model equivalence.
+ bool equal_to(SMTExpr const &Other) const override {
+ assert(Z3_is_eq_sort(Context.Context, Z3_get_sort(Context.Context, AST),
+ Z3_get_sort(Context.Context,
+ static_cast<const Z3Expr &>(Other).AST)) &&
+ "AST's must have the same sort");
+ return Z3_is_eq_ast(Context.Context, AST,
+ static_cast<const Z3Expr &>(Other).AST);
+ }
- case UO_Minus:
- AST = Z3_mk_bvneg(Z3Context::ZC, Exp.AST);
- break;
+  /// Override implicit copy assignment operator for correct reference
+  /// counting.
+ Z3Expr &operator=(const Z3Expr &Move) {
+ Z3_inc_ref(Context.Context, Move.AST);
+ Z3_dec_ref(Context.Context, AST);
+ AST = Move.AST;
+ return *this;
+ }
- case UO_Not:
- AST = Z3_mk_bvnot(Z3Context::ZC, Exp.AST);
- break;
+ void print(raw_ostream &OS) const override {
+ OS << Z3_ast_to_string(Context.Context, AST);
+ }
+}; // end class Z3Expr
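+// Z3Expr::Profile hashes the underlying AST, which is what allows Z3Expr to
+// live in folding-set based containers such as the ConstraintZ3Ty
+// ImmutableSet registered below.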
- case UO_LNot:
- AST = Z3_mk_not(Z3Context::ZC, Exp.AST);
- break;
- }
+static const Z3Expr &toZ3Expr(const SMTExpr &E) {
+ return static_cast<const Z3Expr &>(E);
+}
- return Z3Expr(AST);
- }
+class Z3Model {
+ friend class Z3Solver;
+
+ Z3Context &Context;
+
+ Z3_model Model;
- /// Construct a Z3Expr from a floating-point unary operator, given a
- /// Z3_context.
- static Z3Expr fromFloatUnOp(const UnaryOperator::Opcode Op,
- const Z3Expr &Exp) {
- Z3_ast AST;
+public:
+ Z3Model(Z3Context &C, Z3_model ZM) : Context(C), Model(ZM) {
+ assert(C.Context != nullptr);
+ Z3_model_inc_ref(Context.Context, Model);
+ }
- switch (Op) {
- default:
- llvm_unreachable("Unimplemented opcode");
- break;
+ /// Override implicit copy constructor for correct reference counting.
+ Z3Model(const Z3Model &Copy) : Context(Copy.Context), Model(Copy.Model) {
+ Z3_model_inc_ref(Context.Context, Model);
+ }
- case UO_Minus:
- AST = Z3_mk_fpa_neg(Z3Context::ZC, Exp.AST);
- break;
+ /// Provide move constructor
+ Z3Model(Z3Model &&Move) : Context(Move.Context), Model(nullptr) {
+ *this = std::move(Move);
+ }
- case UO_LNot:
- return Z3Expr::fromUnOp(Op, Exp);
+  /// Provide move assignment operator
+ Z3Model &operator=(Z3Model &&Move) {
+ if (this != &Move) {
+ if (Model)
+ Z3_model_dec_ref(Context.Context, Model);
+ Model = Move.Model;
+ Move.Model = nullptr;
}
+ return *this;
+ }
- return Z3Expr(AST);
+ ~Z3Model() {
+ if (Model)
+ Z3_model_dec_ref(Context.Context, Model);
}
- /// Construct a Z3Expr from a n-ary binary operator.
- static Z3Expr fromNBinOp(const BinaryOperator::Opcode Op,
- const std::vector<Z3_ast> &ASTs) {
- Z3_ast AST;
+ void print(raw_ostream &OS) const {
+ OS << Z3_model_to_string(Context.Context, Model);
+ }
- switch (Op) {
- default:
- llvm_unreachable("Unimplemented opcode");
- break;
+ LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
+}; // end class Z3Model
- case BO_LAnd:
- AST = Z3_mk_and(Z3Context::ZC, ASTs.size(), ASTs.data());
- break;
+/// Get the corresponding IEEE floating-point type for a given bitwidth.
+static const llvm::fltSemantics &getFloatSemantics(unsigned BitWidth) {
+ switch (BitWidth) {
+ default:
+ llvm_unreachable("Unsupported floating-point semantics!");
+ break;
+ case 16:
+ return llvm::APFloat::IEEEhalf();
+ case 32:
+ return llvm::APFloat::IEEEsingle();
+ case 64:
+ return llvm::APFloat::IEEEdouble();
+ case 128:
+ return llvm::APFloat::IEEEquad();
+ }
+}
- case BO_LOr:
- AST = Z3_mk_or(Z3Context::ZC, ASTs.size(), ASTs.data());
- break;
- }
+// Determine whether two float semantics are equivalent
+static bool areEquivalent(const llvm::fltSemantics &LHS,
+ const llvm::fltSemantics &RHS) {
+ return (llvm::APFloat::semanticsPrecision(LHS) ==
+ llvm::APFloat::semanticsPrecision(RHS)) &&
+ (llvm::APFloat::semanticsMinExponent(LHS) ==
+ llvm::APFloat::semanticsMinExponent(RHS)) &&
+ (llvm::APFloat::semanticsMaxExponent(LHS) ==
+ llvm::APFloat::semanticsMaxExponent(RHS)) &&
+ (llvm::APFloat::semanticsSizeInBits(LHS) ==
+ llvm::APFloat::semanticsSizeInBits(RHS));
+}
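+// llvm::fltSemantics exposes no equality operator, so equivalence is checked
+// field by field; two semantics agreeing on precision, exponent range and
+// size describe the same IEEE format for our purposes.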
- return Z3Expr(AST);
- }
-
- /// Construct a Z3Expr from a binary operator, given a Z3_context.
- static Z3Expr fromBinOp(const Z3Expr &LHS, const BinaryOperator::Opcode Op,
- const Z3Expr &RHS, bool isSigned) {
- Z3_ast AST;
-
- assert(Z3Sort::getSort(LHS.AST) == Z3Sort::getSort(RHS.AST) &&
- "AST's must have the same sort!");
-
- switch (Op) {
- default:
- llvm_unreachable("Unimplemented opcode");
- break;
-
- // Multiplicative operators
- case BO_Mul:
- AST = Z3_mk_bvmul(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_Div:
- AST = isSigned ? Z3_mk_bvsdiv(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvudiv(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_Rem:
- AST = isSigned ? Z3_mk_bvsrem(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvurem(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Additive operators
- case BO_Add:
- AST = Z3_mk_bvadd(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_Sub:
- AST = Z3_mk_bvsub(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Bitwise shift operators
- case BO_Shl:
- AST = Z3_mk_bvshl(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_Shr:
- AST = isSigned ? Z3_mk_bvashr(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvlshr(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Relational operators
- case BO_LT:
- AST = isSigned ? Z3_mk_bvslt(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvult(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_GT:
- AST = isSigned ? Z3_mk_bvsgt(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvugt(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_LE:
- AST = isSigned ? Z3_mk_bvsle(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvule(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_GE:
- AST = isSigned ? Z3_mk_bvsge(Z3Context::ZC, LHS.AST, RHS.AST)
- : Z3_mk_bvuge(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Equality operators
- case BO_EQ:
- AST = Z3_mk_eq(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_NE:
- return Z3Expr::fromUnOp(UO_LNot,
- Z3Expr::fromBinOp(LHS, BO_EQ, RHS, isSigned));
- break;
-
- // Bitwise operators
- case BO_And:
- AST = Z3_mk_bvand(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_Xor:
- AST = Z3_mk_bvxor(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_Or:
- AST = Z3_mk_bvor(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Logical operators
- case BO_LAnd:
- case BO_LOr: {
- std::vector<Z3_ast> Args = {LHS.AST, RHS.AST};
- return Z3Expr::fromNBinOp(Op, Args);
- }
- }
+} // end anonymous namespace
- return Z3Expr(AST);
- }
-
- /// Construct a Z3Expr from a special floating-point binary operator, given
- /// a Z3_context.
- static Z3Expr fromFloatSpecialBinOp(const Z3Expr &LHS,
- const BinaryOperator::Opcode Op,
- const llvm::APFloat::fltCategory &RHS) {
- Z3_ast AST;
-
- switch (Op) {
- default:
- llvm_unreachable("Unimplemented opcode");
- break;
-
- // Equality operators
- case BO_EQ:
- switch (RHS) {
- case llvm::APFloat::fcInfinity:
- AST = Z3_mk_fpa_is_infinite(Z3Context::ZC, LHS.AST);
- break;
- case llvm::APFloat::fcNaN:
- AST = Z3_mk_fpa_is_nan(Z3Context::ZC, LHS.AST);
- break;
- case llvm::APFloat::fcNormal:
- AST = Z3_mk_fpa_is_normal(Z3Context::ZC, LHS.AST);
- break;
- case llvm::APFloat::fcZero:
- AST = Z3_mk_fpa_is_zero(Z3Context::ZC, LHS.AST);
- break;
- }
- break;
- case BO_NE:
- return Z3Expr::fromFloatUnOp(
- UO_LNot, Z3Expr::fromFloatSpecialBinOp(LHS, BO_EQ, RHS));
- break;
- }
+typedef llvm::ImmutableSet<std::pair<SymbolRef, Z3Expr>> ConstraintZ3Ty;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintZ3, ConstraintZ3Ty)
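+// The macro now generates the ProgramStateTrait specialization that the
+// hand-written expansion removed above used to provide.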
- return Z3Expr(AST);
- }
+namespace {
- /// Construct a Z3Expr from a floating-point binary operator, given a
- /// Z3_context.
- static Z3Expr fromFloatBinOp(const Z3Expr &LHS,
- const BinaryOperator::Opcode Op,
- const Z3Expr &RHS) {
- Z3_ast AST;
+class Z3Solver : public SMTSolver {
+ friend class Z3ConstraintManager;
- assert(Z3Sort::getSort(LHS.AST) == Z3Sort::getSort(RHS.AST) &&
- "AST's must have the same sort!");
+ Z3Context Context;
- switch (Op) {
- default:
- llvm_unreachable("Unimplemented opcode");
- break;
+ Z3_solver Solver;
- // Multiplicative operators
- case BO_Mul: {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- AST = Z3_mk_fpa_mul(Z3Context::ZC, RoundingMode.AST, LHS.AST, RHS.AST);
- break;
- }
- case BO_Div: {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- AST = Z3_mk_fpa_div(Z3Context::ZC, RoundingMode.AST, LHS.AST, RHS.AST);
- break;
- }
- case BO_Rem:
- AST = Z3_mk_fpa_rem(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Additive operators
- case BO_Add: {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- AST = Z3_mk_fpa_add(Z3Context::ZC, RoundingMode.AST, LHS.AST, RHS.AST);
- break;
- }
- case BO_Sub: {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- AST = Z3_mk_fpa_sub(Z3Context::ZC, RoundingMode.AST, LHS.AST, RHS.AST);
- break;
- }
+public:
+ Z3Solver() : SMTSolver(), Solver(Z3_mk_simple_solver(Context.Context)) {
+ Z3_solver_inc_ref(Context.Context, Solver);
+ }
+
+ /// Override implicit copy constructor for correct reference counting.
+ Z3Solver(const Z3Solver &Copy)
+ : SMTSolver(), Context(Copy.Context), Solver(Copy.Solver) {
+ Z3_solver_inc_ref(Context.Context, Solver);
+ }
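+  // CAUTION: Z3Context is held by value, so this copy shares the underlying
+  // Z3_context handle with the original, and both destructors will call
+  // Z3_del_context on it; copying a live Z3Solver should be avoided.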
+
+ /// Provide move constructor
+ Z3Solver(Z3Solver &&Move)
+ : SMTSolver(), Context(Move.Context), Solver(nullptr) {
+ *this = std::move(Move);
+ }
- // Relational operators
- case BO_LT:
- AST = Z3_mk_fpa_lt(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_GT:
- AST = Z3_mk_fpa_gt(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_LE:
- AST = Z3_mk_fpa_leq(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_GE:
- AST = Z3_mk_fpa_geq(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
-
- // Equality operators
- case BO_EQ:
- AST = Z3_mk_fpa_eq(Z3Context::ZC, LHS.AST, RHS.AST);
- break;
- case BO_NE:
- return Z3Expr::fromFloatUnOp(UO_LNot,
- Z3Expr::fromFloatBinOp(LHS, BO_EQ, RHS));
- break;
-
- // Logical operators
- case BO_LAnd:
- case BO_LOr:
- return Z3Expr::fromBinOp(LHS, Op, RHS, false);
+  /// Provide move assignment operator
+ Z3Solver &operator=(Z3Solver &&Move) {
+ if (this != &Move) {
+ if (Solver)
+ Z3_solver_dec_ref(Context.Context, Solver);
+ Solver = Move.Solver;
+ Move.Solver = nullptr;
}
+ return *this;
+ }
- return Z3Expr(AST);
+ ~Z3Solver() {
+ if (Solver)
+ Z3_solver_dec_ref(Context.Context, Solver);
}
- /// Construct a Z3Expr from a SymbolData, given a Z3_context.
- static Z3Expr fromData(const SymbolID ID, bool isBool, bool isFloat,
- uint64_t BitWidth) {
- llvm::Twine Name = "$" + llvm::Twine(ID);
+ void addConstraint(const SMTExprRef &Exp) const override {
+ Z3_solver_assert(Context.Context, Solver, toZ3Expr(*Exp).AST);
+ }
- Z3Sort Sort;
- if (isBool)
- Sort = Z3Sort::getBoolSort();
- else if (isFloat)
- Sort = Z3Sort::getFloatSort(BitWidth);
- else
- Sort = Z3Sort::getBitvectorSort(BitWidth);
-
- Z3_symbol Symbol = Z3_mk_string_symbol(Z3Context::ZC, Name.str().c_str());
- Z3_ast AST = Z3_mk_const(Z3Context::ZC, Symbol, Sort.Sort);
- return Z3Expr(AST);
- }
-
- /// Construct a Z3Expr from a SymbolCast, given a Z3_context.
- static Z3Expr fromCast(const Z3Expr &Exp, QualType ToTy, uint64_t ToBitWidth,
- QualType FromTy, uint64_t FromBitWidth) {
- Z3_ast AST;
-
- if ((FromTy->isIntegralOrEnumerationType() &&
- ToTy->isIntegralOrEnumerationType()) ||
- (FromTy->isAnyPointerType() ^ ToTy->isAnyPointerType()) ||
- (FromTy->isBlockPointerType() ^ ToTy->isBlockPointerType()) ||
- (FromTy->isReferenceType() ^ ToTy->isReferenceType())) {
- // Special case: Z3 boolean type is distinct from bitvector type, so
- // must use if-then-else expression instead of direct cast
- if (FromTy->isBooleanType()) {
- assert(ToBitWidth > 0 && "BitWidth must be positive!");
- Z3Expr Zero = Z3Expr::fromInt("0", ToBitWidth);
- Z3Expr One = Z3Expr::fromInt("1", ToBitWidth);
- AST = Z3_mk_ite(Z3Context::ZC, Exp.AST, One.AST, Zero.AST);
- } else if (ToBitWidth > FromBitWidth) {
- AST = FromTy->isSignedIntegerOrEnumerationType()
- ? Z3_mk_sign_ext(Z3Context::ZC, ToBitWidth - FromBitWidth,
- Exp.AST)
- : Z3_mk_zero_ext(Z3Context::ZC, ToBitWidth - FromBitWidth,
- Exp.AST);
- } else if (ToBitWidth < FromBitWidth) {
- AST = Z3_mk_extract(Z3Context::ZC, ToBitWidth - 1, 0, Exp.AST);
- } else {
- // Both are bitvectors with the same width, ignore the type cast
- return Exp;
- }
- } else if (FromTy->isRealFloatingType() && ToTy->isRealFloatingType()) {
- if (ToBitWidth != FromBitWidth) {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- Z3Sort Sort = Z3Sort::getFloatSort(ToBitWidth);
- AST = Z3_mk_fpa_to_fp_float(Z3Context::ZC, RoundingMode.AST, Exp.AST,
- Sort.Sort);
- } else {
- return Exp;
- }
- } else if (FromTy->isIntegralOrEnumerationType() &&
- ToTy->isRealFloatingType()) {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- Z3Sort Sort = Z3Sort::getFloatSort(ToBitWidth);
- AST = FromTy->isSignedIntegerOrEnumerationType()
- ? Z3_mk_fpa_to_fp_signed(Z3Context::ZC, RoundingMode.AST,
- Exp.AST, Sort.Sort)
- : Z3_mk_fpa_to_fp_unsigned(Z3Context::ZC, RoundingMode.AST,
- Exp.AST, Sort.Sort);
- } else if (FromTy->isRealFloatingType() &&
- ToTy->isIntegralOrEnumerationType()) {
- Z3Expr RoundingMode = Z3Expr::getFloatRoundingMode();
- AST = ToTy->isSignedIntegerOrEnumerationType()
- ? Z3_mk_fpa_to_sbv(Z3Context::ZC, RoundingMode.AST, Exp.AST,
- ToBitWidth)
- : Z3_mk_fpa_to_ubv(Z3Context::ZC, RoundingMode.AST, Exp.AST,
- ToBitWidth);
- } else {
- llvm_unreachable("Unsupported explicit type cast!");
- }
+ SMTSortRef getBoolSort() override {
+ return std::make_shared<Z3Sort>(Context, Z3_mk_bool_sort(Context.Context));
+ }
- return Z3Expr(AST);
+ SMTSortRef getBitvectorSort(unsigned BitWidth) override {
+ return std::make_shared<Z3Sort>(Context,
+ Z3_mk_bv_sort(Context.Context, BitWidth));
}
- /// Construct a Z3Expr from a boolean, given a Z3_context.
- static Z3Expr fromBoolean(const bool Bool) {
- Z3_ast AST = Bool ? Z3_mk_true(Z3Context::ZC) : Z3_mk_false(Z3Context::ZC);
- return Z3Expr(AST);
+ SMTSortRef getSort(const SMTExprRef &Exp) override {
+ return std::make_shared<Z3Sort>(
+ Context, Z3_get_sort(Context.Context, toZ3Expr(*Exp).AST));
}
- /// Construct a Z3Expr from a finite APFloat, given a Z3_context.
- static Z3Expr fromAPFloat(const llvm::APFloat &Float) {
- Z3_ast AST;
- Z3Sort Sort = Z3Sort::getFloatSort(
- llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
+ SMTSortRef getFloat16Sort() override {
+ return std::make_shared<Z3Sort>(Context,
+ Z3_mk_fpa_sort_16(Context.Context));
+ }
- llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), true);
- Z3Expr Z3Int = Z3Expr::fromAPSInt(Int);
- AST = Z3_mk_fpa_to_fp_bv(Z3Context::ZC, Z3Int.AST, Sort.Sort);
+ SMTSortRef getFloat32Sort() override {
+ return std::make_shared<Z3Sort>(Context,
+ Z3_mk_fpa_sort_32(Context.Context));
+ }
- return Z3Expr(AST);
+ SMTSortRef getFloat64Sort() override {
+ return std::make_shared<Z3Sort>(Context,
+ Z3_mk_fpa_sort_64(Context.Context));
}
- /// Construct a Z3Expr from an APSInt, given a Z3_context.
- static Z3Expr fromAPSInt(const llvm::APSInt &Int) {
- Z3Sort Sort = Z3Sort::getBitvectorSort(Int.getBitWidth());
- Z3_ast AST =
- Z3_mk_numeral(Z3Context::ZC, Int.toString(10).c_str(), Sort.Sort);
- return Z3Expr(AST);
+ SMTSortRef getFloat128Sort() override {
+ return std::make_shared<Z3Sort>(Context,
+ Z3_mk_fpa_sort_128(Context.Context));
}
- /// Construct a Z3Expr from an integer, given a Z3_context.
- static Z3Expr fromInt(const char *Int, uint64_t BitWidth) {
- Z3Sort Sort = Z3Sort::getBitvectorSort(BitWidth);
- Z3_ast AST = Z3_mk_numeral(Z3Context::ZC, Int, Sort.Sort);
- return Z3Expr(AST);
+ SMTExprRef newExprRef(const SMTExpr &E) const override {
+ return std::make_shared<Z3Expr>(toZ3Expr(E));
}
- /// Construct an APFloat from a Z3Expr, given the AST representation
- static bool toAPFloat(const Z3Sort &Sort, const Z3_ast &AST,
- llvm::APFloat &Float, bool useSemantics = true) {
- assert(Sort.getSortKind() == Z3_FLOATING_POINT_SORT &&
- "Unsupported sort to floating-point!");
+ SMTExprRef mkBVNeg(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvneg(Context.Context, toZ3Expr(*Exp).AST)));
+ }
- llvm::APSInt Int(Sort.getFloatSortSize(), true);
- const llvm::fltSemantics &Semantics =
- Z3Expr::getFloatSemantics(Sort.getFloatSortSize());
- Z3Sort BVSort = Z3Sort::getBitvectorSort(Sort.getFloatSortSize());
- if (!Z3Expr::toAPSInt(BVSort, AST, Int, true)) {
- return false;
- }
+ SMTExprRef mkBVNot(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvnot(Context.Context, toZ3Expr(*Exp).AST)));
+ }
- if (useSemantics &&
- !Z3Expr::areEquivalent(Float.getSemantics(), Semantics)) {
- assert(false && "Floating-point types don't match!");
- return false;
- }
+ SMTExprRef mkNot(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_not(Context.Context, toZ3Expr(*Exp).AST)));
+ }
- Float = llvm::APFloat(Semantics, Int);
- return true;
+ SMTExprRef mkBVAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvadd(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Construct an APSInt from a Z3Expr, given the AST representation
- static bool toAPSInt(const Z3Sort &Sort, const Z3_ast &AST, llvm::APSInt &Int,
- bool useSemantics = true) {
- switch (Sort.getSortKind()) {
- default:
- llvm_unreachable("Unsupported sort to integer!");
- case Z3_BV_SORT: {
- if (useSemantics && Int.getBitWidth() != Sort.getBitvectorSortSize()) {
- assert(false && "Bitvector types don't match!");
- return false;
- }
+ SMTExprRef mkBVSub(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsub(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- uint64_t Value[2];
- // Force cast because Z3 defines __uint64 to be a unsigned long long
- // type, which isn't compatible with a unsigned long type, even if they
- // are the same size.
- Z3_get_numeral_uint64(Z3Context::ZC, AST,
- reinterpret_cast<__uint64 *>(&Value[0]));
- if (Sort.getBitvectorSortSize() <= 64) {
- Int = llvm::APSInt(llvm::APInt(Int.getBitWidth(), Value[0]), true);
- } else if (Sort.getBitvectorSortSize() == 128) {
- Z3Expr ASTHigh = Z3Expr(Z3_mk_extract(Z3Context::ZC, 127, 64, AST));
- Z3_get_numeral_uint64(Z3Context::ZC, AST,
- reinterpret_cast<__uint64 *>(&Value[1]));
- Int = llvm::APSInt(llvm::APInt(Int.getBitWidth(), Value), true);
- } else {
- assert(false && "Bitwidth not supported!");
- return false;
- }
- return true;
- }
- case Z3_BOOL_SORT:
- if (useSemantics && Int.getBitWidth() < 1) {
- assert(false && "Boolean type doesn't match!");
- return false;
- }
- Int = llvm::APSInt(
- llvm::APInt(Int.getBitWidth(),
- Z3_get_bool_value(Z3Context::ZC, AST) == Z3_L_TRUE ? 1
- : 0),
- true);
- return true;
- }
+ SMTExprRef mkBVMul(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvmul(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(Z3_get_ast_hash(Z3Context::ZC, AST));
+ SMTExprRef mkBVSRem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsrem(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- bool operator<(const Z3Expr &Other) const {
- llvm::FoldingSetNodeID ID1, ID2;
- Profile(ID1);
- Other.Profile(ID2);
- return ID1 < ID2;
+ SMTExprRef mkBVURem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvurem(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Comparison of AST equality, not model equivalence.
- bool operator==(const Z3Expr &Other) const {
- assert(Z3_is_eq_sort(Z3Context::ZC, Z3_get_sort(Z3Context::ZC, AST),
- Z3_get_sort(Z3Context::ZC, Other.AST)) &&
- "AST's must have the same sort");
- return Z3_is_eq_ast(Z3Context::ZC, AST, Other.AST);
+ SMTExprRef mkBVSDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsdiv(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Override implicit move constructor for correct reference counting.
- Z3Expr &operator=(const Z3Expr &Move) {
- Z3_inc_ref(Z3Context::ZC, Move.AST);
- Z3_dec_ref(Z3Context::ZC, AST);
- AST = Move.AST;
- return *this;
+ SMTExprRef mkBVUDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvudiv(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- void print(raw_ostream &OS) const {
- OS << Z3_ast_to_string(Z3Context::ZC, AST);
+ SMTExprRef mkBVShl(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvshl(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
-}; // end class Z3Expr
+ SMTExprRef mkBVAshr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvashr(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
-class Z3Model {
- Z3_model Model;
+ SMTExprRef mkBVLshr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvlshr(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
-public:
- Z3Model(Z3_model ZM) : Model(ZM) { Z3_model_inc_ref(Z3Context::ZC, Model); }
+ SMTExprRef mkBVXor(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvxor(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- /// Override implicit copy constructor for correct reference counting.
- Z3Model(const Z3Model &Copy) : Model(Copy.Model) {
- Z3_model_inc_ref(Z3Context::ZC, Model);
+ SMTExprRef mkBVOr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvor(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Provide move constructor
- Z3Model(Z3Model &&Move) : Model(nullptr) { *this = std::move(Move); }
+ SMTExprRef mkBVAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvand(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- /// Provide move assignment constructor
- Z3Model &operator=(Z3Model &&Move) {
- if (this != &Move) {
- if (Model)
- Z3_model_dec_ref(Z3Context::ZC, Model);
- Model = Move.Model;
- Move.Model = nullptr;
- }
- return *this;
+ SMTExprRef mkBVUlt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvult(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- ~Z3Model() {
- if (Model)
- Z3_model_dec_ref(Z3Context::ZC, Model);
+ SMTExprRef mkBVSlt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvslt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Given an expression, extract the value of this operand in the model.
- bool getInterpretation(const Z3Expr &Exp, llvm::APSInt &Int) const {
- Z3_func_decl Func =
- Z3_get_app_decl(Z3Context::ZC, Z3_to_app(Z3Context::ZC, Exp.AST));
- if (Z3_model_has_interp(Z3Context::ZC, Model, Func) != Z3_L_TRUE)
- return false;
+ SMTExprRef mkBVUgt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvugt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- Z3_ast Assign = Z3_model_get_const_interp(Z3Context::ZC, Model, Func);
- Z3Sort Sort = Z3Sort::getSort(Assign);
- return Z3Expr::toAPSInt(Sort, Assign, Int, true);
+ SMTExprRef mkBVSgt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsgt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Given an expression, extract the value of this operand in the model.
- bool getInterpretation(const Z3Expr &Exp, llvm::APFloat &Float) const {
- Z3_func_decl Func =
- Z3_get_app_decl(Z3Context::ZC, Z3_to_app(Z3Context::ZC, Exp.AST));
- if (Z3_model_has_interp(Z3Context::ZC, Model, Func) != Z3_L_TRUE)
- return false;
+ SMTExprRef mkBVUle(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvule(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- Z3_ast Assign = Z3_model_get_const_interp(Z3Context::ZC, Model, Func);
- Z3Sort Sort = Z3Sort::getSort(Assign);
- return Z3Expr::toAPFloat(Sort, Assign, Float, true);
+ SMTExprRef mkBVSle(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsle(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- void print(raw_ostream &OS) const {
- OS << Z3_model_to_string(Z3Context::ZC, Model);
+ SMTExprRef mkBVUge(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvuge(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- LLVM_DUMP_METHOD void dump() const { print(llvm::errs()); }
-}; // end class Z3Model
+ SMTExprRef mkBVSge(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_bvsge(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
-class Z3Solver {
- friend class Z3ConstraintManager;
+ SMTExprRef mkAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ Z3_ast Args[2] = {toZ3Expr(*LHS).AST, toZ3Expr(*RHS).AST};
+ return newExprRef(Z3Expr(Context, Z3_mk_and(Context.Context, 2, Args)));
+ }
- Z3_solver Solver;
+ SMTExprRef mkOr(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ Z3_ast Args[2] = {toZ3Expr(*LHS).AST, toZ3Expr(*RHS).AST};
+ return newExprRef(Z3Expr(Context, Z3_mk_or(Context.Context, 2, Args)));
+ }
- Z3Solver(Z3_solver ZS) : Solver(ZS) {
- Z3_solver_inc_ref(Z3Context::ZC, Solver);
+ SMTExprRef mkEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_eq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
-public:
- /// Override implicit copy constructor for correct reference counting.
- Z3Solver(const Z3Solver &Copy) : Solver(Copy.Solver) {
- Z3_solver_inc_ref(Z3Context::ZC, Solver);
+ SMTExprRef mkFPNeg(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_neg(Context.Context, toZ3Expr(*Exp).AST)));
}
- /// Provide move constructor
- Z3Solver(Z3Solver &&Move) : Solver(nullptr) { *this = std::move(Move); }
+ SMTExprRef mkFPIsInfinite(const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_is_infinite(Context.Context, toZ3Expr(*Exp).AST)));
+ }
- /// Provide move assignment constructor
- Z3Solver &operator=(Z3Solver &&Move) {
- if (this != &Move) {
- if (Solver)
- Z3_solver_dec_ref(Z3Context::ZC, Solver);
- Solver = Move.Solver;
- Move.Solver = nullptr;
- }
- return *this;
+ SMTExprRef mkFPIsNaN(const SMTExprRef &Exp) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_is_nan(Context.Context, toZ3Expr(*Exp).AST)));
}
- ~Z3Solver() {
- if (Solver)
- Z3_solver_dec_ref(Z3Context::ZC, Solver);
+ SMTExprRef mkFPIsNormal(const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_is_normal(Context.Context, toZ3Expr(*Exp).AST)));
}
- /// Given a constraint, add it to the solver
- void addConstraint(const Z3Expr &Exp) {
- Z3_solver_assert(Z3Context::ZC, Solver, Exp.AST);
+ SMTExprRef mkFPIsZero(const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_is_zero(Context.Context, toZ3Expr(*Exp).AST)));
}
- /// Given a program state, construct the logical conjunction and add it to
- /// the solver
- void addStateConstraints(ProgramStateRef State) {
- // TODO: Don't add all the constraints, only the relevant ones
- ConstraintZ3Ty CZ = State->get<ConstraintZ3>();
- ConstraintZ3Ty::iterator I = CZ.begin(), IE = CZ.end();
+ SMTExprRef mkFPMul(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_mul(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
- // Construct the logical AND of all the constraints
- if (I != IE) {
- std::vector<Z3_ast> ASTs;
+ SMTExprRef mkFPDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_div(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
- while (I != IE)
- ASTs.push_back(I++->second.AST);
+ SMTExprRef mkFPRem(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_rem(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- Z3Expr Conj = Z3Expr::fromNBinOp(BO_LAnd, ASTs);
- addConstraint(Conj);
- }
+ SMTExprRef mkFPAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_add(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
}
- /// Check if the constraints are satisfiable
- Z3_lbool check() { return Z3_solver_check(Z3Context::ZC, Solver); }
+ SMTExprRef mkFPSub(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(
+ Z3Expr(Context,
+ Z3_mk_fpa_sub(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST, toZ3Expr(*RoundingMode).AST)));
+ }
- /// Push the current solver state
- void push() { return Z3_solver_push(Z3Context::ZC, Solver); }
+ SMTExprRef mkFPLt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_lt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
- /// Pop the previous solver state
- void pop(unsigned NumStates = 1) {
- assert(Z3_solver_get_num_scopes(Z3Context::ZC, Solver) >= NumStates);
- return Z3_solver_pop(Z3Context::ZC, Solver, NumStates);
+ SMTExprRef mkFPGt(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_gt(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Get a model from the solver. Caller should check the model is
- /// satisfiable.
- Z3Model getModel() {
- return Z3Model(Z3_solver_get_model(Z3Context::ZC, Solver));
+ SMTExprRef mkFPLe(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_leq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
}
- /// Reset the solver and remove all constraints.
- void reset() { Z3_solver_reset(Z3Context::ZC, Solver); }
-}; // end class Z3Solver
+ SMTExprRef mkFPGe(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_geq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
-void Z3ErrorHandler(Z3_context Context, Z3_error_code Error) {
- llvm::report_fatal_error("Z3 error: " +
- llvm::Twine(Z3_get_error_msg_ex(Context, Error)));
-}
+ SMTExprRef mkFPEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_fpa_eq(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
-class Z3ConstraintManager : public SimpleConstraintManager {
- Z3Context Context;
- mutable Z3Solver Solver;
+ SMTExprRef mkIte(const SMTExprRef &Cond, const SMTExprRef &T,
+ const SMTExprRef &F) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_ite(Context.Context, toZ3Expr(*Cond).AST,
+ toZ3Expr(*T).AST, toZ3Expr(*F).AST)));
+ }
-public:
- Z3ConstraintManager(SubEngine *SE, SValBuilder &SB)
- : SimpleConstraintManager(SE, SB),
- Solver(Z3_mk_simple_solver(Z3Context::ZC)) {
- Z3_set_error_handler(Z3Context::ZC, Z3ErrorHandler);
+ SMTExprRef mkBVSignExt(unsigned i, const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_sign_ext(Context.Context, i, toZ3Expr(*Exp).AST)));
}
- //===------------------------------------------------------------------===//
- // Implementation for interface from ConstraintManager.
- //===------------------------------------------------------------------===//
+ SMTExprRef mkBVZeroExt(unsigned i, const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_zero_ext(Context.Context, i, toZ3Expr(*Exp).AST)));
+ }
- bool canReasonAbout(SVal X) const override;
+ SMTExprRef mkBVExtract(unsigned High, unsigned Low,
+ const SMTExprRef &Exp) override {
+ return newExprRef(Z3Expr(Context, Z3_mk_extract(Context.Context, High, Low,
+ toZ3Expr(*Exp).AST)));
+ }
- ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
-
- const llvm::APSInt *getSymVal(ProgramStateRef State,
- SymbolRef Sym) const override;
-
- ProgramStateRef removeDeadBindings(ProgramStateRef St,
- SymbolReaper &SymReaper) override;
-
- void print(ProgramStateRef St, raw_ostream &Out, const char *nl,
- const char *sep) override;
-
- //===------------------------------------------------------------------===//
- // Implementation for interface from SimpleConstraintManager.
- //===------------------------------------------------------------------===//
-
- ProgramStateRef assumeSym(ProgramStateRef state, SymbolRef Sym,
- bool Assumption) override;
-
- ProgramStateRef assumeSymInclusiveRange(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool InRange) override;
-
- ProgramStateRef assumeSymUnsupported(ProgramStateRef State, SymbolRef Sym,
- bool Assumption) override;
-
-private:
- //===------------------------------------------------------------------===//
- // Internal implementation.
- //===------------------------------------------------------------------===//
-
- // Check whether a new model is satisfiable, and update the program state.
- ProgramStateRef assumeZ3Expr(ProgramStateRef State, SymbolRef Sym,
- const Z3Expr &Exp);
-
- // Generate and check a Z3 model, using the given constraint.
- Z3_lbool checkZ3Model(ProgramStateRef State, const Z3Expr &Exp) const;
-
- // Generate a Z3Expr that represents the given symbolic expression.
- // Sets the hasComparison parameter if the expression has a comparison
- // operator.
- // Sets the RetTy parameter to the final return type after promotions and
- // casts.
- Z3Expr getZ3Expr(SymbolRef Sym, QualType *RetTy = nullptr,
- bool *hasComparison = nullptr) const;
-
- // Generate a Z3Expr that takes the logical not of an expression.
- Z3Expr getZ3NotExpr(const Z3Expr &Exp) const;
-
- // Generate a Z3Expr that compares the expression to zero.
- Z3Expr getZ3ZeroExpr(const Z3Expr &Exp, QualType RetTy,
- bool Assumption) const;
-
- // Recursive implementation to unpack and generate symbolic expression.
- // Sets the hasComparison and RetTy parameters. See getZ3Expr().
- Z3Expr getZ3SymExpr(SymbolRef Sym, QualType *RetTy,
- bool *hasComparison) const;
-
- // Wrapper to generate Z3Expr from SymbolData.
- Z3Expr getZ3DataExpr(const SymbolID ID, QualType Ty) const;
-
- // Wrapper to generate Z3Expr from SymbolCast.
- Z3Expr getZ3CastExpr(const Z3Expr &Exp, QualType FromTy, QualType Ty) const;
-
- // Wrapper to generate Z3Expr from BinarySymExpr.
- // Sets the hasComparison and RetTy parameters. See getZ3Expr().
- Z3Expr getZ3SymBinExpr(const BinarySymExpr *BSE, bool *hasComparison,
- QualType *RetTy) const;
-
- // Wrapper to generate Z3Expr from unpacked binary symbolic expression.
- // Sets the RetTy parameter. See getZ3Expr().
- Z3Expr getZ3BinExpr(const Z3Expr &LHS, QualType LTy,
- BinaryOperator::Opcode Op, const Z3Expr &RHS,
- QualType RTy, QualType *RetTy) const;
-
- //===------------------------------------------------------------------===//
- // Helper functions.
- //===------------------------------------------------------------------===//
-
- // Recover the QualType of an APSInt.
- // TODO: Refactor to put elsewhere
- QualType getAPSIntType(const llvm::APSInt &Int) const;
-
- // Perform implicit type conversion on binary symbolic expressions.
- // May modify all input parameters.
- // TODO: Refactor to use built-in conversion functions
- void doTypeConversion(Z3Expr &LHS, Z3Expr &RHS, QualType &LTy,
- QualType &RTy) const;
-
- // Perform implicit integer type conversion.
- // May modify all input parameters.
- // TODO: Refactor to use Sema::handleIntegerConversion()
- template <typename T,
- T(doCast)(const T &, QualType, uint64_t, QualType, uint64_t)>
- void doIntTypeConversion(T &LHS, QualType &LTy, T &RHS, QualType &RTy) const;
-
- // Perform implicit floating-point type conversion.
- // May modify all input parameters.
- // TODO: Refactor to use Sema::handleFloatConversion()
- template <typename T,
- T(doCast)(const T &, QualType, uint64_t, QualType, uint64_t)>
- void doFloatTypeConversion(T &LHS, QualType &LTy, T &RHS,
- QualType &RTy) const;
-
- // Callback function for doCast parameter on APSInt type.
- static llvm::APSInt castAPSInt(const llvm::APSInt &V, QualType ToTy,
- uint64_t ToWidth, QualType FromTy,
- uint64_t FromWidth);
-}; // end class Z3ConstraintManager
+ SMTExprRef mkBVConcat(const SMTExprRef &LHS, const SMTExprRef &RHS) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_concat(Context.Context, toZ3Expr(*LHS).AST,
+ toZ3Expr(*RHS).AST)));
+ }
-Z3_context Z3Context::ZC;
+ SMTExprRef mkFPtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context,
+ Z3_mk_fpa_to_fp_float(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
+ }
-} // end anonymous namespace
+ SMTExprRef mkFPtoSBV(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context,
+ Z3_mk_fpa_to_fp_signed(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
+ }
-ProgramStateRef Z3ConstraintManager::assumeSym(ProgramStateRef State,
- SymbolRef Sym, bool Assumption) {
- QualType RetTy;
- bool hasComparison;
+ SMTExprRef mkFPtoUBV(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context,
+ Z3_mk_fpa_to_fp_unsigned(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
+ }
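+  // NOTE: despite the names, the two converters above build a *float*: per
+  // the Z3 C API, Z3_mk_fpa_to_fp_signed and Z3_mk_fpa_to_fp_unsigned turn a
+  // signed/unsigned bitvector operand into a floating-point value of sort To.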
- Z3Expr Exp = getZ3Expr(Sym, &RetTy, &hasComparison);
- // Create zero comparison for implicit boolean cast, with reversed assumption
- if (!hasComparison && !RetTy->isBooleanType())
- return assumeZ3Expr(State, Sym, getZ3ZeroExpr(Exp, RetTy, !Assumption));
+ SMTExprRef mkSBVtoFP(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_sbv(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, ToWidth)));
+ }
- return assumeZ3Expr(State, Sym, Assumption ? Exp : getZ3NotExpr(Exp));
-}
+ SMTExprRef mkUBVtoFP(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef RoundingMode = getFloatRoundingMode();
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_ubv(Context.Context, toZ3Expr(*RoundingMode).AST,
+ toZ3Expr(*From).AST, ToWidth)));
+ }
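+  // NOTE: conversely, Z3_mk_fpa_to_sbv and Z3_mk_fpa_to_ubv above turn a
+  // floating-point operand into a signed/unsigned bitvector of width
+  // ToWidth.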
-ProgramStateRef Z3ConstraintManager::assumeSymInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, bool InRange) {
- QualType RetTy;
- // The expression may be casted, so we cannot call getZ3DataExpr() directly
- Z3Expr Exp = getZ3Expr(Sym, &RetTy);
-
- assert((getAPSIntType(From) == getAPSIntType(To)) &&
- "Range values have different types!");
- QualType RTy = getAPSIntType(From);
- bool isSignedTy = RetTy->isSignedIntegerOrEnumerationType();
- Z3Expr FromExp = Z3Expr::fromAPSInt(From);
- Z3Expr ToExp = Z3Expr::fromAPSInt(To);
-
- // Construct single (in)equality
- if (From == To)
- return assumeZ3Expr(State, Sym,
- getZ3BinExpr(Exp, RetTy, InRange ? BO_EQ : BO_NE,
- FromExp, RTy, nullptr));
-
- // Construct two (in)equalities, and a logical and/or
- Z3Expr LHS =
- getZ3BinExpr(Exp, RetTy, InRange ? BO_GE : BO_LT, FromExp, RTy, nullptr);
- Z3Expr RHS =
- getZ3BinExpr(Exp, RetTy, InRange ? BO_LE : BO_GT, ToExp, RTy, nullptr);
- return assumeZ3Expr(
- State, Sym,
- Z3Expr::fromBinOp(LHS, InRange ? BO_LAnd : BO_LOr, RHS, isSignedTy));
-}
+ SMTExprRef mkBoolean(const bool b) override {
+ return newExprRef(Z3Expr(Context, b ? Z3_mk_true(Context.Context)
+ : Z3_mk_false(Context.Context)));
+ }
-ProgramStateRef Z3ConstraintManager::assumeSymUnsupported(ProgramStateRef State,
- SymbolRef Sym,
- bool Assumption) {
- // Skip anything that is unsupported
- return State;
-}
+ SMTExprRef mkBitvector(const llvm::APSInt Int, unsigned BitWidth) override {
+ const SMTSortRef Sort = getBitvectorSort(BitWidth);
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_numeral(Context.Context, Int.toString(10).c_str(),
+ toZ3Sort(*Sort).Sort)));
+ }
-bool Z3ConstraintManager::canReasonAbout(SVal X) const {
- const TargetInfo &TI = getBasicVals().getContext().getTargetInfo();
+ SMTExprRef mkFloat(const llvm::APFloat Float) override {
+ SMTSortRef Sort =
+ getFloatSort(llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
- Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
- if (!SymVal)
- return true;
+ llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), false);
+ SMTExprRef Z3Int = mkBitvector(Int, Int.getBitWidth());
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_fp_bv(Context.Context, toZ3Expr(*Z3Int).AST,
+ toZ3Sort(*Sort).Sort)));
+ }
- const SymExpr *Sym = SymVal->getSymbol();
- do {
- QualType Ty = Sym->getType();
+ SMTExprRef mkSymbol(const char *Name, SMTSortRef Sort) override {
+ return newExprRef(
+ Z3Expr(Context, Z3_mk_const(Context.Context,
+ Z3_mk_string_symbol(Context.Context, Name),
+ toZ3Sort(*Sort).Sort)));
+ }
- // Complex types are not modeled
- if (Ty->isComplexType() || Ty->isComplexIntegerType())
- return false;
+  llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
+                            bool isUnsigned) override {
+    // Z3 returns the numeral as a decimal string; parse it at the requested
+    // width and honor the requested signedness rather than silently
+    // defaulting to unsigned.
+    return llvm::APSInt(
+        llvm::APInt(BitWidth,
+                    Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
+                    10),
+        isUnsigned);
+  }
- // Non-IEEE 754 floating-point types are not modeled
- if ((Ty->isSpecificBuiltinType(BuiltinType::LongDouble) &&
- (&TI.getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended() ||
- &TI.getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())))
- return false;
+ bool getBoolean(const SMTExprRef &Exp) override {
+ return Z3_get_bool_value(Context.Context, toZ3Expr(*Exp).AST) == Z3_L_TRUE;
+ }
- if (isa<SymbolData>(Sym)) {
- break;
- } else if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym)) {
- Sym = SC->getOperand();
- } else if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE)) {
- Sym = SIE->getLHS();
- } else if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE)) {
- Sym = ISE->getRHS();
- } else if (const SymSymExpr *SSM = dyn_cast<SymSymExpr>(BSE)) {
- return canReasonAbout(nonloc::SymbolVal(SSM->getLHS())) &&
- canReasonAbout(nonloc::SymbolVal(SSM->getRHS()));
- } else {
- llvm_unreachable("Unsupported binary expression to reason about!");
- }
- } else {
- llvm_unreachable("Unsupported expression to reason about!");
- }
- } while (Sym);
+ SMTExprRef getFloatRoundingMode() override {
+ // TODO: Don't assume nearest ties to even rounding mode
+ return newExprRef(Z3Expr(Context, Z3_mk_fpa_rne(Context.Context)));
+ }
- return true;
-}
+ SMTExprRef fromData(const SymbolID ID, const QualType &Ty,
+ uint64_t BitWidth) override {
+    // Materialize the name into a std::string: keeping the concatenated
+    // Twine itself would leave it pointing at a destroyed temporary.
+    std::string Name = ("$" + llvm::Twine(ID)).str();
+    return mkSymbol(Name.c_str(), mkSort(Ty, BitWidth));
+ }
+
+ SMTExprRef fromBoolean(const bool Bool) override {
+ Z3_ast AST =
+ Bool ? Z3_mk_true(Context.Context) : Z3_mk_false(Context.Context);
+ return newExprRef(Z3Expr(Context, AST));
+ }
+
+ SMTExprRef fromAPFloat(const llvm::APFloat &Float) override {
+ SMTSortRef Sort =
+ getFloatSort(llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
-ConditionTruthVal Z3ConstraintManager::checkNull(ProgramStateRef State,
- SymbolRef Sym) {
- QualType RetTy;
- // The expression may be casted, so we cannot call getZ3DataExpr() directly
- Z3Expr VarExp = getZ3Expr(Sym, &RetTy);
- Z3Expr Exp = getZ3ZeroExpr(VarExp, RetTy, true);
- // Negate the constraint
- Z3Expr NotExp = getZ3ZeroExpr(VarExp, RetTy, false);
+ llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), false);
+ SMTExprRef Z3Int = fromAPSInt(Int);
+ return newExprRef(Z3Expr(
+ Context, Z3_mk_fpa_to_fp_bv(Context.Context, toZ3Expr(*Z3Int).AST,
+ toZ3Sort(*Sort).Sort)));
+ }
- Solver.reset();
- Solver.addStateConstraints(State);
+ SMTExprRef fromAPSInt(const llvm::APSInt &Int) override {
+ SMTSortRef Sort = getBitvectorSort(Int.getBitWidth());
+ Z3_ast AST = Z3_mk_numeral(Context.Context, Int.toString(10).c_str(),
+ toZ3Sort(*Sort).Sort);
+ return newExprRef(Z3Expr(Context, AST));
+ }
- Solver.push();
- Solver.addConstraint(Exp);
- Z3_lbool isSat = Solver.check();
+ SMTExprRef fromInt(const char *Int, uint64_t BitWidth) override {
+ SMTSortRef Sort = getBitvectorSort(BitWidth);
+ Z3_ast AST = Z3_mk_numeral(Context.Context, Int, toZ3Sort(*Sort).Sort);
+ return newExprRef(Z3Expr(Context, AST));
+ }
- Solver.pop();
- Solver.addConstraint(NotExp);
- Z3_lbool isNotSat = Solver.check();
+ bool toAPFloat(const SMTSortRef &Sort, const SMTExprRef &AST,
+ llvm::APFloat &Float, bool useSemantics) {
+ assert(Sort->isFloatSort() && "Unsupported sort to floating-point!");
- // Zero is the only possible solution
- if (isSat == Z3_L_TRUE && isNotSat == Z3_L_FALSE)
+ llvm::APSInt Int(Sort->getFloatSortSize(), true);
+ const llvm::fltSemantics &Semantics =
+ getFloatSemantics(Sort->getFloatSortSize());
+ SMTSortRef BVSort = getBitvectorSort(Sort->getFloatSortSize());
+ if (!toAPSInt(BVSort, AST, Int, true)) {
+ return false;
+ }
+
+ if (useSemantics && !areEquivalent(Float.getSemantics(), Semantics)) {
+ assert(false && "Floating-point types don't match!");
+ return false;
+ }
+
+ Float = llvm::APFloat(Semantics, Int);
return true;
- // Zero is not a solution
- else if (isSat == Z3_L_FALSE && isNotSat == Z3_L_TRUE)
- return false;
+ }
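+  // toAPFloat recovers a float by reading its raw bits back as a bitvector
+  // and bit-casting them through the APFloat(fltSemantics, APInt)
+  // constructor.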
- // Zero may be a solution
- return ConditionTruthVal();
-}
+ bool toAPSInt(const SMTSortRef &Sort, const SMTExprRef &AST,
+ llvm::APSInt &Int, bool useSemantics) {
+ if (Sort->isBitvectorSort()) {
+ if (useSemantics && Int.getBitWidth() != Sort->getBitvectorSortSize()) {
+ assert(false && "Bitvector types don't match!");
+ return false;
+ }
-const llvm::APSInt *Z3ConstraintManager::getSymVal(ProgramStateRef State,
- SymbolRef Sym) const {
- BasicValueFactory &BV = getBasicVals();
- ASTContext &Ctx = BV.getContext();
+ // FIXME: This function is also used to retrieve floating-point values,
+ // which can be 16, 32, 64 or 128 bits long. Bitvectors can be anything
+ // between 1 and 64 bits long, which is the reason we have this weird
+      // guard. In the future, we need proper calls in the backend to retrieve
+      // floating-point values and their special values (NaN, +/-infinity,
+      // +/-zero); then we can drop this weird condition.
+ if (Sort->getBitvectorSortSize() <= 64 ||
+ Sort->getBitvectorSortSize() == 128) {
+ Int = getBitvector(AST, Int.getBitWidth(), Int.isUnsigned());
+ return true;
+ }
- if (const SymbolData *SD = dyn_cast<SymbolData>(Sym)) {
- QualType Ty = Sym->getType();
- assert(!Ty->isRealFloatingType());
- llvm::APSInt Value(Ctx.getTypeSize(Ty),
- !Ty->isSignedIntegerOrEnumerationType());
-
- Z3Expr Exp = getZ3DataExpr(SD->getSymbolID(), Ty);
-
- Solver.reset();
- Solver.addStateConstraints(State);
-
- // Constraints are unsatisfiable
- if (Solver.check() != Z3_L_TRUE)
- return nullptr;
-
- Z3Model Model = Solver.getModel();
- // Model does not assign interpretation
- if (!Model.getInterpretation(Exp, Value))
- return nullptr;
-
- // A value has been obtained, check if it is the only value
- Z3Expr NotExp = Z3Expr::fromBinOp(
- Exp, BO_NE,
- Ty->isBooleanType() ? Z3Expr::fromBoolean(Value.getBoolValue())
- : Z3Expr::fromAPSInt(Value),
- false);
-
- Solver.addConstraint(NotExp);
- if (Solver.check() == Z3_L_TRUE)
- return nullptr;
-
- // This is the only solution, store it
- return &BV.getValue(Value);
- } else if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym)) {
- SymbolRef CastSym = SC->getOperand();
- QualType CastTy = SC->getType();
- // Skip the void type
- if (CastTy->isVoidType())
- return nullptr;
-
- const llvm::APSInt *Value;
- if (!(Value = getSymVal(State, CastSym)))
- return nullptr;
- return &BV.Convert(SC->getType(), *Value);
- } else if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- const llvm::APSInt *LHS, *RHS;
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE)) {
- LHS = getSymVal(State, SIE->getLHS());
- RHS = &SIE->getRHS();
- } else if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE)) {
- LHS = &ISE->getLHS();
- RHS = getSymVal(State, ISE->getRHS());
- } else if (const SymSymExpr *SSM = dyn_cast<SymSymExpr>(BSE)) {
- // Early termination to avoid expensive call
- LHS = getSymVal(State, SSM->getLHS());
- RHS = LHS ? getSymVal(State, SSM->getRHS()) : nullptr;
- } else {
- llvm_unreachable("Unsupported binary expression to get symbol value!");
+ assert(false && "Bitwidth not supported!");
+ return false;
}
- if (!LHS || !RHS)
- return nullptr;
+ if (Sort->isBooleanSort()) {
+ if (useSemantics && Int.getBitWidth() < 1) {
+ assert(false && "Boolean type doesn't match!");
+ return false;
+ }
- llvm::APSInt ConvertedLHS = *LHS, ConvertedRHS = *RHS;
- QualType LTy = getAPSIntType(*LHS), RTy = getAPSIntType(*RHS);
- doIntTypeConversion<llvm::APSInt, Z3ConstraintManager::castAPSInt>(
- ConvertedLHS, LTy, ConvertedRHS, RTy);
- return BV.evalAPSInt(BSE->getOpcode(), ConvertedLHS, ConvertedRHS);
- }
+ Int = llvm::APSInt(llvm::APInt(Int.getBitWidth(), getBoolean(AST)),
+ Int.isUnsigned());
+ return true;
+ }
- llvm_unreachable("Unsupported expression to get symbol value!");
-}
+ llvm_unreachable("Unsupported sort to integer!");
+ }
-ProgramStateRef
-Z3ConstraintManager::removeDeadBindings(ProgramStateRef State,
- SymbolReaper &SymReaper) {
- ConstraintZ3Ty CZ = State->get<ConstraintZ3>();
- ConstraintZ3Ty::Factory &CZFactory = State->get_context<ConstraintZ3>();
+ bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) override {
+ Z3Model Model = getModel();
+ Z3_func_decl Func = Z3_get_app_decl(
+ Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
+ if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
+ return false;
- for (ConstraintZ3Ty::iterator I = CZ.begin(), E = CZ.end(); I != E; ++I) {
- if (SymReaper.maybeDead(I->first))
- CZ = CZFactory.remove(CZ, *I);
+ SMTExprRef Assign = newExprRef(
+ Z3Expr(Context,
+ Z3_model_get_const_interp(Context.Context, Model.Model, Func)));
+ SMTSortRef Sort = getSort(Assign);
+ return toAPSInt(Sort, Assign, Int, true);
}
- return State->set<ConstraintZ3>(CZ);
-}
+ bool getInterpretation(const SMTExprRef &Exp, llvm::APFloat &Float) override {
+ Z3Model Model = getModel();
+ Z3_func_decl Func = Z3_get_app_decl(
+ Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
+ if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
+ return false;
-//===------------------------------------------------------------------===//
-// Internal implementation.
-//===------------------------------------------------------------------===//
+ SMTExprRef Assign = newExprRef(
+ Z3Expr(Context,
+ Z3_model_get_const_interp(Context.Context, Model.Model, Func)));
+ SMTSortRef Sort = getSort(Assign);
+ return toAPFloat(Sort, Assign, Float, true);
+ }
-ProgramStateRef Z3ConstraintManager::assumeZ3Expr(ProgramStateRef State,
- SymbolRef Sym,
- const Z3Expr &Exp) {
- // Check the model, avoid simplifying AST to save time
- if (checkZ3Model(State, Exp) == Z3_L_TRUE)
- return State->add<ConstraintZ3>(std::make_pair(Sym, Exp));
+ ConditionTruthVal check() const override {
+ Z3_lbool res = Z3_solver_check(Context.Context, Solver);
+ if (res == Z3_L_TRUE)
+ return true;
- return nullptr;
-}
+ if (res == Z3_L_FALSE)
+ return false;
-Z3_lbool Z3ConstraintManager::checkZ3Model(ProgramStateRef State,
- const Z3Expr &Exp) const {
- Solver.reset();
- Solver.addConstraint(Exp);
- Solver.addStateConstraints(State);
- return Solver.check();
-}
+ return ConditionTruthVal();
+ }
-Z3Expr Z3ConstraintManager::getZ3Expr(SymbolRef Sym, QualType *RetTy,
- bool *hasComparison) const {
- if (hasComparison) {
- *hasComparison = false;
+ void push() override { return Z3_solver_push(Context.Context, Solver); }
+
+ void pop(unsigned NumStates = 1) override {
+ assert(Z3_solver_get_num_scopes(Context.Context, Solver) >= NumStates);
+ return Z3_solver_pop(Context.Context, Solver, NumStates);
}
- return getZ3SymExpr(Sym, RetTy, hasComparison);
-}
+ /// Get a model from the solver. The caller should first check that the
+ /// constraints are satisfiable.
+ Z3Model getModel() {
+ return Z3Model(Context, Z3_solver_get_model(Context.Context, Solver));
+ }
-Z3Expr Z3ConstraintManager::getZ3NotExpr(const Z3Expr &Exp) const {
- return Z3Expr::fromUnOp(UO_LNot, Exp);
-}
+ /// Reset the solver and remove all constraints.
+ void reset() const override { Z3_solver_reset(Context.Context, Solver); }
-Z3Expr Z3ConstraintManager::getZ3ZeroExpr(const Z3Expr &Exp, QualType Ty,
- bool Assumption) const {
- ASTContext &Ctx = getBasicVals().getContext();
- if (Ty->isRealFloatingType()) {
- llvm::APFloat Zero = llvm::APFloat::getZero(Ctx.getFloatTypeSemantics(Ty));
- return Z3Expr::fromFloatBinOp(Exp, Assumption ? BO_EQ : BO_NE,
- Z3Expr::fromAPFloat(Zero));
- } else if (Ty->isIntegralOrEnumerationType() || Ty->isAnyPointerType() ||
- Ty->isBlockPointerType() || Ty->isReferenceType()) {
- bool isSigned = Ty->isSignedIntegerOrEnumerationType();
- // Skip explicit comparison for boolean types
- if (Ty->isBooleanType())
- return Assumption ? getZ3NotExpr(Exp) : Exp;
- return Z3Expr::fromBinOp(Exp, Assumption ? BO_EQ : BO_NE,
- Z3Expr::fromInt("0", Ctx.getTypeSize(Ty)),
- isSigned);
- }
-
- llvm_unreachable("Unsupported type for zero value!");
-}
+ void print(raw_ostream &OS) const override {
+ OS << Z3_solver_to_string(Context.Context, Solver);
+ }
+}; // end class Z3Solver
-Z3Expr Z3ConstraintManager::getZ3SymExpr(SymbolRef Sym, QualType *RetTy,
- bool *hasComparison) const {
- if (const SymbolData *SD = dyn_cast<SymbolData>(Sym)) {
- if (RetTy)
- *RetTy = Sym->getType();
-
- return getZ3DataExpr(SD->getSymbolID(), Sym->getType());
- } else if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym)) {
- if (RetTy)
- *RetTy = Sym->getType();
-
- QualType FromTy;
- Z3Expr Exp = getZ3SymExpr(SC->getOperand(), &FromTy, hasComparison);
- // Casting an expression with a comparison invalidates it. Note that this
- // must occur after the recursive call above.
- // e.g. (signed char) (x > 0)
- if (hasComparison)
- *hasComparison = false;
- return getZ3CastExpr(Exp, FromTy, Sym->getType());
- } else if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- Z3Expr Exp = getZ3SymBinExpr(BSE, hasComparison, RetTy);
- // Set the hasComparison parameter, in post-order traversal order.
- if (hasComparison)
- *hasComparison = BinaryOperator::isComparisonOp(BSE->getOpcode());
- return Exp;
- }
-
- llvm_unreachable("Unsupported SymbolRef type!");
-}
+class Z3ConstraintManager : public SMTConstraintManager {
+ SMTSolverRef Solver = CreateZ3Solver();
-Z3Expr Z3ConstraintManager::getZ3DataExpr(const SymbolID ID,
- QualType Ty) const {
- ASTContext &Ctx = getBasicVals().getContext();
- return Z3Expr::fromData(ID, Ty->isBooleanType(), Ty->isRealFloatingType(),
- Ctx.getTypeSize(Ty));
-}
+public:
+ Z3ConstraintManager(SubEngine *SE, SValBuilder &SB)
+ : SMTConstraintManager(SE, SB, Solver) {}
-Z3Expr Z3ConstraintManager::getZ3CastExpr(const Z3Expr &Exp, QualType FromTy,
- QualType ToTy) const {
- ASTContext &Ctx = getBasicVals().getContext();
- return Z3Expr::fromCast(Exp, ToTy, Ctx.getTypeSize(ToTy), FromTy,
- Ctx.getTypeSize(FromTy));
-}
+ void addStateConstraints(ProgramStateRef State) const override {
+ // TODO: Don't add all the constraints, only the relevant ones
+ ConstraintZ3Ty CZ = State->get<ConstraintZ3>();
+ ConstraintZ3Ty::iterator I = CZ.begin(), IE = CZ.end();
-Z3Expr Z3ConstraintManager::getZ3SymBinExpr(const BinarySymExpr *BSE,
- bool *hasComparison,
- QualType *RetTy) const {
- QualType LTy, RTy;
- BinaryOperator::Opcode Op = BSE->getOpcode();
-
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE)) {
- RTy = getAPSIntType(SIE->getRHS());
- Z3Expr LHS = getZ3SymExpr(SIE->getLHS(), &LTy, hasComparison);
- Z3Expr RHS = Z3Expr::fromAPSInt(SIE->getRHS());
- return getZ3BinExpr(LHS, LTy, Op, RHS, RTy, RetTy);
- } else if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE)) {
- LTy = getAPSIntType(ISE->getLHS());
- Z3Expr LHS = Z3Expr::fromAPSInt(ISE->getLHS());
- Z3Expr RHS = getZ3SymExpr(ISE->getRHS(), &RTy, hasComparison);
- return getZ3BinExpr(LHS, LTy, Op, RHS, RTy, RetTy);
- } else if (const SymSymExpr *SSM = dyn_cast<SymSymExpr>(BSE)) {
- Z3Expr LHS = getZ3SymExpr(SSM->getLHS(), &LTy, hasComparison);
- Z3Expr RHS = getZ3SymExpr(SSM->getRHS(), &RTy, hasComparison);
- return getZ3BinExpr(LHS, LTy, Op, RHS, RTy, RetTy);
- } else {
- llvm_unreachable("Unsupported BinarySymExpr type!");
- }
-}
+ // Construct the logical AND of all the constraints
+ if (I != IE) {
+ std::vector<SMTExprRef> ASTs;
-Z3Expr Z3ConstraintManager::getZ3BinExpr(const Z3Expr &LHS, QualType LTy,
- BinaryOperator::Opcode Op,
- const Z3Expr &RHS, QualType RTy,
- QualType *RetTy) const {
- Z3Expr NewLHS = LHS;
- Z3Expr NewRHS = RHS;
- doTypeConversion(NewLHS, NewRHS, LTy, RTy);
- // Update the return type parameter if the output type has changed.
- if (RetTy) {
- // A boolean result can be represented as an integer type in C/C++, but at
- // this point we only care about the Z3 type. Set it as a boolean type to
- // avoid subsequent Z3 errors.
- if (BinaryOperator::isComparisonOp(Op) || BinaryOperator::isLogicalOp(Op)) {
- ASTContext &Ctx = getBasicVals().getContext();
- *RetTy = Ctx.BoolTy;
- } else {
- *RetTy = LTy;
- }
+ SMTExprRef Constraint = Solver->newExprRef(I++->second);
+ while (I != IE) {
+ Constraint = Solver->mkAnd(Constraint, Solver->newExprRef(I++->second));
+ }
- // If the two operands are pointers and the operation is a subtraction, the
- // result is of type ptrdiff_t, which is signed
- if (LTy->isAnyPointerType() && LTy == RTy && Op == BO_Sub) {
- ASTContext &Ctx = getBasicVals().getContext();
- *RetTy = Ctx.getIntTypeForBitwidth(Ctx.getTypeSize(LTy), true);
+ Solver->addConstraint(Constraint);
}
}
- return LTy->isRealFloatingType()
- ? Z3Expr::fromFloatBinOp(NewLHS, Op, NewRHS)
- : Z3Expr::fromBinOp(NewLHS, Op, NewRHS,
- LTy->isSignedIntegerOrEnumerationType());
-}
+ bool canReasonAbout(SVal X) const override {
+ const TargetInfo &TI = getBasicVals().getContext().getTargetInfo();
-//===------------------------------------------------------------------===//
-// Helper functions.
-//===------------------------------------------------------------------===//
+ Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
+ if (!SymVal)
+ return true;
-QualType Z3ConstraintManager::getAPSIntType(const llvm::APSInt &Int) const {
- ASTContext &Ctx = getBasicVals().getContext();
- return Ctx.getIntTypeForBitwidth(Int.getBitWidth(), Int.isSigned());
-}
+ const SymExpr *Sym = SymVal->getSymbol();
+ QualType Ty = Sym->getType();
-void Z3ConstraintManager::doTypeConversion(Z3Expr &LHS, Z3Expr &RHS,
- QualType &LTy, QualType &RTy) const {
- ASTContext &Ctx = getBasicVals().getContext();
-
- // Perform type conversion
- if (LTy->isIntegralOrEnumerationType() &&
- RTy->isIntegralOrEnumerationType()) {
- if (LTy->isArithmeticType() && RTy->isArithmeticType())
- return doIntTypeConversion<Z3Expr, Z3Expr::fromCast>(LHS, LTy, RHS, RTy);
- } else if (LTy->isRealFloatingType() || RTy->isRealFloatingType()) {
- return doFloatTypeConversion<Z3Expr, Z3Expr::fromCast>(LHS, LTy, RHS, RTy);
- } else if ((LTy->isAnyPointerType() || RTy->isAnyPointerType()) ||
- (LTy->isBlockPointerType() || RTy->isBlockPointerType()) ||
- (LTy->isReferenceType() || RTy->isReferenceType())) {
- // TODO: Refactor to Sema::FindCompositePointerType(), and
- // Sema::CheckCompareOperands().
-
- uint64_t LBitWidth = Ctx.getTypeSize(LTy);
- uint64_t RBitWidth = Ctx.getTypeSize(RTy);
-
- // Cast the non-pointer type to the pointer type.
- // TODO: Be more strict about this.
- if ((LTy->isAnyPointerType() ^ RTy->isAnyPointerType()) ||
- (LTy->isBlockPointerType() ^ RTy->isBlockPointerType()) ||
- (LTy->isReferenceType() ^ RTy->isReferenceType())) {
- if (LTy->isNullPtrType() || LTy->isBlockPointerType() ||
- LTy->isReferenceType()) {
- LHS = Z3Expr::fromCast(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = RTy;
- } else {
- RHS = Z3Expr::fromCast(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = LTy;
- }
- }
+ // Complex types are not modeled
+ if (Ty->isComplexType() || Ty->isComplexIntegerType())
+ return false;
- // Cast the void pointer type to the non-void pointer type.
- // For void types, this assumes that the casted value is equal to the value
- // of the original pointer, and does not account for alignment requirements.
- if (LTy->isVoidPointerType() ^ RTy->isVoidPointerType()) {
- assert((Ctx.getTypeSize(LTy) == Ctx.getTypeSize(RTy)) &&
- "Pointer types have different bitwidths!");
- if (RTy->isVoidPointerType())
- RTy = LTy;
- else
- LTy = RTy;
- }
+ // Non-IEEE 754 floating-point types are not modeled
+ if ((Ty->isSpecificBuiltinType(BuiltinType::LongDouble) &&
+ (&TI.getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended() ||
+ &TI.getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())))
+ return false;
- if (LTy == RTy)
- return;
- }
+ if (isa<SymbolData>(Sym))
+ return true;
- // Fallback: for the solver, assume that these types don't really matter
- if ((LTy.getCanonicalType() == RTy.getCanonicalType()) ||
- (LTy->isObjCObjectPointerType() && RTy->isObjCObjectPointerType())) {
- LTy = RTy;
- return;
- }
+ SValBuilder &SVB = getSValBuilder();
- // TODO: Refine behavior for invalid type casts
-}
+ if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
+ return canReasonAbout(SVB.makeSymbolVal(SC->getOperand()));
-template <typename T,
- T(doCast)(const T &, QualType, uint64_t, QualType, uint64_t)>
-void Z3ConstraintManager::doIntTypeConversion(T &LHS, QualType &LTy, T &RHS,
- QualType &RTy) const {
- ASTContext &Ctx = getBasicVals().getContext();
-
- uint64_t LBitWidth = Ctx.getTypeSize(LTy);
- uint64_t RBitWidth = Ctx.getTypeSize(RTy);
-
- // Always perform integer promotion before checking type equality.
- // Otherwise, e.g. (bool) a + (bool) b could trigger a backend assertion
- if (LTy->isPromotableIntegerType()) {
- QualType NewTy = Ctx.getPromotedIntegerType(LTy);
- uint64_t NewBitWidth = Ctx.getTypeSize(NewTy);
- LHS = (*doCast)(LHS, NewTy, NewBitWidth, LTy, LBitWidth);
- LTy = NewTy;
- LBitWidth = NewBitWidth;
- }
- if (RTy->isPromotableIntegerType()) {
- QualType NewTy = Ctx.getPromotedIntegerType(RTy);
- uint64_t NewBitWidth = Ctx.getTypeSize(NewTy);
- RHS = (*doCast)(RHS, NewTy, NewBitWidth, RTy, RBitWidth);
- RTy = NewTy;
- RBitWidth = NewBitWidth;
- }
-
- if (LTy == RTy)
- return;
-
- // Perform integer type conversion
- // Note: Safe to skip updating bitwidth because this must terminate
- bool isLSignedTy = LTy->isSignedIntegerOrEnumerationType();
- bool isRSignedTy = RTy->isSignedIntegerOrEnumerationType();
-
- int order = Ctx.getIntegerTypeOrder(LTy, RTy);
- if (isLSignedTy == isRSignedTy) {
- // Same signedness; use the higher-ranked type
- if (order == 1) {
- RHS = (*doCast)(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = LTy;
- } else {
- LHS = (*doCast)(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = RTy;
- }
- } else if (order != (isLSignedTy ? 1 : -1)) {
- // The unsigned type has greater than or equal rank to the
- // signed type, so use the unsigned type
- if (isRSignedTy) {
- RHS = (*doCast)(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = LTy;
- } else {
- LHS = (*doCast)(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = RTy;
- }
- } else if (LBitWidth != RBitWidth) {
- // The two types are different widths; if we are here, that
- // means the signed type is larger than the unsigned type, so
- // use the signed type.
- if (isLSignedTy) {
- RHS = (*doCast)(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = LTy;
- } else {
- LHS = (*doCast)(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = RTy;
+ if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE))
+ return canReasonAbout(SVB.makeSymbolVal(SIE->getLHS()));
+
+ if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE))
+ return canReasonAbout(SVB.makeSymbolVal(ISE->getRHS()));
+
+ if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(BSE))
+ return canReasonAbout(SVB.makeSymbolVal(SSE->getLHS())) &&
+ canReasonAbout(SVB.makeSymbolVal(SSE->getRHS()));
}
- } else {
- // The signed type is higher-ranked than the unsigned type,
- // but isn't actually any bigger (like unsigned int and long
- // on most 32-bit systems). Use the unsigned type corresponding
- // to the signed type.
- QualType NewTy = Ctx.getCorrespondingUnsignedType(isLSignedTy ? LTy : RTy);
- RHS = (*doCast)(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = NewTy;
- LHS = (*doCast)(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = NewTy;
+
+ llvm_unreachable("Unsupported expression to reason about!");
}
-}
-template <typename T,
- T(doCast)(const T &, QualType, uint64_t, QualType, uint64_t)>
-void Z3ConstraintManager::doFloatTypeConversion(T &LHS, QualType &LTy, T &RHS,
- QualType &RTy) const {
- ASTContext &Ctx = getBasicVals().getContext();
-
- uint64_t LBitWidth = Ctx.getTypeSize(LTy);
- uint64_t RBitWidth = Ctx.getTypeSize(RTy);
-
- // Perform float-point type promotion
- if (!LTy->isRealFloatingType()) {
- LHS = (*doCast)(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = RTy;
- LBitWidth = RBitWidth;
- }
- if (!RTy->isRealFloatingType()) {
- RHS = (*doCast)(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = LTy;
- RBitWidth = LBitWidth;
- }
-
- if (LTy == RTy)
- return;
-
- // If we have two real floating types, convert the smaller operand to the
- // bigger result
- // Note: Safe to skip updating bitwidth because this must terminate
- int order = Ctx.getFloatingTypeOrder(LTy, RTy);
- if (order > 0) {
- RHS = Z3Expr::fromCast(RHS, LTy, LBitWidth, RTy, RBitWidth);
- RTy = LTy;
- } else if (order == 0) {
- LHS = Z3Expr::fromCast(LHS, RTy, RBitWidth, LTy, LBitWidth);
- LTy = RTy;
- } else {
- llvm_unreachable("Unsupported floating-point type cast!");
+ ProgramStateRef removeDeadBindings(ProgramStateRef State,
+ SymbolReaper &SymReaper) override {
+ ConstraintZ3Ty CZ = State->get<ConstraintZ3>();
+ ConstraintZ3Ty::Factory &CZFactory = State->get_context<ConstraintZ3>();
+
+ for (ConstraintZ3Ty::iterator I = CZ.begin(), E = CZ.end(); I != E; ++I) {
+ if (SymReaper.maybeDead(I->first))
+ CZ = CZFactory.remove(CZ, *I);
+ }
+
+ return State->set<ConstraintZ3>(CZ);
}
-}
-llvm::APSInt Z3ConstraintManager::castAPSInt(const llvm::APSInt &V,
- QualType ToTy, uint64_t ToWidth,
- QualType FromTy,
- uint64_t FromWidth) {
- APSIntType TargetType(ToWidth, !ToTy->isSignedIntegerOrEnumerationType());
- return TargetType.convert(V);
-}
+ ProgramStateRef assumeExpr(ProgramStateRef State, SymbolRef Sym,
+ const SMTExprRef &Exp) override {
+ // Check the model, avoid simplifying AST to save time
+ if (checkModel(State, Exp).isConstrainedTrue())
+ return State->add<ConstraintZ3>(std::make_pair(Sym, toZ3Expr(*Exp)));
+
+ return nullptr;
+ }
-//==------------------------------------------------------------------------==/
-// Pretty-printing.
-//==------------------------------------------------------------------------==/
+ //==------------------------------------------------------------------------==/
+ // Pretty-printing.
+ //==------------------------------------------------------------------------==/
-void Z3ConstraintManager::print(ProgramStateRef St, raw_ostream &OS,
- const char *nl, const char *sep) {
+ void print(ProgramStateRef St, raw_ostream &OS, const char *nl,
+ const char *sep) override {
- ConstraintZ3Ty CZ = St->get<ConstraintZ3>();
+ ConstraintZ3Ty CZ = St->get<ConstraintZ3>();
- OS << nl << sep << "Constraints:";
- for (ConstraintZ3Ty::iterator I = CZ.begin(), E = CZ.end(); I != E; ++I) {
- OS << nl << ' ' << I->first << " : ";
- I->second.print(OS);
+ OS << nl << sep << "Constraints:";
+ for (ConstraintZ3Ty::iterator I = CZ.begin(), E = CZ.end(); I != E; ++I) {
+ OS << nl << ' ' << I->first << " : ";
+ I->second.print(OS);
+ }
+ OS << nl;
}
- OS << nl;
-}
+}; // end class Z3ConstraintManager
+
+} // end anonymous namespace
#endif
+std::unique_ptr<SMTSolver> clang::ento::CreateZ3Solver() {
+#if CLANG_ANALYZER_WITH_Z3
+ return llvm::make_unique<Z3Solver>();
+#else
+ llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
+ "with -DCLANG_ANALYZER_BUILD_Z3=ON",
+ false);
+ return nullptr;
+#endif
+}
+
std::unique_ptr<ConstraintManager>
ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
#if CLANG_ANALYZER_WITH_Z3
return llvm::make_unique<Z3ConstraintManager>(Eng, StMgr.getSValBuilder());
#else
- llvm::report_fatal_error("Clang was not compiled with Z3 support!", false);
+ llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
+ "with -DCLANG_ANALYZER_BUILD_Z3=ON",
+ false);
return nullptr;
#endif
}
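
The hunks above fold the old free-standing Z3ConstraintManager helpers into the generic SMTSolver interface. A minimal sketch of driving that interface directly, assuming Clang was configured with -DCLANG_ANALYZER_BUILD_Z3=ON and that `Cond` is a boolean SMTExprRef built elsewhere (`Cond` and the 32-bit width are placeholders, not part of the patch):

  // Sketch only: Cond is a hypothetical, previously built boolean constraint.
  SMTSolverRef Solver = clang::ento::CreateZ3Solver();
  Solver->push();                             // save the current constraint set
  Solver->addConstraint(Cond);                // assert the condition
  if (Solver->check().isConstrainedTrue()) {  // satisfiable?
    llvm::APSInt Val(32, /*isUnsigned=*/true);
    if (Solver->getInterpretation(Cond, Val)) // ask the model for Cond's value
      llvm::errs() << Val << "\n";
  }
  Solver->pop();                              // restore the previous set
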
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index fccea9ee53bf..44abde5da6d1 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -22,6 +22,7 @@
#include "clang/Analysis/CallGraph.h"
#include "clang/Analysis/CodeInjector.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/CrossTU/CrossTranslationUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
@@ -57,6 +58,8 @@ STATISTIC(NumFunctionsAnalyzed,
"with inlining turned on).");
STATISTIC(NumBlocksInAnalyzedFunctions,
"The # of basic blocks in the analyzed functions.");
+STATISTIC(NumVisitedBlocksInAnalyzedFunctions,
+ "The # of visited basic blocks in the analyzed functions.");
STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
@@ -70,7 +73,7 @@ void ento::createPlistHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
const Preprocessor &PP) {
createHTMLDiagnosticConsumer(AnalyzerOpts, C,
llvm::sys::path::parent_path(prefix), PP);
- createPlistDiagnosticConsumer(AnalyzerOpts, C, prefix, PP);
+ createPlistMultiFileDiagnosticConsumer(AnalyzerOpts, C, prefix, PP);
}
void ento::createTextPathDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
@@ -161,6 +164,8 @@ class AnalysisConsumer : public AnalysisASTConsumer,
/// Bug Reporter to use while recursively visiting Decls.
BugReporter *RecVisitorBR;
+ std::vector<std::function<void(CheckerRegistry &)>> CheckerRegistrationFns;
+
public:
ASTContext *Ctx;
const Preprocessor &PP;
@@ -168,8 +173,9 @@ public:
AnalyzerOptionsRef Opts;
ArrayRef<std::string> Plugins;
CodeInjector *Injector;
+ cross_tu::CrossTranslationUnitContext CTU;
- /// \brief Stores the declarations from the local translation unit.
+ /// Stores the declarations from the local translation unit.
/// Note, we pre-compute the local declarations at parse time as an
/// optimization to make sure we do not deserialize everything from disk.
/// The local declaration to all declarations ratio might be very small when
@@ -186,28 +192,31 @@ public:
std::unique_ptr<AnalysisManager> Mgr;
/// Times the analysis of each translation unit.
- static llvm::Timer* TUTotalTimer;
+ std::unique_ptr<llvm::TimerGroup> AnalyzerTimers;
+ std::unique_ptr<llvm::Timer> TUTotalTimer;
/// The information about analyzed functions shared throughout the
/// translation unit.
FunctionSummariesTy FunctionSummaries;
- AnalysisConsumer(const Preprocessor &pp, const std::string &outdir,
+ AnalysisConsumer(CompilerInstance &CI, const std::string &outdir,
AnalyzerOptionsRef opts, ArrayRef<std::string> plugins,
CodeInjector *injector)
- : RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr), PP(pp),
- OutDir(outdir), Opts(std::move(opts)), Plugins(plugins),
- Injector(injector) {
+ : RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr),
+ PP(CI.getPreprocessor()), OutDir(outdir), Opts(std::move(opts)),
+ Plugins(plugins), Injector(injector), CTU(CI) {
DigestAnalyzerOptions();
- if (Opts->PrintStats) {
- llvm::EnableStatistics(false);
- TUTotalTimer = new llvm::Timer("time", "Analyzer Total Time");
+ if (Opts->PrintStats || Opts->shouldSerializeStats()) {
+ AnalyzerTimers = llvm::make_unique<llvm::TimerGroup>(
+ "analyzer", "Analyzer timers");
+ TUTotalTimer = llvm::make_unique<llvm::Timer>(
+ "time", "Analyzer total time", *AnalyzerTimers);
+ llvm::EnableStatistics(/* PrintOnExit= */ false);
}
}
~AnalysisConsumer() override {
if (Opts->PrintStats) {
- delete TUTotalTimer;
llvm::PrintStatistics();
}
}
@@ -286,32 +295,33 @@ public:
void Initialize(ASTContext &Context) override {
Ctx = &Context;
- checkerMgr = createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
- PP.getDiagnostics());
+ checkerMgr =
+ createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
+ CheckerRegistrationFns, PP.getDiagnostics());
Mgr = llvm::make_unique<AnalysisManager>(
*Ctx, PP.getDiagnostics(), PP.getLangOpts(), PathConsumers,
CreateStoreMgr, CreateConstraintMgr, checkerMgr.get(), *Opts, Injector);
}
- /// \brief Store the top level decls in the set to be processed later on.
+ /// Store the top level decls in the set to be processed later on.
/// (Doing this pre-processing avoids deserialization of data from PCH.)
bool HandleTopLevelDecl(DeclGroupRef D) override;
void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override;
void HandleTranslationUnit(ASTContext &C) override;
- /// \brief Determine which inlining mode should be used when this function is
+ /// Determine which inlining mode should be used when this function is
/// analyzed. This allows redefining the default inlining policies when
/// analyzing a given function.
ExprEngine::InliningModes
getInliningModeForFunction(const Decl *D, const SetOfConstDecls &Visited);
- /// \brief Build the call graph for all the top level decls of this TU and
+ /// Build the call graph for all the top level decls of this TU and
/// use it to define the order in which the functions should be visited.
void HandleDeclsCallGraph(const unsigned LocalTUDeclsSize);
- /// \brief Run analyzes(syntax or path sensitive) on the given function.
+ /// Run analyses (syntax-only or path-sensitive) on the given function.
/// \param Mode - determines if we are requesting syntax only or path
/// sensitive only analysis.
/// \param VisitedCallees - The output parameter, which is populated with the
@@ -378,13 +388,20 @@ public:
PathConsumers.push_back(Consumer);
}
+ void AddCheckerRegistrationFn(std::function<void(CheckerRegistry&)> Fn) override {
+ CheckerRegistrationFns.push_back(std::move(Fn));
+ }
+
private:
void storeTopLevelDecls(DeclGroupRef DG);
std::string getFunctionName(const Decl *D);
- /// \brief Check if we should skip (not analyze) the given function.
+ /// Check if we should skip (not analyze) the given function.
AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
+ void runAnalysisOnTranslationUnit(ASTContext &C);
+ /// Print \p S to stderr if \c Opts->AnalyzerDisplayProgress is set.
+ void reportAnalyzerProgress(StringRef S);
};
} // end anonymous namespace
@@ -392,8 +409,6 @@ private:
//===----------------------------------------------------------------------===//
// AnalysisConsumer implementation.
//===----------------------------------------------------------------------===//
-llvm::Timer* AnalysisConsumer::TUTotalTimer = nullptr;
-
bool AnalysisConsumer::HandleTopLevelDecl(DeclGroupRef DG) {
storeTopLevelDecls(DG);
return true;
@@ -508,68 +523,90 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
}
}
+static bool isBisonFile(ASTContext &C) {
+ const SourceManager &SM = C.getSourceManager();
+ FileID FID = SM.getMainFileID();
+ StringRef Buffer = SM.getBuffer(FID)->getBuffer();
+ if (Buffer.startswith("/* A Bison parser, made by"))
+ return true;
+ return false;
+}
+
+void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
+ BugReporter BR(*Mgr);
+ TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+
+ // Run the AST-only checks using the order in which functions are defined.
+ // If inlining is not turned on, use the simplest function order for
+ // path-sensitive analyses as well.
+ RecVisitorMode = AM_Syntax;
+ if (!Mgr->shouldInlineCall())
+ RecVisitorMode |= AM_Path;
+ RecVisitorBR = &BR;
+
+ // Process all the top level declarations.
+ //
+ // Note: TraverseDecl may modify LocalTUDecls, but only by appending more
+ // entries. Thus we don't use an iterator, but rely on LocalTUDecls
+ // random access. By doing so, we automatically compensate for iterators
+ // possibly being invalidated, although this is a bit slower.
+ const unsigned LocalTUDeclsSize = LocalTUDecls.size();
+ for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+ TraverseDecl(LocalTUDecls[i]);
+ }
+
+ if (Mgr->shouldInlineCall())
+ HandleDeclsCallGraph(LocalTUDeclsSize);
+
+ // After all decls handled, run checkers on the entire TranslationUnit.
+ checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
+
+ RecVisitorBR = nullptr;
+}
+
+void AnalysisConsumer::reportAnalyzerProgress(StringRef S) {
+ if (Opts->AnalyzerDisplayProgress)
+ llvm::errs() << S;
+}
+
void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
+
// Don't run the actions if an error has occurred with parsing the file.
DiagnosticsEngine &Diags = PP.getDiagnostics();
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
return;
- // Don't analyze if the user explicitly asked for no checks to be performed
- // on this file.
- if (Opts->DisableAllChecks)
- return;
-
- {
- if (TUTotalTimer) TUTotalTimer->startTimer();
-
- // Introduce a scope to destroy BR before Mgr.
- BugReporter BR(*Mgr);
- TranslationUnitDecl *TU = C.getTranslationUnitDecl();
- checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
-
- // Run the AST-only checks using the order in which functions are defined.
- // If inlining is not turned on, use the simplest function order for path
- // sensitive analyzes as well.
- RecVisitorMode = AM_Syntax;
- if (!Mgr->shouldInlineCall())
- RecVisitorMode |= AM_Path;
- RecVisitorBR = &BR;
-
- // Process all the top level declarations.
- //
- // Note: TraverseDecl may modify LocalTUDecls, but only by appending more
- // entries. Thus we don't use an iterator, but rely on LocalTUDecls
- // random access. By doing so, we automatically compensate for iterators
- // possibly being invalidated, although this is a bit slower.
- const unsigned LocalTUDeclsSize = LocalTUDecls.size();
- for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
- TraverseDecl(LocalTUDecls[i]);
- }
+ if (TUTotalTimer) TUTotalTimer->startTimer();
- if (Mgr->shouldInlineCall())
- HandleDeclsCallGraph(LocalTUDeclsSize);
+ if (isBisonFile(C)) {
+ reportAnalyzerProgress("Skipping bison-generated file\n");
+ } else if (Opts->DisableAllChecks) {
- // After all decls handled, run checkers on the entire TranslationUnit.
- checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
-
- RecVisitorBR = nullptr;
+ // Don't analyze if the user explicitly asked for no checks to be performed
+ // on this file.
+ reportAnalyzerProgress("All checks are disabled using a supplied option\n");
+ } else {
+ // Otherwise, just run the analysis.
+ runAnalysisOnTranslationUnit(C);
}
- // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
- // FIXME: This should be replaced with something that doesn't rely on
- // side-effects in PathDiagnosticConsumer's destructor. This is required when
- // used with option -disable-free.
- Mgr.reset();
-
if (TUTotalTimer) TUTotalTimer->stopTimer();
// Count how many basic blocks we have not covered.
NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
+ NumVisitedBlocksInAnalyzedFunctions =
+ FunctionSummaries.getTotalNumVisitedBasicBlocks();
if (NumBlocksInAnalyzedFunctions > 0)
PercentReachableBlocks =
(FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
NumBlocksInAnalyzedFunctions;
+ // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
+ // FIXME: This should be replaced with something that doesn't rely on
+ // side-effects in PathDiagnosticConsumer's destructor. This is required when
+ // used with option -disable-free.
+ Mgr.reset();
}
std::string AnalysisConsumer::getFunctionName(const Decl *D) {
@@ -648,7 +685,7 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
SourceLocation SL = Body ? Body->getLocStart() : D->getLocation();
SL = SM.getExpansionLoc(SL);
- if (!Opts->AnalyzeAll && !SM.isWrittenInMainFile(SL)) {
+ if (!Opts->AnalyzeAll && !Mgr->isInCodeFile(SL)) {
if (SL.isInvalid() || SM.isInSystemHeader(SL))
return AM_None;
return Mode & ~AM_Path;
@@ -704,7 +741,8 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
return;
- ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,IMode);
+ ExprEngine Eng(CTU, *Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,
+ IMode);
// Set the graph auditor.
std::unique_ptr<ExplodedNode::Auditor> Auditor;
@@ -762,7 +800,7 @@ ento::CreateAnalysisConsumer(CompilerInstance &CI) {
bool hasModelPath = analyzerOpts->Config.count("model-path") > 0;
return llvm::make_unique<AnalysisConsumer>(
- CI.getPreprocessor(), CI.getFrontendOpts().OutputFile, analyzerOpts,
+ CI, CI.getFrontendOpts().OutputFile, analyzerOpts,
CI.getFrontendOpts().Plugins,
hasModelPath ? new ModelInjector(CI) : nullptr);
}
@@ -852,9 +890,9 @@ UbigraphViz::~UbigraphViz() {
std::string Ubiviz;
if (auto Path = llvm::sys::findProgramByName("ubiviz"))
Ubiviz = *Path;
- const char *args[] = {Ubiviz.c_str(), Filename.c_str(), nullptr};
+ std::array<StringRef, 2> Args{{Ubiviz, Filename}};
- if (llvm::sys::ExecuteAndWait(Ubiviz, &args[0], nullptr, {}, 0, 0, &ErrMsg)) {
+ if (llvm::sys::ExecuteAndWait(Ubiviz, Args, llvm::None, {}, 0, 0, &ErrMsg)) {
llvm::errs() << "Error viewing graph: " << ErrMsg << "\n";
}
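
AddCheckerRegistrationFn above gives embedders a way to queue checker registration before Initialize() builds the CheckerManager. A hedged sketch, where `MyChecker` is a hypothetical checker class and `addChecker<T>(FullName, Desc)` is assumed to be CheckerRegistry's usual template helper:

  // Sketch only: MyChecker is hypothetical; Consumer is an AnalysisASTConsumer.
  Consumer->AddCheckerRegistrationFn([](CheckerRegistry &Registry) {
    Registry.addChecker<MyChecker>("example.MyChecker",
                                   "Registered through the new hook");
  });
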
diff --git a/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
index e3ca91aec9cd..ff0a6e19fc97 100644
--- a/lib/StaticAnalyzer/Frontend/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
@@ -7,14 +7,15 @@ set(LLVM_LINK_COMPONENTS
add_clang_library(clangStaticAnalyzerFrontend
AnalysisConsumer.cpp
CheckerRegistration.cpp
- ModelConsumer.cpp
FrontendActions.cpp
+ ModelConsumer.cpp
ModelInjector.cpp
LINK_LIBS
clangAST
clangAnalysis
clangBasic
+ clangCrossTU
clangFrontend
clangLex
clangStaticAnalyzerCheckers
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 6792f89876cd..a260c2d85b11 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -111,16 +111,21 @@ getCheckerOptList(const AnalyzerOptions &opts) {
return checkerOpts;
}
-std::unique_ptr<CheckerManager>
-ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts,
- ArrayRef<std::string> plugins,
- DiagnosticsEngine &diags) {
+std::unique_ptr<CheckerManager> ento::createCheckerManager(
+ AnalyzerOptions &opts, const LangOptions &langOpts,
+ ArrayRef<std::string> plugins,
+ ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns,
+ DiagnosticsEngine &diags) {
std::unique_ptr<CheckerManager> checkerMgr(
new CheckerManager(langOpts, opts));
SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
ClangCheckerRegistry allCheckers(plugins, &diags);
+
+ for (const auto &Fn : checkerRegistrationFns)
+ Fn(allCheckers);
+
allCheckers.initializeManager(*checkerMgr, checkerOpts);
allCheckers.validateCheckerOptions(opts, diags);
checkerMgr->finishedCheckerRegistration();
diff --git a/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp b/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
index a65a5ee0a451..60825ef7411d 100644
--- a/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file implements an ASTConsumer for consuming model files.
+/// This file implements an ASTConsumer for consuming model files.
///
/// This ASTConsumer handles the AST of a parsed model file. All top level
/// function definitions will be collected from that model file for later
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
index cdb1ed9b3815..c43d30440c8f 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -10,6 +10,7 @@
#include "ModelInjector.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Stack.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
@@ -95,11 +96,10 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) {
ParseModelFileAction parseModelFile(Bodies);
- const unsigned ThreadStackSize = 8 << 20;
llvm::CrashRecoveryContext CRC;
CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(parseModelFile); },
- ThreadStackSize);
+ DesiredStackSize);
Instance.getPreprocessor().FinalizeForModelFile();
@@ -109,7 +109,7 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) {
// The preprocessor enters to the main file id when parsing is started, so
// the main file id is changed to the model file during parsing and it needs
- // to be reseted to the former main file id after parsing of the model file
+ // to be reset to the former main file id after parsing of the model file
// is done.
SM.setMainFileID(mainFileID);
}
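
The ModelInjector hunk above replaces the local 8 MiB constant with DesiredStackSize from the newly included clang/Basic/Stack.h. The underlying pattern, sketched under the assumption that the body may crash or overflow the default thread stack:

  // Sketch only: run a fallible action on a dedicated thread with Clang's
  // preferred stack size; RunSafelyOnThread returns false if the body crashed.
  llvm::CrashRecoveryContext CRC;
  bool Ok = CRC.RunSafelyOnThread([] { /* parse the model file */ },
                                  clang::DesiredStackSize);
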
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.h b/lib/StaticAnalyzer/Frontend/ModelInjector.h
index 98a5f69d68e8..b1b6de9ef9d9 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.h
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief This file defines the clang::ento::ModelInjector class which implements the
+/// This file defines the clang::ento::ModelInjector class which implements the
/// clang::CodeInjector interface. This class is responsible for injecting
/// function definitions that were synthesized from model files.
///
@@ -43,7 +43,7 @@ public:
Stmt *getBody(const ObjCMethodDecl *D) override;
private:
- /// \brief Synthesize a body for a declaration
+ /// Synthesize a body for a declaration
///
/// This method first looks up the appropriate model file based on the
/// model-path configuration option and the name of the declaration that is
diff --git a/lib/Tooling/ASTDiff/ASTDiff.cpp b/lib/Tooling/ASTDiff/ASTDiff.cpp
index 6da0de7edf9a..a5d2d1d24729 100644
--- a/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -369,7 +369,7 @@ SyntaxTree::Impl::getRelativeName(const NamedDecl *ND,
else if (AST.getLangOpts().CPlusPlus11)
if (auto *Tag = dyn_cast<TagDecl>(Context))
ContextPrefix = Tag->getQualifiedNameAsString();
- // Strip the qualifier, if Val refers to somthing in the current scope.
+ // Strip the qualifier, if Val refers to something in the current scope.
// But leave one leading ':' in place, so that we know that this is a
// relative path.
if (!ContextPrefix.empty() && StringRef(Val).startswith(ContextPrefix))
@@ -741,7 +741,7 @@ public:
List.pop();
}
// TODO this is here to get a stable output, not a good heuristic
- std::sort(Result.begin(), Result.end());
+ llvm::sort(Result.begin(), Result.end());
return Result;
}
int peekMax() const {
diff --git a/lib/Tooling/AllTUsExecution.cpp b/lib/Tooling/AllTUsExecution.cpp
new file mode 100644
index 000000000000..b761556ee76b
--- /dev/null
+++ b/lib/Tooling/AllTUsExecution.cpp
@@ -0,0 +1,161 @@
+//===- lib/Tooling/AllTUsExecution.cpp - Execute actions on all TUs. ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/AllTUsExecution.h"
+#include "clang/Tooling/ToolExecutorPluginRegistry.h"
+#include "llvm/Support/ThreadPool.h"
+
+namespace clang {
+namespace tooling {
+
+const char *AllTUsToolExecutor::ExecutorName = "AllTUsToolExecutor";
+
+namespace {
+llvm::Error make_string_error(const llvm::Twine &Message) {
+ return llvm::make_error<llvm::StringError>(Message,
+ llvm::inconvertibleErrorCode());
+}
+
+ArgumentsAdjuster getDefaultArgumentsAdjusters() {
+ return combineAdjusters(
+ getClangStripOutputAdjuster(),
+ combineAdjusters(getClangSyntaxOnlyAdjuster(),
+ getClangStripDependencyFileAdjuster()));
+}
+
+class ThreadSafeToolResults : public ToolResults {
+public:
+ void addResult(StringRef Key, StringRef Value) override {
+ std::unique_lock<std::mutex> LockGuard(Mutex);
+ Results.addResult(Key, Value);
+ }
+
+ std::vector<std::pair<llvm::StringRef, llvm::StringRef>>
+ AllKVResults() override {
+ return Results.AllKVResults();
+ }
+
+ void forEachResult(llvm::function_ref<void(StringRef Key, StringRef Value)>
+ Callback) override {
+ Results.forEachResult(Callback);
+ }
+
+private:
+ InMemoryToolResults Results;
+ std::mutex Mutex;
+};
+
+} // namespace
+
+AllTUsToolExecutor::AllTUsToolExecutor(
+ const CompilationDatabase &Compilations, unsigned ThreadCount,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ : Compilations(Compilations), Results(new ThreadSafeToolResults),
+ Context(Results.get()), ThreadCount(ThreadCount) {}
+
+AllTUsToolExecutor::AllTUsToolExecutor(
+ CommonOptionsParser Options, unsigned ThreadCount,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ : OptionsParser(std::move(Options)),
+ Compilations(OptionsParser->getCompilations()),
+ Results(new ThreadSafeToolResults), Context(Results.get()),
+ ThreadCount(ThreadCount) {}
+
+llvm::Error AllTUsToolExecutor::execute(
+ llvm::ArrayRef<
+ std::pair<std::unique_ptr<FrontendActionFactory>, ArgumentsAdjuster>>
+ Actions) {
+ if (Actions.empty())
+ return make_string_error("No action to execute.");
+
+ if (Actions.size() != 1)
+ return make_string_error(
+ "Only support executing exactly 1 action at this point.");
+
+ std::string ErrorMsg;
+ std::mutex TUMutex;
+ auto AppendError = [&](llvm::Twine Err) {
+ std::unique_lock<std::mutex> LockGuard(TUMutex);
+ ErrorMsg += Err.str();
+ };
+
+ auto Log = [&](llvm::Twine Msg) {
+ std::unique_lock<std::mutex> LockGuard(TUMutex);
+ llvm::errs() << Msg.str() << "\n";
+ };
+
+ auto Files = Compilations.getAllFiles();
+ // Add a counter to track the progress.
+ const std::string TotalNumStr = std::to_string(Files.size());
+ unsigned Counter = 0;
+ auto Count = [&]() {
+ std::unique_lock<std::mutex> LockGuard(TUMutex);
+ return ++Counter;
+ };
+
+ auto &Action = Actions.front();
+
+ {
+ llvm::ThreadPool Pool(ThreadCount == 0 ? llvm::hardware_concurrency()
+ : ThreadCount);
+
+ for (std::string File : Files) {
+ Pool.async(
+ [&](std::string Path) {
+ Log("[" + std::to_string(Count()) + "/" + TotalNumStr +
+ "] Processing file " + Path);
+ ClangTool Tool(Compilations, {Path});
+ Tool.appendArgumentsAdjuster(Action.second);
+ Tool.appendArgumentsAdjuster(getDefaultArgumentsAdjusters());
+ for (const auto &FileAndContent : OverlayFiles)
+ Tool.mapVirtualFile(FileAndContent.first(),
+ FileAndContent.second);
+ if (Tool.run(Action.first.get()))
+ AppendError(llvm::Twine("Failed to run action on ") + Path +
+ "\n");
+ },
+ File);
+ }
+ }
+
+ if (!ErrorMsg.empty())
+ return make_string_error(ErrorMsg);
+
+ return llvm::Error::success();
+}
+
+static llvm::cl::opt<unsigned> ExecutorConcurrency(
+ "execute-concurrency",
+ llvm::cl::desc("The number of threads used to process all files in "
+ "parallel. Set to 0 for hardware concurrency."),
+ llvm::cl::init(0));
+
+class AllTUsToolExecutorPlugin : public ToolExecutorPlugin {
+public:
+ llvm::Expected<std::unique_ptr<ToolExecutor>>
+ create(CommonOptionsParser &OptionsParser) override {
+ if (OptionsParser.getSourcePathList().empty())
+ return make_string_error(
+ "[AllTUsToolExecutorPlugin] Please provide a directory/file path in "
+ "the compilation database.");
+ return llvm::make_unique<AllTUsToolExecutor>(std::move(OptionsParser),
+ ExecutorConcurrency);
+ }
+};
+
+static ToolExecutorPluginRegistry::Add<AllTUsToolExecutorPlugin>
+ X("all-TUs", "Runs FrontendActions on all TUs in the compilation database. "
+ "Tool results are stored in memory.");
+
+// This anchor is used to force the linker to link in the generated object file
+// and thus register the plugin.
+volatile int AllTUsToolExecutorAnchorSource = 0;
+
+} // end namespace tooling
+} // end namespace clang
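
A short usage sketch for the new executor, assuming a compile_commands.json under /path/to/build (a placeholder) and ToolExecutor's usual execute(std::unique_ptr<FrontendActionFactory>) convenience overload:

  // Sketch only: run a syntax-only action over every TU in the database.
  std::string Err;
  auto DB = CompilationDatabase::loadFromDirectory("/path/to/build", Err);
  AllTUsToolExecutor Executor(*DB, /*ThreadCount=*/0); // 0 = hardware concurrency
  if (llvm::Error E =
          Executor.execute(newFrontendActionFactory<SyntaxOnlyAction>()))
    llvm::errs() << llvm::toString(std::move(E)) << "\n";
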
diff --git a/lib/Tooling/ArgumentsAdjusters.cpp b/lib/Tooling/ArgumentsAdjusters.cpp
index 7068ec2c4010..c8e9c167422e 100644
--- a/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/lib/Tooling/ArgumentsAdjusters.cpp
@@ -1,4 +1,4 @@
-//===--- ArgumentsAdjusters.cpp - Command line arguments adjuster ---------===//
+//===- ArgumentsAdjusters.cpp - Command line arguments adjuster -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,6 +13,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/ArgumentsAdjusters.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstddef>
namespace clang {
namespace tooling {
@@ -21,7 +24,7 @@ namespace tooling {
ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
- for (size_t i = 0, e = Args.size(); i != e; ++i) {
+ for (size_t i = 0, e = Args.size(); i < e; ++i) {
StringRef Arg = Args[i];
// FIXME: Remove options that generate output.
if (!Arg.startswith("-fcolor-diagnostics") &&
diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt
index ee681bbb45ae..031d8b51dec4 100644
--- a/lib/Tooling/CMakeLists.txt
+++ b/lib/Tooling/CMakeLists.txt
@@ -4,16 +4,19 @@ set(LLVM_LINK_COMPONENTS
)
add_subdirectory(Core)
+add_subdirectory(Inclusions)
add_subdirectory(Refactoring)
add_subdirectory(ASTDiff)
add_clang_library(clangTooling
+ AllTUsExecution.cpp
ArgumentsAdjusters.cpp
CommonOptionsParser.cpp
CompilationDatabase.cpp
Execution.cpp
FileMatchTrie.cpp
FixIt.cpp
+ InterpolatingCompilationDatabase.cpp
JSONCompilationDatabase.cpp
Refactoring.cpp
RefactoringCallbacks.cpp
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index 92b76b157dcb..31a769fa21e5 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -1,4 +1,4 @@
-//===--- CompilationDatabase.cpp - ----------------------------------------===//
+//===- CompilationDatabase.cpp --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,7 +17,9 @@
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -26,20 +28,38 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Arg.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iterator>
+#include <memory>
#include <sstream>
+#include <string>
#include <system_error>
+#include <utility>
+#include <vector>
+
using namespace clang;
using namespace tooling;
LLVM_INSTANTIATE_REGISTRY(CompilationDatabasePluginRegistry)
-CompilationDatabase::~CompilationDatabase() {}
+CompilationDatabase::~CompilationDatabase() = default;
std::unique_ptr<CompilationDatabase>
CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
@@ -121,20 +141,20 @@ std::vector<CompileCommand> CompilationDatabase::getAllCompileCommands() const {
return Result;
}
-CompilationDatabasePlugin::~CompilationDatabasePlugin() {}
+CompilationDatabasePlugin::~CompilationDatabasePlugin() = default;
namespace {
+
// Helper for recursively searching through a chain of actions and collecting
// all inputs, direct and indirect, of compile jobs.
struct CompileJobAnalyzer {
+ SmallVector<std::string, 2> Inputs;
+
void run(const driver::Action *A) {
runImpl(A, false);
}
- SmallVector<std::string, 2> Inputs;
-
private:
-
void runImpl(const driver::Action *A, bool Collect) {
bool CollectChildren = Collect;
switch (A->getKind()) {
@@ -142,16 +162,16 @@ private:
CollectChildren = true;
break;
- case driver::Action::InputClass: {
+ case driver::Action::InputClass:
if (Collect) {
- const driver::InputAction *IA = cast<driver::InputAction>(A);
+ const auto *IA = cast<driver::InputAction>(A);
Inputs.push_back(IA->getInputArg().getSpelling());
}
- } break;
+ break;
default:
// Don't care about others
- ;
+ break;
}
for (const driver::Action *AI : A->inputs())
@@ -168,7 +188,7 @@ public:
void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) override {
- if (Info.getID() == clang::diag::warn_drv_input_file_unused) {
+ if (Info.getID() == diag::warn_drv_input_file_unused) {
// Arg 1 for this diagnostic is the option that didn't get used.
UnusedInputs.push_back(Info.getArgStdStr(0));
} else if (DiagLevel >= DiagnosticsEngine::Error) {
@@ -186,18 +206,21 @@ public:
// S2 in Arr where S1 == S2?"
struct MatchesAny {
MatchesAny(ArrayRef<std::string> Arr) : Arr(Arr) {}
+
bool operator() (StringRef S) {
for (const std::string *I = Arr.begin(), *E = Arr.end(); I != E; ++I)
if (*I == S)
return true;
return false;
}
+
private:
ArrayRef<std::string> Arr;
};
+
} // namespace
-/// \brief Strips any positional args and possible argv[0] from a command-line
+/// Strips any positional args and possible argv[0] from a command-line
/// provided by the user to construct a FixedCompilationDatabase.
///
/// FixedCompilationDatabase requires a command line to be in this format as it
@@ -224,7 +247,7 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
TextDiagnosticPrinter DiagnosticPrinter(Output, &*DiagOpts);
UnusedInputDiagConsumer DiagClient(DiagnosticPrinter);
DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()),
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()),
&*DiagOpts, &DiagClient, false);
// The clang executable path isn't required since the jobs the driver builds
@@ -362,11 +385,11 @@ class FixedCompilationDatabasePlugin : public CompilationDatabasePlugin {
}
};
+} // namespace
+
static CompilationDatabasePluginRegistry::Add<FixedCompilationDatabasePlugin>
X("fixed-compilation-database", "Reads plain-text flags file");
-} // namespace
-
namespace clang {
namespace tooling {
@@ -375,5 +398,5 @@ namespace tooling {
extern volatile int JSONAnchorSource;
static int LLVM_ATTRIBUTE_UNUSED JSONAnchorDest = JSONAnchorSource;
-} // end namespace tooling
-} // end namespace clang
+} // namespace tooling
+} // namespace clang
diff --git a/lib/Tooling/Core/CMakeLists.txt b/lib/Tooling/Core/CMakeLists.txt
index b3024793580f..f7f423c52228 100644
--- a/lib/Tooling/Core/CMakeLists.txt
+++ b/lib/Tooling/Core/CMakeLists.txt
@@ -1,9 +1,9 @@
set(LLVM_LINK_COMPONENTS support)
add_clang_library(clangToolingCore
+ Diagnostic.cpp
Lookup.cpp
Replacement.cpp
- Diagnostic.cpp
LINK_LIBS
clangAST
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
index 6d4f3a340142..67e2dcfd73c1 100644
--- a/lib/Tooling/Core/Replacement.cpp
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -1,4 +1,4 @@
-//===--- Replacement.cpp - Framework for clang refactoring tools ----------===//
+//===- Replacement.cpp - Framework for clang refactoring tools ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,21 +12,34 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Core/Replacement.h"
-
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/Lexer.h"
+#include "clang/Rewrite/Core/RewriteBuffer.h"
#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/raw_os_ostream.h"
-
-namespace clang {
-namespace tooling {
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <limits>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace tooling;
static const char * const InvalidLocation = "";
@@ -80,6 +93,9 @@ std::string Replacement::toString() const {
return Stream.str();
}
+namespace clang {
+namespace tooling {
+
bool operator<(const Replacement &LHS, const Replacement &RHS) {
if (LHS.getOffset() != RHS.getOffset())
return LHS.getOffset() < RHS.getOffset();
@@ -99,6 +115,9 @@ bool operator==(const Replacement &LHS, const Replacement &RHS) {
LHS.getReplacementText() == RHS.getReplacementText();
}
+} // namespace tooling
+} // namespace clang
+
void Replacement::setFromSourceLocation(const SourceManager &Sources,
SourceLocation Start, unsigned Length,
StringRef ReplacementText) {
@@ -201,7 +220,7 @@ Replacements Replacements::getCanonicalReplacements() const {
llvm::Expected<Replacements>
Replacements::mergeIfOrderIndependent(const Replacement &R) const {
Replacements Rs(R);
- // A Replacements set containg a single replacement that is `R` referring to
+ // A Replacements set containing a single replacement that is `R` referring to
// the code after the existing replacements `Replaces` are applied.
Replacements RsShiftedByReplaces(getReplacementInChangedCode(R));
// A Replacements set that is `Replaces` referring to the code after `R` is
@@ -231,7 +250,7 @@ llvm::Error Replacements::add(const Replacement &R) {
replacement_error::wrong_file_path, R, *Replaces.begin());
// Special-case header insertions.
- if (R.getOffset() == UINT_MAX) {
+ if (R.getOffset() == std::numeric_limits<unsigned>::max()) {
Replaces.insert(R);
return llvm::Error::success();
}
@@ -396,6 +415,7 @@ public:
// Returns 'true' if an element from the second set should be merged next.
bool mergeSecond() const { return MergeSecond; }
+
int deltaFirst() const { return DeltaFirst; }
Replacement asReplacement() const { return {FilePath, Offset, Length, Text}; }
@@ -463,11 +483,11 @@ Replacements Replacements::merge(const Replacements &ReplacesToMerge) const {
// Returns a set of non-overlapping and sorted ranges that is equivalent to
// \p Ranges.
static std::vector<Range> combineAndSortRanges(std::vector<Range> Ranges) {
- std::sort(Ranges.begin(), Ranges.end(),
- [](const Range &LHS, const Range &RHS) {
- if (LHS.getOffset() != RHS.getOffset())
- return LHS.getOffset() < RHS.getOffset();
- return LHS.getLength() < RHS.getLength();
+ llvm::sort(Ranges.begin(), Ranges.end(),
+ [](const Range &LHS, const Range &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+ return LHS.getLength() < RHS.getLength();
});
std::vector<Range> Result;
for (const auto &R : Ranges) {
@@ -485,6 +505,9 @@ static std::vector<Range> combineAndSortRanges(std::vector<Range> Ranges) {
return Result;
}
+namespace clang {
+namespace tooling {
+
std::vector<Range>
calculateRangesAfterReplacements(const Replacements &Replaces,
const std::vector<Range> &Ranges) {
@@ -508,10 +531,13 @@ calculateRangesAfterReplacements(const Replacements &Replaces,
return FakeReplaces.merge(Replaces).getAffectedRanges();
}
+} // namespace tooling
+} // namespace clang
+
std::vector<Range> Replacements::getAffectedRanges() const {
std::vector<Range> ChangedRanges;
int Shift = 0;
- for (const Replacement &R : Replaces) {
+ for (const auto &R : Replaces) {
unsigned Offset = R.getOffset() + Shift;
unsigned Length = R.getReplacementText().size();
Shift += Length - R.getLength();
@@ -522,7 +548,7 @@ std::vector<Range> Replacements::getAffectedRanges() const {
unsigned Replacements::getShiftedCodePosition(unsigned Position) const {
unsigned Offset = 0;
- for (const auto& R : Replaces) {
+ for (const auto &R : Replaces) {
if (R.getOffset() + R.getLength() <= Position) {
Offset += R.getReplacementText().size() - R.getLength();
continue;
@@ -530,7 +556,7 @@ unsigned Replacements::getShiftedCodePosition(unsigned Position) const {
if (R.getOffset() < Position &&
R.getOffset() + R.getReplacementText().size() <= Position) {
Position = R.getOffset() + R.getReplacementText().size();
- if (R.getReplacementText().size() > 0)
+ if (!R.getReplacementText().empty())
Position--;
}
break;
@@ -538,6 +564,9 @@ unsigned Replacements::getShiftedCodePosition(unsigned Position) const {
return Position + Offset;
}
+namespace clang {
+namespace tooling {
+
bool applyAllReplacements(const Replacements &Replaces, Rewriter &Rewrite) {
bool Result = true;
for (auto I = Replaces.rbegin(), E = Replaces.rend(); I != E; ++I) {
@@ -596,5 +625,5 @@ std::map<std::string, Replacements> groupReplacementsByFile(
return Result;
}
-} // end namespace tooling
-} // end namespace clang
+} // namespace tooling
+} // namespace clang
diff --git a/lib/Tooling/Execution.cpp b/lib/Tooling/Execution.cpp
index 498d683f8924..7ae67747acb2 100644
--- a/lib/Tooling/Execution.cpp
+++ b/lib/Tooling/Execution.cpp
@@ -21,10 +21,10 @@ static llvm::cl::opt<std::string>
llvm::cl::init("standalone"));
void InMemoryToolResults::addResult(StringRef Key, StringRef Value) {
- KVResults.push_back({Key.str(), Value.str()});
+ KVResults.push_back({Strings.save(Key), Strings.save(Value)});
}
-std::vector<std::pair<std::string, std::string>>
+std::vector<std::pair<llvm::StringRef, llvm::StringRef>>
InMemoryToolResults::AllKVResults() {
return KVResults;
}
@@ -96,10 +96,13 @@ createExecutorFromCommandLineArgs(int &argc, const char **argv,
}
// This anchor is used to force the linker to link in the generated object file
-// and thus register the StandaloneToolExecutorPlugin.
+// and thus register the StandaloneToolExecutorPlugin etc.
extern volatile int StandaloneToolExecutorAnchorSource;
+extern volatile int AllTUsToolExecutorAnchorSource;
static int LLVM_ATTRIBUTE_UNUSED StandaloneToolExecutorAnchorDest =
StandaloneToolExecutorAnchorSource;
+static int LLVM_ATTRIBUTE_UNUSED AllTUsToolExecutorAnchorDest =
+ AllTUsToolExecutorAnchorSource;
} // end namespace tooling
} // end namespace clang
diff --git a/lib/Tooling/FileMatchTrie.cpp b/lib/Tooling/FileMatchTrie.cpp
index 86ed036e2dbe..202b3f00f3fb 100644
--- a/lib/Tooling/FileMatchTrie.cpp
+++ b/lib/Tooling/FileMatchTrie.cpp
@@ -1,4 +1,4 @@
-//===--- FileMatchTrie.cpp - ----------------------------------------------===//
+//===- FileMatchTrie.cpp --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,31 +13,37 @@
#include "clang/Tooling/FileMatchTrie.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#include <sstream>
+#include <string>
+#include <vector>
+
using namespace clang;
using namespace tooling;
namespace {
-/// \brief Default \c PathComparator using \c llvm::sys::fs::equivalent().
+
+/// Default \c PathComparator using \c llvm::sys::fs::equivalent().
struct DefaultPathComparator : public PathComparator {
bool equivalent(StringRef FileA, StringRef FileB) const override {
return FileA == FileB || llvm::sys::fs::equivalent(FileA, FileB);
}
};
-}
+
+} // namespace
namespace clang {
namespace tooling {
-/// \brief A node of the \c FileMatchTrie.
+
+/// A node of the \c FileMatchTrie.
///
/// Each node has storage for up to one path and a map mapping a path segment to
/// child nodes. The trie starts with an empty root node.
class FileMatchTrieNode {
public:
- /// \brief Inserts 'NewPath' into this trie. \c ConsumedLength denotes
+ /// Inserts 'NewPath' into this trie. \c ConsumedLength denotes
/// the number of \c NewPath's trailing characters already consumed during
/// recursion.
///
@@ -75,7 +81,7 @@ public:
Children[Element].insert(NewPath, ConsumedLength + Element.size() + 1);
}
- /// \brief Tries to find the node under this \c FileMatchTrieNode that best
+ /// Tries to find the node under this \c FileMatchTrieNode that best
/// matches 'FileName'.
///
/// If multiple paths fit 'FileName' equally well, \c IsAmbiguous is set to
@@ -85,7 +91,7 @@ public:
///
/// To find the best matching node for a given path 'p', the
/// \c findEquivalent() function is called recursively for each path segment
- /// (back to fron) of 'p' until a node 'n' is reached that does not ..
+ /// (back to front) of 'p' until a node 'n' is reached that does not ..
/// - .. have children. In this case it is checked
/// whether the stored path is equivalent to 'p'. If yes, the best match is
/// found. Otherwise continue with the parent node as if this node did not
@@ -103,7 +109,7 @@ public:
if (Children.empty()) {
if (Comparator.equivalent(StringRef(Path), FileName))
return StringRef(Path);
- return StringRef();
+ return {};
}
StringRef Element(llvm::sys::path::filename(FileName.drop_back(
ConsumedLength)));
@@ -119,13 +125,13 @@ public:
std::vector<StringRef> AllChildren;
getAll(AllChildren, MatchingChild);
StringRef Result;
- for (unsigned i = 0; i < AllChildren.size(); i++) {
- if (Comparator.equivalent(AllChildren[i], FileName)) {
+ for (const auto &Child : AllChildren) {
+ if (Comparator.equivalent(Child, FileName)) {
if (Result.empty()) {
- Result = AllChildren[i];
+ Result = Child;
} else {
IsAmbiguous = true;
- return StringRef();
+ return {};
}
}
}
@@ -133,7 +139,7 @@ public:
}
private:
- /// \brief Gets all paths under this FileMatchTrieNode.
+ /// Gets all paths under this FileMatchTrieNode.
void getAll(std::vector<StringRef> &Results,
llvm::StringMap<FileMatchTrieNode>::const_iterator Except) const {
if (Path.empty())
@@ -158,14 +164,15 @@ private:
// The children of this node stored in a map based on the next path segment.
llvm::StringMap<FileMatchTrieNode> Children;
};
-} // end namespace tooling
-} // end namespace clang
+
+} // namespace tooling
+} // namespace clang
FileMatchTrie::FileMatchTrie()
- : Root(new FileMatchTrieNode), Comparator(new DefaultPathComparator()) {}
+ : Root(new FileMatchTrieNode), Comparator(new DefaultPathComparator()) {}
FileMatchTrie::FileMatchTrie(PathComparator *Comparator)
- : Root(new FileMatchTrieNode), Comparator(Comparator) {}
+ : Root(new FileMatchTrieNode), Comparator(Comparator) {}
FileMatchTrie::~FileMatchTrie() {
delete Root;
@@ -179,7 +186,7 @@ StringRef FileMatchTrie::findEquivalent(StringRef FileName,
raw_ostream &Error) const {
if (llvm::sys::path::is_relative(FileName)) {
Error << "Cannot resolve relative paths";
- return StringRef();
+ return {};
}
bool IsAmbiguous = false;
StringRef Result = Root->findEquivalent(*Comparator, FileName, IsAmbiguous);
diff --git a/lib/Tooling/Inclusions/CMakeLists.txt b/lib/Tooling/Inclusions/CMakeLists.txt
new file mode 100644
index 000000000000..00afb50f3a69
--- /dev/null
+++ b/lib/Tooling/Inclusions/CMakeLists.txt
@@ -0,0 +1,12 @@
+set(LLVM_LINK_COMPONENTS support)
+
+add_clang_library(clangToolingInclusions
+ HeaderIncludes.cpp
+ IncludeStyle.cpp
+
+ LINK_LIBS
+ clangBasic
+ clangLex
+ clangRewrite
+ clangToolingCore
+ )
diff --git a/lib/Tooling/Inclusions/HeaderIncludes.cpp b/lib/Tooling/Inclusions/HeaderIncludes.cpp
new file mode 100644
index 000000000000..99c0866a6855
--- /dev/null
+++ b/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -0,0 +1,330 @@
+//===--- HeaderIncludes.cpp - Insert/Delete #includes --*- C++ -*----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Inclusions/HeaderIncludes.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+
+namespace clang {
+namespace tooling {
+namespace {
+
+LangOptions createLangOpts() {
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = 1;
+ LangOpts.CPlusPlus11 = 1;
+ LangOpts.CPlusPlus14 = 1;
+ LangOpts.LineComment = 1;
+ LangOpts.CXXOperatorNames = 1;
+ LangOpts.Bool = 1;
+ LangOpts.ObjC1 = 1;
+ LangOpts.ObjC2 = 1;
+ LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
+ LangOpts.DeclSpecKeyword = 1; // To get __declspec.
+ LangOpts.WChar = 1; // To get wchar_t
+ return LangOpts;
+}
+
+// Returns the offset after skipping a sequence of tokens, matched by \p
+// GetOffsetAfterSequence, from the start of the code.
+// \p GetOffsetAfterSequence should be a function that matches a sequence of
+// tokens and returns an offset after the sequence.
+unsigned getOffsetAfterTokenSequence(
+ StringRef FileName, StringRef Code, const IncludeStyle &Style,
+ llvm::function_ref<unsigned(const SourceManager &, Lexer &, Token &)>
+ GetOffsetAfterSequence) {
+ SourceManagerForFile VirtualSM(FileName, Code);
+ SourceManager &SM = VirtualSM.get();
+ Lexer Lex(SM.getMainFileID(), SM.getBuffer(SM.getMainFileID()), SM,
+ createLangOpts());
+ Token Tok;
+ // Get the first token.
+ Lex.LexFromRawLexer(Tok);
+ return GetOffsetAfterSequence(SM, Lex, Tok);
+}
+
+// Check if a sequence of tokens is like "#<Name> <raw_identifier>". If it is,
+// \p Tok will be the token after this directive; otherwise, it can be any token
+// after the given \p Tok (including \p Tok).
+bool checkAndConsumeDirectiveWithName(Lexer &Lex, StringRef Name, Token &Tok) {
+ bool Matched = Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
+ Tok.is(tok::raw_identifier) &&
+ Tok.getRawIdentifier() == Name && !Lex.LexFromRawLexer(Tok) &&
+ Tok.is(tok::raw_identifier);
+ if (Matched)
+ Lex.LexFromRawLexer(Tok);
+ return Matched;
+}
+
+void skipComments(Lexer &Lex, Token &Tok) {
+ while (Tok.is(tok::comment))
+ if (Lex.LexFromRawLexer(Tok))
+ return;
+}
+
+// Returns the offset after header guard directives and any comments
+// before/after header guards. If no header guard is present in the code, this
+// returns the offset after skipping all comments from the start of the
+// code.
+unsigned getOffsetAfterHeaderGuardsAndComments(StringRef FileName,
+ StringRef Code,
+ const IncludeStyle &Style) {
+ return getOffsetAfterTokenSequence(
+ FileName, Code, Style,
+ [](const SourceManager &SM, Lexer &Lex, Token Tok) {
+ skipComments(Lex, Tok);
+ unsigned InitialOffset = SM.getFileOffset(Tok.getLocation());
+ if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
+ skipComments(Lex, Tok);
+ if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
+ return SM.getFileOffset(Tok.getLocation());
+ }
+ return InitialOffset;
+ });
+}
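+// For illustration (hypothetical header, not part of this commit): given
+//   // license text          <- skipped by skipComments()
+//   #ifndef HYPOTHETICAL_FOO_H
+//   #define HYPOTHETICAL_FOO_H
+//   int FirstDecl;
+// the returned offset is that of 'int', the first token after the guard.
+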
+
+// Check if a sequence of tokens is like
+// "#include ("header.h" | <header.h>)".
+// If it is, \p Tok will be the token after this directive; otherwise, it can be
+// any token after the given \p Tok (including \p Tok).
+bool checkAndConsumeInclusiveDirective(Lexer &Lex, Token &Tok) {
+ auto Matched = [&]() {
+ Lex.LexFromRawLexer(Tok);
+ return true;
+ };
+ if (Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
+ Tok.is(tok::raw_identifier) && Tok.getRawIdentifier() == "include") {
+ if (Lex.LexFromRawLexer(Tok))
+ return false;
+ if (Tok.is(tok::string_literal))
+ return Matched();
+ if (Tok.is(tok::less)) {
+ while (!Lex.LexFromRawLexer(Tok) && Tok.isNot(tok::greater)) {
+ }
+ if (Tok.is(tok::greater))
+ return Matched();
+ }
+ }
+ return false;
+}
+
+// Returns the offset of the last #include directive after which a new
+// #include can be inserted. This ignores #include's after the #include block(s)
+// at the beginning of a file to avoid inserting headers into code sections
+// where new #include's should not be added by default.
+// These code sections include:
+// - raw string literals (containing #include).
+// - #if blocks.
+// - Special #include's among declarations (e.g. functions).
+//
+// If there is no #include after which a new #include can be inserted, this
+// returns the offset after skipping all comments from the start of the code.
+// Inserting after an #include is not allowed if it comes after code that is
+// not an #include (e.g. a preprocessing directive that is not an #include, or
+// declarations).
+unsigned getMaxHeaderInsertionOffset(StringRef FileName, StringRef Code,
+ const IncludeStyle &Style) {
+ return getOffsetAfterTokenSequence(
+ FileName, Code, Style,
+ [](const SourceManager &SM, Lexer &Lex, Token Tok) {
+ skipComments(Lex, Tok);
+ unsigned MaxOffset = SM.getFileOffset(Tok.getLocation());
+ while (checkAndConsumeInclusiveDirective(Lex, Tok))
+ MaxOffset = SM.getFileOffset(Tok.getLocation());
+ return MaxOffset;
+ });
+}
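+// For illustration (hypothetical file, not part of this commit): given
+//   #include "a.h"
+//   #include <b.h>
+//   void f();
+//   #include "c.h"
+// the scan consumes the first two directives and returns the offset of
+// 'void'; the trailing #include "c.h" is never a valid insertion point.
+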
+
+inline StringRef trimInclude(StringRef IncludeName) {
+ return IncludeName.trim("\"<>");
+}
+
+const char IncludeRegexPattern[] =
+ R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))";
+
+} // anonymous namespace
+
+IncludeCategoryManager::IncludeCategoryManager(const IncludeStyle &Style,
+ StringRef FileName)
+ : Style(Style), FileName(FileName) {
+ FileStem = llvm::sys::path::stem(FileName);
+ for (const auto &Category : Style.IncludeCategories)
+ CategoryRegexs.emplace_back(Category.Regex, llvm::Regex::IgnoreCase);
+ IsMainFile = FileName.endswith(".c") || FileName.endswith(".cc") ||
+ FileName.endswith(".cpp") || FileName.endswith(".c++") ||
+ FileName.endswith(".cxx") || FileName.endswith(".m") ||
+ FileName.endswith(".mm");
+}
+
+int IncludeCategoryManager::getIncludePriority(StringRef IncludeName,
+ bool CheckMainHeader) const {
+ int Ret = INT_MAX;
+ for (unsigned i = 0, e = CategoryRegexs.size(); i != e; ++i)
+ if (CategoryRegexs[i].match(IncludeName)) {
+ Ret = Style.IncludeCategories[i].Priority;
+ break;
+ }
+ if (CheckMainHeader && IsMainFile && Ret > 0 && isMainHeader(IncludeName))
+ Ret = 0;
+ return Ret;
+}
+
+bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
+ if (!IncludeName.startswith("\""))
+ return false;
+ StringRef HeaderStem =
+ llvm::sys::path::stem(IncludeName.drop_front(1).drop_back(1));
+ if (FileStem.startswith(HeaderStem) ||
+ FileStem.startswith_lower(HeaderStem)) {
+ llvm::Regex MainIncludeRegex((HeaderStem + Style.IncludeIsMainRegex).str(),
+ llvm::Regex::IgnoreCase);
+ if (MainIncludeRegex.match(FileStem))
+ return true;
+ }
+ return false;
+}
+
+HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code,
+ const IncludeStyle &Style)
+ : FileName(FileName), Code(Code), FirstIncludeOffset(-1),
+ MinInsertOffset(
+ getOffsetAfterHeaderGuardsAndComments(FileName, Code, Style)),
+ MaxInsertOffset(MinInsertOffset +
+ getMaxHeaderInsertionOffset(
+ FileName, Code.drop_front(MinInsertOffset), Style)),
+ Categories(Style, FileName),
+ IncludeRegex(llvm::Regex(IncludeRegexPattern)) {
+ // Add 0 for main header and INT_MAX for headers that are not in any
+ // category.
+ Priorities = {0, INT_MAX};
+ for (const auto &Category : Style.IncludeCategories)
+ Priorities.insert(Category.Priority);
+ SmallVector<StringRef, 32> Lines;
+ Code.drop_front(MinInsertOffset).split(Lines, "\n");
+
+ unsigned Offset = MinInsertOffset;
+ unsigned NextLineOffset;
+ SmallVector<StringRef, 4> Matches;
+ for (auto Line : Lines) {
+ NextLineOffset = std::min(Code.size(), Offset + Line.size() + 1);
+ if (IncludeRegex.match(Line, &Matches)) {
+ // If this is the last line without trailing newline, we need to make
+ // sure we don't delete across the file boundary.
+ addExistingInclude(
+ Include(Matches[2],
+ tooling::Range(
+ Offset, std::min(Line.size() + 1, Code.size() - Offset))),
+ NextLineOffset);
+ }
+ Offset = NextLineOffset;
+ }
+
+ // Populate CategoryEndOffsets:
+ // - Ensure that CategoryEndOffset[Highest] is always populated.
+ // - If CategoryEndOffset[Priority] isn't set, use the next higher value
+ // that is set, up to CategoryEndOffset[Highest].
+ auto Highest = Priorities.begin();
+ if (CategoryEndOffsets.find(*Highest) == CategoryEndOffsets.end()) {
+ if (FirstIncludeOffset >= 0)
+ CategoryEndOffsets[*Highest] = FirstIncludeOffset;
+ else
+ CategoryEndOffsets[*Highest] = MinInsertOffset;
+ }
+ // By this point, CategoryEndOffset[Highest] is always set appropriately:
+ // - to an appropriate location before/after existing #includes, or
+ // - to right after the header guard, or
+ // - to the beginning of the file.
+ for (auto I = ++Priorities.begin(), E = Priorities.end(); I != E; ++I)
+ if (CategoryEndOffsets.find(*I) == CategoryEndOffsets.end())
+ CategoryEndOffsets[*I] = CategoryEndOffsets[*std::prev(I)];
+}
+
+// \p NextLineOffset: the start of the line following this include directive.
+void HeaderIncludes::addExistingInclude(Include IncludeToAdd,
+ unsigned NextLineOffset) {
+ auto Iter =
+ ExistingIncludes.try_emplace(trimInclude(IncludeToAdd.Name)).first;
+ Iter->second.push_back(std::move(IncludeToAdd));
+ auto &CurInclude = Iter->second.back();
+ // CurInclude.Name is the header name with quotes or angle brackets.
+ // Only record the offset of the current #include if we can insert after it.
+ if (CurInclude.R.getOffset() <= MaxInsertOffset) {
+ int Priority = Categories.getIncludePriority(
+ CurInclude.Name, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ CategoryEndOffsets[Priority] = NextLineOffset;
+ IncludesByPriority[Priority].push_back(&CurInclude);
+ if (FirstIncludeOffset < 0)
+ FirstIncludeOffset = CurInclude.R.getOffset();
+ }
+}
+
+llvm::Optional<tooling::Replacement>
+HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
+ assert(IncludeName == trimInclude(IncludeName));
+ // If a <header> ("header") already exists in the code, "header" (<header>)
+ // with the other quotation style will still be inserted.
+ // FIXME: figure out if this is the best behavior.
+ auto It = ExistingIncludes.find(IncludeName);
+ if (It != ExistingIncludes.end())
+ for (const auto &Inc : It->second)
+ if ((IsAngled && StringRef(Inc.Name).startswith("<")) ||
+ (!IsAngled && StringRef(Inc.Name).startswith("\"")))
+ return llvm::None;
+ std::string Quoted = IsAngled ? ("<" + IncludeName + ">").str()
+ : ("\"" + IncludeName + "\"").str();
+ StringRef QuotedName = Quoted;
+ int Priority = Categories.getIncludePriority(
+ QuotedName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ auto CatOffset = CategoryEndOffsets.find(Priority);
+ assert(CatOffset != CategoryEndOffsets.end());
+ unsigned InsertOffset = CatOffset->second; // Fallback offset.
+ auto Iter = IncludesByPriority.find(Priority);
+ if (Iter != IncludesByPriority.end()) {
+ for (const auto *Inc : Iter->second) {
+ if (QuotedName < Inc->Name) {
+ InsertOffset = Inc->R.getOffset();
+ break;
+ }
+ }
+ }
+ assert(InsertOffset <= Code.size());
+ std::string NewInclude = ("#include " + QuotedName + "\n").str();
+ // When inserting a header at the end of the code, prepend '\n' to the new
+ // #include if the code does not already end with '\n'.
+ // FIXME: when inserting multiple #includes at the end of code, only one
+ // newline should be added.
+ if (InsertOffset == Code.size() && (!Code.empty() && Code.back() != '\n'))
+ NewInclude = "\n" + NewInclude;
+ return tooling::Replacement(FileName, InsertOffset, 0, NewInclude);
+}
+
+tooling::Replacements HeaderIncludes::remove(llvm::StringRef IncludeName,
+ bool IsAngled) const {
+ assert(IncludeName == trimInclude(IncludeName));
+ tooling::Replacements Result;
+ auto Iter = ExistingIncludes.find(IncludeName);
+ if (Iter == ExistingIncludes.end())
+ return Result;
+ for (const auto &Inc : Iter->second) {
+ if ((IsAngled && StringRef(Inc.Name).startswith("\"")) ||
+ (!IsAngled && StringRef(Inc.Name).startswith("<")))
+ continue;
+ llvm::Error Err = Result.add(tooling::Replacement(
+ FileName, Inc.R.getOffset(), Inc.R.getLength(), ""));
+ if (Err) {
+ auto ErrMsg = "Unexpected conflicts in #include deletions: " +
+ llvm::toString(std::move(Err));
+ llvm_unreachable(ErrMsg.c_str());
+ }
+ }
+ return Result;
+}
+
+} // namespace tooling
+} // namespace clang
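
A minimal usage sketch of the new API (the file name and contents are
hypothetical; error handling elided):

  #include "clang/Tooling/Inclusions/HeaderIncludes.h"
  #include "llvm/Support/raw_ostream.h"

  void demo(llvm::StringRef Code, const clang::tooling::IncludeStyle &Style) {
    // Parse the existing #include blocks of a hypothetical foo.cpp.
    clang::tooling::HeaderIncludes Includes("foo.cpp", Code, Style);
    // Replacement inserting '#include <vector>' into the matching category,
    // or llvm::None if an angled vector include already exists.
    if (auto R = Includes.insert("vector", /*IsAngled=*/true))
      llvm::outs() << R->toString() << "\n";
    // Replacements deleting every '#include "foo.h"'.
    clang::tooling::Replacements Removals =
        Includes.remove("foo.h", /*IsAngled=*/false);
    (void)Removals;
  }
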
diff --git a/lib/Tooling/Inclusions/IncludeStyle.cpp b/lib/Tooling/Inclusions/IncludeStyle.cpp
new file mode 100644
index 000000000000..3597710f1f6e
--- /dev/null
+++ b/lib/Tooling/Inclusions/IncludeStyle.cpp
@@ -0,0 +1,31 @@
+//===--- IncludeStyle.cpp - Style of C++ #include directives -----*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Inclusions/IncludeStyle.h"
+
+using clang::tooling::IncludeStyle;
+
+namespace llvm {
+namespace yaml {
+
+void MappingTraits<IncludeStyle::IncludeCategory>::mapping(
+ IO &IO, IncludeStyle::IncludeCategory &Category) {
+ IO.mapOptional("Regex", Category.Regex);
+ IO.mapOptional("Priority", Category.Priority);
+}
+
+void ScalarEnumerationTraits<IncludeStyle::IncludeBlocksStyle>::enumeration(
+ IO &IO, IncludeStyle::IncludeBlocksStyle &Value) {
+ IO.enumCase(Value, "Preserve", IncludeStyle::IBS_Preserve);
+ IO.enumCase(Value, "Merge", IncludeStyle::IBS_Merge);
+ IO.enumCase(Value, "Regroup", IncludeStyle::IBS_Regroup);
+}
+
+} // namespace yaml
+} // namespace llvm
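
For reference, a sketch of building the equivalent style in code rather than
through YAML (values hypothetical; clang-format reads the same fields from
the IncludeCategories and IncludeBlocks keys):

  #include "clang/Tooling/Inclusions/IncludeStyle.h"

  clang::tooling::IncludeStyle makeStyle() {
    clang::tooling::IncludeStyle Style;
    Style.IncludeBlocks = clang::tooling::IncludeStyle::IBS_Regroup;
    // Angled *.h headers first, everything else second.
    Style.IncludeCategories = {{"^<.*\\.h>", 1}, {".*", 2}};
    return Style;
  }
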
diff --git a/lib/Tooling/InterpolatingCompilationDatabase.cpp b/lib/Tooling/InterpolatingCompilationDatabase.cpp
new file mode 100644
index 000000000000..bc564584bd01
--- /dev/null
+++ b/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -0,0 +1,458 @@
+//===- InterpolatingCompilationDatabase.cpp ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// InterpolatingCompilationDatabase wraps another CompilationDatabase and
+// attempts to heuristically determine appropriate compile commands for files
+// that are not included, such as headers or newly created files.
+//
+// Motivating cases include:
+// Header files that live next to their implementation files. These typically
+// share a base filename. (libclang/CXString.h, libclang/CXString.cpp).
+// Some projects keep headers in a separate include/ tree from their sources.
+// Filenames still typically match, maybe other path segments too.
+// (include/llvm/IR/Use.h, lib/IR/Use.cc).
+// Matches are sometimes only approximate (Sema.h, SemaDecl.cpp). This goes
+// for directories too (Support/Unix/Process.inc, lib/Support/Process.cpp).
+// Even if we can't find a "right" compile command, even a random one from
+// the project will tend to get important flags like -I and -x right.
+//
+// We "borrow" the compile command for the closest available file:
+// - points are awarded if the filename matches (ignoring extension)
+// - points are awarded if the directory structure matches
+// - ties are broken by length of path prefix match
+//
+// The compile command is adjusted, replacing the filename and removing output
+// file arguments. The -x and -std flags may be affected too.
+//
+// Source language is a tricky issue: is it OK to use a .c file's command
+// for building a .cc file? What language is a .h file in?
+// - We only consider compile commands for c-family languages as candidates.
+// - For files whose language is implied by the filename (e.g. .m, .hpp)
+// we prefer candidates from the same language.
+// If we must cross languages, we drop any -x and -std flags.
+// - For .h files, candidates from any c-family language are acceptable.
+// We use the candidate's language, inserting e.g. -x c++-header.
+//
+// This class is only useful when wrapping databases that can enumerate all
+// their compile commands. If getAllFilenames() is empty, no inference occurs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Types.h"
+#include "clang/Frontend/LangStandard.h"
+#include "clang/Tooling/CompilationDatabase.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/StringSaver.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+namespace clang {
+namespace tooling {
+namespace {
+using namespace llvm;
+namespace types = clang::driver::types;
+namespace path = llvm::sys::path;
+
+// The length of the prefix these two strings have in common.
+size_t matchingPrefix(StringRef L, StringRef R) {
+ size_t Limit = std::min(L.size(), R.size());
+ for (size_t I = 0; I < Limit; ++I)
+ if (L[I] != R[I])
+ return I;
+ return Limit;
+}
+
+// A comparator for searching SubstringAndIndex vectors with std::equal_range
+// etc. Optional prefix semantics: compares equal if the key is a prefix.
+template <bool Prefix> struct Less {
+ bool operator()(StringRef Key, std::pair<StringRef, size_t> Value) const {
+ StringRef V = Prefix ? Value.first.substr(0, Key.size()) : Value.first;
+ return Key < V;
+ }
+ bool operator()(std::pair<StringRef, size_t> Value, StringRef Key) const {
+ StringRef V = Prefix ? Value.first.substr(0, Key.size()) : Value.first;
+ return V < Key;
+ }
+};
+
+// Infer the type from the filename, setting *Certain to whether the guess is
+// reliable. *.h is inferred as a C header, but not with certainty.
+types::ID guessType(StringRef Filename, bool *Certain = nullptr) {
+ // path::extension is ".cpp", lookupTypeForExtension wants "cpp".
+ auto Lang =
+ types::lookupTypeForExtension(path::extension(Filename).substr(1));
+ if (Certain)
+ *Certain = Lang != types::TY_CHeader && Lang != types::TY_INVALID;
+ return Lang;
+}
+
+// Return Lang as one of the canonical supported types.
+// e.g. c-header --> c; fortran --> TY_INVALID
+static types::ID foldType(types::ID Lang) {
+ switch (Lang) {
+ case types::TY_C:
+ case types::TY_CHeader:
+ return types::TY_C;
+ case types::TY_ObjC:
+ case types::TY_ObjCHeader:
+ return types::TY_ObjC;
+ case types::TY_CXX:
+ case types::TY_CXXHeader:
+ return types::TY_CXX;
+ case types::TY_ObjCXX:
+ case types::TY_ObjCXXHeader:
+ return types::TY_ObjCXX;
+ default:
+ return types::TY_INVALID;
+ }
+}
+
+// A CompileCommand that can be applied to another file.
+struct TransferableCommand {
+ // Flags that should not apply to all files are stripped from CommandLine.
+ CompileCommand Cmd;
+ // Language detected from -x or the filename.
+ types::ID Type = types::TY_INVALID;
+ // Standard specified by -std.
+ LangStandard::Kind Std = LangStandard::lang_unspecified;
+
+ TransferableCommand(CompileCommand C)
+ : Cmd(std::move(C)), Type(guessType(Cmd.Filename)) {
+ std::vector<std::string> NewArgs = {Cmd.CommandLine.front()};
+ // Parse the old args in order to strip out and record unwanted flags.
+ auto OptTable = clang::driver::createDriverOptTable();
+ std::vector<const char *> Argv;
+ for (unsigned I = 1; I < Cmd.CommandLine.size(); ++I)
+ Argv.push_back(Cmd.CommandLine[I].c_str());
+ unsigned MissingI, MissingC;
+ auto ArgList = OptTable->ParseArgs(Argv, MissingI, MissingC);
+ for (const auto *Arg : ArgList) {
+ const auto &option = Arg->getOption();
+ // Strip input and output files.
+ if (option.matches(clang::driver::options::OPT_INPUT) ||
+ option.matches(clang::driver::options::OPT_o)) {
+ continue;
+ }
+ // Strip -x, but record the overridden language.
+ if (option.matches(clang::driver::options::OPT_x)) {
+ for (const char *Value : Arg->getValues())
+ Type = types::lookupTypeForTypeSpecifier(Value);
+ continue;
+ }
+ // Strip --std, but record the value.
+ if (option.matches(clang::driver::options::OPT_std_EQ)) {
+ for (const char *Value : Arg->getValues()) {
+ Std = llvm::StringSwitch<LangStandard::Kind>(Value)
+#define LANGSTANDARD(id, name, lang, desc, features) \
+ .Case(name, LangStandard::lang_##id)
+#define LANGSTANDARD_ALIAS(id, alias) .Case(alias, LangStandard::lang_##id)
+#include "clang/Frontend/LangStandards.def"
+ .Default(Std);
+ }
+ continue;
+ }
+ llvm::opt::ArgStringList ArgStrs;
+ Arg->render(ArgList, ArgStrs);
+ NewArgs.insert(NewArgs.end(), ArgStrs.begin(), ArgStrs.end());
+ }
+ Cmd.CommandLine = std::move(NewArgs);
+
+ if (Std != LangStandard::lang_unspecified) // -std takes precedence over -x.
+ Type = toType(LangStandard::getLangStandardForKind(Std).getLanguage());
+ Type = foldType(Type);
+ }
+
+ // Produce a CompileCommand for \p filename, based on this one.
+ CompileCommand transferTo(StringRef Filename) const {
+ CompileCommand Result = Cmd;
+ Result.Filename = Filename;
+ bool TypeCertain;
+ auto TargetType = guessType(Filename, &TypeCertain);
+ // If the filename doesn't determine the language (.h), transfer with -x.
+ if (!TypeCertain) {
+ TargetType = types::onlyPrecompileType(TargetType) // header?
+ ? types::lookupHeaderTypeForSourceType(Type)
+ : Type;
+ Result.CommandLine.push_back("-x");
+ Result.CommandLine.push_back(types::getTypeName(TargetType));
+ }
+ // --std flag may only be transferred if the language is the same.
+ // We may consider "translating" these, e.g. c++11 -> c11.
+ if (Std != LangStandard::lang_unspecified && foldType(TargetType) == Type) {
+ Result.CommandLine.push_back(
+ "-std=" +
+ std::string(LangStandard::getLangStandardForKind(Std).getName()));
+ }
+ Result.CommandLine.push_back(Filename);
+ return Result;
+ }
+
+private:
+ // Map the language from the --std flag to that of the -x flag.
+ static types::ID toType(InputKind::Language Lang) {
+ switch (Lang) {
+ case InputKind::C:
+ return types::TY_C;
+ case InputKind::CXX:
+ return types::TY_CXX;
+ case InputKind::ObjC:
+ return types::TY_ObjC;
+ case InputKind::ObjCXX:
+ return types::TY_ObjCXX;
+ default:
+ return types::TY_INVALID;
+ }
+ }
+};
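+// Example (hypothetical command): borrowing "clang++ -std=c++11 -I. foo.cpp"
+// for foo.h keeps -I., adds "-x c++-header" since .h does not determine the
+// language, and re-appends -std=c++11 because the folded language matches:
+//   clang++ -I. -x c++-header -std=c++11 foo.h
+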
+
+// CommandIndex does the real work: given a filename, it produces the best
+// matching TransferableCommand by matching filenames. Basic strategy:
+// - Build indexes of each of the substrings we want to look up by.
+// These indexes are just sorted lists of the substrings.
+// - Forward requests to the inner CDB. If it fails, we must pick a proxy.
+// - Each criterion corresponds to a range lookup into the index, so we only
+// need O(log N) string comparisons to determine scores.
+// - We then break ties among the candidates with the highest score.
+class CommandIndex {
+public:
+ CommandIndex(std::vector<TransferableCommand> AllCommands)
+ : Commands(std::move(AllCommands)), Strings(Arena) {
+ // Sort commands by filename for determinism (index is a tiebreaker later).
+ llvm::sort(
+ Commands.begin(), Commands.end(),
+ [](const TransferableCommand &Left, const TransferableCommand &Right) {
+ return Left.Cmd.Filename < Right.Cmd.Filename;
+ });
+ for (size_t I = 0; I < Commands.size(); ++I) {
+ StringRef Path =
+ Strings.save(StringRef(Commands[I].Cmd.Filename).lower());
+ Paths.push_back({Path, I});
+ Stems.emplace_back(sys::path::stem(Path), I);
+ auto Dir = ++sys::path::rbegin(Path), DirEnd = sys::path::rend(Path);
+ for (int J = 0; J < DirectorySegmentsIndexed && Dir != DirEnd; ++J, ++Dir)
+ if (Dir->size() > ShortDirectorySegment) // not trivial ones
+ Components.emplace_back(*Dir, I);
+ }
+ llvm::sort(Paths.begin(), Paths.end());
+ llvm::sort(Stems.begin(), Stems.end());
+ llvm::sort(Components.begin(), Components.end());
+ }
+
+ bool empty() const { return Commands.empty(); }
+
+ // Returns the command that best fits OriginalFilename.
+ // Candidates with PreferLanguage will be chosen over others (unless it's
+ // TY_INVALID, or all candidates are bad).
+ const TransferableCommand &chooseProxy(StringRef OriginalFilename,
+ types::ID PreferLanguage) const {
+ assert(!empty() && "need at least one candidate!");
+ std::string Filename = OriginalFilename.lower();
+ auto Candidates = scoreCandidates(Filename);
+ std::pair<size_t, int> Best =
+ pickWinner(Candidates, Filename, PreferLanguage);
+
+ DEBUG_WITH_TYPE("interpolate",
+ llvm::dbgs()
+ << "interpolate: chose "
+ << Commands[Best.first].Cmd.Filename << " as proxy for "
+ << OriginalFilename << " preferring "
+ << (PreferLanguage == types::TY_INVALID
+ ? "none"
+ : types::getTypeName(PreferLanguage))
+ << " score=" << Best.second << "\n");
+ return Commands[Best.first];
+ }
+
+private:
+ using SubstringAndIndex = std::pair<StringRef, size_t>;
+ // Directory matching parameters: we look at the last two segments of the
+ // parent directory (usually the semantically significant ones in practice).
+ // We search only the last four of each candidate (for efficiency).
+ constexpr static int DirectorySegmentsIndexed = 4;
+ constexpr static int DirectorySegmentsQueried = 2;
+ constexpr static int ShortDirectorySegment = 1; // Only look at longer names.
+
+ // Award points to candidate entries that should be considered for the file.
+ // Returned keys are indexes into paths, and the values are (nonzero) scores.
+ DenseMap<size_t, int> scoreCandidates(StringRef Filename) const {
+ // Decompose Filename into the parts we care about.
+ // /some/path/complicated/project/Interesting.h
+ // [-prefix--][---dir---] [-dir-] [--stem---]
+ StringRef Stem = sys::path::stem(Filename);
+ llvm::SmallVector<StringRef, DirectorySegmentsQueried> Dirs;
+ llvm::StringRef Prefix;
+ auto Dir = ++sys::path::rbegin(Filename),
+ DirEnd = sys::path::rend(Filename);
+ for (int I = 0; I < DirectorySegmentsQueried && Dir != DirEnd; ++I, ++Dir) {
+ if (Dir->size() > ShortDirectorySegment)
+ Dirs.push_back(*Dir);
+ Prefix = Filename.substr(0, Dir - DirEnd);
+ }
+
+ // Now award points based on lookups into our various indexes.
+ DenseMap<size_t, int> Candidates; // Index -> score.
+ auto Award = [&](int Points, ArrayRef<SubstringAndIndex> Range) {
+ for (const auto &Entry : Range)
+ Candidates[Entry.second] += Points;
+ };
+ // Award one point if the file's basename is a prefix of the candidate,
+ // and another if it's an exact match (so exact matches get two points).
+ Award(1, indexLookup</*Prefix=*/true>(Stem, Stems));
+ Award(1, indexLookup</*Prefix=*/false>(Stem, Stems));
+ // For each of the last few directories in the Filename, award a point
+ // if it's present in the candidate.
+ for (StringRef Dir : Dirs)
+ Award(1, indexLookup</*Prefix=*/false>(Dir, Components));
+ // Award one more point if the whole rest of the path matches.
+ if (sys::path::root_directory(Prefix) != Prefix)
+ Award(1, indexLookup</*Prefix=*/true>(Prefix, Paths));
+ return Candidates;
+ }
+
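+ // Worked example (hypothetical paths): for "lib/ir/use.h" (Stem "use",
+ // Dirs {"ir", "lib"}), candidate lib/ir/use.cc scores the stem-prefix and
+ // exact-stem points plus one per matching directory segment, while
+ // lib/ir/value.cc scores only the two directory points.
+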
+ // Pick a single winner from the set of scored candidates.
+ // Returns (index, score).
+ std::pair<size_t, int> pickWinner(const DenseMap<size_t, int> &Candidates,
+ StringRef Filename,
+ types::ID PreferredLanguage) const {
+ struct ScoredCandidate {
+ size_t Index;
+ bool Preferred;
+ int Points;
+ size_t PrefixLength;
+ };
+ // Choose the best candidate by (preferred, points, prefix length, alpha).
+ ScoredCandidate Best = {size_t(-1), false, 0, 0};
+ for (const auto &Candidate : Candidates) {
+ ScoredCandidate S;
+ S.Index = Candidate.first;
+ S.Preferred = PreferredLanguage == types::TY_INVALID ||
+ PreferredLanguage == Commands[S.Index].Type;
+ S.Points = Candidate.second;
+ if (!S.Preferred && Best.Preferred)
+ continue;
+ if (S.Preferred == Best.Preferred) {
+ if (S.Points < Best.Points)
+ continue;
+ if (S.Points == Best.Points) {
+ S.PrefixLength = matchingPrefix(Filename, Paths[S.Index].first);
+ if (S.PrefixLength < Best.PrefixLength)
+ continue;
+ // Hidden heuristics should at least be deterministic!
+ if (S.PrefixLength == Best.PrefixLength)
+ if (S.Index > Best.Index)
+ continue;
+ }
+ }
+ // PrefixLength was only set above if actually needed for a tiebreak.
+ // But it definitely needs to be set to break ties in the future.
+ S.PrefixLength = matchingPrefix(Filename, Paths[S.Index].first);
+ Best = S;
+ }
+ // Edge case: no candidate got any points.
+ // We ignore PreferredLanguage at this point (not ideal).
+ if (Best.Index == size_t(-1))
+ return {longestMatch(Filename, Paths).second, 0};
+ return {Best.Index, Best.Points};
+ }
+
+ // Returns the range within a sorted index that compares equal to Key.
+ // If Prefix is true, it's instead the range starting with Key.
+ template <bool Prefix>
+ ArrayRef<SubstringAndIndex>
+ indexLookup(StringRef Key, const std::vector<SubstringAndIndex> &Idx) const {
+ // Use pointers as iterators to ease conversion of result to ArrayRef.
+ auto Range = std::equal_range(Idx.data(), Idx.data() + Idx.size(), Key,
+ Less<Prefix>());
+ return {Range.first, Range.second};
+ }
+
+ // Performs a point lookup into a nonempty index, returning a longest match.
+ SubstringAndIndex
+ longestMatch(StringRef Key, const std::vector<SubstringAndIndex> &Idx) const {
+ assert(!Idx.empty());
+ // Longest substring match will be adjacent to a direct lookup.
+ auto It =
+ std::lower_bound(Idx.begin(), Idx.end(), SubstringAndIndex{Key, 0});
+ if (It == Idx.begin())
+ return *It;
+ if (It == Idx.end())
+ return *--It;
+ // Have to choose between It and It-1.
+ size_t Prefix = matchingPrefix(Key, It->first);
+ size_t PrevPrefix = matchingPrefix(Key, (It - 1)->first);
+ return Prefix > PrevPrefix ? *It : *--It;
+ }
+
+ std::vector<TransferableCommand> Commands; // Indexes point into this.
+ BumpPtrAllocator Arena;
+ StringSaver Strings;
+ // Indexes of candidates by certain substrings.
+ // String is lowercase and sorted, index points into OriginalPaths.
+ std::vector<SubstringAndIndex> Paths; // Full path.
+ std::vector<SubstringAndIndex> Stems; // Basename, without extension.
+ std::vector<SubstringAndIndex> Components; // Last path components.
+};
+
+// The actual CompilationDatabase wrapper delegates to its inner database.
+// If there is no match, it looks up a command in the CommandIndex and
+// transfers it to the file.
+class InterpolatingCompilationDatabase : public CompilationDatabase {
+public:
+ InterpolatingCompilationDatabase(std::unique_ptr<CompilationDatabase> Inner)
+ : Inner(std::move(Inner)), Index(allCommands()) {}
+
+ std::vector<CompileCommand>
+ getCompileCommands(StringRef Filename) const override {
+ auto Known = Inner->getCompileCommands(Filename);
+ if (Index.empty() || !Known.empty())
+ return Known;
+ bool TypeCertain;
+ auto Lang = guessType(Filename, &TypeCertain);
+ if (!TypeCertain)
+ Lang = types::TY_INVALID;
+ return {Index.chooseProxy(Filename, foldType(Lang)).transferTo(Filename)};
+ }
+
+ std::vector<std::string> getAllFiles() const override {
+ return Inner->getAllFiles();
+ }
+
+ std::vector<CompileCommand> getAllCompileCommands() const override {
+ return Inner->getAllCompileCommands();
+ }
+
+private:
+ std::vector<TransferableCommand> allCommands() {
+ std::vector<TransferableCommand> Result;
+ for (auto Command : Inner->getAllCompileCommands()) {
+ Result.emplace_back(std::move(Command));
+ if (Result.back().Type == types::TY_INVALID)
+ Result.pop_back();
+ }
+ return Result;
+ }
+
+ std::unique_ptr<CompilationDatabase> Inner;
+ CommandIndex Index;
+};
+
+} // namespace
+
+std::unique_ptr<CompilationDatabase>
+inferMissingCompileCommands(std::unique_ptr<CompilationDatabase> Inner) {
+ return llvm::make_unique<InterpolatingCompilationDatabase>(std::move(Inner));
+}
+
+} // namespace tooling
+} // namespace clang
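
A minimal sketch of wrapping an existing database with this inference (the
build directory is hypothetical):

  #include "clang/Tooling/CompilationDatabase.h"
  #include <memory>

  std::unique_ptr<clang::tooling::CompilationDatabase>
  loadWithInference(llvm::StringRef BuildDir) {
    std::string Error;
    auto Base =
        clang::tooling::CompilationDatabase::loadFromDirectory(BuildDir, Error);
    if (!Base)
      return nullptr; // Error describes what went wrong.
    // Headers and newly created files now receive borrowed commands.
    return clang::tooling::inferMissingCompileCommands(std::move(Base));
  }
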
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index f9a230eb63a0..2fa5fce279d6 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -1,4 +1,4 @@
-//===--- JSONCompilationDatabase.cpp - ------------------------------------===//
+//===- JSONCompilationDatabase.cpp ----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,22 +12,38 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/JSONCompilationDatabase.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
-#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <memory>
+#include <string>
#include <system_error>
+#include <tuple>
+#include <utility>
+#include <vector>
-namespace clang {
-namespace tooling {
+using namespace clang;
+using namespace tooling;
namespace {
-/// \brief A parser for escaped strings of command line arguments.
+/// A parser for escaped strings of command line arguments.
///
/// Assumes \-escaping for quoted arguments (see the documentation of
/// unescapeCommandLine(...)).
@@ -151,17 +167,23 @@ class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
}
};
-} // end namespace
+} // namespace
// Register the JSONCompilationDatabasePlugin with the
// CompilationDatabasePluginRegistry using this statically initialized variable.
static CompilationDatabasePluginRegistry::Add<JSONCompilationDatabasePlugin>
X("json-compilation-database", "Reads JSON formatted compilation databases");
+namespace clang {
+namespace tooling {
+
// This anchor is used to force the linker to link in the generated object file
// and thus register the JSONCompilationDatabasePlugin.
volatile int JSONAnchorSource = 0;
+} // namespace tooling
+} // namespace clang
+
std::unique_ptr<JSONCompilationDatabase>
JSONCompilationDatabase::loadFromFile(StringRef FilePath,
std::string &ErrorMessage,
@@ -201,11 +223,10 @@ JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
llvm::raw_string_ostream ES(Error);
StringRef Match = MatchTrie.findEquivalent(NativeFilePath, ES);
if (Match.empty())
- return std::vector<CompileCommand>();
- llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefI = IndexByFile.find(Match);
+ return {};
+ const auto CommandsRefI = IndexByFile.find(Match);
if (CommandsRefI == IndexByFile.end())
- return std::vector<CompileCommand>();
+ return {};
std::vector<CompileCommand> Commands;
getCommands(CommandsRefI->getValue(), Commands);
return Commands;
@@ -214,15 +235,8 @@ JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
std::vector<std::string>
JSONCompilationDatabase::getAllFiles() const {
std::vector<std::string> Result;
-
- llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefI = IndexByFile.begin();
- const llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
- CommandsRefEnd = IndexByFile.end();
- for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) {
- Result.push_back(CommandsRefI->first().str());
- }
-
+ for (const auto &CommandRef : IndexByFile)
+ Result.push_back(CommandRef.first().str());
return Result;
}
@@ -237,28 +251,26 @@ static std::vector<std::string>
nodeToCommandLine(JSONCommandLineSyntax Syntax,
const std::vector<llvm::yaml::ScalarNode *> &Nodes) {
SmallString<1024> Storage;
- if (Nodes.size() == 1) {
+ if (Nodes.size() == 1)
return unescapeCommandLine(Syntax, Nodes[0]->getValue(Storage));
- }
std::vector<std::string> Arguments;
- for (auto *Node : Nodes) {
+ for (const auto *Node : Nodes)
Arguments.push_back(Node->getValue(Storage));
- }
return Arguments;
}
void JSONCompilationDatabase::getCommands(
ArrayRef<CompileCommandRef> CommandsRef,
std::vector<CompileCommand> &Commands) const {
- for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
+ for (const auto &CommandRef : CommandsRef) {
SmallString<8> DirectoryStorage;
SmallString<32> FilenameStorage;
SmallString<32> OutputStorage;
- auto Output = std::get<3>(CommandsRef[I]);
+ auto Output = std::get<3>(CommandRef);
Commands.emplace_back(
- std::get<0>(CommandsRef[I])->getValue(DirectoryStorage),
- std::get<1>(CommandsRef[I])->getValue(FilenameStorage),
- nodeToCommandLine(Syntax, std::get<2>(CommandsRef[I])),
+ std::get<0>(CommandRef)->getValue(DirectoryStorage),
+ std::get<1>(CommandRef)->getValue(FilenameStorage),
+ nodeToCommandLine(Syntax, std::get<2>(CommandRef)),
Output ? Output->getValue(OutputStorage) : "");
}
}
@@ -274,13 +286,13 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
ErrorMessage = "Error while parsing YAML.";
return false;
}
- llvm::yaml::SequenceNode *Array = dyn_cast<llvm::yaml::SequenceNode>(Root);
+ auto *Array = dyn_cast<llvm::yaml::SequenceNode>(Root);
if (!Array) {
ErrorMessage = "Expected array.";
return false;
}
- for (auto& NextObject : *Array) {
- llvm::yaml::MappingNode *Object = dyn_cast<llvm::yaml::MappingNode>(&NextObject);
+ for (auto &NextObject : *Array) {
+ auto *Object = dyn_cast<llvm::yaml::MappingNode>(&NextObject);
if (!Object) {
ErrorMessage = "Expected object.";
return false;
@@ -290,8 +302,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
llvm::yaml::ScalarNode *File = nullptr;
llvm::yaml::ScalarNode *Output = nullptr;
for (auto& NextKeyValue : *Object) {
- llvm::yaml::ScalarNode *KeyString =
- dyn_cast<llvm::yaml::ScalarNode>(NextKeyValue.getKey());
+ auto *KeyString = dyn_cast<llvm::yaml::ScalarNode>(NextKeyValue.getKey());
if (!KeyString) {
ErrorMessage = "Expected strings as key.";
return false;
@@ -303,10 +314,8 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
ErrorMessage = "Expected value.";
return false;
}
- llvm::yaml::ScalarNode *ValueString =
- dyn_cast<llvm::yaml::ScalarNode>(Value);
- llvm::yaml::SequenceNode *SequenceString =
- dyn_cast<llvm::yaml::SequenceNode>(Value);
+ auto *ValueString = dyn_cast<llvm::yaml::ScalarNode>(Value);
+ auto *SequenceString = dyn_cast<llvm::yaml::SequenceNode>(Value);
if (KeyValue == "arguments" && !SequenceString) {
ErrorMessage = "Expected sequence as value.";
return false;
@@ -319,7 +328,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
} else if (KeyValue == "arguments") {
Command = std::vector<llvm::yaml::ScalarNode *>();
for (auto &Argument : *SequenceString) {
- auto Scalar = dyn_cast<llvm::yaml::ScalarNode>(&Argument);
+ auto *Scalar = dyn_cast<llvm::yaml::ScalarNode>(&Argument);
if (!Scalar) {
ErrorMessage = "Only strings are allowed in 'arguments'.";
return false;
@@ -370,6 +379,3 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
}
return true;
}
-
-} // end namespace tooling
-} // end namespace clang
diff --git a/lib/Tooling/Refactoring/AtomicChange.cpp b/lib/Tooling/Refactoring/AtomicChange.cpp
index e4cc6a5617b6..e8b0fdbeb662 100644
--- a/lib/Tooling/Refactoring/AtomicChange.cpp
+++ b/lib/Tooling/Refactoring/AtomicChange.cpp
@@ -15,7 +15,7 @@
LLVM_YAML_IS_SEQUENCE_VECTOR(clang::tooling::AtomicChange)
namespace {
-/// \brief Helper to (de)serialize an AtomicChange since we don't have direct
+/// Helper to (de)serialize an AtomicChange since we don't have direct
/// access to its data members.
/// Data members of a normalized AtomicChange can be directly mapped from/to
/// YAML string.
@@ -50,7 +50,7 @@ struct NormalizedAtomicChange {
namespace llvm {
namespace yaml {
-/// \brief Specialized MappingTraits to describe how an AtomicChange is
+/// Specialized MappingTraits to describe how an AtomicChange is
/// (de)serialized.
template <> struct MappingTraits<NormalizedAtomicChange> {
static void mapping(IO &Io, NormalizedAtomicChange &Doc) {
@@ -63,7 +63,7 @@ template <> struct MappingTraits<NormalizedAtomicChange> {
}
};
-/// \brief Specialized MappingTraits to describe how an AtomicChange is
+/// Specialized MappingTraits to describe how an AtomicChange is
/// (de)serialized.
template <> struct MappingTraits<clang::tooling::AtomicChange> {
static void mapping(IO &Io, clang::tooling::AtomicChange &Doc) {
diff --git a/lib/Tooling/Refactoring/Extract/Extract.cpp b/lib/Tooling/Refactoring/Extract/Extract.cpp
index b0847a740048..a12454cd29ef 100644
--- a/lib/Tooling/Refactoring/Extract/Extract.cpp
+++ b/lib/Tooling/Refactoring/Extract/Extract.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Implements the "extract" refactoring that can pull code into
+/// Implements the "extract" refactoring that can pull code into
/// new functions, methods or declare new variables.
///
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/Rename/RenamingAction.cpp b/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
index c8ed9dd19a8e..44ffae90efa7 100644
--- a/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
+++ b/lib/Tooling/Refactoring/Rename/RenamingAction.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Provides an action to rename every symbol at a point.
+/// Provides an action to rename every symbol at a point.
///
//===----------------------------------------------------------------------===//
diff --git a/lib/Tooling/Refactoring/Rename/USRFinder.cpp b/lib/Tooling/Refactoring/Rename/USRFinder.cpp
index 3bfb5bbe35e4..63f536c72a6f 100644
--- a/lib/Tooling/Refactoring/Rename/USRFinder.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRFinder.cpp
@@ -32,7 +32,7 @@ namespace {
class NamedDeclOccurrenceFindingVisitor
: public RecursiveSymbolVisitor<NamedDeclOccurrenceFindingVisitor> {
public:
- // \brief Finds the NamedDecl at a point in the source.
+ // Finds the NamedDecl at a point in the source.
// \param Point the location in the source to search for the NamedDecl.
explicit NamedDeclOccurrenceFindingVisitor(const SourceLocation Point,
const ASTContext &Context)
@@ -58,7 +58,7 @@ public:
const NamedDecl *getNamedDecl() const { return Result; }
private:
- // \brief Determines if the Point is within Start and End.
+ // Determines if the Point is within Start and End.
bool isPointWithin(const SourceLocation Start, const SourceLocation End) {
// FIXME: Add tests for Point == End.
return Point == Start || Point == End ||
diff --git a/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index 40b70d8a0590..2e7c9b0cc31b 100644
--- a/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Provides an action to find USR for the symbol at <offset>, as well as
+/// Provides an action to find USR for the symbol at <offset>, as well as
/// all additional USRs.
///
//===----------------------------------------------------------------------===//
@@ -55,7 +55,7 @@ const NamedDecl *getCanonicalSymbolDeclaration(const NamedDecl *FoundDecl) {
}
namespace {
-// \brief NamedDeclFindingConsumer should delegate finding USRs of given Decl to
+// NamedDeclFindingConsumer should delegate finding USRs of given Decl to
// AdditionalUSRFinder. AdditionalUSRFinder adds USRs of ctor and dtor if given
// Decl refers to class and adds USRs of all overridden methods if Decl refers
// to virtual method.
diff --git a/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index c77304a17332..fb06b91118b0 100644
--- a/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// \brief Methods for finding all instances of a USR. Our strategy is very
+/// Methods for finding all instances of a USR. Our strategy is very
/// simple; we just compare the USR at every relevant AST node with the one
/// provided.
///
@@ -50,7 +50,7 @@ bool IsValidEditLoc(const clang::SourceManager& SM, clang::SourceLocation Loc) {
return SM.getFileEntryForID(FileIdAndOffset.first) != nullptr;
}
-// \brief This visitor recursively searches for all instances of a USR in a
+// This visitor recursively searches for all instances of a USR in a
// translation unit and stores them for later usage.
class USRLocFindingASTVisitor
: public RecursiveSymbolVisitor<USRLocFindingASTVisitor> {
@@ -80,7 +80,7 @@ public:
// Non-visitors:
- /// \brief Returns a set of unique symbol occurrences. Duplicate or
+ /// Returns a set of unique symbol occurrences. Duplicate or
/// overlapping occurrences are erroneous and should be reported!
SymbolOccurrences takeOccurrences() { return std::move(Occurrences); }
@@ -524,7 +524,7 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
llvm::Error Err = ReplaceChange.replace(
SM, CharSourceRange::getTokenRange(Start, End), Text);
if (Err) {
- llvm::errs() << "Faile to add replacement to AtomicChange: "
+ llvm::errs() << "Failed to add replacement to AtomicChange: "
<< llvm::toString(std::move(Err)) << "\n";
return;
}
diff --git a/lib/Tooling/StandaloneExecution.cpp b/lib/Tooling/StandaloneExecution.cpp
index eea8e39d134c..7312baf9dc77 100644
--- a/lib/Tooling/StandaloneExecution.cpp
+++ b/lib/Tooling/StandaloneExecution.cpp
@@ -30,9 +30,11 @@ static ArgumentsAdjuster getDefaultArgumentsAdjusters() {
StandaloneToolExecutor::StandaloneToolExecutor(
const CompilationDatabase &Compilations,
llvm::ArrayRef<std::string> SourcePaths,
+ IntrusiveRefCntPtr<vfs::FileSystem> BaseFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
- : Tool(Compilations, SourcePaths), Context(&Results),
- ArgsAdjuster(getDefaultArgumentsAdjusters()) {
+ : Tool(Compilations, SourcePaths, std::move(PCHContainerOps),
+ std::move(BaseFS)),
+ Context(&Results), ArgsAdjuster(getDefaultArgumentsAdjusters()) {
// Use self-defined default argument adjusters instead of the default
// adjusters that come with the old `ClangTool`.
Tool.clearArgumentsAdjusters();
@@ -43,7 +45,7 @@ StandaloneToolExecutor::StandaloneToolExecutor(
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: OptionsParser(std::move(Options)),
Tool(OptionsParser->getCompilations(), OptionsParser->getSourcePathList(),
- PCHContainerOps),
+ std::move(PCHContainerOps)),
Context(&Results), ArgsAdjuster(getDefaultArgumentsAdjusters()) {
Tool.clearArgumentsAdjusters();
}
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index 4fbfa4f00473..a106154f4b28 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -1,4 +1,4 @@
-//===--- Tooling.cpp - Running clang standalone tools ---------------------===//
+//===- Tooling.cpp - Running clang standalone tools -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,90 +13,113 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Tooling.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Tooling/ArgumentsAdjusters.h"
#include "clang/Tooling/CompilationDatabase.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Config/llvm-config.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstring>
+#include <memory>
+#include <string>
+#include <system_error>
#include <utility>
+#include <vector>
#define DEBUG_TYPE "clang-tooling"
-namespace clang {
-namespace tooling {
+using namespace clang;
+using namespace tooling;
-ToolAction::~ToolAction() {}
+ToolAction::~ToolAction() = default;
-FrontendActionFactory::~FrontendActionFactory() {}
+FrontendActionFactory::~FrontendActionFactory() = default;
// FIXME: This file contains structural duplication with other parts of the
// code that sets up a compiler to run tools on it, and we should refactor
// it to be based on the same framework.
-/// \brief Builds a clang driver initialized for running clang tools.
-static clang::driver::Driver *newDriver(
- clang::DiagnosticsEngine *Diagnostics, const char *BinaryName,
+/// Builds a clang driver initialized for running clang tools.
+static driver::Driver *newDriver(
+ DiagnosticsEngine *Diagnostics, const char *BinaryName,
IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
- clang::driver::Driver *CompilerDriver =
- new clang::driver::Driver(BinaryName, llvm::sys::getDefaultTargetTriple(),
- *Diagnostics, std::move(VFS));
+ driver::Driver *CompilerDriver =
+ new driver::Driver(BinaryName, llvm::sys::getDefaultTargetTriple(),
+ *Diagnostics, std::move(VFS));
CompilerDriver->setTitle("clang_based_tool");
return CompilerDriver;
}
-/// \brief Retrieves the clang CC1 specific flags out of the compilation's jobs.
+/// Retrieves the clang CC1 specific flags out of the compilation's jobs.
///
-/// Returns NULL on error.
+/// Returns nullptr on error.
static const llvm::opt::ArgStringList *getCC1Arguments(
- clang::DiagnosticsEngine *Diagnostics,
- clang::driver::Compilation *Compilation) {
+ DiagnosticsEngine *Diagnostics, driver::Compilation *Compilation) {
// We expect to get back exactly one Command job, if we didn't something
// failed. Extract that job from the Compilation.
- const clang::driver::JobList &Jobs = Compilation->getJobs();
- if (Jobs.size() != 1 || !isa<clang::driver::Command>(*Jobs.begin())) {
+ const driver::JobList &Jobs = Compilation->getJobs();
+ if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
SmallString<256> error_msg;
llvm::raw_svector_ostream error_stream(error_msg);
Jobs.Print(error_stream, "; ", true);
- Diagnostics->Report(clang::diag::err_fe_expected_compiler_job)
+ Diagnostics->Report(diag::err_fe_expected_compiler_job)
<< error_stream.str();
return nullptr;
}
// The one job we find should be to invoke clang again.
- const clang::driver::Command &Cmd =
- cast<clang::driver::Command>(*Jobs.begin());
+ const auto &Cmd = cast<driver::Command>(*Jobs.begin());
if (StringRef(Cmd.getCreator().getName()) != "clang") {
- Diagnostics->Report(clang::diag::err_fe_expected_clang_command);
+ Diagnostics->Report(diag::err_fe_expected_clang_command);
return nullptr;
}
return &Cmd.getArguments();
}
-/// \brief Returns a clang build invocation initialized from the CC1 flags.
-clang::CompilerInvocation *newInvocation(
- clang::DiagnosticsEngine *Diagnostics,
- const llvm::opt::ArgStringList &CC1Args) {
+namespace clang {
+namespace tooling {
+
+/// Returns a clang build invocation initialized from the CC1 flags.
+CompilerInvocation *newInvocation(
+ DiagnosticsEngine *Diagnostics, const llvm::opt::ArgStringList &CC1Args) {
assert(!CC1Args.empty() && "Must at least contain the program name!");
- clang::CompilerInvocation *Invocation = new clang::CompilerInvocation;
- clang::CompilerInvocation::CreateFromArgs(
+ CompilerInvocation *Invocation = new CompilerInvocation;
+ CompilerInvocation::CreateFromArgs(
*Invocation, CC1Args.data() + 1, CC1Args.data() + CC1Args.size(),
*Diagnostics);
Invocation->getFrontendOpts().DisableFree = false;
@@ -104,7 +127,7 @@ clang::CompilerInvocation *newInvocation(
return Invocation;
}
-bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
+bool runToolOnCode(FrontendAction *ToolAction, const Twine &Code,
const Twine &FileName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
return runToolOnCodeWithArgs(ToolAction, Code, std::vector<std::string>(),
@@ -112,6 +135,9 @@ bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
std::move(PCHContainerOps));
}
+} // namespace tooling
+} // namespace clang
+
static std::vector<std::string>
getSyntaxOnlyToolArgs(const Twine &ToolName,
const std::vector<std::string> &ExtraArgs,
@@ -124,30 +150,42 @@ getSyntaxOnlyToolArgs(const Twine &ToolName,
return Args;
}
+namespace clang {
+namespace tooling {
+
bool runToolOnCodeWithArgs(
- clang::FrontendAction *ToolAction, const Twine &Code,
+ FrontendAction *ToolAction, const Twine &Code,
+ llvm::IntrusiveRefCntPtr<vfs::FileSystem> VFS,
const std::vector<std::string> &Args, const Twine &FileName,
const Twine &ToolName,
- std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- const FileContentMappings &VirtualMappedFiles) {
-
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
SmallString<16> FileNameStorage;
StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
- llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
- new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
- OverlayFileSystem->pushOverlay(InMemoryFileSystem);
+
llvm::IntrusiveRefCntPtr<FileManager> Files(
- new FileManager(FileSystemOptions(), OverlayFileSystem));
+ new FileManager(FileSystemOptions(), VFS));
ArgumentsAdjuster Adjuster = getClangStripDependencyFileAdjuster();
ToolInvocation Invocation(
getSyntaxOnlyToolArgs(ToolName, Adjuster(Args, FileNameRef), FileNameRef),
ToolAction, Files.get(),
std::move(PCHContainerOps));
+ return Invocation.run();
+}
+
+bool runToolOnCodeWithArgs(
+ FrontendAction *ToolAction, const Twine &Code,
+ const std::vector<std::string> &Args, const Twine &FileName,
+ const Twine &ToolName,
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ const FileContentMappings &VirtualMappedFiles) {
+ llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
+ new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ OverlayFileSystem->pushOverlay(InMemoryFileSystem);
SmallString<1024> CodeStorage;
- InMemoryFileSystem->addFile(FileNameRef, 0,
+ InMemoryFileSystem->addFile(FileName, 0,
llvm::MemoryBuffer::getMemBuffer(
Code.toNullTerminatedStringRef(CodeStorage)));
@@ -157,7 +195,8 @@ bool runToolOnCodeWithArgs(
llvm::MemoryBuffer::getMemBuffer(FilenameWithContent.second));
}
- return Invocation.run();
+ return runToolOnCodeWithArgs(ToolAction, Code, OverlayFileSystem, Args,
+ FileName, ToolName);
}
std::string getAbsolutePath(StringRef File) {
@@ -190,7 +229,7 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
TokenRef.startswith("--driver-mode="));
}
auto TargetMode =
- clang::driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
+ driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
if (!AlreadyHasMode && TargetMode.DriverMode) {
CommandLine.insert(++CommandLine.begin(), TargetMode.DriverMode);
}
@@ -201,6 +240,9 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
}
}
+} // namespace tooling
+} // namespace clang
+
namespace {
class SingleFrontendActionFactory : public FrontendActionFactory {
@@ -212,22 +254,20 @@ public:
FrontendAction *create() override { return Action; }
};
-}
+} // namespace
ToolInvocation::ToolInvocation(
std::vector<std::string> CommandLine, ToolAction *Action,
FileManager *Files, std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: CommandLine(std::move(CommandLine)), Action(Action), OwnsAction(false),
- Files(Files), PCHContainerOps(std::move(PCHContainerOps)),
- DiagConsumer(nullptr) {}
+ Files(Files), PCHContainerOps(std::move(PCHContainerOps)) {}
ToolInvocation::ToolInvocation(
std::vector<std::string> CommandLine, FrontendAction *FAction,
FileManager *Files, std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: CommandLine(std::move(CommandLine)),
Action(new SingleFrontendActionFactory(FAction)), OwnsAction(true),
- Files(Files), PCHContainerOps(std::move(PCHContainerOps)),
- DiagConsumer(nullptr) {}
+ Files(Files), PCHContainerOps(std::move(PCHContainerOps)) {}
ToolInvocation::~ToolInvocation() {
if (OwnsAction)
@@ -254,23 +294,22 @@ bool ToolInvocation::run() {
TextDiagnosticPrinter DiagnosticPrinter(
llvm::errs(), &*DiagOpts);
DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
- const std::unique_ptr<clang::driver::Driver> Driver(
+ const std::unique_ptr<driver::Driver> Driver(
newDriver(&Diagnostics, BinaryName, Files->getVirtualFileSystem()));
// Since the input might only be virtual, don't check whether it exists.
Driver->setCheckInputsExist(false);
- const std::unique_ptr<clang::driver::Compilation> Compilation(
+ const std::unique_ptr<driver::Compilation> Compilation(
Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
if (!Compilation)
return false;
const llvm::opt::ArgStringList *const CC1Args = getCC1Arguments(
&Diagnostics, Compilation.get());
- if (!CC1Args) {
+ if (!CC1Args)
return false;
- }
- std::unique_ptr<clang::CompilerInvocation> Invocation(
+ std::unique_ptr<CompilerInvocation> Invocation(
newInvocation(&Diagnostics, *CC1Args));
// FIXME: remove this when all users have migrated!
for (const auto &It : MappedFileContents) {
@@ -285,8 +324,8 @@ bool ToolInvocation::run() {
}
bool ToolInvocation::runInvocation(
- const char *BinaryName, clang::driver::Compilation *Compilation,
- std::shared_ptr<clang::CompilerInvocation> Invocation,
+ const char *BinaryName, driver::Compilation *Compilation,
+ std::shared_ptr<CompilerInvocation> Invocation,
std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
// Show the invocation, with -v.
if (Invocation->getHeaderSearchOpts().Verbose) {
@@ -304,7 +343,7 @@ bool FrontendActionFactory::runInvocation(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagConsumer) {
// Create a compiler instance to handle the actual work.
- clang::CompilerInstance Compiler(std::move(PCHContainerOps));
+ CompilerInstance Compiler(std::move(PCHContainerOps));
Compiler.setInvocation(std::move(Invocation));
Compiler.setFileManager(Files);
@@ -328,20 +367,20 @@ bool FrontendActionFactory::runInvocation(
ClangTool::ClangTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths,
- std::shared_ptr<PCHContainerOperations> PCHContainerOps)
+ std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ IntrusiveRefCntPtr<vfs::FileSystem> BaseFS)
: Compilations(Compilations), SourcePaths(SourcePaths),
PCHContainerOps(std::move(PCHContainerOps)),
- OverlayFileSystem(new vfs::OverlayFileSystem(vfs::getRealFileSystem())),
+ OverlayFileSystem(new vfs::OverlayFileSystem(std::move(BaseFS))),
InMemoryFileSystem(new vfs::InMemoryFileSystem),
- Files(new FileManager(FileSystemOptions(), OverlayFileSystem)),
- DiagConsumer(nullptr) {
+ Files(new FileManager(FileSystemOptions(), OverlayFileSystem)) {
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
appendArgumentsAdjuster(getClangStripOutputAdjuster());
appendArgumentsAdjuster(getClangSyntaxOnlyAdjuster());
appendArgumentsAdjuster(getClangStripDependencyFileAdjuster());
}
-ClangTool::~ClangTool() {}
+ClangTool::~ClangTool() = default;
void ClangTool::mapVirtualFile(StringRef FilePath, StringRef Content) {
MappedFileContents.push_back(std::make_pair(FilePath, Content));
@@ -372,10 +411,14 @@ int ClangTool::run(ToolAction *Action) {
// This just needs to be some symbol in the binary.
static int StaticSymbol;
- llvm::SmallString<128> InitialDirectory;
- if (std::error_code EC = llvm::sys::fs::current_path(InitialDirectory))
+ std::string InitialDirectory;
+ if (llvm::ErrorOr<std::string> CWD =
+ OverlayFileSystem->getCurrentWorkingDirectory()) {
+ InitialDirectory = std::move(*CWD);
+ } else {
llvm::report_fatal_error("Cannot detect current path: " +
- Twine(EC.message()));
+ Twine(CWD.getError().message()));
+ }
// First insert all absolute paths into the in-memory VFS. These are global
// for all compile commands.
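(Editorial note: the hunk above replaces llvm::sys::fs::current_path with the VFS's own working directory, handled through llvm::ErrorOr. A minimal sketch of that success/error pattern follows; lookupCwd is a hypothetical stand-in for vfs::FileSystem::getCurrentWorkingDirectory().)

#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <system_error>

// Hypothetical stand-in; returns either a value or a std::error_code.
static llvm::ErrorOr<std::string> lookupCwd(bool Ok) {
  if (!Ok)
    return std::make_error_code(std::errc::no_such_file_or_directory);
  return std::string("/work");
}

int main() {
  // Same shape as the hunk: the condition-declaration is visible in both
  // branches, so the else arm can still read the error code.
  if (llvm::ErrorOr<std::string> CWD = lookupCwd(true))
    llvm::outs() << "cwd: " << *CWD << "\n";
  else
    llvm::errs() << "cannot detect current path: "
                 << CWD.getError().message() << "\n";
  return 0;
}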
@@ -387,6 +430,7 @@ int ClangTool::run(ToolAction *Action) {
llvm::MemoryBuffer::getMemBuffer(MappedFile.second));
bool ProcessingFailed = false;
+ bool FileSkipped = false;
for (const auto &SourcePath : SourcePaths) {
std::string File(getAbsolutePath(SourcePath));
@@ -400,12 +444,8 @@ int ClangTool::run(ToolAction *Action) {
std::vector<CompileCommand> CompileCommandsForFile =
Compilations.getCompileCommands(File);
if (CompileCommandsForFile.empty()) {
- // FIXME: There are two use cases here: doing a fuzzy
- // "find . -name '*.cc' |xargs tool" match, where as a user I don't care
- // about the .cc files that were not found, and the use case where I
- // specify all files I want to run over explicitly, where this should
- // be an error. We'll want to add an option for this.
llvm::errs() << "Skipping " << File << ". Compile command not found.\n";
+ FileSkipped = true;
continue;
}
for (CompileCommand &CompileCommand : CompileCommandsForFile) {
@@ -448,7 +488,7 @@ int ClangTool::run(ToolAction *Action) {
// FIXME: We need a callback mechanism for the tool writer to output a
// customized message for each file.
- DEBUG({ llvm::dbgs() << "Processing: " << File << ".\n"; });
+ LLVM_DEBUG({ llvm::dbgs() << "Processing: " << File << ".\n"; });
ToolInvocation Invocation(std::move(CommandLine), Action, Files.get(),
PCHContainerOps);
Invocation.setDiagnosticConsumer(DiagConsumer);
@@ -465,7 +505,7 @@ int ClangTool::run(ToolAction *Action) {
Twine(InitialDirectory) + "\n!");
}
}
- return ProcessingFailed ? 1 : 0;
+ return ProcessingFailed ? 1 : (FileSkipped ? 2 : 0);
}
namespace {
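(Editorial note: the run() hunk above widens the tool's exit status so that a missing compile command is distinguishable from a hard failure: 0 on success, 1 on processing errors, 2 when at least one file was skipped. A minimal sketch of interpreting those codes follows; the helper name and messages are assumptions, and Tool and Factory are assumed to be configured by the caller.)

#include "clang/Tooling/Tooling.h"
#include "llvm/Support/raw_ostream.h"

int reportRunResult(clang::tooling::ClangTool &Tool,
                    clang::tooling::FrontendActionFactory &Factory) {
  int Ret = Tool.run(&Factory); // 0: OK, 1: failure, 2: file(s) skipped
  if (Ret == 2)
    llvm::errs() << "warning: some inputs had no compile command and were "
                    "skipped\n";
  else if (Ret == 1)
    llvm::errs() << "error: at least one file failed to process\n";
  return Ret;
}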
@@ -493,13 +533,17 @@ public:
return true;
}
};
-}
+
+} // namespace
int ClangTool::buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs) {
ASTBuilderAction Action(ASTs);
return run(&Action);
}
+namespace clang {
+namespace tooling {
+
std::unique_ptr<ASTUnit>
buildASTFromCode(const Twine &Code, const Twine &FileName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
@@ -540,5 +584,5 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
return std::move(ASTs[0]);
}
-} // end namespace tooling
-} // end namespace clang
+} // namespace tooling
+} // namespace clang
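(Editorial note: the Tooling.cpp change above splits runToolOnCodeWithArgs so that callers can pass their own VFS; unlike the convenience overload, the VFS-taking variant does not map the code into the file system itself. A minimal sketch follows; the file contents, paths, and tool name are assumptions for illustration, only the overload's signature comes from the diff.)

#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

using namespace clang;

bool parseFromVirtualFS() {
  IntrusiveRefCntPtr<vfs::OverlayFileSystem> FS(
      new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
  IntrusiveRefCntPtr<vfs::InMemoryFileSystem> MemFS(
      new vfs::InMemoryFileSystem);
  FS->pushOverlay(MemFS);
  // The caller must place the input file into the VFS before invoking the
  // tool; the convenience overload above does exactly this internally.
  MemFS->addFile("input.cc", /*ModificationTime=*/0,
                 llvm::MemoryBuffer::getMemBuffer("int x = 42;"));
  return tooling::runToolOnCodeWithArgs(
      new SyntaxOnlyAction, "int x = 42;", FS, {"-std=c++11"}, "input.cc",
      "sketch-tool", std::make_shared<PCHContainerOperations>());
}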